import os
os.environ["BING_SUBSCRIPTION_KEY"] = "<key>"
os.environ["BING_SEARCH_URL"] = "https://api.bing.microsoft.com/v7.0/search"
from langchain_community.utilities import BingSearchAPIWrapper
search = BingSearchAPIWrapper()
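# A minimal usage sketch (assumed): `run` returns concatenated result snippets.
search.run("python")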
get_ipython().run_line_magic('pip', 'install --upgrade --quiet amadeus > /dev/null')
import os
os.environ["AMADEUS_CLIENT_ID"] = "CLIENT_ID"
os.environ["AMADEUS_CLIENT_SECRET"] = "CLIENT_SECRET"
os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY"
from langchain_community.agent_toolkits.amadeus.toolkit import AmadeusToolkit
toolkit = AmadeusToolkit()
tools = toolkit.get_tools()
from langchain_community.llms import HuggingFaceHub
os.environ["HUGGINGFACEHUB_API_TOKEN"] = "YOUR_HF_API_TOKEN"
llm = HuggingFaceHub(
repo_id="tiiuae/falcon-7b-instruct",
model_kwargs={"temperature": 0.5, "max_length": 64},
)
toolkit_hf = AmadeusToolkit(llm=llm)
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain.agents.output_parsers import ReActJsonSingleInputOutputParser
from langchain.tools.render import render_text_description_and_args
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0)
prompt = hub.pull("hwchase17/react-json")
agent = create_react_agent(
llm,
tools,
prompt,
tools_renderer=render_text_description_and_args,
    output_parser=ReActJsonSingleInputOutputParser(),
)
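# A sketch (assumed): wrap the agent in an AgentExecutor (imported above) before
# invoking it; the flight question is illustrative.
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke({"input": "What flights are available from JFK to LAX?"})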
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pyvespa')
from vespa.package import ApplicationPackage, Field, RankProfile
app_package = ApplicationPackage(name="testapp")
app_package.schema.add_fields(
Field(
name="text", type="string", indexing=["index", "summary"], index="enable-bm25"
),
Field(
name="embedding",
type="tensor<float>(x[384])",
indexing=["attribute", "summary"],
attribute=["distance-metric: angular"],
),
)
app_package.schema.add_rank_profile(
RankProfile(
name="default",
first_phase="closeness(field, embedding)",
inputs=[("query(query_embedding)", "tensor<float>(x[384])")],
)
)
from vespa.deployment import VespaDocker
vespa_docker = VespaDocker()
vespa_app = vespa_docker.deploy(application_package=app_package)
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
from langchain_community.embeddings.sentence_transformer import (
SentenceTransformerEmbeddings,
)
embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
from langchain_community.document_loaders.recursive_url_loader import RecursiveUrlLoader
from bs4 import BeautifulSoup as Soup
url = "https://docs.python.org/3.9/"
loader = RecursiveUrlLoader(
url=url, max_depth=2, extractor=lambda x: Soup(x, "html.parser").text
)
docs = loader.load()
docs[0].page_content[:50]
docs[-1].metadata
url = "https://js.langchain.com/docs/modules/memory/integrations/"
loader = RecursiveUrlLoader(url=url)
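# Assumed usage, mirroring the load step above.
docs = loader.load()
len(docs)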
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
from langchain_core.tools import tool
@tool
def multiply(first_int: int, second_int: int) -> int:
"""Multiply two integers together."""
return first_int * second_int
@tool
def add(first_int: int, second_int: int) -> int:
"Add two integers."
return first_int + second_int
@tool
def exponentiate(base: int, exponent: int) -> int:
"Exponentiate the base to the exponent power."
return base**exponent
from operator import itemgetter
from typing import Union
from langchain.output_parsers import JsonOutputToolsParser
from langchain_core.runnables import (
Runnable,
RunnableLambda,
RunnableMap,
RunnablePassthrough,
)
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="gpt-3.5-turbo-1106")
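# A sketch (assumed): bind the tools defined above to the model and parse the
# emitted tool calls; the arithmetic question is illustrative.
model_with_tools = model.bind_tools([multiply, add, exponentiate])
chain = model_with_tools | JsonOutputToolsParser()
chain.invoke("What's 4 times 23?")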
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_openai import ChatOpenAI
api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)
tool = WikipediaQueryRun(api_wrapper=api_wrapper)
tools = [tool]
prompt = hub.pull("hwchase17/react")
llm = ChatOpenAI(temperature=0)
agent = create_react_agent(llm, tools, prompt)
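# A sketch (assumed): run the ReAct agent through an AgentExecutor.
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke({"input": "What is LangChain?"})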
from langchain.chains import LLMMathChain
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
from langchain_core.tools import Tool
from langchain_experimental.plan_and_execute import (
PlanAndExecute,
load_agent_executor,
load_chat_planner,
)
from langchain_openai import ChatOpenAI, OpenAI
search = DuckDuckGoSearchAPIWrapper()
llm = OpenAI(temperature=0)
llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True)
tools = [
Tool(
name="Search",
func=search.run,
description="useful for when you need to answer questions about current events",
),
Tool(
name="Calculator",
func=llm_math_chain.run,
description="useful for when you need to answer questions about math",
),
]
model = ChatOpenAI(temperature=0)
planner = load_chat_planner(model)
executor = load_agent_executor(model, tools, verbose=True)
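# A sketch (assumed): combine planner and executor, then run an illustrative question.
agent = PlanAndExecute(planner=planner, executor=executor)
agent.run("What is the current population of France? What is that number raised to the 0.43 power?")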
from langchain_openai import OpenAIEmbeddings
from langchain_pinecone import PineconeVectorStore
all_documents = {
"doc1": "Climate change and economic impact.",
"doc2": "Public health concerns due to climate change.",
"doc3": "Climate change: A social perspective.",
"doc4": "Technological solutions to climate change.",
"doc5": "Policy changes needed to combat climate change.",
"doc6": "Climate change and its impact on biodiversity.",
"doc7": "Climate change: The science and models.",
"doc8": "Global warming: A subset of climate change.",
"doc9": "How climate change affects daily weather.",
"doc10": "The history of climate change activism.",
}
vectorstore = PineconeVectorStore.from_texts(
list(all_documents.values()), OpenAIEmbeddings(), index_name="rag-fusion"
)
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI
from langchain import hub
prompt = hub.pull("langchain-ai/rag-fusion-query-generation")
generate_queries = (
prompt | ChatOpenAI(temperature=0) | StrOutputParser() | (lambda x: x.split("\n"))
)
original_query = "impact of climate change"
vectorstore = PineconeVectorStore.from_existing_index("rag-fusion", OpenAIEmbeddings())
retriever = vectorstore.as_retriever()
from langchain.load import dumps, loads
def reciprocal_rank_fusion(results: list[list], k=60):
fused_scores = {}
for docs in results:
for rank, doc in enumerate(docs):
            doc_str = dumps(doc)
            fused_scores[doc_str] = fused_scores.get(doc_str, 0) + 1 / (rank + k)
    return [
        (loads(doc), score)
        for doc, score in sorted(fused_scores.items(), key=lambda x: x[1], reverse=True)
    ]
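# A sketch (assumed): fan the generated queries out through the retriever and
# fuse the per-query rankings; "original_query" is the hub prompt's input key.
chain = generate_queries | retriever.map() | reciprocal_rank_fusion
chain.invoke({"original_query": original_query})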
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pyairtable')
from langchain_community.document_loaders import AirtableLoader
api_key = "xxx"
base_id = "xxx"
table_id = "xxx"
loader = AirtableLoader(api_key, table_id, base_id)
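# Assumed usage: load the table rows as Documents.
docs = loader.load()
len(docs)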
get_ipython().run_line_magic('pip', 'install --upgrade --quiet airbyte-source-salesforce')
from langchain_community.document_loaders.airbyte import AirbyteSalesforceLoader
config = {
}
loader = AirbyteSalesforceLoader(
config=config, stream_name="asset"
) # check the documentation linked above for a list of all streams
docs = loader.load()
docs_iterator = loader.lazy_load()
from langchain.docstore.document import Document
def handle_record(record, id):
    return Document(page_content=record.data["title"], metadata=record.data)
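# A sketch (assumed): pass the record handler so each Airbyte record is mapped
# to a Document; "title" is an illustrative field name.
loader = AirbyteSalesforceLoader(
    config=config, stream_name="asset", record_handler=handle_record
)
docs = loader.load()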
get_ipython().system(' pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)')
get_ipython().system(' pip install "unstructured[all-docs]==0.10.19" pillow pydantic lxml pillow matplotlib tiktoken open_clip_torch torch')
path = "/Users/rlm/Desktop/cpi/"
from langchain_community.document_loaders import PyPDFLoader
loader = PyPDFLoader(path + "cpi.pdf")
pdf_pages = loader.load()
from langchain_text_splitters import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits_pypdf = text_splitter.split_documents(pdf_pages)
all_splits_pypdf_texts = [d.page_content for d in all_splits_pypdf]
from unstructured.partition.pdf import partition_pdf
raw_pdf_elements = partition_pdf(
filename=path + "cpi.pdf",
extract_images_in_pdf=True,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path=path,
)
tables = []
texts = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
tables.append(str(element))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
texts.append(str(element))
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
baseline = Chroma.from_texts(
texts=all_splits_pypdf_texts,
collection_name="baseline",
embedding=OpenAIEmbeddings(),
)
retriever_baseline = baseline.as_retriever()
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
prompt_text = """You are an assistant tasked with summarizing tables and text for retrieval. \
These summaries will be embedded and used to retrieve the raw text or table elements. \
Give a concise summary of the table or text that is well optimized for retrieval. Table or text: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
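# A sketch (assumed): the same summarization-chain pattern used elsewhere in
# these snippets, applied to the retrieval-optimized prompt above.
summarize_chain = (
    {"element": lambda x: x} | prompt | ChatOpenAI(temperature=0, model="gpt-4") | StrOutputParser()
)
text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5})
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5})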
get_ipython().run_line_magic('pip', 'install --upgrade --quiet sqlite-vss')
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.sentence_transformer import (
SentenceTransformerEmbeddings,
)
from langchain_community.vectorstores import SQLiteVSS
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
texts = [doc.page_content for doc in docs]
embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
db = SQLiteVSS.from_texts(
texts=texts,
embedding=embedding_function,
table="state_union",
db_file="/tmp/vss.db",
)
query = "What did the president say about Ketanji Brown Jackson"
data = db.similarity_search(query)
data[0].page_content
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.sentence_transformer import (
SentenceTransformerEmbeddings,
)
from langchain_community.vectorstores import SQLiteVSS
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
texts = [doc.page_content for doc in docs]
embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
connection = SQLiteVSS.create_connection(db_file="/tmp/vss.db")
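# A sketch (assumed): reuse the connection to open the existing table.
db = SQLiteVSS(
    table="state_union", embedding=embedding_function, connection=connection
)
db.similarity_search("What did the president say about Ketanji Brown Jackson")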
from typing import List
from langchain.output_parsers import YamlOutputParser
from langchain.prompts import PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0)
class Joke(BaseModel):
setup: str = Field(description="question to set up a joke")
    punchline: str = Field(description="answer to resolve the joke")
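# A sketch (assumed): parse model output into the Joke schema via the YAML
# parser imported above, mirroring the Pydantic-parser example later on.
parser = YamlOutputParser(pydantic_object=Joke)
prompt = PromptTemplate(
    template="Answer the user query.\n{format_instructions}\n{query}\n",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)
chain = prompt | model | parser
chain.invoke({"query": "Tell me a joke."})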
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-pinecone langchain-openai langchain')
from langchain_community.document_loaders import TextLoader
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet lark')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet chromadb')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
REGION = "us-central1" # @param {type:"string"}
INSTANCE = "test-instance" # @param {type:"string"}
DATABASE = "test" # @param {type:"string"}
TABLE_NAME = "test-default" # @param {type:"string"}
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-cloud-sql-mysql')
PROJECT_ID = "my-project-id" # @param {type:"string"}
get_ipython().system('gcloud config set project {PROJECT_ID}')
from google.colab import auth
auth.authenticate_user()
get_ipython().system('gcloud services enable sqladmin.googleapis.com')
from langchain_google_cloud_sql_mysql import MySQLEngine
engine = MySQLEngine.from_instance(
project_id=PROJECT_ID, region=REGION, instance=INSTANCE, database=DATABASE
)
engine.init_document_table(TABLE_NAME, overwrite_existing=True)
from langchain_core.documents import Document
from langchain_google_cloud_sql_mysql import MySQLDocumentSaver
test_docs = [
Document(
page_content="Apple Granny Smith 150 0.99 1",
metadata={"fruit_id": 1},
),
Document(
page_content="Banana Cavendish 200 0.59 0",
metadata={"fruit_id": 2},
),
Document(
page_content="Orange Navel 80 1.29 1",
metadata={"fruit_id": 3},
),
]
saver = MySQLDocumentSaver(engine=engine, table_name=TABLE_NAME)
saver.add_documents(test_docs)
from langchain_google_cloud_sql_mysql import MySQLLoader
loader = MySQLLoader(engine=engine, table_name=TABLE_NAME)
docs = loader.lazy_load()
for doc in docs:
print("Loaded documents:", doc)
from langchain_google_cloud_sql_mysql import MySQLLoader
loader = MySQLLoader(
    engine=engine,
    # Hypothetical filter value: the original query is truncated at this point.
    query=f"select * from `{TABLE_NAME}` where JSON_EXTRACT(langchain_metadata, '$.fruit_id') = 1;",
)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet flashrank')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss')
# or, depending on the platform:
get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss_cpu')
def pretty_print_docs(docs):
print(
f"\n{'-' * 100}\n".join(
[f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)]
)
)
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
documents = TextLoader(
"../../modules/state_of_the_union.txt",
).load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
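# A sketch (assumed): build a FAISS retriever over the split documents for the
# reranking steps that would follow.
texts = text_splitter.split_documents(documents)
retriever = FAISS.from_documents(texts, OpenAIEmbeddings()).as_retriever(
    search_kwargs={"k": 20}
)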
get_ipython().run_line_magic('pip', 'install -qU langchain langchain-openai langchain-anthropic langchain-community wikipedia')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
os.environ["ANTHROPIC_API_KEY"] = getpass.getpass()
from langchain_community.retrievers import WikipediaRetriever
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
wiki = WikipediaRetriever(top_k_results=6, doc_content_chars_max=2000)
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're a helpful AI assistant. Given a user question and some Wikipedia article snippets, answer the user question. If none of the articles answer the question, just say you don't know.\n\nHere are the Wikipedia articles:{context}",
),
("human", "{question}"),
]
)
prompt.pretty_print()
from operator import itemgetter
from typing import List
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import (
RunnableLambda,
RunnableParallel,
RunnablePassthrough,
)
def format_docs(docs: List[Document]) -> str:
"""Convert Documents to a single string.:"""
formatted = [
f"Article Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}"
for doc in docs
]
return "\n\n" + "\n\n".join(formatted)
format = itemgetter("docs") | RunnableLambda(format_docs)
answer = prompt | llm | StrOutputParser()
chain = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format)
.assign(answer=answer)
.pick(["answer", "docs"])
)
chain.invoke("How fast are cheetahs?")
from langchain_core.pydantic_v1 import BaseModel, Field
class cited_answer(BaseModel):
"""Answer the user question based only on the given sources, and cite the sources used."""
answer: str = Field(
...,
description="The answer to the user question, which is based only on the given sources.",
)
citations: List[int] = Field(
...,
description="The integer IDs of the SPECIFIC sources which justify the answer.",
)
llm_with_tool = llm.bind_tools(
[cited_answer],
tool_choice="cited_answer",
)
example_q = """What Brian's height?
Source: 1
Information: Suzy is 6'2"
Source: 2
Information: Jeremiah is blonde
Source: 3
Information: Brian is 3 inches shorted than Suzy"""
llm_with_tool.invoke(example_q)
from langchain.output_parsers.openai_tools import JsonOutputKeyToolsParser
output_parser = JsonOutputKeyToolsParser(key_name="cited_answer", return_single=True)
(llm_with_tool | output_parser).invoke(example_q)
def format_docs_with_id(docs: List[Document]) -> str:
formatted = [
f"Source ID: {i}\nArticle Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}"
for i, doc in enumerate(docs)
]
return "\n\n" + "\n\n".join(formatted)
format_1 = itemgetter("docs") | RunnableLambda(format_docs_with_id)
answer_1 = prompt | llm_with_tool | output_parser
chain_1 = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format_1)
.assign(cited_answer=answer_1)
.pick(["cited_answer", "docs"])
)
chain_1.invoke("How fast are cheetahs?")
class Citation(BaseModel):
source_id: int = Field(
...,
description="The integer ID of a SPECIFIC source which justifies the answer.",
)
quote: str = Field(
...,
description="The VERBATIM quote from the specified source that justifies the answer.",
)
class quoted_answer(BaseModel):
"""Answer the user question based only on the given sources, and cite the sources used."""
answer: str = Field(
...,
description="The answer to the user question, which is based only on the given sources.",
)
citations: List[Citation] = Field(
..., description="Citations from the given sources that justify the answer."
)
output_parser_2 = JsonOutputKeyToolsParser(key_name="quoted_answer", return_single=True)
llm_with_tool_2 = llm.bind_tools(
[quoted_answer],
tool_choice="quoted_answer",
)
format_2 = itemgetter("docs") | RunnableLambda(format_docs_with_id)
answer_2 = prompt | llm_with_tool_2 | output_parser_2
chain_2 = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format_2)
.assign(quoted_answer=answer_2)
.pick(["quoted_answer", "docs"])
)
chain_2.invoke("How fast are cheetahs?")
from langchain_anthropic import ChatAnthropicMessages
anthropic = ChatAnthropicMessages(model_name="claude-instant-1.2")
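# Assumed usage: a simple invocation of the chat model.
anthropic.invoke("What is the capital of France?")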
from langchain_community.document_loaders import AirbyteJSONLoader
get_ipython().system('ls /tmp/airbyte_local/json_data/')
loader = AirbyteJSONLoader("/tmp/airbyte_local/json_data/_airbyte_raw_pokemon.jsonl")
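# Assumed usage: load the raw Airbyte JSON records as Documents.
data = loader.load()
print(data[0].page_content[:500])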
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai.chat_models import ChatOpenAI
model = ChatOpenAI()
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're an assistant who's good at {ability}. Respond in 20 words or fewer",
),
| MessagesPlaceholder(variable_name="history") | langchain_core.prompts.MessagesPlaceholder |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet gpt4all > /dev/null')
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import GPT4All
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
local_path = (
"./models/ggml-gpt4all-l13b-snoozy.bin" # replace with your desired local file path
)
callbacks = [StreamingStdOutCallbackHandler()]
llm = GPT4All(model=local_path, callbacks=callbacks, verbose=True)
llm = GPT4All(model=local_path, backend="gptj", callbacks=callbacks, verbose=True)
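# A sketch (assumed): run a question through the local model via LLMChain.
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What NFL team won the Super Bowl in the year Justin Bieber was born?"
llm_chain.run(question)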
get_ipython().run_line_magic('pip', 'install --upgrade huggingface-hub')
from langchain_community.embeddings import HuggingFaceHubEmbeddings
embeddings = HuggingFaceHubEmbeddings(model="http://localhost:8080")
from langchain_community.utilities import SerpAPIWrapper
search = SerpAPIWrapper()
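# Assumed usage: run a query through SerpAPI.
search.run("Obama's first name?")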
get_ipython().run_line_magic('pip', 'install --upgrade --quiet lark opensearch-py')
import getpass
import os
from langchain_community.vectorstores import OpenSearchVectorSearch
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
embeddings = OpenAIEmbeddings()
from langchain.chains import create_citation_fuzzy_match_chain
from langchain_openai import ChatOpenAI
question = "What did the author do during college?"
context = """
My name is Jason Liu, and I grew up in Toronto Canada but I was born in China.
I went to an arts highschool but in university I studied Computational Mathematics and physics.
As part of coop I worked at many companies including Stitchfix, Facebook.
I also started the Data Science club at the University of Waterloo and I was the president of the club for 2 years.
"""
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
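# A sketch (assumed): build the fuzzy-citation chain and run it over the context.
chain = create_citation_fuzzy_match_chain(llm)
result = chain.run(question=question, context=context)
print(result)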
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-core databricks-vectorsearch langchain-openai tiktoken')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.document_loaders import TextLoader
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
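# A sketch (assumed): the same split-and-embed pattern used throughout these snippets.
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()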
get_ipython().system(' pip install langchain unstructured[all-docs] pydantic lxml')
path = "/Users/rlm/Desktop/Papers/LLaVA/"
from typing import Any
from pydantic import BaseModel
from unstructured.partition.pdf import partition_pdf
raw_pdf_elements = partition_pdf(
filename=path + "LLaVA.pdf",
extract_images_in_pdf=True,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path=path,
)
category_counts = {}
for element in raw_pdf_elements:
category = str(type(element))
if category in category_counts:
category_counts[category] += 1
else:
category_counts[category] = 1
unique_categories = set(category_counts.keys())
category_counts
class Element(BaseModel):
type: str
text: Any
categorized_elements = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
categorized_elements.append(Element(type="table", text=str(element)))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
categorized_elements.append(Element(type="text", text=str(element)))
table_elements = [e for e in categorized_elements if e.type == "table"]
print(len(table_elements))
text_elements = [e for e in categorized_elements if e.type == "text"]
print(len(text_elements))
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
prompt_text = """You are an assistant tasked with summarizing tables and text. \
Give a concise summary of the table or text. Table or text chunk: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
model = ChatOpenAI(temperature=0, model="gpt-4")
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
texts = [i.text for i in text_elements]
text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5})
tables = [i.text for i in table_elements]
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5})
get_ipython().run_cell_magic('bash', '', '\n# Define the directory containing the images\nIMG_DIR=~/Desktop/Papers/LLaVA/\n\n# Loop through each image in the directory\nfor img in "${IMG_DIR}"*.jpg; do\n # Extract the base name of the image without extension\n base_name=$(basename "$img" .jpg)\n\n # Define the output file name based on the image name\n output_file="${IMG_DIR}${base_name}.txt"\n\n # Execute the command and save the output to the defined output file\n /Users/rlm/Desktop/Code/llama.cpp/bin/llava -m ../models/llava-7b/ggml-model-q5_k.gguf --mmproj ../models/llava-7b/mmproj-model-f16.gguf --temp 0.1 -p "Describe the image in detail. Be specific about graphs, such as bar plots." --image "$img" > "$output_file"\n\ndone\n')
import glob
import os
file_paths = glob.glob(os.path.expanduser(os.path.join(path, "*.txt")))
img_summaries = []
for file_path in file_paths:
with open(file_path, "r") as file:
img_summaries.append(file.read())
logging_header = "clip_model_load: total allocated memory: 201.27 MB\n\n"
cleaned_img_summary = [s.split(logging_header, 1)[1].strip() for s in img_summaries]
import uuid
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
vectorstore = Chroma(collection_name="summaries", embedding_function=OpenAIEmbeddings())
store = InMemoryStore()
id_key = "doc_id"
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=store,
id_key=id_key,
)
doc_ids = [str(uuid.uuid4()) for _ in texts]
summary_texts = [
Document(page_content=s, metadata={id_key: doc_ids[i]})
for i, s in enumerate(text_summaries)
]
retriever.vectorstore.add_documents(summary_texts)
retriever.docstore.mset(list(zip(doc_ids, texts)))
table_ids = [str(uuid.uuid4()) for _ in tables]
summary_tables = [
Document(page_content=s, metadata={id_key: table_ids[i]})
for i, s in enumerate(table_summaries)
]
retriever.vectorstore.add_documents(summary_tables)
retriever.docstore.mset(list(zip(table_ids, tables)))
img_ids = [str(uuid.uuid4()) for _ in cleaned_img_summary]
summary_img = [
    Document(page_content=s, metadata={id_key: img_ids[i]})
    for i, s in enumerate(cleaned_img_summary)
]
retriever.vectorstore.add_documents(summary_img)
retriever.docstore.mset(list(zip(img_ids, cleaned_img_summary)))
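# Assumed usage: the retriever returns the raw image summary for an image-oriented query.
retriever.get_relevant_documents("Images / figures with playful and creative examples")[0]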
get_ipython().system(' docker run -d -p 8123:8123 -p9000:9000 --name langchain-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server:23.4.2.11')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet clickhouse-connect')
import getpass
import os
if not os.environ["OPENAI_API_KEY"]:
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.vectorstores import Clickhouse, ClickhouseSettings
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
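# A sketch (assumed): embed the split documents into ClickHouse and run a query.
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
docsearch = Clickhouse.from_documents(docs, embeddings, config=ClickhouseSettings())
docsearch.similarity_search("What did the president say about Ketanji Brown Jackson")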
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3 > /dev/null')
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
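# A sketch (assumed): expose an AWS Lambda function as a tool; the function
# name, tool name, and description here are illustrative.
tools = load_tools(
    ["awslambda"],
    awslambda_tool_name="email-sender",
    awslambda_tool_description="sends an email with the specified content to test@testing123.com",
    function_name="testFunction1",
)
agent = initialize_agent(
    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)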
from typing import List, Optional
from langchain.chains.openai_tools import create_extraction_chain_pydantic
from langchain_core.pydantic_v1 import BaseModel
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="gpt-3.5-turbo-1106")
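# A sketch (assumed): define an extraction schema and build the chain around it.
class Person(BaseModel):
    """Identifying information about a person."""

    name: str
    age: Optional[int] = None

chain = create_extraction_chain_pydantic(Person, model)
chain.invoke({"input": "jane is 2 and bob is 3"})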
from langchain_community.chat_models.human import HumanInputChatModel
get_ipython().run_line_magic('pip', 'install wikipedia')
from langchain.agents import AgentType, initialize_agent, load_tools
tools = load_tools(["wikipedia"])
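# A sketch (assumed): drive the agent with the human-input chat model.
llm = HumanInputChatModel()
agent = initialize_agent(
    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent("What is Bocchi the Rock?")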
from langchain_community.document_loaders import UnstructuredURLLoader
urls = [
"https://www.understandingwar.org/backgrounder/russian-offensive-campaign-assessment-february-8-2023",
"https://www.understandingwar.org/backgrounder/russian-offensive-campaign-assessment-february-9-2023",
]
loader = UnstructuredURLLoader(urls=urls)
data = loader.load()
from langchain_community.document_loaders import SeleniumURLLoader
urls = [
"https://www.youtube.com/watch?v=dQw4w9WgXcQ",
"https://goo.gl/maps/NDSHwePEyaHMFGwh8",
]
loader = SeleniumURLLoader(urls=urls)
data = loader.load()
get_ipython().run_line_magic('pip', 'install --upgrade --quiet "playwright"')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet "unstructured"')
get_ipython().system('playwright install')
from langchain_community.document_loaders import PlaywrightURLLoader
urls = [
"https://www.youtube.com/watch?v=dQw4w9WgXcQ",
"https://goo.gl/maps/NDSHwePEyaHMFGwh8",
]
loader = PlaywrightURLLoader(urls=urls, remove_selectors=["header", "footer"])
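# Assumed usage: render the pages headlessly and load the remaining text.
data = loader.load()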
get_ipython().run_line_magic('pip', 'install --upgrade --quiet vearch')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet vearch_cluster')
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores.vearch import Vearch
from langchain_text_splitters import RecursiveCharacterTextSplitter
from transformers import AutoModel, AutoTokenizer
model_path = "/data/zhx/zhx/langchain-ChatGLM_new/chatglm2-6b"
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModel.from_pretrained(model_path, trust_remote_code=True).half().cuda(0)
query = "你好!"
response, history = model.chat(tokenizer, query, history=[])
print(f"Human: {query}\nChatGLM:{response}\n")
query = "你知道凌波微步吗,你知道都有谁学会了吗?"
response, history = model.chat(tokenizer, query, history=history)
print(f"Human: {query}\nChatGLM:{response}\n")
file_path = "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/天龙八部/lingboweibu.txt" # Your local file path"
loader = TextLoader(file_path, encoding="utf-8")
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
texts = text_splitter.split_documents(documents)
embedding_path = "/data/zhx/zhx/langchain-ChatGLM_new/text2vec/text2vec-large-chinese"
embeddings = HuggingFaceEmbeddings(model_name=embedding_path)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet comet_ml langchain langchain-openai google-search-results spacy textstat pandas')
import sys
get_ipython().system('{sys.executable} -m spacy download en_core_web_sm')
import comet_ml
comet_ml.init(project_name="comet-example-langchain")
import os
os.environ["OPENAI_API_KEY"] = "..."
os.environ["SERPAPI_API_KEY"] = "..."
from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler
from langchain_openai import OpenAI
comet_callback = CometCallbackHandler(
project_name="comet-example-langchain",
complexity_metrics=True,
stream_logs=True,
tags=["llm"],
visualizations=["dep"],
)
callbacks = [StdOutCallbackHandler(), comet_callback]
llm = OpenAI(temperature=0.9, callbacks=callbacks, verbose=True)
llm_result = llm.generate(["Tell me a joke", "Tell me a poem", "Tell me a fact"] * 3)
print("LLM result", llm_result)
comet_callback.flush_tracker(llm, finish=True)
from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
comet_callback = CometCallbackHandler(
complexity_metrics=True,
project_name="comet-example-langchain",
stream_logs=True,
tags=["synopsis-chain"],
)
callbacks = [StdOutCallbackHandler(), comet_callback]
llm = OpenAI(temperature=0.9, callbacks=callbacks)
template = """You are a playwright. Given the title of play, it is your job to write a synopsis for that title.
Title: {title}
Playwright: This is a synopsis for the above play:"""
prompt_template = PromptTemplate(input_variables=["title"], template=template)
synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callbacks=callbacks)
test_prompts = [{"title": "Documentary about Bigfoot in Paris"}]
print(synopsis_chain.apply(test_prompts))
comet_callback.flush_tracker(synopsis_chain, finish=True)
from langchain.agents import initialize_agent, load_tools
from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler
from langchain_openai import OpenAI
comet_callback = CometCallbackHandler(
project_name="comet-example-langchain",
complexity_metrics=True,
stream_logs=True,
tags=["agent"],
)
callbacks = [StdOutCallbackHandler(), comet_callback]
llm = OpenAI(temperature=0.9, callbacks=callbacks)
tools = load_tools(["serpapi", "llm-math"], llm=llm, callbacks=callbacks)
agent = initialize_agent(
tools,
llm,
agent="zero-shot-react-description",
callbacks=callbacks,
verbose=True,
)
agent.run(
"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?"
)
comet_callback.flush_tracker(agent, finish=True)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet rouge-score')
from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
from rouge_score import rouge_scorer
class Rouge:
def __init__(self, reference):
self.reference = reference
self.scorer = rouge_scorer.RougeScorer(["rougeLsum"], use_stemmer=True)
def compute_metric(self, generation, prompt_idx, gen_idx):
prediction = generation.text
results = self.scorer.score(target=self.reference, prediction=prediction)
return {
"rougeLsum_score": results["rougeLsum"].fmeasure,
"reference": self.reference,
}
reference = """
The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building.
It was the first structure to reach a height of 300 metres.
It is now taller than the Chrysler Building in New York City by 5.2 metres (17 ft)
Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France.
"""
rouge_score = Rouge(reference=reference)
template = """Given the following article, it is your job to write a summary.
Article:
{article}
Summary: This is the summary for the above article:"""
prompt_template = PromptTemplate(input_variables=["article"], template=template)
comet_callback = CometCallbackHandler(
project_name="comet-example-langchain",
complexity_metrics=False,
stream_logs=True,
tags=["custom_metrics"],
custom_metrics=rouge_score.compute_metric,
)
callbacks = [StdOutCallbackHandler(), comet_callback]
llm = OpenAI(temperature=0.9)
synopsis_chain = LLMChain(llm=llm, prompt=prompt_template)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from operator import itemgetter
from langchain.memory import ConversationBufferMemory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_openai import ChatOpenAI
model = ChatOpenAI()
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful chatbot"),
MessagesPlaceholder(variable_name="history"),
("human", "{input}"),
]
)
memory = ConversationBufferMemory(return_messages=True)
memory.load_memory_variables({})
chain = (
RunnablePassthrough.assign(
        history=RunnableLambda(memory.load_memory_variables) | itemgetter("history")
    )
    | prompt
    | model
)
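# Assumed usage: invoke the chain, then persist the turn back into memory.
inputs = {"input": "hi im bob"}
response = chain.invoke(inputs)
memory.save_context(inputs, {"output": response.content})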
get_ipython().run_line_magic('pip', 'install --upgrade --quiet dingodb')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet git+https://git@github.com/dingodb/pydingo.git')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Dingo
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
from dingodb import DingoDB
index_name = "langchain_demo"
dingo_client = DingoDB(user="", password="", host=["127.0.0.1:13000"])
if (
index_name not in dingo_client.get_index()
and index_name.upper() not in dingo_client.get_index()
):
dingo_client.create_index(
index_name=index_name, dimension=1536, metric_type="cosine", auto_id=False
)
docsearch = Dingo.from_documents(
docs, embeddings, client=dingo_client, index_name=index_name
)
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Dingo
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
query = "What did the president say about Ketanji Brown Jackson"
docs = docsearch.similarity_search(query)
print(docs[0].page_content)
vectorstore = Dingo(embeddings, "text", client=dingo_client, index_name=index_name)
from typing import List
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field, validator
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0)
class Joke(BaseModel):
setup: str = Field(description="question to set up a joke")
punchline: str = Field(description="answer to resolve the joke")
@validator("setup")
def question_ends_with_question_mark(cls, field):
if field[-1] != "?":
raise ValueError("Badly formed question!")
return field
joke_query = "Tell me a joke."
parser = PydanticOutputParser(pydantic_object=Joke)
prompt = PromptTemplate(
template="Answer the user query.\n{format_instructions}\n{query}\n",
input_variables=["query"],
partial_variables={"format_instructions": parser.get_format_instructions()},
)
chain = prompt | model | parser
chain.invoke({"query": joke_query})
class Actor(BaseModel):
    name: str = Field(description="name of an actor")
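    # Companion field from the original docs example (assumed).
    film_names: List[str] = Field(description="list of names of films they starred in")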
from datetime import datetime, timedelta
import faiss
from langchain.docstore import InMemoryDocstore
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
embeddings_model = OpenAIEmbeddings()
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model, index, InMemoryDocstore({}), {})
retriever = TimeWeightedVectorStoreRetriever(
vectorstore=vectorstore, decay_rate=0.0000000000000000000000001, k=1
)
yesterday = datetime.now() - timedelta(days=1)
retriever.add_documents(
[Document(page_content="hello world", metadata={"last_accessed_at": yesterday})]
)
retriever.add_documents([Document(page_content="hello foo")])
retriever.get_relevant_documents("hello world")
embeddings_model = OpenAIEmbeddings()
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model, index, InMemoryDocstore({}), {})
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../state_of_the_union.txt", encoding="utf-8")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
for i, text in enumerate(texts):
text.metadata["source"] = f"{i}-pl"
embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_documents(texts, embeddings)
from langchain.chains import create_qa_with_sources_chain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
qa_chain = create_qa_with_sources_chain(llm)
doc_prompt = PromptTemplate(
template="Content: {page_content}\nSource: {source}",
input_variables=["page_content", "source"],
)
final_qa_chain = StuffDocumentsChain(
llm_chain=qa_chain,
document_variable_name="context",
document_prompt=doc_prompt,
)
retrieval_qa = RetrievalQA(
retriever=docsearch.as_retriever(), combine_documents_chain=final_qa_chain
)
query = "What did the president say about russia"
retrieval_qa.run(query)
qa_chain_pydantic = create_qa_with_sources_chain(llm, output_parser="pydantic")
final_qa_chain_pydantic = StuffDocumentsChain(
llm_chain=qa_chain_pydantic,
document_variable_name="context",
document_prompt=doc_prompt,
)
retrieval_qa_pydantic = RetrievalQA(
retriever=docsearch.as_retriever(), combine_documents_chain=final_qa_chain_pydantic
)
retrieval_qa_pydantic.run(query)
from langchain.chains import ConversationalRetrievalChain, LLMChain
from langchain.memory import ConversationBufferMemory
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\
Make sure to avoid using any unclear pronouns.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
condense_question_chain = LLMChain(
llm=llm,
prompt=CONDENSE_QUESTION_PROMPT,
)
qa = ConversationalRetrievalChain(
question_generator=condense_question_chain,
retriever=docsearch.as_retriever(),
memory=memory,
combine_docs_chain=final_qa_chain,
)
query = "What did the president say about Ketanji Brown Jackson"
result = qa({"question": query})
result
query = "what did he say about her predecessor?"
result = qa({"question": query})
result
from typing import List
from langchain.chains.openai_functions import create_qa_with_structure_chain
from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.messages import HumanMessage, SystemMessage
from pydantic import BaseModel, Field
class CustomResponseSchema(BaseModel):
"""An answer to the question being asked, with sources."""
answer: str = Field(..., description="Answer to the question that was asked")
countries_referenced: List[str] = Field(
..., description="All of the countries mentioned in the sources"
)
sources: List[str] = Field(
..., description="List of sources used to answer the question"
)
prompt_messages = [
SystemMessage(
content=(
"You are a world class algorithm to answer "
"questions in a specific format."
)
),
    HumanMessage(content="Answer question using the following context"),
    HumanMessagePromptTemplate.from_template("{context}"),
    HumanMessagePromptTemplate.from_template("Question: {question}"),
]
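# A sketch (assumed): assemble the prompt and build the structured-answer chain.
chain_prompt = ChatPromptTemplate(messages=prompt_messages)
qa_chain_pydantic = create_qa_with_structure_chain(
    llm, CustomResponseSchema, output_parser="pydantic", prompt=chain_prompt
)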
import boto3
dynamodb = boto3.resource("dynamodb")
table = dynamodb.create_table(
TableName="SessionTable",
KeySchema=[{"AttributeName": "SessionId", "KeyType": "HASH"}],
AttributeDefinitions=[{"AttributeName": "SessionId", "AttributeType": "S"}],
BillingMode="PAY_PER_REQUEST",
)
table.meta.client.get_waiter("table_exists").wait(TableName="SessionTable")
print(table.item_count)
from langchain_community.chat_message_histories import DynamoDBChatMessageHistory
history = DynamoDBChatMessageHistory(table_name="SessionTable", session_id="0")
history.add_user_message("hi!")
history.add_ai_message("whats up?")
history.messages
from langchain_community.chat_message_histories import DynamoDBChatMessageHistory
history = DynamoDBChatMessageHistory(
table_name="SessionTable",
session_id="0",
endpoint_url="http://localhost.localstack.cloud:4566",
)
from langchain_community.chat_message_histories import DynamoDBChatMessageHistory
composite_table = dynamodb.create_table(
TableName="CompositeTable",
KeySchema=[
{"AttributeName": "PK", "KeyType": "HASH"},
{"AttributeName": "SK", "KeyType": "RANGE"},
],
AttributeDefinitions=[
{"AttributeName": "PK", "AttributeType": "S"},
{"AttributeName": "SK", "AttributeType": "S"},
],
BillingMode="PAY_PER_REQUEST",
)
composite_table.meta.client.get_waiter("table_exists").wait(TableName="CompositeTable")
print(composite_table.item_count)
my_key = {
"PK": "session_id::0",
"SK": "langchain_history",
}
composite_key_history = DynamoDBChatMessageHistory(
table_name="CompositeTable",
session_id="0",
endpoint_url="http://localhost.localstack.cloud:4566",
key=my_key,
)
composite_key_history.add_user_message("hello, composite dynamodb table!")
composite_key_history.messages
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant."),
        MessagesPlaceholder(variable_name="history"),
        ("human", "{question}"),
    ]
)
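# A sketch (assumed): back the runnable's history with the DynamoDB store above.
chain = prompt | ChatOpenAI()
chain_with_history = RunnableWithMessageHistory(
    chain,
    lambda session_id: DynamoDBChatMessageHistory(
        table_name="SessionTable", session_id=session_id
    ),
    input_messages_key="question",
    history_messages_key="history",
)
chain_with_history.invoke(
    {"question": "Hi! I'm bob"},
    config={"configurable": {"session_id": "0"}},
)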
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pymilvus')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Milvus
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
vector_db = Milvus.from_documents(
docs,
embeddings,
connection_args={"host": "127.0.0.1", "port": "19530"},
)
query = "What did the president say about Ketanji Brown Jackson"
docs = vector_db.similarity_search(query)
docs[0].page_content
vector_db = Milvus.from_documents(
docs,
embeddings,
collection_name="collection_1",
connection_args={"host": "127.0.0.1", "port": "19530"},
)
vector_db = Milvus(
embeddings,
connection_args={"host": "127.0.0.1", "port": "19530"},
collection_name="collection_1",
)
from langchain_core.documents import Document
docs = [
Document(page_content="i worked at kensho", metadata={"namespace": "harrison"}),
Document(page_content="i worked at facebook", metadata={"namespace": "ankush"}),
]
vectorstore = Milvus.from_documents(
docs,
embeddings,
connection_args={"host": "127.0.0.1", "port": "19530"},
drop_old=True,
partition_key_field="namespace", # Use the "namespace" field as the partition key
)
vectorstore.as_retriever(
search_kwargs={"expr": 'namespace == "ankush"'}
).get_relevant_documents("where did i work?")
vectorstore.as_retriever(
search_kwargs={"expr": 'namespace == "harrison"'}
).get_relevant_documents("where did i work?")
from langchain.docstore.document import Document
docs = [
Document(page_content="foo", metadata={"id": 1}),
Document(page_content="bar", metadata={"id": 2}),
Document(page_content="baz", metadata={"id": 3}),
]
vector_db = Milvus.from_documents(
docs,
embeddings,
connection_args={"host": "127.0.0.1", "port": "19530"},
)
expr = "id in [1,2]"
pks = vector_db.get_pks(expr)
result = vector_db.delete(pks)
new_docs = [
Document(page_content="new_foo", metadata={"id": 1}),
Document(page_content="new_bar", metadata={"id": 2}),
| Document(page_content="upserted_bak", metadata={"id": 3}) | langchain.docstore.document.Document |
get_ipython().run_line_magic('pip', "install --upgrade --quiet langchain-openai 'deeplake[enterprise]' tiktoken")
from langchain_community.vectorstores import DeepLake
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
activeloop_token = getpass.getpass("activeloop token:")
embeddings = OpenAIEmbeddings()
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
db = DeepLake(dataset_path="./my_deeplake/", embedding=embeddings, overwrite=True)
db.add_documents(docs)
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)
print(docs[0].page_content)
db = DeepLake(dataset_path="./my_deeplake/", embedding=embeddings, read_only=True)
docs = db.similarity_search(query)
from langchain.chains import RetrievalQA
from langchain_openai import ChatOpenAI
qa = RetrievalQA.from_chain_type(
llm=OpenAIChat(model="gpt-3.5-turbo"),
chain_type="stuff",
retriever=db.as_retriever(),
)
query = "What did the president say about Ketanji Brown Jackson"
qa.run(query)
import random
for d in docs:
d.metadata["year"] = random.randint(2012, 2014)
db = DeepLake.from_documents(
docs, embeddings, dataset_path="./my_deeplake/", overwrite=True
)
db.similarity_search(
"What did the president say about Ketanji Brown Jackson",
filter={"metadata": {"year": 2013}},
)
db.similarity_search(
"What did the president say about Ketanji Brown Jackson?", distance_metric="cos"
)
db.max_marginal_relevance_search(
"What did the president say about Ketanji Brown Jackson?"
)
db.delete_dataset()
DeepLake.force_delete_by_path("./my_deeplake")
os.environ["ACTIVELOOP_TOKEN"] = activeloop_token
username = "<USERNAME_OR_ORG>" # your username on app.activeloop.ai
dataset_path = f"hub://{username}/langchain_testing_python" # could be also ./local/path (much faster locally), s3://bucket/path/to/dataset, gcs://path/to/dataset, etc.
docs = text_splitter.split_documents(documents)
embedding = OpenAIEmbeddings()
db = DeepLake(dataset_path=dataset_path, embedding=embeddings, overwrite=True)
ids = db.add_documents(docs)
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)
print(docs[0].page_content)
username = "<USERNAME_OR_ORG>" # your username on app.activeloop.ai
dataset_path = f"hub://{username}/langchain_testing"
docs = text_splitter.split_documents(documents)
embedding = OpenAIEmbeddings()
db = DeepLake(
dataset_path=dataset_path,
embedding=embeddings,
overwrite=True,
runtime={"tensor_db": True},
)
ids = db.add_documents(docs)
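# Grab the id of the first stored element so it can be looked up directly with a TQL query below.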
search_id = db.vectorstore.dataset.id[0].numpy()
search_id[0]
docs = db.similarity_search(
query=None,
tql=f"SELECT * WHERE id == '{search_id[0]}'",
)
db.vectorstore.summary()
dataset_path = "s3://BUCKET/langchain_test" # could be also ./local/path (much faster locally), hub://bucket/path/to/dataset, gcs://path/to/dataset, etc.
embedding = OpenAIEmbeddings()
db = DeepLake.from_documents(
docs,
dataset_path=dataset_path,
embedding=embeddings,
overwrite=True,
creds={
"aws_access_key_id": os.environ["AWS_ACCESS_KEY_ID"],
"aws_secret_access_key": os.environ["AWS_SECRET_ACCESS_KEY"],
"aws_session_token": os.environ["AWS_SESSION_TOKEN"], # Optional
},
)
db.vectorstore.summary()
embeds = db.vectorstore.dataset.embedding.numpy()
import deeplake
username = "davitbun" # your username on app.activeloop.ai
source = f"hub://{username}/langchain_testing" # could be local, s3, gcs, etc.
destination = f"hub://{username}/langchain_test_copy" # could be local, s3, gcs, etc.
deeplake.deepcopy(src=source, dest=destination, overwrite=True)
db = | DeepLake(dataset_path=destination, embedding=embeddings) | langchain_community.vectorstores.DeepLake |
from datetime import datetime, timedelta
import faiss
from langchain.docstore import InMemoryDocstore
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
embeddings_model = OpenAIEmbeddings()
embedding_size = 1536
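# 1536 is the dimensionality of OpenAI's text-embedding-ada-002 vectors; IndexFlatL2 performs exact L2-distance search.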
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model, index, InMemoryDocstore({}), {})
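# Scores are computed as semantic_similarity + (1.0 - decay_rate) ** hours_passed; with a decay_rate this close to zero, documents barely "age", so recency has almost no effect.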
retriever = TimeWeightedVectorStoreRetriever(
vectorstore=vectorstore, decay_rate=0.0000000000000000000000001, k=1
)
yesterday = datetime.now() - timedelta(days=1)
retriever.add_documents(
[Document(page_content="hello world", metadata={"last_accessed_at": yesterday})]
)
retriever.add_documents([Document(page_content="hello foo")])
retriever.get_relevant_documents("hello world")
embeddings_model = | OpenAIEmbeddings() | langchain_openai.OpenAIEmbeddings |
get_ipython().system('pip3 install cerebrium')
import os
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import CerebriumAI
os.environ["CEREBRIUMAI_API_KEY"] = "YOUR_KEY_HERE"
llm = | CerebriumAI(endpoint_url="YOUR ENDPOINT URL HERE") | langchain_community.llms.CerebriumAI |
import random
from docarray import BaseDoc
from docarray.typing import NdArray
from langchain.retrievers import DocArrayRetriever
from langchain_community.embeddings import FakeEmbeddings
embeddings = | FakeEmbeddings(size=32) | langchain_community.embeddings.FakeEmbeddings |
from langchain_community.llms import HuggingFaceEndpoint
get_ipython().run_line_magic('pip', 'install --upgrade --quiet huggingface_hub')
from getpass import getpass
HUGGINGFACEHUB_API_TOKEN = getpass()
import os
os.environ["HUGGINGFACEHUB_API_TOKEN"] = HUGGINGFACEHUB_API_TOKEN
from langchain_community.llms import HuggingFaceEndpoint
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
question = "Who won the FIFA World Cup in the year 1994? "
template = """Question: {question}
Answer: Let's think step by step."""
prompt = | PromptTemplate.from_template(template) | langchain.prompts.PromptTemplate.from_template |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet llama-cpp-python')
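# The installs below are alternative builds: LLAMA_CUBLAS=on compiles NVIDIA GPU (cuBLAS) support, LLAMA_METAL=on compiles Apple Metal support; the --force-reinstall variants rebuild an existing install.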
get_ipython().system('CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python')
get_ipython().system('CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install --upgrade --force-reinstall llama-cpp-python --no-cache-dir')
get_ipython().system('CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install llama-cpp-python')
get_ipython().system('CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install --upgrade --force-reinstall llama-cpp-python --no-cache-dir')
get_ipython().system('python -m pip install -e . --force-reinstall --no-cache-dir')
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import LlamaCpp
template = """Question: {question}
Answer: Let's work this out in a step by step way to be sure we have the right answer."""
prompt = PromptTemplate.from_template(template)
callback_manager = CallbackManager([ | StreamingStdOutCallbackHandler() | langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler |
from langchain.agents.agent_types import AgentType
from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent
from langchain_openai import ChatOpenAI
import pandas as pd
from langchain_openai import OpenAI
df = pd.read_csv("titanic.csv")
agent = create_pandas_dataframe_agent( | OpenAI(temperature=0) | langchain_openai.OpenAI |
from langchain.agents import Tool
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
from pydantic import BaseModel, Field
class DocumentInput(BaseModel):
question: str = Field()
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
tools = []
files = [
{
"name": "alphabet-earnings",
"path": "/Users/harrisonchase/Downloads/2023Q1_alphabet_earnings_release.pdf",
},
{
"name": "tesla-earnings",
"path": "/Users/harrisonchase/Downloads/TSLA-Q1-2023-Update.pdf",
},
]
for file in files:
loader = PyPDFLoader(file["path"])
pages = loader.load_and_split()
text_splitter = | CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) | langchain_text_splitters.CharacterTextSplitter |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet playwright > /dev/null')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet lxml')
from langchain_community.agent_toolkits import PlayWrightBrowserToolkit
from langchain_community.tools.playwright.utils import (
create_async_playwright_browser,  # A synchronous browser is available, though it isn't compatible with jupyter.
)
import nest_asyncio
nest_asyncio.apply()
async_browser = | create_async_playwright_browser() | langchain_community.tools.playwright.utils.create_async_playwright_browser |
from langchain.memory import ConversationSummaryBufferMemory
from langchain_openai import OpenAI
llm = OpenAI()
memory = | ConversationSummaryBufferMemory(llm=llm, max_token_limit=10) | langchain.memory.ConversationSummaryBufferMemory |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet wikipedia')
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_openai import ChatOpenAI
api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)
tool = WikipediaQueryRun(api_wrapper=api_wrapper)
tools = [tool]
prompt = | hub.pull("hwchase17/react") | langchain.hub.pull |
from langchain.indexes import SQLRecordManager, index
from langchain_core.documents import Document
from langchain_elasticsearch import ElasticsearchStore
from langchain_openai import OpenAIEmbeddings
collection_name = "test_index"
embedding = OpenAIEmbeddings()
vectorstore = ElasticsearchStore(
es_url="http://localhost:9200", index_name="test_index", embedding=embedding
)
namespace = f"elasticsearch/{collection_name}"
record_manager = SQLRecordManager(
namespace, db_url="sqlite:///record_manager_cache.sql"
)
record_manager.create_schema()
doc1 = Document(page_content="kitty", metadata={"source": "kitty.txt"})
doc2 = Document(page_content="doggy", metadata={"source": "doggy.txt"})
def _clear():
"""Hacky helper method to clear content. See the `full` mode section to to understand why it works."""
index([], record_manager, vectorstore, cleanup="full", source_id_key="source")
_clear()
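# cleanup=None skips deletion but still de-duplicates by content hash, so the five identical copies below are indexed only once.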
index(
[doc1, doc1, doc1, doc1, doc1],
record_manager,
vectorstore,
cleanup=None,
source_id_key="source",
)
_clear()
index([doc1, doc2], record_manager, vectorstore, cleanup=None, source_id_key="source")
index([doc1, doc2], record_manager, vectorstore, cleanup=None, source_id_key="source")
_clear()
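# "incremental" cleanup continuously deletes old versions of a source's documents as updated versions are indexed.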
index(
[doc1, doc2],
record_manager,
vectorstore,
cleanup="incremental",
source_id_key="source",
)
index(
[doc1, doc2],
record_manager,
vectorstore,
cleanup="incremental",
source_id_key="source",
)
index([], record_manager, vectorstore, cleanup="incremental", source_id_key="source")
changed_doc_2 = Document(page_content="puppy", metadata={"source": "doggy.txt"})
index(
[changed_doc_2],
record_manager,
vectorstore,
cleanup="incremental",
source_id_key="source",
)
_clear()
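# "full" cleanup deletes any previously indexed documents that do not appear in the batch currently being indexed.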
all_docs = [doc1, doc2]
| index(all_docs, record_manager, vectorstore, cleanup="full", source_id_key="source") | langchain.indexes.index |
get_ipython().system('pip install -qU langchain-ibm')
import os
from getpass import getpass
watsonx_api_key = getpass()
os.environ["WATSONX_APIKEY"] = watsonx_api_key
import os
os.environ["WATSONX_URL"] = "your service instance url"
os.environ["WATSONX_TOKEN"] = "your token for accessing the CPD cluster"
os.environ["WATSONX_PASSWORD"] = "your password for accessing the CPD cluster"
os.environ["WATSONX_USERNAME"] = "your username for accessing the CPD cluster"
os.environ["WATSONX_INSTANCE_ID"] = "your instance_id for accessing the CPD cluster"
parameters = {
"decoding_method": "sample",
"max_new_tokens": 100,
"min_new_tokens": 1,
"temperature": 0.5,
"top_k": 50,
"top_p": 1,
}
from langchain_ibm import WatsonxLLM
watsonx_llm = WatsonxLLM(
model_id="ibm/granite-13b-instruct-v2",
url="https://us-south.ml.cloud.ibm.com",
project_id="PASTE YOUR PROJECT_ID HERE",
params=parameters,
)
watsonx_llm = WatsonxLLM(
model_id="ibm/granite-13b-instruct-v2",
url="PASTE YOUR URL HERE",
username="PASTE YOUR USERNAME HERE",
password="PASTE YOUR PASSWORD HERE",
instance_id="openshift",
version="4.8",
project_id="PASTE YOUR PROJECT_ID HERE",
params=parameters,
)
watsonx_llm = WatsonxLLM(
deployment_id="PASTE YOUR DEPLOYMENT_ID HERE",
url="https://us-south.ml.cloud.ibm.com",
project_id="PASTE YOUR PROJECT_ID HERE",
params=parameters,
)
from langchain.prompts import PromptTemplate
template = "Generate a random question about {topic}: Question: "
prompt = | PromptTemplate.from_template(template) | langchain.prompts.PromptTemplate.from_template |
REGION = "us-central1" # @param {type:"string"}
INSTANCE = "test-instance" # @param {type:"string"}
DATABASE = "test" # @param {type:"string"}
TABLE_NAME = "test-default" # @param {type:"string"}
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-cloud-sql-mysql')
PROJECT_ID = "my-project-id" # @param {type:"string"}
get_ipython().system('gcloud config set project {PROJECT_ID}')
from google.colab import auth
auth.authenticate_user()
get_ipython().system('gcloud services enable sqladmin.googleapis.com')
from langchain_google_cloud_sql_mysql import MySQLEngine
engine = MySQLEngine.from_instance(
project_id=PROJECT_ID, region=REGION, instance=INSTANCE, database=DATABASE
)
engine.init_document_table(TABLE_NAME, overwrite_existing=True)
from langchain_core.documents import Document
from langchain_google_cloud_sql_mysql import MySQLDocumentSaver
test_docs = [
Document(
page_content="Apple Granny Smith 150 0.99 1",
metadata={"fruit_id": 1},
),
Document(
page_content="Banana Cavendish 200 0.59 0",
metadata={"fruit_id": 2},
),
Document(
page_content="Orange Navel 80 1.29 1",
metadata={"fruit_id": 3},
),
]
saver = | MySQLDocumentSaver(engine=engine, table_name=TABLE_NAME) | langchain_google_cloud_sql_mysql.MySQLDocumentSaver |
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools import BaseTool, StructuredTool, tool
@tool
def search(query: str) -> str:
"""Look up things online."""
return "LangChain"
print(search.name)
print(search.description)
print(search.args)
@tool
def multiply(a: int, b: int) -> int:
"""Multiply two numbers."""
return a * b
print(multiply.name)
print(multiply.description)
print(multiply.args)
class SearchInput(BaseModel):
query: str = Field(description="should be a search query")
@tool("search-tool", args_schema=SearchInput, return_direct=True)
def search(query: str) -> str:
"""Look up things online."""
return "LangChain"
print(search.name)
print(search.description)
print(search.args)
print(search.return_direct)
from typing import Optional, Type
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
class SearchInput(BaseModel):
query: str = Field(description="should be a search query")
class CalculatorInput(BaseModel):
a: int = Field(description="first number")
b: int = Field(description="second number")
class CustomSearchTool(BaseTool):
name = "custom_search"
description = "useful for when you need to answer questions about current events"
args_schema: Type[BaseModel] = SearchInput
def _run(
self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
"""Use the tool."""
return "LangChain"
async def _arun(
self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("custom_search does not support async")
class CustomCalculatorTool(BaseTool):
name = "Calculator"
description = "useful for when you need to answer questions about math"
args_schema: Type[BaseModel] = CalculatorInput
return_direct: bool = True
def _run(
self, a: int, b: int, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
"""Use the tool."""
return a * b
async def _arun(
self,
a: int,
b: int,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("Calculator does not support async")
search = CustomSearchTool()
print(search.name)
print(search.description)
print(search.args)
multiply = CustomCalculatorTool()
print(multiply.name)
print(multiply.description)
print(multiply.args)
print(multiply.return_direct)
def search_function(query: str):
return "LangChain"
search = StructuredTool.from_function(
func=search_function,
name="Search",
description="useful for when you need to answer questions about current events",
)
print(search.name)
print(search.description)
print(search.args)
class CalculatorInput(BaseModel):
a: int = | Field(description="first number") | langchain.pydantic_v1.Field |
from langchain_community.document_loaders import AZLyricsLoader
loader = | AZLyricsLoader("https://www.azlyrics.com/lyrics/mileycyrus/flowers.html") | langchain_community.document_loaders.AZLyricsLoader |
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate
from langchain_core.runnables import RunnableLambda
from langchain_openai import ChatOpenAI
examples = [
{
"input": "Could the members of The Police perform lawful arrests?",
"output": "what can the members of The Police do?",
},
{
"input": "Jan Sindel’s was born in what country?",
"output": "what is Jan Sindel’s personal history?",
},
]
example_prompt = ChatPromptTemplate.from_messages(
[
("human", "{input}"),
("ai", "{output}"),
]
)
few_shot_prompt = FewShotChatMessagePromptTemplate(
example_prompt=example_prompt,
examples=examples,
)
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"""You are an expert at world knowledge. Your task is to step back and paraphrase a question to a more generic step-back question, which is easier to answer. Here are a few examples:""",
),
few_shot_prompt,
("user", "{question}"),
]
)
question_gen = prompt | ChatOpenAI(temperature=0) | StrOutputParser()
question = "was chatgpt around while trump was president?"
question_gen.invoke({"question": question})
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
search = DuckDuckGoSearchAPIWrapper(max_results=4)
def retriever(query):
return search.run(query)
retriever(question)
retriever(question_gen.invoke({"question": question}))
from langchain import hub
response_prompt = hub.pull("langchain-ai/stepback-answer")
chain = (
{
"normal_context": RunnableLambda(lambda x: x["question"]) | retriever,
"step_back_context": question_gen | retriever,
"question": lambda x: x["question"],
}
| response_prompt
| | ChatOpenAI(temperature=0) | langchain_openai.ChatOpenAI |
get_ipython().run_line_magic('pip', 'install -qU langchain langchain-openai langchain-anthropic langchain-community wikipedia')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
os.environ["ANTHROPIC_API_KEY"] = getpass.getpass()
from langchain_community.retrievers import WikipediaRetriever
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
wiki = WikipediaRetriever(top_k_results=6, doc_content_chars_max=2000)
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're a helpful AI assistant. Given a user question and some Wikipedia article snippets, answer the user question. If none of the articles answer the question, just say you don't know.\n\nHere are the Wikipedia articles:{context}",
),
("human", "{question}"),
]
)
prompt.pretty_print()
from operator import itemgetter
from typing import List
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import (
RunnableLambda,
RunnableParallel,
RunnablePassthrough,
)
def format_docs(docs: List[Document]) -> str:
"""Convert Documents to a single string.:"""
formatted = [
f"Article Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}"
for doc in docs
]
return "\n\n" + "\n\n".join(formatted)
format = itemgetter("docs") | RunnableLambda(format_docs)
answer = prompt | llm | StrOutputParser()
chain = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format)
.assign(answer=answer)
.pick(["answer", "docs"])
)
chain.invoke("How fast are cheetahs?")
from langchain_core.pydantic_v1 import BaseModel, Field
class cited_answer(BaseModel):
"""Answer the user question based only on the given sources, and cite the sources used."""
answer: str = Field(
...,
description="The answer to the user question, which is based only on the given sources.",
)
citations: List[int] = Field(
...,
description="The integer IDs of the SPECIFIC sources which justify the answer.",
)
llm_with_tool = llm.bind_tools(
[cited_answer],
tool_choice="cited_answer",
)
example_q = """What Brian's height?
Source: 1
Information: Suzy is 6'2"
Source: 2
Information: Jeremiah is blonde
Source: 3
Information: Brian is 3 inches shorter than Suzy"""
llm_with_tool.invoke(example_q)
from langchain.output_parsers.openai_tools import JsonOutputKeyToolsParser
output_parser = | JsonOutputKeyToolsParser(key_name="cited_answer", return_single=True) | langchain.output_parsers.openai_tools.JsonOutputKeyToolsParser |
get_ipython().system(' pip install langchain unstructured[all-docs] pydantic lxml')
path = "/Users/rlm/Desktop/Papers/LLaVA/"
from typing import Any
from pydantic import BaseModel
from unstructured.partition.pdf import partition_pdf
raw_pdf_elements = partition_pdf(
filename=path + "LLaVA.pdf",
extract_images_in_pdf=True,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path=path,
)
category_counts = {}
for element in raw_pdf_elements:
category = str(type(element))
if category in category_counts:
category_counts[category] += 1
else:
category_counts[category] = 1
unique_categories = set(category_counts.keys())
category_counts
class Element(BaseModel):
type: str
text: Any
categorized_elements = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
categorized_elements.append(Element(type="table", text=str(element)))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
categorized_elements.append(Element(type="text", text=str(element)))
table_elements = [e for e in categorized_elements if e.type == "table"]
print(len(table_elements))
text_elements = [e for e in categorized_elements if e.type == "text"]
print(len(text_elements))
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
prompt_text = """You are an assistant tasked with summarizing tables and text. \
Give a concise summary of the table or text. Table or text chunk: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
model = ChatOpenAI(temperature=0, model="gpt-4")
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
texts = [i.text for i in text_elements]
text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5})
tables = [i.text for i in table_elements]
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5})
get_ipython().run_cell_magic('bash', '', '\n# Define the directory containing the images\nIMG_DIR=~/Desktop/Papers/LLaVA/\n\n# Loop through each image in the directory\nfor img in "${IMG_DIR}"*.jpg; do\n # Extract the base name of the image without extension\n base_name=$(basename "$img" .jpg)\n\n # Define the output file name based on the image name\n output_file="${IMG_DIR}${base_name}.txt"\n\n # Execute the command and save the output to the defined output file\n /Users/rlm/Desktop/Code/llama.cpp/bin/llava -m ../models/llava-7b/ggml-model-q5_k.gguf --mmproj ../models/llava-7b/mmproj-model-f16.gguf --temp 0.1 -p "Describe the image in detail. Be specific about graphs, such as bar plots." --image "$img" > "$output_file"\n\ndone\n')
import glob
import os
file_paths = glob.glob(os.path.expanduser(os.path.join(path, "*.txt")))
img_summaries = []
for file_path in file_paths:
with open(file_path, "r") as file:
img_summaries.append(file.read())
logging_header = "clip_model_load: total allocated memory: 201.27 MB\n\n"
cleaned_img_summary = [s.split(logging_header, 1)[1].strip() for s in img_summaries]
import uuid
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
vectorstore = Chroma(collection_name="summaries", embedding_function=OpenAIEmbeddings())
store = InMemoryStore()
id_key = "doc_id"
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=store,
id_key=id_key,
)
doc_ids = [str(uuid.uuid4()) for _ in texts]
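# Embed only the short summaries in the vectorstore; the raw texts are kept in the docstore under the same ids so retrieval can return the originals.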
summary_texts = [
Document(page_content=s, metadata={id_key: doc_ids[i]})
for i, s in enumerate(text_summaries)
]
retriever.vectorstore.add_documents(summary_texts)
retriever.docstore.mset(list(zip(doc_ids, texts)))
table_ids = [str(uuid.uuid4()) for _ in tables]
summary_tables = [
Document(page_content=s, metadata={id_key: table_ids[i]})
for i, s in enumerate(table_summaries)
]
retriever.vectorstore.add_documents(summary_tables)
retriever.docstore.mset(list(zip(table_ids, tables)))
img_ids = [str(uuid.uuid4()) for _ in cleaned_img_summary]
summary_img = [
Document(page_content=s, metadata={id_key: img_ids[i]})
for i, s in enumerate(cleaned_img_summary)
]
retriever.vectorstore.add_documents(summary_img)
retriever.docstore.mset(list(zip(img_ids, cleaned_img_summary)))
img_ids = [str(uuid.uuid4()) for _ in cleaned_img_summary]
summary_img = [
Document(page_content=s, metadata={id_key: img_ids[i]})
for i, s in enumerate(cleaned_img_summary)
]
retriever.vectorstore.add_documents(summary_img)
retriever.docstore.mset(list(zip(img_ids, cleaned_img_summary)))
tables[2]
table_summaries[2]
retriever.get_relevant_documents(
"What are results for LLaMA across across domains / subjects?"
)[1]
retriever.get_relevant_documents("Images / figures with playful and creative examples")[
1
]
from langchain_core.runnables import RunnablePassthrough
template = """Answer the question based only on the following context, which can include text and tables:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = | ChatOpenAI(temperature=0, model="gpt-4") | langchain_openai.ChatOpenAI |
from langchain_core.messages import (
AIMessage,
BaseMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
ToolMessage,
)
from langchain_core.messages import (
AIMessageChunk,
FunctionMessageChunk,
HumanMessageChunk,
SystemMessageChunk,
ToolMessageChunk,
)
AIMessageChunk(content="Hello") + AIMessageChunk(content=" World!")
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import BaseChatModel, SimpleChatModel
from langchain_core.messages import AIMessageChunk, BaseMessage, HumanMessage
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import run_in_executor
class CustomChatModelAdvanced(BaseChatModel):
"""A custom chat model that echoes the first `n` characters of the input.
When contributing an implementation to LangChain, carefully document
the model including the initialization parameters, include
an example of how to initialize the model and include any relevant
links to the underlying model's documentation or API.
Example:
.. code-block:: python
model = CustomChatModel(n=2)
result = model.invoke([HumanMessage(content="hello")])
result = model.batch([[HumanMessage(content="hello")],
[HumanMessage(content="world")]])
"""
n: int
"""The number of characters from the last message of the prompt to be echoed."""
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Override the _generate method to implement the chat model logic.
This can be a call to an API, a call to a local model, or any other
implementation that generates a response to the input prompt.
Args:
messages: the prompt composed of a list of messages.
stop: a list of strings on which the model should stop generating.
If generation stops due to a stop token, the stop token itself
SHOULD BE INCLUDED as part of the output. This is not enforced
across models right now, but it's a good practice to follow since
it makes it much easier to parse the output of the model
downstream and understand why generation stopped.
run_manager: A run manager with callbacks for the LLM.
"""
last_message = messages[-1]
tokens = last_message.content[: self.n]
message = AIMessage(content=tokens)
generation = ChatGeneration(message=message)
return ChatResult(generations=[generation])
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
"""Stream the output of the model.
This method should be implemented if the model can generate output
in a streaming fashion. If the model does not support streaming,
do not implement it. In that case streaming requests will be automatically
handled by the _generate method.
Args:
messages: the prompt composed of a list of messages.
stop: a list of strings on which the model should stop generating.
If generation stops due to a stop token, the stop token itself
SHOULD BE INCLUDED as part of the output. This is not enforced
across models right now, but it's a good practice to follow since
it makes it much easier to parse the output of the model
downstream and understand why generation stopped.
run_manager: A run manager with callbacks for the LLM.
"""
last_message = messages[-1]
tokens = last_message.content[: self.n]
for token in tokens:
chunk = ChatGenerationChunk(message=AIMessageChunk(content=token))
if run_manager:
run_manager.on_llm_new_token(token, chunk=chunk)
yield chunk
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
"""An async variant of astream.
If not provided, the default behavior is to delegate to the _generate method.
The implementation below instead will delegate to `_stream` and will
kick it off in a separate thread.
If you're able to natively support async, then by all means do so!
"""
result = await run_in_executor(
None,
self._stream,
messages,
stop=stop,
run_manager=run_manager.get_sync() if run_manager else None,
**kwargs,
)
for chunk in result:
yield chunk
@property
def _llm_type(self) -> str:
"""Get the type of language model used by this chat model."""
return "echoing-chat-model-advanced"
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Return a dictionary of identifying parameters."""
return {"n": self.n}
model = CustomChatModelAdvanced(n=3)
model.invoke(
[
HumanMessage(content="hello!"),
AIMessage(content="Hi there human!"),
| HumanMessage(content="Meow!") | langchain_core.messages.HumanMessage |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
from langchain_core.tools import tool
@tool
def complex_tool(int_arg: int, float_arg: float, dict_arg: dict) -> int:
"""Do something complex with a complex tool."""
return int_arg * float_arg
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
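# Binding with tool_choice="complex_tool" forces the model to emit a call to that tool on every invocation.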
model_with_tools = model.bind_tools(
[complex_tool],
tool_choice="complex_tool",
)
from operator import itemgetter
from langchain.output_parsers import JsonOutputKeyToolsParser
from langchain_core.runnables import Runnable, RunnableLambda, RunnablePassthrough
chain = (
model_with_tools
| | JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True) | langchain.output_parsers.JsonOutputKeyToolsParser |
import json
from pprint import pprint
from langchain.globals import set_debug
from langchain_community.llms import NIBittensorLLM
set_debug(True)
llm_sys = NIBittensorLLM(
system_prompt="Your task is to determine response based on user prompt.Explain me like I am technical lead of a project"
)
sys_resp = llm_sys(
"What is bittensor and What are the potential benefits of decentralized AI?"
)
print(f"Response provided by LLM with system prompt set is : {sys_resp}")
""" {
"choices": [
{"index": Bittensor's Metagraph index number,
"uid": Unique Identifier of a miner,
"responder_hotkey": Hotkey of a miner,
"message":{"role":"assistant","content": Contains actual response},
"response_ms": Time in millisecond required to fetch response from a miner}
]
} """
multi_response_llm = NIBittensorLLM(top_responses=10)
multi_resp = multi_response_llm("What is Neural Network Feeding Mechanism?")
json_multi_resp = json.loads(multi_resp)
pprint(json_multi_resp)
from langchain.chains import LLMChain
from langchain.globals import set_debug
from langchain.prompts import PromptTemplate
from langchain_community.llms import NIBittensorLLM
set_debug(True)
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
llm = NIBittensorLLM(
system_prompt="Your task is to determine response based on user prompt."
)
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What is bittensor?"
llm_chain.run(question)
from langchain.tools import Tool
from langchain_community.utilities import GoogleSearchAPIWrapper
search = GoogleSearchAPIWrapper()
tool = Tool(
name="Google Search",
description="Search Google for recent results.",
func=search.run,
)
from langchain.agents import (
AgentExecutor,
ZeroShotAgent,
)
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain_community.llms import NIBittensorLLM
memory = ConversationBufferMemory(memory_key="chat_history")
tools = [tool]
prefix = """Answer prompt based on LLM if there is need to search something then use internet and observe internet result and give accurate reply of user questions also try to use authenticated sources"""
suffix = """Begin!
{chat_history}
Question: {input}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools=tools,
prefix=prefix,
suffix=suffix,
input_variables=["input", "chat_history", "agent_scratchpad"],
)
llm = NIBittensorLLM(
system_prompt="Your task is to determine a response based on user prompt"
)
llm_chain = | LLMChain(llm=llm, prompt=prompt) | langchain.chains.LLMChain |
from langchain_community.utils.openai_functions import (
convert_pydantic_to_openai_function,
)
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field, validator
from langchain_openai import ChatOpenAI
class Joke(BaseModel):
"""Joke to tell user."""
setup: str = Field(description="question to set up a joke")
punchline: str = Field(description="answer to resolve the joke")
openai_functions = [convert_pydantic_to_openai_function(Joke)]
model = | ChatOpenAI(temperature=0) | langchain_openai.ChatOpenAI |
import asyncio
import os
import nest_asyncio
import pandas as pd
from langchain.docstore.document import Document
from langchain_community.agent_toolkits.pandas.base import create_pandas_dataframe_agent
from langchain_experimental.autonomous_agents import AutoGPT
from langchain_openai import ChatOpenAI
nest_asyncio.apply()
llm = ChatOpenAI(model_name="gpt-4", temperature=1.0)
import os
from contextlib import contextmanager
from typing import Optional
from langchain.agents import tool
from langchain_community.tools.file_management.read import ReadFileTool
from langchain_community.tools.file_management.write import WriteFileTool
ROOT_DIR = "./data/"
@contextmanager
def pushd(new_dir):
"""Context manager for changing the current working directory."""
prev_dir = os.getcwd()
os.chdir(new_dir)
try:
yield
finally:
os.chdir(prev_dir)
@tool
def process_csv(
csv_file_path: str, instructions: str, output_path: Optional[str] = None
) -> str:
"""Process a CSV by with pandas in a limited REPL.\
Only use this after writing data to disk as a csv file.\
Any figures must be saved to disk to be viewed by the human.\
Instructions should be written in natural language, not code. Assume the dataframe is already loaded."""
with pushd(ROOT_DIR):
try:
df = pd.read_csv(csv_file_path)
except Exception as e:
return f"Error: {e}"
agent = create_pandas_dataframe_agent(llm, df, max_iterations=30, verbose=True)
if output_path is not None:
instructions += f" Save output to disk at {output_path}"
try:
result = agent.run(instructions)
return result
except Exception as e:
return f"Error: {e}"
async def async_load_playwright(url: str) -> str:
"""Load the specified URLs using Playwright and parse using BeautifulSoup."""
from bs4 import BeautifulSoup
from playwright.async_api import async_playwright
results = ""
async with async_playwright() as p:
browser = await p.chromium.launch(headless=True)
try:
page = await browser.new_page()
await page.goto(url)
page_source = await page.content()
soup = BeautifulSoup(page_source, "html.parser")
for script in soup(["script", "style"]):
script.extract()
text = soup.get_text()
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
results = "\n".join(chunk for chunk in chunks if chunk)
except Exception as e:
results = f"Error: {e}"
await browser.close()
return results
def run_async(coro):
event_loop = asyncio.get_event_loop()
return event_loop.run_until_complete(coro)
@tool
def browse_web_page(url: str) -> str:
"""Verbose way to scrape a whole webpage. Likely to cause issues parsing."""
return run_async(async_load_playwright(url))
from langchain.chains.qa_with_sources.loading import (
BaseCombineDocumentsChain,
load_qa_with_sources_chain,
)
from langchain.tools import BaseTool, DuckDuckGoSearchRun
from langchain_text_splitters import RecursiveCharacterTextSplitter
from pydantic import Field
def _get_text_splitter():
return RecursiveCharacterTextSplitter(
chunk_size=500,
chunk_overlap=20,
length_function=len,
)
class WebpageQATool(BaseTool):
name = "query_webpage"
description = (
"Browse a webpage and retrieve the information relevant to the question."
)
text_splitter: RecursiveCharacterTextSplitter = Field(
default_factory=_get_text_splitter
)
qa_chain: BaseCombineDocumentsChain
def _run(self, url: str, question: str) -> str:
"""Useful for browsing websites and scraping the text information."""
result = browse_web_page.run(url)
docs = [Document(page_content=result, metadata={"source": url})]
web_docs = self.text_splitter.split_documents(docs)
results = []
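# Run the QA chain over the chunks in batches of four to keep each call within the context window, then combine the windowed answers below.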
for i in range(0, len(web_docs), 4):
input_docs = web_docs[i : i + 4]
window_result = self.qa_chain(
{"input_documents": input_docs, "question": question},
return_only_outputs=True,
)
results.append(f"Response from window {i} - {window_result}")
results_docs = [
Document(page_content="\n".join(results), metadata={"source": url})
]
return self.qa_chain(
{"input_documents": results_docs, "question": question},
return_only_outputs=True,
)
async def _arun(self, url: str, question: str) -> str:
raise NotImplementedError
query_website_tool = WebpageQATool(qa_chain=load_qa_with_sources_chain(llm))
import faiss
from langchain.docstore import InMemoryDocstore
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
embeddings_model = OpenAIEmbeddings()
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
web_search = DuckDuckGoSearchRun()
tools = [
web_search,
WriteFileTool(root_dir="./data"),
| ReadFileTool(root_dir="./data") | langchain_community.tools.file_management.read.ReadFileTool |
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryByteStore
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
loaders = [
TextLoader("../../paul_graham_essay.txt"),
TextLoader("../../state_of_the_union.txt"),
]
docs = []
for loader in loaders:
docs.extend(loader.load())
text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000)
docs = text_splitter.split_documents(docs)
vectorstore = Chroma(
collection_name="full_documents", embedding_function=OpenAIEmbeddings()
)
store = InMemoryByteStore()
id_key = "doc_id"
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
byte_store=store,
id_key=id_key,
)
import uuid
doc_ids = [str(uuid.uuid4()) for _ in docs]
child_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
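# Split each parent document into 400-character child chunks, tagging every chunk with its parent's id so retrieval can map back to the full document.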
sub_docs = []
for i, doc in enumerate(docs):
_id = doc_ids[i]
_sub_docs = child_text_splitter.split_documents([doc])
for _doc in _sub_docs:
_doc.metadata[id_key] = _id
sub_docs.extend(_sub_docs)
retriever.vectorstore.add_documents(sub_docs)
retriever.docstore.mset(list(zip(doc_ids, docs)))
retriever.vectorstore.similarity_search("justice breyer")[0]
len(retriever.get_relevant_documents("justice breyer")[0].page_content)
from langchain.retrievers.multi_vector import SearchType
retriever.search_type = SearchType.mmr
len(retriever.get_relevant_documents("justice breyer")[0].page_content)
import uuid
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
chain = (
{"doc": lambda x: x.page_content}
| ChatPromptTemplate.from_template("Summarize the following document:\n\n{doc}")
| ChatOpenAI(max_retries=0)
| StrOutputParser()
)
summaries = chain.batch(docs, {"max_concurrency": 5})
vectorstore = Chroma(collection_name="summaries", embedding_function= | OpenAIEmbeddings() | langchain_openai.OpenAIEmbeddings |
import getpass
import os
os.environ["POLYGON_API_KEY"] = getpass.getpass()
from langchain_community.tools.polygon.financials import PolygonFinancials
from langchain_community.tools.polygon.last_quote import PolygonLastQuote
from langchain_community.tools.polygon.ticker_news import PolygonTickerNews
from langchain_community.utilities.polygon import PolygonAPIWrapper
api_wrapper = PolygonAPIWrapper()
ticker = "AAPL"
last_quote_tool = | PolygonLastQuote(api_wrapper=api_wrapper) | langchain_community.tools.polygon.last_quote.PolygonLastQuote |
import os
os.environ["LANGCHAIN_PROJECT"] = "movie-qa"
import pandas as pd
df = pd.read_csv("data/imdb_top_1000.csv")
df["Released_Year"] = df["Released_Year"].astype(int, errors="ignore")
from langchain.schema import Document
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
records = df.to_dict("records")
documents = [Document(page_content=d["Overview"], metadata=d) for d in records]
vectorstore = | Chroma.from_documents(documents, embeddings) | langchain_community.vectorstores.Chroma.from_documents |
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_openai import ChatOpenAI
api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)
tool = WikipediaQueryRun(api_wrapper=api_wrapper)
tools = [tool]
prompt = hub.pull("hwchase17/react")
llm = | ChatOpenAI(temperature=0) | langchain_openai.ChatOpenAI |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai context-python')
import os
from langchain.callbacks import ContextCallbackHandler
token = os.environ["CONTEXT_API_TOKEN"]
context_callback = ContextCallbackHandler(token)
import os
from langchain.callbacks import ContextCallbackHandler
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from langchain_openai import ChatOpenAI
token = os.environ["CONTEXT_API_TOKEN"]
chat = ChatOpenAI(
headers={"user_id": "123"}, temperature=0, callbacks=[ContextCallbackHandler(token)]
)
messages = [
SystemMessage(
content="You are a helpful assistant that translates English to French."
),
HumanMessage(content="I love programming."),
]
print(chat(messages))
import os
from langchain.callbacks import ContextCallbackHandler
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
)
from langchain_openai import ChatOpenAI
token = os.environ["CONTEXT_API_TOKEN"]
human_message_prompt = HumanMessagePromptTemplate(
prompt=PromptTemplate(
template="What is a good name for a company that makes {product}?",
input_variables=["product"],
)
)
chat_prompt_template = ChatPromptTemplate.from_messages([human_message_prompt])
callback = ContextCallbackHandler(token)
chat = | ChatOpenAI(temperature=0.9, callbacks=[callback]) | langchain_openai.ChatOpenAI |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet elevenlabs')
import os
os.environ["ELEVEN_API_KEY"] = ""
from langchain.tools import ElevenLabsText2SpeechTool
text_to_speak = "Hello world! I am the real slim shady"
tts = | ElevenLabsText2SpeechTool() | langchain.tools.ElevenLabsText2SpeechTool |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-cloud-documentai')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-cloud-documentai-toolbox')
GCS_OUTPUT_PATH = "gs://BUCKET_NAME/FOLDER_PATH"
PROCESSOR_NAME = "projects/PROJECT_NUMBER/locations/LOCATION/processors/PROCESSOR_ID"
from langchain_community.document_loaders.blob_loaders import Blob
from langchain_community.document_loaders.parsers import DocAIParser
parser = DocAIParser(
location="us", processor_name=PROCESSOR_NAME, gcs_output_path=GCS_OUTPUT_PATH
)
blob = | Blob(
path="gs://cloud-samples-data/gen-app-builder/search/alphabet-investor-pdfs/2022Q1_alphabet_earnings_release.pdf"
) | langchain_community.document_loaders.blob_loaders.Blob |
import getpass
import os
os.environ["POLYGON_API_KEY"] = getpass.getpass()
from langchain_community.tools.polygon.financials import PolygonFinancials
from langchain_community.tools.polygon.last_quote import PolygonLastQuote
from langchain_community.tools.polygon.ticker_news import PolygonTickerNews
from langchain_community.utilities.polygon import PolygonAPIWrapper
api_wrapper = PolygonAPIWrapper()
ticker = "AAPL"
last_quote_tool = PolygonLastQuote(api_wrapper=api_wrapper)
last_quote = last_quote_tool.run(ticker)
print(f"Tool output: {last_quote}")
import json
last_quote = last_quote_tool.run(ticker)
last_quote_json = json.loads(last_quote)
latest_price = last_quote_json["p"]
print(f"Latest price for {ticker} is ${latest_price}")
ticker_news_tool = | PolygonTickerNews(api_wrapper=api_wrapper) | langchain_community.tools.polygon.ticker_news.PolygonTickerNews |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pymysql')
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import (
DirectoryLoader,
UnstructuredMarkdownLoader,
)
from langchain_community.vectorstores import StarRocks
from langchain_community.vectorstores.starrocks import StarRocksSettings
from langchain_openai import OpenAI, OpenAIEmbeddings
from langchain_text_splitters import TokenTextSplitter
update_vectordb = False
loader = DirectoryLoader(
"./docs", glob="**/*.md", loader_cls=UnstructuredMarkdownLoader
)
documents = loader.load()
text_splitter = | TokenTextSplitter(chunk_size=400, chunk_overlap=50) | langchain_text_splitters.TokenTextSplitter |
from langchain_community.document_loaders.generic import GenericLoader
from langchain_community.document_loaders.parsers import GrobidParser
loader = GenericLoader.from_filesystem(
"../Papers/",
glob="*",
suffixes=[".pdf"],
parser= | GrobidParser(segment_sentences=False) | langchain_community.document_loaders.parsers.GrobidParser |
get_ipython().system('poetry run pip -q install psychicapi')
from langchain_community.document_loaders import PsychicLoader
from psychicapi import ConnectorId
google_drive_loader = PsychicLoader(
api_key="7ddb61c1-8b6a-4d31-a58e-30d1c9ea480e",
connector_id=ConnectorId.gdrive.value,
connection_id="google-test",
)
documents = google_drive_loader.load()
from langchain.chains import RetrievalQAWithSourcesChain
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAI, OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
docsearch = | Chroma.from_documents(texts, embeddings) | langchain_community.vectorstores.Chroma.from_documents |
from langchain.chains import LLMMathChain
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
llm_math = | LLMMathChain.from_llm(llm, verbose=True) | langchain.chains.LLMMathChain.from_llm |
from langchain.globals import set_llm_cache
from langchain_openai import ChatOpenAI
llm = ChatOpenAI()
get_ipython().run_cell_magic('time', '', 'from langchain.cache import InMemoryCache\n\nset_llm_cache(InMemoryCache())\n\n# The first time, it is not yet in cache, so it should take longer\nllm.predict("Tell me a joke")\n')
get_ipython().run_cell_magic('time', '', '# The second time it is, so it goes faster\nllm.predict("Tell me a joke")\n')
get_ipython().system('rm .langchain.db')
from langchain.cache import SQLiteCache
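# Unlike InMemoryCache above, SQLiteCache writes to .langchain.db on disk, so cached responses persist across sessions.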
set_llm_cache( | SQLiteCache(database_path=".langchain.db") | langchain.cache.SQLiteCache |
import getpass
import os
os.environ["OPENAI_API_KEY"] = os.environ.get("OPENAI_API_KEY") or getpass.getpass(
"OpenAI API Key:"
)
from langchain.sql_database import SQLDatabase
from langchain_openai import ChatOpenAI
CONNECTION_STRING = "postgresql+psycopg2://postgres:test@localhost:5432/vectordb" # Replace with your own
db = SQLDatabase.from_uri(CONNECTION_STRING)
from langchain_openai import OpenAIEmbeddings
embeddings_model = | OpenAIEmbeddings() | langchain_openai.OpenAIEmbeddings |
from langchain_community.document_loaders import WhatsAppChatLoader
loader = | WhatsAppChatLoader("example_data/whatsapp_chat.txt") | langchain_community.document_loaders.WhatsAppChatLoader |
get_ipython().system('pip install -U openai langchain langchain-experimental')
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI
chat = ChatOpenAI(model="gpt-4-vision-preview", max_tokens=256)
chat.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": "What is this image showing"},
{
"type": "image_url",
"image_url": {
"url": "https://raw.githubusercontent.com/langchain-ai/langchain/master/docs/static/img/langchain_stack.png",
"detail": "auto",
},
},
]
)
]
)
from langchain.agents.openai_assistant import OpenAIAssistantRunnable
interpreter_assistant = OpenAIAssistantRunnable.create_assistant(
name="langchain assistant",
instructions="You are a personal math tutor. Write and run code to answer math questions.",
tools=[{"type": "code_interpreter"}],
model="gpt-4-1106-preview",
)
output = interpreter_assistant.invoke({"content": "What's 10 - 4 raised to the 2.7"})
output
get_ipython().system('pip install e2b duckduckgo-search')
from langchain.tools import DuckDuckGoSearchRun, E2BDataAnalysisTool
tools = [ | E2BDataAnalysisTool(api_key="...") | langchain.tools.E2BDataAnalysisTool |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet weaviate-client')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
WEAVIATE_URL = getpass.getpass("WEAVIATE_URL:")
os.environ["WEAVIATE_API_KEY"] = getpass.getpass("WEAVIATE_API_KEY:")
WEAVIATE_API_KEY = os.environ["WEAVIATE_API_KEY"]
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Weaviate
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
db = Weaviate.from_documents(docs, embeddings, weaviate_url=WEAVIATE_URL, by_text=False)
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)
print(docs[0].page_content)
import weaviate
client = weaviate.Client(
url=WEAVIATE_URL, auth_client_secret=weaviate.AuthApiKey(WEAVIATE_API_KEY)
)
vectorstore = Weaviate.from_documents(
documents, embeddings, client=client, by_text=False
)
docs = db.similarity_search_with_score(query, by_text=False)
docs[0]
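# search_type="mmr" uses maximal marginal relevance, trading off query relevance against diversity among the returned documents.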
retriever = db.as_retriever(search_type="mmr")
retriever.get_relevant_documents(query)[0]
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
llm.predict("What did the president say about Justice Breyer")
from langchain.chains import RetrievalQAWithSourcesChain
from langchain_openai import OpenAI
with open("../../modules/state_of_the_union.txt") as f:
state_of_the_union = f.read()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_text(state_of_the_union)
docsearch = Weaviate.from_texts(
texts,
embeddings,
weaviate_url=WEAVIATE_URL,
by_text=False,
metadatas=[{"source": f"{i}-pl"} for i in range(len(texts))],
)
chain = RetrievalQAWithSourcesChain.from_chain_type(
OpenAI(temperature=0), chain_type="stuff", retriever=docsearch.as_retriever()
)
chain(
{"question": "What did the president say about Justice Breyer"},
return_only_outputs=True,
)
with open("../../modules/state_of_the_union.txt") as f:
state_of_the_union = f.read()
text_splitter = | CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) | langchain_text_splitters.CharacterTextSplitter |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet hdbcli')
import os
from hdbcli import dbapi
connection = dbapi.connect(
address=os.environ.get("HANA_DB_ADDRESS"),
port=os.environ.get("HANA_DB_PORT"),
user=os.environ.get("HANA_DB_USER"),
password=os.environ.get("HANA_DB_PASSWORD"),
autocommit=True,
sslValidateCertificate=False,
)
from langchain.docstore.document import Document
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores.hanavector import HanaDB
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
text_documents = TextLoader("../../modules/state_of_the_union.txt").load()
text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=0)
text_chunks = text_splitter.split_documents(text_documents)
print(f"Number of document chunks: {len(text_chunks)}")
embeddings = OpenAIEmbeddings()
db = HanaDB(
embedding=embeddings, connection=connection, table_name="STATE_OF_THE_UNION"
)
db.delete(filter={})
db.add_documents(text_chunks)
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query, k=2)
for doc in docs:
print("-" * 80)
print(doc.page_content)
from langchain_community.vectorstores.utils import DistanceStrategy
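# Switch from the default distance strategy (cosine similarity) to Euclidean (L2) distance.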
db = HanaDB(
embedding=embeddings,
connection=connection,
distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
table_name="STATE_OF_THE_UNION",
)
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query, k=2)
for doc in docs:
print("-" * 80)
print(doc.page_content)
docs = db.max_marginal_relevance_search(query, k=2, fetch_k=20)
for doc in docs:
print("-" * 80)
print(doc.page_content)
db = HanaDB(
connection=connection, embedding=embeddings, table_name="LANGCHAIN_DEMO_BASIC"
)
db.delete(filter={})
docs = [Document(page_content="Some text"), Document(page_content="Other docs")]
db.add_documents(docs)
docs = [
Document(
page_content="foo",
metadata={"start": 100, "end": 150, "doc_name": "foo.txt", "quality": "bad"},
),
Document(
page_content="bar",
metadata={"start": 200, "end": 250, "doc_name": "bar.txt", "quality": "good"},
),
]
db.add_documents(docs)
docs = db.similarity_search("foobar", k=2, filter={"quality": "bad"})
for doc in docs:
print("-" * 80)
print(doc.page_content)
print(doc.metadata)
db.delete(filter={"quality": "bad"})
docs = db.similarity_search("foobar", k=2, filter={"quality": "bad"})
print(len(docs))
from langchain.memory import ConversationBufferMemory
from langchain_openai import ChatOpenAI
db = HanaDB(
connection=connection,
embedding=embeddings,
table_name="LANGCHAIN_DEMO_RETRIEVAL_CHAIN",
)
db.delete(filter={})
db.add_documents(text_chunks)
retriever = db.as_retriever()
from langchain.prompts import PromptTemplate
prompt_template = """
You are an expert in state of the union topics. You are provided multiple context items that are related to the prompt you have to answer.
Use the following pieces of context to answer the question at the end.
```
{context}
```
Question: {question}
"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
chain_type_kwargs = {"prompt": PROMPT}
from langchain.chains import ConversationalRetrievalChain
llm = | ChatOpenAI(model_name="gpt-3.5-turbo") | langchain_openai.ChatOpenAI |
from langchain_core.messages import (
AIMessage,
BaseMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
ToolMessage,
)
from langchain_core.messages import (
AIMessageChunk,
FunctionMessageChunk,
HumanMessageChunk,
SystemMessageChunk,
ToolMessageChunk,
)
AIMessageChunk(content="Hello") + AIMessageChunk(content=" World!")
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import BaseChatModel, SimpleChatModel
from langchain_core.messages import AIMessageChunk, BaseMessage, HumanMessage
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import run_in_executor
class CustomChatModelAdvanced(BaseChatModel):
"""A custom chat model that echoes the first `n` characters of the input.
When contributing an implementation to LangChain, carefully document
the model including the initialization parameters, include
an example of how to initialize the model and include any relevant
links to the underlying models documentation or API.
Example:
.. code-block:: python
            model = CustomChatModelAdvanced(n=2)
result = model.invoke([HumanMessage(content="hello")])
result = model.batch([[HumanMessage(content="hello")],
[HumanMessage(content="world")]])
"""
n: int
"""The number of characters from the last message of the prompt to be echoed."""
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Override the _generate method to implement the chat model logic.
This can be a call to an API, a call to a local model, or any other
implementation that generates a response to the input prompt.
Args:
messages: the prompt composed of a list of messages.
stop: a list of strings on which the model should stop generating.
If generation stops due to a stop token, the stop token itself
SHOULD BE INCLUDED as part of the output. This is not enforced
across models right now, but it's a good practice to follow since
it makes it much easier to parse the output of the model
downstream and understand why generation stopped.
run_manager: A run manager with callbacks for the LLM.
"""
        last_message = messages[-1]
        tokens = last_message.content[: self.n]
        message = AIMessage(content=tokens)
        generation = ChatGeneration(message=message)
        return ChatResult(generations=[generation])

    @property
    def _llm_type(self) -> str:
        """Return the type of language model used by this chat model (for logging)."""
        return "echoing-chat-model-advanced"
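# Quick sanity check (illustrative, not from the original snippet): the model
# should echo back the first `n` characters of the last message.
model = CustomChatModelAdvanced(n=3)
model.invoke([HumanMessage(content="hello!")])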
from langchain.output_parsers import (
OutputFixingParser,
PydanticOutputParser,
)
from langchain.prompts import (
PromptTemplate,
)
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI, OpenAI
template = """Based on the user question, provide an Action and Action Input for what step should be taken.
{format_instructions}
Question: {query}
Response:"""
class Action(BaseModel):
action: str = Field(description="action to take")
action_input: str = Field(description="input to the action")
parser = PydanticOutputParser(pydantic_object=Action)
prompt = PromptTemplate(
template="Answer the user query.\n{format_instructions}\n{query}\n",
input_variables=["query"],
partial_variables={"format_instructions": parser.get_format_instructions()},
)
prompt_value = prompt.format_prompt(query="who is leo di caprios gf?")
bad_response = '{"action": "search"}'
parser.parse(bad_response)
fix_parser = OutputFixingParser.from_llm(parser=parser, llm=ChatOpenAI())
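# The fixing parser can now repair the malformed response that made
# parser.parse raise above.
fix_parser.parse(bad_response)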
get_ipython().run_line_magic('pip', 'install --upgrade --quiet atlassian-python-api')
import os
from langchain.agents import AgentType, initialize_agent
from langchain_community.agent_toolkits.jira.toolkit import JiraToolkit
from langchain_community.utilities.jira import JiraAPIWrapper
from langchain_openai import OpenAI
os.environ["JIRA_API_TOKEN"] = "abc"
os.environ["JIRA_USERNAME"] = "123"
os.environ["JIRA_INSTANCE_URL"] = "https://jira.atlassian.com"
os.environ["OPENAI_API_KEY"] = "xyz"
llm = OpenAI(temperature=0)
jira = JiraAPIWrapper()
toolkit = JiraToolkit.from_jira_api_wrapper(jira)
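# Plausible continuation: turn the toolkit into a ReAct-style agent with the
# imported initialize_agent helper (the Jira project key "PW" is illustrative).
agent = initialize_agent(
    toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("make a new issue in project PW to remind me to make more fried rice")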
get_ipython().run_line_magic('pip', 'install -qU langchain langchain-openai langchain-anthropic langchain-community wikipedia')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
os.environ["ANTHROPIC_API_KEY"] = getpass.getpass()
from langchain_community.retrievers import WikipediaRetriever
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
wiki = WikipediaRetriever(top_k_results=6, doc_content_chars_max=2000)
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're a helpful AI assistant. Given a user question and some Wikipedia article snippets, answer the user question. If none of the articles answer the question, just say you don't know.\n\nHere are the Wikipedia articles:{context}",
),
("human", "{question}"),
]
)
prompt.pretty_print()
from operator import itemgetter
from typing import List
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import (
RunnableLambda,
RunnableParallel,
RunnablePassthrough,
)
def format_docs(docs: List[Document]) -> str:
"""Convert Documents to a single string.:"""
formatted = [
f"Article Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}"
for doc in docs
]
return "\n\n" + "\n\n".join(formatted)
format = itemgetter("docs") | RunnableLambda(format_docs)
answer = prompt | llm | StrOutputParser()
chain = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format)
.assign(answer=answer)
.pick(["answer", "docs"])
)
chain.invoke("How fast are cheetahs?")
from langchain_core.pydantic_v1 import BaseModel, Field
class cited_answer(BaseModel):
"""Answer the user question based only on the given sources, and cite the sources used."""
answer: str = Field(
...,
description="The answer to the user question, which is based only on the given sources.",
)
citations: List[int] = Field(
...,
description="The integer IDs of the SPECIFIC sources which justify the answer.",
)
llm_with_tool = llm.bind_tools(
[cited_answer],
tool_choice="cited_answer",
)
example_q = """What Brian's height?
Source: 1
Information: Suzy is 6'2"
Source: 2
Information: Jeremiah is blonde
Source: 3
Information: Brian is 3 inches shorter than Suzy"""
llm_with_tool.invoke(example_q)
from langchain.output_parsers.openai_tools import JsonOutputKeyToolsParser
output_parser = JsonOutputKeyToolsParser(key_name="cited_answer", return_single=True)
(llm_with_tool | output_parser).invoke(example_q)
def format_docs_with_id(docs: List[Document]) -> str:
formatted = [
f"Source ID: {i}\nArticle Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}"
for i, doc in enumerate(docs)
]
return "\n\n" + "\n\n".join(formatted)
format_1 = itemgetter("docs") | RunnableLambda(format_docs_with_id)
answer_1 = prompt | llm_with_tool | output_parser
chain_1 = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format_1)
.assign(cited_answer=answer_1)
.pick(["cited_answer", "docs"])
)
chain_1.invoke("How fast are cheetahs?")
class Citation(BaseModel):
source_id: int = Field(
...,
description="The integer ID of a SPECIFIC source which justifies the answer.",
)
quote: str = Field(
...,
description="The VERBATIM quote from the specified source that justifies the answer.",
)
class quoted_answer(BaseModel):
"""Answer the user question based only on the given sources, and cite the sources used."""
answer: str = Field(
...,
description="The answer to the user question, which is based only on the given sources.",
)
citations: List[Citation] = Field(
..., description="Citations from the given sources that justify the answer."
)
output_parser_2 = JsonOutputKeyToolsParser(key_name="quoted_answer", return_single=True)
llm_with_tool_2 = llm.bind_tools(
[quoted_answer],
tool_choice="quoted_answer",
)
format_2 = itemgetter("docs") | RunnableLambda(format_docs_with_id)
answer_2 = prompt | llm_with_tool_2 | output_parser_2
chain_2 = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format_2)
.assign(quoted_answer=answer_2)
.pick(["quoted_answer", "docs"])
)
chain_2.invoke("How fast are cheetahs?")
from langchain_anthropic import ChatAnthropicMessages
anthropic = ChatAnthropicMessages(model_name="claude-instant-1.2")
system = """You're a helpful AI assistant. Given a user question and some Wikipedia article snippets, \
answer the user question and provide citations. If none of the articles answer the question, just say you don't know.
Remember, you must return both an answer and citations. A citation consists of a VERBATIM quote that \
justifies the answer and the ID of the quoted article. Return a citation for every quote across all articles \
that justify the answer. Use the following format for your final output:
<cited_answer>
<answer></answer>
<citations>
<citation><source_id></source_id><quote></quote></citation>
<citation><source_id></source_id><quote></quote></citation>
...
</citations>
</cited_answer>
Here are the Wikipedia articles:{context}"""
prompt_3 = ChatPromptTemplate.from_messages(
[("system", system), ("human", "{question}")]
)
from langchain_core.output_parsers import XMLOutputParser
def format_docs_xml(docs: List[Document]) -> str:
formatted = []
for i, doc in enumerate(docs):
doc_str = f"""\
<source id=\"{i}\">
<title>{doc.metadata['title']}</title>
<article_snippet>{doc.page_content}</article_snippet>
</source>"""
formatted.append(doc_str)
return "\n\n<sources>" + "\n".join(formatted) + "</sources>"
format_3 = itemgetter("docs") | | RunnableLambda(format_docs_xml) | langchain_core.runnables.RunnableLambda |
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
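# Likely continuation mirroring the imports above: split the fetched page and
# index the chunks in Chroma with OpenAI embeddings.
all_splits = text_splitter.split_documents(data)
vectorstore = Chroma.from_documents(documents=all_splits, embedding=OpenAIEmbeddings())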
get_ipython().run_line_magic('pip', 'install --upgrade --quiet comet_ml langchain langchain-openai google-search-results spacy textstat pandas')
get_ipython().system('{sys.executable} -m spacy download en_core_web_sm')
import comet_ml
comet_ml.init(project_name="comet-example-langchain")
import os
os.environ["OPENAI_API_KEY"] = "..."
os.environ["SERPAPI_API_KEY"] = "..."
from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler
from langchain_openai import OpenAI
comet_callback = CometCallbackHandler(
project_name="comet-example-langchain",
complexity_metrics=True,
stream_logs=True,
tags=["llm"],
visualizations=["dep"],
)
callbacks = [StdOutCallbackHandler(), comet_callback]
llm = OpenAI(temperature=0.9, callbacks=callbacks, verbose=True)
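# Generate a few completions so the Comet callback has something to log, then
# flush the tracker to close out the experiment.
llm_result = llm.generate(["Tell me a joke", "Tell me a poem", "Tell me a fact"] * 3)
print("LLM result", llm_result)
comet_callback.flush_tracker(llm, finish=True)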
import os
os.environ["OPENAI_API_KEY"] = "..."
from langchain.prompts import PromptTemplate
from langchain_experimental.smart_llm import SmartLLMChain
from langchain_openai import ChatOpenAI
hard_question = "I have a 12 liter jug and a 6 liter jug. I want to measure 6 liters. How do I do it?"
prompt = PromptTemplate.from_template(hard_question)
llm = ChatOpenAI(temperature=0, model_name="gpt-4")
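# A minimal SmartLLMChain sketch: n_ideas controls how many candidate answers
# are drafted before the critique-and-resolve steps.
chain = SmartLLMChain(llm=llm, prompt=prompt, n_ideas=3, verbose=True)
chain.run({})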
import os
os.environ["GOOGLE_CSE_ID"] = ""
os.environ["GOOGLE_API_KEY"] = ""
from langchain.tools import Tool
from langchain_community.utilities import GoogleSearchAPIWrapper
search = GoogleSearchAPIWrapper()
tool = Tool(
name="google_search",
description="Search Google for recent results.",
func=search.run,
)
tool.run("Obama's first name?")
search = GoogleSearchAPIWrapper(k=1)
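# With k=1 the wrapper returns a single hit; a small helper exposes the raw
# result metadata (snippet, title, link) instead of run()'s concatenated text.
def top1_results(query):
    return search.results(query, 1)


tool = Tool(
    name="I'm Feeling Lucky",
    description="Search Google and return the first result.",
    func=top1_results,
)
tool.run("Obama's first name?")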
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-community langchainhub gpt4all chromadb')
from langchain_community.document_loaders import WebBaseLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
from langchain_community.embeddings import GPT4AllEmbeddings
from langchain_community.vectorstores import Chroma
vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())
question = "What are the approaches to Task Decomposition?"
docs = vectorstore.similarity_search(question)
len(docs)
docs[0]
get_ipython().run_line_magic('pip', 'install --upgrade --quiet llama-cpp-python')
get_ipython().system(' CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 /Users/rlm/miniforge3/envs/llama/bin/pip install -U llama-cpp-python --no-cache-dir')
from langchain_community.llms import LlamaCpp
n_gpu_layers = 1 # Metal set to 1 is enough.
n_batch = 512 # Should be between 1 and n_ctx, consider the amount of RAM of your Apple Silicon Chip.
llm = LlamaCpp(
model_path="/Users/rlm/Desktop/Code/llama.cpp/models/llama-2-13b-chat.ggufv3.q4_0.bin",
n_gpu_layers=n_gpu_layers,
n_batch=n_batch,
n_ctx=2048,
    f16_kv=True,  # MUST set to True, otherwise you will run into problems after a couple of calls
verbose=True,
)
llm.invoke("Simulate a rap battle between Stephen Colbert and John Oliver")
from langchain_community.llms import GPT4All
gpt4all = GPT4All(
model="/Users/rlm/Desktop/Code/gpt4all/models/nous-hermes-13b.ggmlv3.q4_0.bin",
max_tokens=2048,
)
from langchain_community.llms.llamafile import Llamafile
llamafile = Llamafile()
llamafile.invoke("Here is my grandmother's beloved recipe for spaghetti and meatballs:")
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
prompt = PromptTemplate.from_template(
"Summarize the main themes in these retrieved docs: {docs}"
)
def format_docs(docs):
return "\n\n".join(doc.page_content for doc in docs)
chain = {"docs": format_docs} | prompt | llm | StrOutputParser()
question = "What are the approaches to Task Decomposition?"
docs = vectorstore.similarity_search(question)
chain.invoke(docs)
from langchain import hub
rag_prompt = hub.pull("rlm/rag-prompt")
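# A hedged sketch: feed the retrieved docs and question through the pulled RAG
# prompt and the local llm defined earlier (the input keys are assumptions).
rag_chain = (
    {"context": lambda x: format_docs(x["docs"]), "question": lambda x: x["question"]}
    | rag_prompt
    | llm
    | StrOutputParser()
)
rag_chain.invoke({"docs": docs, "question": question})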
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai argilla')
import os
os.environ["ARGILLA_API_URL"] = "..."
os.environ["ARGILLA_API_KEY"] = "..."
os.environ["OPENAI_API_KEY"] = "..."
import argilla as rg
from packaging.version import parse as parse_version
if parse_version(rg.__version__) < parse_version("1.8.0"):
raise RuntimeError(
"`FeedbackDataset` is only available in Argilla v1.8.0 or higher, please "
"upgrade `argilla` as `pip install argilla --upgrade`."
)
dataset = rg.FeedbackDataset(
fields=[
rg.TextField(name="prompt"),
rg.TextField(name="response"),
],
questions=[
rg.RatingQuestion(
name="response-rating",
description="How would you rate the quality of the response?",
values=[1, 2, 3, 4, 5],
required=True,
),
rg.TextQuestion(
name="response-feedback",
description="What feedback do you have for the response?",
required=False,
),
],
guidelines="You're asked to rate the quality of the response and provide feedback.",
)
rg.init(
api_url=os.environ["ARGILLA_API_URL"],
api_key=os.environ["ARGILLA_API_KEY"],
)
dataset.push_to_argilla("langchain-dataset")
from langchain.callbacks import ArgillaCallbackHandler
argilla_callback = ArgillaCallbackHandler(
dataset_name="langchain-dataset",
api_url=os.environ["ARGILLA_API_URL"],
api_key=os.environ["ARGILLA_API_KEY"],
)
from langchain.callbacks import ArgillaCallbackHandler, StdOutCallbackHandler
from langchain_openai import OpenAI
argilla_callback = ArgillaCallbackHandler(
dataset_name="langchain-dataset",
api_url=os.environ["ARGILLA_API_URL"],
api_key=os.environ["ARGILLA_API_KEY"],
)
callbacks = [StdOutCallbackHandler(), argilla_callback]
llm = OpenAI(temperature=0.9, callbacks=callbacks)
llm.generate(["Tell me a joke", "Tell me a poem"] * 3)
from langchain.callbacks import ArgillaCallbackHandler, StdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
argilla_callback = ArgillaCallbackHandler(
dataset_name="langchain-dataset",
api_url=os.environ["ARGILLA_API_URL"],
api_key=os.environ["ARGILLA_API_KEY"],
)
callbacks = [StdOutCallbackHandler(), argilla_callback]
llm = OpenAI(temperature=0.9, callbacks=callbacks)
template = """You are a playwright. Given the title of play, it is your job to write a synopsis for that title.
Title: {title}
Playwright: This is a synopsis for the above play:"""
prompt_template = PromptTemplate(input_variables=["title"], template=template)
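# Run the synopsis chain on a test title so both callbacks fire; the title is
# illustrative.
synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callbacks=callbacks)
test_prompts = [{"title": "Documentary about Bigfoot in Paris"}]
synopsis_chain.apply(test_prompts)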
get_ipython().run_line_magic('pip', 'install --upgrade --quiet text-generation transformers google-search-results numexpr langchainhub sentencepiece jinja2')
import os
from langchain_community.llms import HuggingFaceTextGenInference
ENDPOINT_URL = "<YOUR_ENDPOINT_URL_HERE>"
HF_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
llm = HuggingFaceTextGenInference(
inference_server_url=ENDPOINT_URL,
max_new_tokens=512,
top_k=50,
temperature=0.1,
repetition_penalty=1.03,
server_kwargs={
"headers": {
"Authorization": f"Bearer {HF_TOKEN}",
"Content-Type": "application/json",
}
},
)
from langchain_community.llms import HuggingFaceEndpoint
ENDPOINT_URL = "<YOUR_ENDPOINT_URL_HERE>"
llm = HuggingFaceEndpoint(
endpoint_url=ENDPOINT_URL,
task="text-generation",
model_kwargs={
"max_new_tokens": 512,
"top_k": 50,
"temperature": 0.1,
"repetition_penalty": 1.03,
},
)
from langchain_community.llms import HuggingFaceHub
llm = HuggingFaceHub(
repo_id="HuggingFaceH4/zephyr-7b-beta",
task="text-generation",
model_kwargs={
"max_new_tokens": 512,
"top_k": 30,
"temperature": 0.1,
"repetition_penalty": 1.03,
},
)
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from langchain_community.chat_models.huggingface import ChatHuggingFace
messages = [
SystemMessage(content="You're a helpful assistant"),
HumanMessage(
content="What happens when an unstoppable force meets an immovable object?"
),
]
chat_model = ChatHuggingFace(llm=llm)
chat_model.model_id
chat_model._to_chat_prompt(messages)
res = chat_model.invoke(messages)
print(res.content)
from langchain import hub
from langchain.agents import AgentExecutor, load_tools
from langchain.agents.format_scratchpad import format_log_to_str
from langchain.agents.output_parsers import (
ReActJsonSingleInputOutputParser,
)
from langchain.tools.render import render_text_description
from langchain_community.utilities import SerpAPIWrapper
tools = load_tools(["serpapi", "llm-math"], llm=llm)
prompt = hub.pull("hwchase17/react-json")
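# A sketch assembling the ReAct agent the imports above prepare for; binding a
# stop sequence keeps the chat model from generating fake observations.
chat_model_with_stop = chat_model.bind(stop=["\nObservation"])
agent = (
    {
        "input": lambda x: x["input"],
        "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
    }
    | prompt.partial(
        tools=render_text_description(tools),
        tool_names=", ".join([t.name for t in tools]),
    )
    | chat_model_with_stop
    | ReActJsonSingleInputOutputParser()
)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke(
    {
        "input": "Who is the current holder of the speed skating world record on 500 meters?"
    }
)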
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
import os
import uuid
uid = uuid.uuid4().hex[:6]
project_name = f"Run Fine-tuning Walkthrough {uid}"
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_API_KEY"] = "YOUR API KEY"
os.environ["LANGCHAIN_PROJECT"] = project_name
from enum import Enum
from langchain_core.pydantic_v1 import BaseModel, Field
class Operation(Enum):
add = "+"
subtract = "-"
multiply = "*"
divide = "/"
class Calculator(BaseModel):
"""A calculator function"""
num1: float
num2: float
operation: Operation = Field(..., description="+,-,*,/")
def calculate(self):
if self.operation == Operation.add:
return self.num1 + self.num2
elif self.operation == Operation.subtract:
return self.num1 - self.num2
elif self.operation == Operation.multiply:
return self.num1 * self.num2
elif self.operation == Operation.divide:
if self.num2 != 0:
return self.num1 / self.num2
else:
return "Cannot divide by zero"
from pprint import pprint
from langchain.utils.openai_functions import convert_pydantic_to_openai_function
from langchain_core.pydantic_v1 import BaseModel
openai_function_def = convert_pydantic_to_openai_function(Calculator)
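# Inspect the generated function schema (this is what pprint was imported for).
pprint(openai_function_def)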
get_ipython().system('pip install langchain lark openai elasticsearch pandas')
import pandas as pd
details = (
pd.read_csv("~/Downloads/archive/Hotel_details.csv")
.drop_duplicates(subset="hotelid")
.set_index("hotelid")
)
attributes = pd.read_csv(
"~/Downloads/archive/Hotel_Room_attributes.csv", index_col="id"
)
price = pd.read_csv("~/Downloads/archive/hotels_RoomPrice.csv", index_col="id")
latest_price = price.drop_duplicates(subset="refid", keep="last")[
[
"hotelcode",
"roomtype",
"onsiterate",
"roomamenities",
"maxoccupancy",
"mealinclusiontype",
]
]
latest_price["ratedescription"] = attributes.loc[latest_price.index]["ratedescription"]
latest_price = latest_price.join(
details[["hotelname", "city", "country", "starrating"]], on="hotelcode"
)
latest_price = latest_price.rename({"ratedescription": "roomdescription"}, axis=1)
latest_price["mealsincluded"] = ~latest_price["mealinclusiontype"].isnull()
latest_price.pop("hotelcode")
latest_price.pop("mealinclusiontype")
latest_price = latest_price.reset_index(drop=True)
latest_price.head()
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="gpt-4")
res = model.predict(
"Below is a table with information about hotel rooms. "
"Return a JSON list with an entry for each column. Each entry should have "
'{"name": "column name", "description": "column description", "type": "column data type"}'
f"\n\n{latest_price.head()}\n\nJSON:\n"
)
import json
attribute_info = json.loads(res)
attribute_info
latest_price.nunique()[latest_price.nunique() < 40]
attribute_info[-2][
"description"
] += f". Valid values are {sorted(latest_price['starrating'].value_counts().index.tolist())}"
attribute_info[3][
"description"
] += f". Valid values are {sorted(latest_price['maxoccupancy'].value_counts().index.tolist())}"
attribute_info[-3][
"description"
] += f". Valid values are {sorted(latest_price['country'].value_counts().index.tolist())}"
attribute_info
from langchain.chains.query_constructor.base import (
get_query_constructor_prompt,
load_query_constructor_runnable,
)
doc_contents = "Detailed description of a hotel room"
prompt = get_query_constructor_prompt(doc_contents, attribute_info)
print(prompt.format(query="{query}"))
chain = load_query_constructor_runnable(
ChatOpenAI(model="gpt-3.5-turbo", temperature=0), doc_contents, attribute_info
)
chain.invoke({"query": "I want a hotel in Southern Europe and my budget is 200 bucks."})
chain.invoke(
{
"query": "Find a 2-person room in Vienna or London, preferably with meals included and AC"
}
)
attribute_info[-3][
"description"
] += ". NOTE: Only use the 'eq' operator if a specific country is mentioned. If a region is mentioned, include all relevant countries in filter."
chain = load_query_constructor_runnable(
ChatOpenAI(model="gpt-3.5-turbo", temperature=0),
doc_contents,
attribute_info,
)
chain.invoke({"query": "I want a hotel in Southern Europe and my budget is 200 bucks."})
content_attr = ["roomtype", "roomamenities", "roomdescription", "hotelname"]
doc_contents = "A detailed description of a hotel room, including information about the room type and room amenities."
filter_attribute_info = tuple(
ai for ai in attribute_info if ai["name"] not in content_attr
)
chain = load_query_constructor_runnable(
    ChatOpenAI(model="gpt-3.5-turbo", temperature=0),
    doc_contents,
    filter_attribute_info,
)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet wikipedia')
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_openai import ChatOpenAI
api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)
tool = WikipediaQueryRun(api_wrapper=api_wrapper)
tools = [tool]
prompt = hub.pull("hwchase17/react")
llm = ChatOpenAI(temperature=0)
agent = create_react_agent(llm, tools, prompt)
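# Completing the setup with the imported AgentExecutor; the question is a
# simple smoke test against the single Wikipedia tool.
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke({"input": "How many people live in Canada right now?"})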
get_ipython().run_line_magic('pip', 'install --editable /mnt/disks/data/langchain/libs/partners/fireworks')
get_ipython().run_line_magic('pip', 'install langchain')
from langchain_fireworks import FireworksEmbeddings
import getpass
import os
if "FIREWORKS_API_KEY" not in os.environ:
os.environ["FIREWORKS_API_KEY"] = getpass.getpass("Fireworks API Key:")
embedding = FireworksEmbeddings()
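# Quick sanity check of the embedder (texts are illustrative); vector length
# depends on the default Fireworks embedding model.
res_query = embedding.embed_query("The test information")
res_document = embedding.embed_documents(["test1", "another test"])
print(len(res_query), len(res_document[0]))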