method_name: string (lengths 1 to 78)
method_body: string (lengths 3 to 9.66k)
full_code: string (lengths 31 to 10.7k)
docstring: string (lengths 4 to 4.74k)
_get_text_from_llm_result
"""Between steps, only the LLM result text is passed, not the LLMResult object. This function extracts the text from an LLMResult.""" if len(result.generations) != 1: raise ValueError( f'In SmartLLM the LLM result in step {step} is not exactly 1 element. This should never happen' ) if len(result.generations[0]) != 1: raise ValueError( f'In SmartLLM the LLM in step {step} returned more than 1 output. SmartLLM only works with LLMs returning exactly 1 output.' ) return result.generations[0][0].text
def _get_text_from_llm_result(self, result: LLMResult, step: str) ->str: """Between steps, only the LLM result text is passed, not the LLMResult object. This function extracts the text from an LLMResult.""" if len(result.generations) != 1: raise ValueError( f'In SmartLLM the LLM result in step {step} is not exactly 1 element. This should never happen' ) if len(result.generations[0]) != 1: raise ValueError( f'In SmartLLM the LLM in step {step} returned more than 1 output. SmartLLM only works with LLMs returning exactly 1 output.' ) return result.generations[0][0].text
Between steps, only the LLM result text is passed, not the LLMResult object. This function extracts the text from an LLMResult.
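A minimal sketch of the LLMResult shape this helper expects (exactly one generation list containing one generation); the class names come from langchain_core, the sample text is illustrative:

from langchain_core.outputs import Generation, LLMResult

# Build the one-generation result shape the helper above expects,
# then pull the text out the same way it does.
result = LLMResult(generations=[[Generation(text="ideation step output")]])
assert len(result.generations) == 1 and len(result.generations[0]) == 1
text = result.generations[0][0].text  # 'ideation step output'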
print_text
"""Print text with highlighting and no end characters.""" text_to_print = get_colored_text(text, color) if color else text print(text_to_print, end=end, file=file) if file: file.flush()
def print_text(text: str, color: Optional[str]=None, end: str='', file: Optional[TextIO]=None) ->None: """Print text with highlighting and no end characters.""" text_to_print = get_colored_text(text, color) if color else text print(text_to_print, end=end, file=file) if file: file.flush()
Print text with highlighting and no end characters.
test_finish
"""Test standard parsing of agent finish.""" parser = ReActSingleInputOutputParser() _input = """Thought: agent thought here Final Answer: The temperature is 100""" output = parser.invoke(_input) expected_output = AgentFinish(return_values={'output': 'The temperature is 100'}, log=_input) assert output == expected_output
def test_finish() ->None: """Test standard parsing of agent finish.""" parser = ReActSingleInputOutputParser() _input = ( 'Thought: agent thought here\nFinal Answer: The temperature is 100') output = parser.invoke(_input) expected_output = AgentFinish(return_values={'output': 'The temperature is 100'}, log=_input) assert output == expected_output
Test standard parsing of agent finish.
similarity_search_with_score
""" Run a similarity search with BagelDB and return documents with their corresponding similarity scores. Args: query (str): The query text to search for similar documents. k (int): The number of results to return. where (Optional[Dict[str, str]]): Filter using metadata. Returns: List[Tuple[Document, float]]: List of tuples, each containing a Document object representing a similar document and its corresponding similarity score. """ results = self.__query_cluster(query_texts=[query], n_results=k, where=where) return _results_to_docs_and_scores(results)
def similarity_search_with_score(self, query: str, k: int=DEFAULT_K, where: Optional[Dict[str, str]]=None, **kwargs: Any) ->List[Tuple[Document, float]]: """ Run a similarity search with BagelDB and return documents with their corresponding similarity scores. Args: query (str): The query text to search for similar documents. k (int): The number of results to return. where (Optional[Dict[str, str]]): Filter using metadata. Returns: List[Tuple[Document, float]]: List of tuples, each containing a Document object representing a similar document and its corresponding similarity score. """ results = self.__query_cluster(query_texts=[query], n_results=k, where=where) return _results_to_docs_and_scores(results)
Run a similarity search with BagelDB and return documents with their corresponding similarity scores. Args: query (str): The query text to search for similar documents. k (int): The number of results to return. where (Optional[Dict[str, str]]): Filter using metadata. Returns: List[Tuple[Document, float]]: List of tuples, each containing a Document object representing a similar document and its corresponding similarity score.
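A hedged usage sketch for this method, modeled on the test_search_filter_with_scores row later in this dump; the Bagel import path is an assumption:

from langchain_community.vectorstores import Bagel  # import path assumed

# Build a small cluster and query it with a metadata filter, as in the test row below.
store = Bagel.from_texts(
    cluster_name="demo",
    texts=["hello bagel", "this is langchain"],
    metadatas=[{"source": "notion"}, {"source": "google"}],
)
docs_and_scores = store.similarity_search_with_score(
    "hello bagel", k=1, where={"source": "notion"}
)  # -> [(Document(...), score)]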
_run
"""Use the Golden tool.""" return self.api_wrapper.run(query)
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]=None) ->str: """Use the Golden tool.""" return self.api_wrapper.run(query)
Use the Golden tool.
get
"""GET the URL and return the text.""" return self.requests.get(url, **kwargs).text
def get(self, url: str, **kwargs: Any) ->str: """GET the URL and return the text.""" return self.requests.get(url, **kwargs).text
GET the URL and return the text.
__init__
"""Create a new TextSplitter. Args: chunk_size: Maximum size of chunks to return chunk_overlap: Overlap in characters between chunks length_function: Function that measures the length of given chunks keep_separator: Whether to keep the separator in the chunks add_start_index: If `True`, includes chunk's start index in metadata strip_whitespace: If `True`, strips whitespace from the start and end of every document """ if chunk_overlap > chunk_size: raise ValueError( f'Got a larger chunk overlap ({chunk_overlap}) than chunk size ({chunk_size}), should be smaller.' ) self._chunk_size = chunk_size self._chunk_overlap = chunk_overlap self._length_function = length_function self._keep_separator = keep_separator self._add_start_index = add_start_index self._strip_whitespace = strip_whitespace
def __init__(self, chunk_size: int=4000, chunk_overlap: int=200, length_function: Callable[[str], int]=len, keep_separator: bool=False, add_start_index: bool=False, strip_whitespace: bool=True) ->None: """Create a new TextSplitter. Args: chunk_size: Maximum size of chunks to return chunk_overlap: Overlap in characters between chunks length_function: Function that measures the length of given chunks keep_separator: Whether to keep the separator in the chunks add_start_index: If `True`, includes chunk's start index in metadata strip_whitespace: If `True`, strips whitespace from the start and end of every document """ if chunk_overlap > chunk_size: raise ValueError( f'Got a larger chunk overlap ({chunk_overlap}) than chunk size ({chunk_size}), should be smaller.' ) self._chunk_size = chunk_size self._chunk_overlap = chunk_overlap self._length_function = length_function self._keep_separator = keep_separator self._add_start_index = add_start_index self._strip_whitespace = strip_whitespace
Create a new TextSplitter. Args: chunk_size: Maximum size of chunks to return chunk_overlap: Overlap in characters between chunks length_function: Function that measures the length of given chunks keep_separator: Whether to keep the separator in the chunks add_start_index: If `True`, includes chunk's start index in metadata strip_whitespace: If `True`, strips whitespace from the start and end of every document
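A short usage sketch of these constructor options through a concrete subclass (RecursiveCharacterTextSplitter); behavior as described in the docstring above:

from langchain.text_splitter import RecursiveCharacterTextSplitter

# add_start_index=True records each chunk's character offset in the source text.
splitter = RecursiveCharacterTextSplitter(
    chunk_size=100,
    chunk_overlap=20,
    length_function=len,
    add_start_index=True,
)
docs = splitter.create_documents(["some long document text ..."])
# every doc.metadata now carries a 'start_index' key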
embed_documents
"""Compute doc embeddings using a JohnSnowLabs transformer model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ df = self.model.predict(texts, output_level='document') emb_col = None for c in df.columns: if 'embedding' in c: emb_col = c return [vec.tolist() for vec in df[emb_col].tolist()]
def embed_documents(self, texts: List[str]) ->List[List[float]]: """Compute doc embeddings using a JohnSnowLabs transformer model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ df = self.model.predict(texts, output_level='document') emb_col = None for c in df.columns: if 'embedding' in c: emb_col = c return [vec.tolist() for vec in df[emb_col].tolist()]
Compute doc embeddings using a JohnSnowLabs transformer model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text.
validate_limit_to_domains
"""Check that allowed domains are valid.""" if 'limit_to_domains' not in values: raise ValueError( 'You must specify a list of domains to limit access using `limit_to_domains`' ) if not values['limit_to_domains'] and values['limit_to_domains'] is not None: raise ValueError( 'Please provide a list of domains to limit access using `limit_to_domains`.' ) return values
@root_validator(pre=True) def validate_limit_to_domains(cls, values: Dict) ->Dict: """Check that allowed domains are valid.""" if 'limit_to_domains' not in values: raise ValueError( 'You must specify a list of domains to limit access using `limit_to_domains`' ) if not values['limit_to_domains'] and values['limit_to_domains'] is not None: raise ValueError( 'Please provide a list of domains to limit access using `limit_to_domains`.' ) return values
Check that allowed domains are valid.
lookup
"""Look up based on prompt and llm_string.""" hit_with_id = self.lookup_with_id(prompt, llm_string) if hit_with_id is not None: return hit_with_id[1] else: return None
def lookup(self, prompt: str, llm_string: str) ->Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" hit_with_id = self.lookup_with_id(prompt, llm_string) if hit_with_id is not None: return hit_with_id[1] else: return None
Look up based on prompt and llm_string.
test_loadnotebook_eachnoteisindividualdocument
loader = EverNoteLoader(self.example_notebook_path('sample_notebook.enex'), False) documents = loader.load() assert len(documents) == 2
def test_loadnotebook_eachnoteisindividualdocument(self) ->None: loader = EverNoteLoader(self.example_notebook_path( 'sample_notebook.enex'), False) documents = loader.load() assert len(documents) == 2
null
_llm_type
"""Return type of chat model.""" return 'anyscale-chat'
@property def _llm_type(self) ->str: """Return type of chat model.""" return 'anyscale-chat'
Return type of chat model.
yield_keys
"""Get an iterator over keys that match the given prefix.""" yield from self.store.yield_keys(prefix=prefix)
def yield_keys(self, *, prefix: Optional[str]=None) ->Union[Iterator[K], Iterator[str]]: """Get an iterator over keys that match the given prefix.""" yield from self.store.yield_keys(prefix=prefix)
Get an iterator over keys that match the given prefix.
text
"""To log the input text as text file artifact.""" with self.mlflow.start_run(run_id=self.run.info.run_id, experiment_id=self. mlf_expid): self.mlflow.log_text(text, f'{filename}.txt')
def text(self, text: str, filename: str) ->None: """To log the input text as text file artifact.""" with self.mlflow.start_run(run_id=self.run.info.run_id, experiment_id= self.mlf_expid): self.mlflow.log_text(text, f'{filename}.txt')
To log the input text as text file artifact.
test_graph_cypher_qa_chain_prompt_selection_4
qa_prompt_template = 'QA Prompt' cypher_prompt_template = 'Cypher Prompt' memory = ConversationBufferMemory(memory_key='chat_history') readonlymemory = ReadOnlySharedMemory(memory=memory) qa_prompt = PromptTemplate(template=qa_prompt_template, input_variables=[]) cypher_prompt = PromptTemplate(template=cypher_prompt_template, input_variables=[]) chain = GraphCypherQAChain.from_llm(llm=FakeLLM(), graph=FakeGraphStore(), verbose=True, return_intermediate_steps=False, cypher_llm_kwargs={ 'prompt': cypher_prompt, 'memory': readonlymemory}, qa_llm_kwargs={ 'prompt': qa_prompt, 'memory': readonlymemory}) assert chain.qa_chain.prompt == qa_prompt assert chain.cypher_generation_chain.prompt == cypher_prompt
def test_graph_cypher_qa_chain_prompt_selection_4() ->None: qa_prompt_template = 'QA Prompt' cypher_prompt_template = 'Cypher Prompt' memory = ConversationBufferMemory(memory_key='chat_history') readonlymemory = ReadOnlySharedMemory(memory=memory) qa_prompt = PromptTemplate(template=qa_prompt_template, input_variables=[]) cypher_prompt = PromptTemplate(template=cypher_prompt_template, input_variables=[]) chain = GraphCypherQAChain.from_llm(llm=FakeLLM(), graph=FakeGraphStore(), verbose=True, return_intermediate_steps=False, cypher_llm_kwargs={'prompt': cypher_prompt, 'memory': readonlymemory}, qa_llm_kwargs={'prompt': qa_prompt, 'memory': readonlymemory}) assert chain.qa_chain.prompt == qa_prompt assert chain.cypher_generation_chain.prompt == cypher_prompt
null
test_dashvector_with_text_with_metadatas
metadatas = [{'meta': i} for i in range(len(texts))] dashvector = DashVector.from_texts(texts=texts, embedding=FakeEmbeddings(), metadatas=metadatas, ids=ids) sleep(0.5) output = dashvector.similarity_search('foo', k=1) assert output == [Document(page_content='foo', metadata={'meta': 0})]
def test_dashvector_with_text_with_metadatas() ->None: metadatas = [{'meta': i} for i in range(len(texts))] dashvector = DashVector.from_texts(texts=texts, embedding= FakeEmbeddings(), metadatas=metadatas, ids=ids) sleep(0.5) output = dashvector.similarity_search('foo', k=1) assert output == [Document(page_content='foo', metadata={'meta': 0})]
null
test_elasticsearch_with_internal_user_agent
"""Test to make sure the user-agent is set correctly.""" texts = ['foo'] store = ElasticsearchStore.from_texts(texts, FakeEmbeddings(), ** elasticsearch_connection, index_name=index_name) user_agent = store.client._headers['User-Agent'] pattern = '^langchain-py-vs/\\d+\\.\\d+\\.\\d+$' match = re.match(pattern, user_agent) assert match is not None, f"The string '{user_agent}' does not match the expected pattern."
def test_elasticsearch_with_internal_user_agent(self, elasticsearch_connection: Dict, index_name: str) ->None: """Test to make sure the user-agent is set correctly.""" texts = ['foo'] store = ElasticsearchStore.from_texts(texts, FakeEmbeddings(), ** elasticsearch_connection, index_name=index_name) user_agent = store.client._headers['User-Agent'] pattern = '^langchain-py-vs/\\d+\\.\\d+\\.\\d+$' match = re.match(pattern, user_agent) assert match is not None, f"The string '{user_agent}' does not match the expected pattern."
Test to make sure the user-agent is set correctly.
_vectorstore_from_texts
from cassandra.cluster import Cluster keyspace = 'vector_test_keyspace' table_name = 'vector_test_table' cluster = Cluster() session = cluster.connect() session.execute( f"CREATE KEYSPACE IF NOT EXISTS {keyspace} WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': 1}}" ) if drop: session.execute(f'DROP TABLE IF EXISTS {keyspace}.{table_name}') return Cassandra.from_texts(texts, embedding_class(), metadatas=metadatas, session=session, keyspace=keyspace, table_name=table_name)
def _vectorstore_from_texts(texts: List[str], metadatas: Optional[List[dict]]=None, embedding_class: Type[Embeddings]=ConsistentFakeEmbeddings, drop: bool=True) ->Cassandra: from cassandra.cluster import Cluster keyspace = 'vector_test_keyspace' table_name = 'vector_test_table' cluster = Cluster() session = cluster.connect() session.execute( f"CREATE KEYSPACE IF NOT EXISTS {keyspace} WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': 1}}" ) if drop: session.execute(f'DROP TABLE IF EXISTS {keyspace}.{table_name}') return Cassandra.from_texts(texts, embedding_class(), metadatas=metadatas, session=session, keyspace=keyspace, table_name=table_name)
null
get_output_schema
return self.combine_docs_chain.get_output_schema(config)
def get_output_schema(self, config: Optional[RunnableConfig]=None) ->Type[BaseModel]: return self.combine_docs_chain.get_output_schema(config)
null
create_ernie_fn_runnable
"""Create a runnable sequence that uses Ernie functions. Args: functions: A sequence of either dictionaries, pydantic.BaseModels classes, or Python functions. If dictionaries are passed in, they are assumed to already be a valid Ernie functions. If only a single function is passed in, then it will be enforced that the model use that function. pydantic.BaseModels and Python functions should have docstrings describing what the function does. For best results, pydantic.BaseModels should have descriptions of the parameters and Python functions should have Google Python style args descriptions in the docstring. Additionally, Python functions should only use primitive types (str, int, float, bool) or pydantic.BaseModels for arguments. llm: Language model to use, assumed to support the Ernie function-calling API. prompt: BasePromptTemplate to pass to the model. output_parser: BaseLLMOutputParser to use for parsing model outputs. By default will be inferred from the function types. If pydantic.BaseModels are passed in, then the OutputParser will try to parse outputs using those. Otherwise model outputs will simply be parsed as JSON. If multiple functions are passed in and they are not pydantic.BaseModels, the chain output will include both the name of the function that was returned and the arguments to pass to the function. Returns: A runnable sequence that will pass in the given functions to the model when run. Example: .. code-block:: python from typing import Optional from langchain.chains.ernie_functions import create_ernie_fn_chain from langchain_community.chat_models import ErnieBotChat from langchain.prompts import ChatPromptTemplate from langchain.pydantic_v1 import BaseModel, Field class RecordPerson(BaseModel): ""\"Record some identifying information about a person.""\" name: str = Field(..., description="The person's name") age: int = Field(..., description="The person's age") fav_food: Optional[str] = Field(None, description="The person's favorite food") class RecordDog(BaseModel): ""\"Record some identifying information about a dog.""\" name: str = Field(..., description="The dog's name") color: str = Field(..., description="The dog's color") fav_food: Optional[str] = Field(None, description="The dog's favorite food") llm = ErnieBotChat(model_name="ERNIE-Bot-4") prompt = ChatPromptTemplate.from_messages( [ ("user", "Make calls to the relevant function to record the entities in the following input: {input}"), ("assistant", "OK!"), ("user", "Tip: Make sure to answer in the correct format"), ] ) chain = create_ernie_fn_runnable([RecordPerson, RecordDog], llm, prompt) chain.invoke({"input": "Harry was a chubby brown beagle who loved chicken"}) # -> RecordDog(name="Harry", color="brown", fav_food="chicken") """ if not functions: raise ValueError('Need to pass in at least one function. Received zero.') ernie_functions = [convert_to_ernie_function(f) for f in functions] llm_kwargs: Dict[str, Any] = {'functions': ernie_functions, **kwargs} if len(ernie_functions) == 1: llm_kwargs['function_call'] = {'name': ernie_functions[0]['name']} output_parser = output_parser or get_ernie_output_parser(functions) return prompt | llm.bind(**llm_kwargs) | output_parser
def create_ernie_fn_runnable(functions: Sequence[Union[Dict[str, Any], Type [BaseModel], Callable]], llm: Runnable, prompt: BasePromptTemplate, *, output_parser: Optional[Union[BaseOutputParser, BaseGenerationOutputParser]]=None, **kwargs: Any) ->Runnable: """Create a runnable sequence that uses Ernie functions. Args: functions: A sequence of either dictionaries, pydantic.BaseModels classes, or Python functions. If dictionaries are passed in, they are assumed to already be a valid Ernie functions. If only a single function is passed in, then it will be enforced that the model use that function. pydantic.BaseModels and Python functions should have docstrings describing what the function does. For best results, pydantic.BaseModels should have descriptions of the parameters and Python functions should have Google Python style args descriptions in the docstring. Additionally, Python functions should only use primitive types (str, int, float, bool) or pydantic.BaseModels for arguments. llm: Language model to use, assumed to support the Ernie function-calling API. prompt: BasePromptTemplate to pass to the model. output_parser: BaseLLMOutputParser to use for parsing model outputs. By default will be inferred from the function types. If pydantic.BaseModels are passed in, then the OutputParser will try to parse outputs using those. Otherwise model outputs will simply be parsed as JSON. If multiple functions are passed in and they are not pydantic.BaseModels, the chain output will include both the name of the function that was returned and the arguments to pass to the function. Returns: A runnable sequence that will pass in the given functions to the model when run. Example: .. code-block:: python from typing import Optional from langchain.chains.ernie_functions import create_ernie_fn_chain from langchain_community.chat_models import ErnieBotChat from langchain.prompts import ChatPromptTemplate from langchain.pydantic_v1 import BaseModel, Field class RecordPerson(BaseModel): ""\"Record some identifying information about a person.""\" name: str = Field(..., description="The person's name") age: int = Field(..., description="The person's age") fav_food: Optional[str] = Field(None, description="The person's favorite food") class RecordDog(BaseModel): ""\"Record some identifying information about a dog.""\" name: str = Field(..., description="The dog's name") color: str = Field(..., description="The dog's color") fav_food: Optional[str] = Field(None, description="The dog's favorite food") llm = ErnieBotChat(model_name="ERNIE-Bot-4") prompt = ChatPromptTemplate.from_messages( [ ("user", "Make calls to the relevant function to record the entities in the following input: {input}"), ("assistant", "OK!"), ("user", "Tip: Make sure to answer in the correct format"), ] ) chain = create_ernie_fn_runnable([RecordPerson, RecordDog], llm, prompt) chain.invoke({"input": "Harry was a chubby brown beagle who loved chicken"}) # -> RecordDog(name="Harry", color="brown", fav_food="chicken") """ if not functions: raise ValueError( 'Need to pass in at least one function. Received zero.') ernie_functions = [convert_to_ernie_function(f) for f in functions] llm_kwargs: Dict[str, Any] = {'functions': ernie_functions, **kwargs} if len(ernie_functions) == 1: llm_kwargs['function_call'] = {'name': ernie_functions[0]['name']} output_parser = output_parser or get_ernie_output_parser(functions) return prompt | llm.bind(**llm_kwargs) | output_parser
Create a runnable sequence that uses Ernie functions. Args: functions: A sequence of either dictionaries, pydantic.BaseModels classes, or Python functions. If dictionaries are passed in, they are assumed to already be a valid Ernie functions. If only a single function is passed in, then it will be enforced that the model use that function. pydantic.BaseModels and Python functions should have docstrings describing what the function does. For best results, pydantic.BaseModels should have descriptions of the parameters and Python functions should have Google Python style args descriptions in the docstring. Additionally, Python functions should only use primitive types (str, int, float, bool) or pydantic.BaseModels for arguments. llm: Language model to use, assumed to support the Ernie function-calling API. prompt: BasePromptTemplate to pass to the model. output_parser: BaseLLMOutputParser to use for parsing model outputs. By default will be inferred from the function types. If pydantic.BaseModels are passed in, then the OutputParser will try to parse outputs using those. Otherwise model outputs will simply be parsed as JSON. If multiple functions are passed in and they are not pydantic.BaseModels, the chain output will include both the name of the function that was returned and the arguments to pass to the function. Returns: A runnable sequence that will pass in the given functions to the model when run. Example: .. code-block:: python from typing import Optional from langchain.chains.ernie_functions import create_ernie_fn_chain from langchain_community.chat_models import ErnieBotChat from langchain.prompts import ChatPromptTemplate from langchain.pydantic_v1 import BaseModel, Field class RecordPerson(BaseModel): """Record some identifying information about a person.""" name: str = Field(..., description="The person's name") age: int = Field(..., description="The person's age") fav_food: Optional[str] = Field(None, description="The person's favorite food") class RecordDog(BaseModel): """Record some identifying information about a dog.""" name: str = Field(..., description="The dog's name") color: str = Field(..., description="The dog's color") fav_food: Optional[str] = Field(None, description="The dog's favorite food") llm = ErnieBotChat(model_name="ERNIE-Bot-4") prompt = ChatPromptTemplate.from_messages( [ ("user", "Make calls to the relevant function to record the entities in the following input: {input}"), ("assistant", "OK!"), ("user", "Tip: Make sure to answer in the correct format"), ] ) chain = create_ernie_fn_runnable([RecordPerson, RecordDog], llm, prompt) chain.invoke({"input": "Harry was a chubby brown beagle who loved chicken"}) # -> RecordDog(name="Harry", color="brown", fav_food="chicken")
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() ->None: assert set(__all__) == set(EXPECTED_ALL)
null
test_multimodal_history
llm = ChatVertexAI(model_name='gemini-ultra-vision') gcs_url = ( 'gs://cloud-samples-data/generative-ai/image/320px-Felis_catus-cat_on_snow.jpg' ) image_message = {'type': 'image_url', 'image_url': {'url': gcs_url}} text_message = {'type': 'text', 'text': 'What is shown in this image?'} message1 = HumanMessage(content=[text_message, image_message]) message2 = AIMessage(content= 'This is a picture of a cat in the snow. The cat is a tabby cat, which is a type of cat with a striped coat. The cat is standing in the snow, and its fur is covered in snow.' ) message3 = HumanMessage(content='What time of day is it?') response = llm([message1, message2, message3]) assert isinstance(response, AIMessage) assert isinstance(response.content, str)
def test_multimodal_history() ->None: llm = ChatVertexAI(model_name='gemini-ultra-vision') gcs_url = ( 'gs://cloud-samples-data/generative-ai/image/320px-Felis_catus-cat_on_snow.jpg' ) image_message = {'type': 'image_url', 'image_url': {'url': gcs_url}} text_message = {'type': 'text', 'text': 'What is shown in this image?'} message1 = HumanMessage(content=[text_message, image_message]) message2 = AIMessage(content= 'This is a picture of a cat in the snow. The cat is a tabby cat, which is a type of cat with a striped coat. The cat is standing in the snow, and its fur is covered in snow.' ) message3 = HumanMessage(content='What time of day is it?') response = llm([message1, message2, message3]) assert isinstance(response, AIMessage) assert isinstance(response.content, str)
null
_call
"""Run the logic of this chain and return the output.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() name = inputs[self.input_key].pop('name') args = inputs[self.input_key].pop('arguments') _pretty_name = get_colored_text(name, 'green') _pretty_args = get_colored_text(json.dumps(args, indent=2), 'green') _text = f'Calling endpoint {_pretty_name} with arguments:\n' + _pretty_args _run_manager.on_text(_text) api_response: Response = self.request_method(name, args) if api_response.status_code != 200: response = (f'{api_response.status_code}: {api_response.reason}' + f'\nFor {name} ' + f"Called with args: {args.get('params', '')}") else: try: response = api_response.json() except Exception: response = api_response.text return {self.output_key: response}
def _call(self, inputs: Dict[str, Any], run_manager: Optional[ CallbackManagerForChainRun]=None) ->Dict[str, Any]: """Run the logic of this chain and return the output.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() name = inputs[self.input_key].pop('name') args = inputs[self.input_key].pop('arguments') _pretty_name = get_colored_text(name, 'green') _pretty_args = get_colored_text(json.dumps(args, indent=2), 'green') _text = f'Calling endpoint {_pretty_name} with arguments:\n' + _pretty_args _run_manager.on_text(_text) api_response: Response = self.request_method(name, args) if api_response.status_code != 200: response = (f'{api_response.status_code}: {api_response.reason}' + f'\nFor {name} ' + f"Called with args: {args.get('params', '')}") else: try: response = api_response.json() except Exception: response = api_response.text return {self.output_key: response}
Run the logic of this chain and return the output.
validate_dataframe
import pandas as pd if issubclass(type(val), pd.DataFrame): return val if pd.DataFrame(val).empty: raise ValueError('DataFrame cannot be empty.') raise TypeError( "Wrong type for 'dataframe', must be a subclass of Pandas DataFrame (pd.DataFrame)" )
@validator('dataframe') def validate_dataframe(cls, val: Any) ->Any: import pandas as pd if issubclass(type(val), pd.DataFrame): return val if pd.DataFrame(val).empty: raise ValueError('DataFrame cannot be empty.') raise TypeError( "Wrong type for 'dataframe', must be a subclass of Pandas DataFrame (pd.DataFrame)" )
null
_Delete
self.fill('del ') interleave(lambda : self.write(', '), self.dispatch, t.targets)
def _Delete(self, t): self.fill('del ') interleave(lambda : self.write(', '), self.dispatch, t.targets)
null
_query
try: import mlflow.gateway except ImportError as e: raise ImportError( 'Could not import `mlflow.gateway` module. Please install it with `pip install mlflow[gateway]`.' ) from e embeddings = [] for txt in _chunk(texts, 20): resp = mlflow.gateway.query(self.route, data={'text': txt}) embeddings.append(resp['embeddings']) return embeddings
def _query(self, texts: List[str]) ->List[List[float]]: try: import mlflow.gateway except ImportError as e: raise ImportError( 'Could not import `mlflow.gateway` module. Please install it with `pip install mlflow[gateway]`.' ) from e embeddings = [] for txt in _chunk(texts, 20): resp = mlflow.gateway.query(self.route, data={'text': txt}) embeddings.append(resp['embeddings']) return embeddings
null
is_lc_serializable
return False
@classmethod def is_lc_serializable(cls) ->bool: return False
null
run_query
return db.run(query)
def run_query(query): return db.run(query)
null
test_pypdf_parser
"""Test PyPDF parser.""" _assert_with_parser(PyPDFParser())
@pytest.mark.requires('pypdf') def test_pypdf_parser() ->None: """Test PyPDF parser.""" _assert_with_parser(PyPDFParser())
Test PyPDF parser.
validate_environment
"""Validate that api key exists in environment.""" zapier_nla_api_key_default = None if 'zapier_nla_oauth_access_token' in values: zapier_nla_api_key_default = '' else: values['zapier_nla_oauth_access_token'] = '' zapier_nla_api_key = get_from_dict_or_env(values, 'zapier_nla_api_key', 'ZAPIER_NLA_API_KEY', zapier_nla_api_key_default) values['zapier_nla_api_key'] = zapier_nla_api_key return values
@root_validator(pre=True) def validate_environment(cls, values: Dict) ->Dict: """Validate that api key exists in environment.""" zapier_nla_api_key_default = None if 'zapier_nla_oauth_access_token' in values: zapier_nla_api_key_default = '' else: values['zapier_nla_oauth_access_token'] = '' zapier_nla_api_key = get_from_dict_or_env(values, 'zapier_nla_api_key', 'ZAPIER_NLA_API_KEY', zapier_nla_api_key_default) values['zapier_nla_api_key'] = zapier_nla_api_key return values
Validate that api key exists in environment.
tag
return RedisTag(field)
@staticmethod def tag(field: str) ->'RedisTag': return RedisTag(field)
null
test_thoughts_rollback
a = Thought(text='a', validity=ThoughtValidity.VALID_INTERMEDIATE) b = Thought(text='b', validity=ThoughtValidity.VALID_INTERMEDIATE) c_1 = Thought(text='c_1', validity=ThoughtValidity.VALID_INTERMEDIATE) c_2 = Thought(text='c_2', validity=ThoughtValidity.VALID_INTERMEDIATE) c_3 = Thought(text='c_3', validity=ThoughtValidity.VALID_INTERMEDIATE) a.children = {b} b.children = {c_1, c_2, c_3} memory = ToTDFSMemory([a, b, c_3]) self.assertEqual(self.controller(memory), ('a',))
def test_thoughts_rollback(self) ->None: a = Thought(text='a', validity=ThoughtValidity.VALID_INTERMEDIATE) b = Thought(text='b', validity=ThoughtValidity.VALID_INTERMEDIATE) c_1 = Thought(text='c_1', validity=ThoughtValidity.VALID_INTERMEDIATE) c_2 = Thought(text='c_2', validity=ThoughtValidity.VALID_INTERMEDIATE) c_3 = Thought(text='c_3', validity=ThoughtValidity.VALID_INTERMEDIATE) a.children = {b} b.children = {c_1, c_2, c_3} memory = ToTDFSMemory([a, b, c_3]) self.assertEqual(self.controller(memory), ('a',))
null
model_id
return 'your_model_id'
@pytest.fixture def model_id() ->str: return 'your_model_id'
null
__init__
"""Initialize the modelscope""" super().__init__(**kwargs) try: from modelscope.pipelines import pipeline from modelscope.utils.constant import Tasks except ImportError as e: raise ImportError( 'Could not import some python packages.Please install it with `pip install modelscope`.' ) from e self.embed = pipeline(Tasks.sentence_embedding, model=self.model_id, model_revision=self.model_revision)
def __init__(self, **kwargs: Any): """Initialize the modelscope""" super().__init__(**kwargs) try: from modelscope.pipelines import pipeline from modelscope.utils.constant import Tasks except ImportError as e: raise ImportError( 'Could not import some python packages.Please install it with `pip install modelscope`.' ) from e self.embed = pipeline(Tasks.sentence_embedding, model=self.model_id, model_revision=self.model_revision)
Initialize the modelscope
_import_merriam_webster_tool
from langchain_community.tools.merriam_webster.tool import MerriamWebsterQueryRun return MerriamWebsterQueryRun
def _import_merriam_webster_tool() ->Any: from langchain_community.tools.merriam_webster.tool import MerriamWebsterQueryRun return MerriamWebsterQueryRun
null
similarity_search
"""Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. search_k: inspect up to search_k nodes which defaults to n_trees * n if not provided Returns: List of Documents most similar to the query. """ docs_and_scores = self.similarity_search_with_score(query, k, search_k) return [doc for doc, _ in docs_and_scores]
def similarity_search(self, query: str, k: int=4, search_k: int=-1, ** kwargs: Any) ->List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. search_k: inspect up to search_k nodes which defaults to n_trees * n if not provided Returns: List of Documents most similar to the query. """ docs_and_scores = self.similarity_search_with_score(query, k, search_k) return [doc for doc, _ in docs_and_scores]
Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. search_k: inspect up to search_k nodes which defaults to n_trees * n if not provided Returns: List of Documents most similar to the query.
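The search_k / n_trees wording suggests the Annoy-backed store; a hedged sketch under that assumption (FakeEmbeddings is only a stand-in for a real embedding model):

from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import Annoy

store = Annoy.from_texts(["foo", "bar", "baz"], FakeEmbeddings(size=8))
# search_k=-1 falls back to inspecting n_trees * n nodes, per the docstring above
docs = store.similarity_search("foo", k=2, search_k=-1)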
_run
"""Use the tool.""" try: return self.api_wrapper.results(query, self.max_results) except Exception as e: return repr(e)
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]=None) ->Union[List[Dict], str]: """Use the tool.""" try: return self.api_wrapper.results(query, self.max_results) except Exception as e: return repr(e)
Use the tool.
_on_retriever_end
"""Process the Retriever Run.""" self._submit(self._update_run_single, _copy(run))
def _on_retriever_end(self, run: Run) ->None: """Process the Retriever Run.""" self._submit(self._update_run_single, _copy(run))
Process the Retriever Run.
_load_api_chain
if 'api_request_chain' in config: api_request_chain_config = config.pop('api_request_chain') api_request_chain = load_chain_from_config(api_request_chain_config) elif 'api_request_chain_path' in config: api_request_chain = load_chain(config.pop('api_request_chain_path')) else: raise ValueError( 'One of `api_request_chain` or `api_request_chain_path` must be present.' ) if 'api_answer_chain' in config: api_answer_chain_config = config.pop('api_answer_chain') api_answer_chain = load_chain_from_config(api_answer_chain_config) elif 'api_answer_chain_path' in config: api_answer_chain = load_chain(config.pop('api_answer_chain_path')) else: raise ValueError( 'One of `api_answer_chain` or `api_answer_chain_path` must be present.' ) if 'requests_wrapper' in kwargs: requests_wrapper = kwargs.pop('requests_wrapper') else: raise ValueError('`requests_wrapper` must be present.') return APIChain(api_request_chain=api_request_chain, api_answer_chain= api_answer_chain, requests_wrapper=requests_wrapper, **config)
def _load_api_chain(config: dict, **kwargs: Any) ->APIChain: if 'api_request_chain' in config: api_request_chain_config = config.pop('api_request_chain') api_request_chain = load_chain_from_config(api_request_chain_config) elif 'api_request_chain_path' in config: api_request_chain = load_chain(config.pop('api_request_chain_path')) else: raise ValueError( 'One of `api_request_chain` or `api_request_chain_path` must be present.' ) if 'api_answer_chain' in config: api_answer_chain_config = config.pop('api_answer_chain') api_answer_chain = load_chain_from_config(api_answer_chain_config) elif 'api_answer_chain_path' in config: api_answer_chain = load_chain(config.pop('api_answer_chain_path')) else: raise ValueError( 'One of `api_answer_chain` or `api_answer_chain_path` must be present.' ) if 'requests_wrapper' in kwargs: requests_wrapper = kwargs.pop('requests_wrapper') else: raise ValueError('`requests_wrapper` must be present.') return APIChain(api_request_chain=api_request_chain, api_answer_chain= api_answer_chain, requests_wrapper=requests_wrapper, **config)
null
_convert_message_to_dict
message_dict: Dict[str, Any] if isinstance(message, ChatMessage): message_dict = {'role': message.role, 'content': message.content} elif isinstance(message, SystemMessage): message_dict = {'role': 'system', 'content': message.content} elif isinstance(message, HumanMessage): message_dict = {'role': 'user', 'content': message.content} elif isinstance(message, AIMessage): message_dict = {'role': 'assistant', 'content': message.content} else: raise TypeError(f'Got unknown type {message}') return message_dict
def _convert_message_to_dict(message: BaseMessage) ->dict: message_dict: Dict[str, Any] if isinstance(message, ChatMessage): message_dict = {'role': message.role, 'content': message.content} elif isinstance(message, SystemMessage): message_dict = {'role': 'system', 'content': message.content} elif isinstance(message, HumanMessage): message_dict = {'role': 'user', 'content': message.content} elif isinstance(message, AIMessage): message_dict = {'role': 'assistant', 'content': message.content} else: raise TypeError(f'Got unknown type {message}') return message_dict
null
lower_case_name
if v is not None: raise NotImplementedError('system_setting is not implemented yet') return v
@validator('system_settings') def lower_case_name(cls, v: str) ->Union[str, None]: if v is not None: raise NotImplementedError('system_setting is not implemented yet') return v
null
test_fake_retriever_v1_with_kwargs_upgrade
callbacks = FakeCallbackHandler() assert fake_retriever_v1_with_kwargs._new_arg_supported is False assert fake_retriever_v1_with_kwargs._expects_other_args is True results: List[Document] = fake_retriever_v1_with_kwargs.get_relevant_documents( 'Foo', callbacks=[callbacks], where_filter={'foo': 'bar'}) assert results[0].page_content == 'Foo' assert results[0].metadata == {'foo': 'bar'} assert callbacks.retriever_starts == 1 assert callbacks.retriever_ends == 1 assert callbacks.retriever_errors == 0
def test_fake_retriever_v1_with_kwargs_upgrade(fake_retriever_v1_with_kwargs: BaseRetriever) ->None: callbacks = FakeCallbackHandler() assert fake_retriever_v1_with_kwargs._new_arg_supported is False assert fake_retriever_v1_with_kwargs._expects_other_args is True results: List[Document] = fake_retriever_v1_with_kwargs.get_relevant_documents('Foo', callbacks=[callbacks], where_filter={'foo': 'bar'}) assert results[0].page_content == 'Foo' assert results[0].metadata == {'foo': 'bar'} assert callbacks.retriever_starts == 1 assert callbacks.retriever_ends == 1 assert callbacks.retriever_errors == 0
null
__init__
"""Initialize the loader with an Apify dataset ID and a mapping function. Args: dataset_id (str): The ID of the dataset on the Apify platform. dataset_mapping_function (Callable): A function that takes a single dictionary (an Apify dataset item) and converts it to an instance of the Document class. """ super().__init__(dataset_id=dataset_id, dataset_mapping_function= dataset_mapping_function)
def __init__(self, dataset_id: str, dataset_mapping_function: Callable[[Dict], Document]): """Initialize the loader with an Apify dataset ID and a mapping function. Args: dataset_id (str): The ID of the dataset on the Apify platform. dataset_mapping_function (Callable): A function that takes a single dictionary (an Apify dataset item) and converts it to an instance of the Document class. """ super().__init__(dataset_id=dataset_id, dataset_mapping_function=dataset_mapping_function)
Initialize the loader with an Apify dataset ID and a mapping function. Args: dataset_id (str): The ID of the dataset on the Apify platform. dataset_mapping_function (Callable): A function that takes a single dictionary (an Apify dataset item) and converts it to an instance of the Document class.
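A hedged usage sketch; the item field names ('text', 'url') depend on the Apify actor that produced the dataset and are placeholders:

from langchain_community.document_loaders import ApifyDatasetLoader
from langchain_core.documents import Document

loader = ApifyDatasetLoader(
    dataset_id="<your-dataset-id>",  # placeholder
    dataset_mapping_function=lambda item: Document(
        page_content=item["text"],         # actor-specific field name
        metadata={"source": item["url"]},  # actor-specific field name
    ),
)
docs = loader.load()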
load
with open(self.log_file, encoding='utf8') as f: data = json.load(f)[:self.num_logs] if self.num_logs else json.load(f) documents = [] for d in data: title = d['title'] messages = d['mapping'] text = ''.join([concatenate_rows(messages[key]['message'], title) for idx, key in enumerate(messages) if not (idx == 0 and messages[key][ 'message']['author']['role'] == 'system')]) metadata = {'source': str(self.log_file)} documents.append(Document(page_content=text, metadata=metadata)) return documents
def load(self) ->List[Document]: with open(self.log_file, encoding='utf8') as f: data = json.load(f)[:self.num_logs] if self.num_logs else json.load(f) documents = [] for d in data: title = d['title'] messages = d['mapping'] text = ''.join([concatenate_rows(messages[key]['message'], title) for idx, key in enumerate(messages) if not (idx == 0 and messages[key]['message']['author']['role'] == 'system')]) metadata = {'source': str(self.log_file)} documents.append(Document(page_content=text, metadata=metadata)) return documents
null
check_operator_misuse
"""Decorator to check for misuse of equality operators.""" @wraps(func) def wrapper(instance: Any, *args: Any, **kwargs: Any) ->Any: other = kwargs.get('other') if 'other' in kwargs else None if not other: for arg in args: if isinstance(arg, type(instance)): other = arg break if isinstance(other, type(instance)): raise ValueError( 'Equality operators are overridden for FilterExpression creation. Use .equals() for equality checks' ) return func(instance, *args, **kwargs) return wrapper
def check_operator_misuse(func: Callable) ->Callable: """Decorator to check for misuse of equality operators.""" @wraps(func) def wrapper(instance: Any, *args: Any, **kwargs: Any) ->Any: other = kwargs.get('other') if 'other' in kwargs else None if not other: for arg in args: if isinstance(arg, type(instance)): other = arg break if isinstance(other, type(instance)): raise ValueError( 'Equality operators are overridden for FilterExpression creation. Use .equals() for equality checks' ) return func(instance, *args, **kwargs) return wrapper
Decorator to check for misuse of equality operators.
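An illustrative toy (not the actual Redis filter classes) showing how the decorator above is meant to be applied: it wraps __eq__ so that comparing two filter fields raises, while field == value still builds an expression:

from typing import Any

class Field:
    # assumes check_operator_misuse from the row above is in scope
    def __init__(self, name: str) -> None:
        self.name = name

    @check_operator_misuse
    def __eq__(self, other: Any) -> str:  # returns a (toy) filter expression
        return f"@{self.name}:{{{other}}}"

Field("color") == "red"            # ok, builds '@color:{red}'
# Field("color") == Field("size")  # raises ValueError: use .equals() instead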
_llm_type
"""Return type of llm.""" return 'google_palm'
@property def _llm_type(self) ->str: """Return type of llm.""" return 'google_palm'
Return type of llm.
test_rwkv_inference
"""Test valid gpt4all inference.""" model_path = _download_model() llm = RWKV(model=model_path, tokens_path='20B_tokenizer.json', strategy= 'cpu fp32') output = llm('Say foo:') assert isinstance(output, str)
@pytest.mark.filterwarnings('ignore::UserWarning:') def test_rwkv_inference() ->None: """Test valid gpt4all inference.""" model_path = _download_model() llm = RWKV(model=model_path, tokens_path='20B_tokenizer.json', strategy='cpu fp32') output = llm('Say foo:') assert isinstance(output, str)
Test valid gpt4all inference.
_stream
params = self._convert_prompt_msg_params(messages, **kwargs) for res in self.client.stream_chat(params): if res: msg = convert_dict_to_message(res) yield ChatGenerationChunk(message=AIMessageChunk(content=msg.content)) if run_manager: run_manager.on_llm_new_token(cast(str, msg.content))
def _stream(self, messages: List[BaseMessage], stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[ChatGenerationChunk]: params = self._convert_prompt_msg_params(messages, **kwargs) for res in self.client.stream_chat(params): if res: msg = convert_dict_to_message(res) yield ChatGenerationChunk(message=AIMessageChunk(content=msg.content)) if run_manager: run_manager.on_llm_new_token(cast(str, msg.content))
null
is_lc_serializable
return True
@classmethod def is_lc_serializable(cls) ->bool: return True
null
embed_documents
"""Return simple embeddings.""" return [([float(1.0)] * (OS_TOKEN_COUNT - 1) + [float(i + 1)]) for i in range(len(embedding_texts))]
def embed_documents(self, embedding_texts: List[str]) ->List[List[float]]: """Return simple embeddings.""" return [([float(1.0)] * (OS_TOKEN_COUNT - 1) + [float(i + 1)]) for i in range(len(embedding_texts))]
Return simple embeddings.
input_keys
"""Expect input key. :meta private: """ return [self.input_key]
@property def input_keys(self) ->List[str]: """Expect input key. :meta private: """ return [self.input_key]
Expect input key. :meta private:
on_llm_error
self._container.markdown('**LLM encountered an error...**') self._container.exception(error)
def on_llm_error(self, error: BaseException, **kwargs: Any) ->None: self._container.markdown('**LLM encountered an error...**') self._container.exception(error)
null
return_stopped_response
"""Return response when agent has been stopped due to max iterations.""" if early_stopping_method == 'force': return AgentFinish({'output': 'Agent stopped due to max iterations.'}, '') else: raise ValueError( f'Got unsupported early_stopping_method `{early_stopping_method}`')
def return_stopped_response(self, early_stopping_method: str, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any ) ->AgentFinish: """Return response when agent has been stopped due to max iterations.""" if early_stopping_method == 'force': return AgentFinish({'output': 'Agent stopped due to max iterations.'}, '') else: raise ValueError( f'Got unsupported early_stopping_method `{early_stopping_method}`')
Return response when agent has been stopped due to max iterations.
_import_youtube_search
from langchain_community.tools.youtube.search import YouTubeSearchTool return YouTubeSearchTool
def _import_youtube_search() ->Any: from langchain_community.tools.youtube.search import YouTubeSearchTool return YouTubeSearchTool
null
embed_query
"""Return simple embeddings.""" return [float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(0.0)]
def embed_query(self, text: str) ->List[float]: """Return simple embeddings.""" return [float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(0.0)]
Return simple embeddings.
test_search_filter_with_scores
texts = ['hello bagel', 'this is langchain'] metadatas = [{'source': 'notion'}, {'source': 'google'}] txt_search = Bagel.from_texts(cluster_name='testing', texts=texts, metadatas=metadatas) output = txt_search.similarity_search_with_score('hello bagel', k=1, where= {'source': 'notion'}) assert output == [(Document(page_content='hello bagel', metadata={'source': 'notion'}), 0.0)] txt_search.delete_cluster()
def test_search_filter_with_scores() ->None: texts = ['hello bagel', 'this is langchain'] metadatas = [{'source': 'notion'}, {'source': 'google'}] txt_search = Bagel.from_texts(cluster_name='testing', texts=texts, metadatas=metadatas) output = txt_search.similarity_search_with_score('hello bagel', k=1, where={'source': 'notion'}) assert output == [(Document(page_content='hello bagel', metadata={'source': 'notion'}), 0.0)] txt_search.delete_cluster()
null
_call
"""Call out to AI21's complete endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = ai21("Tell me a joke.") """ if self.stop is not None and stop is not None: raise ValueError('`stop` found in both the input and default params.') elif self.stop is not None: stop = self.stop elif stop is None: stop = [] if self.base_url is not None: base_url = self.base_url elif self.model in ('j1-grande-instruct',): base_url = 'https://api.ai21.com/studio/v1/experimental' else: base_url = 'https://api.ai21.com/studio/v1' params = {**self._default_params, **kwargs} self.ai21_api_key = cast(SecretStr, self.ai21_api_key) response = requests.post(url=f'{base_url}/{self.model}/complete', headers={ 'Authorization': f'Bearer {self.ai21_api_key.get_secret_value()}'}, json={'prompt': prompt, 'stopSequences': stop, **params}) if response.status_code != 200: optional_detail = response.json().get('error') raise ValueError( f'AI21 /complete call failed with status code {response.status_code}. Details: {optional_detail}' ) response_json = response.json() return response_json['completions'][0]['data']['text']
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: """Call out to AI21's complete endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = ai21("Tell me a joke.") """ if self.stop is not None and stop is not None: raise ValueError('`stop` found in both the input and default params.') elif self.stop is not None: stop = self.stop elif stop is None: stop = [] if self.base_url is not None: base_url = self.base_url elif self.model in ('j1-grande-instruct',): base_url = 'https://api.ai21.com/studio/v1/experimental' else: base_url = 'https://api.ai21.com/studio/v1' params = {**self._default_params, **kwargs} self.ai21_api_key = cast(SecretStr, self.ai21_api_key) response = requests.post(url=f'{base_url}/{self.model}/complete', headers={'Authorization': f'Bearer {self.ai21_api_key.get_secret_value()}'}, json={'prompt': prompt, 'stopSequences': stop, **params}) if response.status_code != 200: optional_detail = response.json().get('error') raise ValueError( f'AI21 /complete call failed with status code {response.status_code}. Details: {optional_detail}' ) response_json = response.json() return response_json['completions'][0]['data']['text']
Call out to AI21's complete endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = ai21("Tell me a joke.")
get_action_and_input
output = output_parser.parse_folder(text) if isinstance(output, AgentAction): return output.tool, str(output.tool_input) else: return 'Final Answer', output.return_values['output']
def get_action_and_input(text: str) ->Tuple[str, str]: output = output_parser.parse_folder(text) if isinstance(output, AgentAction): return output.tool, str(output.tool_input) else: return 'Final Answer', output.return_values['output']
null
_generate
text_generations: List[str] = [] for i in range(0, len(prompts), self.batch_size): batch_prompts = prompts[i:i + self.batch_size] responses = self.pipeline(batch_prompts) for j, response in enumerate(responses): if isinstance(response, list): response = response[0] if self.pipeline.task == 'text-generation': try: from transformers.pipelines.text_generation import ReturnType remove_prompt = self.pipeline._postprocess_params.get( 'return_type') != ReturnType.NEW_TEXT except Exception as e: logger.warning( f'Unable to extract pipeline return_type. Received error:\n\n{e}' ) remove_prompt = True if remove_prompt: text = response['generated_text'][len(batch_prompts[j]):] else: text = response['generated_text'] elif self.pipeline.task == 'text2text-generation': text = response['generated_text'] elif self.pipeline.task == 'summarization': text = response['summary_text'] else: raise ValueError( f'Got invalid task {self.pipeline.task}, currently only {VALID_TASKS} are supported' ) if stop: text = enforce_stop_tokens(text, stop) text_generations.append(text) return LLMResult(generations=[[Generation(text=text)] for text in text_generations])
def _generate(self, prompts: List[str], stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any ) ->LLMResult: text_generations: List[str] = [] for i in range(0, len(prompts), self.batch_size): batch_prompts = prompts[i:i + self.batch_size] responses = self.pipeline(batch_prompts) for j, response in enumerate(responses): if isinstance(response, list): response = response[0] if self.pipeline.task == 'text-generation': try: from transformers.pipelines.text_generation import ReturnType remove_prompt = self.pipeline._postprocess_params.get( 'return_type') != ReturnType.NEW_TEXT except Exception as e: logger.warning( f'Unable to extract pipeline return_type. Received error:\n\n{e}' ) remove_prompt = True if remove_prompt: text = response['generated_text'][len(batch_prompts[j]):] else: text = response['generated_text'] elif self.pipeline.task == 'text2text-generation': text = response['generated_text'] elif self.pipeline.task == 'summarization': text = response['summary_text'] else: raise ValueError( f'Got invalid task {self.pipeline.task}, currently only {VALID_TASKS} are supported' ) if stop: text = enforce_stop_tokens(text, stop) text_generations.append(text) return LLMResult(generations=[[Generation(text=text)] for text in text_generations])
null
test_huggingface_tokenizer
"""Test text splitter that uses a HuggingFace tokenizer.""" from transformers import GPT2TokenizerFast tokenizer = GPT2TokenizerFast.from_pretrained('gpt2') text_splitter = CharacterTextSplitter.from_huggingface_tokenizer(tokenizer, separator=' ', chunk_size=1, chunk_overlap=0) output = text_splitter.split_text('foo bar') assert output == ['foo', 'bar']
def test_huggingface_tokenizer() ->None: """Test text splitter that uses a HuggingFace tokenizer.""" from transformers import GPT2TokenizerFast tokenizer = GPT2TokenizerFast.from_pretrained('gpt2') text_splitter = CharacterTextSplitter.from_huggingface_tokenizer(tokenizer, separator=' ', chunk_size=1, chunk_overlap=0) output = text_splitter.split_text('foo bar') assert output == ['foo', 'bar']
Test text splitter that uses a HuggingFace tokenizer.
delete
""" Args: skip_strict_exist_check: Deprecated. This is not used basically. """ try: from vald.v1.payload import payload_pb2 from vald.v1.vald import remove_pb2_grpc except ImportError: raise ValueError( 'Could not import vald-client-python python package. Please install it with `pip install vald-client-python`.' ) if ids is None: raise ValueError('No ids provided to delete') channel = self._get_channel() stub = remove_pb2_grpc.RemoveStub(channel) cfg = payload_pb2.Remove.Config(skip_strict_exist_check=skip_strict_exist_check ) for _id in ids: oid = payload_pb2.Object.ID(id=_id) _ = stub.Remove(payload_pb2.Remove.Request(id=oid, config=cfg), metadata=grpc_metadata) channel.close() return True
def delete(self, ids: Optional[List[str]]=None, skip_strict_exist_check: bool=False, grpc_metadata: Optional[Any]=None, **kwargs: Any) ->Optional[ bool]: """ Args: skip_strict_exist_check: Deprecated. This is not used basically. """ try: from vald.v1.payload import payload_pb2 from vald.v1.vald import remove_pb2_grpc except ImportError: raise ValueError( 'Could not import vald-client-python python package. Please install it with `pip install vald-client-python`.' ) if ids is None: raise ValueError('No ids provided to delete') channel = self._get_channel() stub = remove_pb2_grpc.RemoveStub(channel) cfg = payload_pb2.Remove.Config(skip_strict_exist_check= skip_strict_exist_check) for _id in ids: oid = payload_pb2.Object.ID(id=_id) _ = stub.Remove(payload_pb2.Remove.Request(id=oid, config=cfg), metadata=grpc_metadata) channel.close() return True
Args: skip_strict_exist_check: Deprecated. This is not used basically.
_should_continue
return self.iterations < 2
def _should_continue(self) ->bool: return self.iterations < 2
null
test_delete
with mock.patch('nuclia.sdk.resource.NucliaResource.delete', new_callable= FakeDelete): ndb = NucliaDB(knowledge_box='YOUR_KB_ID', local=False, api_key= 'YOUR_API_KEY') success = ndb.delete(['123', '456']) assert success
def test_delete() ->None: with mock.patch('nuclia.sdk.resource.NucliaResource.delete', new_callable=FakeDelete): ndb = NucliaDB(knowledge_box='YOUR_KB_ID', local=False, api_key= 'YOUR_API_KEY') success = ndb.delete(['123', '456']) assert success
null
test_runnable_context_seq_key_not_found
seq: Runnable = {'bar': Context.setter('input')} | Context.getter('foo') with pytest.raises(ValueError): seq.invoke('foo')
def test_runnable_context_seq_key_not_found() ->None: seq: Runnable = {'bar': Context.setter('input')} | Context.getter('foo') with pytest.raises(ValueError): seq.invoke('foo')
null
validate_environment
"""Validate that we have all required info to access Clarifai platform and python package exists in environment.""" values['pat'] = get_from_dict_or_env(values, 'pat', 'CLARIFAI_PAT') user_id = values.get('user_id') app_id = values.get('app_id') model_id = values.get('model_id') model_url = values.get('model_url') if model_url is not None and model_id is not None: raise ValueError('Please provide either model_url or model_id, not both.') if model_url is None and model_id is None: raise ValueError('Please provide one of model_url or model_id.') if model_url is None and model_id is not None: if user_id is None or app_id is None: raise ValueError('Please provide a user_id and app_id.') return values
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that we have all required info to access the Clarifai platform
    and that the python package exists in the environment."""
    values['pat'] = get_from_dict_or_env(values, 'pat', 'CLARIFAI_PAT')
    user_id = values.get('user_id')
    app_id = values.get('app_id')
    model_id = values.get('model_id')
    model_url = values.get('model_url')
    if model_url is not None and model_id is not None:
        raise ValueError(
            'Please provide either model_url or model_id, not both.')
    if model_url is None and model_id is None:
        raise ValueError('Please provide one of model_url or model_id.')
    if model_url is None and model_id is not None:
        if user_id is None or app_id is None:
            raise ValueError('Please provide a user_id and app_id.')
    return values
Validate that we have all required info to access the Clarifai platform and that the python package exists in the environment.
_get_elements
from unstructured.partition.csv import partition_csv return partition_csv(filename=self.file_path, **self.unstructured_kwargs)
def _get_elements(self) ->List: from unstructured.partition.csv import partition_csv return partition_csv(filename=self.file_path, **self.unstructured_kwargs)
null
__init__
"""Vector store interface for testing things in memory.""" self.store: Dict[str, Document] = {} self.permit_upserts = permit_upserts
def __init__(self, permit_upserts: bool=False) ->None: """Vector store interface for testing things in memory.""" self.store: Dict[str, Document] = {} self.permit_upserts = permit_upserts
Vector store interface for testing things in memory.
_Name
self.write(t.id)
def _Name(self, t): self.write(t.id)
null
add_texts
"""Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids associated with the texts. batch_size: Optional batch size to upsert docs. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ ids = ids or [str(uuid.uuid4().hex) for _ in texts] text_list = list(texts) for i in range(0, len(text_list), batch_size): end = min(i + batch_size, len(text_list)) batch_texts = text_list[i:end] batch_ids = ids[i:end] batch_embeddings = self._embedding.embed_documents(list(batch_texts)) if metadatas: batch_metadatas = metadatas[i:end] else: batch_metadatas = [{} for _ in range(i, end)] for metadata, text in zip(batch_metadatas, batch_texts): metadata[self._text_field] = text docs = list(zip(batch_ids, batch_embeddings, batch_metadatas)) ret = self._collection.upsert(docs) if not ret: raise ValueError( f'Fail to upsert docs to dashvector vector database,Error: {ret.message}' ) return ids
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
    None, ids: Optional[List[str]]=None, batch_size: int=25, **kwargs: Any
    ) ->List[str]:
    """Run more texts through the embeddings and add to the vectorstore.

    Args:
        texts: Iterable of strings to add to the vectorstore.
        metadatas: Optional list of metadatas associated with the texts.
        ids: Optional list of ids associated with the texts.
        batch_size: Optional batch size to upsert docs.
        kwargs: vectorstore specific parameters

    Returns:
        List of ids from adding the texts into the vectorstore.
    """
    ids = ids or [str(uuid.uuid4().hex) for _ in texts]
    text_list = list(texts)
    for i in range(0, len(text_list), batch_size):
        end = min(i + batch_size, len(text_list))
        batch_texts = text_list[i:end]
        batch_ids = ids[i:end]
        batch_embeddings = self._embedding.embed_documents(list(batch_texts))
        if metadatas:
            batch_metadatas = metadatas[i:end]
        else:
            batch_metadatas = [{} for _ in range(i, end)]
        for metadata, text in zip(batch_metadatas, batch_texts):
            metadata[self._text_field] = text
        docs = list(zip(batch_ids, batch_embeddings, batch_metadatas))
        ret = self._collection.upsert(docs)
        if not ret:
            raise ValueError(
                f'Failed to upsert docs to the DashVector vector database. Error: {ret.message}'
                )
    return ids
Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids associated with the texts. batch_size: Optional batch size to upsert docs. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore.
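A hedged usage sketch; `store` stands in for an already constructed DashVector vector store with an embedding function attached.

texts = [f'document {i}' for i in range(60)]
metadatas = [{'source': 'demo'} for _ in texts]
# With batch_size=25 this issues three upserts (25 + 25 + 10 vectors).
ids = store.add_texts(texts, metadatas=metadatas, batch_size=25)
assert len(ids) == len(texts)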
_import_elasticsearch
from langchain_community.vectorstores.elasticsearch import ElasticsearchStore return ElasticsearchStore
def _import_elasticsearch() ->Any: from langchain_community.vectorstores.elasticsearch import ElasticsearchStore return ElasticsearchStore
null
_import_gooseai
from langchain_community.llms.gooseai import GooseAI return GooseAI
def _import_gooseai() ->Any: from langchain_community.llms.gooseai import GooseAI return GooseAI
null
__init__
warnings.warn( '`MlflowAIGatewayEmbeddings` is deprecated. Use `MlflowEmbeddings` or `DatabricksEmbeddings` instead.' , DeprecationWarning) try: import mlflow.gateway except ImportError as e: raise ImportError( 'Could not import `mlflow.gateway` module. Please install it with `pip install mlflow[gateway]`.' ) from e super().__init__(**kwargs) if self.gateway_uri: mlflow.gateway.set_gateway_uri(self.gateway_uri)
def __init__(self, **kwargs: Any): warnings.warn( '`MlflowAIGatewayEmbeddings` is deprecated. Use `MlflowEmbeddings` or `DatabricksEmbeddings` instead.' , DeprecationWarning) try: import mlflow.gateway except ImportError as e: raise ImportError( 'Could not import `mlflow.gateway` module. Please install it with `pip install mlflow[gateway]`.' ) from e super().__init__(**kwargs) if self.gateway_uri: mlflow.gateway.set_gateway_uri(self.gateway_uri)
null
drop
with s2.connect(TEST_SINGLESTOREDB_URL) as conn: conn.autocommit(True) with conn.cursor() as cursor: cursor.execute(f'DROP TABLE IF EXISTS {table_name};')
def drop(table_name: str) ->None: with s2.connect(TEST_SINGLESTOREDB_URL) as conn: conn.autocommit(True) with conn.cursor() as cursor: cursor.execute(f'DROP TABLE IF EXISTS {table_name};')
null
parse_output
partial_completion = outputs['partial_completion'] steps = outputs['intermediate_steps'] search_query = extract_between_tags('search_query', partial_completion + '</search_query>') if search_query is None: docs = [] str_output = '' for action, observation in steps: docs.extend(observation) str_output += action.log str_output += '</search_query>' + _format_docs(observation) str_output += partial_completion return AgentFinish({'docs': docs, 'output': str_output}, log= partial_completion) else: return AgentAction(tool='search', tool_input=search_query, log= partial_completion)
def parse_output(outputs): partial_completion = outputs['partial_completion'] steps = outputs['intermediate_steps'] search_query = extract_between_tags('search_query', partial_completion + '</search_query>') if search_query is None: docs = [] str_output = '' for action, observation in steps: docs.extend(observation) str_output += action.log str_output += '</search_query>' + _format_docs(observation) str_output += partial_completion return AgentFinish({'docs': docs, 'output': str_output}, log= partial_completion) else: return AgentAction(tool='search', tool_input=search_query, log= partial_completion)
null
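parse_output leans on an extract_between_tags helper that is not shown in this entry. A minimal sketch of what such a helper could look like (the real implementation may differ):

import re
from typing import Optional

def extract_between_tags(tag: str, text: str) -> Optional[str]:
    # Return the content of the first <tag>...</tag> pair, or None if absent (sketch).
    match = re.search(rf'<{tag}>(.*?)</{tag}>', text, re.DOTALL)
    return match.group(1).strip() if match else None

# parse_output appends '</search_query>' before extracting, so a completion that stops
# mid-tag still yields a query and becomes an AgentAction; otherwise it is an AgentFinish.
print(extract_between_tags('search_query', '<search_query>weather in Paris</search_query>'))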
transform_input
"""Transforms the input to a format that model can accept as the request Body. Should return bytes or seekable file like object in the format specified in the content_type request header. """
@abstractmethod def transform_input(self, prompt: INPUT_TYPE, model_kwargs: Dict) ->bytes: """Transforms the input to a format that model can accept as the request Body. Should return bytes or seekable file like object in the format specified in the content_type request header. """
Transforms the input to a format that model can accept as the request Body. Should return bytes or seekable file like object in the format specified in the content_type request header.
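A hedged sketch of a concrete handler implementing transform_input for a JSON-speaking endpoint; the request and response schemas below are assumptions and depend on the model being served.

import json
from typing import Dict

class JSONContentHandler:
    content_type = 'application/json'
    accepts = 'application/json'

    def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
        # Serialize the prompt plus generation parameters into the body format
        # the endpoint expects; the shape must match the declared content_type.
        return json.dumps({'inputs': prompt, 'parameters': model_kwargs}).encode('utf-8')

    def transform_output(self, output: bytes) -> str:
        # Assumed response schema: [{"generated_text": "..."}]
        return json.loads(output.decode('utf-8'))[0]['generated_text']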
test_include_types3
structured_schema = {'node_props': {'Movie': [{'property': 'title', 'type': 'STRING'}], 'Actor': [{'property': 'name', 'type': 'STRING'}], 'Person': [{'property': 'name', 'type': 'STRING'}]}, 'rel_props': {}, 'relationships': [{'start': 'Actor', 'end': 'Movie', 'type': 'ACTED_IN' }, {'start': 'Person', 'end': 'Movie', 'type': 'DIRECTED'}]} include_types = ['Movie', 'Actor', 'ACTED_IN'] output = construct_schema(structured_schema, include_types, []) expected_schema = """Node properties are the following: Movie {title: STRING},Actor {name: STRING} Relationship properties are the following: The relationships are the following: (:Actor)-[:ACTED_IN]->(:Movie)""" assert output == expected_schema
def test_include_types3() ->None: structured_schema = {'node_props': {'Movie': [{'property': 'title', 'type': 'STRING'}], 'Actor': [{'property': 'name', 'type': 'STRING' }], 'Person': [{'property': 'name', 'type': 'STRING'}]}, 'rel_props': {}, 'relationships': [{'start': 'Actor', 'end': 'Movie', 'type': 'ACTED_IN'}, {'start': 'Person', 'end': 'Movie', 'type': 'DIRECTED'}]} include_types = ['Movie', 'Actor', 'ACTED_IN'] output = construct_schema(structured_schema, include_types, []) expected_schema = """Node properties are the following: Movie {title: STRING},Actor {name: STRING} Relationship properties are the following: The relationships are the following: (:Actor)-[:ACTED_IN]->(:Movie)""" assert output == expected_schema
null
__init__
"""Creates an empty DeepLakeVectorStore or loads an existing one. The DeepLakeVectorStore is located at the specified ``path``. Examples: >>> # Create a vector store with default tensors >>> deeplake_vectorstore = DeepLake( ... path = <path_for_storing_Data>, ... ) >>> >>> # Create a vector store in the Deep Lake Managed Tensor Database >>> data = DeepLake( ... path = "hub://org_id/dataset_name", ... runtime = {"tensor_db": True}, ... ) Args: dataset_path (str): Path to existing dataset or where to create a new one. Defaults to _LANGCHAIN_DEFAULT_DEEPLAKE_PATH. token (str, optional): Activeloop token, for fetching credentials to the dataset at path if it is a Deep Lake dataset. Tokens are normally autogenerated. Optional. embedding (Embeddings, optional): Function to convert either documents or query. Optional. embedding_function (Embeddings, optional): Function to convert either documents or query. Optional. Deprecated: keeping this parameter for backwards compatibility. read_only (bool): Open dataset in read-only mode. Default is False. ingestion_batch_size (int): During data ingestion, data is divided into batches. Batch size is the size of each batch. Default is 1000. num_workers (int): Number of workers to use during data ingestion. Default is 0. verbose (bool): Print dataset summary after each operation. Default is True. exec_option (str, optional): DeepLakeVectorStore supports 3 ways to perform searching - "python", "compute_engine", "tensor_db" and auto. Default is None. - ``auto``- Selects the best execution method based on the storage location of the Vector Store. It is the default option. - ``python`` - Pure-python implementation that runs on the client. WARNING: using this with big datasets can lead to memory issues. Data can be stored anywhere. - ``compute_engine`` - C++ implementation of the Deep Lake Compute Engine that runs on the client. Can be used for any data stored in or connected to Deep Lake. Not for in-memory or local datasets. - ``tensor_db`` - Hosted Managed Tensor Database that is responsible for storage and query execution. Only for data stored in the Deep Lake Managed Database. Use runtime = {"db_engine": True} during dataset creation. runtime (Dict, optional): Parameters for creating the Vector Store in Deep Lake's Managed Tensor Database. Not applicable when loading an existing Vector Store. To create a Vector Store in the Managed Tensor Database, set `runtime = {"tensor_db": True}`. index_params (Optional[Dict[str, Union[int, str]]], optional): Dictionary containing information about vector index that will be created. Defaults to None, which will utilize ``DEFAULT_VECTORSTORE_INDEX_PARAMS`` from ``deeplake.constants``. The specified key-values override the default ones. - threshold: The threshold for the dataset size above which an index will be created for the embedding tensor. When the threshold value is set to -1, index creation is turned off. Defaults to -1, which turns off the index. - distance_metric: This key specifies the method of calculating the distance between vectors when creating the vector database (VDB) index. It can either be a string that corresponds to a member of the DistanceType enumeration, or the string value itself. - If no value is provided, it defaults to "L2". - "L2" corresponds to DistanceType.L2_NORM. - "COS" corresponds to DistanceType.COSINE_SIMILARITY. - additional_params: Additional parameters for fine-tuning the index. **kwargs: Other optional keyword arguments. Raises: ValueError: If some condition is not met. 
""" self.ingestion_batch_size = ingestion_batch_size self.num_workers = num_workers self.verbose = verbose if _DEEPLAKE_INSTALLED is False: raise ImportError( 'Could not import deeplake python package. Please install it with `pip install deeplake[enterprise]`.' ) if runtime == {'tensor_db': True} and version_compare(deeplake.__version__, '3.6.7') == -1: raise ImportError( f'To use tensor_db option you need to update deeplake to `3.6.7` or higher. Currently installed deeplake version is {deeplake.__version__}. ' ) self.dataset_path = dataset_path if embedding_function: logger.warning( 'Using embedding function is deprecated and will be removed in the future. Please use embedding instead.' ) self.vectorstore = DeepLakeVectorStore(path=self.dataset_path, embedding_function=embedding_function or embedding, read_only=read_only, token=token, exec_option=exec_option, verbose=verbose, runtime=runtime, index_params=index_params, **kwargs) self._embedding_function = embedding_function or embedding self._id_tensor_name = 'ids' if 'ids' in self.vectorstore.tensors() else 'id'
def __init__(self, dataset_path: str=_LANGCHAIN_DEFAULT_DEEPLAKE_PATH, token: Optional[str]=None, embedding: Optional[Embeddings]=None, embedding_function: Optional[Embeddings]=None, read_only: bool=False, ingestion_batch_size: int=1000, num_workers: int=0, verbose: bool=True, exec_option: Optional[str]=None, runtime: Optional[Dict]=None, index_params: Optional[Dict[str, Union[int, str]]]=None, **kwargs: Any ) ->None: """Creates an empty DeepLakeVectorStore or loads an existing one. The DeepLakeVectorStore is located at the specified ``path``. Examples: >>> # Create a vector store with default tensors >>> deeplake_vectorstore = DeepLake( ... path = <path_for_storing_Data>, ... ) >>> >>> # Create a vector store in the Deep Lake Managed Tensor Database >>> data = DeepLake( ... path = "hub://org_id/dataset_name", ... runtime = {"tensor_db": True}, ... ) Args: dataset_path (str): Path to existing dataset or where to create a new one. Defaults to _LANGCHAIN_DEFAULT_DEEPLAKE_PATH. token (str, optional): Activeloop token, for fetching credentials to the dataset at path if it is a Deep Lake dataset. Tokens are normally autogenerated. Optional. embedding (Embeddings, optional): Function to convert either documents or query. Optional. embedding_function (Embeddings, optional): Function to convert either documents or query. Optional. Deprecated: keeping this parameter for backwards compatibility. read_only (bool): Open dataset in read-only mode. Default is False. ingestion_batch_size (int): During data ingestion, data is divided into batches. Batch size is the size of each batch. Default is 1000. num_workers (int): Number of workers to use during data ingestion. Default is 0. verbose (bool): Print dataset summary after each operation. Default is True. exec_option (str, optional): DeepLakeVectorStore supports 3 ways to perform searching - "python", "compute_engine", "tensor_db" and auto. Default is None. - ``auto``- Selects the best execution method based on the storage location of the Vector Store. It is the default option. - ``python`` - Pure-python implementation that runs on the client. WARNING: using this with big datasets can lead to memory issues. Data can be stored anywhere. - ``compute_engine`` - C++ implementation of the Deep Lake Compute Engine that runs on the client. Can be used for any data stored in or connected to Deep Lake. Not for in-memory or local datasets. - ``tensor_db`` - Hosted Managed Tensor Database that is responsible for storage and query execution. Only for data stored in the Deep Lake Managed Database. Use runtime = {"db_engine": True} during dataset creation. runtime (Dict, optional): Parameters for creating the Vector Store in Deep Lake's Managed Tensor Database. Not applicable when loading an existing Vector Store. To create a Vector Store in the Managed Tensor Database, set `runtime = {"tensor_db": True}`. index_params (Optional[Dict[str, Union[int, str]]], optional): Dictionary containing information about vector index that will be created. Defaults to None, which will utilize ``DEFAULT_VECTORSTORE_INDEX_PARAMS`` from ``deeplake.constants``. The specified key-values override the default ones. - threshold: The threshold for the dataset size above which an index will be created for the embedding tensor. When the threshold value is set to -1, index creation is turned off. Defaults to -1, which turns off the index. - distance_metric: This key specifies the method of calculating the distance between vectors when creating the vector database (VDB) index. 
It can either be a string that corresponds to a member of the DistanceType enumeration, or the string value itself. - If no value is provided, it defaults to "L2". - "L2" corresponds to DistanceType.L2_NORM. - "COS" corresponds to DistanceType.COSINE_SIMILARITY. - additional_params: Additional parameters for fine-tuning the index. **kwargs: Other optional keyword arguments. Raises: ValueError: If some condition is not met. """ self.ingestion_batch_size = ingestion_batch_size self.num_workers = num_workers self.verbose = verbose if _DEEPLAKE_INSTALLED is False: raise ImportError( 'Could not import deeplake python package. Please install it with `pip install deeplake[enterprise]`.' ) if runtime == {'tensor_db': True} and version_compare(deeplake. __version__, '3.6.7') == -1: raise ImportError( f'To use tensor_db option you need to update deeplake to `3.6.7` or higher. Currently installed deeplake version is {deeplake.__version__}. ' ) self.dataset_path = dataset_path if embedding_function: logger.warning( 'Using embedding function is deprecated and will be removed in the future. Please use embedding instead.' ) self.vectorstore = DeepLakeVectorStore(path=self.dataset_path, embedding_function=embedding_function or embedding, read_only= read_only, token=token, exec_option=exec_option, verbose=verbose, runtime=runtime, index_params=index_params, **kwargs) self._embedding_function = embedding_function or embedding self._id_tensor_name = 'ids' if 'ids' in self.vectorstore.tensors( ) else 'id'
Creates an empty DeepLakeVectorStore or loads an existing one. The DeepLakeVectorStore is located at the specified ``path``. Examples: >>> # Create a vector store with default tensors >>> deeplake_vectorstore = DeepLake( ... path = <path_for_storing_Data>, ... ) >>> >>> # Create a vector store in the Deep Lake Managed Tensor Database >>> data = DeepLake( ... path = "hub://org_id/dataset_name", ... runtime = {"tensor_db": True}, ... ) Args: dataset_path (str): Path to existing dataset or where to create a new one. Defaults to _LANGCHAIN_DEFAULT_DEEPLAKE_PATH. token (str, optional): Activeloop token, for fetching credentials to the dataset at path if it is a Deep Lake dataset. Tokens are normally autogenerated. Optional. embedding (Embeddings, optional): Function to convert either documents or query. Optional. embedding_function (Embeddings, optional): Function to convert either documents or query. Optional. Deprecated: keeping this parameter for backwards compatibility. read_only (bool): Open dataset in read-only mode. Default is False. ingestion_batch_size (int): During data ingestion, data is divided into batches. Batch size is the size of each batch. Default is 1000. num_workers (int): Number of workers to use during data ingestion. Default is 0. verbose (bool): Print dataset summary after each operation. Default is True. exec_option (str, optional): DeepLakeVectorStore supports 3 ways to perform searching - "python", "compute_engine", "tensor_db" and auto. Default is None. - ``auto``- Selects the best execution method based on the storage location of the Vector Store. It is the default option. - ``python`` - Pure-python implementation that runs on the client. WARNING: using this with big datasets can lead to memory issues. Data can be stored anywhere. - ``compute_engine`` - C++ implementation of the Deep Lake Compute Engine that runs on the client. Can be used for any data stored in or connected to Deep Lake. Not for in-memory or local datasets. - ``tensor_db`` - Hosted Managed Tensor Database that is responsible for storage and query execution. Only for data stored in the Deep Lake Managed Database. Use runtime = {"db_engine": True} during dataset creation. runtime (Dict, optional): Parameters for creating the Vector Store in Deep Lake's Managed Tensor Database. Not applicable when loading an existing Vector Store. To create a Vector Store in the Managed Tensor Database, set `runtime = {"tensor_db": True}`. index_params (Optional[Dict[str, Union[int, str]]], optional): Dictionary containing information about vector index that will be created. Defaults to None, which will utilize ``DEFAULT_VECTORSTORE_INDEX_PARAMS`` from ``deeplake.constants``. The specified key-values override the default ones. - threshold: The threshold for the dataset size above which an index will be created for the embedding tensor. When the threshold value is set to -1, index creation is turned off. Defaults to -1, which turns off the index. - distance_metric: This key specifies the method of calculating the distance between vectors when creating the vector database (VDB) index. It can either be a string that corresponds to a member of the DistanceType enumeration, or the string value itself. - If no value is provided, it defaults to "L2". - "L2" corresponds to DistanceType.L2_NORM. - "COS" corresponds to DistanceType.COSINE_SIMILARITY. - additional_params: Additional parameters for fine-tuning the index. **kwargs: Other optional keyword arguments. Raises: ValueError: If some condition is not met.
on_chain_end
"""Print out that we finished a chain.""" print(""" > Finished chain.""")
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) ->None: """Print out that we finished a chain.""" print('\n\x1b[1m> Finished chain.\x1b[0m')
Print out that we finished a chain.
_validate_example_inputs
"""Validate that the example inputs are valid for the model.""" if isinstance(llm_or_chain_factory, BaseLanguageModel): _validate_example_inputs_for_language_model(example, input_mapper) else: chain = llm_or_chain_factory() if isinstance(chain, Chain): _validate_example_inputs_for_chain(example, chain, input_mapper) elif isinstance(chain, Runnable): logger.debug(f'Skipping input validation for {chain}')
def _validate_example_inputs(example: Example, llm_or_chain_factory: MCF, input_mapper: Optional[Callable[[Dict], Any]]) ->None: """Validate that the example inputs are valid for the model.""" if isinstance(llm_or_chain_factory, BaseLanguageModel): _validate_example_inputs_for_language_model(example, input_mapper) else: chain = llm_or_chain_factory() if isinstance(chain, Chain): _validate_example_inputs_for_chain(example, chain, input_mapper) elif isinstance(chain, Runnable): logger.debug(f'Skipping input validation for {chain}')
Validate that the example inputs are valid for the model.
test_mosaicml_embedding_query
"""Test MosaicML embeddings of queries.""" document = 'foo bar' embedding = MosaicMLInstructorEmbeddings() output = embedding.embed_query(document) assert len(output) == 768
def test_mosaicml_embedding_query() ->None: """Test MosaicML embeddings of queries.""" document = 'foo bar' embedding = MosaicMLInstructorEmbeddings() output = embedding.embed_query(document) assert len(output) == 768
Test MosaicML embeddings of queries.
_call
return {self.output_key: self.evaluate(**inputs)}
def _call(self, inputs: Dict[str, Any], run_manager: Optional[ CallbackManagerForChainRun]=None) ->Dict[str, ThoughtValidity]: return {self.output_key: self.evaluate(**inputs)}
null
add_file
if target_path in self.files: raise ValueError('target_path already exists') if not Path(source_path).exists(): raise ValueError('source_path does not exist') self.files[target_path] = FileInfo(target_path=target_path, source_path= source_path, description=description)
def add_file(self, source_path: str, target_path: str, description: str ) ->None: if target_path in self.files: raise ValueError('target_path already exists') if not Path(source_path).exists(): raise ValueError('source_path does not exist') self.files[target_path] = FileInfo(target_path=target_path, source_path =source_path, description=description)
null
_llm_type
"""Return type of chat model.""" return 'databricks-chat'
@property def _llm_type(self) ->str: """Return type of chat model.""" return 'databricks-chat'
Return type of chat model.
_create_chat_result
if not isinstance(response, dict): response = response.dict() for res in response['choices']: if res.get('finish_reason', None) == 'content_filter': raise ValueError( 'Azure has not provided the response due to a content filter being triggered' ) chat_result = super()._create_chat_result(response) if 'model' in response: model = response['model'] if self.model_version: model = f'{model}-{self.model_version}' if chat_result.llm_output is not None and isinstance(chat_result. llm_output, dict): chat_result.llm_output['model_name'] = model return chat_result
def _create_chat_result(self, response: Union[dict, BaseModel]) ->ChatResult: if not isinstance(response, dict): response = response.dict() for res in response['choices']: if res.get('finish_reason', None) == 'content_filter': raise ValueError( 'Azure has not provided the response due to a content filter being triggered' ) chat_result = super()._create_chat_result(response) if 'model' in response: model = response['model'] if self.model_version: model = f'{model}-{self.model_version}' if chat_result.llm_output is not None and isinstance(chat_result. llm_output, dict): chat_result.llm_output['model_name'] = model return chat_result
null
finalize
"""Wrap the wrapped function using the wrapper and update the docstring. Args: wrapper: The wrapper function. new_doc: The new docstring. Returns: The wrapped function. """ wrapper = functools.wraps(wrapped)(wrapper) wrapper.__doc__ = new_doc return wrapper
def finalize(wrapper: Callable[..., Any], new_doc: str) ->T: """Wrap the wrapped function using the wrapper and update the docstring. Args: wrapper: The wrapper function. new_doc: The new docstring. Returns: The wrapped function. """ wrapper = functools.wraps(wrapped)(wrapper) wrapper.__doc__ = new_doc return wrapper
Wrap the wrapped function using the wrapper and update the docstring. Args: wrapper: The wrapper function. new_doc: The new docstring. Returns: The wrapped function.
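A toy illustration of the wrap-and-redocument pattern that finalize applies (the names here are invented for the example):

import functools

def shout(text: str) -> str:
    """Original docstring."""
    return text.upper()

def wrapper(*args, **kwargs):
    return '! ' + shout(*args, **kwargs)

# Copy metadata from the wrapped function, then override the docstring,
# mirroring what finalize does with `wrapped` and `new_doc`.
wrapper = functools.wraps(shout)(wrapper)
wrapper.__doc__ = 'Shout the text with a leading exclamation mark.'
print(wrapper('hi'), wrapper.__name__, wrapper.__doc__)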
_early_stop_msg
"""Try to early-terminate streaming or generation by iterating over stop list""" content = msg.get('content', '') if content and stop: for stop_str in stop: if stop_str and stop_str in content: msg['content'] = content[:content.find(stop_str) + 1] is_stopped = True return msg, is_stopped
def _early_stop_msg(self, msg: dict, is_stopped: bool, stop: Optional[ Sequence[str]]=None) ->Tuple[dict, bool]: """Try to early-terminate streaming or generation by iterating over stop list""" content = msg.get('content', '') if content and stop: for stop_str in stop: if stop_str and stop_str in content: msg['content'] = content[:content.find(stop_str) + 1] is_stopped = True return msg, is_stopped
Try to early-terminate streaming or generation by iterating over stop list
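A standalone re-implementation of the truncation rule, just to show its effect (not the class method itself): the content is kept up to and including the first character of the first matching stop string.

from typing import Optional, Sequence, Tuple

def early_stop(msg: dict, is_stopped: bool, stop: Optional[Sequence[str]] = None) -> Tuple[dict, bool]:
    # Same rule as _early_stop_msg above.
    content = msg.get('content', '')
    if content and stop:
        for stop_str in stop:
            if stop_str and stop_str in content:
                msg['content'] = content[:content.find(stop_str) + 1]
                is_stopped = True
    return msg, is_stopped

msg, stopped = early_stop({'content': 'Answer: 42\nObservation:'}, False, stop=['\nObservation'])
print(repr(msg['content']), stopped)  # 'Answer: 42\n' True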
_get_paths_and_methods_from_spec_dictionary
"""Return a tuple (paths, methods) for every path in spec.""" valid_methods = [verb.value for verb in HTTPVerb] for path_name, path_item in spec['paths'].items(): for method in valid_methods: if method in path_item: yield path_name, method
def _get_paths_and_methods_from_spec_dictionary(spec: dict) ->Iterable[Tuple
    [str, str]]:
    """Yield a (path, method) pair for every operation in the spec."""
    valid_methods = [verb.value for verb in HTTPVerb]
    for path_name, path_item in spec['paths'].items():
        for method in valid_methods:
            if method in path_item:
                yield path_name, method
Yield a (path, method) pair for every operation in the spec.
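A tiny worked example, assuming HTTPVerb enumerates the lowercase HTTP method names; the spec dict is invented for illustration.

spec = {'paths': {
    '/pets': {'get': {}, 'post': {}},
    '/pets/{id}': {'get': {}, 'delete': {}},
}}
valid_methods = ['get', 'put', 'post', 'delete', 'options', 'head', 'patch', 'trace']
for path_name, path_item in spec['paths'].items():
    for method in valid_methods:
        if method in path_item:
            print(path_name, method)
# Expected pairs: ('/pets', 'get'), ('/pets', 'post'), ('/pets/{id}', 'get'), ('/pets/{id}', 'delete')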
is_lc_serializable
return False
@classmethod def is_lc_serializable(cls) ->bool: return False
null
_extract_token_usage
if response is None: return {'generated_token_count': 0, 'input_token_count': 0} input_token_count = 0 generated_token_count = 0 def get_count_value(key: str, result: Dict[str, Any]) ->int: return result.get(key, 0) or 0 for res in response: results = res.get('results') if results: input_token_count += get_count_value('input_token_count', results[0]) generated_token_count += get_count_value('generated_token_count', results[0]) return {'generated_token_count': generated_token_count, 'input_token_count': input_token_count}
@staticmethod def _extract_token_usage(response: Optional[List[Dict[str, Any]]]=None) ->Dict[ str, Any]: if response is None: return {'generated_token_count': 0, 'input_token_count': 0} input_token_count = 0 generated_token_count = 0 def get_count_value(key: str, result: Dict[str, Any]) ->int: return result.get(key, 0) or 0 for res in response: results = res.get('results') if results: input_token_count += get_count_value('input_token_count', results[0]) generated_token_count += get_count_value('generated_token_count', results[0]) return {'generated_token_count': generated_token_count, 'input_token_count': input_token_count}
null
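A small worked example of the aggregation above, using a fabricated response payload in the shape the method expects:

response = [
    {'results': [{'input_token_count': 12, 'generated_token_count': 30}]},
    {'results': [{'input_token_count': 8, 'generated_token_count': 25}]},
]
# Summing over the first result of each entry, as the method does, gives:
#   {'generated_token_count': 55, 'input_token_count': 20}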
__init_subclass__
super().__init_subclass__(**kwargs) if cls.get_relevant_documents != BaseRetriever.get_relevant_documents: warnings.warn( 'Retrievers must implement abstract `_get_relevant_documents` method instead of `get_relevant_documents`' , DeprecationWarning) swap = cls.get_relevant_documents cls.get_relevant_documents = BaseRetriever.get_relevant_documents cls._get_relevant_documents = swap if hasattr(cls, 'aget_relevant_documents' ) and cls.aget_relevant_documents != BaseRetriever.aget_relevant_documents: warnings.warn( 'Retrievers must implement abstract `_aget_relevant_documents` method instead of `aget_relevant_documents`' , DeprecationWarning) aswap = cls.aget_relevant_documents cls.aget_relevant_documents = BaseRetriever.aget_relevant_documents cls._aget_relevant_documents = aswap parameters = signature(cls._get_relevant_documents).parameters cls._new_arg_supported = parameters.get('run_manager') is not None cls._expects_other_args = len(set(parameters.keys()) - {'self', 'query', 'run_manager'}) > 0
def __init_subclass__(cls, **kwargs: Any) ->None: super().__init_subclass__(**kwargs) if cls.get_relevant_documents != BaseRetriever.get_relevant_documents: warnings.warn( 'Retrievers must implement abstract `_get_relevant_documents` method instead of `get_relevant_documents`' , DeprecationWarning) swap = cls.get_relevant_documents cls.get_relevant_documents = BaseRetriever.get_relevant_documents cls._get_relevant_documents = swap if hasattr(cls, 'aget_relevant_documents' ) and cls.aget_relevant_documents != BaseRetriever.aget_relevant_documents: warnings.warn( 'Retrievers must implement abstract `_aget_relevant_documents` method instead of `aget_relevant_documents`' , DeprecationWarning) aswap = cls.aget_relevant_documents cls.aget_relevant_documents = BaseRetriever.aget_relevant_documents cls._aget_relevant_documents = aswap parameters = signature(cls._get_relevant_documents).parameters cls._new_arg_supported = parameters.get('run_manager') is not None cls._expects_other_args = len(set(parameters.keys()) - {'self', 'query', 'run_manager'}) > 0
null
test_parse_examples_failes_wrong_sequence
with pytest.raises(ValueError) as exc_info: _ = _parse_examples([AIMessage(content='a')]) print(str(exc_info.value)) assert str(exc_info.value ) == 'Expect examples to have an even amount of messages, got 1.'
def test_parse_examples_failes_wrong_sequence() ->None: with pytest.raises(ValueError) as exc_info: _ = _parse_examples([AIMessage(content='a')]) print(str(exc_info.value)) assert str(exc_info.value ) == 'Expect examples to have an even amount of messages, got 1.'
null
convert_messages
history = ChatMessageHistory() for item in input: history.add_user_message(item['result']['question']) history.add_ai_message(item['result']['answer']) return history
def convert_messages(input: List[Dict[str, Any]]) ->ChatMessageHistory: history = ChatMessageHistory() for item in input: history.add_user_message(item['result']['question']) history.add_ai_message(item['result']['answer']) return history
null
__init__
"""Constructs a new RocksetChatMessageHistory. Args: - session_id: The ID of the chat session - client: The RocksetClient object to use to query - collection: The name of the collection to use to store chat messages. If a collection with the given name does not exist in the workspace, it is created. - workspace: The workspace containing `collection`. Defaults to `"commons"` - messages_key: The DB column containing message history. Defaults to `"messages"` - sync: Whether to wait for messages to be added. Defaults to `False`. NOTE: setting this to `True` will slow down performance. - message_uuid_method: The method that generates message IDs. If set, all messages will have an `id` field within the `additional_kwargs` property. If this param is not set and `sync` is `False`, message IDs will not be created. If this param is not set and `sync` is `True`, the `uuid.uuid4` method will be used to create message IDs. """ try: import rockset except ImportError: raise ImportError( 'Could not import rockset client python package. Please install it with `pip install rockset`.' ) if not isinstance(client, rockset.RocksetClient): raise ValueError( f'client should be an instance of rockset.RocksetClient, got {type(client)}' ) self.session_id = session_id self.client = client self.collection = collection self.workspace = workspace self.location = f'"{self.workspace}"."{self.collection}"' self.rockset = rockset self.messages_key = messages_key self.message_uuid_method = message_uuid_method self.sync = sync try: self.client.set_application('langchain') except AttributeError: pass if not self._collection_exists(): self._create_collection() self._wait_until_collection_created() self._create_empty_doc() elif not self._document_exists(): self._create_empty_doc()
def __init__(self, session_id: str, client: Any, collection: str, workspace: str='commons', messages_key: str='messages', sync: bool=False, message_uuid_method: Callable[[], Union[str, int]]=lambda : str(uuid4()) ) ->None: """Constructs a new RocksetChatMessageHistory. Args: - session_id: The ID of the chat session - client: The RocksetClient object to use to query - collection: The name of the collection to use to store chat messages. If a collection with the given name does not exist in the workspace, it is created. - workspace: The workspace containing `collection`. Defaults to `"commons"` - messages_key: The DB column containing message history. Defaults to `"messages"` - sync: Whether to wait for messages to be added. Defaults to `False`. NOTE: setting this to `True` will slow down performance. - message_uuid_method: The method that generates message IDs. If set, all messages will have an `id` field within the `additional_kwargs` property. If this param is not set and `sync` is `False`, message IDs will not be created. If this param is not set and `sync` is `True`, the `uuid.uuid4` method will be used to create message IDs. """ try: import rockset except ImportError: raise ImportError( 'Could not import rockset client python package. Please install it with `pip install rockset`.' ) if not isinstance(client, rockset.RocksetClient): raise ValueError( f'client should be an instance of rockset.RocksetClient, got {type(client)}' ) self.session_id = session_id self.client = client self.collection = collection self.workspace = workspace self.location = f'"{self.workspace}"."{self.collection}"' self.rockset = rockset self.messages_key = messages_key self.message_uuid_method = message_uuid_method self.sync = sync try: self.client.set_application('langchain') except AttributeError: pass if not self._collection_exists(): self._create_collection() self._wait_until_collection_created() self._create_empty_doc() elif not self._document_exists(): self._create_empty_doc()
Constructs a new RocksetChatMessageHistory. Args: - session_id: The ID of the chat session - client: The RocksetClient object to use to query - collection: The name of the collection to use to store chat messages. If a collection with the given name does not exist in the workspace, it is created. - workspace: The workspace containing `collection`. Defaults to `"commons"` - messages_key: The DB column containing message history. Defaults to `"messages"` - sync: Whether to wait for messages to be added. Defaults to `False`. NOTE: setting this to `True` will slow down performance. - message_uuid_method: The method that generates message IDs. If set, all messages will have an `id` field within the `additional_kwargs` property. If this param is not set and `sync` is `False`, message IDs will not be created. If this param is not set and `sync` is `True`, the `uuid.uuid4` method will be used to create message IDs.
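A hedged construction sketch; the API key, region host, and collection name are placeholders, and the import path may differ between versions.

from rockset import RocksetClient, Regions
from langchain.memory.chat_message_histories import RocksetChatMessageHistory  # path may vary

client = RocksetClient(host=Regions.usw2a1, api_key='YOUR_API_KEY')
history = RocksetChatMessageHistory(
    session_id='session-123',
    client=client,
    collection='chat_history',
    sync=True,  # wait for writes; slower but immediately consistent
)
history.add_user_message('Hello!')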
_stream
request = get_cohere_chat_request(messages, **self._default_params, **kwargs) stream = self.client.chat(**request, stream=True) for data in stream: if data.event_type == 'text-generation': delta = data.text yield ChatGenerationChunk(message=AIMessageChunk(content=delta)) if run_manager: run_manager.on_llm_new_token(delta)
def _stream(self, messages: List[BaseMessage], stop: Optional[List[str]]= None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any ) ->Iterator[ChatGenerationChunk]: request = get_cohere_chat_request(messages, **self._default_params, ** kwargs) stream = self.client.chat(**request, stream=True) for data in stream: if data.event_type == 'text-generation': delta = data.text yield ChatGenerationChunk(message=AIMessageChunk(content=delta)) if run_manager: run_manager.on_llm_new_token(delta)
null
from_params
"""Convenience constructor that builds the MaxCompute API wrapper from given parameters. Args: query: SQL query to execute. endpoint: MaxCompute endpoint. project: A project is a basic organizational unit of MaxCompute, which is similar to a database. access_id: MaxCompute access ID. Should be passed in directly or set as the environment variable `MAX_COMPUTE_ACCESS_ID`. secret_access_key: MaxCompute secret access key. Should be passed in directly or set as the environment variable `MAX_COMPUTE_SECRET_ACCESS_KEY`. """ api_wrapper = MaxComputeAPIWrapper.from_params(endpoint, project, access_id =access_id, secret_access_key=secret_access_key) return cls(query, api_wrapper, **kwargs)
@classmethod def from_params(cls, query: str, endpoint: str, project: str, *, access_id: Optional[str]=None, secret_access_key: Optional[str]=None, **kwargs: Any ) ->MaxComputeLoader: """Convenience constructor that builds the MaxCompute API wrapper from given parameters. Args: query: SQL query to execute. endpoint: MaxCompute endpoint. project: A project is a basic organizational unit of MaxCompute, which is similar to a database. access_id: MaxCompute access ID. Should be passed in directly or set as the environment variable `MAX_COMPUTE_ACCESS_ID`. secret_access_key: MaxCompute secret access key. Should be passed in directly or set as the environment variable `MAX_COMPUTE_SECRET_ACCESS_KEY`. """ api_wrapper = MaxComputeAPIWrapper.from_params(endpoint, project, access_id=access_id, secret_access_key=secret_access_key) return cls(query, api_wrapper, **kwargs)
Convenience constructor that builds the MaxCompute API wrapper from given parameters. Args: query: SQL query to execute. endpoint: MaxCompute endpoint. project: A project is a basic organizational unit of MaxCompute, which is similar to a database. access_id: MaxCompute access ID. Should be passed in directly or set as the environment variable `MAX_COMPUTE_ACCESS_ID`. secret_access_key: MaxCompute secret access key. Should be passed in directly or set as the environment variable `MAX_COMPUTE_SECRET_ACCESS_KEY`.
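A hedged usage sketch; endpoint, project, and query are placeholders, and credentials are assumed to come from the MAX_COMPUTE_ACCESS_ID / MAX_COMPUTE_SECRET_ACCESS_KEY environment variables.

from langchain_community.document_loaders import MaxComputeLoader

loader = MaxComputeLoader.from_params(
    query='SELECT id, content FROM my_table LIMIT 10',
    endpoint='<your-maxcompute-endpoint>',
    project='my_project',
)
docs = loader.load()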
test_load_success
"""Test that returns one document""" docs = api_client.load_docs('chatgpt') assert len(docs) == api_client.top_k_results == 3 assert_docs(docs)
def test_load_success(api_client: PubMedAPIWrapper) ->None:
    """Test that the expected number of documents is returned."""
    docs = api_client.load_docs('chatgpt')
    assert len(docs) == api_client.top_k_results == 3
    assert_docs(docs)
Test that the expected number of documents is returned.
is_lc_serializable
return True
@classmethod def is_lc_serializable(cls) ->bool: return True
null
from_texts
"""Create an Epsilla vectorstore from raw documents. Args: texts (List[str]): List of text data to be inserted. embeddings (Embeddings): Embedding function. client (pyepsilla.vectordb.Client): Epsilla client to connect to. metadatas (Optional[List[dict]]): Metadata for each text. Defaults to None. db_path (Optional[str]): The path where the database will be persisted. Defaults to "/tmp/langchain-epsilla". db_name (Optional[str]): Give a name to the loaded database. Defaults to "langchain_store". collection_name (Optional[str]): Which collection to use. Defaults to "langchain_collection". If provided, default collection name will be set as well. drop_old (Optional[bool]): Whether to drop the previous collection and create a new one. Defaults to False. Returns: Epsilla: Epsilla vector store. """ instance = Epsilla(client, embedding, db_path=db_path, db_name=db_name) instance.add_texts(texts, metadatas=metadatas, collection_name= collection_name, drop_old=drop_old, **kwargs) return instance
@classmethod def from_texts(cls: Type[Epsilla], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]]=None, client: Any=None, db_path: Optional[str]=_LANGCHAIN_DEFAULT_DB_PATH, db_name: Optional[str]= _LANGCHAIN_DEFAULT_DB_NAME, collection_name: Optional[str]= _LANGCHAIN_DEFAULT_TABLE_NAME, drop_old: Optional[bool]=False, **kwargs: Any) ->Epsilla: """Create an Epsilla vectorstore from raw documents. Args: texts (List[str]): List of text data to be inserted. embeddings (Embeddings): Embedding function. client (pyepsilla.vectordb.Client): Epsilla client to connect to. metadatas (Optional[List[dict]]): Metadata for each text. Defaults to None. db_path (Optional[str]): The path where the database will be persisted. Defaults to "/tmp/langchain-epsilla". db_name (Optional[str]): Give a name to the loaded database. Defaults to "langchain_store". collection_name (Optional[str]): Which collection to use. Defaults to "langchain_collection". If provided, default collection name will be set as well. drop_old (Optional[bool]): Whether to drop the previous collection and create a new one. Defaults to False. Returns: Epsilla: Epsilla vector store. """ instance = Epsilla(client, embedding, db_path=db_path, db_name=db_name) instance.add_texts(texts, metadatas=metadatas, collection_name= collection_name, drop_old=drop_old, **kwargs) return instance
Create an Epsilla vectorstore from raw documents. Args: texts (List[str]): List of text data to be inserted. embeddings (Embeddings): Embedding function. client (pyepsilla.vectordb.Client): Epsilla client to connect to. metadatas (Optional[List[dict]]): Metadata for each text. Defaults to None. db_path (Optional[str]): The path where the database will be persisted. Defaults to "/tmp/langchain-epsilla". db_name (Optional[str]): Give a name to the loaded database. Defaults to "langchain_store". collection_name (Optional[str]): Which collection to use. Defaults to "langchain_collection". If provided, default collection name will be set as well. drop_old (Optional[bool]): Whether to drop the previous collection and create a new one. Defaults to False. Returns: Epsilla: Epsilla vector store.
mock_psychic
with patch('psychicapi.Psychic') as mock_psychic: yield mock_psychic
@pytest.fixture def mock_psychic(): with patch('psychicapi.Psychic') as mock_psychic: yield mock_psychic
null
from_texts
"""Return VectorStore initialized from texts and embeddings.""" connection = cls.create_connection(db_file) vss = cls(table=table, connection=connection, db_file=db_file, embedding= embedding) vss.add_texts(texts=texts, metadatas=metadatas) return vss
@classmethod def from_texts(cls: Type[SQLiteVSS], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]]=None, table: str= 'langchain', db_file: str='vss.db', **kwargs: Any) ->SQLiteVSS: """Return VectorStore initialized from texts and embeddings.""" connection = cls.create_connection(db_file) vss = cls(table=table, connection=connection, db_file=db_file, embedding=embedding) vss.add_texts(texts=texts, metadatas=metadatas) return vss
Return VectorStore initialized from texts and embeddings.
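A hedged usage sketch for the constructor above; the embedding model, texts, and db_file are placeholders, and the sqlite-vss extension is assumed to be installed.

from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import SQLiteVSS

texts = ['Ketanji Brown Jackson was nominated', 'The weather is sunny today']
vss = SQLiteVSS.from_texts(
    texts=texts,
    embedding=HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2'),
    table='demo',
    db_file='/tmp/vss.db',
)
results = vss.similarity_search('who was nominated', k=1)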