Columns: method_name (string, 1-78 chars), method_body (string, 3-9.66k chars), full_code (string, 31-10.7k chars), docstring (string, 4-4.74k chars; null when absent). Each record below lists these four fields in order.
get_async_callback_manager_for_config
"""Get an async callback manager for a config. Args: config (RunnableConfig): The config. Returns: AsyncCallbackManager: The async callback manager. """ from langchain_core.callbacks.manager import AsyncCallbackManager return AsyncCallbackManager.configure(inheritable_callbacks=config.get( 'callbacks'), inheritable_tags=config.get('tags'), inheritable_metadata =config.get('metadata'))
def get_async_callback_manager_for_config(config: RunnableConfig ) ->AsyncCallbackManager: """Get an async callback manager for a config. Args: config (RunnableConfig): The config. Returns: AsyncCallbackManager: The async callback manager. """ from langchain_core.callbacks.manager import AsyncCallbackManager return AsyncCallbackManager.configure(inheritable_callbacks=config.get( 'callbacks'), inheritable_tags=config.get('tags'), inheritable_metadata=config.get('metadata'))
Get an async callback manager for a config. Args: config (RunnableConfig): The config. Returns: AsyncCallbackManager: The async callback manager.
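A minimal usage sketch for the record above. In current langchain_core this helper is importable from langchain_core.runnables.config; treat the import path as an assumption if you are on another version.

```python
# Usage sketch; import path assumed (recent langchain_core releases).
from langchain_core.runnables import RunnableConfig
from langchain_core.runnables.config import get_async_callback_manager_for_config

config: RunnableConfig = {"tags": ["demo"], "metadata": {"source": "docs"}}
manager = get_async_callback_manager_for_config(config)
print(type(manager).__name__)  # AsyncCallbackManager
```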
index
""" Executes when the index is created. Args: dims_length: Numeric length of the embedding vectors, or None if not using vector-based query. vector_query_field: The field containing the vector representations in the index. similarity: The similarity strategy to use, or None if not using one. Returns: Dict: The Elasticsearch settings and mappings for the strategy. """
@abstractmethod def index(self, dims_length: Union[int, None], vector_query_field: str, similarity: Union[DistanceStrategy, None]) ->Dict: """ Executes when the index is created. Args: dims_length: Numeric length of the embedding vectors, or None if not using vector-based query. vector_query_field: The field containing the vector representations in the index. similarity: The similarity strategy to use, or None if not using one. Returns: Dict: The Elasticsearch settings and mappings for the strategy. """
Executes when the index is created. Args: dims_length: Numeric length of the embedding vectors, or None if not using vector-based query. vector_query_field: The field containing the vector representations in the index. similarity: The similarity strategy to use, or None if not using one. Returns: Dict: The Elasticsearch settings and mappings for the strategy.
_stream_completion_with_retry
responses = self.client.call(**_kwargs) for resp in responses: yield check_response(resp)
@retry_decorator def _stream_completion_with_retry(**_kwargs: Any) ->Any: responses = self.client.call(**_kwargs) for resp in responses: yield check_response(resp)
null
_import_alpha_vantage
from langchain_community.utilities.alpha_vantage import AlphaVantageAPIWrapper return AlphaVantageAPIWrapper
def _import_alpha_vantage() ->Any: from langchain_community.utilities.alpha_vantage import AlphaVantageAPIWrapper return AlphaVantageAPIWrapper
null
batch_sync_run
with ThreadPoolExecutor() as executor: results = list(executor.map(_completion_with_retry, prompt)) return results
def batch_sync_run() ->List: with ThreadPoolExecutor() as executor: results = list(executor.map(_completion_with_retry, prompt)) return results
null
input_keys
"""Will be whatever keys the prompt expects. :meta private: """ return ['problem_description']
@property def input_keys(self) ->List[str]: """Will be whatever keys the prompt expects. :meta private: """ return ['problem_description']
Will be whatever keys the prompt expects. :meta private:
__post_init__
""" Initialize the store. """ from hologres_vector import HologresVector self.storage = HologresVector(self.connection_string, ndims=self.ndims, table_name=self.table_name, table_schema={'document': 'text'}, pre_delete_table=self.pre_delete_table)
def __post_init__(self) ->None: """ Initialize the store. """ from hologres_vector import HologresVector self.storage = HologresVector(self.connection_string, ndims=self.ndims, table_name=self.table_name, table_schema={'document': 'text'}, pre_delete_table=self.pre_delete_table)
Initialize the store.
test_loademptynotebook_emptylistreturned
documents = EverNoteLoader(self.example_notebook_path('empty_export.enex'), False).load() assert len(documents) == 0
def test_loademptynotebook_emptylistreturned(self) ->None: documents = EverNoteLoader(self.example_notebook_path( 'empty_export.enex'), False).load() assert len(documents) == 0
null
__init__
self.secrets_map = secrets_map or dict() self.valid_namespaces = [*DEFAULT_NAMESPACES, *valid_namespaces ] if valid_namespaces else DEFAULT_NAMESPACES
def __init__(self, secrets_map: Optional[Dict[str, str]]=None, valid_namespaces: Optional[List[str]]=None) ->None: self.secrets_map = secrets_map or dict() self.valid_namespaces = [*DEFAULT_NAMESPACES, *valid_namespaces ] if valid_namespaces else DEFAULT_NAMESPACES
null
_get_table_indexes
indexes = self._inspector.get_indexes(table.name) indexes_formatted = '\n'.join(map(_format_index, indexes)) return f"""Table Indexes: {indexes_formatted}"""
def _get_table_indexes(self, table: Table) ->str: indexes = self._inspector.get_indexes(table.name) indexes_formatted = '\n'.join(map(_format_index, indexes)) return f'Table Indexes:\n{indexes_formatted}'
null
parse_result
_result = super().parse_result(result) if self.args_only: pydantic_args = self.pydantic_schema.parse_raw(_result) else: fn_name = _result['name'] _args = _result['arguments'] pydantic_args = self.pydantic_schema[fn_name].parse_raw(_args) return pydantic_args
def parse_result(self, result: List[Generation], *, partial: bool=False) ->Any: _result = super().parse_result(result) if self.args_only: pydantic_args = self.pydantic_schema.parse_raw(_result) else: fn_name = _result['name'] _args = _result['arguments'] pydantic_args = self.pydantic_schema[fn_name].parse_raw(_args) return pydantic_args
null
tool
"""Make tools out of functions, can be used with or without arguments. Args: *args: The arguments to the tool. return_direct: Whether to return directly from the tool rather than continuing the agent loop. args_schema: optional argument schema for user to specify infer_schema: Whether to infer the schema of the arguments from the function's signature. This also makes the resultant tool accept a dictionary input to its `run()` function. Requires: - Function must be of type (str) -> str - Function must have a docstring Examples: .. code-block:: python @tool def search_api(query: str) -> str: # Searches the API for the query. return @tool("search", return_direct=True) def search_api(query: str) -> str: # Searches the API for the query. return """ def _make_with_name(tool_name: str) ->Callable: def _make_tool(dec_func: Union[Callable, Runnable]) ->BaseTool: if isinstance(dec_func, Runnable): runnable = dec_func if runnable.input_schema.schema().get('type') != 'object': raise ValueError('Runnable must have an object schema.') async def ainvoke_wrapper(callbacks: Optional[Callbacks]=None, **kwargs: Any) ->Any: return await runnable.ainvoke(kwargs, {'callbacks': callbacks}) def invoke_wrapper(callbacks: Optional[Callbacks]=None, ** kwargs: Any) ->Any: return runnable.invoke(kwargs, {'callbacks': callbacks}) coroutine = ainvoke_wrapper func = invoke_wrapper schema: Optional[Type[BaseModel]] = runnable.input_schema description = repr(runnable) elif inspect.iscoroutinefunction(dec_func): coroutine = dec_func func = None schema = args_schema description = None else: coroutine = None func = dec_func schema = args_schema description = None if infer_schema or args_schema is not None: return StructuredTool.from_function(func, coroutine, name= tool_name, description=description, return_direct= return_direct, args_schema=schema, infer_schema=infer_schema) if func.__doc__ is None: raise ValueError( 'Function must have a docstring if description not provided and infer_schema is False.' ) return Tool(name=tool_name, func=func, description= f'{tool_name} tool', return_direct=return_direct, coroutine= coroutine) return _make_tool if len(args) == 2 and isinstance(args[0], str) and isinstance(args[1], Runnable ): return _make_with_name(args[0])(args[1]) elif len(args) == 1 and isinstance(args[0], str): return _make_with_name(args[0]) elif len(args) == 1 and callable(args[0]): return _make_with_name(args[0].__name__)(args[0]) elif len(args) == 0: def _partial(func: Callable[[str], str]) ->BaseTool: return _make_with_name(func.__name__)(func) return _partial else: raise ValueError('Too many arguments for tool decorator')
def tool(*args: Union[str, Callable, Runnable], return_direct: bool=False, args_schema: Optional[Type[BaseModel]]=None, infer_schema: bool=True ) ->Callable: """Make tools out of functions, can be used with or without arguments. Args: *args: The arguments to the tool. return_direct: Whether to return directly from the tool rather than continuing the agent loop. args_schema: optional argument schema for user to specify infer_schema: Whether to infer the schema of the arguments from the function's signature. This also makes the resultant tool accept a dictionary input to its `run()` function. Requires: - Function must be of type (str) -> str - Function must have a docstring Examples: .. code-block:: python @tool def search_api(query: str) -> str: # Searches the API for the query. return @tool("search", return_direct=True) def search_api(query: str) -> str: # Searches the API for the query. return """ def _make_with_name(tool_name: str) ->Callable: def _make_tool(dec_func: Union[Callable, Runnable]) ->BaseTool: if isinstance(dec_func, Runnable): runnable = dec_func if runnable.input_schema.schema().get('type') != 'object': raise ValueError('Runnable must have an object schema.') async def ainvoke_wrapper(callbacks: Optional[Callbacks]= None, **kwargs: Any) ->Any: return await runnable.ainvoke(kwargs, {'callbacks': callbacks}) def invoke_wrapper(callbacks: Optional[Callbacks]=None, ** kwargs: Any) ->Any: return runnable.invoke(kwargs, {'callbacks': callbacks}) coroutine = ainvoke_wrapper func = invoke_wrapper schema: Optional[Type[BaseModel]] = runnable.input_schema description = repr(runnable) elif inspect.iscoroutinefunction(dec_func): coroutine = dec_func func = None schema = args_schema description = None else: coroutine = None func = dec_func schema = args_schema description = None if infer_schema or args_schema is not None: return StructuredTool.from_function(func, coroutine, name= tool_name, description=description, return_direct= return_direct, args_schema=schema, infer_schema= infer_schema) if func.__doc__ is None: raise ValueError( 'Function must have a docstring if description not provided and infer_schema is False.' ) return Tool(name=tool_name, func=func, description= f'{tool_name} tool', return_direct=return_direct, coroutine =coroutine) return _make_tool if len(args) == 2 and isinstance(args[0], str) and isinstance(args[1], Runnable): return _make_with_name(args[0])(args[1]) elif len(args) == 1 and isinstance(args[0], str): return _make_with_name(args[0]) elif len(args) == 1 and callable(args[0]): return _make_with_name(args[0].__name__)(args[0]) elif len(args) == 0: def _partial(func: Callable[[str], str]) ->BaseTool: return _make_with_name(func.__name__)(func) return _partial else: raise ValueError('Too many arguments for tool decorator')
Make tools out of functions, can be used with or without arguments. Args: *args: The arguments to the tool. return_direct: Whether to return directly from the tool rather than continuing the agent loop. args_schema: optional argument schema for user to specify infer_schema: Whether to infer the schema of the arguments from the function's signature. This also makes the resultant tool accept a dictionary input to its `run()` function. Requires: - Function must be of type (str) -> str - Function must have a docstring Examples: .. code-block:: python @tool def search_api(query: str) -> str: # Searches the API for the query. return @tool("search", return_direct=True) def search_api(query: str) -> str: # Searches the API for the query. return
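The `tool` docstring above shows both decorator forms; here is a self-contained sketch of them (the function bodies and the `langchain_core.tools` import path are illustrative assumptions; older releases expose the decorator as `langchain.tools.tool`):

```python
from langchain_core.tools import tool  # import path assumed

@tool
def search_api(query: str) -> str:
    """Searches the API for the query."""
    return f"results for {query!r}"

@tool("search", return_direct=True)
def search_direct(query: str) -> str:
    """Searches the API for the query."""
    return f"results for {query!r}"

print(search_api.name)              # "search_api" (name inferred from the function)
print(search_api.run("langchain"))  # single-input tools accept a plain string
print(search_direct.return_direct)  # True
```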
delete_collection
self.logger.debug('Trying to delete collection') with Session(self._bind) as session: collection = self.get_collection(session) if not collection: self.logger.warning('Collection not found') return session.delete(collection) session.commit()
def delete_collection(self) ->None: self.logger.debug('Trying to delete collection') with Session(self._bind) as session: collection = self.get_collection(session) if not collection: self.logger.warning('Collection not found') return session.delete(collection) session.commit()
null
test_write_file_with_root_dir
"""Test the WriteFile tool when a root dir is specified.""" with TemporaryDirectory() as temp_dir: tool = WriteFileTool(root_dir=temp_dir) tool.run({'file_path': 'file.txt', 'text': 'Hello, world!'}) assert (Path(temp_dir) / 'file.txt').exists() assert (Path(temp_dir) / 'file.txt').read_text() == 'Hello, world!'
def test_write_file_with_root_dir() ->None: """Test the WriteFile tool when a root dir is specified.""" with TemporaryDirectory() as temp_dir: tool = WriteFileTool(root_dir=temp_dir) tool.run({'file_path': 'file.txt', 'text': 'Hello, world!'}) assert (Path(temp_dir) / 'file.txt').exists() assert (Path(temp_dir) / 'file.txt').read_text() == 'Hello, world!'
Test the WriteFile tool when a root dir is specified.
create_pbi_agent
"""Construct a Power BI agent from an LLM and tools.""" from langchain.agents import AgentExecutor from langchain.agents.mrkl.base import ZeroShotAgent from langchain.chains.llm import LLMChain if toolkit is None: if powerbi is None: raise ValueError('Must provide either a toolkit or powerbi dataset') toolkit = PowerBIToolkit(powerbi=powerbi, llm=llm, examples=examples) tools = toolkit.get_tools() tables = powerbi.table_names if powerbi else toolkit.powerbi.table_names prompt_params = {'format_instructions': format_instructions } if format_instructions is not None else {} agent = ZeroShotAgent(llm_chain=LLMChain(llm=llm, prompt=ZeroShotAgent. create_prompt(tools, prefix=prefix.format(top_k=top_k).format(tables= tables), suffix=suffix, input_variables=input_variables, ** prompt_params), callback_manager=callback_manager, verbose=verbose), allowed_tools=[tool.name for tool in tools], **kwargs) return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, callback_manager=callback_manager, verbose=verbose, ** agent_executor_kwargs or {})
def create_pbi_agent(llm: BaseLanguageModel, toolkit: Optional[ PowerBIToolkit]=None, powerbi: Optional[PowerBIDataset]=None, callback_manager: Optional[BaseCallbackManager]=None, prefix: str= POWERBI_PREFIX, suffix: str=POWERBI_SUFFIX, format_instructions: Optional[str]=None, examples: Optional[str]=None, input_variables: Optional[List[str]]=None, top_k: int=10, verbose: bool=False, agent_executor_kwargs: Optional[Dict[str, Any]]=None, **kwargs: Any ) ->AgentExecutor: """Construct a Power BI agent from an LLM and tools.""" from langchain.agents import AgentExecutor from langchain.agents.mrkl.base import ZeroShotAgent from langchain.chains.llm import LLMChain if toolkit is None: if powerbi is None: raise ValueError('Must provide either a toolkit or powerbi dataset' ) toolkit = PowerBIToolkit(powerbi=powerbi, llm=llm, examples=examples) tools = toolkit.get_tools() tables = powerbi.table_names if powerbi else toolkit.powerbi.table_names prompt_params = {'format_instructions': format_instructions } if format_instructions is not None else {} agent = ZeroShotAgent(llm_chain=LLMChain(llm=llm, prompt=ZeroShotAgent. create_prompt(tools, prefix=prefix.format(top_k=top_k).format( tables=tables), suffix=suffix, input_variables=input_variables, ** prompt_params), callback_manager=callback_manager, verbose=verbose), allowed_tools=[tool.name for tool in tools], **kwargs) return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, callback_manager=callback_manager, verbose=verbose, ** agent_executor_kwargs or {})
Construct a Power BI agent from an LLM and tools.
test_pandas_output_parser_row_no_column
try: parser.parse('row:1[num_legs]') assert False, 'Should have raised OutputParserException' except OutputParserException: assert True
def test_pandas_output_parser_row_no_column() ->None: try: parser.parse('row:1[num_legs]') assert False, 'Should have raised OutputParserException' except OutputParserException: assert True
null
operations_from_names
"""Initializes Long-Running Operations from their names.""" try: from google.longrunning.operations_pb2 import GetOperationRequest except ImportError as exc: raise ImportError( 'long running operations package not found, please install it with `pip install gapic-google-longrunning`' ) from exc return [self._client.get_operation(request=GetOperationRequest(name=name)) for name in operation_names]
def operations_from_names(self, operation_names: List[str]) ->List['Operation' ]: """Initializes Long-Running Operations from their names.""" try: from google.longrunning.operations_pb2 import GetOperationRequest except ImportError as exc: raise ImportError( 'long running operations package not found, please install it with `pip install gapic-google-longrunning`' ) from exc return [self._client.get_operation(request=GetOperationRequest(name= name)) for name in operation_names]
Initializes Long-Running Operations from their names.
_import_sagemaker_endpoint
from langchain_community.llms.sagemaker_endpoint import SagemakerEndpoint return SagemakerEndpoint
def _import_sagemaker_endpoint() ->Any: from langchain_community.llms.sagemaker_endpoint import SagemakerEndpoint return SagemakerEndpoint
null
_import_searchapi_tool_SearchAPIResults
from langchain_community.tools.searchapi.tool import SearchAPIResults return SearchAPIResults
def _import_searchapi_tool_SearchAPIResults() ->Any: from langchain_community.tools.searchapi.tool import SearchAPIResults return SearchAPIResults
null
from_llm
"""Initialize the LabeledPairwiseStringEvalChain from an LLM. Args: llm (BaseLanguageModel): The LLM to use. prompt (PromptTemplate, optional): The prompt to use. criteria (Union[CRITERIA_TYPE, str], optional): The criteria to use. **kwargs (Any): Additional keyword arguments. Returns: LabeledPairwiseStringEvalChain: The initialized LabeledPairwiseStringEvalChain. Raises: ValueError: If the input variables are not as expected. """ expected_input_vars = {'prediction', 'prediction_b', 'input', 'reference', 'criteria'} prompt_ = prompt or COMPARISON_TEMPLATE_WITH_REFERENCE if expected_input_vars != set(prompt_.input_variables): raise ValueError( f'Input variables should be {expected_input_vars}, but got {prompt_.input_variables}' ) criteria_ = resolve_pairwise_criteria(criteria) criteria_str = '\n'.join(f'{k}: {v}' for k, v in criteria_.items()) criteria_str = CRITERIA_INSTRUCTIONS + criteria_str if criteria_str else '' return cls(llm=llm, prompt=prompt_.partial(criteria=criteria_str), **kwargs)
@classmethod def from_llm(cls, llm: BaseLanguageModel, *, prompt: Optional[ PromptTemplate]=None, criteria: Optional[Union[CRITERIA_TYPE, str]]= None, **kwargs: Any) ->PairwiseStringEvalChain: """Initialize the LabeledPairwiseStringEvalChain from an LLM. Args: llm (BaseLanguageModel): The LLM to use. prompt (PromptTemplate, optional): The prompt to use. criteria (Union[CRITERIA_TYPE, str], optional): The criteria to use. **kwargs (Any): Additional keyword arguments. Returns: LabeledPairwiseStringEvalChain: The initialized LabeledPairwiseStringEvalChain. Raises: ValueError: If the input variables are not as expected. """ expected_input_vars = {'prediction', 'prediction_b', 'input', 'reference', 'criteria'} prompt_ = prompt or COMPARISON_TEMPLATE_WITH_REFERENCE if expected_input_vars != set(prompt_.input_variables): raise ValueError( f'Input variables should be {expected_input_vars}, but got {prompt_.input_variables}' ) criteria_ = resolve_pairwise_criteria(criteria) criteria_str = '\n'.join(f'{k}: {v}' for k, v in criteria_.items()) criteria_str = CRITERIA_INSTRUCTIONS + criteria_str if criteria_str else '' return cls(llm=llm, prompt=prompt_.partial(criteria=criteria_str), **kwargs )
Initialize the LabeledPairwiseStringEvalChain from an LLM. Args: llm (BaseLanguageModel): The LLM to use. prompt (PromptTemplate, optional): The prompt to use. criteria (Union[CRITERIA_TYPE, str], optional): The criteria to use. **kwargs (Any): Additional keyword arguments. Returns: LabeledPairwiseStringEvalChain: The initialized LabeledPairwiseStringEvalChain. Raises: ValueError: If the input variables are not as expected.
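A hedged usage sketch for the evaluator above; `load_evaluator` builds the chain via `from_llm` and by default assumes LLM credentials (e.g. an OpenAI key) are configured in the environment.

```python
# Sketch only: assumes a default LLM backend is available to load_evaluator.
from langchain.evaluation import load_evaluator

evaluator = load_evaluator("labeled_pairwise_string")
result = evaluator.evaluate_string_pairs(
    prediction="Paris",
    prediction_b="Lyon",
    input="What is the capital of France?",
    reference="Paris",
)
print(result["value"])  # typically "A" or "B"
```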
zep_retriever
mock_zep_client: ZepClient = mocker.patch('zep_python.ZepClient', autospec=True ) mock_zep_client.memory = mocker.patch('zep_python.memory.client.MemoryClient', autospec=True) mock_zep_client.memory.search_memory.return_value = copy.deepcopy( search_results) mock_zep_client.memory.asearch_memory.return_value = copy.deepcopy( search_results) zep = ZepRetriever(session_id='123', url='http://localhost:8000') zep.zep_client = mock_zep_client return zep
@pytest.fixture @pytest.mark.requires('zep_python') def zep_retriever(mocker: MockerFixture, search_results: List[ MemorySearchResult]) ->ZepRetriever: mock_zep_client: ZepClient = mocker.patch('zep_python.ZepClient', autospec=True) mock_zep_client.memory = mocker.patch( 'zep_python.memory.client.MemoryClient', autospec=True) mock_zep_client.memory.search_memory.return_value = copy.deepcopy( search_results) mock_zep_client.memory.asearch_memory.return_value = copy.deepcopy( search_results) zep = ZepRetriever(session_id='123', url='http://localhost:8000') zep.zep_client = mock_zep_client return zep
null
from_loaders
"""Create a vectorstore index from loaders.""" docs = [] for loader in loaders: docs.extend(loader.load()) return self.from_documents(docs)
def from_loaders(self, loaders: List[BaseLoader]) ->VectorStoreIndexWrapper: """Create a vectorstore index from loaders.""" docs = [] for loader in loaders: docs.extend(loader.load()) return self.from_documents(docs)
Create a vectorstore index from loaders.
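A hedged sketch of `from_loaders` in use; "notes.txt" is a placeholder path, an embeddings backend is assumed to be configured, and newer releases may require passing an `llm=` argument to `query`.

```python
# Sketch only: placeholder file path, embeddings credentials assumed.
from langchain.indexes import VectorstoreIndexCreator
from langchain_community.document_loaders import TextLoader

index = VectorstoreIndexCreator().from_loaders([TextLoader("notes.txt")])
print(index.query("What are these notes about?"))
```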
delete
"""Evict from cache if there's an entry.""" doc_id = self._make_id(prompt, llm_string) return self.collection.delete_one(doc_id)
def delete(self, prompt: str, llm_string: str) ->None: """Evict from cache if there's an entry.""" doc_id = self._make_id(prompt, llm_string) return self.collection.delete_one(doc_id)
Evict from cache if there's an entry.
test_rust_code_splitter
splitter = RecursiveCharacterTextSplitter.from_language(Language.RUST, chunk_size=CHUNK_SIZE, chunk_overlap=0) code = """ fn main() { println!("Hello, World!"); } """ chunks = splitter.split_text(code) assert chunks == ['fn main() {', 'println!("Hello', ',', 'World!");', '}']
def test_rust_code_splitter() ->None: splitter = RecursiveCharacterTextSplitter.from_language(Language.RUST, chunk_size=CHUNK_SIZE, chunk_overlap=0) code = '\nfn main() {\n println!("Hello, World!");\n}\n ' chunks = splitter.split_text(code) assert chunks == ['fn main() {', 'println!("Hello', ',', 'World!");', '}']
null
is_valid
try: ast.parse(self.code) return True except SyntaxError: return False
def is_valid(self) ->bool: try: ast.parse(self.code) return True except SyntaxError: return False
null
get_format_instructions
return 'Your response should be a list of comma separated values, eg: `foo, bar, baz`'
def get_format_instructions(self) ->str: return ( 'Your response should be a list of comma separated values, eg: `foo, bar, baz`' )
null
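These format instructions pair with the parser's `parse()`; a quick sketch with `CommaSeparatedListOutputParser`, the class this method appears to belong to:

```python
from langchain.output_parsers import CommaSeparatedListOutputParser

parser = CommaSeparatedListOutputParser()
print(parser.get_format_instructions())
print(parser.parse("foo, bar, baz"))  # ['foo', 'bar', 'baz']
```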
run
"""Run the directive. Called any time :example_links:`ClassName` is used in the template *.rst files.""" class_or_func_name = self.arguments[0] links = imported_classes.get(class_or_func_name, {}) list_node = nodes.bullet_list() for doc_name, link in links.items(): item_node = nodes.list_item() para_node = nodes.paragraph() link_node = nodes.reference() link_node['refuri'] = link link_node.append(nodes.Text(doc_name)) para_node.append(link_node) item_node.append(para_node) list_node.append(item_node) if list_node.children: title_node = nodes.title() title_node.append(nodes.Text(f'Examples using {class_or_func_name}')) return [title_node, list_node] return [list_node]
def run(self): """Run the directive. Called any time :example_links:`ClassName` is used in the template *.rst files.""" class_or_func_name = self.arguments[0] links = imported_classes.get(class_or_func_name, {}) list_node = nodes.bullet_list() for doc_name, link in links.items(): item_node = nodes.list_item() para_node = nodes.paragraph() link_node = nodes.reference() link_node['refuri'] = link link_node.append(nodes.Text(doc_name)) para_node.append(link_node) item_node.append(para_node) list_node.append(item_node) if list_node.children: title_node = nodes.title() title_node.append(nodes.Text(f'Examples using {class_or_func_name}')) return [title_node, list_node] return [list_node]
Run the directive. Called any time :example_links:`ClassName` is used in the template *.rst files.
test_prompt
messages = [SystemMessage(content='sys-msg'), HumanMessage(content= 'usr-msg-1'), AIMessage(content='ai-msg-1'), HumanMessage(content= 'usr-msg-2')] actual = model.predict_messages(messages).content expected = 'sys-msg USER: usr-msg-1 ASSISTANT: ai-msg-1 </s>USER: usr-msg-2 ' assert actual == expected
def test_prompt(model: Vicuna) ->None: messages = [SystemMessage(content='sys-msg'), HumanMessage(content= 'usr-msg-1'), AIMessage(content='ai-msg-1'), HumanMessage(content= 'usr-msg-2')] actual = model.predict_messages(messages).content expected = ( 'sys-msg USER: usr-msg-1 ASSISTANT: ai-msg-1 </s>USER: usr-msg-2 ') assert actual == expected
null
return_stopped_response
"""Return response when agent has been stopped due to max iterations.""" if early_stopping_method == 'force': return AgentFinish({'output': 'Agent stopped due to iteration limit or time limit.'}, '') elif early_stopping_method == 'generate': agent_decision = self.plan(intermediate_steps, with_functions=False, ** kwargs) if isinstance(agent_decision, AgentFinish): return agent_decision else: raise ValueError( f'got AgentAction with no functions provided: {agent_decision}') else: raise ValueError( f'early_stopping_method should be one of `force` or `generate`, got {early_stopping_method}' )
def return_stopped_response(self, early_stopping_method: str, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any ) ->AgentFinish: """Return response when agent has been stopped due to max iterations.""" if early_stopping_method == 'force': return AgentFinish({'output': 'Agent stopped due to iteration limit or time limit.'}, '') elif early_stopping_method == 'generate': agent_decision = self.plan(intermediate_steps, with_functions=False, **kwargs) if isinstance(agent_decision, AgentFinish): return agent_decision else: raise ValueError( f'got AgentAction with no functions provided: {agent_decision}' ) else: raise ValueError( f'early_stopping_method should be one of `force` or `generate`, got {early_stopping_method}' )
Return response when agent has been stopped due to max iterations.
embed_documents
"""Call out to Cohere's embedding endpoint. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ response = requests.post('https://api.llmrails.com/v1/embeddings', headers= {'X-API-KEY': self.api_key.get_secret_value()}, json={'input': texts, 'model': self.model}, timeout=60) return [item['embedding'] for item in response.json()['data']]
def embed_documents(self, texts: List[str]) ->List[List[float]]: """Call out to Cohere's embedding endpoint. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ response = requests.post('https://api.llmrails.com/v1/embeddings', headers={'X-API-KEY': self.api_key.get_secret_value()}, json={ 'input': texts, 'model': self.model}, timeout=60) return [item['embedding'] for item in response.json()['data']]
Call out to Cohere's embedding endpoint. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text.
test_chroma
"""Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] docsearch = Chroma.from_texts(collection_name='test_collection', texts= texts, embedding=FakeEmbeddings()) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')]
def test_chroma() ->None: """Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] docsearch = Chroma.from_texts(collection_name='test_collection', texts= texts, embedding=FakeEmbeddings()) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')]
Test end to end construction and search.
input_keys
""" Returns a list of input keys expected by the prompt. This method defines the input keys that the prompt expects in order to perform its processing. It ensures that the specified keys are available for providing input to the prompt. Returns: List[str]: A list of input keys. Note: This method is considered private and may not be intended for direct external use. """ return [self.input_key]
@property def input_keys(self) ->List[str]: """ Returns a list of input keys expected by the prompt. This method defines the input keys that the prompt expects in order to perform its processing. It ensures that the specified keys are available for providing input to the prompt. Returns: List[str]: A list of input keys. Note: This method is considered private and may not be intended for direct external use. """ return [self.input_key]
Returns a list of input keys expected by the prompt. This method defines the input keys that the prompt expects in order to perform its processing. It ensures that the specified keys are available for providing input to the prompt. Returns: List[str]: A list of input keys. Note: This method is considered private and may not be intended for direct external use.
on_chain_end
if self.__has_valid_config is False: return try: output = _parse_output(outputs) self.__track_event('chain', 'end', run_id=str(run_id), parent_run_id= str(parent_run_id) if parent_run_id else None, output=output, app_id=self.__app_id) except Exception as e: logger.error(f'[LLMonitor] An error occurred in on_chain_end: {e}')
def on_chain_end(self, outputs: Dict[str, Any], *, run_id: UUID, parent_run_id: Union[UUID, None]=None, **kwargs: Any) ->Any: if self.__has_valid_config is False: return try: output = _parse_output(outputs) self.__track_event('chain', 'end', run_id=str(run_id), parent_run_id=str(parent_run_id) if parent_run_id else None, output=output, app_id=self.__app_id) except Exception as e: logger.error(f'[LLMonitor] An error occurred in on_chain_end: {e}')
null
weaviate_url
"""Return the weaviate url.""" from weaviate import Client url = 'http://localhost:8080' yield url client = Client(url) client.schema.delete_all()
@pytest.fixture(scope='class', autouse=True) def weaviate_url(self) ->Union[str, Generator[str, None, None]]: """Return the weaviate url.""" from weaviate import Client url = 'http://localhost:8080' yield url client = Client(url) client.schema.delete_all()
Return the weaviate url.
llm_prefix
"""Prefix to append the LLM call with.""" return ''
@property def llm_prefix(self) ->str: """Prefix to append the LLM call with.""" return ''
Prefix to append the LLM call with.
test_jobs_call
"""Test that call gives correct answer for jobs search.""" search = SearchApiAPIWrapper(engine='google_jobs') output = search.run('AI') assert 'years of experience' in output
def test_jobs_call() ->None: """Test that call gives correct answer for jobs search.""" search = SearchApiAPIWrapper(engine='google_jobs') output = search.run('AI') assert 'years of experience' in output
Test that call gives correct answer for jobs search.
delete
raise NotImplementedError
def delete(self, ids: Optional[List[str]]=None, **kwargs: Any) ->Optional[bool ]: raise NotImplementedError
null
from_embeddings
"""Construct ScaNN wrapper from raw documents. This is a user friendly interface that: 1. Embeds documents. 2. Creates an in memory docstore 3. Initializes the ScaNN database This is intended to be a quick way to get started. Example: .. code-block:: python from langchain_community.vectorstores import ScaNN from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) scann = ScaNN.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from(texts, embeddings, embedding, metadatas=metadatas, ids= ids, **kwargs)
@classmethod def from_embeddings(cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]]=None, ids: Optional[List[str]]=None, **kwargs: Any) ->ScaNN: """Construct ScaNN wrapper from raw documents. This is a user friendly interface that: 1. Embeds documents. 2. Creates an in memory docstore 3. Initializes the ScaNN database This is intended to be a quick way to get started. Example: .. code-block:: python from langchain_community.vectorstores import ScaNN from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) scann = ScaNN.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from(texts, embeddings, embedding, metadatas=metadatas, ids=ids, **kwargs)
Construct ScaNN wrapper from raw documents. This is a user friendly interface that: 1. Embeds documents. 2. Creates an in memory docstore 3. Initializes the ScaNN database This is intended to be a quick way to get started. Example: .. code-block:: python from langchain_community.vectorstores import ScaNN from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) scann = ScaNN.from_embeddings(text_embedding_pairs, embeddings)
_get_paths
"""Get all relative paths in the navbar.""" relative_paths = [] course_menu = soup.find('ul', {'class': 'courseMenu'}) if course_menu is None: raise ValueError('No course menu found.') for link in course_menu.find_all('a'): href = link.get('href') if href is not None and href.startswith('/'): relative_paths.append(href) return relative_paths
def _get_paths(self, soup: Any) ->List[str]: """Get all relative paths in the navbar.""" relative_paths = [] course_menu = soup.find('ul', {'class': 'courseMenu'}) if course_menu is None: raise ValueError('No course menu found.') for link in course_menu.find_all('a'): href = link.get('href') if href is not None and href.startswith('/'): relative_paths.append(href) return relative_paths
Get all relative paths in the navbar.
_selector_effective
if not self.visible_only: return selector return f'{selector} >> visible=1'
def _selector_effective(self, selector: str) ->str: if not self.visible_only: return selector return f'{selector} >> visible=1'
null
visit_structured_query
if structured_query.filter is None: kwargs = {} else: kwargs = {'where_filter': structured_query.filter.accept(self)} return structured_query.query, kwargs
def visit_structured_query(self, structured_query: StructuredQuery) ->Tuple[ str, dict]: if structured_query.filter is None: kwargs = {} else: kwargs = {'where_filter': structured_query.filter.accept(self)} return structured_query.query, kwargs
null
validate_environment
"""Validate that the python package exists in environment.""" try: import arxiv values['arxiv_search'] = arxiv.Search values['arxiv_exceptions' ] = arxiv.ArxivError, arxiv.UnexpectedEmptyPageError, arxiv.HTTPError values['arxiv_result'] = arxiv.Result except ImportError: raise ImportError( 'Could not import arxiv python package. Please install it with `pip install arxiv`.' ) return values
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate that the python package exists in environment.""" try: import arxiv values['arxiv_search'] = arxiv.Search values['arxiv_exceptions' ] = arxiv.ArxivError, arxiv.UnexpectedEmptyPageError, arxiv.HTTPError values['arxiv_result'] = arxiv.Result except ImportError: raise ImportError( 'Could not import arxiv python package. Please install it with `pip install arxiv`.' ) return values
Validate that the python package exists in environment.
test_load_load_extra_metadata
"""Test that returns extra metadata fields.""" loader = BibtexLoader(file_path=str(BIBTEX_EXAMPLE_FILE), load_extra_metadata=True) doc = loader.load()[0] assert set(doc.metadata) == {'id', 'published_year', 'title', 'publication', 'authors', 'abstract', 'booktitle', 'editor', 'organization'}
@pytest.mark.requires('fitz', 'bibtexparser') def test_load_load_extra_metadata() ->None: """Test that returns extra metadata fields.""" loader = BibtexLoader(file_path=str(BIBTEX_EXAMPLE_FILE), load_extra_metadata=True) doc = loader.load()[0] assert set(doc.metadata) == {'id', 'published_year', 'title', 'publication', 'authors', 'abstract', 'booktitle', 'editor', 'organization'}
Test that returns extra metadata fields.
input_keys
"""Input keys for the chain.""" return ['user_input', 'context', 'response']
@property def input_keys(self) ->List[str]: """Input keys for the chain.""" return ['user_input', 'context', 'response']
Input keys for the chain.
_create_api_planner_tool
from langchain.chains.llm import LLMChain endpoint_descriptions = [f'{name} {description}' for name, description, _ in api_spec.endpoints] prompt = PromptTemplate(template=API_PLANNER_PROMPT, input_variables=[ 'query'], partial_variables={'endpoints': '- ' + '- '.join( endpoint_descriptions)}) chain = LLMChain(llm=llm, prompt=prompt) tool = Tool(name=API_PLANNER_TOOL_NAME, description= API_PLANNER_TOOL_DESCRIPTION, func=chain.run) return tool
def _create_api_planner_tool(api_spec: ReducedOpenAPISpec, llm: BaseLanguageModel) ->Tool: from langchain.chains.llm import LLMChain endpoint_descriptions = [f'{name} {description}' for name, description, _ in api_spec.endpoints] prompt = PromptTemplate(template=API_PLANNER_PROMPT, input_variables=[ 'query'], partial_variables={'endpoints': '- ' + '- '.join( endpoint_descriptions)}) chain = LLMChain(llm=llm, prompt=prompt) tool = Tool(name=API_PLANNER_TOOL_NAME, description= API_PLANNER_TOOL_DESCRIPTION, func=chain.run) return tool
null
_log_evaluation_feedback
results = self._select_eval_results(evaluator_response) for res in results: source_info_: Dict[str, Any] = {} if res.evaluator_info: source_info_ = {**res.evaluator_info, **source_info_} run_id_ = getattr(res, 'target_run_id') if hasattr(res, 'target_run_id' ) and res.target_run_id is not None else run.id self.client.create_feedback(run_id_, res.key, score=res.score, value= res.value, comment=res.comment, correction=res.correction, source_info=source_info_, source_run_id=res.source_run_id or source_run_id, feedback_source_type=langsmith.schemas. FeedbackSourceType.MODEL) return results
def _log_evaluation_feedback(self, evaluator_response: Union[ EvaluationResult, EvaluationResults], run: Run, source_run_id: Optional [UUID]=None) ->List[EvaluationResult]: results = self._select_eval_results(evaluator_response) for res in results: source_info_: Dict[str, Any] = {} if res.evaluator_info: source_info_ = {**res.evaluator_info, **source_info_} run_id_ = getattr(res, 'target_run_id') if hasattr(res, 'target_run_id' ) and res.target_run_id is not None else run.id self.client.create_feedback(run_id_, res.key, score=res.score, value=res.value, comment=res.comment, correction=res.correction, source_info=source_info_, source_run_id=res.source_run_id or source_run_id, feedback_source_type=langsmith.schemas. FeedbackSourceType.MODEL) return results
null
template_is_valid
"""Check that prefix, suffix, and input variables are consistent.""" if values['validate_template']: input_variables = values['input_variables'] expected_input_variables = set(values['suffix'].input_variables) expected_input_variables |= set(values['partial_variables']) if values['prefix'] is not None: expected_input_variables |= set(values['prefix'].input_variables) missing_vars = expected_input_variables.difference(input_variables) if missing_vars: raise ValueError( f'Got input_variables={input_variables}, but based on prefix/suffix expected {expected_input_variables}' ) else: values['input_variables'] = sorted(set(values['suffix'].input_variables ) | set(values['prefix'].input_variables if values['prefix'] else [ ]) - set(values['partial_variables'])) return values
@root_validator() def template_is_valid(cls, values: Dict) ->Dict: """Check that prefix, suffix, and input variables are consistent.""" if values['validate_template']: input_variables = values['input_variables'] expected_input_variables = set(values['suffix'].input_variables) expected_input_variables |= set(values['partial_variables']) if values['prefix'] is not None: expected_input_variables |= set(values['prefix'].input_variables) missing_vars = expected_input_variables.difference(input_variables) if missing_vars: raise ValueError( f'Got input_variables={input_variables}, but based on prefix/suffix expected {expected_input_variables}' ) else: values['input_variables'] = sorted(set(values['suffix']. input_variables) | set(values['prefix'].input_variables if values['prefix'] else []) - set(values['partial_variables'])) return values
Check that prefix, suffix, and input variables are consistent.
_dont_flip_the_cos_score
"""Keep similarity from client unchanged ad it's in [0:1] already.""" return similarity0to1
@staticmethod def _dont_flip_the_cos_score(similarity0to1: float) ->float: """Keep similarity from client unchanged ad it's in [0:1] already.""" return similarity0to1
Keep similarity from client unchanged ad it's in [0:1] already.
input_keys
return [self.input_key]
@property def input_keys(self) ->List[str]: return [self.input_key]
null
build_extra
"""Build extra kwargs from additional params that were passed in.""" all_required_field_names = get_pydantic_field_names(cls) extra = values.get('model_kwargs', {}) for field_name in list(values): if field_name in extra: raise ValueError(f'Found {field_name} supplied twice.') if field_name not in all_required_field_names: logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) invalid_model_kwargs = all_required_field_names.intersection(extra.keys()) if invalid_model_kwargs: raise ValueError( f'Parameters {invalid_model_kwargs} should be specified explicitly. Instead they were passed in as part of `model_kwargs` parameter.' ) values['model_kwargs'] = extra return values
@root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) ->Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = get_pydantic_field_names(cls) extra = values.get('model_kwargs', {}) for field_name in list(values): if field_name in extra: raise ValueError(f'Found {field_name} supplied twice.') if field_name not in all_required_field_names: logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) invalid_model_kwargs = all_required_field_names.intersection(extra.keys()) if invalid_model_kwargs: raise ValueError( f'Parameters {invalid_model_kwargs} should be specified explicitly. Instead they were passed in as part of `model_kwargs` parameter.' ) values['model_kwargs'] = extra return values
Build extra kwargs from additional params that were passed in.
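The pattern in `build_extra` is easier to see in isolation; a minimal sketch assuming the pydantic v1 API that this validator targets (class and field names are illustrative):

```python
from pydantic import BaseModel, root_validator  # pydantic v1 API

class FakeLLM(BaseModel):
    temperature: float = 0.7
    model_kwargs: dict = {}

    @root_validator(pre=True)
    def build_extra(cls, values):
        # Move unrecognized constructor arguments into model_kwargs.
        extra = values.get("model_kwargs", {})
        for name in list(values):
            if name not in cls.__fields__:
                extra[name] = values.pop(name)
        values["model_kwargs"] = extra
        return values

print(FakeLLM(temperature=0.1, top_p=0.9).model_kwargs)  # {'top_p': 0.9}
```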
_import_mlflow
from langchain_community.llms.mlflow import Mlflow return Mlflow
def _import_mlflow() ->Any: from langchain_community.llms.mlflow import Mlflow return Mlflow
null
similarity_search
"""Perform similarity retrieval based on text. Args: query: Vectorize text for retrieval.,should not be empty. k: top n. search_filter: Additional filtering conditions. Returns: document_list: List of documents. """ embedding = self.embedding.embed_query(query) return self.create_results(self.inner_embedding_query(embedding=embedding, search_filter=search_filter, k=k))
def similarity_search(self, query: str, k: int=4, search_filter: Optional[ Dict[str, Any]]=None, **kwargs: Any) ->List[Document]: """Perform similarity retrieval based on text. Args: query: Vectorize text for retrieval.,should not be empty. k: top n. search_filter: Additional filtering conditions. Returns: document_list: List of documents. """ embedding = self.embedding.embed_query(query) return self.create_results(self.inner_embedding_query(embedding= embedding, search_filter=search_filter, k=k))
Perform similarity retrieval based on text. Args: query: Vectorize text for retrieval.,should not be empty. k: top n. search_filter: Additional filtering conditions. Returns: document_list: List of documents.
search
"""Try to search for wiki page. If page exists, return the page summary, and a PageWithLookups object. If page does not exist, return similar entries. Args: search: search string. Returns: a Document object or error message. """ import wikipedia try: page_content = wikipedia.page(search).content url = wikipedia.page(search).url result: Union[str, Document] = Document(page_content=page_content, metadata={'page': url}) except wikipedia.PageError: result = f'Could not find [{search}]. Similar: {wikipedia.search(search)}' except wikipedia.DisambiguationError: result = f'Could not find [{search}]. Similar: {wikipedia.search(search)}' return result
def search(self, search: str) ->Union[str, Document]: """Try to search for wiki page. If page exists, return the page summary, and a PageWithLookups object. If page does not exist, return similar entries. Args: search: search string. Returns: a Document object or error message. """ import wikipedia try: page_content = wikipedia.page(search).content url = wikipedia.page(search).url result: Union[str, Document] = Document(page_content=page_content, metadata={'page': url}) except wikipedia.PageError: result = ( f'Could not find [{search}]. Similar: {wikipedia.search(search)}') except wikipedia.DisambiguationError: result = ( f'Could not find [{search}]. Similar: {wikipedia.search(search)}') return result
Try to search for wiki page. If page exists, return the page summary, and a PageWithLookups object. If page does not exist, return similar entries. Args: search: search string. Returns: a Document object or error message.
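A hedged sketch of the docstore above in use; this matches the `Wikipedia` docstore wrapper in langchain_community and assumes `pip install wikipedia`.

```python
from langchain_community.docstore.wikipedia import Wikipedia

result = Wikipedia().search("Python (programming language)")
# Either a Document (page found) or a string listing similar titles.
print(result.metadata["page"] if hasattr(result, "metadata") else result)
```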
test_blob_from_str_data
"""Test reading blob from a file path.""" content = b'Hello, World!' blob = Blob.from_data(content) assert blob.encoding == 'utf-8' assert blob.path is None assert blob.mimetype is None assert blob.source is None assert blob.data == b'Hello, World!' assert blob.as_bytes() == content assert blob.as_string() == 'Hello, World!' with blob.as_bytes_io() as bytes_io: assert bytes_io.read() == content
def test_blob_from_str_data() ->None: """Test reading blob from a file path.""" content = b'Hello, World!' blob = Blob.from_data(content) assert blob.encoding == 'utf-8' assert blob.path is None assert blob.mimetype is None assert blob.source is None assert blob.data == b'Hello, World!' assert blob.as_bytes() == content assert blob.as_string() == 'Hello, World!' with blob.as_bytes_io() as bytes_io: assert bytes_io.read() == content
Test reading blob from a file path.
run_query
return db.run(query)
def run_query(query): return db.run(query)
null
_import_meilisearch
from langchain_community.vectorstores.meilisearch import Meilisearch return Meilisearch
def _import_meilisearch() ->Any: from langchain_community.vectorstores.meilisearch import Meilisearch return Meilisearch
null
load_and_split
"""Load Documents and split into chunks. Chunks are returned as Documents. Args: text_splitter: TextSplitter instance to use for splitting documents. Defaults to RecursiveCharacterTextSplitter. Returns: List of Documents. """ from langchain.text_splitter import RecursiveCharacterTextSplitter if text_splitter is None: _text_splitter: TextSplitter = RecursiveCharacterTextSplitter() else: _text_splitter = text_splitter docs = self.load() return _text_splitter.split_documents(docs)
def load_and_split(self, text_splitter: Optional[TextSplitter]=None) ->List[ Document]: """Load Documents and split into chunks. Chunks are returned as Documents. Args: text_splitter: TextSplitter instance to use for splitting documents. Defaults to RecursiveCharacterTextSplitter. Returns: List of Documents. """ from langchain.text_splitter import RecursiveCharacterTextSplitter if text_splitter is None: _text_splitter: TextSplitter = RecursiveCharacterTextSplitter() else: _text_splitter = text_splitter docs = self.load() return _text_splitter.split_documents(docs)
Load Documents and split into chunks. Chunks are returned as Documents. Args: text_splitter: TextSplitter instance to use for splitting documents. Defaults to RecursiveCharacterTextSplitter. Returns: List of Documents.
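A quick sketch of `load_and_split` with an explicit splitter; "report.txt" is a placeholder path.

```python
# Sketch only: placeholder file path.
from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter

chunks = TextLoader("report.txt").load_and_split(
    text_splitter=RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
)
print(len(chunks), "chunks")
```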
model_cfg_sys_msg
return Vicuna(llm=FakeLLM(), system_message=SystemMessage(content='sys-msg'))
@pytest.fixture def model_cfg_sys_msg() ->Vicuna: return Vicuna(llm=FakeLLM(), system_message=SystemMessage(content= 'sys-msg'))
null
validate_client
values['ydc_api_key'] = get_from_dict_or_env(values, 'ydc_api_key', 'YDC_API_KEY') return values
@root_validator(pre=True) def validate_client(cls, values: Dict[str, Any]) ->Dict[str, Any]: values['ydc_api_key'] = get_from_dict_or_env(values, 'ydc_api_key', 'YDC_API_KEY') return values
null
test_partial_text_and_metadata
""" Test loading a board cards removing some text and metadata. """ trello_loader = TrelloLoader.from_credentials('QA', api_key='API_KEY', token='API_TOKEN', extra_metadata='list', include_card_name=False, include_checklist=False, include_comments=False) documents = trello_loader.load() texts = ['Closed Card Title', 'Checklist 1', 'Item 1:pending', 'This is a comment on a Closed Card.'] for text in texts: self.assertFalse(text in documents[0].page_content) self.assertEqual(documents[0].metadata, {'title': 'Closed Card Title', 'id': '12350aca6952888df7975903', 'url': 'https://trello.com/card/12350aca6952888df7975903', 'list': 'Done'}, 'Metadata of Closed Card Matches.')
def test_partial_text_and_metadata(self) ->None: """ Test loading a board cards removing some text and metadata. """ trello_loader = TrelloLoader.from_credentials('QA', api_key='API_KEY', token='API_TOKEN', extra_metadata='list', include_card_name=False, include_checklist=False, include_comments=False) documents = trello_loader.load() texts = ['Closed Card Title', 'Checklist 1', 'Item 1:pending', 'This is a comment on a Closed Card.'] for text in texts: self.assertFalse(text in documents[0].page_content) self.assertEqual(documents[0].metadata, {'title': 'Closed Card Title', 'id': '12350aca6952888df7975903', 'url': 'https://trello.com/card/12350aca6952888df7975903', 'list': 'Done'}, 'Metadata of Closed Card Matches.')
Test loading a board cards removing some text and metadata.
to_sql_model
return self.model_class(session_id=session_id, message=json.dumps( message_to_dict(message)))
def to_sql_model(self, message: BaseMessage, session_id: str) ->Any: return self.model_class(session_id=session_id, message=json.dumps( message_to_dict(message)))
null
test_namespace
"""Test that a namespace is prepended to all keys properly.""" store = RedisStore(client=redis_client, ttl=None, namespace='meow') key_value_pairs = [('key1', b'value1'), ('key2', b'value2')] store.mset(key_value_pairs) assert sorted(redis_client.scan_iter('*')) == [b'meow/key1', b'meow/key2'] store.mdelete(['key1']) assert sorted(redis_client.scan_iter('*')) == [b'meow/key2'] assert list(store.yield_keys()) == ['key2'] assert list(store.yield_keys(prefix='key*')) == ['key2'] assert list(store.yield_keys(prefix='key1')) == []
def test_namespace(redis_client: Redis) ->None: """Test that a namespace is prepended to all keys properly.""" store = RedisStore(client=redis_client, ttl=None, namespace='meow') key_value_pairs = [('key1', b'value1'), ('key2', b'value2')] store.mset(key_value_pairs) assert sorted(redis_client.scan_iter('*')) == [b'meow/key1', b'meow/key2'] store.mdelete(['key1']) assert sorted(redis_client.scan_iter('*')) == [b'meow/key2'] assert list(store.yield_keys()) == ['key2'] assert list(store.yield_keys(prefix='key*')) == ['key2'] assert list(store.yield_keys(prefix='key1')) == []
Test that a namespace is prepended to all keys properly.
_persist_run
""" Persist a run by adding it to the traced_runs list. Parameters ---------- run : Run The run to be persisted. """ run_ = run.copy() run_.reference_example_id = self.example_id self.traced_runs.append(run_)
def _persist_run(self, run: Run) ->None: """ Persist a run by adding it to the traced_runs list. Parameters ---------- run : Run The run to be persisted. """ run_ = run.copy() run_.reference_example_id = self.example_id self.traced_runs.append(run_)
Persist a run by adding it to the traced_runs list. Parameters ---------- run : Run The run to be persisted.
test_memory_with_message_store
memory = ConversationBufferMemory(memory_key='baz', chat_memory= message_history, return_messages=True) memory.chat_memory.add_ai_message('This is me, the AI') memory.chat_memory.add_user_message('This is me, the human') messages = memory.chat_memory.messages messages_json = json.dumps([message_to_dict(msg) for msg in messages]) assert 'This is me, the AI' in messages_json assert 'This is me, the human' in messages_json memory.chat_memory.clear() assert memory.chat_memory.messages == []
def test_memory_with_message_store(message_history: MomentoChatMessageHistory ) ->None: memory = ConversationBufferMemory(memory_key='baz', chat_memory= message_history, return_messages=True) memory.chat_memory.add_ai_message('This is me, the AI') memory.chat_memory.add_user_message('This is me, the human') messages = memory.chat_memory.messages messages_json = json.dumps([message_to_dict(msg) for msg in messages]) assert 'This is me, the AI' in messages_json assert 'This is me, the human' in messages_json memory.chat_memory.clear() assert memory.chat_memory.messages == []
null
embed
"""Embed a list of strings. Args: texts: List[str] The list of strings to embed. batch_size: [int] The batch size of embeddings to send to the model. If zero, then the largest batch size will be detected dynamically at the first request, starting from 250, down to 5. embeddings_task_type: [str] optional embeddings task type, one of the following RETRIEVAL_QUERY - Text is a query in a search/retrieval setting. RETRIEVAL_DOCUMENT - Text is a document in a search/retrieval setting. SEMANTIC_SIMILARITY - Embeddings will be used for Semantic Textual Similarity (STS). CLASSIFICATION - Embeddings will be used for classification. CLUSTERING - Embeddings will be used for clustering. Returns: List of embeddings, one for each text. """ if len(texts) == 0: return [] embeddings: List[List[float]] = [] first_batch_result: List[List[float]] = [] if batch_size > 0: batches = VertexAIEmbeddings._prepare_batches(texts, batch_size) else: first_batch_result, batches = self._prepare_and_validate_batches(texts, embeddings_task_type) embeddings.extend(first_batch_result) tasks = [] for batch in batches: tasks.append(self.instance['task_executor'].submit(self. _get_embeddings_with_retry, texts=batch, embeddings_type= embeddings_task_type)) if len(tasks) > 0: wait(tasks) for t in tasks: embeddings.extend(t.result()) return embeddings
def embed(self, texts: List[str], batch_size: int=0, embeddings_task_type: Optional[Literal['RETRIEVAL_QUERY', 'RETRIEVAL_DOCUMENT', 'SEMANTIC_SIMILARITY', 'CLASSIFICATION', 'CLUSTERING']]=None) ->List[List [float]]: """Embed a list of strings. Args: texts: List[str] The list of strings to embed. batch_size: [int] The batch size of embeddings to send to the model. If zero, then the largest batch size will be detected dynamically at the first request, starting from 250, down to 5. embeddings_task_type: [str] optional embeddings task type, one of the following RETRIEVAL_QUERY - Text is a query in a search/retrieval setting. RETRIEVAL_DOCUMENT - Text is a document in a search/retrieval setting. SEMANTIC_SIMILARITY - Embeddings will be used for Semantic Textual Similarity (STS). CLASSIFICATION - Embeddings will be used for classification. CLUSTERING - Embeddings will be used for clustering. Returns: List of embeddings, one for each text. """ if len(texts) == 0: return [] embeddings: List[List[float]] = [] first_batch_result: List[List[float]] = [] if batch_size > 0: batches = VertexAIEmbeddings._prepare_batches(texts, batch_size) else: first_batch_result, batches = self._prepare_and_validate_batches(texts, embeddings_task_type) embeddings.extend(first_batch_result) tasks = [] for batch in batches: tasks.append(self.instance['task_executor'].submit(self. _get_embeddings_with_retry, texts=batch, embeddings_type= embeddings_task_type)) if len(tasks) > 0: wait(tasks) for t in tasks: embeddings.extend(t.result()) return embeddings
Embed a list of strings.

Args:
    texts: List[str] The list of strings to embed.
    batch_size: [int] The batch size of embeddings to send to the model.
        If zero, then the largest batch size will be detected dynamically
        at the first request, starting from 250, down to 5.
    embeddings_task_type: [str] optional embeddings task type,
        one of the following:
            RETRIEVAL_QUERY - Text is a query in a search/retrieval setting.
            RETRIEVAL_DOCUMENT - Text is a document in a search/retrieval setting.
            SEMANTIC_SIMILARITY - Embeddings will be used for Semantic Textual Similarity (STS).
            CLASSIFICATION - Embeddings will be used for classification.
            CLUSTERING - Embeddings will be used for clustering.

Returns:
    List of embeddings, one for each text.
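A minimal usage sketch for the method above (hedged: it assumes Google Cloud credentials are configured and that this method belongs to VertexAIEmbeddings from langchain_community; the input strings are fabricated):

from langchain_community.embeddings import VertexAIEmbeddings

embedder = VertexAIEmbeddings()  # assumes default model and ambient GCP credentials
vectors = embedder.embed(
    ['What is a vector store?', 'pgvector extends Postgres.'],
    batch_size=0,  # 0 = probe the largest working batch size on the first request
    embeddings_task_type='RETRIEVAL_DOCUMENT')
assert len(vectors) == 2  # one embedding per input text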
input_keys
"""Will be whatever keys the router chain prompt expects. :meta private: """ return self.router_chain.input_keys
@property def input_keys(self) ->List[str]: """Will be whatever keys the router chain prompt expects. :meta private: """ return self.router_chain.input_keys
Will be whatever keys the router chain prompt expects. :meta private:
__str__
"""Text representation of searx result.""" return self._data
def __str__(self) ->str: """Text representation of searx result.""" return self._data
Text representation of searx result.
to_typescript
"""Get typescript string representation of the operation.""" operation_name = self.operation_id params = [] if self.request_body: formatted_request_body_props = self._format_nested_properties(self. request_body.properties) params.append(formatted_request_body_props) for prop in self.properties: prop_name = prop.name prop_type = self.ts_type_from_python(prop.type) prop_required = '' if prop.required else '?' prop_desc = f'/* {prop.description} */' if prop.description else '' params.append(f'{prop_desc}\n\t\t{prop_name}{prop_required}: {prop_type},') formatted_params = '\n'.join(params).strip() description_str = f'/* {self.description} */' if self.description else '' typescript_definition = f""" {description_str} type {operation_name} = (_: {{ {formatted_params} }}) => any; """ return typescript_definition.strip()
def to_typescript(self) -> str:
    """Get typescript string representation of the operation."""
    operation_name = self.operation_id
    params = []
    if self.request_body:
        formatted_request_body_props = self._format_nested_properties(
            self.request_body.properties)
        params.append(formatted_request_body_props)
    for prop in self.properties:
        prop_name = prop.name
        prop_type = self.ts_type_from_python(prop.type)
        prop_required = '' if prop.required else '?'
        prop_desc = f'/* {prop.description} */' if prop.description else ''
        params.append(
            f'{prop_desc}\n\t\t{prop_name}{prop_required}: {prop_type},')
    formatted_params = '\n'.join(params).strip()
    description_str = f'/* {self.description} */' if self.description else ''
    typescript_definition = f"""
{description_str}
type {operation_name} = (_: {{
{formatted_params}
}}) => any;
"""
    return typescript_definition.strip()
Get typescript string representation of the operation.
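For illustration only, a standalone sketch (not the real class) that mirrors the formatting above to show the shape of the emitted TypeScript; the operation name and both properties are fabricated:

# (name, TypeScript type, required, description) for two hypothetical properties
props = [('query', 'string', True, 'Search terms'),
         ('limit', 'number', False, 'Max results')]
params = '\n'.join(
    f"/* {desc} */\n\t\t{name}{'' if required else '?'}: {ts_type},"
    for name, ts_type, required, desc in props)
print(f'type searchItems = (_: {{\n{params}\n}}) => any;')
# Prints a definition of the form:
#   type searchItems = (_: {
#   /* Search terms */
#       query: string,
#   /* Max results */
#       limit?: number,
#   }) => any;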
_Starred
self.write('*') self.dispatch(t.value)
def _Starred(self, t): self.write('*') self.dispatch(t.value)
null
_get_package_version
with open(package_dir.parent / 'pyproject.toml', 'r') as f: pyproject = toml.load(f) return pyproject['tool']['poetry']['version']
def _get_package_version(package_dir: Path) ->str: with open(package_dir.parent / 'pyproject.toml', 'r') as f: pyproject = toml.load(f) return pyproject['tool']['poetry']['version']
null
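A self-contained sketch of the same version lookup against a throwaway pyproject.toml (the metadata is fabricated for the demo; requires the third-party toml package):

import pathlib
import tempfile

import toml

# Fabricated Poetry metadata, written to a temporary directory.
content = '[tool.poetry]\nname = "demo"\nversion = "0.1.0"\n'
pyproject_path = pathlib.Path(tempfile.mkdtemp()) / 'pyproject.toml'
pyproject_path.write_text(content)
assert toml.loads(pyproject_path.read_text())['tool']['poetry']['version'] == '0.1.0'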
test_mock
assert True
def test_mock() ->None: assert True
null
_run_chain
"""Run a chain on inputs.""" inputs_ = inputs if input_mapper is None else input_mapper(inputs) if isinstance(chain, Chain) and isinstance(inputs_, dict) and len(inputs_ ) == 1 and chain.input_keys: val = next(iter(inputs_.values())) output = chain(val, callbacks=callbacks, tags=tags) else: runnable_config = RunnableConfig(tags=tags or [], callbacks=callbacks) output = chain.invoke(inputs_, config=runnable_config) return output
def _run_chain(chain: Union[Chain, Runnable], inputs: Dict[str, Any],
        callbacks: Callbacks, *, tags: Optional[List[str]] = None,
        input_mapper: Optional[Callable[[Dict], Any]] = None) -> Union[Dict, str]:
    """Run a chain on inputs."""
    inputs_ = inputs if input_mapper is None else input_mapper(inputs)
    if (isinstance(chain, Chain) and isinstance(inputs_, dict)
            and len(inputs_) == 1 and chain.input_keys):
        val = next(iter(inputs_.values()))
        output = chain(val, callbacks=callbacks, tags=tags)
    else:
        runnable_config = RunnableConfig(tags=tags or [], callbacks=callbacks)
        output = chain.invoke(inputs_, config=runnable_config)
    return output
Run a chain on inputs.
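A pure-Python sketch of the single-input rule in the branch above: when the chain is a Chain with exactly one declared input key, the one-entry dict is collapsed to its bare value before the call (the key and value here are illustrative):

inputs_ = {'question': 'What is LangChain?'}
val = next(iter(inputs_.values()))  # the dict's single value
assert val == 'What is LangChain?'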
test_api_key_is_string
llm = Petals(huggingface_api_key='secret-api-key') assert isinstance(llm.huggingface_api_key, SecretStr)
def test_api_key_is_string() ->None: llm = Petals(huggingface_api_key='secret-api-key') assert isinstance(llm.huggingface_api_key, SecretStr)
null
_import_promptlayer
from langchain_community.llms.promptlayer_openai import PromptLayerOpenAI return PromptLayerOpenAI
def _import_promptlayer() ->Any: from langchain_community.llms.promptlayer_openai import PromptLayerOpenAI return PromptLayerOpenAI
null
lc_secrets
return {'fireworks_api_key': 'FIREWORKS_API_KEY'}
@property def lc_secrets(self) ->Dict[str, str]: return {'fireworks_api_key': 'FIREWORKS_API_KEY'}
null
finalize
"""Finalize the deprecation of a class.""" try: obj.__doc__ = new_doc except AttributeError: pass obj.__init__ = functools.wraps(obj.__init__)(wrapper) return obj
def finalize(wrapper: Callable[..., Any], new_doc: str) ->T: """Finalize the deprecation of a class.""" try: obj.__doc__ = new_doc except AttributeError: pass obj.__init__ = functools.wraps(obj.__init__)(wrapper) return obj
Finalize the deprecation of a class.
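A standalone sketch of the wrapping pattern finalize applies: functools.wraps copies the original __init__'s metadata onto the replacement so the class still introspects cleanly (the Legacy class and printed notice are fabricated):

import functools

class Legacy:
    def __init__(self) -> None:
        """Original constructor."""

original_init = Legacy.__init__

def wrapper(self, *args, **kwargs):
    print('Legacy is deprecated')  # stand-in for a real deprecation warning
    original_init(self, *args, **kwargs)

Legacy.__init__ = functools.wraps(original_init)(wrapper)
Legacy()  # prints the notice; Legacy.__init__ keeps the original docstring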
load
"""Load text from the url(s) in web_path.""" return list(self.lazy_load())
def load(self) ->List[Document]: """Load text from the url(s) in web_path.""" return list(self.lazy_load())
Load text from the url(s) in web_path.
__init__
"""Initialize with a resource and an access token. Args: resource: The resource. access_token: The access token. """ self.resource = resource access_token = access_token or get_from_env('access_token', 'STRIPE_ACCESS_TOKEN') self.headers = {'Authorization': f'Bearer {access_token}'}
def __init__(self, resource: str, access_token: Optional[str]=None) ->None: """Initialize with a resource and an access token. Args: resource: The resource. access_token: The access token. """ self.resource = resource access_token = access_token or get_from_env('access_token', 'STRIPE_ACCESS_TOKEN') self.headers = {'Authorization': f'Bearer {access_token}'}
Initialize with a resource and an access token. Args: resource: The resource. access_token: The access token.
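A hedged usage sketch, assuming this initializer belongs to the community StripeLoader; the token value is a placeholder, and omitting it makes get_from_env fall back to the STRIPE_ACCESS_TOKEN environment variable:

import os

from langchain_community.document_loaders import StripeLoader

os.environ['STRIPE_ACCESS_TOKEN'] = 'sk_test_placeholder'  # fabricated token
loader = StripeLoader('charges')  # token resolved from the environment
# Equivalent explicit form:
loader = StripeLoader('charges', access_token='sk_test_placeholder')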
from_embeddings
"""Construct TimescaleVector wrapper from raw documents and pre- generated embeddings. Return VectorStore initialized from documents and embeddings. Postgres connection string is required "Either pass it as a parameter or set the TIMESCALE_SERVICE_URL environment variable. Example: .. code-block:: python from langchain_community.vectorstores import TimescaleVector from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) tvs = TimescaleVector.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from(texts, embeddings, embedding, metadatas=metadatas, ids= ids, collection_name=collection_name, distance_strategy= distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs)
@classmethod
def from_embeddings(cls, text_embeddings: List[Tuple[str, List[float]]],
        embedding: Embeddings, metadatas: Optional[List[dict]] = None,
        collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
        distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
        ids: Optional[List[str]] = None,
        pre_delete_collection: bool = False,
        **kwargs: Any) -> TimescaleVector:
    """Construct TimescaleVector wrapper from raw documents and pre-generated embeddings.

    Return VectorStore initialized from documents and embeddings.
    A Postgres connection string is required: either pass it as a parameter
    or set the TIMESCALE_SERVICE_URL environment variable.

    Example:
        .. code-block:: python

            from langchain_community.vectorstores import TimescaleVector
            from langchain_community.embeddings import OpenAIEmbeddings

            embeddings = OpenAIEmbeddings()
            text_embeddings = embeddings.embed_documents(texts)
            text_embedding_pairs = list(zip(texts, text_embeddings))
            tvs = TimescaleVector.from_embeddings(text_embedding_pairs, embeddings)
    """
    texts = [t[0] for t in text_embeddings]
    embeddings = [t[1] for t in text_embeddings]
    return cls.__from(texts, embeddings, embedding, metadatas=metadatas,
        ids=ids, collection_name=collection_name,
        distance_strategy=distance_strategy,
        pre_delete_collection=pre_delete_collection, **kwargs)
Construct TimescaleVector wrapper from raw documents and pre-generated embeddings.

Return VectorStore initialized from documents and embeddings.
A Postgres connection string is required: either pass it as a parameter
or set the TIMESCALE_SERVICE_URL environment variable.

Example:
    .. code-block:: python

        from langchain_community.vectorstores import TimescaleVector
        from langchain_community.embeddings import OpenAIEmbeddings

        embeddings = OpenAIEmbeddings()
        text_embeddings = embeddings.embed_documents(texts)
        text_embedding_pairs = list(zip(texts, text_embeddings))
        tvs = TimescaleVector.from_embeddings(text_embedding_pairs, embeddings)
get_content
return page['body'][self.name.lower()]['value']
def get_content(self, page: dict) ->str: return page['body'][self.name.lower()]['value']
null
agent_executor
return self._agent_executor
@property def agent_executor(self) ->AgentExecutor: return self._agent_executor
null
test_ai_endpoints_streaming
"""Test streaming tokens from ai endpoints.""" llm = ChatNVIDIA(model='llama2_13b', max_tokens=36) for token in llm.stream("I'm Pickle Rick"): assert isinstance(token.content, str)
def test_ai_endpoints_streaming() ->None: """Test streaming tokens from ai endpoints.""" llm = ChatNVIDIA(model='llama2_13b', max_tokens=36) for token in llm.stream("I'm Pickle Rick"): assert isinstance(token.content, str)
Test streaming tokens from ai endpoints.
on_retry
self.on_retry_common()
def on_retry(self, *args: Any, **kwargs: Any) ->Any: self.on_retry_common()
null
_batch_block
it = iter(iterable) while (item := list(itertools.islice(it, size))): yield item
def _batch_block(iterable: Iterable, size: int) ->Generator[List[dict], None, None]: it = iter(iterable) while (item := list(itertools.islice(it, size))): yield item
null
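A self-contained check of the batching generator (restated here with a loosened type hint so the snippet runs on its own; the walrus operator requires Python 3.8+):

import itertools
from typing import Generator, Iterable, List

def _batch_block(iterable: Iterable, size: int) -> Generator[List, None, None]:
    it = iter(iterable)
    while (item := list(itertools.islice(it, size))):
        yield item

# Seven items in blocks of three: the final block carries the remainder.
assert list(_batch_block(range(7), 3)) == [[0, 1, 2], [3, 4, 5], [6]]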
test_gradient_wrong_setup2
with pytest.raises(Exception): GradientEmbeddings(gradient_api_url=_GRADIENT_BASE_URL, gradient_access_token=_GRADIENT_SECRET, gradient_workspace_id='', model=_MODEL_ID)
def test_gradient_wrong_setup2() -> None:
    with pytest.raises(Exception):
        GradientEmbeddings(gradient_api_url=_GRADIENT_BASE_URL,
            gradient_access_token=_GRADIENT_SECRET,
            gradient_workspace_id='', model=_MODEL_ID)
null
_call
text = inputs[self.input_key] results = self.client.create(text) output = self._moderate(text, results['results'][0]) return {self.output_key: output}
def _call(self, inputs: Dict[str, str],
        run_manager: Optional[CallbackManagerForChainRun] = None) -> Dict[str, str]:
    text = inputs[self.input_key]
    results = self.client.create(text)
    output = self._moderate(text, results['results'][0])
    return {self.output_key: output}
null
get_operation
"""Get the operation object for a given path and HTTP method.""" from openapi_pydantic import Operation path_item = self._get_path_strict(path) operation_obj = getattr(path_item, method, None) if not isinstance(operation_obj, Operation): raise ValueError(f'No {method} method found for {path}') return operation_obj
def get_operation(self, path: str, method: str) ->Operation: """Get the operation object for a given path and HTTP method.""" from openapi_pydantic import Operation path_item = self._get_path_strict(path) operation_obj = getattr(path_item, method, None) if not isinstance(operation_obj, Operation): raise ValueError(f'No {method} method found for {path}') return operation_obj
Get the operation object for a given path and HTTP method.
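A hedged usage sketch, assuming the surrounding class is LangChain's OpenAPISpec and that its from_file constructor is available; the spec path and endpoint are hypothetical:

from langchain_community.utilities.openapi import OpenAPISpec

spec = OpenAPISpec.from_file('openapi.yaml')  # hypothetical spec file
operation = spec.get_operation('/pets', 'get')  # raises ValueError if the method is absent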
buffer_as_messages
"""Exposes the buffer as a list of messages in case return_messages is False.""" return self.chat_memory.messages[-self.k * 2:] if self.k > 0 else []
@property def buffer_as_messages(self) ->List[BaseMessage]: """Exposes the buffer as a list of messages in case return_messages is False.""" return self.chat_memory.messages[-self.k * 2:] if self.k > 0 else []
Exposes the buffer as a list of messages in case return_messages is False.
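A pure-Python sketch of the windowing arithmetic above: keeping k conversational turns means keeping the last k * 2 messages, one human and one AI message per turn (the message list is fabricated):

history = ['human-1', 'ai-1', 'human-2', 'ai-2', 'human-3', 'ai-3']
k = 2
window = history[-k * 2:] if k > 0 else []
assert window == ['human-2', 'ai-2', 'human-3', 'ai-3']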
test_parse_bool_value
parsed = cast(Comparison, DEFAULT_PARSER.parse(f'eq("x", {x})'))
actual = parsed.value
expected = x.lower() == 'true'
assert actual == expected
@pytest.mark.parametrize('x', ('true', 'True', 'TRUE', 'false', 'False', 'FALSE'))
def test_parse_bool_value(x: str) -> None:
    parsed = cast(Comparison, DEFAULT_PARSER.parse(f'eq("x", {x})'))
    actual = parsed.value
    expected = x.lower() == 'true'
    assert actual == expected
null
test_chat_openai_llm_output_contains_model_name
"""Test llm_output contains model_name.""" chat = ChatOpenAI(max_tokens=10) message = HumanMessage(content='Hello') llm_result = chat.generate([[message]]) assert llm_result.llm_output is not None assert llm_result.llm_output['model_name'] == chat.model_name
def test_chat_openai_llm_output_contains_model_name() ->None: """Test llm_output contains model_name.""" chat = ChatOpenAI(max_tokens=10) message = HumanMessage(content='Hello') llm_result = chat.generate([[message]]) assert llm_result.llm_output is not None assert llm_result.llm_output['model_name'] == chat.model_name
Test llm_output contains model_name.
test_metadata_not_shallow
"""Test that metadatas are not shallow.""" texts = ['foo bar'] splitter = CharacterTextSplitter(separator=' ', chunk_size=3, chunk_overlap=0) docs = splitter.create_documents(texts, [{'source': '1'}]) expected_docs = [Document(page_content='foo', metadata={'source': '1'}), Document(page_content='bar', metadata={'source': '1'})] assert docs == expected_docs docs[0].metadata['foo'] = 1 assert docs[0].metadata == {'source': '1', 'foo': 1} assert docs[1].metadata == {'source': '1'}
def test_metadata_not_shallow() ->None: """Test that metadatas are not shallow.""" texts = ['foo bar'] splitter = CharacterTextSplitter(separator=' ', chunk_size=3, chunk_overlap=0) docs = splitter.create_documents(texts, [{'source': '1'}]) expected_docs = [Document(page_content='foo', metadata={'source': '1'}), Document(page_content='bar', metadata={'source': '1'})] assert docs == expected_docs docs[0].metadata['foo'] = 1 assert docs[0].metadata == {'source': '1', 'foo': 1} assert docs[1].metadata == {'source': '1'}
Test that metadatas are not shallow.
_run
return self.spec.value(tool_input)
def _run(self, tool_input: str, run_manager: Optional[ CallbackManagerForToolRun]=None) ->str: return self.spec.value(tool_input)
null
test_konko_streaming_info_test
"""Ensure generation details are retained during streaming.""" class TestCallback(FakeCallbackHandler): data_store: dict = {} def on_llm_end(self, *args: Any, **kwargs: Any) ->Any: self.data_store['generation'] = args[0] callback_instance = TestCallback() callback_mgr = CallbackManager([callback_instance]) chat_instance = ChatKonko(max_tokens=2, temperature=0, callback_manager= callback_mgr) list(chat_instance.stream('hey')) gen_data = callback_instance.data_store['generation'] assert gen_data.generations[0][0].text == ' Hey'
def test_konko_streaming_info_test() -> None:
    """Ensure generation details are retained during streaming."""

    class TestCallback(FakeCallbackHandler):
        data_store: dict = {}

        def on_llm_end(self, *args: Any, **kwargs: Any) -> Any:
            self.data_store['generation'] = args[0]

    callback_instance = TestCallback()
    callback_mgr = CallbackManager([callback_instance])
    chat_instance = ChatKonko(max_tokens=2, temperature=0,
        callback_manager=callback_mgr)
    list(chat_instance.stream('hey'))
    gen_data = callback_instance.data_store['generation']
    assert gen_data.generations[0][0].text == ' Hey'
Ensure generation details are retained during streaming.
lc_serializable
return True
@property def lc_serializable(self) ->bool: return True
null
test_generate_synthetic
synthetic_results = synthetic_data_generator.generate(
    subject='medical_billing',
    extra="""the name must be chosen at random. Make it something you wouldn't normally choose.""",
    runs=10)
assert len(synthetic_results) == 10
for row in synthetic_results:
    assert isinstance(row, MedicalBilling)
@pytest.mark.requires('openai')
def test_generate_synthetic(
        synthetic_data_generator: SyntheticDataGenerator) -> None:
    synthetic_results = synthetic_data_generator.generate(
        subject='medical_billing',
        extra="""the name must be chosen at random. Make it something you wouldn't normally choose.""",
        runs=10)
    assert len(synthetic_results) == 10
    for row in synthetic_results:
        assert isinstance(row, MedicalBilling)
null
from_llm
llm_chain = LLMChain(llm=llm, prompt=prompt) return cls(llm_chain=llm_chain, **kwargs)
@classmethod def from_llm(cls, llm: BaseLanguageModel, prompt: BasePromptTemplate=PROMPT, **kwargs: Any) ->LLMBashChain: llm_chain = LLMChain(llm=llm, prompt=prompt) return cls(llm_chain=llm_chain, **kwargs)
null
_call
return self.client(pipeline=self.pipeline_ref, prompt=prompt, stop=stop, ** kwargs)
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: return self.client(pipeline=self.pipeline_ref, prompt=prompt, stop=stop, **kwargs)
null
test_singlestoredb_filter_metadata_2
"""Test filtering by metadata field that is similar for each document""" table_name = 'test_singlestoredb_filter_metadata_2' drop(table_name) docs = [Document(page_content=t, metadata={'index': i, 'category': 'budget' }) for i, t in enumerate(texts)] docsearch = SingleStoreDB.from_documents(docs, FakeEmbeddings(), distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE, table_name= table_name, host=TEST_SINGLESTOREDB_URL) output = docsearch.similarity_search('foo', k=1, filter={'category': 'budget'}) assert output == [Document(page_content='foo', metadata={'index': 0, 'category': 'budget'})] drop(table_name)
@pytest.mark.skipif(not singlestoredb_installed, reason=
    'singlestoredb not installed')
def test_singlestoredb_filter_metadata_2(texts: List[str]) -> None:
    """Test filtering by metadata field that is similar for each document"""
    table_name = 'test_singlestoredb_filter_metadata_2'
    drop(table_name)
    docs = [Document(page_content=t,
        metadata={'index': i, 'category': 'budget'})
        for i, t in enumerate(texts)]
    docsearch = SingleStoreDB.from_documents(docs, FakeEmbeddings(),
        distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
        table_name=table_name, host=TEST_SINGLESTOREDB_URL)
    output = docsearch.similarity_search('foo', k=1,
        filter={'category': 'budget'})
    assert output == [Document(page_content='foo',
        metadata={'index': 0, 'category': 'budget'})]
    drop(table_name)
Test filtering by metadata field that is similar for each document
route_question
if input.get('chat_history'): return standalone_question else: return RunnablePassthrough()
def route_question(input): if input.get('chat_history'): return standalone_question else: return RunnablePassthrough()
null
_split_generators
""" This function defines how the dataset's splits will be generated. Args: dl_manager (`DownloadManager`): Helper for downloading datasets from files and online sources. This is not being used for this test file. """ return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={ 'split': 'train', 'name': self.config.name}), datasets.SplitGenerator( name=datasets.Split.TEST, gen_kwargs={'split': 'test', 'name': self. config.name})]
def _split_generators(self, dl_manager: datasets.DownloadManager
        ) -> List[datasets.SplitGenerator]:
    """
    This function defines how the dataset's splits will be generated.

    Args:
        dl_manager (`DownloadManager`):
            Helper for downloading datasets from files and online sources.
            This is not being used for this test file.
    """
    return [
        datasets.SplitGenerator(name=datasets.Split.TRAIN,
            gen_kwargs={'split': 'train', 'name': self.config.name}),
        datasets.SplitGenerator(name=datasets.Split.TEST,
            gen_kwargs={'split': 'test', 'name': self.config.name}),
    ]
This function defines how the dataset's splits will be generated.

Args:
    dl_manager (`DownloadManager`):
        Helper for downloading datasets from files and online sources.
        This is not being used for this test file.