Dataset columns:
method_name: string, length 1 to 78 characters
method_body: string, length 3 to 9.66k characters
full_code: string, length 31 to 10.7k characters
docstring: string, length 4 to 4.74k characters (null when the method has no docstring)
__ne__
"""Create a Numeric inequality filter expression. Args: other (Union[int, float]): The value to filter on. Example: >>> from langchain_community.vectorstores.redis import RedisNum >>> filter = RedisNum("zipcode") != 90210 """ self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.NE) return RedisFilterExpression(str(self))
@check_operator_misuse def __ne__(self, other: Union[int, float]) ->'RedisFilterExpression': """Create a Numeric inequality filter expression. Args: other (Union[int, float]): The value to filter on. Example: >>> from langchain_community.vectorstores.redis import RedisNum >>> filter = RedisNum("zipcode") != 90210 """ self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.NE) return RedisFilterExpression(str(self))
Create a Numeric inequality filter expression. Args: other (Union[int, float]): The value to filter on. Example: >>> from langchain_community.vectorstores.redis import RedisNum >>> filter = RedisNum("zipcode") != 90210
get_additional_metadata
additional_metadata = {'type': self.Type} return additional_metadata
def get_additional_metadata(self) ->dict: additional_metadata = {'type': self.Type} return additional_metadata
null
_import_steam_webapi_tool
from langchain_community.tools.steam.tool import SteamWebAPIQueryRun return SteamWebAPIQueryRun
def _import_steam_webapi_tool() ->Any: from langchain_community.tools.steam.tool import SteamWebAPIQueryRun return SteamWebAPIQueryRun
null
evaluate
"""Evaluate question answering examples and predictions.""" inputs = [{'query': example[question_key], 'context': example[context_key], 'result': predictions[i][prediction_key]} for i, example in enumerate( examples)] return self.apply(inputs, callbacks=callbacks)
def evaluate(self, examples: List[dict], predictions: List[dict], question_key: str='query', context_key: str='context', prediction_key: str='result', *, callbacks: Callbacks=None) ->List[dict]: """Evaluate question answering examples and predictions.""" inputs = [{'query': example[question_key], 'context': example[ context_key], 'result': predictions[i][prediction_key]} for i, example in enumerate(examples)] return self.apply(inputs, callbacks=callbacks)
Evaluate question answering examples and predictions.
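A usage sketch of the evaluate row above; the example data are illustrative assumptions, and only the default key names ('query', 'context', 'result') from the signature are used:
examples = [{'query': 'What color is the sky?', 'context': 'The sky is blue.'}]
predictions = [{'result': 'Blue.'}]
# evaluate() zips each example with its prediction into one dict before calling self.apply(inputs, callbacks=callbacks):
inputs = [{'query': ex['query'], 'context': ex['context'], 'result': predictions[i]['result']} for i, ex in enumerate(examples)]
assert inputs == [{'query': 'What color is the sky?', 'context': 'The sky is blue.', 'result': 'Blue.'}]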
_import_powerbi_tool_ListPowerBITool
from langchain_community.tools.powerbi.tool import ListPowerBITool return ListPowerBITool
def _import_powerbi_tool_ListPowerBITool() ->Any: from langchain_community.tools.powerbi.tool import ListPowerBITool return ListPowerBITool
null
__and__
return RedisFilterExpression(operator=RedisFilterOperator.AND, left=self, right=other)
def __and__(self, other: 'RedisFilterExpression') ->'RedisFilterExpression': return RedisFilterExpression(operator=RedisFilterOperator.AND, left= self, right=other)
null
_get_key
if key is not None: return source[key] elif len(source) == 1: return next(iter(source.values())) else: raise ValueError( f"""Could not map run {which} with multiple keys: {source} Please manually specify a {which}_key""" )
def _get_key(self, source: Dict, key: Optional[str], which: str) ->str: if key is not None: return source[key] elif len(source) == 1: return next(iter(source.values())) else: raise ValueError( f"""Could not map run {which} with multiple keys: {source} Please manually specify a {which}_key""" )
null
on_chain_start
pass
def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any) ->None: pass
null
_on_retriever_error
"""Process the Retriever Run upon error."""
def _on_retriever_error(self, run: Run) ->None: """Process the Retriever Run upon error."""
Process the Retriever Run upon error.
test_hologres_with_metadatas
"""Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = Hologres.from_texts(texts=texts, table_name='test_table', embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas, connection_string=CONNECTION_STRING, pre_delete_table=True) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo', metadata={'page': '0'})]
def test_hologres_with_metadatas() ->None: """Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = Hologres.from_texts(texts=texts, table_name='test_table', embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas, connection_string=CONNECTION_STRING, pre_delete_table=True) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo', metadata={'page': '0'})]
Test end to end construction and search.
get_document_attributes_dict
"""Document attributes dict.""" return {attr.Key: attr.Value.value for attr in self.DocumentAttributes or []}
def get_document_attributes_dict(self) ->Dict[str, DocumentAttributeValueType]: """Document attributes dict.""" return {attr.Key: attr.Value.value for attr in self.DocumentAttributes or []}
Document attributes dict.
_run
"""Use the LLM to check the query.""" return self.llm_chain.predict(query=query, dialect=self.db.dialect, callbacks=run_manager.get_child() if run_manager else None)
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] =None) ->str: """Use the LLM to check the query.""" return self.llm_chain.predict(query=query, dialect=self.db.dialect, callbacks=run_manager.get_child() if run_manager else None)
Use the LLM to check the query.
_get_inputs
"""Construct inputs from kwargs and docs. Format and then join all the documents together into one input with name `self.document_variable_name`. Also pluck any additional variables from **kwargs. Args: docs: List of documents to format and then join into single input **kwargs: additional inputs to chain, will pluck any other required arguments from here. Returns: dictionary of inputs to LLMChain """ doc_strings = [format_document(doc, self.document_prompt) for doc in docs] inputs = {k: v for k, v in kwargs.items() if k in self.llm_chain.prompt. input_variables} inputs[self.document_variable_name] = self.document_separator.join(doc_strings) return inputs
def _get_inputs(self, docs: List[Document], **kwargs: Any) ->dict: """Construct inputs from kwargs and docs. Format and then join all the documents together into one input with name `self.document_variable_name`. Also pluck any additional variables from **kwargs. Args: docs: List of documents to format and then join into single input **kwargs: additional inputs to chain, will pluck any other required arguments from here. Returns: dictionary of inputs to LLMChain """ doc_strings = [format_document(doc, self.document_prompt) for doc in docs] inputs = {k: v for k, v in kwargs.items() if k in self.llm_chain.prompt .input_variables} inputs[self.document_variable_name] = self.document_separator.join( doc_strings) return inputs
Construct inputs from kwargs and docs. Format and then join all the documents together into one input with name `self.document_variable_name`. Also pluck any additional variables from **kwargs. Args: docs: List of documents to format and then join into single input **kwargs: additional inputs to chain, will pluck any other required arguments from here. Returns: dictionary of inputs to LLMChain
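A minimal sketch of the join step described above, assuming a bare '{page_content}' document prompt and a '\n\n' separator (both are assumptions, not values read from this row):
from langchain_core.documents import Document
from langchain_core.prompts import PromptTemplate, format_document

document_prompt = PromptTemplate.from_template('{page_content}')
docs = [Document(page_content='first'), Document(page_content='second')]
doc_strings = [format_document(doc, document_prompt) for doc in docs]
inputs = {'context': '\n\n'.join(doc_strings)}  # keyed by self.document_variable_name in the real chain
assert inputs['context'] == 'first\n\nsecond'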
config_specs
return get_unique_config_specs(super().config_specs + list(self. history_factory_config))
@property def config_specs(self) ->List[ConfigurableFieldSpec]: return get_unique_config_specs(super().config_specs + list(self. history_factory_config))
null
format_request_payload
input_str = json.dumps({'inputs': [prompt], 'parameters': model_kwargs, 'options': {'use_cache': False, 'wait_for_model': True}}) return input_str.encode('utf-8')
def format_request_payload(self, prompt: str, model_kwargs: Dict) ->bytes: input_str = json.dumps({'inputs': [prompt], 'parameters': model_kwargs, 'options': {'use_cache': False, 'wait_for_model': True}}) return input_str.encode('utf-8')
null
post
if self.external_or_foundation: resp = self.client.predict(endpoint=self.endpoint_name, inputs=request) if transform_output_fn: return transform_output_fn(resp) if self.task == 'llm/v1/chat': return _transform_chat(resp) elif self.task == 'llm/v1/completions': return _transform_completions(resp) return resp else: wrapped_request = {'dataframe_records': [request]} response = self.client.predict(endpoint=self.endpoint_name, inputs= wrapped_request) preds = response['predictions'] pred = preds[0] if isinstance(preds, list) else preds if self.task == 'llama2/chat': return _transform_llama2_chat(pred) return transform_output_fn(pred) if transform_output_fn else pred
def post(self, request: Any, transform_output_fn: Optional[Callable[..., str]]=None) ->Any: if self.external_or_foundation: resp = self.client.predict(endpoint=self.endpoint_name, inputs=request) if transform_output_fn: return transform_output_fn(resp) if self.task == 'llm/v1/chat': return _transform_chat(resp) elif self.task == 'llm/v1/completions': return _transform_completions(resp) return resp else: wrapped_request = {'dataframe_records': [request]} response = self.client.predict(endpoint=self.endpoint_name, inputs= wrapped_request) preds = response['predictions'] pred = preds[0] if isinstance(preds, list) else preds if self.task == 'llama2/chat': return _transform_llama2_chat(pred) return transform_output_fn(pred) if transform_output_fn else pred
null
test_with_listeners
prompt = SystemMessagePromptTemplate.from_template('You are a nice assistant.' ) + '{question}' chat = FakeListChatModel(responses=['foo']) chain: Runnable = prompt | chat mock_start = mocker.Mock() mock_end = mocker.Mock() chain.with_listeners(on_start=mock_start, on_end=mock_end).invoke({ 'question': 'Who are you?'}) assert mock_start.call_count == 1 assert mock_start.call_args[0][0].name == 'RunnableSequence' assert mock_end.call_count == 1 mock_start.reset_mock() mock_end.reset_mock() with trace_as_chain_group('hello') as manager: chain.with_listeners(on_start=mock_start, on_end=mock_end).invoke({ 'question': 'Who are you?'}, {'callbacks': manager}) assert mock_start.call_count == 1 assert mock_start.call_args[0][0].name == 'RunnableSequence' assert mock_end.call_count == 1
def test_with_listeners(mocker: MockerFixture) ->None: prompt = SystemMessagePromptTemplate.from_template( 'You are a nice assistant.') + '{question}' chat = FakeListChatModel(responses=['foo']) chain: Runnable = prompt | chat mock_start = mocker.Mock() mock_end = mocker.Mock() chain.with_listeners(on_start=mock_start, on_end=mock_end).invoke({ 'question': 'Who are you?'}) assert mock_start.call_count == 1 assert mock_start.call_args[0][0].name == 'RunnableSequence' assert mock_end.call_count == 1 mock_start.reset_mock() mock_end.reset_mock() with trace_as_chain_group('hello') as manager: chain.with_listeners(on_start=mock_start, on_end=mock_end).invoke({ 'question': 'Who are you?'}, {'callbacks': manager}) assert mock_start.call_count == 1 assert mock_start.call_args[0][0].name == 'RunnableSequence' assert mock_end.call_count == 1
null
_get_python_function_arguments
"""Get JsonSchema describing a Python functions arguments. Assumes all function arguments are of primitive types (int, float, str, bool) or are subclasses of pydantic.BaseModel. """ properties = {} annotations = inspect.getfullargspec(function).annotations for arg, arg_type in annotations.items(): if arg == 'return': continue if isinstance(arg_type, type) and issubclass(arg_type, BaseModel): properties[arg] = arg_type.schema() elif arg_type.__name__ in PYTHON_TO_JSON_TYPES: properties[arg] = {'type': PYTHON_TO_JSON_TYPES[arg_type.__name__]} if arg in arg_descriptions: if arg not in properties: properties[arg] = {} properties[arg]['description'] = arg_descriptions[arg] return properties
def _get_python_function_arguments(function: Callable, arg_descriptions: dict ) ->dict: """Get JsonSchema describing a Python functions arguments. Assumes all function arguments are of primitive types (int, float, str, bool) or are subclasses of pydantic.BaseModel. """ properties = {} annotations = inspect.getfullargspec(function).annotations for arg, arg_type in annotations.items(): if arg == 'return': continue if isinstance(arg_type, type) and issubclass(arg_type, BaseModel): properties[arg] = arg_type.schema() elif arg_type.__name__ in PYTHON_TO_JSON_TYPES: properties[arg] = {'type': PYTHON_TO_JSON_TYPES[arg_type.__name__]} if arg in arg_descriptions: if arg not in properties: properties[arg] = {} properties[arg]['description'] = arg_descriptions[arg] return properties
Get JsonSchema describing a Python functions arguments. Assumes all function arguments are of primitive types (int, float, str, bool) or are subclasses of pydantic.BaseModel.
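A worked illustration of the annotation-to-JSON-Schema mapping above; the example function and description are hypothetical, and the int/float mappings noted in the comments are assumptions consistent with the code:
def multiply(a: int, b: float) -> float:
    # Hypothetical example function with primitive-typed arguments.
    return a * b

# With arg_descriptions = {'a': 'first factor'}, the helper above would produce roughly:
# {'a': {'type': 'integer', 'description': 'first factor'}, 'b': {'type': 'number'}}
# assuming PYTHON_TO_JSON_TYPES maps int to 'integer' and float to 'number'.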
_run
pass
@abstractmethod def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] =None) ->str: pass
null
test_parse_with_language_without_a_new_line
llm_output = """I can use the `foo` tool to achieve the goal. Action: ```json{"action": "foo", "action_input": "bar"}``` """ output, log = get_action_and_input(llm_output) assert output == llm_output assert log == llm_output
def test_parse_with_language_without_a_new_line() ->None: llm_output = """I can use the `foo` tool to achieve the goal. Action: ```json{"action": "foo", "action_input": "bar"}``` """ output, log = get_action_and_input(llm_output) assert output == llm_output assert log == llm_output
null
on_llm_error
"""Run when LLM errors."""
def on_llm_error(self, error: BaseException, **kwargs: Any) ->None: """Run when LLM errors."""
Run when LLM errors.
batch
if isinstance(config, list): configs = cast(List[RunnableConfig], [self._merge_configs(conf) for conf in config]) else: configs = [self._merge_configs(config) for _ in range(len(inputs))] return self.bound.batch(inputs, configs, return_exceptions= return_exceptions, **{**self.kwargs, **kwargs})
def batch(self, inputs: List[Input], config: Optional[Union[RunnableConfig, List[RunnableConfig]]]=None, *, return_exceptions: bool=False, **kwargs: Optional[Any]) ->List[Output]: if isinstance(config, list): configs = cast(List[RunnableConfig], [self._merge_configs(conf) for conf in config]) else: configs = [self._merge_configs(config) for _ in range(len(inputs))] return self.bound.batch(inputs, configs, return_exceptions= return_exceptions, **{**self.kwargs, **kwargs})
null
parse
includes_answer = FINAL_ANSWER_ACTION in text regex = ( 'Action\\s*\\d*\\s*:[\\s]*(.*?)[\\s]*Action\\s*\\d*\\s*Input\\s*\\d*\\s*:[\\s]*(.*)' ) action_match = re.search(regex, text, re.DOTALL) if action_match and includes_answer: if text.find(FINAL_ANSWER_ACTION) < text.find(action_match.group(0)): start_index = text.find(FINAL_ANSWER_ACTION) + len(FINAL_ANSWER_ACTION) end_index = text.find('\n\n', start_index) return AgentFinish({'output': text[start_index:end_index].strip()}, text[:end_index]) else: raise OutputParserException( f'{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}') if action_match: action = action_match.group(1).strip() action_input = action_match.group(2) tool_input = action_input.strip(' ') if tool_input.startswith('SELECT ') is False: tool_input = tool_input.strip('"') return AgentAction(action, tool_input, text) elif includes_answer: return AgentFinish({'output': text.split(FINAL_ANSWER_ACTION)[-1].strip ()}, text) if not re.search('Action\\s*\\d*\\s*:[\\s]*(.*?)', text, re.DOTALL): raise OutputParserException(f'Could not parse LLM output: `{text}`', observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE, llm_output= text, send_to_llm=True) elif not re.search('[\\s]*Action\\s*\\d*\\s*Input\\s*\\d*\\s*:[\\s]*(.*)', text, re.DOTALL): raise OutputParserException(f'Could not parse LLM output: `{text}`', observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE, llm_output=text, send_to_llm=True) else: raise OutputParserException(f'Could not parse LLM output: `{text}`')
def parse(self, text: str) ->Union[AgentAction, AgentFinish]: includes_answer = FINAL_ANSWER_ACTION in text regex = ( 'Action\\s*\\d*\\s*:[\\s]*(.*?)[\\s]*Action\\s*\\d*\\s*Input\\s*\\d*\\s*:[\\s]*(.*)' ) action_match = re.search(regex, text, re.DOTALL) if action_match and includes_answer: if text.find(FINAL_ANSWER_ACTION) < text.find(action_match.group(0)): start_index = text.find(FINAL_ANSWER_ACTION) + len( FINAL_ANSWER_ACTION) end_index = text.find('\n\n', start_index) return AgentFinish({'output': text[start_index:end_index].strip ()}, text[:end_index]) else: raise OutputParserException( f'{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}') if action_match: action = action_match.group(1).strip() action_input = action_match.group(2) tool_input = action_input.strip(' ') if tool_input.startswith('SELECT ') is False: tool_input = tool_input.strip('"') return AgentAction(action, tool_input, text) elif includes_answer: return AgentFinish({'output': text.split(FINAL_ANSWER_ACTION)[-1]. strip()}, text) if not re.search('Action\\s*\\d*\\s*:[\\s]*(.*?)', text, re.DOTALL): raise OutputParserException(f'Could not parse LLM output: `{text}`', observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE, llm_output=text, send_to_llm=True) elif not re.search('[\\s]*Action\\s*\\d*\\s*Input\\s*\\d*\\s*:[\\s]*(.*)', text, re.DOTALL): raise OutputParserException(f'Could not parse LLM output: `{text}`', observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE, llm_output=text, send_to_llm=True) else: raise OutputParserException(f'Could not parse LLM output: `{text}`')
null
input_keys
""" Get the input keys. Returns: List[str]: The input keys. """ return ['reference', 'prediction']
@property def input_keys(self) ->List[str]: """ Get the input keys. Returns: List[str]: The input keys. """ return ['reference', 'prediction']
Get the input keys. Returns: List[str]: The input keys.
__init__
super().__init__(Error=Error, **kwargs)
def __init__(self, Error: BaseException, **kwargs: Any) ->None: super().__init__(Error=Error, **kwargs)
null
test_openai_streaming_callback
"""Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) llm = OpenAI(max_tokens=10, streaming=True, temperature=0, callback_manager =callback_manager, verbose=True) llm('Write me a sentence with 100 words.') assert callback_handler.llm_streams == 10
def test_openai_streaming_callback() ->None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) llm = OpenAI(max_tokens=10, streaming=True, temperature=0, callback_manager=callback_manager, verbose=True) llm('Write me a sentence with 100 words.') assert callback_handler.llm_streams == 10
Test that streaming correctly invokes on_llm_new_token callback.
log
if self.vw_logger.logging_enabled(): vw_ex = self.feature_embedder.format(event) self.vw_logger.log(vw_ex)
def log(self, event: TEvent) ->None: if self.vw_logger.logging_enabled(): vw_ex = self.feature_embedder.format(event) self.vw_logger.log(vw_ex)
null
__eq__
"""Create a RedisText equality (exact match) filter expression. Args: other (str): The text value to filter on. Example: >>> from langchain_community.vectorstores.redis import RedisText >>> filter = RedisText("job") == "engineer" """ self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.EQ) return RedisFilterExpression(str(self))
@check_operator_misuse def __eq__(self, other: str) ->'RedisFilterExpression': """Create a RedisText equality (exact match) filter expression. Args: other (str): The text value to filter on. Example: >>> from langchain_community.vectorstores.redis import RedisText >>> filter = RedisText("job") == "engineer" """ self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.EQ) return RedisFilterExpression(str(self))
Create a RedisText equality (exact match) filter expression. Args: other (str): The text value to filter on. Example: >>> from langchain_community.vectorstores.redis import RedisText >>> filter = RedisText("job") == "engineer"
search_api
"""Search the API for the query.""" return f'arg_0={arg_0}, arg_1={arg_1}, ping={ping}'
@tool(return_direct=True) def search_api(arg_0: str, arg_1: float=4.3, ping: str='hi') ->str: """Search the API for the query.""" return f'arg_0={arg_0}, arg_1={arg_1}, ping={ping}'
Search the API for the query.
from_embeddings
"""Construct Hologres wrapper from raw documents and pre- generated embeddings. Return VectorStore initialized from documents and embeddings. Hologres connection string is required "Either pass it as a parameter or set the HOLOGRES_CONNECTION_STRING environment variable. Create the connection string by calling HologresVector.connection_string_from_db_params Example: .. code-block:: python from langchain_community.vectorstores import Hologres from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) faiss = Hologres.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from(texts, embeddings, embedding, metadatas=metadatas, ids= ids, ndims=ndims, table_name=table_name, pre_delete_table= pre_delete_table, **kwargs)
@classmethod def from_embeddings(cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]]=None, ndims: int =ADA_TOKEN_COUNT, table_name: str=_LANGCHAIN_DEFAULT_TABLE_NAME, ids: Optional[List[str]]=None, pre_delete_table: bool=False, **kwargs: Any ) ->Hologres: """Construct Hologres wrapper from raw documents and pre- generated embeddings. Return VectorStore initialized from documents and embeddings. Hologres connection string is required "Either pass it as a parameter or set the HOLOGRES_CONNECTION_STRING environment variable. Create the connection string by calling HologresVector.connection_string_from_db_params Example: .. code-block:: python from langchain_community.vectorstores import Hologres from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) faiss = Hologres.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from(texts, embeddings, embedding, metadatas=metadatas, ids=ids, ndims=ndims, table_name=table_name, pre_delete_table= pre_delete_table, **kwargs)
Construct Hologres wrapper from raw documents and pre- generated embeddings. Return VectorStore initialized from documents and embeddings. Hologres connection string is required "Either pass it as a parameter or set the HOLOGRES_CONNECTION_STRING environment variable. Create the connection string by calling HologresVector.connection_string_from_db_params Example: .. code-block:: python from langchain_community.vectorstores import Hologres from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) faiss = Hologres.from_embeddings(text_embedding_pairs, embeddings)
test_run_info
"""Test that run_info is returned properly when specified""" chain = FakeChain() output = chain({'foo': 'bar'}, include_run_info=True) assert 'foo' in output assert 'bar' in output assert RUN_KEY in output
def test_run_info() ->None: """Test that run_info is returned properly when specified""" chain = FakeChain() output = chain({'foo': 'bar'}, include_run_info=True) assert 'foo' in output assert 'bar' in output assert RUN_KEY in output
Test that run_info is returned properly when specified
create_index
"""Create an index from a list of contexts. It modifies the index argument in-place! Args: contexts: List of contexts to embed. index: Index to use. embeddings: Embeddings model to use. sparse_encoder: Sparse encoder to use. ids: List of ids to use for the documents. metadatas: List of metadata to use for the documents. """ batch_size = 32 _iterator = range(0, len(contexts), batch_size) try: from tqdm.auto import tqdm _iterator = tqdm(_iterator) except ImportError: pass if ids is None: ids = [hash_text(context) for context in contexts] for i in _iterator: i_end = min(i + batch_size, len(contexts)) context_batch = contexts[i:i_end] batch_ids = ids[i:i_end] metadata_batch = metadatas[i:i_end] if metadatas else [{} for _ in context_batch] meta = [{'context': context, **metadata} for context, metadata in zip( context_batch, metadata_batch)] dense_embeds = embeddings.embed_documents(context_batch) sparse_embeds = sparse_encoder.encode_documents(context_batch) for s in sparse_embeds: s['values'] = [float(s1) for s1 in s['values']] vectors = [] for doc_id, sparse, dense, metadata in zip(batch_ids, sparse_embeds, dense_embeds, meta): vectors.append({'id': doc_id, 'sparse_values': sparse, 'values': dense, 'metadata': metadata}) index.upsert(vectors, namespace=namespace)
def create_index(contexts: List[str], index: Any, embeddings: Embeddings, sparse_encoder: Any, ids: Optional[List[str]]=None, metadatas: Optional [List[dict]]=None, namespace: Optional[str]=None) ->None: """Create an index from a list of contexts. It modifies the index argument in-place! Args: contexts: List of contexts to embed. index: Index to use. embeddings: Embeddings model to use. sparse_encoder: Sparse encoder to use. ids: List of ids to use for the documents. metadatas: List of metadata to use for the documents. """ batch_size = 32 _iterator = range(0, len(contexts), batch_size) try: from tqdm.auto import tqdm _iterator = tqdm(_iterator) except ImportError: pass if ids is None: ids = [hash_text(context) for context in contexts] for i in _iterator: i_end = min(i + batch_size, len(contexts)) context_batch = contexts[i:i_end] batch_ids = ids[i:i_end] metadata_batch = metadatas[i:i_end] if metadatas else [{} for _ in context_batch] meta = [{'context': context, **metadata} for context, metadata in zip(context_batch, metadata_batch)] dense_embeds = embeddings.embed_documents(context_batch) sparse_embeds = sparse_encoder.encode_documents(context_batch) for s in sparse_embeds: s['values'] = [float(s1) for s1 in s['values']] vectors = [] for doc_id, sparse, dense, metadata in zip(batch_ids, sparse_embeds, dense_embeds, meta): vectors.append({'id': doc_id, 'sparse_values': sparse, 'values': dense, 'metadata': metadata}) index.upsert(vectors, namespace=namespace)
Create an index from a list of contexts. It modifies the index argument in-place! Args: contexts: List of contexts to embed. index: Index to use. embeddings: Embeddings model to use. sparse_encoder: Sparse encoder to use. ids: List of ids to use for the documents. metadatas: List of metadata to use for the documents.
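A runnable illustration of the batching arithmetic and record shape used above; the sparse-vector layout ('indices'/'values') is an assumption about the encoder output, not taken from this row:
contexts = [f'context {i}' for i in range(70)]
batches = [contexts[i:min(i + 32, len(contexts))] for i in range(0, len(contexts), 32)]
assert [len(b) for b in batches] == [32, 32, 6]
# Each upserted record pairs one dense vector with one sparse vector and its metadata:
record = {'id': 'doc-0', 'sparse_values': {'indices': [1, 7], 'values': [0.5, 0.5]}, 'values': [0.1, 0.2, 0.3], 'metadata': {'context': contexts[0]}}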
test_load_tools_with_callbacks_is_called
"""Test callbacks are called when provided to load_tools fn.""" callbacks = [FakeCallbackHandler()] tools = load_tools(['requests_get'], callbacks=callbacks) assert len(tools) == 1 with unittest.mock.patch('langchain.requests.TextRequestsWrapper.get', return_value=Mock(text='Hello world!')): result = tools[0].run('https://www.google.com') assert result.text == 'Hello world!' assert callbacks[0].tool_starts == 1 assert callbacks[0].tool_ends == 1
def test_load_tools_with_callbacks_is_called() ->None: """Test callbacks are called when provided to load_tools fn.""" callbacks = [FakeCallbackHandler()] tools = load_tools(['requests_get'], callbacks=callbacks) assert len(tools) == 1 with unittest.mock.patch('langchain.requests.TextRequestsWrapper.get', return_value=Mock(text='Hello world!')): result = tools[0].run('https://www.google.com') assert result.text == 'Hello world!' assert callbacks[0].tool_starts == 1 assert callbacks[0].tool_ends == 1
Test callbacks are called when provided to load_tools fn.
from_params
client = OpenWeatherMapAPIWrapper(openweathermap_api_key=openweathermap_api_key ) return cls(client, places)
@classmethod def from_params(cls, places: Sequence[str], *, openweathermap_api_key: Optional[str]=None) ->WeatherDataLoader: client = OpenWeatherMapAPIWrapper(openweathermap_api_key= openweathermap_api_key) return cls(client, places)
null
embed_documents
"""Get embeddings for a list of texts. Args: texts: The list of texts to get embeddings for. Returns: List of embeddings, one for each text. """ batches = [texts[i:i + MAX_BATCH_SIZE] for i in range(0, len(texts), MAX_BATCH_SIZE)] embeddings = [self._generate_embeddings(batch) for batch in batches] return [embedding for batch in embeddings for embedding in batch]
def embed_documents(self, texts: List[str]) ->List[List[float]]: """Get embeddings for a list of texts. Args: texts: The list of texts to get embeddings for. Returns: List of embeddings, one for each text. """ batches = [texts[i:i + MAX_BATCH_SIZE] for i in range(0, len(texts), MAX_BATCH_SIZE)] embeddings = [self._generate_embeddings(batch) for batch in batches] return [embedding for batch in embeddings for embedding in batch]
Get embeddings for a list of texts. Args: texts: The list of texts to get embeddings for. Returns: List of embeddings, one for each text.
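A minimal sketch of the batch-then-flatten pattern above, with MAX_BATCH_SIZE assumed to be 16 and a stand-in for self._generate_embeddings:
MAX_BATCH_SIZE = 16
texts = [f'text {i}' for i in range(40)]
batches = [texts[i:i + MAX_BATCH_SIZE] for i in range(0, len(texts), MAX_BATCH_SIZE)]
fake_generate_embeddings = lambda batch: [[float(len(t))] for t in batch]  # stand-in embedding call
embeddings = [fake_generate_embeddings(batch) for batch in batches]
flat = [embedding for batch in embeddings for embedding in batch]
assert len(batches) == 3 and len(flat) == len(texts)  # one embedding per input text, in input order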
preprocess
"""Prepares a message or list of messages for the payload""" messages = [self.prep_msg(m) for m in inputs] if labels: messages += [{'labels': labels, 'role': 'assistant'}] return {'messages': messages}
def preprocess(self, inputs: Sequence[Dict], labels: Optional[dict]=None ) ->dict: """Prepares a message or list of messages for the payload""" messages = [self.prep_msg(m) for m in inputs] if labels: messages += [{'labels': labels, 'role': 'assistant'}] return {'messages': messages}
Prepares a message or list of messages for the payload
_ListComp
self.write('[') self.dispatch(t.elt) for gen in t.generators: self.dispatch(gen) self.write(']')
def _ListComp(self, t): self.write('[') self.dispatch(t.elt) for gen in t.generators: self.dispatch(gen) self.write(']')
null
create_prompt
"""Create prompt for this agent. Args: system_message: Message to use as the system message that will be the first in the prompt. extra_prompt_messages: Prompt messages that will be placed between the system message and the new human input. Returns: A prompt template to pass into this agent. """ _prompts = extra_prompt_messages or [] messages: List[Union[BaseMessagePromptTemplate, BaseMessage]] if system_message: messages = [system_message] else: messages = [] messages.extend([*_prompts, HumanMessagePromptTemplate.from_template( '{input}'), MessagesPlaceholder(variable_name='agent_scratchpad')]) return ChatPromptTemplate(messages=messages)
@classmethod def create_prompt(cls, system_message: Optional[SystemMessage]= SystemMessage(content='You are a helpful AI assistant.'), extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]]=None ) ->BasePromptTemplate: """Create prompt for this agent. Args: system_message: Message to use as the system message that will be the first in the prompt. extra_prompt_messages: Prompt messages that will be placed between the system message and the new human input. Returns: A prompt template to pass into this agent. """ _prompts = extra_prompt_messages or [] messages: List[Union[BaseMessagePromptTemplate, BaseMessage]] if system_message: messages = [system_message] else: messages = [] messages.extend([*_prompts, HumanMessagePromptTemplate.from_template( '{input}'), MessagesPlaceholder(variable_name='agent_scratchpad')]) return ChatPromptTemplate(messages=messages)
Create prompt for this agent. Args: system_message: Message to use as the system message that will be the first in the prompt. extra_prompt_messages: Prompt messages that will be placed between the system message and the new human input. Returns: A prompt template to pass into this agent.
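A hedged call sketch for the row above; the owning class name (OpenAIFunctionsAgent) and the import paths are assumptions, since the row does not name them:
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain_core.messages import SystemMessage
from langchain_core.prompts import MessagesPlaceholder

prompt = OpenAIFunctionsAgent.create_prompt(system_message=SystemMessage(content='You are a helpful AI assistant.'), extra_prompt_messages=[MessagesPlaceholder(variable_name='chat_history')])
# Expected message order: system message, chat_history placeholder, '{input}', agent_scratchpad.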
embed_documents
"""Embed search docs."""
@abstractmethod def embed_documents(self, texts: List[str]) ->List[List[float]]: """Embed search docs."""
Embed search docs.
test_context_w_namespace_w_some_emb
str1 = 'test1' str2 = 'test2' encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2)) expected = [{'test_namespace': str1, 'test_namespace2': encoded_str2}] assert base.embed({'test_namespace': str1, 'test_namespace2': base.Embed( str2)}, MockEncoder()) == expected expected_embed_and_keep = [{'test_namespace': str1, 'test_namespace2': str2 + ' ' + encoded_str2}] assert base.embed({'test_namespace': str1, 'test_namespace2': base. EmbedAndKeep(str2)}, MockEncoder()) == expected_embed_and_keep
@pytest.mark.requires('vowpal_wabbit_next') def test_context_w_namespace_w_some_emb() ->None: str1 = 'test1' str2 = 'test2' encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2)) expected = [{'test_namespace': str1, 'test_namespace2': encoded_str2}] assert base.embed({'test_namespace': str1, 'test_namespace2': base. Embed(str2)}, MockEncoder()) == expected expected_embed_and_keep = [{'test_namespace': str1, 'test_namespace2': str2 + ' ' + encoded_str2}] assert base.embed({'test_namespace': str1, 'test_namespace2': base. EmbedAndKeep(str2)}, MockEncoder()) == expected_embed_and_keep
null
predict_new_summary
new_lines = get_buffer_string(messages, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix) chain = LLMChain(llm=self.llm, prompt=self.prompt) return chain.predict(summary=existing_summary, new_lines=new_lines)
def predict_new_summary(self, messages: List[BaseMessage], existing_summary: str) ->str: new_lines = get_buffer_string(messages, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix) chain = LLMChain(llm=self.llm, prompt=self.prompt) return chain.predict(summary=existing_summary, new_lines=new_lines)
null
_import_cassandra
from langchain_community.vectorstores.cassandra import Cassandra return Cassandra
def _import_cassandra() ->Any: from langchain_community.vectorstores.cassandra import Cassandra return Cassandra
null
_get_number_of_pages
try: import pypdf from PIL import Image, ImageSequence except ImportError: raise ModuleNotFoundError('Could not import pypdf or Pillow python package. Please install it with `pip install pypdf Pillow`.') if blob.mimetype == 'application/pdf': with blob.as_bytes_io() as input_pdf_file: pdf_reader = pypdf.PdfReader(input_pdf_file) return len(pdf_reader.pages) elif blob.mimetype == 'image/tiff': num_pages = 0 with blob.as_bytes_io() as input_image_file: img = Image.open(input_image_file) for _, _ in enumerate(ImageSequence.Iterator(img)): num_pages += 1 return num_pages elif blob.mimetype in ['image/png', 'image/jpeg']: return 1 else: raise ValueError(f'unsupported mime type: {blob.mimetype}')
@staticmethod def _get_number_of_pages(blob: Blob) ->int: try: import pypdf from PIL import Image, ImageSequence except ImportError: raise ModuleNotFoundError('Could not import pypdf or Pillow python package. Please install it with `pip install pypdf Pillow`.') if blob.mimetype == 'application/pdf': with blob.as_bytes_io() as input_pdf_file: pdf_reader = pypdf.PdfReader(input_pdf_file) return len(pdf_reader.pages) elif blob.mimetype == 'image/tiff': num_pages = 0 with blob.as_bytes_io() as input_image_file: img = Image.open(input_image_file) for _, _ in enumerate(ImageSequence.Iterator(img)): num_pages += 1 return num_pages elif blob.mimetype in ['image/png', 'image/jpeg']: return 1 else: raise ValueError(f'unsupported mime type: {blob.mimetype}')
null
_stream
"""Yields results objects as they are generated in real time. It also calls the callback manager's on_llm_new_token event with similar parameters to the OpenAI LLM class method of the same name. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: A generator representing the stream of tokens being generated. Yields: A dictionary like object containing a string token. Example: .. code-block:: python from langchain_community.llms import DeepSparse llm = DeepSparse( model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none", streaming=True ) for chunk in llm.stream("Tell me a joke", stop=["'"," "]): print(chunk, end='', flush=True) """ inference = self.pipeline(sequences=prompt, streaming=True, **self. generation_config) for token in inference: chunk = GenerationChunk(text=token.generations[0].text) yield chunk if run_manager: run_manager.on_llm_new_token(token=chunk.text)
def _stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[ GenerationChunk]: """Yields results objects as they are generated in real time. It also calls the callback manager's on_llm_new_token event with similar parameters to the OpenAI LLM class method of the same name. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: A generator representing the stream of tokens being generated. Yields: A dictionary like object containing a string token. Example: .. code-block:: python from langchain_community.llms import DeepSparse llm = DeepSparse( model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none", streaming=True ) for chunk in llm.stream("Tell me a joke", stop=["'"," "]): print(chunk, end='', flush=True) """ inference = self.pipeline(sequences=prompt, streaming=True, **self. generation_config) for token in inference: chunk = GenerationChunk(text=token.generations[0].text) yield chunk if run_manager: run_manager.on_llm_new_token(token=chunk.text)
Yields results objects as they are generated in real time. It also calls the callback manager's on_llm_new_token event with similar parameters to the OpenAI LLM class method of the same name. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: A generator representing the stream of tokens being generated. Yields: A dictionary like object containing a string token. Example: .. code-block:: python from langchain_community.llms import DeepSparse llm = DeepSparse( model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none", streaming=True ) for chunk in llm.stream("Tell me a joke", stop=["'"," "]): print(chunk, end='', flush=True)
test_load_returns_no_result
"""Test that returns no docs""" loader = ArxivLoader(query='1605.08386WWW', load_max_docs=2) docs = loader.load() assert len(docs) == 0
def test_load_returns_no_result() ->None: """Test that returns no docs""" loader = ArxivLoader(query='1605.08386WWW', load_max_docs=2) docs = loader.load() assert len(docs) == 0
Test that returns no docs
parse_result
"""Parse a list of candidate model Generations into a specific format. The return value is parsed from only the first Generation in the result, which is assumed to be the highest-likelihood Generation. Args: result: A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns: Structured output. """ return self.parse(result[0].text)
def parse_result(self, result: List[Generation], *, partial: bool=False) ->T: """Parse a list of candidate model Generations into a specific format. The return value is parsed from only the first Generation in the result, which is assumed to be the highest-likelihood Generation. Args: result: A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns: Structured output. """ return self.parse(result[0].text)
Parse a list of candidate model Generations into a specific format. The return value is parsed from only the first Generation in the result, which is assumed to be the highest-likelihood Generation. Args: result: A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns: Structured output.
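A small illustration of the "first Generation only" behavior described above; the whitespace-stripping parser mentioned in the comments is hypothetical:
from langchain_core.outputs import Generation

result = [Generation(text=' 42 '), Generation(text=' 43 ')]
# For this base implementation, parse_result(result) is simply parse(result[0].text);
# a parser whose parse() strips whitespace would therefore return '42' and ignore ' 43 '.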
test_loads_llmchain_with_non_serializable_arg
llm = OpenAI(model='davinci', temperature=0.5, openai_api_key='hello', http_client=NotSerializable) prompt = PromptTemplate.from_template('hello {name}!') chain = LLMChain(llm=llm, prompt=prompt) chain_string = dumps(chain, pretty=True) with pytest.raises(NotImplementedError): loads(chain_string, secrets_map={'OPENAI_API_KEY': 'hello'})
@pytest.mark.requires('openai') def test_loads_llmchain_with_non_serializable_arg() ->None: llm = OpenAI(model='davinci', temperature=0.5, openai_api_key='hello', http_client=NotSerializable) prompt = PromptTemplate.from_template('hello {name}!') chain = LLMChain(llm=llm, prompt=prompt) chain_string = dumps(chain, pretty=True) with pytest.raises(NotImplementedError): loads(chain_string, secrets_map={'OPENAI_API_KEY': 'hello'})
null
assert_docs
for doc in docs: assert doc.metadata assert set(doc.metadata) == {'Copyright Information', 'uid', 'Title', 'Published'}
def assert_docs(docs: List[Document]) ->None: for doc in docs: assert doc.metadata assert set(doc.metadata) == {'Copyright Information', 'uid', 'Title', 'Published'}
null
_refresh_access_token_with_lock
with self._lock: logger.debug('Refreshing access token') base_url: str = f'{self.ernie_api_base}/oauth/2.0/token' resp = requests.post(base_url, headers={'Content-Type': 'application/json', 'Accept': 'application/json'}, params={ 'grant_type': 'client_credentials', 'client_id': self. ernie_client_id, 'client_secret': self.ernie_client_secret}) self.access_token = str(resp.json().get('access_token'))
def _refresh_access_token_with_lock(self) ->None: with self._lock: logger.debug('Refreshing access token') base_url: str = f'{self.ernie_api_base}/oauth/2.0/token' resp = requests.post(base_url, headers={'Content-Type': 'application/json', 'Accept': 'application/json'}, params={ 'grant_type': 'client_credentials', 'client_id': self. ernie_client_id, 'client_secret': self.ernie_client_secret}) self.access_token = str(resp.json().get('access_token'))
null
test_connect_neo4j_env
"""Test that Neo4j database environment variables.""" graph = Neo4jGraph() output = graph.query(""" RETURN "test" AS output """) expected_output = [{'output': 'test'}] assert output == expected_output
def test_connect_neo4j_env() ->None: """Test that Neo4j database environment variables.""" graph = Neo4jGraph() output = graph.query(""" RETURN "test" AS output """) expected_output = [{'output': 'test'}] assert output == expected_output
Test that Neo4j database environment variables.
create_index
"""Creates an index using the index name specified at instance construction Setting the numLists parameter correctly is important for achieving good accuracy and performance. Since the vector store uses IVF as the indexing strategy, you should create the index only after you have loaded a large enough sample documents to ensure that the centroids for the respective buckets are faily distributed. We recommend that numLists is set to documentCount/1000 for up to 1 million documents and to sqrt(documentCount) for more than 1 million documents. As the number of items in your database grows, you should tune numLists to be larger in order to achieve good latency performance for vector search. If you're experimenting with a new scenario or creating a small demo, you can start with numLists set to 1 to perform a brute-force search across all vectors. This should provide you with the most accurate results from the vector search, however be aware that the search speed and latency will be slow. After your initial setup, you should go ahead and tune the numLists parameter using the above guidance. Args: num_lists: This integer is the number of clusters that the inverted file (IVF) index uses to group the vector data. We recommend that numLists is set to documentCount/1000 for up to 1 million documents and to sqrt(documentCount) for more than 1 million documents. Using a numLists value of 1 is akin to performing brute-force search, which has limited performance dimensions: Number of dimensions for vector similarity. The maximum number of supported dimensions is 2000 similarity: Similarity metric to use with the IVF index. Possible options are: - CosmosDBSimilarityType.COS (cosine distance), - CosmosDBSimilarityType.L2 (Euclidean distance), and - CosmosDBSimilarityType.IP (inner product). Returns: An object describing the created index """ create_index_commands = {'createIndexes': self._collection.name, 'indexes': [{'name': self._index_name, 'key': {self._embedding_key: 'cosmosSearch' }, 'cosmosSearchOptions': {'kind': 'vector-ivf', 'numLists': num_lists, 'similarity': similarity, 'dimensions': dimensions}}]} current_database = self._collection.database create_index_responses: dict[str, Any] = current_database.command( create_index_commands) return create_index_responses
def create_index(self, num_lists: int=100, dimensions: int=1536, similarity: CosmosDBSimilarityType=CosmosDBSimilarityType.COS) ->dict[str, Any]: """Creates an index using the index name specified at instance construction Setting the numLists parameter correctly is important for achieving good accuracy and performance. Since the vector store uses IVF as the indexing strategy, you should create the index only after you have loaded a large enough sample documents to ensure that the centroids for the respective buckets are faily distributed. We recommend that numLists is set to documentCount/1000 for up to 1 million documents and to sqrt(documentCount) for more than 1 million documents. As the number of items in your database grows, you should tune numLists to be larger in order to achieve good latency performance for vector search. If you're experimenting with a new scenario or creating a small demo, you can start with numLists set to 1 to perform a brute-force search across all vectors. This should provide you with the most accurate results from the vector search, however be aware that the search speed and latency will be slow. After your initial setup, you should go ahead and tune the numLists parameter using the above guidance. Args: num_lists: This integer is the number of clusters that the inverted file (IVF) index uses to group the vector data. We recommend that numLists is set to documentCount/1000 for up to 1 million documents and to sqrt(documentCount) for more than 1 million documents. Using a numLists value of 1 is akin to performing brute-force search, which has limited performance dimensions: Number of dimensions for vector similarity. The maximum number of supported dimensions is 2000 similarity: Similarity metric to use with the IVF index. Possible options are: - CosmosDBSimilarityType.COS (cosine distance), - CosmosDBSimilarityType.L2 (Euclidean distance), and - CosmosDBSimilarityType.IP (inner product). Returns: An object describing the created index """ create_index_commands = {'createIndexes': self._collection.name, 'indexes': [{'name': self._index_name, 'key': {self._embedding_key: 'cosmosSearch'}, 'cosmosSearchOptions': {'kind': 'vector-ivf', 'numLists': num_lists, 'similarity': similarity, 'dimensions': dimensions}}]} current_database = self._collection.database create_index_responses: dict[str, Any] = current_database.command( create_index_commands) return create_index_responses
Creates an index using the index name specified at instance construction Setting the numLists parameter correctly is important for achieving good accuracy and performance. Since the vector store uses IVF as the indexing strategy, you should create the index only after you have loaded a large enough sample documents to ensure that the centroids for the respective buckets are faily distributed. We recommend that numLists is set to documentCount/1000 for up to 1 million documents and to sqrt(documentCount) for more than 1 million documents. As the number of items in your database grows, you should tune numLists to be larger in order to achieve good latency performance for vector search. If you're experimenting with a new scenario or creating a small demo, you can start with numLists set to 1 to perform a brute-force search across all vectors. This should provide you with the most accurate results from the vector search, however be aware that the search speed and latency will be slow. After your initial setup, you should go ahead and tune the numLists parameter using the above guidance. Args: num_lists: This integer is the number of clusters that the inverted file (IVF) index uses to group the vector data. We recommend that numLists is set to documentCount/1000 for up to 1 million documents and to sqrt(documentCount) for more than 1 million documents. Using a numLists value of 1 is akin to performing brute-force search, which has limited performance dimensions: Number of dimensions for vector similarity. The maximum number of supported dimensions is 2000 similarity: Similarity metric to use with the IVF index. Possible options are: - CosmosDBSimilarityType.COS (cosine distance), - CosmosDBSimilarityType.L2 (Euclidean distance), and - CosmosDBSimilarityType.IP (inner product). Returns: An object describing the created index
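A small helper sketch encoding the numLists guidance from the docstring above; the function is hypothetical and not part of the row's code:
import math

def suggested_num_lists(document_count: int) -> int:
    # Hypothetical helper: documentCount/1000 up to 1 million docs, sqrt(documentCount) beyond.
    if document_count <= 1_000_000:
        return max(1, document_count // 1000)
    return round(math.sqrt(document_count))

assert suggested_num_lists(50_000) == 50
assert suggested_num_lists(4_000_000) == 2000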
test_structured_tool_from_function_docstring_complex_args
"""Test that structured tools can be created from functions.""" def foo(bar: int, baz: List[str]) ->str: """Docstring Args: bar: int baz: List[str] """ raise NotImplementedError() structured_tool = StructuredTool.from_function(foo) assert structured_tool.name == 'foo' assert structured_tool.args == {'bar': {'title': 'Bar', 'type': 'integer'}, 'baz': {'title': 'Baz', 'type': 'array', 'items': {'type': 'string'}}} assert structured_tool.args_schema.schema() == {'properties': {'bar': { 'title': 'Bar', 'type': 'integer'}, 'baz': {'title': 'Baz', 'type': 'array', 'items': {'type': 'string'}}}, 'title': 'fooSchemaSchema', 'type': 'object', 'required': ['bar', 'baz']} prefix = 'foo(bar: int, baz: List[str]) -> str - ' assert foo.__doc__ is not None assert structured_tool.description == prefix + foo.__doc__.strip()
def test_structured_tool_from_function_docstring_complex_args() ->None: """Test that structured tools can be created from functions.""" def foo(bar: int, baz: List[str]) ->str: """Docstring Args: bar: int baz: List[str] """ raise NotImplementedError() structured_tool = StructuredTool.from_function(foo) assert structured_tool.name == 'foo' assert structured_tool.args == {'bar': {'title': 'Bar', 'type': 'integer'}, 'baz': {'title': 'Baz', 'type': 'array', 'items': { 'type': 'string'}}} assert structured_tool.args_schema.schema() == {'properties': {'bar': { 'title': 'Bar', 'type': 'integer'}, 'baz': {'title': 'Baz', 'type': 'array', 'items': {'type': 'string'}}}, 'title': 'fooSchemaSchema', 'type': 'object', 'required': ['bar', 'baz']} prefix = 'foo(bar: int, baz: List[str]) -> str - ' assert foo.__doc__ is not None assert structured_tool.description == prefix + foo.__doc__.strip()
Test that structured tools can be created from functions.
messages
"""Retrieve the messages from Elasticsearch""" try: from elasticsearch import ApiError result = self.client.search(index=self.index, query={'term': { 'session_id': self.session_id}}, sort='created_at:asc') except ApiError as err: logger.error(f'Could not retrieve messages from Elasticsearch: {err}') raise err if result and len(result['hits']['hits']) > 0: items = [json.loads(document['_source']['history']) for document in result['hits']['hits']] else: items = [] return messages_from_dict(items)
@property def messages(self) ->List[BaseMessage]: """Retrieve the messages from Elasticsearch""" try: from elasticsearch import ApiError result = self.client.search(index=self.index, query={'term': { 'session_id': self.session_id}}, sort='created_at:asc') except ApiError as err: logger.error(f'Could not retrieve messages from Elasticsearch: {err}') raise err if result and len(result['hits']['hits']) > 0: items = [json.loads(document['_source']['history']) for document in result['hits']['hits']] else: items = [] return messages_from_dict(items)
Retrieve the messages from Elasticsearch
_outline_api_query
raw_result = requests.post( f'{self.outline_instance_url}{self.outline_search_endpoint}', data={ 'query': query, 'limit': self.top_k_results}, headers={'Authorization': f'Bearer {self.outline_api_key}'}) if not raw_result.ok: raise ValueError('Outline API returned an error: ', raw_result.text) return raw_result.json()['data']
def _outline_api_query(self, query: str) ->List: raw_result = requests.post( f'{self.outline_instance_url}{self.outline_search_endpoint}', data= {'query': query, 'limit': self.top_k_results}, headers={ 'Authorization': f'Bearer {self.outline_api_key}'}) if not raw_result.ok: raise ValueError('Outline API returned an error: ', raw_result.text) return raw_result.json()['data']
null
_convert_message_to_dict
if isinstance(message, ChatMessage): message_dict = {'role': message.role, 'content': message.content} elif isinstance(message, HumanMessage): message_dict = {'role': 'user', 'content': message.content} elif isinstance(message, AIMessage): message_dict = {'role': 'assistant', 'content': message.content} elif isinstance(message, SystemMessage): message_dict = {'role': 'system', 'content': message.content} elif isinstance(message, FunctionMessage): raise ValueError( 'Function messages are not supported by the Javelin AI Gateway. Please create a feature request at https://docs.getjavelin.io' ) else: raise ValueError(f'Got unknown message type: {message}') if 'function_call' in message.additional_kwargs: ChatJavelinAIGateway._raise_functions_not_supported() if message.additional_kwargs: logger.warning( 'Additional message arguments are unsupported by Javelin AI Gateway and will be ignored: %s' , message.additional_kwargs) return message_dict
@staticmethod def _convert_message_to_dict(message: BaseMessage) ->dict: if isinstance(message, ChatMessage): message_dict = {'role': message.role, 'content': message.content} elif isinstance(message, HumanMessage): message_dict = {'role': 'user', 'content': message.content} elif isinstance(message, AIMessage): message_dict = {'role': 'assistant', 'content': message.content} elif isinstance(message, SystemMessage): message_dict = {'role': 'system', 'content': message.content} elif isinstance(message, FunctionMessage): raise ValueError( 'Function messages are not supported by the Javelin AI Gateway. Please create a feature request at https://docs.getjavelin.io' ) else: raise ValueError(f'Got unknown message type: {message}') if 'function_call' in message.additional_kwargs: ChatJavelinAIGateway._raise_functions_not_supported() if message.additional_kwargs: logger.warning( 'Additional message arguments are unsupported by Javelin AI Gateway and will be ignored: %s' , message.additional_kwargs) return message_dict
null
_format_document_analysis_result
formatted_result = [] if 'content' in document_analysis_result: formatted_result.append(f"Content: {document_analysis_result['content']}" .replace('\n', ' ')) if 'tables' in document_analysis_result: for i, table in enumerate(document_analysis_result['tables']): formatted_result.append(f'Table {i}: {table}'.replace('\n', ' ')) if 'key_value_pairs' in document_analysis_result: for kv_pair in document_analysis_result['key_value_pairs']: formatted_result.append(f'{kv_pair[0]}: {kv_pair[1]}'.replace('\n', ' ')) return '\n'.join(formatted_result)
def _format_document_analysis_result(self, document_analysis_result: Dict ) ->str: formatted_result = [] if 'content' in document_analysis_result: formatted_result.append( f"Content: {document_analysis_result['content']}".replace('\n', ' ')) if 'tables' in document_analysis_result: for i, table in enumerate(document_analysis_result['tables']): formatted_result.append(f'Table {i}: {table}'.replace('\n', ' ')) if 'key_value_pairs' in document_analysis_result: for kv_pair in document_analysis_result['key_value_pairs']: formatted_result.append(f'{kv_pair[0]}: {kv_pair[1]}'.replace( '\n', ' ')) return '\n'.join(formatted_result)
null
_identifying_params
"""Get the identifying parameters.""" return {'model': self.model, 'temperature': self.temperature, 'top_k': self .top_k, 'n': self.n}
@property def _identifying_params(self) ->Dict[str, Any]: """Get the identifying parameters.""" return {'model': self.model, 'temperature': self.temperature, 'top_k': self.top_k, 'n': self.n}
Get the identifying parameters.
test_initialization
"""Test embedding model initialization.""" __ModuleName__Embeddings()
def test_initialization() ->None: """Test embedding model initialization.""" __ModuleName__Embeddings()
Test embedding model initialization.
split_text
"""Split incoming text and return chunks.""" splits = self._tokenizer(text, language=self._language) return self._merge_splits(splits, self._separator)
def split_text(self, text: str) ->List[str]: """Split incoming text and return chunks.""" splits = self._tokenizer(text, language=self._language) return self._merge_splits(splits, self._separator)
Split incoming text and return chunks.
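A short usage sketch of this kind of tokenizer-backed splitter. The concrete class is not named in the record, so NLTKTextSplitter and the chunk_size value are assumptions for illustration (NLTK's 'punkt' tokenizer data must be available):

from langchain.text_splitter import NLTKTextSplitter

# Assumed class: any splitter whose split_text tokenizes into sentences and
# then merges them via _merge_splits behaves the same way.
splitter = NLTKTextSplitter(chunk_size=200, chunk_overlap=0)
chunks = splitter.split_text("First sentence. Second sentence. Third sentence.")
for chunk in chunks:
    print(chunk)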
test_resolve_criteria_enum
val = resolve_pairwise_criteria(criterion) assert isinstance(val, dict) assert next(iter(val)) == criterion.value
@pytest.mark.parametrize('criterion', list(Criteria)) def test_resolve_criteria_enum(criterion: Criteria) ->None: val = resolve_pairwise_criteria(criterion) assert isinstance(val, dict) assert next(iter(val)) == criterion.value
null
test_clarifai_with_metadatas
"""Test end to end construction and search with metadata.""" texts = ['oof', 'rab', 'zab'] metadatas = [{'page': str(i)} for i in range(len(texts))] USER_ID = 'minhajul' APP_ID = 'test-lang-2' NUMBER_OF_DOCS = 1 docsearch = Clarifai.from_texts(user_id=USER_ID, app_id=APP_ID, texts=texts, pat=None, number_of_docs=NUMBER_OF_DOCS, metadatas=metadatas) time.sleep(2.5) output = docsearch.similarity_search('oof', k=1) assert output == [Document(page_content='oof', metadata={'page': '0'})]
def test_clarifai_with_metadatas() ->None: """Test end to end construction and search with metadata.""" texts = ['oof', 'rab', 'zab'] metadatas = [{'page': str(i)} for i in range(len(texts))] USER_ID = 'minhajul' APP_ID = 'test-lang-2' NUMBER_OF_DOCS = 1 docsearch = Clarifai.from_texts(user_id=USER_ID, app_id=APP_ID, texts= texts, pat=None, number_of_docs=NUMBER_OF_DOCS, metadatas=metadatas) time.sleep(2.5) output = docsearch.similarity_search('oof', k=1) assert output == [Document(page_content='oof', metadata={'page': '0'})]
Test end to end construction and search with metadata.
test_visit_operation
op = Operation(operator=Operator.AND, arguments=[Comparison(comparator= Comparator.LT, attribute='foo', value=2), Comparison(comparator= Comparator.EQ, attribute='bar', value='baz')]) expected = "metadata.foo < 2 AND metadata.bar = 'baz'" actual = DEFAULT_TRANSLATOR.visit_operation(op) assert expected == actual
def test_visit_operation() ->None: op = Operation(operator=Operator.AND, arguments=[Comparison(comparator= Comparator.LT, attribute='foo', value=2), Comparison(comparator= Comparator.EQ, attribute='bar', value='baz')]) expected = "metadata.foo < 2 AND metadata.bar = 'baz'" actual = DEFAULT_TRANSLATOR.visit_operation(op) assert expected == actual
null
texts
return ['foo', 'bar', 'baz']
@pytest.fixture def texts() ->List[str]: return ['foo', 'bar', 'baz']
null
_type
return 'markdown-list'
@property def _type(self) ->str: return 'markdown-list'
null
surface_langchain_deprecation_warnings
"""Unmute LangChain deprecation warnings.""" warnings.filterwarnings('default', category=LangChainPendingDeprecationWarning) warnings.filterwarnings('default', category=LangChainDeprecationWarning)
def surface_langchain_deprecation_warnings() ->None: """Unmute LangChain deprecation warnings.""" warnings.filterwarnings('default', category= LangChainPendingDeprecationWarning) warnings.filterwarnings('default', category=LangChainDeprecationWarning)
Unmute LangChain deprecation warnings.
lazy_load
""" Lazily load text content from the provided URLs. This method yields Documents one at a time as they're scraped, instead of waiting to scrape all URLs before returning. Yields: Document: The scraped content encapsulated within a Document object. """ for url in self.urls: html_content = asyncio.run(self.ascrape_playwright(url)) metadata = {'source': url} yield Document(page_content=html_content, metadata=metadata)
def lazy_load(self) ->Iterator[Document]: """ Lazily load text content from the provided URLs. This method yields Documents one at a time as they're scraped, instead of waiting to scrape all URLs before returning. Yields: Document: The scraped content encapsulated within a Document object. """ for url in self.urls: html_content = asyncio.run(self.ascrape_playwright(url)) metadata = {'source': url} yield Document(page_content=html_content, metadata=metadata)
Lazily load text content from the provided URLs. This method yields Documents one at a time as they're scraped, instead of waiting to scrape all URLs before returning. Yields: Document: The scraped content encapsulated within a Document object.
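A minimal usage sketch of the lazy-loading pattern above; the AsyncChromiumLoader class and the URL are assumptions for illustration, and Playwright must be installed for the scrape to run:

from langchain_community.document_loaders import AsyncChromiumLoader

loader = AsyncChromiumLoader(["https://example.com"])
for doc in loader.lazy_load():
    # Each Document is yielded as soon as its URL has been scraped,
    # rather than after all URLs have been processed.
    print(doc.metadata["source"], len(doc.page_content))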
test_run_single_arg
"""Test run method with single arg.""" chain = FakeChain() output = chain.run('bar') assert output == 'baz'
def test_run_single_arg() ->None: """Test run method with single arg.""" chain = FakeChain() output = chain.run('bar') assert output == 'baz'
Test run method with single arg.
test_function
"""Test correct functionality.""" chain = PythonREPL() code = 'def add(a, b): return a + b' output = chain.run(code) assert output == '' code = 'print(add(1, 2))' output = chain.run(code) assert output == '3\n'
def test_function() ->None: """Test correct functionality.""" chain = PythonREPL() code = 'def add(a, b): return a + b' output = chain.run(code) assert output == '' code = 'print(add(1, 2))' output = chain.run(code) assert output == '3\n'
Test correct functionality.
_llm_type
"""Return type of llm.""" return 'amazon_bedrock'
@property def _llm_type(self) ->str: """Return type of llm.""" return 'amazon_bedrock'
Return type of llm.
test_json_output_function_parser
"""Test the JSON output function parser is configured with robust defaults.""" message = AIMessage(content='This is a test message', additional_kwargs={ 'function_call': {'name': 'function_name', 'arguments': """{"arg1": "code code"}"""}}) chat_generation = ChatGeneration(message=message) parser = JsonOutputFunctionsParser(args_only=False) result = parser.parse_result([chat_generation]) assert result == {'arguments': {'arg1': 'code\ncode'}, 'name': 'function_name'} parser = JsonOutputFunctionsParser(args_only=True) result = parser.parse_result([chat_generation]) assert result == {'arg1': 'code\ncode'} assert message.additional_kwargs == {'function_call': {'name': 'function_name', 'arguments': """{"arg1": "code code"}"""}}
def test_json_output_function_parser() ->None: """Test the JSON output function parser is configured with robust defaults.""" message = AIMessage(content='This is a test message', additional_kwargs ={'function_call': {'name': 'function_name', 'arguments': """{"arg1": "code code"}"""}}) chat_generation = ChatGeneration(message=message) parser = JsonOutputFunctionsParser(args_only=False) result = parser.parse_result([chat_generation]) assert result == {'arguments': {'arg1': 'code\ncode'}, 'name': 'function_name'} parser = JsonOutputFunctionsParser(args_only=True) result = parser.parse_result([chat_generation]) assert result == {'arg1': 'code\ncode'} assert message.additional_kwargs == {'function_call': {'name': 'function_name', 'arguments': """{"arg1": "code code"}"""}}
Test the JSON output function parser is configured with robust defaults.
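A hedged usage sketch of the parser exercised by this test, outside the test harness; the import paths reflect one common layout and may differ across versions:

from langchain_core.messages import AIMessage
from langchain_core.outputs import ChatGeneration
from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser

# Illustrative function call payload; the function name and arguments are made up.
message = AIMessage(content="", additional_kwargs={"function_call": {
    "name": "get_weather", "arguments": '{"city": "Paris"}'}})
parser = JsonOutputFunctionsParser(args_only=True)
print(parser.parse_result([ChatGeneration(message=message)]))  # {'city': 'Paris'}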
lazy_load
"""Load documents lazily.""" try: from datasets import load_dataset except ImportError: raise ImportError( 'Could not import datasets python package. Please install it with `pip install datasets`.' ) dataset = load_dataset(path=self.path, name=self.name, data_dir=self. data_dir, data_files=self.data_files, cache_dir=self.cache_dir, keep_in_memory=self.keep_in_memory, save_infos=self.save_infos, use_auth_token=self.use_auth_token, num_proc=self.num_proc) yield from (Document(page_content=self.parse_obj(row.pop(self. page_content_column)), metadata=row) for key in dataset.keys() for row in dataset[key])
def lazy_load(self) ->Iterator[Document]: """Load documents lazily.""" try: from datasets import load_dataset except ImportError: raise ImportError( 'Could not import datasets python package. Please install it with `pip install datasets`.' ) dataset = load_dataset(path=self.path, name=self.name, data_dir=self. data_dir, data_files=self.data_files, cache_dir=self.cache_dir, keep_in_memory=self.keep_in_memory, save_infos=self.save_infos, use_auth_token=self.use_auth_token, num_proc=self.num_proc) yield from (Document(page_content=self.parse_obj(row.pop(self. page_content_column)), metadata=row) for key in dataset.keys() for row in dataset[key])
Load documents lazily.
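A usage sketch for the dataset-backed loader above; HuggingFaceDatasetLoader, the dataset name, and the column name are assumptions for illustration (the `datasets` package must be installed):

from langchain_community.document_loaders import HuggingFaceDatasetLoader

loader = HuggingFaceDatasetLoader(path="imdb", page_content_column="text")
for doc in loader.lazy_load():
    # Remaining columns of each row end up in doc.metadata.
    print(doc.page_content[:80], doc.metadata)
    break  # only peek at the first record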
load_json
try: return json.loads(s) except Exception: return {}
def load_json(s): try: return json.loads(s) except Exception: return {}
null
config_specs
return get_unique_config_specs(spec for step in [self.runnable, *self. fallbacks] for spec in step.config_specs)
@property def config_specs(self) ->List[ConfigurableFieldSpec]: return get_unique_config_specs(spec for step in [self.runnable, *self. fallbacks] for spec in step.config_specs)
null
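A small sketch of where this property comes into play; the RunnableLambda wrappers are illustrative stand-ins for real models:

from langchain_core.runnables import RunnableLambda

primary = RunnableLambda(lambda text: text.upper())
backup = RunnableLambda(lambda text: text.lower())
chain = primary.with_fallbacks([backup])
# config_specs merges and de-duplicates the specs of the primary runnable and
# every fallback, which is what the property above implements. Plain lambdas
# declare no configurable fields, so this prints an empty list here.
print(chain.config_specs)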
test_issues_load
title = 'DocumentLoader for GitHub' loader = GitHubIssuesLoader(repo='langchain-ai/langchain', creator='UmerHA', state='all') docs = loader.load() titles = [d.metadata['title'] for d in docs] assert title in titles assert all(doc.metadata['creator'] == 'UmerHA' for doc in docs)
def test_issues_load() ->None: title = 'DocumentLoader for GitHub' loader = GitHubIssuesLoader(repo='langchain-ai/langchain', creator= 'UmerHA', state='all') docs = loader.load() titles = [d.metadata['title'] for d in docs] assert title in titles assert all(doc.metadata['creator'] == 'UmerHA' for doc in docs)
null
build_extra
"""Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get('model_kwargs', {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f'Found {field_name} supplied twice.') logger.warning( f"""{field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values['model_kwargs'] = extra return values
@root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) ->Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__. values()} extra = values.get('model_kwargs', {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f'Found {field_name} supplied twice.') logger.warning( f"""{field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values['model_kwargs'] = extra return values
Build extra kwargs from additional params that were passed in.
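An illustrative sketch of the behaviour this validator gives model wrappers; SomeChatModel is a hypothetical class assumed to use the validator above:

# Hypothetical wrapper class using the build_extra validator above.
llm = SomeChatModel(temperature=0.2, presence_penalty=0.5)
# 'presence_penalty' is not a declared field, so instead of raising a
# validation error it is moved into model_kwargs (with a warning).
assert llm.model_kwargs == {"presence_penalty": 0.5}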
test_configurable_fields
fake_llm = FakeListLLM(responses=['a'])
assert fake_llm.invoke('...') == 'a'
fake_llm_configurable = fake_llm.configurable_fields(responses=
    ConfigurableField(id='llm_responses', name='LLM Responses', description
    ='A list of fake responses for this LLM'))
assert fake_llm_configurable.invoke('...') == 'a'
assert fake_llm_configurable.config_schema().schema() == {'title':
    'RunnableConfigurableFieldsConfig', 'type': 'object', 'properties': {
    'configurable': {'$ref': '#/definitions/Configurable'}}, 'definitions':
    {'Configurable': {'title': 'Configurable', 'type': 'object',
    'properties': {'llm_responses': {'title': 'LLM Responses',
    'description': 'A list of fake responses for this LLM', 'default': ['a'
    ], 'type': 'array', 'items': {'type': 'string'}}}}}}
fake_llm_configured = fake_llm_configurable.with_config(configurable={
    'llm_responses': ['b']})
assert fake_llm_configured.invoke('...') == 'b'
prompt = PromptTemplate.from_template('Hello, {name}!')
assert prompt.invoke({'name': 'John'}) == StringPromptValue(text='Hello, John!'
    )
prompt_configurable = prompt.configurable_fields(template=ConfigurableField
    (id='prompt_template', name='Prompt Template', description=
    'The prompt template for this chain'))
assert prompt_configurable.invoke({'name': 'John'}) == StringPromptValue(text
    ='Hello, John!')
assert prompt_configurable.config_schema().schema() == {'title':
    'RunnableConfigurableFieldsConfig', 'type': 'object', 'properties': {
    'configurable': {'$ref': '#/definitions/Configurable'}}, 'definitions':
    {'Configurable': {'title': 'Configurable', 'type': 'object',
    'properties': {'prompt_template': {'title': 'Prompt Template',
    'description': 'The prompt template for this chain', 'default':
    'Hello, {name}!', 'type': 'string'}}}}}
prompt_configured = prompt_configurable.with_config(configurable={
    'prompt_template': 'Hello, {name}! {name}!'})
assert prompt_configured.invoke({'name': 'John'}) == StringPromptValue(text
    ='Hello, John! John!')
assert prompt_configurable.with_config(configurable={'prompt_template':
    'Hello {name} in {lang}'}).input_schema.schema() == {'title':
    'PromptInput', 'type': 'object', 'properties': {'lang': {'title':
    'Lang', 'type': 'string'}, 'name': {'title': 'Name', 'type': 'string'}}}
chain_configurable = (prompt_configurable | fake_llm_configurable |
    StrOutputParser())
assert chain_configurable.invoke({'name': 'John'}) == 'a'
assert chain_configurable.config_schema().schema() == {'title':
    'RunnableSequenceConfig', 'type': 'object', 'properties': {
    'configurable': {'$ref': '#/definitions/Configurable'}}, 'definitions':
    {'Configurable': {'title': 'Configurable', 'type': 'object',
    'properties': {'llm_responses': {'title': 'LLM Responses',
    'description': 'A list of fake responses for this LLM', 'default': ['a'
    ], 'type': 'array', 'items': {'type': 'string'}}, 'prompt_template': {
    'title': 'Prompt Template', 'description':
    'The prompt template for this chain', 'default': 'Hello, {name}!',
    'type': 'string'}}}}}
assert chain_configurable.with_config(configurable={'prompt_template':
    'A very good morning to you, {name} {lang}!', 'llm_responses': ['c']}
    ).invoke({'name': 'John', 'lang': 'en'}) == 'c'
assert chain_configurable.with_config(configurable={'prompt_template':
    'A very good morning to you, {name} {lang}!', 'llm_responses': ['c']}
    ).input_schema.schema() == {'title': 'PromptInput', 'type': 'object',
    'properties': {'lang': {'title': 'Lang', 'type': 'string'}, 'name': {
    'title': 'Name', 'type': 'string'}}}
chain_with_map_configurable: Runnable = prompt_configurable | {'llm1':
    fake_llm_configurable | StrOutputParser(), 'llm2':
    fake_llm_configurable | StrOutputParser(), 'llm3': fake_llm.
    configurable_fields(responses=ConfigurableField('other_responses')) |
    StrOutputParser()}
assert chain_with_map_configurable.invoke({'name': 'John'}) == {'llm1': 'a',
    'llm2': 'a', 'llm3': 'a'}
assert chain_with_map_configurable.config_schema().schema() == {'title':
    'RunnableSequenceConfig', 'type': 'object', 'properties': {
    'configurable': {'$ref': '#/definitions/Configurable'}}, 'definitions':
    {'Configurable': {'title': 'Configurable', 'type': 'object',
    'properties': {'llm_responses': {'title': 'LLM Responses',
    'description': 'A list of fake responses for this LLM', 'default': ['a'
    ], 'type': 'array', 'items': {'type': 'string'}}, 'other_responses': {
    'title': 'Other Responses', 'default': ['a'], 'type': 'array', 'items':
    {'type': 'string'}}, 'prompt_template': {'title': 'Prompt Template',
    'description': 'The prompt template for this chain', 'default':
    'Hello, {name}!', 'type': 'string'}}}}}
assert chain_with_map_configurable.with_config(configurable={
    'prompt_template': 'A very good morning to you, {name}!',
    'llm_responses': ['c'], 'other_responses': ['d']}).invoke({'name': 'John'}
    ) == {'llm1': 'c', 'llm2': 'c', 'llm3': 'd'}
def test_configurable_fields() ->None:
    fake_llm = FakeListLLM(responses=['a'])
    assert fake_llm.invoke('...') == 'a'
    fake_llm_configurable = fake_llm.configurable_fields(responses=
        ConfigurableField(id='llm_responses', name='LLM Responses',
        description='A list of fake responses for this LLM'))
    assert fake_llm_configurable.invoke('...') == 'a'
    assert fake_llm_configurable.config_schema().schema() == {'title':
        'RunnableConfigurableFieldsConfig', 'type': 'object', 'properties':
        {'configurable': {'$ref': '#/definitions/Configurable'}},
        'definitions': {'Configurable': {'title': 'Configurable', 'type':
        'object', 'properties': {'llm_responses': {'title': 'LLM Responses',
        'description': 'A list of fake responses for this LLM', 'default':
        ['a'], 'type': 'array', 'items': {'type': 'string'}}}}}}
    fake_llm_configured = fake_llm_configurable.with_config(configurable={
        'llm_responses': ['b']})
    assert fake_llm_configured.invoke('...') == 'b'
    prompt = PromptTemplate.from_template('Hello, {name}!')
    assert prompt.invoke({'name': 'John'}) == StringPromptValue(text=
        'Hello, John!')
    prompt_configurable = prompt.configurable_fields(template=
        ConfigurableField(id='prompt_template', name='Prompt Template',
        description='The prompt template for this chain'))
    assert prompt_configurable.invoke({'name': 'John'}) == StringPromptValue(
        text='Hello, John!')
    assert prompt_configurable.config_schema().schema() == {'title':
        'RunnableConfigurableFieldsConfig', 'type': 'object', 'properties':
        {'configurable': {'$ref': '#/definitions/Configurable'}},
        'definitions': {'Configurable': {'title': 'Configurable', 'type':
        'object', 'properties': {'prompt_template': {'title':
        'Prompt Template', 'description':
        'The prompt template for this chain', 'default': 'Hello, {name}!',
        'type': 'string'}}}}}
    prompt_configured = prompt_configurable.with_config(configurable={
        'prompt_template': 'Hello, {name}! {name}!'})
    assert prompt_configured.invoke({'name': 'John'}) == StringPromptValue(
        text='Hello, John! John!')
    assert prompt_configurable.with_config(configurable={'prompt_template':
        'Hello {name} in {lang}'}).input_schema.schema() == {'title':
        'PromptInput', 'type': 'object', 'properties': {'lang': {'title':
        'Lang', 'type': 'string'}, 'name': {'title': 'Name', 'type': 'string'}}
        }
    chain_configurable = (prompt_configurable | fake_llm_configurable |
        StrOutputParser())
    assert chain_configurable.invoke({'name': 'John'}) == 'a'
    assert chain_configurable.config_schema().schema() == {'title':
        'RunnableSequenceConfig', 'type': 'object', 'properties': {
        'configurable': {'$ref': '#/definitions/Configurable'}},
        'definitions': {'Configurable': {'title': 'Configurable', 'type':
        'object', 'properties': {'llm_responses': {'title': 'LLM Responses',
        'description': 'A list of fake responses for this LLM', 'default':
        ['a'], 'type': 'array', 'items': {'type': 'string'}},
        'prompt_template': {'title': 'Prompt Template', 'description':
        'The prompt template for this chain', 'default': 'Hello, {name}!',
        'type': 'string'}}}}}
    assert chain_configurable.with_config(configurable={'prompt_template':
        'A very good morning to you, {name} {lang}!', 'llm_responses': ['c']}
        ).invoke({'name': 'John', 'lang': 'en'}) == 'c'
    assert chain_configurable.with_config(configurable={'prompt_template':
        'A very good morning to you, {name} {lang}!', 'llm_responses': ['c']}
        ).input_schema.schema() == {'title': 'PromptInput', 'type':
        'object', 'properties': {'lang': {'title': 'Lang', 'type': 'string'
        }, 'name': {'title': 'Name', 'type': 'string'}}}
    chain_with_map_configurable: Runnable = prompt_configurable | {'llm1':
        fake_llm_configurable | StrOutputParser(), 'llm2':
        fake_llm_configurable | StrOutputParser(), 'llm3': fake_llm.
        configurable_fields(responses=ConfigurableField('other_responses')) |
        StrOutputParser()}
    assert chain_with_map_configurable.invoke({'name': 'John'}) == {'llm1':
        'a', 'llm2': 'a', 'llm3': 'a'}
    assert chain_with_map_configurable.config_schema().schema() == {'title':
        'RunnableSequenceConfig', 'type': 'object', 'properties': {
        'configurable': {'$ref': '#/definitions/Configurable'}},
        'definitions': {'Configurable': {'title': 'Configurable', 'type':
        'object', 'properties': {'llm_responses': {'title': 'LLM Responses',
        'description': 'A list of fake responses for this LLM', 'default':
        ['a'], 'type': 'array', 'items': {'type': 'string'}},
        'other_responses': {'title': 'Other Responses', 'default': ['a'],
        'type': 'array', 'items': {'type': 'string'}}, 'prompt_template': {
        'title': 'Prompt Template', 'description':
        'The prompt template for this chain', 'default': 'Hello, {name}!',
        'type': 'string'}}}}}
    assert chain_with_map_configurable.with_config(configurable={
        'prompt_template': 'A very good morning to you, {name}!',
        'llm_responses': ['c'], 'other_responses': ['d']}).invoke({'name':
        'John'}) == {'llm1': 'c', 'llm2': 'c', 'llm3': 'd'}
null
_default_params
"""Get the default parameters for calling ForefrontAI API.""" return {'temperature': self.temperature, 'length': self.length, 'top_p': self.top_p, 'top_k': self.top_k, 'repetition_penalty': self. repetition_penalty}
@property def _default_params(self) ->Mapping[str, Any]: """Get the default parameters for calling ForefrontAI API.""" return {'temperature': self.temperature, 'length': self.length, 'top_p': self.top_p, 'top_k': self.top_k, 'repetition_penalty': self. repetition_penalty}
Get the default parameters for calling ForefrontAI API.
_call
"""RWKV generation Args: prompt: The prompt to pass into the model. stop: A list of strings to stop generation when encountered. Returns: The string generated by the model. Example: .. code-block:: python prompt = "Once upon a time, " response = model(prompt, n_predict=55) """ text = self.rwkv_generate(prompt) if stop is not None: text = enforce_stop_tokens(text, stop) return text
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: """RWKV generation Args: prompt: The prompt to pass into the model. stop: A list of strings to stop generation when encountered. Returns: The string generated by the model. Example: .. code-block:: python prompt = "Once upon a time, " response = model(prompt, n_predict=55) """ text = self.rwkv_generate(prompt) if stop is not None: text = enforce_stop_tokens(text, stop) return text
RWKV generation Args: prompt: The prompt to pass into the model. stop: A list of strings to stop generation when encountered. Returns: The string generated by the model. Example: .. code-block:: python prompt = "Once upon a time, " response = model(prompt, n_predict=55)
similarity_search_with_relevance_scores
"""Run similarity search synchronously and return relevance scores Args: query (str): Query k (int): Number of results to return. Defaults to 4. Returns: List of Documents most similar along with relevance scores """ async def _similarity_search_with_relevance_scores() ->List[Tuple[Document, float]]: await self.initialize() return await self.asimilarity_search_with_relevance_scores(query, k, ** kwargs) return asyncio.run(_similarity_search_with_relevance_scores())
def similarity_search_with_relevance_scores(self, query: str, k: int=4, ** kwargs: Any) ->List[Tuple[Document, float]]: """Run similarity search synchronously and return relevance scores Args: query (str): Query k (int): Number of results to return. Defaults to 4. Returns: List of Documents most similar along with relevance scores """ async def _similarity_search_with_relevance_scores() ->List[Tuple[ Document, float]]: await self.initialize() return await self.asimilarity_search_with_relevance_scores(query, k, **kwargs) return asyncio.run(_similarity_search_with_relevance_scores())
Run similarity search synchronously and return relevance scores Args: query (str): Query k (int): Number of results to return. Defaults to 4. Returns: List of Documents most similar along with relevance scores
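A hedged usage sketch; `store` stands in for any already-initialized vector store instance exposing the method above:

# `store` is an existing vector store instance (assumption for illustration).
results = store.similarity_search_with_relevance_scores("what is a vector store?", k=4)
for doc, score in results:
    # Higher scores indicate greater relevance to the query.
    print(f"{score:.3f}", doc.page_content[:80])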
test_old_sqlite_llm_caching
llm_cache = get_llm_cache() if isinstance(llm_cache, SQLAlchemyCache): prompt = 'How are you?' response = 'Test response' cached_response = 'Cached test response' llm = FakeListLLM(responses=[response]) items = [llm_cache.cache_schema(prompt=prompt, llm=create_llm_string( llm), response=cached_response, idx=0)] with Session(llm_cache.engine) as session, session.begin(): for item in items: session.merge(item) assert llm(prompt) == cached_response
def test_old_sqlite_llm_caching() ->None: llm_cache = get_llm_cache() if isinstance(llm_cache, SQLAlchemyCache): prompt = 'How are you?' response = 'Test response' cached_response = 'Cached test response' llm = FakeListLLM(responses=[response]) items = [llm_cache.cache_schema(prompt=prompt, llm= create_llm_string(llm), response=cached_response, idx=0)] with Session(llm_cache.engine) as session, session.begin(): for item in items: session.merge(item) assert llm(prompt) == cached_response
null
search
"""Search with the retriever.""" return retriever.get_relevant_documents(query)
@tool def search(query): """Search with the retriever.""" return retriever.get_relevant_documents(query)
Search with the retriever.
on_tool_end
self._require_current_thought().on_tool_end(output, color, observation_prefix, llm_prefix, **kwargs) self._complete_current_thought()
def on_tool_end(self, output: str, color: Optional[str]=None, observation_prefix: Optional[str]=None, llm_prefix: Optional[str]=None, **kwargs: Any) ->None: self._require_current_thought().on_tool_end(output, color, observation_prefix, llm_prefix, **kwargs) self._complete_current_thought()
null
__init__
self.password = password self.extract_images = extract_images
def __init__(self, password: Optional[Union[str, bytes]]=None, extract_images: bool=False): self.password = password self.extract_images = extract_images
null
as_field
from redis.commands.search.field import NumericField return NumericField(self.name, sortable=self.sortable, no_index=self.no_index)
def as_field(self) ->NumericField: from redis.commands.search.field import NumericField return NumericField(self.name, sortable=self.sortable, no_index=self. no_index)
null
qdrant_locations
if qdrant_is_not_running(): logger.warning('Running Qdrant async tests in memory mode only.') return [':memory:'] return ['http://localhost:6333', ':memory:']
def qdrant_locations() ->List[str]: if qdrant_is_not_running(): logger.warning('Running Qdrant async tests in memory mode only.') return [':memory:'] return ['http://localhost:6333', ':memory:']
null
_load_entry
import fitz parent_dir = Path(self.file_path).parent file_names = self.file_regex.findall(entry.get('file', '')) if not file_names: return None texts: List[str] = [] for file_name in file_names: try: with fitz.open(parent_dir / file_name) as f: texts.extend(page.get_text() for page in f) except FileNotFoundError as e: logger.debug(e) content = '\n'.join(texts) or entry.get('abstract', '') if self.max_content_chars: content = content[:self.max_content_chars] metadata = self.parser.get_metadata(entry, load_extra=self.load_extra_metadata) return Document(page_content=content, metadata=metadata)
def _load_entry(self, entry: Mapping[str, Any]) ->Optional[Document]: import fitz parent_dir = Path(self.file_path).parent file_names = self.file_regex.findall(entry.get('file', '')) if not file_names: return None texts: List[str] = [] for file_name in file_names: try: with fitz.open(parent_dir / file_name) as f: texts.extend(page.get_text() for page in f) except FileNotFoundError as e: logger.debug(e) content = '\n'.join(texts) or entry.get('abstract', '') if self.max_content_chars: content = content[:self.max_content_chars] metadata = self.parser.get_metadata(entry, load_extra=self. load_extra_metadata) return Document(page_content=content, metadata=metadata)
null
delete_by_document_id
""" Remove a single document from the store, given its document_id (str). Return True if a document has indeed been deleted, False if ID not found. """ deletion_response = self.collection.delete(document_id) return ((deletion_response or {}).get('status') or {}).get('deletedCount', 0 ) == 1
def delete_by_document_id(self, document_id: str) ->bool: """ Remove a single document from the store, given its document_id (str). Return True if a document has indeed been deleted, False if ID not found. """ deletion_response = self.collection.delete(document_id) return ((deletion_response or {}).get('status') or {}).get('deletedCount', 0) == 1
Remove a single document from the store, given its document_id (str). Return True if a document has indeed been deleted, False if ID not found.
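A short usage sketch; `vstore` and the document id are assumptions for illustration:

# `vstore` is an existing store backed by the collection used above.
if vstore.delete_by_document_id("doc-123"):
    print("document removed")
else:
    print("no document with that id")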
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'schema', 'runnable']
@classmethod def get_lc_namespace(cls) ->List[str]: """Get the namespace of the langchain object.""" return ['langchain', 'schema', 'runnable']
Get the namespace of the langchain object.
from_texts
"""Construct a `MongoDB Atlas Vector Search` vector store from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Adds the documents to a provided MongoDB Atlas Vector Search index (Lucene) This is intended to be a quick way to get started. Example: .. code-block:: python from pymongo import MongoClient from langchain_community.vectorstores import MongoDBAtlasVectorSearch from langchain_community.embeddings import OpenAIEmbeddings mongo_client = MongoClient("<YOUR-CONNECTION-STRING>") collection = mongo_client["<db_name>"]["<collection_name>"] embeddings = OpenAIEmbeddings() vectorstore = MongoDBAtlasVectorSearch.from_texts( texts, embeddings, metadatas=metadatas, collection=collection ) """ if collection is None: raise ValueError("Must provide 'collection' named parameter.") vectorstore = cls(collection, embedding, **kwargs) vectorstore.add_texts(texts, metadatas=metadatas) return vectorstore
@classmethod def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[Dict]]=None, collection: Optional[Collection[ MongoDBDocumentType]]=None, **kwargs: Any) ->MongoDBAtlasVectorSearch: """Construct a `MongoDB Atlas Vector Search` vector store from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Adds the documents to a provided MongoDB Atlas Vector Search index (Lucene) This is intended to be a quick way to get started. Example: .. code-block:: python from pymongo import MongoClient from langchain_community.vectorstores import MongoDBAtlasVectorSearch from langchain_community.embeddings import OpenAIEmbeddings mongo_client = MongoClient("<YOUR-CONNECTION-STRING>") collection = mongo_client["<db_name>"]["<collection_name>"] embeddings = OpenAIEmbeddings() vectorstore = MongoDBAtlasVectorSearch.from_texts( texts, embeddings, metadatas=metadatas, collection=collection ) """ if collection is None: raise ValueError("Must provide 'collection' named parameter.") vectorstore = cls(collection, embedding, **kwargs) vectorstore.add_texts(texts, metadatas=metadatas) return vectorstore
Construct a `MongoDB Atlas Vector Search` vector store from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Adds the documents to a provided MongoDB Atlas Vector Search index (Lucene) This is intended to be a quick way to get started. Example: .. code-block:: python from pymongo import MongoClient from langchain_community.vectorstores import MongoDBAtlasVectorSearch from langchain_community.embeddings import OpenAIEmbeddings mongo_client = MongoClient("<YOUR-CONNECTION-STRING>") collection = mongo_client["<db_name>"]["<collection_name>"] embeddings = OpenAIEmbeddings() vectorstore = MongoDBAtlasVectorSearch.from_texts( texts, embeddings, metadatas=metadatas, collection=collection )
buffer_as_str
"""Exposes the buffer as a string in case return_messages is True.""" return get_buffer_string(self.chat_memory.messages, human_prefix=self. human_prefix, ai_prefix=self.ai_prefix)
@property def buffer_as_str(self) ->str: """Exposes the buffer as a string in case return_messages is True.""" return get_buffer_string(self.chat_memory.messages, human_prefix=self. human_prefix, ai_prefix=self.ai_prefix)
Exposes the buffer as a string in case return_messages is True.
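A usage sketch with ConversationBufferMemory, one memory class that exposes this property (the class choice is an assumption; any memory with the same mixin works the same way):

from langchain.memory import ConversationBufferMemory

memory = ConversationBufferMemory()
memory.chat_memory.add_user_message("Hi there")
memory.chat_memory.add_ai_message("Hello! How can I help?")
# Renders the stored messages with the default "Human"/"AI" prefixes.
print(memory.buffer_as_str)
# Human: Hi there
# AI: Hello! How can I help?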
_create_description_from_template
values['description'] = values['template'].format(name=values['name']) return values
def _create_description_from_template(values: Dict[str, Any]) ->Dict[str, Any]: values['description'] = values['template'].format(name=values['name']) return values
null
test_transcription_error
mocker.patch('assemblyai.Transcriber.transcribe', return_value=mocker. MagicMock(error='Test error')) loader = AssemblyAIAudioTranscriptLoader(file_path='./testfile.mp3', api_key='api_key') expected_error = 'Could not transcribe file: Test error' with pytest.raises(ValueError, match=expected_error): loader.load()
@pytest.mark.requires('assemblyai') def test_transcription_error(mocker: MockerFixture) ->None: mocker.patch('assemblyai.Transcriber.transcribe', return_value=mocker. MagicMock(error='Test error')) loader = AssemblyAIAudioTranscriptLoader(file_path='./testfile.mp3', api_key='api_key') expected_error = 'Could not transcribe file: Test error' with pytest.raises(ValueError, match=expected_error): loader.load()
null
level
"""Return the current level of the stack.""" return len(self.stack)
@property def level(self) ->int: """Return the current level of the stack.""" return len(self.stack)
Return the current level of the stack.
test_deduplication
"""Check edge case when loader returns no new docs.""" docs = [Document(page_content='This is a test document.', metadata={ 'source': '1'}), Document(page_content='This is a test document.', metadata={'source': '1'})] assert index(docs, record_manager, vector_store, cleanup='full') == { 'num_added': 1, 'num_deleted': 0, 'num_skipped': 0, 'num_updated': 0}
def test_deduplication(record_manager: SQLRecordManager, vector_store: VectorStore) ->None: """Check edge case when loader returns no new docs.""" docs = [Document(page_content='This is a test document.', metadata={ 'source': '1'}), Document(page_content='This is a test document.', metadata={'source': '1'})] assert index(docs, record_manager, vector_store, cleanup='full') == { 'num_added': 1, 'num_deleted': 0, 'num_skipped': 0, 'num_updated': 0}
Check edge case when loader returns no new docs.
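A sketch of the indexing call this test exercises; the record manager and vector store are assumed to exist already, as in the test fixtures:

from langchain.indexes import index
from langchain_core.documents import Document

docs = [
    Document(page_content="This is a test document.", metadata={"source": "1"}),
    Document(page_content="This is a test document.", metadata={"source": "1"}),
]
# Identical (content, metadata) pairs hash to the same key, so only one is added.
print(index(docs, record_manager, vector_store, cleanup="full"))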
test_vertex_call_count_tokens
llm = VertexAI(model_name=model_name) output = llm.get_num_tokens('How are you?') assert output == 4
@pytest.mark.parametrize('model_name', model_names_to_test) def test_vertex_call_count_tokens(model_name: str) ->None: llm = VertexAI(model_name=model_name) output = llm.get_num_tokens('How are you?') assert output == 4
null
split_text
"""Split markdown file Args: text: Markdown file""" lines = text.split('\n') lines_with_metadata: List[LineType] = [] current_content: List[str] = [] current_metadata: Dict[str, str] = {} header_stack: List[HeaderType] = [] initial_metadata: Dict[str, str] = {} in_code_block = False opening_fence = '' for line in lines: stripped_line = line.strip() if not in_code_block: if stripped_line.startswith('```') and stripped_line.count('```') == 1: in_code_block = True opening_fence = '```' elif stripped_line.startswith('~~~'): in_code_block = True opening_fence = '~~~' elif stripped_line.startswith(opening_fence): in_code_block = False opening_fence = '' if in_code_block: current_content.append(stripped_line) continue for sep, name in self.headers_to_split_on: if stripped_line.startswith(sep) and (len(stripped_line) == len(sep ) or stripped_line[len(sep)] == ' '): if name is not None: current_header_level = sep.count('#') while header_stack and header_stack[-1]['level' ] >= current_header_level: popped_header = header_stack.pop() if popped_header['name'] in initial_metadata: initial_metadata.pop(popped_header['name']) header: HeaderType = {'level': current_header_level, 'name': name, 'data': stripped_line[len(sep):].strip()} header_stack.append(header) initial_metadata[name] = header['data'] if current_content: lines_with_metadata.append({'content': '\n'.join( current_content), 'metadata': current_metadata.copy()}) current_content.clear() if not self.strip_headers: current_content.append(stripped_line) break else: if stripped_line: current_content.append(stripped_line) elif current_content: lines_with_metadata.append({'content': '\n'.join( current_content), 'metadata': current_metadata.copy()}) current_content.clear() current_metadata = initial_metadata.copy() if current_content: lines_with_metadata.append({'content': '\n'.join(current_content), 'metadata': current_metadata}) if not self.return_each_line: return self.aggregate_lines_to_chunks(lines_with_metadata) else: return [Document(page_content=chunk['content'], metadata=chunk[ 'metadata']) for chunk in lines_with_metadata]
def split_text(self, text: str) ->List[Document]: """Split markdown file Args: text: Markdown file""" lines = text.split('\n') lines_with_metadata: List[LineType] = [] current_content: List[str] = [] current_metadata: Dict[str, str] = {} header_stack: List[HeaderType] = [] initial_metadata: Dict[str, str] = {} in_code_block = False opening_fence = '' for line in lines: stripped_line = line.strip() if not in_code_block: if stripped_line.startswith('```') and stripped_line.count('```' ) == 1: in_code_block = True opening_fence = '```' elif stripped_line.startswith('~~~'): in_code_block = True opening_fence = '~~~' elif stripped_line.startswith(opening_fence): in_code_block = False opening_fence = '' if in_code_block: current_content.append(stripped_line) continue for sep, name in self.headers_to_split_on: if stripped_line.startswith(sep) and (len(stripped_line) == len (sep) or stripped_line[len(sep)] == ' '): if name is not None: current_header_level = sep.count('#') while header_stack and header_stack[-1]['level' ] >= current_header_level: popped_header = header_stack.pop() if popped_header['name'] in initial_metadata: initial_metadata.pop(popped_header['name']) header: HeaderType = {'level': current_header_level, 'name': name, 'data': stripped_line[len(sep):].strip()} header_stack.append(header) initial_metadata[name] = header['data'] if current_content: lines_with_metadata.append({'content': '\n'.join( current_content), 'metadata': current_metadata.copy()}) current_content.clear() if not self.strip_headers: current_content.append(stripped_line) break else: if stripped_line: current_content.append(stripped_line) elif current_content: lines_with_metadata.append({'content': '\n'.join( current_content), 'metadata': current_metadata.copy()}) current_content.clear() current_metadata = initial_metadata.copy() if current_content: lines_with_metadata.append({'content': '\n'.join(current_content), 'metadata': current_metadata}) if not self.return_each_line: return self.aggregate_lines_to_chunks(lines_with_metadata) else: return [Document(page_content=chunk['content'], metadata=chunk[ 'metadata']) for chunk in lines_with_metadata]
Split markdown file Args: text: Markdown file
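A usage sketch of the header-based splitter above; the header tuples and sample markdown are illustrative:

from langchain.text_splitter import MarkdownHeaderTextSplitter

markdown = "# Title\n\nIntro paragraph.\n\n## Section\n\nSection body."
splitter = MarkdownHeaderTextSplitter(
    headers_to_split_on=[("#", "Header 1"), ("##", "Header 2")]
)
for chunk in splitter.split_text(markdown):
    # Each chunk carries the headers above it as metadata.
    print(chunk.metadata, "->", chunk.page_content)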
test_endpoint
"""Test user custom model deployments like some open source models.""" chat = QianfanChatEndpoint(endpoint='qianfan_bloomz_7b_compressed') response = chat(messages=[HumanMessage(content='Hello')]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str)
def test_endpoint() ->None: """Test user custom model deployments like some open source models.""" chat = QianfanChatEndpoint(endpoint='qianfan_bloomz_7b_compressed') response = chat(messages=[HumanMessage(content='Hello')]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str)
Test user custom model deployments like some open source models.
test_pydantic_output_parser
"""Test PydanticOutputParser.""" pydantic_parser: PydanticOutputParser[TestModel] = PydanticOutputParser( pydantic_object=TestModel) result = pydantic_parser.parse_folder(DEF_RESULT) print('parse_result:', result) assert DEF_EXPECTED_RESULT == result
def test_pydantic_output_parser() ->None: """Test PydanticOutputParser.""" pydantic_parser: PydanticOutputParser[TestModel] = PydanticOutputParser( pydantic_object=TestModel) result = pydantic_parser.parse(DEF_RESULT) print('parse_result:', result) assert DEF_EXPECTED_RESULT == result
Test PydanticOutputParser.
_import_searx_search_tool_SearxSearchRun
from langchain_community.tools.searx_search.tool import SearxSearchRun return SearxSearchRun
def _import_searx_search_tool_SearxSearchRun() ->Any: from langchain_community.tools.searx_search.tool import SearxSearchRun return SearxSearchRun
null
gen_vector
return [random() for _ in range(VECTOR_DIMS)]
def gen_vector() ->List[float]: return [random() for _ in range(VECTOR_DIMS)]
null