method_name | method_body | full_code | docstring |
---|---|---|---|
test_colored_object_prompt | """Test colored object prompt."""
llm = OpenAI(temperature=0, max_tokens=512)
pal_chain = PALChain.from_colored_object_prompt(llm, timeout=None)
question = (
'On the desk, you see two blue booklets, two purple booklets, and two yellow pairs of sunglasses. If I remove all the pairs of sunglasses from the desk, how many purple items remain on it?'
)
output = pal_chain.run(question)
assert output == '2' | def test_colored_object_prompt() ->None:
"""Test colored object prompt."""
llm = OpenAI(temperature=0, max_tokens=512)
pal_chain = PALChain.from_colored_object_prompt(llm, timeout=None)
question = (
'On the desk, you see two blue booklets, two purple booklets, and two yellow pairs of sunglasses. If I remove all the pairs of sunglasses from the desk, how many purple items remain on it?'
)
output = pal_chain.run(question)
assert output == '2' | Test colored object prompt. |
__init__ | """Create a new TextSplitter."""
super().__init__(keep_separator=keep_separator, **kwargs)
self._separators = separators or ['\n\n', '\n', ' ', '']
self._is_separator_regex = is_separator_regex | def __init__(self, separators: Optional[List[str]]=None, keep_separator:
bool=True, is_separator_regex: bool=False, **kwargs: Any) ->None:
"""Create a new TextSplitter."""
super().__init__(keep_separator=keep_separator, **kwargs)
self._separators = separators or ['\n\n', '\n', ' ', '']
self._is_separator_regex = is_separator_regex | Create a new TextSplitter. |
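The `__init__` above defines the default separator hierarchy (`'\n\n'`, `'\n'`, `' '`, `''`). A minimal usage sketch, assuming it belongs to LangChain's `RecursiveCharacterTextSplitter`; the sample text and chunk sizes are illustrative:

```python
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Tries "\n\n" first, then "\n", then " ", and finally falls back to
# character-level splits - the same default separator list as above.
splitter = RecursiveCharacterTextSplitter(
    separators=["\n\n", "\n", " ", ""],
    chunk_size=100,
    chunk_overlap=20,
)
chunks = splitter.split_text(
    "First paragraph about splitting.\n\nSecond paragraph with a few more words."
)
print(chunks)
```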
test_embedchain_retriever | retriever = EmbedchainRetriever.create()
texts = ['This document is about John']
for text in texts:
retriever.add_texts(text)
docs = retriever.get_relevant_documents('doc about john')
assert len(docs) == 1
for doc in docs:
assert isinstance(doc, Document)
assert doc.page_content
assert doc.metadata
assert len(list(doc.metadata.items())) > 0 | @pytest.mark.requires('embedchain')
@patch.object(Pipeline, 'search', return_value=context_value)
@patch.object(Pipeline, 'add', return_value=123)
def test_embedchain_retriever(mock_add: Any, mock_search: Any) ->None:
retriever = EmbedchainRetriever.create()
texts = ['This document is about John']
for text in texts:
retriever.add_texts(text)
docs = retriever.get_relevant_documents('doc about john')
assert len(docs) == 1
for doc in docs:
assert isinstance(doc, Document)
assert doc.page_content
assert doc.metadata
assert len(list(doc.metadata.items())) > 0 | null |
__init__ | """Initialize with geopandas Dataframe.
Args:
data_frame: geopandas DataFrame object.
page_content_column: Name of the column containing the page content.
Defaults to "geometry".
"""
try:
import geopandas as gpd
except ImportError:
raise ImportError(
'geopandas package not found, please install it with `pip install geopandas`'
)
if not isinstance(data_frame, gpd.GeoDataFrame):
raise ValueError(
f'Expected data_frame to be a gpd.GeoDataFrame, got {type(data_frame)}'
)
if page_content_column not in data_frame.columns:
raise ValueError(
f'Expected data_frame to have a column named {page_content_column}')
if not isinstance(data_frame[page_content_column], gpd.GeoSeries):
raise ValueError(
f'Expected data_frame[{page_content_column}] to be a GeoSeries')
self.data_frame = data_frame
self.page_content_column = page_content_column | def __init__(self, data_frame: Any, page_content_column: str='geometry'):
"""Initialize with geopandas Dataframe.
Args:
data_frame: geopandas DataFrame object.
page_content_column: Name of the column containing the page content.
Defaults to "geometry".
"""
try:
import geopandas as gpd
except ImportError:
raise ImportError(
'geopandas package not found, please install it with `pip install geopandas`'
)
if not isinstance(data_frame, gpd.GeoDataFrame):
raise ValueError(
f'Expected data_frame to be a gpd.GeoDataFrame, got {type(data_frame)}'
)
if page_content_column not in data_frame.columns:
raise ValueError(
f'Expected data_frame to have a column named {page_content_column}'
)
if not isinstance(data_frame[page_content_column], gpd.GeoSeries):
raise ValueError(
f'Expected data_frame[{page_content_column}] to be a GeoSeries')
self.data_frame = data_frame
self.page_content_column = page_content_column | Initialize with geopandas Dataframe.
Args:
data_frame: geopandas DataFrame object.
page_content_column: Name of the column containing the page content.
Defaults to "geometry". |
__repr__ | """Text representation for ClickHouse Vector Store, prints backends, username
and schemas. Easy to use with `str(ClickHouse())`
Returns:
repr: string to show connection info and data schema
"""
_repr = f'\x1b[92m\x1b[1m{self.config.database}.{self.config.table} @ '
_repr += f'{self.config.host}:{self.config.port}\x1b[0m\n\n'
_repr += f"""[1musername: {self.config.username}[0m
Table Schema:
"""
_repr += '-' * 51 + '\n'
for r in self.client.query(f'DESC {self.config.database}.{self.config.table}'
).named_results():
_repr += (
f"|\x1b[94m{r['name']:24s}\x1b[0m|\x1b[96m{r['type']:24s}\x1b[0m|\n")
_repr += '-' * 51 + '\n'
return _repr | def __repr__(self) ->str:
"""Text representation for ClickHouse Vector Store, prints backends, username
and schemas. Easy to use with `str(ClickHouse())`
Returns:
repr: string to show connection info and data schema
"""
_repr = f'\x1b[92m\x1b[1m{self.config.database}.{self.config.table} @ '
_repr += f'{self.config.host}:{self.config.port}\x1b[0m\n\n'
_repr += (
f'\x1b[1musername: {self.config.username}\x1b[0m\n\nTable Schema:\n')
_repr += '-' * 51 + '\n'
for r in self.client.query(
f'DESC {self.config.database}.{self.config.table}').named_results():
_repr += (
f"|\x1b[94m{r['name']:24s}\x1b[0m|\x1b[96m{r['type']:24s}\x1b[0m|\n"
)
_repr += '-' * 51 + '\n'
return _repr | Text representation for ClickHouse Vector Store, prints backends, username
and schemas. Easy to use with `str(ClickHouse())`
Returns:
repr: string to show connection info and data schema |
delete_through_llm | """
A wrapper around `delete` with the LLM being passed.
In case the llm(prompt) calls have a `stop` param, you should pass it here
"""
llm_string = get_prompts({**llm.dict(), **{'stop': stop}}, [])[1]
return self.delete(prompt, llm_string=llm_string) | def delete_through_llm(self, prompt: str, llm: LLM, stop: Optional[List[str
]]=None) ->None:
"""
A wrapper around `delete` with the LLM being passed.
In case the llm(prompt) calls have a `stop` param, you should pass it here
"""
llm_string = get_prompts({**llm.dict(), **{'stop': stop}}, [])[1]
return self.delete(prompt, llm_string=llm_string) | A wrapper around `delete` with the LLM being passed.
In case the llm(prompt) calls have a `stop` param, you should pass it here |
convert_prompt | return self._convert_messages_to_prompt(prompt.to_messages()) | def convert_prompt(self, prompt: PromptValue) ->str:
return self._convert_messages_to_prompt(prompt.to_messages()) | null |
__init__ | """Initialize with path."""
self.file_path = path | def __init__(self, path: str):
"""Initialize with path."""
self.file_path = path | Initialize with path. |
parse_date | if date_string is None:
return None
time_format = '%a %b %d %H:%M:%S %Y %z'
return datetime.strptime(date_string, time_format) | def parse_date(date_string: str) ->datetime:
if date_string is None:
return None
time_format = '%a %b %d %H:%M:%S %Y %z'
return datetime.strptime(date_string, time_format) | null |
_import_azure_cognitive_services_AzureCogsTextAnalyticsHealthTool | from langchain_community.tools.azure_cognitive_services import AzureCogsTextAnalyticsHealthTool
return AzureCogsTextAnalyticsHealthTool | def _import_azure_cognitive_services_AzureCogsTextAnalyticsHealthTool() ->Any:
from langchain_community.tools.azure_cognitive_services import AzureCogsTextAnalyticsHealthTool
return AzureCogsTextAnalyticsHealthTool | null |
test_selector_add_example | """Test LengthBasedExampleSelector can add an example."""
new_example = {'question': """Question: what are you?
Answer: bar"""}
selector.add_example(new_example)
short_question = 'Short question?'
output = selector.select_examples({'question': short_question})
assert output == EXAMPLES + [new_example] | def test_selector_add_example(selector: LengthBasedExampleSelector) ->None:
"""Test LengthBasedExampleSelector can add an example."""
new_example = {'question': 'Question: what are you?\nAnswer: bar'}
selector.add_example(new_example)
short_question = 'Short question?'
output = selector.select_examples({'question': short_question})
assert output == EXAMPLES + [new_example] | Test LengthBasedExampleSelector can add an example. |
get_name | name = (name or self.name or
f"RunnablePick<{','.join([self.keys] if isinstance(self.keys, str) else self.keys)}>"
)
return super().get_name(suffix, name=name) | def get_name(self, suffix: Optional[str]=None, *, name: Optional[str]=None
) ->str:
name = (name or self.name or
f"RunnablePick<{','.join([self.keys] if isinstance(self.keys, str) else self.keys)}>"
)
return super().get_name(suffix, name=name) | null |
get_lc_namespace | """Get the namespace of the langchain object."""
return ['langchain', 'schema', 'messages'] | @classmethod
def get_lc_namespace(cls) ->List[str]:
"""Get the namespace of the langchain object."""
return ['langchain', 'schema', 'messages'] | Get the namespace of the langchain object. |
_create_retry_decorator | """Returns a tenacity retry decorator."""
multiplier = 1
min_seconds = 1
max_seconds = 4
max_retries = 6
return retry(reraise=True, stop=stop_after_attempt(max_retries), wait=
wait_exponential(multiplier=multiplier, min=min_seconds, max=
max_seconds), before_sleep=before_sleep_log(logger, logging.WARNING)) | def _create_retry_decorator() ->Callable[[Any], Any]:
"""Returns a tenacity retry decorator."""
multiplier = 1
min_seconds = 1
max_seconds = 4
max_retries = 6
return retry(reraise=True, stop=stop_after_attempt(max_retries), wait=
wait_exponential(multiplier=multiplier, min=min_seconds, max=
max_seconds), before_sleep=before_sleep_log(logger, logging.WARNING)) | Returns a tenacity retry decorator. |
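The decorator built above is plain `tenacity`; a minimal sketch of wrapping a flaky call with the same settings (the `call_api` function is hypothetical):

```python
import logging
from tenacity import retry, stop_after_attempt, wait_exponential, before_sleep_log

logger = logging.getLogger(__name__)

retry_decorator = retry(
    reraise=True,
    stop=stop_after_attempt(6),
    wait=wait_exponential(multiplier=1, min=1, max=4),
    before_sleep=before_sleep_log(logger, logging.WARNING),
)

@retry_decorator
def call_api() -> str:
    # Hypothetical flaky call; tenacity retries it up to 6 times with
    # exponential backoff capped between 1 and 4 seconds.
    ...
```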
from_texts | """Construct Typesense wrapper from raw text."""
if typesense_client:
vectorstore = cls(typesense_client, embedding, **kwargs)
elif typesense_client_params:
vectorstore = cls.from_client_params(embedding, **
typesense_client_params, **kwargs)
else:
raise ValueError(
'Must specify one of typesense_client or typesense_client_params.')
vectorstore.add_texts(texts, metadatas=metadatas, ids=ids)
return vectorstore | @classmethod
def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas:
Optional[List[dict]]=None, ids: Optional[List[str]]=None,
typesense_client: Optional[Client]=None, typesense_client_params:
Optional[dict]=None, typesense_collection_name: Optional[str]=None,
text_key: str='text', **kwargs: Any) ->Typesense:
"""Construct Typesense wrapper from raw text."""
if typesense_client:
vectorstore = cls(typesense_client, embedding, **kwargs)
elif typesense_client_params:
vectorstore = cls.from_client_params(embedding, **
typesense_client_params, **kwargs)
else:
raise ValueError(
'Must specify one of typesense_client or typesense_client_params.')
vectorstore.add_texts(texts, metadatas=metadatas, ids=ids)
return vectorstore | Construct Typesense wrapper from raw text. |
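A usage sketch for the `from_texts` constructor above; the client parameter names (`host`, `port`, `protocol`, `typesense_api_key`) and their values are assumptions about `from_client_params`, and `FakeEmbeddings` stands in for a real embedding model:

```python
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import Typesense

vectorstore = Typesense.from_texts(
    texts=["hello world", "goodbye world"],
    embedding=FakeEmbeddings(size=128),  # swap in a real embedding model
    typesense_client_params={
        "host": "localhost",          # assumed parameter names/values
        "port": "8108",
        "protocol": "http",
        "typesense_api_key": "xyz",
    },
)
```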
_get_elements | from unstructured.partition.org import partition_org
return partition_org(filename=self.file_path, **self.unstructured_kwargs) | def _get_elements(self) ->List:
from unstructured.partition.org import partition_org
return partition_org(filename=self.file_path, **self.unstructured_kwargs) | null |
test_sequential_usage_multiple_outputs | """Test sequential usage on multiple output chains."""
chain_1 = FakeChain(input_variables=['foo'], output_variables=['bar', 'test'])
chain_2 = FakeChain(input_variables=['bar', 'foo'], output_variables=['baz'])
chain = SequentialChain(chains=[chain_1, chain_2], input_variables=['foo'])
output = chain({'foo': '123'})
expected_output = {'baz': '123foo 123foo', 'foo': '123'}
assert output == expected_output | def test_sequential_usage_multiple_outputs() ->None:
"""Test sequential usage on multiple output chains."""
chain_1 = FakeChain(input_variables=['foo'], output_variables=['bar',
'test'])
chain_2 = FakeChain(input_variables=['bar', 'foo'], output_variables=[
'baz'])
chain = SequentialChain(chains=[chain_1, chain_2], input_variables=['foo'])
output = chain({'foo': '123'})
expected_output = {'baz': '123foo 123foo', 'foo': '123'}
assert output == expected_output | Test sequential usage on multiple output chains. |
_call | """Call out to Minimax's completion endpoint to chat
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = minimax("Tell me a joke.")
"""
request = self._default_params
request['messages'] = [{'sender_type': 'USER', 'text': prompt}]
request.update(kwargs)
text = self._client.post(request)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text | def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
"""Call out to Minimax's completion endpoint to chat
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = minimax("Tell me a joke.")
"""
request = self._default_params
request['messages'] = [{'sender_type': 'USER', 'text': prompt}]
request.update(kwargs)
text = self._client.post(request)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text | Call out to Minimax's completion endpoint to chat
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = minimax("Tell me a joke.") |
embed_documents | """Generate embeddings for documents using FastEmbed.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings: List[np.ndarray]
if self.doc_embed_type == 'passage':
embeddings = self._model.passage_embed(texts)
else:
embeddings = self._model.embed(texts)
return [e.tolist() for e in embeddings] | def embed_documents(self, texts: List[str]) ->List[List[float]]:
"""Generate embeddings for documents using FastEmbed.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings: List[np.ndarray]
if self.doc_embed_type == 'passage':
embeddings = self._model.passage_embed(texts)
else:
embeddings = self._model.embed(texts)
return [e.tolist() for e in embeddings] | Generate embeddings for documents using FastEmbed.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text. |
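A short sketch of calling the method above, assuming LangChain's `FastEmbedEmbeddings` wrapper (requires the `fastembed` package); the inputs are illustrative:

```python
from langchain_community.embeddings import FastEmbedEmbeddings

embeddings = FastEmbedEmbeddings(doc_embed_type="passage")  # or the default "default" mode
vectors = embeddings.embed_documents(["This document is about John"])
query_vector = embeddings.embed_query("doc about john")
print(len(vectors), len(vectors[0]))  # one vector per input text
```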
test_marqo_add_texts | marqo_search = Marqo(client=client, index_name=INDEX_NAME)
ids1 = marqo_search.add_texts(['1', '2', '3'])
assert len(ids1) == 3
ids2 = marqo_search.add_texts(['1', '2', '3'])
assert len(ids2) == 3
assert len(set(ids1).union(set(ids2))) == 6 | def test_marqo_add_texts(client: Marqo) ->None:
marqo_search = Marqo(client=client, index_name=INDEX_NAME)
ids1 = marqo_search.add_texts(['1', '2', '3'])
assert len(ids1) == 3
ids2 = marqo_search.add_texts(['1', '2', '3'])
assert len(ids2) == 3
assert len(set(ids1).union(set(ids2))) == 6 | null |
_on_retriever_end | """Process the Retriever Run.""" | def _on_retriever_end(self, run: Run) ->None:
"""Process the Retriever Run.""" | Process the Retriever Run. |
set_cluster_id | if v and values['endpoint_name']:
raise ValueError('Cannot set both endpoint_name and cluster_id.')
elif values['endpoint_name']:
return None
elif v:
return v
else:
try:
if (v := get_repl_context().clusterId):
return v
raise ValueError("Context doesn't contain clusterId.")
except Exception as e:
raise ValueError(
f'Neither endpoint_name nor cluster_id was set. And the cluster_id cannot be automatically determined. Received error: {e}'
) | @validator('cluster_id', always=True)
def set_cluster_id(cls, v: Any, values: Dict[str, Any]) ->Optional[str]:
if v and values['endpoint_name']:
raise ValueError('Cannot set both endpoint_name and cluster_id.')
elif values['endpoint_name']:
return None
elif v:
return v
else:
try:
if (v := get_repl_context().clusterId):
return v
raise ValueError("Context doesn't contain clusterId.")
except Exception as e:
raise ValueError(
f'Neither endpoint_name nor cluster_id was set. And the cluster_id cannot be automatically determined. Received error: {e}'
) | null |
test_valid_action_and_action_input_parse | llm_output = """I can use the `foo` tool to achieve the goal.
Action: foo
Action Input: bar"""
agent_action: AgentAction = mrkl_output_parser.parse_folder(llm_output)
assert agent_action.tool == 'foo'
assert agent_action.tool_input == 'bar' | def test_valid_action_and_action_input_parse() ->None:
llm_output = """I can use the `foo` tool to achieve the goal.
Action: foo
Action Input: bar"""
agent_action: AgentAction = mrkl_output_parser.parse_folder(llm_output)
assert agent_action.tool == 'foo'
assert agent_action.tool_input == 'bar' | null |
test_no_arguments_to_delete_by_id | with pytest.raises(Exception) as exception_info:
self.invoke_delete_by_id_with_no_args(azure_openai_embeddings, collection)
assert str(exception_info.value) == 'No document id provided to delete.' | def test_no_arguments_to_delete_by_id(self, azure_openai_embeddings:
OpenAIEmbeddings, collection: Any) ->None:
with pytest.raises(Exception) as exception_info:
self.invoke_delete_by_id_with_no_args(azure_openai_embeddings,
collection)
assert str(exception_info.value) == 'No document id provided to delete.' | null |
from_chains | """User friendly way to initialize the MRKL chain.
This is intended to be an easy way to get up and running with the
MRKL chain.
Args:
llm: The LLM to use as the agent LLM.
chains: The chains the MRKL system has access to.
**kwargs: parameters to be passed to initialization.
Returns:
An initialized MRKL chain.
"""
tools = [Tool(name=c.action_name, func=c.action, description=c.
action_description) for c in chains]
agent = ZeroShotAgent.from_llm_and_tools(llm, tools)
return cls(agent=agent, tools=tools, **kwargs) | @classmethod
def from_chains(cls, llm: BaseLanguageModel, chains: List[ChainConfig], **
kwargs: Any) ->AgentExecutor:
"""User friendly way to initialize the MRKL chain.
This is intended to be an easy way to get up and running with the
MRKL chain.
Args:
llm: The LLM to use as the agent LLM.
chains: The chains the MRKL system has access to.
**kwargs: parameters to be passed to initialization.
Returns:
An initialized MRKL chain.
"""
tools = [Tool(name=c.action_name, func=c.action, description=c.
action_description) for c in chains]
agent = ZeroShotAgent.from_llm_and_tools(llm, tools)
return cls(agent=agent, tools=tools, **kwargs) | User friendly way to initialize the MRKL chain.
This is intended to be an easy way to get up and running with the
MRKL chain.
Args:
llm: The LLM to use as the agent LLM.
chains: The chains the MRKL system has access to.
**kwargs: parameters to be passed to initialization.
Returns:
An initialized MRKL chain. |
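A usage sketch for `from_chains`; the import paths are assumptions about older LangChain releases, and the calculator tool is illustrative:

```python
from langchain.agents.mrkl.base import ChainConfig, MRKLChain  # assumed import path
from langchain.chains import LLMMathChain
from langchain_community.llms import OpenAI

llm = OpenAI(temperature=0)
math_chain = LLMMathChain.from_llm(llm)

chains = [
    ChainConfig(
        action_name="Calculator",
        action=math_chain.run,
        action_description="useful for answering math questions",
    ),
]
mrkl = MRKLChain.from_chains(llm, chains)  # returns an AgentExecutor
```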
_get_elements | from unstructured.partition.image import partition_image
return partition_image(filename=self.file_path, **self.unstructured_kwargs) | def _get_elements(self) ->List:
from unstructured.partition.image import partition_image
return partition_image(filename=self.file_path, **self.unstructured_kwargs) | null |
add_message | """Append the message to the record in the local file"""
messages = messages_to_dict(self.messages)
messages.append(messages_to_dict([message])[0])
self.file_path.write_text(json.dumps(messages)) | def add_message(self, message: BaseMessage) ->None:
"""Append the message to the record in the local file"""
messages = messages_to_dict(self.messages)
messages.append(messages_to_dict([message])[0])
self.file_path.write_text(json.dumps(messages)) | Append the message to the record in the local file |
max_marginal_relevance_search_by_vector | """
Return docs selected using the maximal marginal relevance. Maximal marginal
relevance optimizes for similarity to query AND diversity among selected docs.
Examples:
>>> data = vector_store.max_marginal_relevance_search_by_vector(
... embedding=<your_embedding>,
... fetch_k=<elements_to_fetch_before_mmr_search>,
... k=<number_of_items_to_return>,
... exec_option=<preferred_exec_option>,
... )
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch for MMR algorithm.
lambda_mult: Number between 0 and 1 determining the degree of diversity.
0 corresponds to max diversity and 1 to min diversity. Defaults to 0.5.
exec_option (str): DeepLakeVectorStore supports 3 ways for searching.
Could be "python", "compute_engine" or "tensor_db". Defaults to
"python".
- "python" - Pure-python implementation running on the client.
Can be used for data stored anywhere. WARNING: using this
option with big datasets is discouraged due to potential
memory issues.
- "compute_engine" - Performant C++ implementation of the Deep
Lake Compute Engine. Runs on the client and can be used for
any data stored in or connected to Deep Lake. It cannot be used
with in-memory or local datasets.
- "tensor_db" - Performant, fully-hosted Managed Tensor Database.
Responsible for storage and query execution. Only available for
data stored in the Deep Lake Managed Database. To store datasets
in this database, specify `runtime = {"db_engine": True}`
during dataset creation.
deep_memory (bool): Whether to use the Deep Memory model for improving
search results. Defaults to False if deep_memory is not specified
in the Vector Store initialization. If True, the distance metric
is set to "deepmemory_distance", which represents the metric with
which the model was trained. The search is performed using the Deep
Memory model. If False, the distance metric is set to "COS" or
whatever distance metric user specifies.
**kwargs: Additional keyword arguments.
Returns:
List[Documents] - A list of documents.
"""
return self._search(embedding=embedding, k=k, fetch_k=fetch_k,
use_maximal_marginal_relevance=True, lambda_mult=lambda_mult,
exec_option=exec_option, **kwargs) | def max_marginal_relevance_search_by_vector(self, embedding: List[float], k:
int=4, fetch_k: int=20, lambda_mult: float=0.5, exec_option: Optional[
str]=None, **kwargs: Any) ->List[Document]:
"""
Return docs selected using the maximal marginal relevance. Maximal marginal
relevance optimizes for similarity to query AND diversity among selected docs.
Examples:
>>> data = vector_store.max_marginal_relevance_search_by_vector(
... embedding=<your_embedding>,
... fetch_k=<elements_to_fetch_before_mmr_search>,
... k=<number_of_items_to_return>,
... exec_option=<preferred_exec_option>,
... )
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch for MMR algorithm.
lambda_mult: Number between 0 and 1 determining the degree of diversity.
0 corresponds to max diversity and 1 to min diversity. Defaults to 0.5.
exec_option (str): DeepLakeVectorStore supports 3 ways for searching.
Could be "python", "compute_engine" or "tensor_db". Defaults to
"python".
- "python" - Pure-python implementation running on the client.
Can be used for data stored anywhere. WARNING: using this
option with big datasets is discouraged due to potential
memory issues.
- "compute_engine" - Performant C++ implementation of the Deep
Lake Compute Engine. Runs on the client and can be used for
any data stored in or connected to Deep Lake. It cannot be used
with in-memory or local datasets.
- "tensor_db" - Performant, fully-hosted Managed Tensor Database.
Responsible for storage and query execution. Only available for
data stored in the Deep Lake Managed Database. To store datasets
in this database, specify `runtime = {"db_engine": True}`
during dataset creation.
deep_memory (bool): Whether to use the Deep Memory model for improving
search results. Defaults to False if deep_memory is not specified
in the Vector Store initialization. If True, the distance metric
is set to "deepmemory_distance", which represents the metric with
which the model was trained. The search is performed using the Deep
Memory model. If False, the distance metric is set to "COS" or
whatever distance metric user specifies.
**kwargs: Additional keyword arguments.
Returns:
List[Documents] - A list of documents.
"""
return self._search(embedding=embedding, k=k, fetch_k=fetch_k,
use_maximal_marginal_relevance=True, lambda_mult=lambda_mult,
exec_option=exec_option, **kwargs) | Return docs selected using the maximal marginal relevance. Maximal marginal
relevance optimizes for similarity to query AND diversity among selected docs.
Examples:
>>> data = vector_store.max_marginal_relevance_search_by_vector(
... embedding=<your_embedding>,
... fetch_k=<elements_to_fetch_before_mmr_search>,
... k=<number_of_items_to_return>,
... exec_option=<preferred_exec_option>,
... )
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch for MMR algorithm.
lambda_mult: Number between 0 and 1 determining the degree of diversity.
0 corresponds to max diversity and 1 to min diversity. Defaults to 0.5.
exec_option (str): DeepLakeVectorStore supports 3 ways for searching.
Could be "python", "compute_engine" or "tensor_db". Defaults to
"python".
- "python" - Pure-python implementation running on the client.
Can be used for data stored anywhere. WARNING: using this
option with big datasets is discouraged due to potential
memory issues.
- "compute_engine" - Performant C++ implementation of the Deep
Lake Compute Engine. Runs on the client and can be used for
any data stored in or connected to Deep Lake. It cannot be used
with in-memory or local datasets.
- "tensor_db" - Performant, fully-hosted Managed Tensor Database.
Responsible for storage and query execution. Only available for
data stored in the Deep Lake Managed Database. To store datasets
in this database, specify `runtime = {"db_engine": True}`
during dataset creation.
deep_memory (bool): Whether to use the Deep Memory model for improving
search results. Defaults to False if deep_memory is not specified
in the Vector Store initialization. If True, the distance metric
is set to "deepmemory_distance", which represents the metric with
which the model was trained. The search is performed using the Deep
Memory model. If False, the distance metric is set to "COS" or
whatever distance metric user specifies.
**kwargs: Additional keyword arguments.
Returns:
List[Documents] - A list of documents. |
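The `lambda_mult` trade-off described above is the standard maximal marginal relevance criterion; a minimal sketch of that selection loop (not the Deep Lake implementation), assuming unit-normalized embeddings so dot products are cosine similarities:

```python
import numpy as np

def mmr_select(query: np.ndarray, docs: np.ndarray, k: int = 4, lambda_mult: float = 0.5) -> list[int]:
    """Pick k document indices balancing query similarity and diversity."""
    selected: list[int] = []
    candidates = list(range(len(docs)))
    query_sims = docs @ query  # similarity of each candidate to the query
    while candidates and len(selected) < k:
        def score(i: int) -> float:
            # Penalize similarity to anything already selected.
            redundancy = max((docs[i] @ docs[j] for j in selected), default=0.0)
            return lambda_mult * query_sims[i] - (1 - lambda_mult) * redundancy
        best = max(candidates, key=score)
        selected.append(best)
        candidates.remove(best)
    return selected
```

With `lambda_mult=1` this reduces to pure relevance ranking; with `lambda_mult=0` it maximizes diversity, matching the description in the docstring.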
embed_query | """Compute query embeddings using AwaEmbedding.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.client.Embedding(text) | def embed_query(self, text: str) ->List[float]:
"""Compute query embeddings using AwaEmbedding.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.client.Embedding(text) | Compute query embeddings using AwaEmbedding.
Args:
text: The text to embed.
Returns:
Embeddings for the text. |
similarity_search_with_score | """Return LLMRails documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 5 Max 10.
alpha: parameter for hybrid search .
Returns:
List of Documents most similar to the query and score for each.
"""
response = self._session.post(headers=self._get_post_headers(), url=
f'{self.base_url}/datastores/{self._datastore_id}/search', data=json.
dumps({'k': k, 'text': query}), timeout=10)
if response.status_code != 200:
logging.error('Query failed %s',
f'(code {response.status_code}, reason {response.reason}, details {response.text})'
)
return []
results = response.json()['results']
docs = [(Document(page_content=x['text'], metadata={key: value for key,
value in x['metadata'].items() if key != 'score'}), x['metadata'][
'score']) for x in results]
return docs | def similarity_search_with_score(self, query: str, k: int=5) ->List[Tuple[
Document, float]]:
"""Return LLMRails documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 5 Max 10.
alpha: parameter for hybrid search .
Returns:
List of Documents most similar to the query and score for each.
"""
response = self._session.post(headers=self._get_post_headers(), url=
f'{self.base_url}/datastores/{self._datastore_id}/search', data=
json.dumps({'k': k, 'text': query}), timeout=10)
if response.status_code != 200:
logging.error('Query failed %s',
f'(code {response.status_code}, reason {response.reason}, details {response.text})'
)
return []
results = response.json()['results']
docs = [(Document(page_content=x['text'], metadata={key: value for key,
value in x['metadata'].items() if key != 'score'}), x['metadata'][
'score']) for x in results]
return docs | Return LLMRails documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 5 Max 10.
alpha: parameter for hybrid search .
Returns:
List of Documents most similar to the query and score for each. |
validate_environment | """Validate that api key and python package exists in environment."""
values['openai_api_key'] = get_from_dict_or_env(values, 'openai_api_key',
'OPENAI_API_KEY')
values['openai_api_base'] = values['openai_api_base'] or os.getenv(
'OPENAI_API_BASE')
values['openai_api_type'] = get_from_dict_or_env(values, 'openai_api_type',
'OPENAI_API_TYPE', default='')
values['openai_proxy'] = get_from_dict_or_env(values, 'openai_proxy',
'OPENAI_PROXY', default='')
if values['openai_api_type'] in ('azure', 'azure_ad', 'azuread'):
default_api_version = '2023-05-15'
values['chunk_size'] = min(values['chunk_size'], 16)
else:
default_api_version = ''
values['openai_api_version'] = get_from_dict_or_env(values,
'openai_api_version', 'OPENAI_API_VERSION', default=default_api_version)
values['openai_organization'] = values['openai_organization'] or os.getenv(
'OPENAI_ORG_ID') or os.getenv('OPENAI_ORGANIZATION')
try:
import openai
except ImportError:
raise ImportError(
'Could not import openai python package. Please install it with `pip install openai`.'
)
else:
if is_openai_v1():
if values['openai_api_type'] in ('azure', 'azure_ad', 'azuread'):
warnings.warn(
'If you have openai>=1.0.0 installed and are using Azure, please use the `AzureOpenAIEmbeddings` class.'
)
client_params = {'api_key': values['openai_api_key'],
'organization': values['openai_organization'], 'base_url':
values['openai_api_base'], 'timeout': values['request_timeout'],
'max_retries': values['max_retries'], 'default_headers': values
['default_headers'], 'default_query': values['default_query'],
'http_client': values['http_client']}
if not values.get('client'):
values['client'] = openai.OpenAI(**client_params).embeddings
if not values.get('async_client'):
values['async_client'] = openai.AsyncOpenAI(**client_params
).embeddings
elif not values.get('client'):
values['client'] = openai.Embedding
else:
pass
return values | @root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key and python package exists in environment."""
values['openai_api_key'] = get_from_dict_or_env(values,
'openai_api_key', 'OPENAI_API_KEY')
values['openai_api_base'] = values['openai_api_base'] or os.getenv(
'OPENAI_API_BASE')
values['openai_api_type'] = get_from_dict_or_env(values,
'openai_api_type', 'OPENAI_API_TYPE', default='')
values['openai_proxy'] = get_from_dict_or_env(values, 'openai_proxy',
'OPENAI_PROXY', default='')
if values['openai_api_type'] in ('azure', 'azure_ad', 'azuread'):
default_api_version = '2023-05-15'
values['chunk_size'] = min(values['chunk_size'], 16)
else:
default_api_version = ''
values['openai_api_version'] = get_from_dict_or_env(values,
'openai_api_version', 'OPENAI_API_VERSION', default=default_api_version
)
values['openai_organization'] = values['openai_organization'] or os.getenv(
'OPENAI_ORG_ID') or os.getenv('OPENAI_ORGANIZATION')
try:
import openai
except ImportError:
raise ImportError(
'Could not import openai python package. Please install it with `pip install openai`.'
)
else:
if is_openai_v1():
if values['openai_api_type'] in ('azure', 'azure_ad', 'azuread'):
warnings.warn(
'If you have openai>=1.0.0 installed and are using Azure, please use the `AzureOpenAIEmbeddings` class.'
)
client_params = {'api_key': values['openai_api_key'],
'organization': values['openai_organization'], 'base_url':
values['openai_api_base'], 'timeout': values[
'request_timeout'], 'max_retries': values['max_retries'],
'default_headers': values['default_headers'],
'default_query': values['default_query'], 'http_client':
values['http_client']}
if not values.get('client'):
values['client'] = openai.OpenAI(**client_params).embeddings
if not values.get('async_client'):
values['async_client'] = openai.AsyncOpenAI(**client_params
).embeddings
elif not values.get('client'):
values['client'] = openai.Embedding
else:
pass
return values | Validate that api key and python package exists in environment. |
_identifying_params | return {} | @property
def _identifying_params(self) ->Dict[str, Any]:
return {} | null |
load | return list(self.lazy_load()) | def load(self) ->List[Document]:
return list(self.lazy_load()) | null |
mock_create | nonlocal completed
completed = True
return mock_completion | def mock_create(*args: Any, **kwargs: Any) ->Any:
nonlocal completed
completed = True
return mock_completion | null |
__init__ | """Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
source_class = guard_import('source_gong', pip_name='airbyte-source-gong'
).SourceGong
super().__init__(config=config, source_class=source_class, stream_name=
stream_name, record_handler=record_handler, state=state) | def __init__(self, config: Mapping[str, Any], stream_name: str,
record_handler: Optional[RecordHandler]=None, state: Optional[Any]=None
) ->None:
"""Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
source_class = guard_import('source_gong', pip_name='airbyte-source-gong'
).SourceGong
super().__init__(config=config, source_class=source_class, stream_name=
stream_name, record_handler=record_handler, state=state) | Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None. |
_Set | assert t.elts
self.write('{')
interleave(lambda : self.write(', '), self.dispatch, t.elts)
self.write('}') | def _Set(self, t):
assert t.elts
self.write('{')
interleave(lambda : self.write(', '), self.dispatch, t.elts)
self.write('}') | null |
extension | return 'bson' | @classmethod
def extension(cls) ->str:
return 'bson' | null |
on_llm_start_common | self.llm_starts += 1
self.starts += 1 | def on_llm_start_common(self) ->None:
self.llm_starts += 1
self.starts += 1 | null |
test_pymupdf_loader | """Test PyMuPDF loader."""
file_path = Path(__file__).parent.parent / 'examples/hello.pdf'
loader = PyMuPDFLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
file_path = Path(__file__).parent.parent / 'examples/layout-parser-paper.pdf'
loader = PyMuPDFLoader(str(file_path))
docs = loader.load()
assert len(docs) == 16
assert loader.web_path is None
web_path = 'https://people.sc.fsu.edu/~jpeterson/hello_world.pdf'
loader = PyMuPDFLoader(web_path)
docs = loader.load()
assert loader.web_path == web_path
assert loader.file_path != web_path
assert len(docs) == 1 | def test_pymupdf_loader() ->None:
"""Test PyMuPDF loader."""
file_path = Path(__file__).parent.parent / 'examples/hello.pdf'
loader = PyMuPDFLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
file_path = Path(__file__
).parent.parent / 'examples/layout-parser-paper.pdf'
loader = PyMuPDFLoader(str(file_path))
docs = loader.load()
assert len(docs) == 16
assert loader.web_path is None
web_path = 'https://people.sc.fsu.edu/~jpeterson/hello_world.pdf'
loader = PyMuPDFLoader(web_path)
docs = loader.load()
assert loader.web_path == web_path
assert loader.file_path != web_path
assert len(docs) == 1 | Test PyMuPDF loader. |
_type | return 'openai-functions-agent' | @property
def _type(self) ->str:
return 'openai-functions-agent' | null |
_get_eval_input | """Get the evaluation input."""
input_ = {'input': input, 'output': prediction}
if self.requires_reference:
input_['reference'] = reference
return input_ | def _get_eval_input(self, prediction: str, reference: Optional[str], input:
Optional[str]) ->dict:
"""Get the evaluation input."""
input_ = {'input': input, 'output': prediction}
if self.requires_reference:
input_['reference'] = reference
return input_ | Get the evaluation input. |
_import_milvus | from langchain_community.vectorstores.milvus import Milvus
return Milvus | def _import_milvus() ->Any:
from langchain_community.vectorstores.milvus import Milvus
return Milvus | null |
delete | """Delete entity value from store."""
pass | @abstractmethod
def delete(self, key: str) ->None:
"""Delete entity value from store."""
pass | Delete entity value from store. |
test_huggingface_endpoint_text_generation | """Test valid call to HuggingFace text generation model."""
llm = HuggingFaceEndpoint(endpoint_url='', task='text-generation',
model_kwargs={'max_new_tokens': 10})
output = llm('Say foo:')
print(output)
assert isinstance(output, str) | @unittest.skip(
'This test requires an inference endpoint. Tested with Hugging Face endpoints'
)
def test_huggingface_endpoint_text_generation() ->None:
"""Test valid call to HuggingFace text generation model."""
llm = HuggingFaceEndpoint(endpoint_url='', task='text-generation',
model_kwargs={'max_new_tokens': 10})
output = llm('Say foo:')
print(output)
assert isinstance(output, str) | Test valid call to HuggingFace text generation model. |
run | """Query the Brave search engine and return the results as a JSON string.
Args:
query: The query to search for.
Returns: The results as a JSON string.
"""
web_search_results = self._search_request(query=query)
final_results = [{'title': item.get('title'), 'link': item.get('url'),
'snippet': item.get('description')} for item in web_search_results]
return json.dumps(final_results) | def run(self, query: str) ->str:
"""Query the Brave search engine and return the results as a JSON string.
Args:
query: The query to search for.
Returns: The results as a JSON string.
"""
web_search_results = self._search_request(query=query)
final_results = [{'title': item.get('title'), 'link': item.get('url'),
'snippet': item.get('description')} for item in web_search_results]
return json.dumps(final_results) | Query the Brave search engine and return the results as a JSON string.
Args:
query: The query to search for.
Returns: The results as a JSON string. |
_call | """Call the Yandex GPT model and return the output.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = YandexGPT("Tell me a joke.")
"""
text = completion_with_retry(self, prompt=prompt)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text | def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
"""Call the Yandex GPT model and return the output.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = YandexGPT("Tell me a joke.")
"""
text = completion_with_retry(self, prompt=prompt)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text | Call the Yandex GPT model and return the output.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = YandexGPT("Tell me a joke.") |
validate_environment | """Validate that api key and python package exists in environment."""
aleph_alpha_api_key = get_from_dict_or_env(values, 'aleph_alpha_api_key',
'ALEPH_ALPHA_API_KEY')
try:
from aleph_alpha_client import Client
values['client'] = Client(token=aleph_alpha_api_key, host=values['host'
], hosting=values['hosting'], request_timeout_seconds=values[
'request_timeout_seconds'], total_retries=values['total_retries'],
nice=values['nice'])
except ImportError:
raise ValueError(
'Could not import aleph_alpha_client python package. Please install it with `pip install aleph_alpha_client`.'
)
return values | @root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key and python package exists in environment."""
aleph_alpha_api_key = get_from_dict_or_env(values,
'aleph_alpha_api_key', 'ALEPH_ALPHA_API_KEY')
try:
from aleph_alpha_client import Client
values['client'] = Client(token=aleph_alpha_api_key, host=values[
'host'], hosting=values['hosting'], request_timeout_seconds=
values['request_timeout_seconds'], total_retries=values[
'total_retries'], nice=values['nice'])
except ImportError:
raise ValueError(
'Could not import aleph_alpha_client python package. Please install it with `pip install aleph_alpha_client`.'
)
return values | Validate that api key and python package exists in environment. |
completion_with_retry | """Use tenacity to retry the completion call."""
import fireworks.client
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@conditional_decorator(use_retry, retry_decorator)
def _completion_with_retry(**kwargs: Any) ->Any:
return fireworks.client.Completion.create(**kwargs)
return _completion_with_retry(**kwargs) | def completion_with_retry(llm: Fireworks, use_retry: bool, *, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Any:
"""Use tenacity to retry the completion call."""
import fireworks.client
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@conditional_decorator(use_retry, retry_decorator)
def _completion_with_retry(**kwargs: Any) ->Any:
return fireworks.client.Completion.create(**kwargs)
return _completion_with_retry(**kwargs) | Use tenacity to retry the completion call. |
pretty_print_str | return title + '\n' + d | def pretty_print_str(title: str, d: str) ->str:
return title + '\n' + d | null |
test__convert_dict_to_message_ai | message = {'role': 'assistant', 'content': 'foo'}
result = convert_dict_to_message(message)
expected_output = AIMessage(content='foo')
assert result == expected_output | def test__convert_dict_to_message_ai() ->None:
message = {'role': 'assistant', 'content': 'foo'}
result = convert_dict_to_message(message)
expected_output = AIMessage(content='foo')
assert result == expected_output | null |
_import_file_management_FileSearchTool | from langchain_community.tools.file_management import FileSearchTool
return FileSearchTool | def _import_file_management_FileSearchTool() ->Any:
from langchain_community.tools.file_management import FileSearchTool
return FileSearchTool | null |
__getitem__ | return getattr(self, item) | def __getitem__(self, item: str) ->Any:
return getattr(self, item) | null |
add_texts | if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
embeddings = self.embedding_function.embed_documents(list(texts))
if not metadatas:
metadatas = [{} for _ in texts]
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
raise ValueError('Collection not found')
for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids
):
embedding_store = EmbeddingStore(embedding=embedding, document=text,
cmetadata=metadata, custom_id=id)
collection.embeddings.append(embedding_store)
session.add(embedding_store)
session.commit()
return ids | def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
None, ids: Optional[List[str]]=None, **kwargs: Any) ->List[str]:
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
embeddings = self.embedding_function.embed_documents(list(texts))
if not metadatas:
metadatas = [{} for _ in texts]
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
raise ValueError('Collection not found')
for text, metadata, embedding, id in zip(texts, metadatas,
embeddings, ids):
embedding_store = EmbeddingStore(embedding=embedding, document=
text, cmetadata=metadata, custom_id=id)
collection.embeddings.append(embedding_store)
session.add(embedding_store)
session.commit()
return ids | null |
_embed_query | if isinstance(self.embedding_function, Embeddings):
return self.embedding_function.embed_query(text)
else:
return self.embedding_function(text) | def _embed_query(self, text: str) ->List[float]:
if isinstance(self.embedding_function, Embeddings):
return self.embedding_function.embed_query(text)
else:
return self.embedding_function(text) | null |
output_keys | return [] | @property
def output_keys(self) ->List[str]:
return [] | null |
test_baichuan_key_masked_when_passed_from_env | """Test initialization with an API key provided via an env variable"""
monkeypatch.setenv('BAICHUAN_API_KEY', 'test-api-key')
monkeypatch.setenv('BAICHUAN_SECRET_KEY', 'test-secret-key')
chat = ChatBaichuan()
print(chat.baichuan_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
print(chat.baichuan_secret_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********' | def test_baichuan_key_masked_when_passed_from_env(monkeypatch: MonkeyPatch,
capsys: CaptureFixture) ->None:
"""Test initialization with an API key provided via an env variable"""
monkeypatch.setenv('BAICHUAN_API_KEY', 'test-api-key')
monkeypatch.setenv('BAICHUAN_SECRET_KEY', 'test-secret-key')
chat = ChatBaichuan()
print(chat.baichuan_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
print(chat.baichuan_secret_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********' | Test initialization with an API key provided via an env variable |
__init__ | """Initialize a SparkSQL object.
Args:
spark_session: A SparkSession object.
If not provided, one will be created.
catalog: The catalog to use.
If not provided, the default catalog will be used.
schema: The schema to use.
If not provided, the default schema will be used.
ignore_tables: A list of tables to ignore.
If not provided, all tables will be used.
include_tables: A list of tables to include.
If not provided, all tables will be used.
sample_rows_in_table_info: The number of rows to include in the table info.
Defaults to 3.
"""
try:
from pyspark.sql import SparkSession
except ImportError:
raise ImportError(
'pyspark is not installed. Please install it with `pip install pyspark`'
)
self._spark = (spark_session if spark_session else SparkSession.builder.
getOrCreate())
if catalog is not None:
self._spark.catalog.setCurrentCatalog(catalog)
if schema is not None:
self._spark.catalog.setCurrentDatabase(schema)
self._all_tables = set(self._get_all_table_names())
self._include_tables = set(include_tables) if include_tables else set()
if self._include_tables:
missing_tables = self._include_tables - self._all_tables
if missing_tables:
raise ValueError(
f'include_tables {missing_tables} not found in database')
self._ignore_tables = set(ignore_tables) if ignore_tables else set()
if self._ignore_tables:
missing_tables = self._ignore_tables - self._all_tables
if missing_tables:
raise ValueError(
f'ignore_tables {missing_tables} not found in database')
usable_tables = self.get_usable_table_names()
self._usable_tables = set(usable_tables) if usable_tables else self._all_tables
if not isinstance(sample_rows_in_table_info, int):
raise TypeError('sample_rows_in_table_info must be an integer')
self._sample_rows_in_table_info = sample_rows_in_table_info | def __init__(self, spark_session: Optional[SparkSession]=None, catalog:
Optional[str]=None, schema: Optional[str]=None, ignore_tables: Optional
[List[str]]=None, include_tables: Optional[List[str]]=None,
sample_rows_in_table_info: int=3):
"""Initialize a SparkSQL object.
Args:
spark_session: A SparkSession object.
If not provided, one will be created.
catalog: The catalog to use.
If not provided, the default catalog will be used.
schema: The schema to use.
If not provided, the default schema will be used.
ignore_tables: A list of tables to ignore.
If not provided, all tables will be used.
include_tables: A list of tables to include.
If not provided, all tables will be used.
sample_rows_in_table_info: The number of rows to include in the table info.
Defaults to 3.
"""
try:
from pyspark.sql import SparkSession
except ImportError:
raise ImportError(
'pyspark is not installed. Please install it with `pip install pyspark`'
)
self._spark = (spark_session if spark_session else SparkSession.builder
.getOrCreate())
if catalog is not None:
self._spark.catalog.setCurrentCatalog(catalog)
if schema is not None:
self._spark.catalog.setCurrentDatabase(schema)
self._all_tables = set(self._get_all_table_names())
self._include_tables = set(include_tables) if include_tables else set()
if self._include_tables:
missing_tables = self._include_tables - self._all_tables
if missing_tables:
raise ValueError(
f'include_tables {missing_tables} not found in database')
self._ignore_tables = set(ignore_tables) if ignore_tables else set()
if self._ignore_tables:
missing_tables = self._ignore_tables - self._all_tables
if missing_tables:
raise ValueError(
f'ignore_tables {missing_tables} not found in database')
usable_tables = self.get_usable_table_names()
self._usable_tables = set(usable_tables
) if usable_tables else self._all_tables
if not isinstance(sample_rows_in_table_info, int):
raise TypeError('sample_rows_in_table_info must be an integer')
self._sample_rows_in_table_info = sample_rows_in_table_info | Initialize a SparkSQL object.
Args:
spark_session: A SparkSession object.
If not provided, one will be created.
catalog: The catalog to use.
If not provided, the default catalog will be used.
schema: The schema to use.
If not provided, the default schema will be used.
ignore_tables: A list of tables to ignore.
If not provided, all tables will be used.
include_tables: A list of tables to include.
If not provided, all tables will be used.
sample_rows_in_table_info: The number of rows to include in the table info.
Defaults to 3. |
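A minimal construction sketch for the initializer above, assuming LangChain's `SparkSQL` utility and a local Spark session:

```python
from pyspark.sql import SparkSession
from langchain_community.utilities.spark_sql import SparkSQL

spark = SparkSession.builder.getOrCreate()

# Wrap the current catalog/schema; table info will include 3 sample rows per table.
db = SparkSQL(spark_session=spark, sample_rows_in_table_info=3)
print(db.get_usable_table_names())
```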
completion_with_retry | """Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(self)
@retry_decorator
def _completion_with_retry(**_kwargs: Any) ->Any:
resp = self.client.call(**_kwargs)
return check_response(resp)
return _completion_with_retry(**kwargs) | def completion_with_retry(self, **kwargs: Any) ->Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(self)
@retry_decorator
def _completion_with_retry(**_kwargs: Any) ->Any:
resp = self.client.call(**_kwargs)
return check_response(resp)
return _completion_with_retry(**kwargs) | Use tenacity to retry the completion call. |
_format_dict_to_string | formatted_str = ', '.join([f'{key}: {value}' for key, value in input_dict.
items()])
return formatted_str | @staticmethod
def _format_dict_to_string(input_dict: Dict) ->str:
formatted_str = ', '.join([f'{key}: {value}' for key, value in
input_dict.items()])
return formatted_str | null |
stringify_dict | """Stringify a dictionary.
Args:
data: The dictionary to stringify.
Returns:
str: The stringified dictionary.
"""
text = ''
for key, value in data.items():
text += key + ': ' + stringify_value(value) + '\n'
return text | def stringify_dict(data: dict) ->str:
"""Stringify a dictionary.
Args:
data: The dictionary to stringify.
Returns:
str: The stringified dictionary.
"""
text = ''
for key, value in data.items():
text += key + ': ' + stringify_value(value) + '\n'
return text | Stringify a dictionary.
Args:
data: The dictionary to stringify.
Returns:
str: The stringified dictionary. |
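A quick worked example of the helper above (output shown in comments; this assumes `stringify_value` renders scalars via `str`):

```python
text = stringify_dict({"title": "Report", "pages": 3})
print(text)
# title: Report
# pages: 3
```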
add_texts | """Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
document_ids: Optional list of document ids associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
if not self._collection:
raise ValueError(
'collection should be an instance of a Zep DocumentCollection')
documents = self._generate_documents_to_add(texts, metadatas, document_ids)
uuids = self._collection.add_documents(documents)
return uuids | def add_texts(self, texts: Iterable[str], metadatas: Optional[List[Dict[str,
Any]]]=None, document_ids: Optional[List[str]]=None, **kwargs: Any) ->List[
str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
document_ids: Optional list of document ids associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
if not self._collection:
raise ValueError(
'collection should be an instance of a Zep DocumentCollection')
documents = self._generate_documents_to_add(texts, metadatas, document_ids)
uuids = self._collection.add_documents(documents)
return uuids | Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
document_ids: Optional list of document ids associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore. |
test_voyage_embedding_query | """Test voyage embeddings."""
document = 'foo bar'
embedding = VoyageEmbeddings(model=MODEL)
output = embedding.embed_query(document)
assert len(output) == 1024 | def test_voyage_embedding_query() ->None:
"""Test voyage embeddings."""
document = 'foo bar'
embedding = VoyageEmbeddings(model=MODEL)
output = embedding.embed_query(document)
assert len(output) == 1024 | Test voyage embeddings. |
test_add_texts_not_supported_for_delta_sync_index | index = mock_index(index_details)
vectorsearch = default_databricks_vector_search(index)
with pytest.raises(ValueError) as ex:
vectorsearch.add_texts(fake_texts)
assert '`add_texts` is only supported for direct-access index.' in str(ex.value
) | @pytest.mark.requires('databricks', 'databricks.vector_search')
@pytest.mark.parametrize('index_details', [
DELTA_SYNC_INDEX_MANAGED_EMBEDDINGS,
DELTA_SYNC_INDEX_SELF_MANAGED_EMBEDDINGS])
def test_add_texts_not_supported_for_delta_sync_index(index_details: dict
) ->None:
index = mock_index(index_details)
vectorsearch = default_databricks_vector_search(index)
with pytest.raises(ValueError) as ex:
vectorsearch.add_texts(fake_texts)
assert '`add_texts` is only supported for direct-access index.' in str(ex
.value) | null |
_load_llm_checker_chain | if 'llm' in config:
llm_config = config.pop('llm')
llm = load_llm_from_config(llm_config)
elif 'llm_path' in config:
llm = load_llm(config.pop('llm_path'))
else:
raise ValueError('One of `llm` or `llm_path` must be present.')
if 'create_draft_answer_prompt' in config:
create_draft_answer_prompt_config = config.pop('create_draft_answer_prompt'
)
create_draft_answer_prompt = load_prompt_from_config(
create_draft_answer_prompt_config)
elif 'create_draft_answer_prompt_path' in config:
create_draft_answer_prompt = load_prompt(config.pop(
'create_draft_answer_prompt_path'))
if 'list_assertions_prompt' in config:
list_assertions_prompt_config = config.pop('list_assertions_prompt')
list_assertions_prompt = load_prompt_from_config(
list_assertions_prompt_config)
elif 'list_assertions_prompt_path' in config:
list_assertions_prompt = load_prompt(config.pop(
'list_assertions_prompt_path'))
if 'check_assertions_prompt' in config:
check_assertions_prompt_config = config.pop('check_assertions_prompt')
check_assertions_prompt = load_prompt_from_config(
check_assertions_prompt_config)
elif 'check_assertions_prompt_path' in config:
check_assertions_prompt = load_prompt(config.pop(
'check_assertions_prompt_path'))
if 'revised_answer_prompt' in config:
revised_answer_prompt_config = config.pop('revised_answer_prompt')
revised_answer_prompt = load_prompt_from_config(
revised_answer_prompt_config)
elif 'revised_answer_prompt_path' in config:
revised_answer_prompt = load_prompt(config.pop(
'revised_answer_prompt_path'))
return LLMCheckerChain(llm=llm, create_draft_answer_prompt=
create_draft_answer_prompt, list_assertions_prompt=
list_assertions_prompt, check_assertions_prompt=check_assertions_prompt,
revised_answer_prompt=revised_answer_prompt, **config) | def _load_llm_checker_chain(config: dict, **kwargs: Any) ->LLMCheckerChain:
if 'llm' in config:
llm_config = config.pop('llm')
llm = load_llm_from_config(llm_config)
elif 'llm_path' in config:
llm = load_llm(config.pop('llm_path'))
else:
raise ValueError('One of `llm` or `llm_path` must be present.')
if 'create_draft_answer_prompt' in config:
create_draft_answer_prompt_config = config.pop(
'create_draft_answer_prompt')
create_draft_answer_prompt = load_prompt_from_config(
create_draft_answer_prompt_config)
elif 'create_draft_answer_prompt_path' in config:
create_draft_answer_prompt = load_prompt(config.pop(
'create_draft_answer_prompt_path'))
if 'list_assertions_prompt' in config:
list_assertions_prompt_config = config.pop('list_assertions_prompt')
list_assertions_prompt = load_prompt_from_config(
list_assertions_prompt_config)
elif 'list_assertions_prompt_path' in config:
list_assertions_prompt = load_prompt(config.pop(
'list_assertions_prompt_path'))
if 'check_assertions_prompt' in config:
check_assertions_prompt_config = config.pop('check_assertions_prompt')
check_assertions_prompt = load_prompt_from_config(
check_assertions_prompt_config)
elif 'check_assertions_prompt_path' in config:
check_assertions_prompt = load_prompt(config.pop(
'check_assertions_prompt_path'))
if 'revised_answer_prompt' in config:
revised_answer_prompt_config = config.pop('revised_answer_prompt')
revised_answer_prompt = load_prompt_from_config(
revised_answer_prompt_config)
elif 'revised_answer_prompt_path' in config:
revised_answer_prompt = load_prompt(config.pop(
'revised_answer_prompt_path'))
return LLMCheckerChain(llm=llm, create_draft_answer_prompt=
create_draft_answer_prompt, list_assertions_prompt=
list_assertions_prompt, check_assertions_prompt=
check_assertions_prompt, revised_answer_prompt=
revised_answer_prompt, **config) | null |
from_prompts | """Convenience constructor for instantiating from destination prompts."""
destinations = [f"{p['name']}: {p['description']}" for p in prompt_infos]
destinations_str = '\n'.join(destinations)
router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(destinations=
destinations_str)
router_prompt = PromptTemplate(template=router_template, input_variables=[
'input'], output_parser=RouterOutputParser())
router_chain = LLMRouterChain.from_llm(llm, router_prompt)
destination_chains = {}
for p_info in prompt_infos:
name = p_info['name']
prompt_template = p_info['prompt_template']
prompt = PromptTemplate(template=prompt_template, input_variables=['input'])
chain = LLMChain(llm=llm, prompt=prompt)
destination_chains[name] = chain
_default_chain = default_chain or ConversationChain(llm=llm, output_key='text')
return cls(router_chain=router_chain, destination_chains=destination_chains,
default_chain=_default_chain, **kwargs) | @classmethod
def from_prompts(cls, llm: BaseLanguageModel, prompt_infos: List[Dict[str,
str]], default_chain: Optional[Chain]=None, **kwargs: Any
) ->MultiPromptChain:
"""Convenience constructor for instantiating from destination prompts."""
destinations = [f"{p['name']}: {p['description']}" for p in prompt_infos]
destinations_str = '\n'.join(destinations)
router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(destinations=
destinations_str)
router_prompt = PromptTemplate(template=router_template,
input_variables=['input'], output_parser=RouterOutputParser())
router_chain = LLMRouterChain.from_llm(llm, router_prompt)
destination_chains = {}
for p_info in prompt_infos:
name = p_info['name']
prompt_template = p_info['prompt_template']
prompt = PromptTemplate(template=prompt_template, input_variables=[
'input'])
chain = LLMChain(llm=llm, prompt=prompt)
destination_chains[name] = chain
_default_chain = default_chain or ConversationChain(llm=llm, output_key='text')
return cls(router_chain=router_chain, destination_chains=
destination_chains, default_chain=_default_chain, **kwargs) | Convenience constructor for instantiating from destination prompts. |
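A sketch of how the constructor above might be called; the prompt templates and the `llm` instance are placeholders rather than values from the source.
# Each prompt_infos entry needs 'name', 'description', and a 'prompt_template'
# exposing an {input} variable, matching the loop in from_prompts.
prompt_infos = [
    {"name": "math", "description": "Good for math questions",
     "prompt_template": "You are a math tutor.\n\nQuestion: {input}"},
    {"name": "history", "description": "Good for history questions",
     "prompt_template": "You are a historian.\n\nQuestion: {input}"},
]
chain = MultiPromptChain.from_prompts(llm, prompt_infos)  # `llm` is any BaseLanguageModel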
__init__ | self.path = path
self.glob = glob
self.load_hidden = load_hidden
self.recursive = recursive
self.silent_errors = silent_errors
self.extract_images = extract_images | def __init__(self, path: str, glob: str='**/[!.]*.pdf', silent_errors: bool
=False, load_hidden: bool=False, recursive: bool=False, extract_images:
bool=False):
self.path = path
self.glob = glob
self.load_hidden = load_hidden
self.recursive = recursive
self.silent_errors = silent_errors
self.extract_images = extract_images | null |
add_documents | """Run more documents through the embeddings and add to the vectorstore.
Args:
documents (List[Document]): List of documents to add to the vectorstore.
Returns:
List of ids of the added documents.
"""
return self.add_texts([document.page_content for document in documents], [
document.metadata for document in documents], **kwargs) | def add_documents(self, documents: List[Document], **kwargs: Any) ->List[str]:
"""Run more documents through the embeddings and add to the vectorstore.
Args:
documents (List[Document]): List of documents to add to the vectorstore.
Returns:
List of ids of the added documents.
"""
return self.add_texts([document.page_content for document in documents],
[document.metadata for document in documents], **kwargs) | Run more documents through the embeddings and add to the vectorstore.
Args:
documents (List[Document]): List of documents to add to the vectorstore.
Returns:
List of ids of the added documents. |
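A short sketch of the delegation above: page contents and metadata are unzipped and handed to add_texts. The Document import path and the `store` variable are assumptions.
from langchain_core.documents import Document  # import path assumed

docs = [Document(page_content="hello", metadata={"source": "a.txt"}),
        Document(page_content="world", metadata={"source": "b.txt"})]
ids = store.add_documents(docs)  # `store` is any vectorstore exposing add_texts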
_wait_for_run | in_progress = True
while in_progress:
run = self.client.beta.threads.runs.retrieve(run_id, thread_id=thread_id)
in_progress = run.status in ('in_progress', 'queued')
if in_progress:
sleep(self.check_every_ms / 1000)
return run | def _wait_for_run(self, run_id: str, thread_id: str) ->Any:
in_progress = True
while in_progress:
run = self.client.beta.threads.runs.retrieve(run_id, thread_id=
thread_id)
in_progress = run.status in ('in_progress', 'queued')
if in_progress:
sleep(self.check_every_ms / 1000)
return run | null |
validate_environment | """Validate that api key and endpoint exists in environment."""
bing_subscription_key = get_from_dict_or_env(values,
'bing_subscription_key', 'BING_SUBSCRIPTION_KEY')
values['bing_subscription_key'] = bing_subscription_key
bing_search_url = get_from_dict_or_env(values, 'bing_search_url',
'BING_SEARCH_URL')
values['bing_search_url'] = bing_search_url
return values | @root_validator(pre=True)
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key and endpoint exists in environment."""
bing_subscription_key = get_from_dict_or_env(values,
'bing_subscription_key', 'BING_SUBSCRIPTION_KEY')
values['bing_subscription_key'] = bing_subscription_key
bing_search_url = get_from_dict_or_env(values, 'bing_search_url',
'BING_SEARCH_URL')
values['bing_search_url'] = bing_search_url
return values | Validate that the API key and endpoint exist in the environment.
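A minimal sketch of satisfying the validator above through environment variables; the key is a placeholder and the endpoint shown is only the commonly documented one, so verify it for your subscription.
import os

os.environ["BING_SUBSCRIPTION_KEY"] = "<your-subscription-key>"  # placeholder
os.environ["BING_SEARCH_URL"] = "https://api.bing.microsoft.com/v7.0/search"  # typical endpoint, verify for your account
search = BingSearchAPIWrapper()  # class name assumed from the validator's context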
_import_edenai_EdenAiParsingInvoiceTool | from langchain_community.tools.edenai import EdenAiParsingInvoiceTool
return EdenAiParsingInvoiceTool | def _import_edenai_EdenAiParsingInvoiceTool() ->Any:
from langchain_community.tools.edenai import EdenAiParsingInvoiceTool
return EdenAiParsingInvoiceTool | null |
_import_lancedb | from langchain_community.vectorstores.lancedb import LanceDB
return LanceDB | def _import_lancedb() ->Any:
from langchain_community.vectorstores.lancedb import LanceDB
return LanceDB | null |
_import_file_management_ReadFileTool | from langchain_community.tools.file_management import ReadFileTool
return ReadFileTool | def _import_file_management_ReadFileTool() ->Any:
from langchain_community.tools.file_management import ReadFileTool
return ReadFileTool | null |
results | """Run query through GoogleSearch and return metadata.
Args:
query: The query to search for.
num_results: The number of results to return.
search_params: Parameters to be passed on search
Returns:
A list of dictionaries with the following keys:
snippet - The description of the result.
title - The title of the result.
link - The link to the result.
"""
metadata_results = []
results = self._google_search_results(query, num=num_results, **
search_params or {})
if len(results) == 0:
return [{'Result': 'No good Google Search Result was found'}]
for result in results:
metadata_result = {'title': result['title'], 'link': result['link']}
if 'snippet' in result:
metadata_result['snippet'] = result['snippet']
metadata_results.append(metadata_result)
return metadata_results | def results(self, query: str, num_results: int, search_params: Optional[
Dict[str, str]]=None) ->List[Dict]:
"""Run query through GoogleSearch and return metadata.
Args:
query: The query to search for.
num_results: The number of results to return.
search_params: Parameters to be passed on search
Returns:
A list of dictionaries with the following keys:
snippet - The description of the result.
title - The title of the result.
link - The link to the result.
"""
metadata_results = []
results = self._google_search_results(query, num=num_results, **
search_params or {})
if len(results) == 0:
return [{'Result': 'No good Google Search Result was found'}]
for result in results:
metadata_result = {'title': result['title'], 'link': result['link']}
if 'snippet' in result:
metadata_result['snippet'] = result['snippet']
metadata_results.append(metadata_result)
return metadata_results | Run query through GoogleSearch and return metadata.
Args:
query: The query to search for.
num_results: The number of results to return.
search_params: Parameters to be passed on search
Returns:
A list of dictionaries with the following keys:
snippet - The description of the result.
title - The title of the result.
link - The link to the result. |
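A usage sketch for the results method above; `search` stands in for a configured GoogleSearchAPIWrapper and the query is arbitrary.
hits = search.results("langchain retrievers", num_results=3)
for hit in hits:
    # 'snippet' is only present when the API returns one
    print(hit["title"], hit["link"], hit.get("snippet", ""))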
on_tool_error | if self.__has_valid_config is False:
return
try:
self.__track_event('tool', 'error', run_id=str(run_id), parent_run_id=
str(parent_run_id) if parent_run_id else None, error={'message':
str(error), 'stack': traceback.format_exc()}, app_id=self.__app_id)
except Exception as e:
logger.error(f'[LLMonitor] An error occurred in on_tool_error: {e}') | def on_tool_error(self, error: BaseException, *, run_id: UUID,
parent_run_id: Union[UUID, None]=None, **kwargs: Any) ->Any:
if self.__has_valid_config is False:
return
try:
self.__track_event('tool', 'error', run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
error={'message': str(error), 'stack': traceback.format_exc()},
app_id=self.__app_id)
except Exception as e:
logger.error(f'[LLMonitor] An error occurred in on_tool_error: {e}') | null |
to_pandas | import pandas as pd
return pd.DataFrame(self.history) | def to_pandas(self) ->'pd.DataFrame':
import pandas as pd
return pd.DataFrame(self.history) | null |
__str__ | return get_buffer_string(self.messages) | def __str__(self) ->str:
return get_buffer_string(self.messages) | null |
parse_obj | try:
cls._alert_unsupported_spec(obj)
return super().parse_obj(obj)
except ValidationError as e:
new_obj = copy.deepcopy(obj)
for error in e.errors():
keys = error['loc']
item = new_obj
for key in keys[:-1]:
item = item[key]
item.pop(keys[-1], None)
return cls.parse_obj(new_obj) | @classmethod
def parse_obj(cls, obj: dict) ->OpenAPISpec:
try:
cls._alert_unsupported_spec(obj)
return super().parse_obj(obj)
except ValidationError as e:
new_obj = copy.deepcopy(obj)
for error in e.errors():
keys = error['loc']
item = new_obj
for key in keys[:-1]:
item = item[key]
item.pop(keys[-1], None)
return cls.parse_obj(new_obj) | null |
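The retry path above prunes every key named in a pydantic validation error and re-parses. A standalone sketch of that prune step (not the library's API), using a plain dict and an error location tuple:
import copy

def prune(obj: dict, loc: tuple) -> dict:
    """Drop the key addressed by `loc`, mirroring the loop in parse_obj."""
    new_obj = copy.deepcopy(obj)
    item = new_obj
    for key in loc[:-1]:
        item = item[key]
    item.pop(loc[-1], None)
    return new_obj

print(prune({"info": {"title": "Demo", "bad_field": 1}}, ("info", "bad_field")))
# {'info': {'title': 'Demo'}}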
embed_with_retry | """Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator()
@retry_decorator
def _embed_with_retry(*args: Any, **kwargs: Any) ->Any:
return embeddings.embed(*args, **kwargs)
return _embed_with_retry(*args, **kwargs) | def embed_with_retry(embeddings: MiniMaxEmbeddings, *args: Any, **kwargs: Any
) ->Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator()
@retry_decorator
def _embed_with_retry(*args: Any, **kwargs: Any) ->Any:
return embeddings.embed(*args, **kwargs)
return _embed_with_retry(*args, **kwargs) | Use tenacity to retry the completion call. |
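`_create_retry_decorator` is not shown in this row; a plausible sketch of such a helper built on tenacity follows, with illustrative wait/stop values rather than the library's actual settings.
from tenacity import retry, stop_after_attempt, wait_exponential

def _create_retry_decorator():
    # illustrative settings: up to 6 attempts with exponential backoff
    return retry(stop=stop_after_attempt(6),
                 wait=wait_exponential(multiplier=1, min=1, max=10))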
__init__ | """Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
source_class = guard_import('source_salesforce', pip_name=
'airbyte-source-salesforce').SourceSalesforce
super().__init__(config=config, source_class=source_class, stream_name=
stream_name, record_handler=record_handler, state=state) | def __init__(self, config: Mapping[str, Any], stream_name: str,
record_handler: Optional[RecordHandler]=None, state: Optional[Any]=None
) ->None:
"""Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
source_class = guard_import('source_salesforce', pip_name=
'airbyte-source-salesforce').SourceSalesforce
super().__init__(config=config, source_class=source_class, stream_name=
stream_name, record_handler=record_handler, state=state) | Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None. |
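A hypothetical instantiation of the loader above; the class name, config keys, and credentials are assumptions based on the Airbyte Salesforce source connector, not values from this row.
loader = AirbyteSalesforceLoader(  # class name assumed
    config={
        "client_id": "<client-id>",          # placeholder credentials
        "client_secret": "<client-secret>",
        "refresh_token": "<refresh-token>",
        "start_date": "2023-01-01T00:00:00Z",
    },
    stream_name="Account",
)
docs = loader.load()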
_call | baseten_api_key = os.environ['BASETEN_API_KEY']
model_id = self.model
if self.deployment == 'production':
model_url = f'https://model-{model_id}.api.baseten.co/production/predict'
elif self.deployment == 'development':
model_url = f'https://model-{model_id}.api.baseten.co/development/predict'
else:
model_url = (
f'https://model-{model_id}.api.baseten.co/deployment/{self.deployment}/predict'
)
response = requests.post(model_url, headers={'Authorization':
f'Api-Key {baseten_api_key}'}, json={'prompt': prompt, **kwargs})
return response.json() | def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
baseten_api_key = os.environ['BASETEN_API_KEY']
model_id = self.model
if self.deployment == 'production':
model_url = (
f'https://model-{model_id}.api.baseten.co/production/predict')
elif self.deployment == 'development':
model_url = (
f'https://model-{model_id}.api.baseten.co/development/predict')
else:
model_url = (
f'https://model-{model_id}.api.baseten.co/deployment/{self.deployment}/predict'
)
response = requests.post(model_url, headers={'Authorization':
f'Api-Key {baseten_api_key}'}, json={'prompt': prompt, **kwargs})
return response.json() | null |
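A usage sketch; the surrounding class is not named in this row, so the `Baseten` class name is assumed, while the `model` and `deployment` fields are taken from the attributes the method reads. Model id and API key are placeholders.
import os

os.environ["BASETEN_API_KEY"] = "<api-key>"  # placeholder
llm = Baseten(model="abcd1234", deployment="production")  # class name assumed
print(llm.invoke("What makes a good unit test?"))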
on_llm_new_token | """Run when LLM generates a new token."""
self.metrics['step'] += 1
self.metrics['llm_streams'] += 1
llm_streams = self.metrics['llm_streams']
resp: Dict[str, Any] = {}
resp.update({'action': 'on_llm_new_token', 'token': token})
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f'llm_new_tokens_{llm_streams}') | def on_llm_new_token(self, token: str, **kwargs: Any) ->None:
"""Run when LLM generates a new token."""
self.metrics['step'] += 1
self.metrics['llm_streams'] += 1
llm_streams = self.metrics['llm_streams']
resp: Dict[str, Any] = {}
resp.update({'action': 'on_llm_new_token', 'token': token})
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f'llm_new_tokens_{llm_streams}') | Run when LLM generates a new token. |
_skip_reference_warning | """Warning to show when reference is ignored."""
return f"""Ignoring reference in {self.__class__.__name__}, as it is not expected.
To use references, use the labeled_criteria instead.""" | @property
def _skip_reference_warning(self) ->str:
"""Warning to show when reference is ignored."""
return f"""Ignoring reference in {self.__class__.__name__}, as it is not expected.
To use references, use the labeled_criteria instead.""" | Warning to show when reference is ignored. |
_convert_message_to_dict | if isinstance(message, ChatMessage):
message_dict = {'role': message.role, 'content': message.content}
elif isinstance(message, HumanMessage):
message_dict = {'role': 'user', 'content': message.content}
elif isinstance(message, AIMessage):
message_dict = {'role': 'assistant', 'content': message.content}
if 'function_call' in message.additional_kwargs:
message_dict['function_call'] = message.additional_kwargs[
'function_call']
elif isinstance(message, SystemMessage):
message_dict = {'role': 'system', 'content': message.content}
elif isinstance(message, FunctionMessage):
message_dict = {'role': 'function', 'content': message.content, 'name':
message.name}
else:
raise ValueError(f'Got unknown type {message}')
if 'name' in message.additional_kwargs:
message_dict['name'] = message.additional_kwargs['name']
return message_dict | def _convert_message_to_dict(message: BaseMessage) ->dict:
if isinstance(message, ChatMessage):
message_dict = {'role': message.role, 'content': message.content}
elif isinstance(message, HumanMessage):
message_dict = {'role': 'user', 'content': message.content}
elif isinstance(message, AIMessage):
message_dict = {'role': 'assistant', 'content': message.content}
if 'function_call' in message.additional_kwargs:
message_dict['function_call'] = message.additional_kwargs[
'function_call']
elif isinstance(message, SystemMessage):
message_dict = {'role': 'system', 'content': message.content}
elif isinstance(message, FunctionMessage):
message_dict = {'role': 'function', 'content': message.content,
'name': message.name}
else:
raise ValueError(f'Got unknown type {message}')
if 'name' in message.additional_kwargs:
message_dict['name'] = message.additional_kwargs['name']
return message_dict | null |
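A quick sketch of the mapping above, assuming the message classes imported by the surrounding module:
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage  # import path assumed

msgs = [SystemMessage(content="You are helpful."),
        HumanMessage(content="Hi"),
        AIMessage(content="Hello!")]
print([_convert_message_to_dict(m) for m in msgs])
# [{'role': 'system', ...}, {'role': 'user', ...}, {'role': 'assistant', ...}]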
get_table_info_no_throw | """Get information about specified tables.
Follows best practices as specified in: Rajkumar et al, 2022
(https://arxiv.org/abs/2204.00498)
If `sample_rows_in_table_info`, the specified number of sample rows will be
appended to each table description. This can increase performance as
demonstrated in the paper.
"""
try:
return self.get_table_info(table_names)
except ValueError as e:
"""Format the error message"""
return f'Error: {e}' | def get_table_info_no_throw(self, table_names: Optional[List[str]]=None) ->str:
"""Get information about specified tables.
Follows best practices as specified in: Rajkumar et al, 2022
(https://arxiv.org/abs/2204.00498)
If `sample_rows_in_table_info`, the specified number of sample rows will be
appended to each table description. This can increase performance as
demonstrated in the paper.
"""
try:
return self.get_table_info(table_names)
except ValueError as e:
"""Format the error message"""
return f'Error: {e}' | Get information about specified tables.
Follows best practices as specified in: Rajkumar et al, 2022
(https://arxiv.org/abs/2204.00498)
If `sample_rows_in_table_info`, the specified number of sample rows will be
appended to each table description. This can increase performance as
demonstrated in the paper. |
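A hypothetical call; the docstring suggests this lives on the SQLDatabase utility, and the URI and table name below are placeholders.
db = SQLDatabase.from_uri("sqlite:///example.db")  # class and constructor assumed
print(db.get_table_info_no_throw(["users"]))  # returns 'Error: ...' instead of raising for a bad table name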
on_llm_new_token | """Run on new LLM token. Only available when streaming is enabled."""
llm_run = self._get_run(run_id, run_type='llm')
event_kwargs: Dict[str, Any] = {'token': token}
if chunk:
event_kwargs['chunk'] = chunk
llm_run.events.append({'name': 'new_token', 'time': datetime.now(timezone.utc), 'kwargs': event_kwargs})
self._on_llm_new_token(llm_run, token, chunk)
return llm_run | def on_llm_new_token(self, token: str, *, chunk: Optional[Union[
GenerationChunk, ChatGenerationChunk]]=None, run_id: UUID,
parent_run_id: Optional[UUID]=None, **kwargs: Any) ->Run:
"""Run on new LLM token. Only available when streaming is enabled."""
llm_run = self._get_run(run_id, run_type='llm')
event_kwargs: Dict[str, Any] = {'token': token}
if chunk:
event_kwargs['chunk'] = chunk
llm_run.events.append({'name': 'new_token', 'time': datetime.now(
timezone.utc), 'kwargs': event_kwargs})
self._on_llm_new_token(llm_run, token, chunk)
return llm_run | Run on new LLM token. Only available when streaming is enabled. |
test_load_pupmed_from_universal_entry | pubmed_tool = _load_pubmed_from_universal_entry()
search_string = (
'Examining the Validity of ChatGPT in Identifying Relevant Nephrology Literature'
)
output = pubmed_tool(search_string)
test_string = (
'Examining the Validity of ChatGPT in Identifying Relevant Nephrology Literature: Findings and Implications'
)
assert test_string in output | def test_load_pupmed_from_universal_entry() ->None:
pubmed_tool = _load_pubmed_from_universal_entry()
search_string = (
'Examining the Validity of ChatGPT in Identifying Relevant Nephrology Literature'
)
output = pubmed_tool(search_string)
test_string = (
'Examining the Validity of ChatGPT in Identifying Relevant Nephrology Literature: Findings and Implications'
)
assert test_string in output | null |
_identifying_params | """Get the identifying parameters."""
return {**{'model_name': self.model_name}, **{'gpu': self.gpu}, **{'lang':
self.lang}, **self._default_params} | @property
def _identifying_params(self) ->Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{'model_name': self.model_name}, **{'gpu': self.gpu}, **{
'lang': self.lang}, **self._default_params} | Get the identifying parameters. |
on_llm_start | for prompt in prompts:
self.prompt_records.append(prompt.replace('\n', '')) | def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **
kwargs: Any) ->None:
for prompt in prompts:
self.prompt_records.append(prompt.replace('\n', '')) | null |
_stream | request = Requests(headers=self._headers())
response = request.post(url=self._url(), data=self._body(prompt, {**kwargs,
'stream': True}))
self._handle_status(response.status_code, response.text)
for line in _parse_stream(response.iter_lines()):
chunk = _handle_sse_line(line)
if chunk:
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text) | def _stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[
GenerationChunk]:
request = Requests(headers=self._headers())
response = request.post(url=self._url(), data=self._body(prompt, {**
kwargs, 'stream': True}))
self._handle_status(response.status_code, response.text)
for line in _parse_stream(response.iter_lines()):
chunk = _handle_sse_line(line)
if chunk:
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text) | null |
_import_azuresearch | from langchain_community.vectorstores.azuresearch import AzureSearch
return AzureSearch | def _import_azuresearch() ->Any:
from langchain_community.vectorstores.azuresearch import AzureSearch
return AzureSearch | null |
format_messages | base_prompt = SystemMessage(content=self.construct_full_prompt(kwargs['goals'])
)
time_prompt = SystemMessage(content=
f"The current time and date is {time.strftime('%c')}")
used_tokens = self.token_counter(cast(str, base_prompt.content)
) + self.token_counter(cast(str, time_prompt.content))
memory: VectorStoreRetriever = kwargs['memory']
previous_messages = kwargs['messages']
relevant_docs = memory.get_relevant_documents(str(previous_messages[-10:]))
relevant_memory = [d.page_content for d in relevant_docs]
relevant_memory_tokens = sum([self.token_counter(doc) for doc in
relevant_memory])
while used_tokens + relevant_memory_tokens > 2500:
relevant_memory = relevant_memory[:-1]
relevant_memory_tokens = sum([self.token_counter(doc) for doc in
relevant_memory])
content_format = f"""This reminds you of these events from your past:
{relevant_memory}
"""
memory_message = SystemMessage(content=content_format)
used_tokens += self.token_counter(cast(str, memory_message.content))
historical_messages: List[BaseMessage] = []
for message in previous_messages[-10:][::-1]:
message_tokens = self.token_counter(message.content)
if used_tokens + message_tokens > self.send_token_limit - 1000:
break
historical_messages = [message] + historical_messages
used_tokens += message_tokens
input_message = HumanMessage(content=kwargs['user_input'])
messages: List[BaseMessage] = [base_prompt, time_prompt, memory_message]
messages += historical_messages
messages.append(input_message)
return messages | def format_messages(self, **kwargs: Any) ->List[BaseMessage]:
base_prompt = SystemMessage(content=self.construct_full_prompt(kwargs[
'goals']))
time_prompt = SystemMessage(content=
f"The current time and date is {time.strftime('%c')}")
used_tokens = self.token_counter(cast(str, base_prompt.content)
) + self.token_counter(cast(str, time_prompt.content))
memory: VectorStoreRetriever = kwargs['memory']
previous_messages = kwargs['messages']
relevant_docs = memory.get_relevant_documents(str(previous_messages[-10:]))
relevant_memory = [d.page_content for d in relevant_docs]
relevant_memory_tokens = sum([self.token_counter(doc) for doc in
relevant_memory])
while used_tokens + relevant_memory_tokens > 2500:
relevant_memory = relevant_memory[:-1]
relevant_memory_tokens = sum([self.token_counter(doc) for doc in
relevant_memory])
content_format = (
f'This reminds you of these events from your past:\n{relevant_memory}\n\n'
)
memory_message = SystemMessage(content=content_format)
used_tokens += self.token_counter(cast(str, memory_message.content))
historical_messages: List[BaseMessage] = []
for message in previous_messages[-10:][::-1]:
message_tokens = self.token_counter(message.content)
if used_tokens + message_tokens > self.send_token_limit - 1000:
break
historical_messages = [message] + historical_messages
used_tokens += message_tokens
input_message = HumanMessage(content=kwargs['user_input'])
messages: List[BaseMessage] = [base_prompt, time_prompt, memory_message]
messages += historical_messages
messages.append(input_message)
return messages | null |
memory_variables | return [self.memory_key] | @property
def memory_variables(self) ->List[str]:
return [self.memory_key] | null |
encode | return '[encoded]' + to_encode | def encode(self, to_encode: str) ->str:
return '[encoded]' + to_encode | null |
return_values | """Return values of the agent."""
return [] | @property
def return_values(self) ->List[str]:
"""Return values of the agent."""
return [] | Return values of the agent. |
test_generate_stream | """Test valid call to volc engine."""
llm = VolcEngineMaasLLM(streaming=True)
output = llm.stream('tell me a joke')
assert isinstance(output, Generator) | def test_generate_stream() ->None:
"""Test valid call to volc engine."""
llm = VolcEngineMaasLLM(streaming=True)
output = llm.stream('tell me a joke')
assert isinstance(output, Generator) | Test valid call to volc engine. |
delete_document_by_id | """Removes a Specific Document by Id
Args:
document_id: The document identifier
"""
try:
from bson.objectid import ObjectId
except ImportError as e:
raise ImportError(
'Unable to import bson, please install with `pip install bson`.'
) from e
if document_id is None:
raise ValueError('No document id provided to delete.')
self._collection.delete_one({'_id': ObjectId(document_id)}) | def delete_document_by_id(self, document_id: Optional[str]=None) ->None:
"""Removes a Specific Document by Id
Args:
document_id: The document identifier
"""
try:
from bson.objectid import ObjectId
except ImportError as e:
raise ImportError(
'Unable to import bson, please install with `pip install bson`.'
) from e
if document_id is None:
raise ValueError('No document id provided to delete.')
self._collection.delete_one({'_id': ObjectId(document_id)}) | Removes a Specific Document by Id
Args:
document_id: The document identifier |
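A minimal sketch; `store` is the Mongo-backed store holding `_collection`, and the id is an illustrative 24-character hex ObjectId string.
store.delete_document_by_id("5f1d7f7f7f7f7f7f7f7f7f7f")  # stringified ObjectId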
init_hnsw | from docarray import BaseDoc
from docarray.index import HnswDocumentIndex
class MyDoc(BaseDoc):
title: str
title_embedding: NdArray[32]
other_emb: NdArray[32]
year: int
embeddings = FakeEmbeddings(size=32)
hnsw_db = HnswDocumentIndex[MyDoc](work_dir=tmp_path)
hnsw_db.index([MyDoc(title=f'My document {i}', title_embedding=np.array(
embeddings.embed_query(f'fake emb {i}')), other_emb=np.array(embeddings
.embed_query(f'other fake emb {i}')), year=i) for i in range(100)])
filter_query = {'year': {'$lte': 90}}
return hnsw_db, filter_query, embeddings | @pytest.fixture
def init_hnsw(tmp_path: Path) ->Tuple[HnswDocumentIndex, Dict[str, Any],
FakeEmbeddings]:
from docarray import BaseDoc
from docarray.index import HnswDocumentIndex
class MyDoc(BaseDoc):
title: str
title_embedding: NdArray[32]
other_emb: NdArray[32]
year: int
embeddings = FakeEmbeddings(size=32)
hnsw_db = HnswDocumentIndex[MyDoc](work_dir=tmp_path)
hnsw_db.index([MyDoc(title=f'My document {i}', title_embedding=np.array
(embeddings.embed_query(f'fake emb {i}')), other_emb=np.array(
embeddings.embed_query(f'other fake emb {i}')), year=i) for i in
range(100)])
filter_query = {'year': {'$lte': 90}}
return hnsw_db, filter_query, embeddings | null |
refresh_schema | """
Refreshes the Neo4j graph schema information.
"""
node_properties = [el['output'] for el in self.query(node_properties_query)]
rel_properties = [el['output'] for el in self.query(rel_properties_query)]
relationships = [el['output'] for el in self.query(rel_query)]
self.structured_schema = {'node_props': {el['labels']: el['properties'] for
el in node_properties}, 'rel_props': {el['type']: el['properties'] for
el in rel_properties}, 'relationships': relationships}
formatted_node_props = []
for el in node_properties:
props_str = ', '.join([f"{prop['property']}: {prop['type']}" for prop in
el['properties']])
formatted_node_props.append(f"{el['labels']} {{{props_str}}}")
formatted_rel_props = []
for el in rel_properties:
props_str = ', '.join([f"{prop['property']}: {prop['type']}" for prop in
el['properties']])
formatted_rel_props.append(f"{el['type']} {{{props_str}}}")
formatted_rels = [f"(:{el['start']})-[:{el['type']}]->(:{el['end']})" for
el in relationships]
self.schema = '\n'.join(['Node properties are the following:', ','.join(
formatted_node_props), 'Relationship properties are the following:',
','.join(formatted_rel_props), 'The relationships are the following:',
','.join(formatted_rels)]) | def refresh_schema(self) ->None:
"""
Refreshes the Neo4j graph schema information.
"""
node_properties = [el['output'] for el in self.query(node_properties_query)]
rel_properties = [el['output'] for el in self.query(rel_properties_query)]
relationships = [el['output'] for el in self.query(rel_query)]
self.structured_schema = {'node_props': {el['labels']: el['properties'] for
el in node_properties}, 'rel_props': {el['type']: el['properties'] for
el in rel_properties}, 'relationships': relationships}
formatted_node_props = []
for el in node_properties:
props_str = ', '.join([f"{prop['property']}: {prop['type']}" for
prop in el['properties']])
formatted_node_props.append(f"{el['labels']} {{{props_str}}}")
formatted_rel_props = []
for el in rel_properties:
props_str = ', '.join([f"{prop['property']}: {prop['type']}" for
prop in el['properties']])
formatted_rel_props.append(f"{el['type']} {{{props_str}}}")
formatted_rels = [f"(:{el['start']})-[:{el['type']}]->(:{el['end']})" for
el in relationships]
self.schema = '\n'.join(['Node properties are the following:',
    ','.join(formatted_node_props),
    'Relationship properties are the following:',
    ','.join(formatted_rel_props),
    'The relationships are the following:',
    ','.join(formatted_rels)]) | Refreshes the Neo4j graph schema information.
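A sketch of refreshing and inspecting the schema; the connection details are placeholders, the constructor arguments are assumed, and the rendered output shown in comments uses invented labels.
graph = Neo4jGraph(url="bolt://localhost:7687", username="neo4j", password="<password>")  # args assumed
graph.refresh_schema()
print(graph.schema)
# Node properties are the following:
# Person {name: STRING, born: INTEGER}
# Relationship properties are the following:
# ACTED_IN {roles: LIST}
# The relationships are the following:
# (:Person)-[:ACTED_IN]->(:Movie)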
list | if item is None:
return []
return list(item) | def list(self, item: Any) ->list:
if item is None:
return []
return list(item) | null |
lazy_load | """Load bibtex file using bibtexparser and get the article texts plus the
article metadata.
See https://bibtexparser.readthedocs.io/en/master/
Returns:
a list of documents with the document.page_content in text format
"""
try:
import fitz
except ImportError:
raise ImportError(
'PyMuPDF package not found, please install it with `pip install pymupdf`'
)
entries = self.parser.load_bibtex_entries(self.file_path)
if self.max_docs:
entries = entries[:self.max_docs]
for entry in entries:
doc = self._load_entry(entry)
if doc:
yield doc | def lazy_load(self) ->Iterator[Document]:
"""Load bibtex file using bibtexparser and get the article texts plus the
article metadata.
See https://bibtexparser.readthedocs.io/en/master/
Returns:
a list of documents with the document.page_content in text format
"""
try:
import fitz
except ImportError:
raise ImportError(
'PyMuPDF package not found, please install it with `pip install pymupdf`'
)
entries = self.parser.load_bibtex_entries(self.file_path)
if self.max_docs:
entries = entries[:self.max_docs]
for entry in entries:
doc = self._load_entry(entry)
if doc:
yield doc | Load bibtex file using bibtexparser and get the article texts plus the
article metadata.
See https://bibtexparser.readthedocs.io/en/master/
Returns:
a list of documents with the document.page_content in text format |
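A closing usage sketch; the loader class name and the .bib path are assumptions, and PyMuPDF must be installed for the import check above to pass.
loader = BibtexLoader("references.bib")  # class name and path assumed
for doc in loader.lazy_load():
    print(doc.metadata.get("title"), len(doc.page_content))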