Dataset columns (all string-valued, with value-length ranges):
method_name: string, lengths 1 to 78
method_body: string, lengths 3 to 9.66k
full_code: string, lengths 31 to 10.7k
docstring: string, lengths 4 to 4.74k
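For orientation, here is a minimal sketch of reading rows that follow this schema. The JSON Lines file name rows.jsonl and the one-object-per-line layout are assumptions for illustration, not part of the original dump.

import json

# Minimal sketch: iterate over rows with the four string columns above.
# Assumes a hypothetical rows.jsonl file, one JSON object per line.
with open("rows.jsonl", "r", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        name = row["method_name"]   # e.g. "test_load_success"
        body = row["method_body"]   # method body source only
        full = row["full_code"]     # decorator + signature + body
        doc = row.get("docstring")  # None/null when the method has no docstring
        print(name, len(full), doc is not None)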
test_load_success
"""Test that returns one document""" loader = BibtexLoader(file_path=str(BIBTEX_EXAMPLE_FILE)) docs = loader.load() assert len(docs) == 1 doc = docs[0] assert doc.page_content assert set(doc.metadata) == {'id', 'published_year', 'title', 'publication', 'authors', 'abstract'}
@pytest.mark.requires('fitz', 'bibtexparser') def test_load_success() ->None: """Test that returns one document""" loader = BibtexLoader(file_path=str(BIBTEX_EXAMPLE_FILE)) docs = loader.load() assert len(docs) == 1 doc = docs[0] assert doc.page_content assert set(doc.metadata) == {'id', 'published_year', 'title', 'publication', 'authors', 'abstract'}
Test that returns one document
parse
expected_keys = [rs.name for rs in self.response_schemas] return parse_and_check_json_markdown(text, expected_keys)
def parse(self, text: str) ->Any: expected_keys = [rs.name for rs in self.response_schemas] return parse_and_check_json_markdown(text, expected_keys)
null
_default_params
"""Get the default parameters for calling text generation inference API.""" return {'max_new_tokens': self.max_new_tokens, 'top_k': self.top_k, 'top_p': self.top_p, 'typical_p': self.typical_p, 'temperature': self. temperature, 'repetition_penalty': self.repetition_penalty, 'return_full_text': self.return_full_text, 'truncate': self.truncate, 'stop_sequences': self.stop_sequences, 'seed': self.seed, 'do_sample': self.do_sample, 'watermark': self.watermark, **self.model_kwargs}
@property def _default_params(self) ->Dict[str, Any]: """Get the default parameters for calling text generation inference API.""" return {'max_new_tokens': self.max_new_tokens, 'top_k': self.top_k, 'top_p': self.top_p, 'typical_p': self.typical_p, 'temperature': self.temperature, 'repetition_penalty': self.repetition_penalty, 'return_full_text': self.return_full_text, 'truncate': self. truncate, 'stop_sequences': self.stop_sequences, 'seed': self.seed, 'do_sample': self.do_sample, 'watermark': self.watermark, **self. model_kwargs}
Get the default parameters for calling text generation inference API.
__init__
""" Initialize the loader with a list of URL paths. Args: urls (List[str]): A list of URLs to scrape content from. Raises: ImportError: If the required 'playwright' package is not installed. """ self.urls = urls try: import playwright except ImportError: raise ImportError( 'playwright is required for AsyncChromiumLoader. Please install it with `pip install playwright`.' )
def __init__(self, urls: List[str]): """ Initialize the loader with a list of URL paths. Args: urls (List[str]): A list of URLs to scrape content from. Raises: ImportError: If the required 'playwright' package is not installed. """ self.urls = urls try: import playwright except ImportError: raise ImportError( 'playwright is required for AsyncChromiumLoader. Please install it with `pip install playwright`.' )
Initialize the loader with a list of URL paths. Args: urls (List[str]): A list of URLs to scrape content from. Raises: ImportError: If the required 'playwright' package is not installed.
save_json
"""Save dict to local file path. Parameters: data (dict): The dictionary to be saved. file_path (str): Local file path. """ with open(file_path, 'w') as outfile: json.dump(data, outfile)
def save_json(data: dict, file_path: str) ->None: """Save dict to local file path. Parameters: data (dict): The dictionary to be saved. file_path (str): Local file path. """ with open(file_path, 'w') as outfile: json.dump(data, outfile)
Save dict to local file path. Parameters: data (dict): The dictionary to be saved. file_path (str): Local file path.
lazy_load
""" Lazy load the chat sessions from the Slack dump file and yield them in the required format. :return: Iterator of chat sessions containing messages. """ with zipfile.ZipFile(str(self.zip_path), 'r') as zip_file: for file_path in zip_file.namelist(): if file_path.endswith('.json'): messages = self._read_json(zip_file, file_path) yield self._load_single_chat_session(messages)
def lazy_load(self) ->Iterator[ChatSession]: """ Lazy load the chat sessions from the Slack dump file and yield them in the required format. :return: Iterator of chat sessions containing messages. """ with zipfile.ZipFile(str(self.zip_path), 'r') as zip_file: for file_path in zip_file.namelist(): if file_path.endswith('.json'): messages = self._read_json(zip_file, file_path) yield self._load_single_chat_session(messages)
Lazy load the chat sessions from the Slack dump file and yield them in the required format. :return: Iterator of chat sessions containing messages.
_create_retry_decorator
"""Creates a retry decorator for Vertex / Palm LLMs.""" errors = [google.api_core.exceptions.ResourceExhausted, google.api_core. exceptions.ServiceUnavailable, google.api_core.exceptions.Aborted, google.api_core.exceptions.DeadlineExceeded, google.api_core.exceptions .GoogleAPIError] decorator = create_base_retry_decorator(error_types=errors, max_retries= max_retries, run_manager=run_manager) return decorator
def _create_retry_decorator(llm: BaseLLM, *, max_retries: int=1, run_manager: Optional[Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]]=None) ->Callable[[Any], Any]: """Creates a retry decorator for Vertex / Palm LLMs.""" errors = [google.api_core.exceptions.ResourceExhausted, google.api_core .exceptions.ServiceUnavailable, google.api_core.exceptions.Aborted, google.api_core.exceptions.DeadlineExceeded, google.api_core. exceptions.GoogleAPIError] decorator = create_base_retry_decorator(error_types=errors, max_retries =max_retries, run_manager=run_manager) return decorator
Creates a retry decorator for Vertex / Palm LLMs.
_import_forefrontai
from langchain_community.llms.forefrontai import ForefrontAI return ForefrontAI
def _import_forefrontai() ->Any: from langchain_community.llms.forefrontai import ForefrontAI return ForefrontAI
null
_import_openai_chat
from langchain_community.llms.openai import OpenAIChat return OpenAIChat
def _import_openai_chat() ->Any: from langchain_community.llms.openai import OpenAIChat return OpenAIChat
null
_get_invoke_url
"""Helper method to get invoke URL from a model name, URL, or endpoint stub""" if not invoke_url: if not model_name: raise ValueError('URL or model name must be specified to invoke') if model_name in self.available_models: invoke_url = self.available_models[model_name] elif f'playground_{model_name}' in self.available_models: invoke_url = self.available_models[f'playground_{model_name}'] else: available_models_str = '\n'.join([f'{k} - {v}' for k, v in self. available_models.items()]) raise ValueError( f"""Unknown model name {model_name} specified. Available models are: {available_models_str}""" ) if not invoke_url: raise ValueError('URL or model name must be specified to invoke') if 'http' not in invoke_url: invoke_url = f'{self.call_invoke_base}/{invoke_url}' return invoke_url
def _get_invoke_url(self, model_name: Optional[str]=None, invoke_url: Optional[str]=None) ->str: """Helper method to get invoke URL from a model name, URL, or endpoint stub""" if not invoke_url: if not model_name: raise ValueError('URL or model name must be specified to invoke') if model_name in self.available_models: invoke_url = self.available_models[model_name] elif f'playground_{model_name}' in self.available_models: invoke_url = self.available_models[f'playground_{model_name}'] else: available_models_str = '\n'.join([f'{k} - {v}' for k, v in self .available_models.items()]) raise ValueError( f"""Unknown model name {model_name} specified. Available models are: {available_models_str}""" ) if not invoke_url: raise ValueError('URL or model name must be specified to invoke') if 'http' not in invoke_url: invoke_url = f'{self.call_invoke_base}/{invoke_url}' return invoke_url
Helper method to get invoke URL from a model name, URL, or endpoint stub
get_type_to_cls_dict
return {'ai21': _import_ai21, 'aleph_alpha': _import_aleph_alpha, 'amazon_api_gateway': _import_amazon_api_gateway, 'amazon_bedrock': _import_bedrock, 'anthropic': _import_anthropic, 'anyscale': _import_anyscale, 'arcee': _import_arcee, 'aviary': _import_aviary, 'azure': _import_azure_openai, 'azureml_endpoint': _import_azureml_endpoint, 'bananadev': _import_bananadev, 'baseten': _import_baseten, 'beam': _import_beam, 'cerebriumai': _import_cerebriumai, 'chat_glm': _import_chatglm, 'clarifai': _import_clarifai, 'cohere': _import_cohere, 'ctransformers': _import_ctransformers, 'ctranslate2': _import_ctranslate2, 'databricks': _import_databricks, 'databricks-chat': _import_databricks_chat, 'deepinfra': _import_deepinfra, 'deepsparse': _import_deepsparse, 'edenai': _import_edenai, 'fake-list': _import_fake, 'forefrontai': _import_forefrontai, 'giga-chat-model': _import_gigachat, 'google_palm': _import_google_palm, 'gooseai': _import_gooseai, 'gradient': _import_gradient_ai, 'gpt4all': _import_gpt4all, 'huggingface_endpoint': _import_huggingface_endpoint, 'huggingface_hub': _import_huggingface_hub, 'huggingface_pipeline': _import_huggingface_pipeline, 'huggingface_textgen_inference': _import_huggingface_text_gen_inference, 'human-input': _import_human, 'koboldai': _import_koboldai, 'llamacpp': _import_llamacpp, 'textgen': _import_textgen, 'minimax': _import_minimax, 'mlflow': _import_mlflow, 'mlflow-chat': _import_mlflow_chat, 'mlflow-ai-gateway': _import_mlflow_ai_gateway, 'modal': _import_modal, 'mosaic': _import_mosaicml, 'nebula': _import_symblai_nebula, 'nibittensor': _import_bittensor, 'nlpcloud': _import_nlpcloud, 'ollama': _import_ollama, 'openai': _import_openai, 'openlm': _import_openlm, 'pai_eas_endpoint': _import_pai_eas_endpoint, 'petals': _import_petals, 'pipelineai': _import_pipelineai, 'predibase': _import_predibase, 'opaqueprompts': _import_opaqueprompts, 'replicate': _import_replicate, 'rwkv': _import_rwkv, 'sagemaker_endpoint': _import_sagemaker_endpoint, 'self_hosted': _import_self_hosted, 'self_hosted_hugging_face': _import_self_hosted_hugging_face, 'stochasticai': _import_stochasticai, 'together': _import_together, 'tongyi': _import_tongyi, 'titan_takeoff': _import_titan_takeoff, 'titan_takeoff_pro': _import_titan_takeoff_pro, 'vertexai': _import_vertex, 'vertexai_model_garden': _import_vertex_model_garden, 'openllm': _import_openllm, 'openllm_client': _import_openllm, 'vllm': _import_vllm, 'vllm_openai': _import_vllm_openai, 'watsonxllm': _import_watsonxllm, 'writer': _import_writer, 'xinference': _import_xinference, 'javelin-ai-gateway': _import_javelin_ai_gateway, 'qianfan_endpoint': _import_baidu_qianfan_endpoint, 'yandex_gpt': _import_yandex_gpt, 'VolcEngineMaasLLM': _import_volcengine_maas}
def get_type_to_cls_dict() ->Dict[str, Callable[[], Type[BaseLLM]]]: return {'ai21': _import_ai21, 'aleph_alpha': _import_aleph_alpha, 'amazon_api_gateway': _import_amazon_api_gateway, 'amazon_bedrock': _import_bedrock, 'anthropic': _import_anthropic, 'anyscale': _import_anyscale, 'arcee': _import_arcee, 'aviary': _import_aviary, 'azure': _import_azure_openai, 'azureml_endpoint': _import_azureml_endpoint, 'bananadev': _import_bananadev, 'baseten': _import_baseten, 'beam': _import_beam, 'cerebriumai': _import_cerebriumai, 'chat_glm': _import_chatglm, 'clarifai': _import_clarifai, 'cohere': _import_cohere, 'ctransformers': _import_ctransformers, 'ctranslate2': _import_ctranslate2, 'databricks': _import_databricks, 'databricks-chat': _import_databricks_chat, 'deepinfra': _import_deepinfra, 'deepsparse': _import_deepsparse, 'edenai': _import_edenai, 'fake-list': _import_fake, 'forefrontai': _import_forefrontai, 'giga-chat-model': _import_gigachat, 'google_palm': _import_google_palm, 'gooseai': _import_gooseai, 'gradient': _import_gradient_ai, 'gpt4all': _import_gpt4all, 'huggingface_endpoint': _import_huggingface_endpoint, 'huggingface_hub': _import_huggingface_hub, 'huggingface_pipeline': _import_huggingface_pipeline, 'huggingface_textgen_inference': _import_huggingface_text_gen_inference, 'human-input': _import_human, 'koboldai': _import_koboldai, 'llamacpp': _import_llamacpp, 'textgen': _import_textgen, 'minimax': _import_minimax, 'mlflow': _import_mlflow, 'mlflow-chat': _import_mlflow_chat, 'mlflow-ai-gateway': _import_mlflow_ai_gateway, 'modal': _import_modal, 'mosaic': _import_mosaicml, 'nebula': _import_symblai_nebula, 'nibittensor': _import_bittensor, 'nlpcloud': _import_nlpcloud, 'ollama': _import_ollama, 'openai': _import_openai, 'openlm': _import_openlm, 'pai_eas_endpoint': _import_pai_eas_endpoint, 'petals': _import_petals, 'pipelineai': _import_pipelineai, 'predibase': _import_predibase, 'opaqueprompts': _import_opaqueprompts, 'replicate': _import_replicate, 'rwkv': _import_rwkv, 'sagemaker_endpoint': _import_sagemaker_endpoint, 'self_hosted': _import_self_hosted, 'self_hosted_hugging_face': _import_self_hosted_hugging_face, 'stochasticai': _import_stochasticai, 'together': _import_together, 'tongyi': _import_tongyi, 'titan_takeoff': _import_titan_takeoff, 'titan_takeoff_pro': _import_titan_takeoff_pro, 'vertexai': _import_vertex, 'vertexai_model_garden': _import_vertex_model_garden, 'openllm': _import_openllm, 'openllm_client': _import_openllm, 'vllm': _import_vllm, 'vllm_openai': _import_vllm_openai, 'watsonxllm': _import_watsonxllm, 'writer': _import_writer, 'xinference': _import_xinference, 'javelin-ai-gateway': _import_javelin_ai_gateway, 'qianfan_endpoint': _import_baidu_qianfan_endpoint, 'yandex_gpt': _import_yandex_gpt, 'VolcEngineMaasLLM': _import_volcengine_maas}
null
create
...
@overload @staticmethod def create(messages: Sequence[Dict[str, Any]], *, provider: str='ChatOpenAI', stream: Literal[False]=False, **kwargs: Any) ->dict: ...
null
_default_params
"""Get the default parameters for calling Writer API.""" return {'minTokens': self.min_tokens, 'maxTokens': self.max_tokens, 'temperature': self.temperature, 'topP': self.top_p, 'stop': self.stop, 'presencePenalty': self.presence_penalty, 'repetitionPenalty': self. repetition_penalty, 'bestOf': self.best_of, 'logprobs': self.logprobs, 'n': self.n}
@property def _default_params(self) ->Mapping[str, Any]: """Get the default parameters for calling Writer API.""" return {'minTokens': self.min_tokens, 'maxTokens': self.max_tokens, 'temperature': self.temperature, 'topP': self.top_p, 'stop': self. stop, 'presencePenalty': self.presence_penalty, 'repetitionPenalty': self.repetition_penalty, 'bestOf': self.best_of, 'logprobs': self. logprobs, 'n': self.n}
Get the default parameters for calling Writer API.
_scrape
from bs4 import BeautifulSoup if parser is None: if url.endswith('.xml'): parser = 'xml' else: parser = self.default_parser self._check_parser(parser) html_doc = self._fetch_valid_connection_docs(url) if not getattr(html_doc, 'ok', False): return None if self.raise_for_status: html_doc.raise_for_status() if self.encoding is not None: html_doc.encoding = self.encoding elif self.autoset_encoding: html_doc.encoding = html_doc.apparent_encoding return BeautifulSoup(html_doc.text, parser, **bs_kwargs or {})
def _scrape(self, url: str, parser: Union[str, None]=None, bs_kwargs: Optional[dict]=None) ->Any: from bs4 import BeautifulSoup if parser is None: if url.endswith('.xml'): parser = 'xml' else: parser = self.default_parser self._check_parser(parser) html_doc = self._fetch_valid_connection_docs(url) if not getattr(html_doc, 'ok', False): return None if self.raise_for_status: html_doc.raise_for_status() if self.encoding is not None: html_doc.encoding = self.encoding elif self.autoset_encoding: html_doc.encoding = html_doc.apparent_encoding return BeautifulSoup(html_doc.text, parser, **bs_kwargs or {})
null
lazy_load
for doc in self.load(): yield doc
def lazy_load(self) ->Iterator[Document]: for doc in self.load(): yield doc
null
test__validate_example_inputs_for_chain_input_mapper
mock_ = mock.MagicMock() mock_.inputs = {'foo': 'bar', 'baz': 'qux'} chain = mock.MagicMock() chain.input_keys = ['not foo', 'not baz', 'not qux'] def wrong_output_format(inputs: dict) ->str: assert 'foo' in inputs assert 'baz' in inputs return 'hehe' with pytest.raises(InputFormatError, match='must be a dictionary'): _validate_example_inputs_for_chain(mock_, chain, wrong_output_format) def wrong_output_keys(inputs: dict) ->dict: assert 'foo' in inputs assert 'baz' in inputs return {'not foo': 'foo', 'not baz': 'baz'} with pytest.raises(InputFormatError, match='Missing keys after loading example' ): _validate_example_inputs_for_chain(mock_, chain, wrong_output_keys) def input_mapper(inputs: dict) ->dict: assert 'foo' in inputs assert 'baz' in inputs return {'not foo': inputs['foo'], 'not baz': inputs['baz'], 'not qux': 'qux'} _validate_example_inputs_for_chain(mock_, chain, input_mapper)
def test__validate_example_inputs_for_chain_input_mapper() ->None: mock_ = mock.MagicMock() mock_.inputs = {'foo': 'bar', 'baz': 'qux'} chain = mock.MagicMock() chain.input_keys = ['not foo', 'not baz', 'not qux'] def wrong_output_format(inputs: dict) ->str: assert 'foo' in inputs assert 'baz' in inputs return 'hehe' with pytest.raises(InputFormatError, match='must be a dictionary'): _validate_example_inputs_for_chain(mock_, chain, wrong_output_format) def wrong_output_keys(inputs: dict) ->dict: assert 'foo' in inputs assert 'baz' in inputs return {'not foo': 'foo', 'not baz': 'baz'} with pytest.raises(InputFormatError, match= 'Missing keys after loading example'): _validate_example_inputs_for_chain(mock_, chain, wrong_output_keys) def input_mapper(inputs: dict) ->dict: assert 'foo' in inputs assert 'baz' in inputs return {'not foo': inputs['foo'], 'not baz': inputs['baz'], 'not qux': 'qux'} _validate_example_inputs_for_chain(mock_, chain, input_mapper)
null
yield_keys
"""Get an iterator over keys that match the given prefix. Args: prefix (str): The prefix to match. Returns: Iterator[K | str]: An iterator over keys that match the given prefix. This method is allowed to return an iterator over either K or str depending on what makes more sense for the given store. """
@abstractmethod def yield_keys(self, *, prefix: Optional[str]=None) ->Union[Iterator[K], Iterator[str]]: """Get an iterator over keys that match the given prefix. Args: prefix (str): The prefix to match. Returns: Iterator[K | str]: An iterator over keys that match the given prefix. This method is allowed to return an iterator over either K or str depending on what makes more sense for the given store. """
Get an iterator over keys that match the given prefix. Args: prefix (str): The prefix to match. Returns: Iterator[K | str]: An iterator over keys that match the given prefix. This method is allowed to return an iterator over either K or str depending on what makes more sense for the given store.
check_only_one_provider_selected
""" This tool has no feature to combine providers results. Therefore we only allow one provider """ if len(v) > 1: raise ValueError( 'Please select only one provider. The feature to combine providers results is not available for this tool.' ) return v
@validator('providers') def check_only_one_provider_selected(cls, v: List[str]) ->List[str]: """ This tool has no feature to combine providers results. Therefore we only allow one provider """ if len(v) > 1: raise ValueError( 'Please select only one provider. The feature to combine providers results is not available for this tool.' ) return v
This tool has no feature to combine providers results. Therefore we only allow one provider
validate
redact = config.get('redact') return self._detect_pii(prompt_value=prompt_value, config=config) if redact else self._contains_pii(prompt_value=prompt_value, config=config)
def validate(self, prompt_value: str, config: Any=None) ->str: redact = config.get('redact') return self._detect_pii(prompt_value=prompt_value, config=config) if redact else self._contains_pii(prompt_value=prompt_value, config=config)
null
test_tracer_llm_run_on_error_callback
"""Test tracer on an LLM run with an error and a callback.""" exception = Exception('test') uuid = uuid4() compare_run = Run(id=str(uuid), start_time=datetime.now(timezone.utc), end_time=datetime.now(timezone.utc), events=[{'name': 'start', 'time': datetime.now(timezone.utc)}, {'name': 'error', 'time': datetime.now( timezone.utc)}], extra={}, execution_order=1, child_execution_order=1, serialized=SERIALIZED, inputs=dict(prompts=[]), outputs=None, error= repr(exception), run_type='llm', trace_id=uuid, dotted_order= f'20230101T000000000000Z{uuid}') class FakeTracerWithLlmErrorCallback(FakeTracer): error_run = None def _on_llm_error(self, run: Run) ->None: self.error_run = run tracer = FakeTracerWithLlmErrorCallback() tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid) tracer.on_llm_error(exception, run_id=uuid) assert tracer.error_run is not None _compare_run_with_error(tracer.error_run, compare_run)
@freeze_time('2023-01-01') def test_tracer_llm_run_on_error_callback() ->None: """Test tracer on an LLM run with an error and a callback.""" exception = Exception('test') uuid = uuid4() compare_run = Run(id=str(uuid), start_time=datetime.now(timezone.utc), end_time=datetime.now(timezone.utc), events=[{'name': 'start', 'time': datetime.now(timezone.utc)}, {'name': 'error', 'time': datetime.now(timezone.utc)}], extra={}, execution_order=1, child_execution_order=1, serialized=SERIALIZED, inputs=dict(prompts =[]), outputs=None, error=repr(exception), run_type='llm', trace_id =uuid, dotted_order=f'20230101T000000000000Z{uuid}') class FakeTracerWithLlmErrorCallback(FakeTracer): error_run = None def _on_llm_error(self, run: Run) ->None: self.error_run = run tracer = FakeTracerWithLlmErrorCallback() tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid) tracer.on_llm_error(exception, run_id=uuid) assert tracer.error_run is not None _compare_run_with_error(tracer.error_run, compare_run)
Test tracer on an LLM run with an error and a callback.
max_marginal_relevance_search_by_vector
"""Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents selected by maximal marginal relevance. """ results = self.__query_collection(query_embeddings=embedding, n_results= fetch_k, where=filter, where_document=where_document, include=[ 'metadatas', 'documents', 'distances', 'embeddings'], **kwargs) mmr_selected = maximal_marginal_relevance(np.array(embedding, dtype=np. float32), results['embeddings'][0], k=k, lambda_mult=lambda_mult) candidates = _results_to_docs(results) selected_results = [r for i, r in enumerate(candidates) if i in mmr_selected] return selected_results
def max_marginal_relevance_search_by_vector(self, embedding: List[float], k: int=DEFAULT_K, fetch_k: int=20, lambda_mult: float=0.5, filter: Optional[Dict[str, str]]=None, where_document: Optional[Dict[str, str]] =None, **kwargs: Any) ->List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents selected by maximal marginal relevance. """ results = self.__query_collection(query_embeddings=embedding, n_results =fetch_k, where=filter, where_document=where_document, include=[ 'metadatas', 'documents', 'distances', 'embeddings'], **kwargs) mmr_selected = maximal_marginal_relevance(np.array(embedding, dtype=np. float32), results['embeddings'][0], k=k, lambda_mult=lambda_mult) candidates = _results_to_docs(results) selected_results = [r for i, r in enumerate(candidates) if i in mmr_selected] return selected_results
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents selected by maximal marginal relevance.
on_chain_end
pass
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) ->None: pass
null
get_tools
"""Get the tools in the toolkit.""" tools: List[BaseTool] = [AzureCogsFormRecognizerTool(), AzureCogsSpeech2TextTool(), AzureCogsText2SpeechTool(), AzureCogsTextAnalyticsHealthTool()] if sys.platform.startswith('linux') or sys.platform.startswith('win'): tools.append(AzureCogsImageAnalysisTool()) return tools
def get_tools(self) ->List[BaseTool]: """Get the tools in the toolkit.""" tools: List[BaseTool] = [AzureCogsFormRecognizerTool(), AzureCogsSpeech2TextTool(), AzureCogsText2SpeechTool(), AzureCogsTextAnalyticsHealthTool()] if sys.platform.startswith('linux') or sys.platform.startswith('win'): tools.append(AzureCogsImageAnalysisTool()) return tools
Get the tools in the toolkit.
_default_params
"""Get the default parameters for calling OpenAI API.""" return {'model': self.model, 'tokens_to_generate': self.max_tokens, 'temperature': self.temperature, 'top_p': self.top_p, **self.model_kwargs}
@property def _default_params(self) ->Dict[str, Any]: """Get the default parameters for calling OpenAI API.""" return {'model': self.model, 'tokens_to_generate': self.max_tokens, 'temperature': self.temperature, 'top_p': self.top_p, **self. model_kwargs}
Get the default parameters for calling OpenAI API.
prompt_safety_callback
return self.on_after_prompt_safety.__func__ is not BaseModerationCallbackHandler.on_after_prompt_safety
@property def prompt_safety_callback(self) ->bool: return (self.on_after_prompt_safety.__func__ is not BaseModerationCallbackHandler.on_after_prompt_safety)
null
load
"""Load documents from the Notion database. Returns: List[Document]: List of documents. """ page_summaries = self._retrieve_page_summaries() return list(self.load_page(page_summary) for page_summary in page_summaries)
def load(self) ->List[Document]: """Load documents from the Notion database. Returns: List[Document]: List of documents. """ page_summaries = self._retrieve_page_summaries() return list(self.load_page(page_summary) for page_summary in page_summaries )
Load documents from the Notion database. Returns: List[Document]: List of documents.
get_abbr
words = s.split(' ') first_letters = [word[0] for word in words] return ''.join(first_letters)
def get_abbr(s: str) ->str: words = s.split(' ') first_letters = [word[0] for word in words] return ''.join(first_letters)
null
_call
return super()._call(run_manager=run_manager, inputs=inputs)
def _call(self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun]=None) ->Dict[str, Any]: return super()._call(run_manager=run_manager, inputs=inputs)
null
_import_ainetwork_app
from langchain_community.tools.ainetwork.app import AINAppOps return AINAppOps
def _import_ainetwork_app() ->Any: from langchain_community.tools.ainetwork.app import AINAppOps return AINAppOps
null
test_load_valid_dict_content
file_path = '/workspaces/langchain/test.json' expected_docs = [Document(page_content='{"text": "value1"}', metadata={'source': file_path, 'seq_num': 1}), Document(page_content='{"text": "value2"}', metadata={'source': file_path, 'seq_num': 2})] mocker.patch('builtins.open', mocker.mock_open()) mocker.patch('pathlib.Path.read_text', return_value=""" [{"text": "value1"}, {"text": "value2"}] """) loader = JSONLoader(file_path=file_path, jq_schema='.[]', text_content=False) result = loader.load() assert result == expected_docs
def test_load_valid_dict_content(mocker: MockerFixture) ->None: file_path = '/workspaces/langchain/test.json' expected_docs = [Document(page_content='{"text": "value1"}', metadata={'source': file_path, 'seq_num': 1}), Document(page_content='{"text": "value2"}', metadata={'source': file_path, 'seq_num': 2})] mocker.patch('builtins.open', mocker.mock_open()) mocker.patch('pathlib.Path.read_text', return_value=""" [{"text": "value1"}, {"text": "value2"}] """) loader = JSONLoader(file_path=file_path, jq_schema='.[]', text_content=False) result = loader.load() assert result == expected_docs
null
test_tracing_session_env_var
from langchain.agents import AgentType, initialize_agent, load_tools os.environ['LANGCHAIN_WANDB_TRACING'] = 'true' llm = OpenAI(temperature=0) tools = load_tools(['llm-math', 'serpapi'], llm=llm) agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True) agent.run(questions[0])
def test_tracing_session_env_var() ->None: from langchain.agents import AgentType, initialize_agent, load_tools os.environ['LANGCHAIN_WANDB_TRACING'] = 'true' llm = OpenAI(temperature=0) tools = load_tools(['llm-math', 'serpapi'], llm=llm) agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True) agent.run(questions[0])
null
_default_params
return {'max_tokens': self.max_tokens, 'n_predict': self.n_predict, 'top_k': self.top_k, 'top_p': self.top_p, 'temp': self.temp, 'n_batch': self.n_batch, 'repeat_penalty': self.repeat_penalty, 'repeat_last_n': self.repeat_last_n}
def _default_params(self) ->Dict[str, Any]: return {'max_tokens': self.max_tokens, 'n_predict': self.n_predict, 'top_k': self.top_k, 'top_p': self.top_p, 'temp': self.temp, 'n_batch': self.n_batch, 'repeat_penalty': self.repeat_penalty, 'repeat_last_n': self.repeat_last_n}
null
test_runnable_branch_stream_with_callbacks
"""Verify that stream works for RunnableBranch when using callbacks.""" tracer = FakeTracer() def raise_value_error(x: str) ->Any: """Raise a value error.""" raise ValueError(f'x is {x}') llm_res = "i'm a textbot" llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01) branch = RunnableBranch[str, Any]((lambda x: x == 'error', raise_value_error), (lambda x: x == 'hello', llm), lambda x: x) config: RunnableConfig = {'callbacks': [tracer]} assert list(branch.stream('hello', config=config)) == list(llm_res) assert len(tracer.runs) == 1 assert tracer.runs[0].error is None assert tracer.runs[0].outputs == {'output': llm_res} with pytest.raises(ValueError): for _ in branch.stream('error', config=config): pass assert len(tracer.runs) == 2 assert "ValueError('x is error')" in str(tracer.runs[1].error) assert tracer.runs[1].outputs is None assert list(branch.stream('bye', config=config)) == ['bye'] assert len(tracer.runs) == 3 assert tracer.runs[2].error is None assert tracer.runs[2].outputs == {'output': 'bye'}
def test_runnable_branch_stream_with_callbacks() ->None: """Verify that stream works for RunnableBranch when using callbacks.""" tracer = FakeTracer() def raise_value_error(x: str) ->Any: """Raise a value error.""" raise ValueError(f'x is {x}') llm_res = "i'm a textbot" llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01) branch = RunnableBranch[str, Any]((lambda x: x == 'error', raise_value_error), (lambda x: x == 'hello', llm), lambda x: x) config: RunnableConfig = {'callbacks': [tracer]} assert list(branch.stream('hello', config=config)) == list(llm_res) assert len(tracer.runs) == 1 assert tracer.runs[0].error is None assert tracer.runs[0].outputs == {'output': llm_res} with pytest.raises(ValueError): for _ in branch.stream('error', config=config): pass assert len(tracer.runs) == 2 assert "ValueError('x is error')" in str(tracer.runs[1].error) assert tracer.runs[1].outputs is None assert list(branch.stream('bye', config=config)) == ['bye'] assert len(tracer.runs) == 3 assert tracer.runs[2].error is None assert tracer.runs[2].outputs == {'output': 'bye'}
Verify that stream works for RunnableBranch when using callbacks.
_buffer_to_array
return np.frombuffer(buffer, dtype=dtype).tolist()
def _buffer_to_array(buffer: bytes, dtype: Any=np.float32) ->List[float]: return np.frombuffer(buffer, dtype=dtype).tolist()
null
delete
"""Delete by vector ID or other criteria. Args: ids: List of ids to delete. **kwargs: Other keyword arguments that subclasses might use. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented. """ if not ids or len(ids) == 0: return True from google.cloud import bigquery job_config = bigquery.QueryJobConfig(query_parameters=[bigquery. ArrayQueryParameter('ids', 'STRING', ids)]) self.bq_client.query( f""" DELETE FROM `{self.full_table_id}` WHERE {self.doc_id_field} IN UNNEST(@ids) """ , job_config=job_config).result() return True
def delete(self, ids: Optional[List[str]]=None, **kwargs: Any) ->Optional[bool ]: """Delete by vector ID or other criteria. Args: ids: List of ids to delete. **kwargs: Other keyword arguments that subclasses might use. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented. """ if not ids or len(ids) == 0: return True from google.cloud import bigquery job_config = bigquery.QueryJobConfig(query_parameters=[bigquery. ArrayQueryParameter('ids', 'STRING', ids)]) self.bq_client.query( f""" DELETE FROM `{self.full_table_id}` WHERE {self.doc_id_field} IN UNNEST(@ids) """ , job_config=job_config).result() return True
Delete by vector ID or other criteria. Args: ids: List of ids to delete. **kwargs: Other keyword arguments that subclasses might use. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented.
configurable_fields
return self.default.configurable_fields(**{**self.fields, **kwargs})
def configurable_fields(self, **kwargs: AnyConfigurableField ) ->RunnableSerializable[Input, Output]: return self.default.configurable_fields(**{**self.fields, **kwargs})
null
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'schema', 'messages']
@classmethod def get_lc_namespace(cls) ->List[str]: """Get the namespace of the langchain object.""" return ['langchain', 'schema', 'messages']
Get the namespace of the langchain object.
_build_payload
payload: Dict[str, Any] = {'messages': [{'role': 'user', 'content': m} for m in messages], 'profanity_check': self.profanity} if self.temperature is not None: payload['temperature'] = self.temperature if self.max_tokens is not None: payload['max_tokens'] = self.max_tokens if self.model: payload['model'] = self.model if self.verbose: logger.info('Giga request: %s', payload) return payload
def _build_payload(self, messages: List[str]) ->Dict[str, Any]: payload: Dict[str, Any] = {'messages': [{'role': 'user', 'content': m} for m in messages], 'profanity_check': self.profanity} if self.temperature is not None: payload['temperature'] = self.temperature if self.max_tokens is not None: payload['max_tokens'] = self.max_tokens if self.model: payload['model'] = self.model if self.verbose: logger.info('Giga request: %s', payload) return payload
null
test_visit_comparison
comp = Comparison(comparator=Comparator.LT, attribute='foo', value=['1', '2']) expected = {'foo': {'$lt': ['1', '2']}} actual = DEFAULT_TRANSLATOR.visit_comparison(comp) assert expected == actual
def test_visit_comparison() ->None: comp = Comparison(comparator=Comparator.LT, attribute='foo', value=['1', '2']) expected = {'foo': {'$lt': ['1', '2']}} actual = DEFAULT_TRANSLATOR.visit_comparison(comp) assert expected == actual
null
get_sub_prompts
"""Get the sub prompts for llm call.""" if stop is not None: if 'stop' in params: raise ValueError('`stop` found in both the input and default params.') params['stop'] = stop if params['max_tokens'] == -1: if len(prompts) != 1: raise ValueError( 'max_tokens set to -1 not supported for multiple inputs.') params['max_tokens'] = self.max_tokens_for_prompt(prompts[0]) sub_prompts = [prompts[i:i + self.batch_size] for i in range(0, len(prompts ), self.batch_size)] return sub_prompts
def get_sub_prompts(self, params: Dict[str, Any], prompts: List[str], stop: Optional[List[str]]=None) ->List[List[str]]: """Get the sub prompts for llm call.""" if stop is not None: if 'stop' in params: raise ValueError( '`stop` found in both the input and default params.') params['stop'] = stop if params['max_tokens'] == -1: if len(prompts) != 1: raise ValueError( 'max_tokens set to -1 not supported for multiple inputs.') params['max_tokens'] = self.max_tokens_for_prompt(prompts[0]) sub_prompts = [prompts[i:i + self.batch_size] for i in range(0, len( prompts), self.batch_size)] return sub_prompts
Get the sub prompts for llm call.
_determine_prediction_key
prediction_key = None if config.prediction_key: prediction_key = config.prediction_key if run_outputs and prediction_key not in run_outputs: logger.warning( f"Prediction key {prediction_key} not in chain's specified output keys {run_outputs}. Evaluation behavior may be undefined." ) elif run_outputs and len(run_outputs) == 1: prediction_key = run_outputs[0] elif run_outputs is not None and len(run_outputs) > 1: logger.warning( f'Chain expects multiple output keys: {run_outputs}, Evaluation behavior may be undefined. Specify a prediction_key in the RunEvalConfig to avoid this warning.' ) return prediction_key
def _determine_prediction_key(config: smith_eval.RunEvalConfig, run_outputs: Optional[List[str]]) ->Optional[str]: prediction_key = None if config.prediction_key: prediction_key = config.prediction_key if run_outputs and prediction_key not in run_outputs: logger.warning( f"Prediction key {prediction_key} not in chain's specified output keys {run_outputs}. Evaluation behavior may be undefined." ) elif run_outputs and len(run_outputs) == 1: prediction_key = run_outputs[0] elif run_outputs is not None and len(run_outputs) > 1: logger.warning( f'Chain expects multiple output keys: {run_outputs}, Evaluation behavior may be undefined. Specify a prediction_key in the RunEvalConfig to avoid this warning.' ) return prediction_key
null
_reset
for k, v in self.metrics.items(): self.metrics[k] = 0
def _reset(self) ->None: for k, v in self.metrics.items(): self.metrics[k] = 0
null
as_retriever
tags = kwargs.pop('tags', None) or [] tags.extend(self._get_retriever_tags()) return RedisVectorStoreRetriever(vectorstore=self, **kwargs, tags=tags)
def as_retriever(self, **kwargs: Any) ->RedisVectorStoreRetriever: tags = kwargs.pop('tags', None) or [] tags.extend(self._get_retriever_tags()) return RedisVectorStoreRetriever(vectorstore=self, **kwargs, tags=tags)
null
test_invocation_params_stop_sequences
llm = HuggingFaceTextGenInference() assert llm._default_params['stop_sequences'] == [] runtime_stop = None assert llm._invocation_params(runtime_stop)['stop_sequences'] == [] assert llm._default_params['stop_sequences'] == [] runtime_stop = ['stop'] assert llm._invocation_params(runtime_stop)['stop_sequences'] == ['stop'] assert llm._default_params['stop_sequences'] == [] llm = HuggingFaceTextGenInference(stop_sequences=['.']) runtime_stop = ['stop'] assert llm._invocation_params(runtime_stop)['stop_sequences'] == ['.', 'stop'] assert llm._default_params['stop_sequences'] == ['.']
def test_invocation_params_stop_sequences() ->None: llm = HuggingFaceTextGenInference() assert llm._default_params['stop_sequences'] == [] runtime_stop = None assert llm._invocation_params(runtime_stop)['stop_sequences'] == [] assert llm._default_params['stop_sequences'] == [] runtime_stop = ['stop'] assert llm._invocation_params(runtime_stop)['stop_sequences'] == ['stop'] assert llm._default_params['stop_sequences'] == [] llm = HuggingFaceTextGenInference(stop_sequences=['.']) runtime_stop = ['stop'] assert llm._invocation_params(runtime_stop)['stop_sequences'] == ['.', 'stop'] assert llm._default_params['stop_sequences'] == ['.']
null
test_public_api
"""Test for regressions or changes in the public API.""" assert set(public_api) == set(_EXPECTED)
def test_public_api() ->None: """Test for regressions or changes in the public API.""" assert set(public_api) == set(_EXPECTED)
Test for regressions or changes in the public API.
_get_prompt_input_key
"""Get the input key for the prompt.""" if self.input_key is None: return get_prompt_input_key(inputs, self.memory_variables) return self.input_key
def _get_prompt_input_key(self, inputs: Dict[str, Any]) ->str: """Get the input key for the prompt.""" if self.input_key is None: return get_prompt_input_key(inputs, self.memory_variables) return self.input_key
Get the input key for the prompt.
on_decision
self.denom += 1
def on_decision(self) ->None: self.denom += 1
null
test_parse_int_value
_test_parse_value(x)
@pytest.mark.parametrize('x', (-1, 0, 1000000)) def test_parse_int_value(x: int) ->None: _test_parse_value(x)
null
_convert_run_to_wb_span
"""Base utility to create a span from a run. :param run: The run to convert. :return: The converted Span. """ attributes = {**run.extra} if run.extra else {} attributes['execution_order'] = run.execution_order return self.trace_tree.Span(span_id=str(run.id) if run.id is not None else None, name=run.name, start_time_ms=int(run.start_time.timestamp() * 1000), end_time_ms=int(run.end_time.timestamp() * 1000) if run.end_time is not None else None, status_code=self.trace_tree.StatusCode.SUCCESS if run.error is None else self.trace_tree.StatusCode.ERROR, status_message =run.error, attributes=attributes)
def _convert_run_to_wb_span(self, run: Run) ->'Span': """Base utility to create a span from a run. :param run: The run to convert. :return: The converted Span. """ attributes = {**run.extra} if run.extra else {} attributes['execution_order'] = run.execution_order return self.trace_tree.Span(span_id=str(run.id) if run.id is not None else None, name=run.name, start_time_ms=int(run.start_time.timestamp() * 1000), end_time_ms=int(run.end_time.timestamp() * 1000) if run. end_time is not None else None, status_code=self.trace_tree. StatusCode.SUCCESS if run.error is None else self.trace_tree. StatusCode.ERROR, status_message=run.error, attributes=attributes)
Base utility to create a span from a run. :param run: The run to convert. :return: The converted Span.
output_keys
return ['feedback']
@property def output_keys(self) ->List[str]: return ['feedback']
null
_default_params
"""Get the default parameters for calling GPTRouter API.""" return {'max_tokens': self.max_tokens, 'stream': self.streaming, 'n': self. n, 'temperature': self.temperature, **self.model_kwargs}
@property def _default_params(self) ->Dict[str, Any]: """Get the default parameters for calling GPTRouter API.""" return {'max_tokens': self.max_tokens, 'stream': self.streaming, 'n': self.n, 'temperature': self.temperature, **self.model_kwargs}
Get the default parameters for calling GPTRouter API.
_check_for_cluster
import redis try: cluster_info = redis_client.info('cluster') return cluster_info['cluster_enabled'] == 1 except redis.exceptions.RedisError: return False
def _check_for_cluster(redis_client: RedisType) ->bool: import redis try: cluster_info = redis_client.info('cluster') return cluster_info['cluster_enabled'] == 1 except redis.exceptions.RedisError: return False
null
test_visit_structured_query
query = 'What is the capital of France?' structured_query = StructuredQuery(query=query, filter=None) expected: Tuple[str, Dict] = (query, {}) actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query) assert expected == actual comp = Comparison(comparator=Comparator.EQ, attribute='foo', value='1') structured_query = StructuredQuery(query=query, filter=comp) expected = query, {'where_filter': {'path': ['foo'], 'operator': 'Equal', 'valueText': '1'}} actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query) assert expected == actual op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=Comparator.EQ, attribute='foo', value=2), Comparison(comparator=Comparator.EQ, attribute='bar', value='baz')]) structured_query = StructuredQuery(query=query, filter=op) expected = query, {'where_filter': {'operator': 'And', 'operands': [{'path': ['foo'], 'operator': 'Equal', 'valueInt': 2}, {'path': ['bar'], 'operator': 'Equal', 'valueText': 'baz'}]}} actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query) assert expected == actual
def test_visit_structured_query() ->None: query = 'What is the capital of France?' structured_query = StructuredQuery(query=query, filter=None) expected: Tuple[str, Dict] = (query, {}) actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query) assert expected == actual comp = Comparison(comparator=Comparator.EQ, attribute='foo', value='1') structured_query = StructuredQuery(query=query, filter=comp) expected = query, {'where_filter': {'path': ['foo'], 'operator': 'Equal', 'valueText': '1'}} actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query) assert expected == actual op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=Comparator.EQ, attribute='foo', value=2), Comparison(comparator=Comparator.EQ, attribute='bar', value='baz')]) structured_query = StructuredQuery(query=query, filter=op) expected = query, {'where_filter': {'operator': 'And', 'operands': [{'path': ['foo'], 'operator': 'Equal', 'valueInt': 2}, {'path': ['bar'], 'operator': 'Equal', 'valueText': 'baz'}]}} actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query) assert expected == actual
null
test_semantic_hybrid_search
"""Test end to end construction and search.""" embeddings: OpenAIEmbeddings = OpenAIEmbeddings(model=model, chunk_size=1) vector_store: AzureSearch = AzureSearch(azure_search_endpoint= vector_store_address, azure_search_key=vector_store_password, index_name=index_name, embedding_function=embeddings.embed_query, semantic_configuration_name='default') vector_store.add_texts(['Test 1', 'Test 2', 'Test 3'], [{'title': 'Title 1', 'any_metadata': 'Metadata 1'}, {'title': 'Title 2', 'any_metadata': 'Metadata 2'}, {'title': 'Title 3', 'any_metadata': 'Metadata 3'}]) time.sleep(1) res = vector_store.semantic_hybrid_search(query="What's Azure Search?", k=3) assert len(res) == 3
def test_semantic_hybrid_search() ->None: """Test end to end construction and search.""" embeddings: OpenAIEmbeddings = OpenAIEmbeddings(model=model, chunk_size=1) vector_store: AzureSearch = AzureSearch(azure_search_endpoint= vector_store_address, azure_search_key=vector_store_password, index_name=index_name, embedding_function=embeddings.embed_query, semantic_configuration_name='default') vector_store.add_texts(['Test 1', 'Test 2', 'Test 3'], [{'title': 'Title 1', 'any_metadata': 'Metadata 1'}, {'title': 'Title 2', 'any_metadata': 'Metadata 2'}, {'title': 'Title 3', 'any_metadata': 'Metadata 3'}]) time.sleep(1) res = vector_store.semantic_hybrid_search(query="What's Azure Search?", k=3 ) assert len(res) == 3
Test end to end construction and search.
_check_response
if 'data' not in response: raise RuntimeError(f'Voyage API Error. Message: {json.dumps(response)}') return response
def _check_response(response: dict) ->dict: if 'data' not in response: raise RuntimeError(f'Voyage API Error. Message: {json.dumps(response)}' ) return response
null
test_edenai_call_with_old_params
""" Test simple call to edenai with using `params` to pass optional parameters to api """ llm = EdenAI(provider='openai', params={'temperature': 0.2, 'max_tokens': 250}) output = llm('Say foo:') assert llm._llm_type == 'edenai' assert llm.feature == 'text' assert llm.subfeature == 'generation' assert isinstance(output, str)
def test_edenai_call_with_old_params() ->None: """ Test simple call to edenai with using `params` to pass optional parameters to api """ llm = EdenAI(provider='openai', params={'temperature': 0.2, 'max_tokens': 250}) output = llm('Say foo:') assert llm._llm_type == 'edenai' assert llm.feature == 'text' assert llm.subfeature == 'generation' assert isinstance(output, str)
Test simple call to edenai with using `params` to pass optional parameters to api
test_json_distance_evaluator_evaluate_strings_simple_diff
prediction = '{"a": 1}' reference = '{"a": 2}' result = json_distance_evaluator._evaluate_strings(prediction=prediction, reference=reference) pytest.approx(1 / 7, result['score'])
@pytest.mark.requires('rapidfuzz') def test_json_distance_evaluator_evaluate_strings_simple_diff(json_distance_evaluator: JsonEditDistanceEvaluator) ->None: prediction = '{"a": 1}' reference = '{"a": 2}' result = json_distance_evaluator._evaluate_strings(prediction=prediction, reference=reference) pytest.approx(1 / 7, result['score'])
null
test_deep_stream
prompt = SystemMessagePromptTemplate.from_template('You are a nice assistant.' ) + '{question}' llm = FakeStreamingListLLM(responses=['foo-lish']) chain = prompt | llm | StrOutputParser() stream = chain.stream({'question': 'What up'}) chunks = [] for chunk in stream: chunks.append(chunk) assert len(chunks) == len('foo-lish') assert ''.join(chunks) == 'foo-lish' chunks = [] for chunk in (chain | RunnablePassthrough()).stream({'question': 'What up'}): chunks.append(chunk) assert len(chunks) == len('foo-lish') assert ''.join(chunks) == 'foo-lish'
def test_deep_stream() ->None: prompt = SystemMessagePromptTemplate.from_template( 'You are a nice assistant.') + '{question}' llm = FakeStreamingListLLM(responses=['foo-lish']) chain = prompt | llm | StrOutputParser() stream = chain.stream({'question': 'What up'}) chunks = [] for chunk in stream: chunks.append(chunk) assert len(chunks) == len('foo-lish') assert ''.join(chunks) == 'foo-lish' chunks = [] for chunk in (chain | RunnablePassthrough()).stream({'question': 'What up'} ): chunks.append(chunk) assert len(chunks) == len('foo-lish') assert ''.join(chunks) == 'foo-lish'
null
load
""" Load and return all Documents from the provided URLs. Returns: List[Document]: A list of Document objects containing the scraped content from each URL. """ return list(self.lazy_load())
def load(self) ->List[Document]: """ Load and return all Documents from the provided URLs. Returns: List[Document]: A list of Document objects containing the scraped content from each URL. """ return list(self.lazy_load())
Load and return all Documents from the provided URLs. Returns: List[Document]: A list of Document objects containing the scraped content from each URL.
_import_google_finance
from langchain_community.utilities.google_finance import GoogleFinanceAPIWrapper return GoogleFinanceAPIWrapper
def _import_google_finance() ->Any: from langchain_community.utilities.google_finance import GoogleFinanceAPIWrapper return GoogleFinanceAPIWrapper
null
test_redis_vector_field_validation
"""Test validation for RedisVectorField's datatype.""" from langchain_core.pydantic_v1 import ValidationError with pytest.raises(ValidationError): RedisVectorField(name='vector', dims=128, algorithm='INVALID_ALGO', datatype='INVALID_TYPE') vector_field = RedisVectorField(name='vector', dims=128, algorithm= 'SOME_ALGO', datatype='FLOAT32') assert vector_field.datatype == 'FLOAT32'
def test_redis_vector_field_validation() ->None: """Test validation for RedisVectorField's datatype.""" from langchain_core.pydantic_v1 import ValidationError with pytest.raises(ValidationError): RedisVectorField(name='vector', dims=128, algorithm='INVALID_ALGO', datatype='INVALID_TYPE') vector_field = RedisVectorField(name='vector', dims=128, algorithm= 'SOME_ALGO', datatype='FLOAT32') assert vector_field.datatype == 'FLOAT32'
Test validation for RedisVectorField's datatype.
save_context
"""Save context from this conversation to buffer.""" super().save_context(inputs, outputs) self.buffer = self.predict_new_summary(self.chat_memory.messages[-2:], self .buffer)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) ->None: """Save context from this conversation to buffer.""" super().save_context(inputs, outputs) self.buffer = self.predict_new_summary(self.chat_memory.messages[-2:], self.buffer)
Save context from this conversation to buffer.
test_from_texts_with_metadatas_benchmark
"""Test end to end construction and search.""" texts = [document.page_content for document in documents] * data_multiplier uuids = [uuid.uuid4().hex for _ in range(len(texts))] metadatas = [{'page': i} for i in range(len(texts))] docsearch = Pinecone.from_texts(texts, embedding_openai, ids=uuids, metadatas=metadatas, index_name=index_name, namespace=namespace_name, pool_threads=pool_threads, batch_size=batch_size, embeddings_chunk_size =embeddings_chunk_size) query = 'What did the president say about Ketanji Brown Jackson' _ = docsearch.similarity_search(query, k=1, namespace=namespace_name)
@pytest.mark.skipif(reason='slow to run for benchmark') @pytest.mark.parametrize( 'pool_threads,batch_size,embeddings_chunk_size,data_multiplier', [(1, 32, 32, 1000), (1, 32, 1000, 1000), (4, 32, 1000, 1000), (20, 64, 5000, 1000)]) def test_from_texts_with_metadatas_benchmark(self, pool_threads: int, batch_size: int, embeddings_chunk_size: int, data_multiplier: int, documents: List[Document], embedding_openai: OpenAIEmbeddings) ->None: """Test end to end construction and search.""" texts = [document.page_content for document in documents] * data_multiplier uuids = [uuid.uuid4().hex for _ in range(len(texts))] metadatas = [{'page': i} for i in range(len(texts))] docsearch = Pinecone.from_texts(texts, embedding_openai, ids=uuids, metadatas=metadatas, index_name=index_name, namespace= namespace_name, pool_threads=pool_threads, batch_size=batch_size, embeddings_chunk_size=embeddings_chunk_size) query = 'What did the president say about Ketanji Brown Jackson' _ = docsearch.similarity_search(query, k=1, namespace=namespace_name)
Test end to end construction and search.
_identifying_params
"""Get the identifying parameters.""" return {**{'model_url': self.model_url, 'user_id': self.user_id, 'app_id': self.app_id, 'model_id': self.model_id}}
@property def _identifying_params(self) ->Dict[str, Any]: """Get the identifying parameters.""" return {**{'model_url': self.model_url, 'user_id': self.user_id, 'app_id': self.app_id, 'model_id': self.model_id}}
Get the identifying parameters.
test_deeplake_overwrite_flag
"""Test overwrite behavior""" import deeplake dataset_path = './tests/persist_dir' if deeplake.exists(dataset_path): deeplake.delete(dataset_path) texts = ['foo', 'bar', 'baz'] docsearch = DeepLake.from_texts(dataset_path=dataset_path, texts=texts, embedding=FakeEmbeddings()) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')] docsearch = DeepLake(dataset_path=dataset_path, embedding_function= FakeEmbeddings()) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')] docsearch = DeepLake(dataset_path=dataset_path, embedding_function= FakeEmbeddings(), overwrite=False) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')] docsearch = DeepLake(dataset_path=dataset_path, embedding_function= FakeEmbeddings(), overwrite=True) with pytest.raises(ValueError): output = docsearch.similarity_search('foo', k=1)
def test_deeplake_overwrite_flag() ->None: """Test overwrite behavior""" import deeplake dataset_path = './tests/persist_dir' if deeplake.exists(dataset_path): deeplake.delete(dataset_path) texts = ['foo', 'bar', 'baz'] docsearch = DeepLake.from_texts(dataset_path=dataset_path, texts=texts, embedding=FakeEmbeddings()) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')] docsearch = DeepLake(dataset_path=dataset_path, embedding_function= FakeEmbeddings()) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')] docsearch = DeepLake(dataset_path=dataset_path, embedding_function= FakeEmbeddings(), overwrite=False) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')] docsearch = DeepLake(dataset_path=dataset_path, embedding_function= FakeEmbeddings(), overwrite=True) with pytest.raises(ValueError): output = docsearch.similarity_search('foo', k=1)
Test overwrite behavior
test_api_key_is_secret_string
llm = AI21(ai21_api_key='secret-api-key') assert isinstance(llm.ai21_api_key, SecretStr)
def test_api_key_is_secret_string() ->None: llm = AI21(ai21_api_key='secret-api-key') assert isinstance(llm.ai21_api_key, SecretStr)
null
validate_tools
selected_tools = values.get('selected_tools') or [] for tool_name in selected_tools: if tool_name not in _FILE_TOOLS: raise ValueError( f'File Tool of name {tool_name} not supported. Permitted tools: {list(_FILE_TOOLS)}' ) return values
@root_validator def validate_tools(cls, values: dict) ->dict: selected_tools = values.get('selected_tools') or [] for tool_name in selected_tools: if tool_name not in _FILE_TOOLS: raise ValueError( f'File Tool of name {tool_name} not supported. Permitted tools: {list(_FILE_TOOLS)}' ) return values
null
_create_retry_decorator
import openai return retry(reraise=True, stop=stop_after_attempt(embeddings.max_retries), wait=wait_exponential(multiplier=1, min=embeddings.retry_min_seconds, max=embeddings.retry_max_seconds), retry=retry_if_exception_type(openai .error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError), before_sleep=before_sleep_log(logger, logging.WARNING))
def _create_retry_decorator(embeddings: OpenAIEmbeddings) ->Callable[[Any], Any ]: import openai return retry(reraise=True, stop=stop_after_attempt(embeddings. max_retries), wait=wait_exponential(multiplier=1, min=embeddings. retry_min_seconds, max=embeddings.retry_max_seconds), retry= retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError), before_sleep=before_sleep_log(logger, logging.WARNING))
null
_dict2document
summary = doc.pop('Summary') return Document(page_content=summary, metadata=doc)
def _dict2document(self, doc: dict) ->Document: summary = doc.pop('Summary') return Document(page_content=summary, metadata=doc)
null
test_run_returns_no_result
"""Test that gives no result.""" output = api_client.run('1605.08386WWW') assert 'No good PubMed Result was found' == output
def test_run_returns_no_result(api_client: PubMedAPIWrapper) ->None:
    """Test that returns no result."""
    output = api_client.run('1605.08386WWW')
    assert 'No good PubMed Result was found' == output
Test that returns no result.
test_load_fail_no_func
"""Test that fails to load""" with pytest.raises(ValidationError) as exc_info: TensorflowDatasetLoader(dataset_name='mlqa/en', split_name='test', load_max_docs=MAX_DOCS) assert 'Please provide a function' in str(exc_info.value)
def test_load_fail_no_func() ->None: """Test that fails to load""" with pytest.raises(ValidationError) as exc_info: TensorflowDatasetLoader(dataset_name='mlqa/en', split_name='test', load_max_docs=MAX_DOCS) assert 'Please provide a function' in str(exc_info.value)
Test that fails to load
trim_first_node
"""Remove the first node if it exists and has a single outgoing edge, ie. if removing it would not leave the graph without a "first" node.""" first_node = self.first_node() if first_node: if len(self.nodes) == 1 or len([edge for edge in self.edges if edge. source == first_node.id]) == 1: self.remove_node(first_node)
def trim_first_node(self) ->None: """Remove the first node if it exists and has a single outgoing edge, ie. if removing it would not leave the graph without a "first" node.""" first_node = self.first_node() if first_node: if len(self.nodes) == 1 or len([edge for edge in self.edges if edge .source == first_node.id]) == 1: self.remove_node(first_node)
Remove the first node if it exists and has a single outgoing edge, ie. if removing it would not leave the graph without a "first" node.
create_file
""" Creates a new file on the Github repo Parameters: file_query(str): a string which contains the file path and the file contents. The file path is the first line in the string, and the contents are the rest of the string. For example, "hello_world.md # Hello World!" Returns: str: A success or failure message """ if self.active_branch == self.github_base_branch: return ( f"You're attempting to commit to the directly to the{self.github_base_branch} branch, which is protected. Please create a new branch and try again." ) file_path = file_query.split('\n')[0] file_contents = file_query[len(file_path) + 2:] try: try: file = self.github_repo_instance.get_contents(file_path, ref=self. active_branch) if file: return ( f'File already exists at `{file_path}` on branch `{self.active_branch}`. You must use `update_file` to modify it.' ) except Exception: pass self.github_repo_instance.create_file(path=file_path, message='Create ' + file_path, content=file_contents, branch=self.active_branch) return 'Created file ' + file_path except Exception as e: return 'Unable to make file due to error:\n' + str(e)
def create_file(self, file_query: str) ->str:
    """
    Creates a new file on the Github repo
    Parameters:
        file_query(str): a string which contains the file path
        and the file contents. The file path is the first line
        in the string, and the contents are the rest of the string.
        For example, "hello_world.md

        # Hello World!"
    Returns:
        str: A success or failure message
    """
    if self.active_branch == self.github_base_branch:
        return (
            f"You're attempting to commit directly to the {self.github_base_branch} branch, which is protected. Please create a new branch and try again."
            )
    file_path = file_query.split('\n')[0]
    file_contents = file_query[len(file_path) + 2:]
    try:
        try:
            file = self.github_repo_instance.get_contents(file_path, ref=
                self.active_branch)
            if file:
                return (
                    f'File already exists at `{file_path}` on branch `{self.active_branch}`. You must use `update_file` to modify it.'
                    )
        except Exception:
            pass
        self.github_repo_instance.create_file(path=file_path, message=
            'Create ' + file_path, content=file_contents, branch=self.
            active_branch)
        return 'Created file ' + file_path
    except Exception as e:
        return 'Unable to make file due to error:\n' + str(e)
Creates a new file on the Github repo Parameters: file_query(str): a string which contains the file path and the file contents. The file path is the first line in the string, and the contents are the rest of the string. For example, "hello_world.md # Hello World!" Returns: str: A success or failure message
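A minimal caller-side sketch of the file_query format this method expects: the first line is the path, a blank line separates it from the contents (matching the len(file_path) + 2 slice above), and the rest is the file body. The wrapper instance name `github` and the path/contents below are hypothetical placeholders, not part of the source above.

    # hypothetical caller; 'github' stands for an already-configured GitHub API wrapper
    file_query = "docs/hello_world.md\n\n# Hello World!"
    print(github.create_file(file_query))  # prints the success or failure message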
check_blob_is_valid
"""Verify that either data or path is provided.""" if 'data' not in values and 'path' not in values: raise ValueError('Either data or path must be provided') return values
@root_validator(pre=True) def check_blob_is_valid(cls, values: Mapping[str, Any]) ->Mapping[str, Any]: """Verify that either data or path is provided.""" if 'data' not in values and 'path' not in values: raise ValueError('Either data or path must be provided') return values
Verify that either data or path is provided.
ingest_documents
""" Ingest PDF to Redis from the data/ directory that contains Edgar 10k filings data for Nike. """ company_name = 'Nike' data_path = 'data/' doc = [os.path.join(data_path, file) for file in os.listdir(data_path)][0] print('Parsing 10k filing doc for NIKE', doc) text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=100, add_start_index=True) loader = UnstructuredFileLoader(doc, mode='single', strategy='fast') chunks = loader.load_and_split(text_splitter) print('Done preprocessing. Created', len(chunks), 'chunks of the original pdf') embedder = HuggingFaceEmbeddings(model_name=EMBED_MODEL) _ = Redis.from_texts(texts=[(f'Company: {company_name}. ' + chunk. page_content) for chunk in chunks], metadatas=[chunk.metadata for chunk in chunks], embedding=embedder, index_name=INDEX_NAME, index_schema= INDEX_SCHEMA, redis_url=REDIS_URL)
def ingest_documents(): """ Ingest PDF to Redis from the data/ directory that contains Edgar 10k filings data for Nike. """ company_name = 'Nike' data_path = 'data/' doc = [os.path.join(data_path, file) for file in os.listdir(data_path)][0] print('Parsing 10k filing doc for NIKE', doc) text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=100, add_start_index=True) loader = UnstructuredFileLoader(doc, mode='single', strategy='fast') chunks = loader.load_and_split(text_splitter) print('Done preprocessing. Created', len(chunks), 'chunks of the original pdf') embedder = HuggingFaceEmbeddings(model_name=EMBED_MODEL) _ = Redis.from_texts(texts=[(f'Company: {company_name}. ' + chunk. page_content) for chunk in chunks], metadatas=[chunk.metadata for chunk in chunks], embedding=embedder, index_name=INDEX_NAME, index_schema=INDEX_SCHEMA, redis_url=REDIS_URL)
Ingest PDF to Redis from the data/ directory that contains Edgar 10k filings data for Nike.
_stream
params = self._convert_prompt_msg_params(messages, **kwargs) for res in self.client.do(**params): if res: msg = _convert_dict_to_message(res) chunk = ChatGenerationChunk(text=res['result'], message= AIMessageChunk(content=msg.content, role='assistant', additional_kwargs=msg.additional_kwargs)) yield chunk if run_manager: run_manager.on_llm_new_token(chunk.text, chunk=chunk)
def _stream(self, messages: List[BaseMessage], stop: Optional[List[str]]= None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any ) ->Iterator[ChatGenerationChunk]: params = self._convert_prompt_msg_params(messages, **kwargs) for res in self.client.do(**params): if res: msg = _convert_dict_to_message(res) chunk = ChatGenerationChunk(text=res['result'], message= AIMessageChunk(content=msg.content, role='assistant', additional_kwargs=msg.additional_kwargs)) yield chunk if run_manager: run_manager.on_llm_new_token(chunk.text, chunk=chunk)
null
run
""" Run any query statement in jaguardb Args: query (str): query statement to jaguardb Returns: None for invalid token, or json result string """ if self._token == '': logger.error(f'E0005 error run({query})') return {} resp = self._jag.post(query, self._token, withFile) txt = resp.text try: js = json.loads(txt) return js except Exception: return {}
def run(self, query: str, withFile: bool=False) ->dict:
    """
        Run any query statement in jaguardb
        Args:
            query (str): query statement to jaguardb
        Returns:
            An empty dict for an invalid token or unparsable response,
            otherwise the parsed JSON result as a dict
        """
    if self._token == '':
        logger.error(f'E0005 error run({query})')
        return {}
    resp = self._jag.post(query, self._token, withFile)
    txt = resp.text
    try:
        js = json.loads(txt)
        return js
    except Exception:
        return {}
Run any query statement in jaguardb
        Args:
            query (str): query statement to jaguardb
        Returns:
            An empty dict for an invalid token or unparsable response,
            otherwise the parsed JSON result as a dict
_combine_llm_outputs
overall_token_usage: dict = {} for output in llm_outputs: if output is None: continue token_usage = output['token_usage'] for k, v in token_usage.items(): if k in overall_token_usage: overall_token_usage[k] += v else: overall_token_usage[k] = v return {'token_usage': overall_token_usage}
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) ->dict: overall_token_usage: dict = {} for output in llm_outputs: if output is None: continue token_usage = output['token_usage'] for k, v in token_usage.items(): if k in overall_token_usage: overall_token_usage[k] += v else: overall_token_usage[k] = v return {'token_usage': overall_token_usage}
null
test_iterative_text_splitter_discard_separator
chunk_size = 5 output = __test_iterative_text_splitter(chunk_size=chunk_size, keep_separator=False) assert output == ['....5', '..3', '...4', '....5', '...']
def test_iterative_text_splitter_discard_separator() ->None: chunk_size = 5 output = __test_iterative_text_splitter(chunk_size=chunk_size, keep_separator=False) assert output == ['....5', '..3', '...4', '....5', '...']
null
test_json_equality_evaluator_evaluate_strings_custom_operator_not_equal
def operator(x: dict, y: dict) ->bool: return x['a'] == y['a'] evaluator = JsonEqualityEvaluator(operator=operator) prediction = '{"a": 1}' reference = '{"a": 2}' result = evaluator.evaluate_strings(prediction=prediction, reference=reference) assert result == {'score': False}
def test_json_equality_evaluator_evaluate_strings_custom_operator_not_equal( ) ->None: def operator(x: dict, y: dict) ->bool: return x['a'] == y['a'] evaluator = JsonEqualityEvaluator(operator=operator) prediction = '{"a": 1}' reference = '{"a": 2}' result = evaluator.evaluate_strings(prediction=prediction, reference= reference) assert result == {'score': False}
null
on_llm_end
"""Run when LLM ends running."""
def on_llm_end(self, response: LLMResult, **kwargs: Any) ->None: """Run when LLM ends running."""
Run when LLM ends running.
test_include_types2
"""Test include types from schema.""" url = os.environ.get('NEO4J_URI') username = os.environ.get('NEO4J_USERNAME') password = os.environ.get('NEO4J_PASSWORD') assert url is not None assert username is not None assert password is not None graph = Neo4jGraph(url=url, username=username, password=password) graph.query('MATCH (n) DETACH DELETE n') graph.query( "CREATE (a:Actor {name:'Bruce Willis'})-[:ACTED_IN]->(:Movie {title: 'Pulp Fiction'})<-[:DIRECTED]-(p:Person {name:'John'})" ) graph.refresh_schema() chain = GraphCypherQAChain.from_llm(OpenAI(temperature=0), graph=graph, include_types=['Movie', 'ACTED_IN']) expected_schema = """Node properties are the following: Movie {title: STRING} Relationship properties are the following: The relationships are the following: """ assert chain.graph_schema == expected_schema
def test_include_types2() ->None: """Test include types from schema.""" url = os.environ.get('NEO4J_URI') username = os.environ.get('NEO4J_USERNAME') password = os.environ.get('NEO4J_PASSWORD') assert url is not None assert username is not None assert password is not None graph = Neo4jGraph(url=url, username=username, password=password) graph.query('MATCH (n) DETACH DELETE n') graph.query( "CREATE (a:Actor {name:'Bruce Willis'})-[:ACTED_IN]->(:Movie {title: 'Pulp Fiction'})<-[:DIRECTED]-(p:Person {name:'John'})" ) graph.refresh_schema() chain = GraphCypherQAChain.from_llm(OpenAI(temperature=0), graph=graph, include_types=['Movie', 'ACTED_IN']) expected_schema = """Node properties are the following: Movie {title: STRING} Relationship properties are the following: The relationships are the following: """ assert chain.graph_schema == expected_schema
Test include types from schema.
__eq__
"""Create a Numeric equality filter expression. Args: other (Union[int, float]): The value to filter on. Example: >>> from langchain_community.vectorstores.redis import RedisNum >>> filter = RedisNum("zipcode") == 90210 """ self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.EQ) return RedisFilterExpression(str(self))
@check_operator_misuse def __eq__(self, other: Union[int, float]) ->'RedisFilterExpression': """Create a Numeric equality filter expression. Args: other (Union[int, float]): The value to filter on. Example: >>> from langchain_community.vectorstores.redis import RedisNum >>> filter = RedisNum("zipcode") == 90210 """ self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.EQ) return RedisFilterExpression(str(self))
Create a Numeric equality filter expression. Args: other (Union[int, float]): The value to filter on. Example: >>> from langchain_community.vectorstores.redis import RedisNum >>> filter = RedisNum("zipcode") == 90210
format
"""Format the prompt template. Args: **kwargs: Keyword arguments to use for formatting. Returns: Formatted message. """
@abstractmethod def format(self, **kwargs: Any) ->BaseMessage: """Format the prompt template. Args: **kwargs: Keyword arguments to use for formatting. Returns: Formatted message. """
Format the prompt template. Args: **kwargs: Keyword arguments to use for formatting. Returns: Formatted message.
_embed
payload = {'inputs': input} headers = {'Authorization': f'{self.mosaicml_api_token}', 'Content-Type': 'application/json'} try: response = requests.post(self.endpoint_url, headers=headers, json=payload) except requests.exceptions.RequestException as e: raise ValueError(f'Error raised by inference endpoint: {e}') try: if response.status_code == 429: if not is_retry: import time time.sleep(self.retry_sleep) return self._embed(input, is_retry=True) raise ValueError( f"""Error raised by inference API: rate limit exceeded. Response: {response.text}""" ) parsed_response = response.json() if isinstance(parsed_response, dict): output_keys = ['data', 'output', 'outputs'] for key in output_keys: if key in parsed_response: output_item = parsed_response[key] break else: raise ValueError( f'No key data or output in response: {parsed_response}') if isinstance(output_item, list) and isinstance(output_item[0], list): embeddings = output_item else: embeddings = [output_item] else: raise ValueError(f'Unexpected response type: {parsed_response}') except requests.exceptions.JSONDecodeError as e: raise ValueError( f'Error raised by inference API: {e}.\nResponse: {response.text}') return embeddings
def _embed(self, input: List[Tuple[str, str]], is_retry: bool=False) ->List[ List[float]]: payload = {'inputs': input} headers = {'Authorization': f'{self.mosaicml_api_token}', 'Content-Type': 'application/json'} try: response = requests.post(self.endpoint_url, headers=headers, json= payload) except requests.exceptions.RequestException as e: raise ValueError(f'Error raised by inference endpoint: {e}') try: if response.status_code == 429: if not is_retry: import time time.sleep(self.retry_sleep) return self._embed(input, is_retry=True) raise ValueError( f"""Error raised by inference API: rate limit exceeded. Response: {response.text}""" ) parsed_response = response.json() if isinstance(parsed_response, dict): output_keys = ['data', 'output', 'outputs'] for key in output_keys: if key in parsed_response: output_item = parsed_response[key] break else: raise ValueError( f'No key data or output in response: {parsed_response}') if isinstance(output_item, list) and isinstance(output_item[0], list): embeddings = output_item else: embeddings = [output_item] else: raise ValueError(f'Unexpected response type: {parsed_response}') except requests.exceptions.JSONDecodeError as e: raise ValueError( f'Error raised by inference API: {e}.\nResponse: {response.text}') return embeddings
null
get_num_tokens
tokenized_text = self.client.tokenize(text.encode('utf-8')) return len(tokenized_text)
def get_num_tokens(self, text: str) ->int: tokenized_text = self.client.tokenize(text.encode('utf-8')) return len(tokenized_text)
null
on_agent_finish
"""Do nothing""" pass
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) ->None: """Do nothing""" pass
Do nothing
test_similarity_search_by_vector
index = mock_index(index_details) index.similarity_search.return_value = EXAMPLE_SEARCH_RESPONSE vectorsearch = default_databricks_vector_search(index) query_embedding = DEFAULT_EMBEDDING_MODEL.embed_query('foo') filters = {'some filter': True} limit = 7 search_result = vectorsearch.similarity_search_by_vector(query_embedding, k =limit, filters=filters) index.similarity_search.assert_called_once_with(columns=[ DEFAULT_PRIMARY_KEY, DEFAULT_TEXT_COLUMN], query_vector=query_embedding, filters=filters, num_results=limit) assert len(search_result) == len(fake_texts) assert sorted([d.page_content for d in search_result]) == sorted(fake_texts) assert all([(DEFAULT_PRIMARY_KEY in d.metadata) for d in search_result])
@pytest.mark.requires('databricks', 'databricks.vector_search') @pytest.mark.parametrize('index_details', [ DELTA_SYNC_INDEX_SELF_MANAGED_EMBEDDINGS, DIRECT_ACCESS_INDEX]) def test_similarity_search_by_vector(index_details: dict) ->None: index = mock_index(index_details) index.similarity_search.return_value = EXAMPLE_SEARCH_RESPONSE vectorsearch = default_databricks_vector_search(index) query_embedding = DEFAULT_EMBEDDING_MODEL.embed_query('foo') filters = {'some filter': True} limit = 7 search_result = vectorsearch.similarity_search_by_vector(query_embedding, k=limit, filters=filters) index.similarity_search.assert_called_once_with(columns=[ DEFAULT_PRIMARY_KEY, DEFAULT_TEXT_COLUMN], query_vector= query_embedding, filters=filters, num_results=limit) assert len(search_result) == len(fake_texts) assert sorted([d.page_content for d in search_result]) == sorted(fake_texts ) assert all([(DEFAULT_PRIMARY_KEY in d.metadata) for d in search_result])
null
_convert_message_to_document
""" Convert a message to a Document object. Args: message (dict): A message in the form of a dictionary. channel_name (str): The name of the channel the message belongs to. Returns: Document: A Document object representing the message. """ text = message.get('text', '') metadata = self._get_message_metadata(message, channel_name) return Document(page_content=text, metadata=metadata)
def _convert_message_to_document(self, message: dict, channel_name: str ) ->Document: """ Convert a message to a Document object. Args: message (dict): A message in the form of a dictionary. channel_name (str): The name of the channel the message belongs to. Returns: Document: A Document object representing the message. """ text = message.get('text', '') metadata = self._get_message_metadata(message, channel_name) return Document(page_content=text, metadata=metadata)
Convert a message to a Document object. Args: message (dict): A message in the form of a dictionary. channel_name (str): The name of the channel the message belongs to. Returns: Document: A Document object representing the message.
test_fauna_loader
"""Test Fauna loader.""" loader = FaunaLoader(query=self.valid_fql_query, page_content_field=self. valid_page_content_field, secret=self.fauna_secret, metadata_fields= self.valid_metadata_fields) docs = loader.load() assert len(docs) > 0 for doc in docs: assert doc.page_content != '' assert 'id' in doc.metadata and doc.metadata['id'] != '' assert 'ts' in doc.metadata and doc.metadata['ts'] != ''
def test_fauna_loader(self) ->None: """Test Fauna loader.""" loader = FaunaLoader(query=self.valid_fql_query, page_content_field= self.valid_page_content_field, secret=self.fauna_secret, metadata_fields=self.valid_metadata_fields) docs = loader.load() assert len(docs) > 0 for doc in docs: assert doc.page_content != '' assert 'id' in doc.metadata and doc.metadata['id'] != '' assert 'ts' in doc.metadata and doc.metadata['ts'] != ''
Test Fauna loader.
validate_environment
"""Validates that the python package exists in environment.""" google_api_key = get_from_dict_or_env(values, 'google_api_key', 'GOOGLE_API_KEY') if isinstance(google_api_key, SecretStr): google_api_key = google_api_key.get_secret_value() genai.configure(api_key=google_api_key) return values
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validates that the api key exists in environment and configures the client."""
    google_api_key = get_from_dict_or_env(values, 'google_api_key',
        'GOOGLE_API_KEY')
    if isinstance(google_api_key, SecretStr):
        google_api_key = google_api_key.get_secret_value()
    genai.configure(api_key=google_api_key)
    return values
Validates that the api key exists in environment and configures the client.
fuzzy_matching_strategy
""" Fuzzy matching strategy for deanonymization. It uses fuzzy matching to find the position of the anonymized entity in the text. It replaces all the anonymized entities with the original ones. Args: text: text to deanonymize deanonymizer_mapping: mapping between anonymized entities and original ones max_l_dist: maximum Levenshtein distance between the anonymized entity and the text segment to consider it a match Examples of matching: Kaenu Reves -> Keanu Reeves John F. Kennedy -> John Kennedy """ try: from fuzzysearch import find_near_matches except ImportError as e: raise ImportError( 'Could not import fuzzysearch, please install with `pip install fuzzysearch`.' ) from e for entity_type in deanonymizer_mapping: for anonymized, original in deanonymizer_mapping[entity_type].items(): matches = find_near_matches(anonymized, text, max_l_dist=max_l_dist) new_text = '' last_end = 0 for m in matches: new_text += text[last_end:m.start] new_text += original last_end = m.end new_text += text[last_end:] text = new_text return text
def fuzzy_matching_strategy(text: str, deanonymizer_mapping: MappingDataType, max_l_dist: int=3) ->str: """ Fuzzy matching strategy for deanonymization. It uses fuzzy matching to find the position of the anonymized entity in the text. It replaces all the anonymized entities with the original ones. Args: text: text to deanonymize deanonymizer_mapping: mapping between anonymized entities and original ones max_l_dist: maximum Levenshtein distance between the anonymized entity and the text segment to consider it a match Examples of matching: Kaenu Reves -> Keanu Reeves John F. Kennedy -> John Kennedy """ try: from fuzzysearch import find_near_matches except ImportError as e: raise ImportError( 'Could not import fuzzysearch, please install with `pip install fuzzysearch`.' ) from e for entity_type in deanonymizer_mapping: for anonymized, original in deanonymizer_mapping[entity_type].items(): matches = find_near_matches(anonymized, text, max_l_dist=max_l_dist ) new_text = '' last_end = 0 for m in matches: new_text += text[last_end:m.start] new_text += original last_end = m.end new_text += text[last_end:] text = new_text return text
Fuzzy matching strategy for deanonymization. It uses fuzzy matching to find the position of the anonymized entity in the text. It replaces all the anonymized entities with the original ones. Args: text: text to deanonymize deanonymizer_mapping: mapping between anonymized entities and original ones max_l_dist: maximum Levenshtein distance between the anonymized entity and the text segment to consider it a match Examples of matching: Kaenu Reves -> Keanu Reeves John F. Kennedy -> John Kennedy
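A short usage sketch, assuming fuzzysearch is installed; the mapping and sentence below are invented for illustration and mirror the {entity_type: {anonymized: original}} shape the loop above iterates over.

    mapping = {"PERSON": {"Jane Doe": "Keanu Reeves"}}   # hypothetical mapping
    text = "The award went to Jane Do."                  # near match, edit distance 1
    print(fuzzy_matching_strategy(text, mapping, max_l_dist=3))
    # the near match "Jane Do" should be replaced with "Keanu Reeves"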
from_documents
"""Create an AtlasDB vectorstore from a list of documents. Args: name (str): Name of the collection to create. api_key (str): Your nomic API key, documents (List[Document]): List of documents to add to the vectorstore. embedding (Optional[Embeddings]): Embedding function. Defaults to None. ids (Optional[List[str]]): Optional list of document IDs. If None, ids will be auto created description (str): A description for your project. is_public (bool): Whether your project is publicly accessible. True by default. reset_project_if_exists (bool): Whether to reset this project if it already exists. Default False. Generally useful during development and testing. index_kwargs (Optional[dict]): Dict of kwargs for index creation. See https://docs.nomic.ai/atlas_api.html Returns: AtlasDB: Nomic's neural database and finest rhizomatic instrument """ if name is None or api_key is None: raise ValueError('`name` and `api_key` cannot be None.') texts = [doc.page_content for doc in documents] metadatas = [doc.metadata for doc in documents] return cls.from_texts(name=name, api_key=api_key, texts=texts, embedding= embedding, metadatas=metadatas, ids=ids, description=description, is_public=is_public, reset_project_if_exists=reset_project_if_exists, index_kwargs=index_kwargs)
@classmethod def from_documents(cls: Type[AtlasDB], documents: List[Document], embedding: Optional[Embeddings]=None, ids: Optional[List[str]]=None, name: Optional[str]=None, api_key: Optional[str]=None, persist_directory: Optional[str]=None, description: str='A description for your project', is_public: bool=True, reset_project_if_exists: bool=False, index_kwargs: Optional[dict]=None, **kwargs: Any) ->AtlasDB: """Create an AtlasDB vectorstore from a list of documents. Args: name (str): Name of the collection to create. api_key (str): Your nomic API key, documents (List[Document]): List of documents to add to the vectorstore. embedding (Optional[Embeddings]): Embedding function. Defaults to None. ids (Optional[List[str]]): Optional list of document IDs. If None, ids will be auto created description (str): A description for your project. is_public (bool): Whether your project is publicly accessible. True by default. reset_project_if_exists (bool): Whether to reset this project if it already exists. Default False. Generally useful during development and testing. index_kwargs (Optional[dict]): Dict of kwargs for index creation. See https://docs.nomic.ai/atlas_api.html Returns: AtlasDB: Nomic's neural database and finest rhizomatic instrument """ if name is None or api_key is None: raise ValueError('`name` and `api_key` cannot be None.') texts = [doc.page_content for doc in documents] metadatas = [doc.metadata for doc in documents] return cls.from_texts(name=name, api_key=api_key, texts=texts, embedding=embedding, metadatas=metadatas, ids=ids, description= description, is_public=is_public, reset_project_if_exists= reset_project_if_exists, index_kwargs=index_kwargs)
Create an AtlasDB vectorstore from a list of documents. Args: name (str): Name of the collection to create. api_key (str): Your nomic API key, documents (List[Document]): List of documents to add to the vectorstore. embedding (Optional[Embeddings]): Embedding function. Defaults to None. ids (Optional[List[str]]): Optional list of document IDs. If None, ids will be auto created description (str): A description for your project. is_public (bool): Whether your project is publicly accessible. True by default. reset_project_if_exists (bool): Whether to reset this project if it already exists. Default False. Generally useful during development and testing. index_kwargs (Optional[dict]): Dict of kwargs for index creation. See https://docs.nomic.ai/atlas_api.html Returns: AtlasDB: Nomic's neural database and finest rhizomatic instrument
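A brief usage sketch of the classmethod above; the project name, API key, and document contents are placeholders, Document is the standard langchain document class, and no embedding function is passed since the signature allows None.

    docs = [Document(page_content="hello atlas", metadata={"source": "sketch"})]
    db = AtlasDB.from_documents(documents=docs, name="my-demo-project",
        api_key="YOUR_NOMIC_API_KEY", description="demo project", is_public=False)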
warn_once
"""Warn once about the dangers of PythonREPL.""" logger.warning('Python REPL can execute arbitrary code. Use with caution.')
@functools.lru_cache(maxsize=None) def warn_once() ->None: """Warn once about the dangers of PythonREPL.""" logger.warning('Python REPL can execute arbitrary code. Use with caution.')
Warn once about the dangers of PythonREPL.
test_parsing_error
"""Test that LLM Output without a bash block raises an exce""" question = "Please echo 'hello world' to the terminal." prompt = _PROMPT_TEMPLATE.format(question=question) queries = {prompt: """ ```text echo 'hello world' ``` """} fake_llm = FakeLLM(queries=queries) fake_llm_bash_chain = LLMBashChain.from_llm(fake_llm, input_key='q', output_key='a') with pytest.raises(OutputParserException): fake_llm_bash_chain.run(question)
def test_parsing_error() ->None:
    """Test that LLM output without a bash block raises an exception."""
    question = "Please echo 'hello world' to the terminal."
    prompt = _PROMPT_TEMPLATE.format(question=question)
    queries = {prompt: "\n```text\necho 'hello world'\n```\n"}
    fake_llm = FakeLLM(queries=queries)
    fake_llm_bash_chain = LLMBashChain.from_llm(fake_llm, input_key='q',
        output_key='a')
    with pytest.raises(OutputParserException):
        fake_llm_bash_chain.run(question)
Test that LLM output without a bash block raises an exception.
on_llm_new_token
"""Run on new LLM token. Only available when streaming is enabled.""" sys.stdout.write(token) sys.stdout.flush()
def on_llm_new_token(self, token: str, **kwargs: Any) ->None: """Run on new LLM token. Only available when streaming is enabled.""" sys.stdout.write(token) sys.stdout.flush()
Run on new LLM token. Only available when streaming is enabled.
can_cast_to_float
"""Check if a string can be cast to a float.""" try: float(string) return True except ValueError: return False
def can_cast_to_float(string: str) ->bool: """Check if a string can be cast to a float.""" try: float(string) return True except ValueError: return False
Check if a string can be cast to a float.
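For illustration, the helper's behavior on a few inputs (directly derivable from the try/except above):

    can_cast_to_float("3.14")    # True
    can_cast_to_float("1e-5")    # True (scientific notation also parses)
    can_cast_to_float("abc")     # False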
_import_starrocks
from langchain_community.vectorstores.starrocks import StarRocks return StarRocks
def _import_starrocks() ->Any: from langchain_community.vectorstores.starrocks import StarRocks return StarRocks
null
sample_data_frame
import polars as pl data = {'text': ['Hello', 'World'], 'author': ['Alice', 'Bob'], 'date': [ '2022-01-01', '2022-01-02']} return pl.DataFrame(data)
@pytest.fixture def sample_data_frame() ->pl.DataFrame: import polars as pl data = {'text': ['Hello', 'World'], 'author': ['Alice', 'Bob'], 'date': ['2022-01-01', '2022-01-02']} return pl.DataFrame(data)
null
test_golang_code_splitter
splitter = RecursiveCharacterTextSplitter.from_language(Language.GO, chunk_size=CHUNK_SIZE, chunk_overlap=0) code = """ package main import "fmt" func helloWorld() { fmt.Println("Hello, World!") } func main() { helloWorld() } """ chunks = splitter.split_text(code) assert chunks == ['package main', 'import "fmt"', 'func', 'helloWorld() {', 'fmt.Println("He', 'llo,', 'World!")', '}', 'func main() {', 'helloWorld()', '}']
def test_golang_code_splitter() ->None: splitter = RecursiveCharacterTextSplitter.from_language(Language.GO, chunk_size=CHUNK_SIZE, chunk_overlap=0) code = """ package main import "fmt" func helloWorld() { fmt.Println("Hello, World!") } func main() { helloWorld() } """ chunks = splitter.split_text(code) assert chunks == ['package main', 'import "fmt"', 'func', 'helloWorld() {', 'fmt.Println("He', 'llo,', 'World!")', '}', 'func main() {', 'helloWorld()', '}']
null