Columns:
  method_name: string (length 1 to 78)
  method_body: string (length 3 to 9.66k)
  full_code: string (length 31 to 10.7k)
  docstring: string (length 4 to 4.74k)
_import_slack_schedule_message
from langchain_community.tools.slack.schedule_message import SlackScheduleMessage
return SlackScheduleMessage

def _import_slack_schedule_message() -> Any:
    from langchain_community.tools.slack.schedule_message import SlackScheduleMessage
    return SlackScheduleMessage
null
set
if not value:
    return self.delete(key)
query = f"""
    INSERT OR REPLACE INTO {self.full_table_name}
    (key, value)
    VALUES (?, ?)
"""
with self.conn:
    self.conn.execute(query, (key, value))

def set(self, key: str, value: Optional[str]) -> None:
    if not value:
        return self.delete(key)
    query = f"""
        INSERT OR REPLACE INTO {self.full_table_name}
        (key, value)
        VALUES (?, ?)
    """
    with self.conn:
        self.conn.execute(query, (key, value))
null
get_output_schema
"""Get a pydantic model that can be used to validate output to the runnable. Runnables that leverage the configurable_fields and configurable_alternatives methods will have a dynamic output schema that depends on which configuration the runnable is invoked with. This method allows to get an output schema for a specific configuration. Args: config: A config to use when generating the schema. Returns: A pydantic model that can be used to validate output. """ root_type = self.OutputType if inspect.isclass(root_type) and issubclass(root_type, BaseModel): return root_type return create_model(self.get_name('Output'), __root__=(root_type, None), __config__=_SchemaConfig)
def get_output_schema(self, config: Optional[RunnableConfig]=None) ->Type[ BaseModel]: """Get a pydantic model that can be used to validate output to the runnable. Runnables that leverage the configurable_fields and configurable_alternatives methods will have a dynamic output schema that depends on which configuration the runnable is invoked with. This method allows to get an output schema for a specific configuration. Args: config: A config to use when generating the schema. Returns: A pydantic model that can be used to validate output. """ root_type = self.OutputType if inspect.isclass(root_type) and issubclass(root_type, BaseModel): return root_type return create_model(self.get_name('Output'), __root__=(root_type, None), __config__=_SchemaConfig)
Get a pydantic model that can be used to validate output to the runnable. Runnables that leverage the configurable_fields and configurable_alternatives methods will have a dynamic output schema that depends on which configuration the runnable is invoked with. This method allows to get an output schema for a specific configuration. Args: config: A config to use when generating the schema. Returns: A pydantic model that can be used to validate output.
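A brief usage sketch for get_output_schema: RunnableLambda, its import path, and the printed schema shape are assumptions chosen for illustration, not taken from this row.

from langchain_core.runnables import RunnableLambda  # assumed import path

runnable = RunnableLambda(lambda x: x + 1)
schema = runnable.get_output_schema()   # pydantic model describing the runnable's output
print(schema.schema())                  # e.g. a JSON schema titled something like 'RunnableLambdaOutput'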
on_llm_end
self.on_llm_end_common()
def on_llm_end(self, *args: Any, **kwargs: Any) -> Any:
    self.on_llm_end_common()
null
setUp
self.example_code = """import os def hello(text): print(text) class Simple: def __init__(self): self.a = 1 hello("Hello!")""" self.expected_simplified_code = """import os # Code for: def hello(text): # Code for: class Simple: hello("Hello!")""" self.expected_extracted_code = ["""def hello(text): print(text)""", """class Simple: def __init__(self): self.a = 1"""]
def setUp(self) ->None: self.example_code = """import os def hello(text): print(text) class Simple: def __init__(self): self.a = 1 hello("Hello!")""" self.expected_simplified_code = """import os # Code for: def hello(text): # Code for: class Simple: hello("Hello!")""" self.expected_extracted_code = ["""def hello(text): print(text)""", """class Simple: def __init__(self): self.a = 1"""]
null
_evaluate_string_pairs
""" Evaluate the string distance between two predictions. Args: prediction (str): The first prediction string. prediction_b (str): The second prediction string. callbacks (Callbacks, optional): The callbacks to use. tags (List[str], optional): Tags to apply to traces. metadata (Dict[str, Any], optional): Metadata to apply to traces. **kwargs: Additional keyword arguments. Returns: dict: The evaluation results containing the score. """ result = self(inputs={'prediction': prediction, 'prediction_b': prediction_b}, callbacks=callbacks, tags=tags, metadata=metadata, include_run_info=include_run_info) return self._prepare_output(result)
def _evaluate_string_pairs(self, *, prediction: str, prediction_b: str, callbacks: Callbacks=None, tags: Optional[List[str]]=None, metadata: Optional[Dict[str, Any]]=None, include_run_info: bool=False, **kwargs: Any ) ->dict: """ Evaluate the string distance between two predictions. Args: prediction (str): The first prediction string. prediction_b (str): The second prediction string. callbacks (Callbacks, optional): The callbacks to use. tags (List[str], optional): Tags to apply to traces. metadata (Dict[str, Any], optional): Metadata to apply to traces. **kwargs: Additional keyword arguments. Returns: dict: The evaluation results containing the score. """ result = self(inputs={'prediction': prediction, 'prediction_b': prediction_b}, callbacks=callbacks, tags=tags, metadata=metadata, include_run_info=include_run_info) return self._prepare_output(result)
Evaluate the string distance between two predictions. Args: prediction (str): The first prediction string. prediction_b (str): The second prediction string. callbacks (Callbacks, optional): The callbacks to use. tags (List[str], optional): Tags to apply to traces. metadata (Dict[str, Any], optional): Metadata to apply to traces. **kwargs: Additional keyword arguments. Returns: dict: The evaluation results containing the score.
_import_ctranslate2
from langchain_community.llms.ctranslate2 import CTranslate2
return CTranslate2

def _import_ctranslate2() -> Any:
    from langchain_community.llms.ctranslate2 import CTranslate2
    return CTranslate2
null
on_tool_start
"""Run when tool starts running.""" self.step += 1 self.tool_starts += 1 self.starts += 1 resp = self._init_resp() resp.update({'action': 'on_tool_start'}) resp.update(flatten_dict(serialized)) resp.update(self.get_custom_callback_meta()) if self.stream_logs: self._log_stream(input_str, resp, self.step) resp.update({'input_str': input_str}) self.action_records.append(resp)
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, ** kwargs: Any) ->None: """Run when tool starts running.""" self.step += 1 self.tool_starts += 1 self.starts += 1 resp = self._init_resp() resp.update({'action': 'on_tool_start'}) resp.update(flatten_dict(serialized)) resp.update(self.get_custom_callback_meta()) if self.stream_logs: self._log_stream(input_str, resp, self.step) resp.update({'input_str': input_str}) self.action_records.append(resp)
Run when tool starts running.
test_llm_construction_with_kwargs
llm_chain_kwargs = {'verbose': True}
compressor = LLMChainExtractor.from_llm(ChatOpenAI(), llm_chain_kwargs=llm_chain_kwargs)
assert compressor.llm_chain.verbose is True

def test_llm_construction_with_kwargs() -> None:
    llm_chain_kwargs = {'verbose': True}
    compressor = LLMChainExtractor.from_llm(ChatOpenAI(), llm_chain_kwargs=llm_chain_kwargs)
    assert compressor.llm_chain.verbose is True
null
w
"""Width of the box.""" return self._w
@property def w(self) ->int: """Width of the box.""" return self._w
Width of the box.
test_async_recursive_url_loader_deterministic
url = 'https://docs.python.org/3.9/'
loader = RecursiveUrlLoader(url, use_async=True, max_depth=3, timeout=None)
docs = sorted(loader.load(), key=lambda d: d.metadata['source'])
docs_2 = sorted(loader.load(), key=lambda d: d.metadata['source'])
assert docs == docs_2

def test_async_recursive_url_loader_deterministic() -> None:
    url = 'https://docs.python.org/3.9/'
    loader = RecursiveUrlLoader(url, use_async=True, max_depth=3, timeout=None)
    docs = sorted(loader.load(), key=lambda d: d.metadata['source'])
    docs_2 = sorted(loader.load(), key=lambda d: d.metadata['source'])
    assert docs == docs_2
null
test_read_schema
with pytest.raises(TypeError):
    read_schema(index_schema=None)

def test_read_schema() -> None:
    with pytest.raises(TypeError):
        read_schema(index_schema=None)
null
convert_openai_messages
"""Convert dictionaries representing OpenAI messages to LangChain format. Args: messages: List of dictionaries representing OpenAI messages Returns: List of LangChain BaseMessage objects. """ return [convert_dict_to_message(m) for m in messages]
def convert_openai_messages(messages: Sequence[Dict[str, Any]]) ->List[ BaseMessage]: """Convert dictionaries representing OpenAI messages to LangChain format. Args: messages: List of dictionaries representing OpenAI messages Returns: List of LangChain BaseMessage objects. """ return [convert_dict_to_message(m) for m in messages]
Convert dictionaries representing OpenAI messages to LangChain format. Args: messages: List of dictionaries representing OpenAI messages Returns: List of LangChain BaseMessage objects.
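A minimal usage sketch for the converter above; the import path is an assumption (the helper typically lives in the OpenAI adapters module), and the concrete message classes returned depend on the roles in the input.

from langchain_community.adapters.openai import convert_openai_messages  # assumed import path

openai_messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
lc_messages = convert_openai_messages(openai_messages)
# lc_messages is a list of BaseMessage objects (e.g. SystemMessage, HumanMessage)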
_merge_kwargs_dict
"""Merge additional_kwargs from another BaseMessageChunk into this one, handling specific scenarios where a key exists in both dictionaries but has a value of None in 'left'. In such cases, the method uses the value from 'right' for that key in the merged dictionary. Example: If left = {"function_call": {"arguments": None}} and right = {"function_call": {"arguments": "{ "}} then, after merging, for the key "function_call", the value from 'right' is used, resulting in merged = {"function_call": {"arguments": "{ "}}. """ merged = left.copy() for k, v in right.items(): if k not in merged: merged[k] = v elif merged[k] is None and v: merged[k] = v elif v is None: continue elif merged[k] == v: continue elif type(merged[k]) != type(v): raise TypeError( f'additional_kwargs["{k}"] already exists in this message, but with a different type.' ) elif isinstance(merged[k], str): merged[k] += v elif isinstance(merged[k], dict): merged[k] = self._merge_kwargs_dict(merged[k], v) elif isinstance(merged[k], list): merged[k] = merged[k].copy() for i, e in enumerate(v): if isinstance(e, dict) and isinstance(e.get('index'), int): i = e['index'] if i < len(merged[k]): merged[k][i] = self._merge_kwargs_dict(merged[k][i], e) else: merged[k] = merged[k] + [e] else: raise TypeError( f'Additional kwargs key {k} already exists in this message.') return merged
def _merge_kwargs_dict(self, left: Dict[str, Any], right: Dict[str, Any] ) ->Dict[str, Any]: """Merge additional_kwargs from another BaseMessageChunk into this one, handling specific scenarios where a key exists in both dictionaries but has a value of None in 'left'. In such cases, the method uses the value from 'right' for that key in the merged dictionary. Example: If left = {"function_call": {"arguments": None}} and right = {"function_call": {"arguments": "{ "}} then, after merging, for the key "function_call", the value from 'right' is used, resulting in merged = {"function_call": {"arguments": "{ "}}. """ merged = left.copy() for k, v in right.items(): if k not in merged: merged[k] = v elif merged[k] is None and v: merged[k] = v elif v is None: continue elif merged[k] == v: continue elif type(merged[k]) != type(v): raise TypeError( f'additional_kwargs["{k}"] already exists in this message, but with a different type.' ) elif isinstance(merged[k], str): merged[k] += v elif isinstance(merged[k], dict): merged[k] = self._merge_kwargs_dict(merged[k], v) elif isinstance(merged[k], list): merged[k] = merged[k].copy() for i, e in enumerate(v): if isinstance(e, dict) and isinstance(e.get('index'), int): i = e['index'] if i < len(merged[k]): merged[k][i] = self._merge_kwargs_dict(merged[k][i], e) else: merged[k] = merged[k] + [e] else: raise TypeError( f'Additional kwargs key {k} already exists in this message.') return merged
Merge additional_kwargs from another BaseMessageChunk into this one, handling specific scenarios where a key exists in both dictionaries but has a value of None in 'left'. In such cases, the method uses the value from 'right' for that key in the merged dictionary. Example: If left = {"function_call": {"arguments": None}} and right = {"function_call": {"arguments": "{ "}} then, after merging, for the key "function_call", the value from 'right' is used, resulting in merged = {"function_call": {"arguments": "{ "}}.
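The merge rules above are what let streamed message chunks concatenate cleanly. A hedged illustration, assuming chunk addition delegates to this helper and that AIMessageChunk is importable from langchain_core.messages:

from langchain_core.messages import AIMessageChunk  # assumed import path

left = AIMessageChunk(content="", additional_kwargs={"function_call": {"name": "get_weather", "arguments": None}})
right = AIMessageChunk(content="", additional_kwargs={"function_call": {"arguments": '{"city": '}})
merged = left + right
# The None value on the left is replaced by the right-hand value, and later string
# fragments would be concatenated, so merged.additional_kwargs should equal
# {"function_call": {"name": "get_weather", "arguments": '{"city": '}}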
test_map_stream
prompt = SystemMessagePromptTemplate.from_template('You are a nice assistant.' ) + '{question}' chat_res = "i'm a chatbot" chat = FakeListChatModel(responses=[chat_res], sleep=0.01) llm_res = "i'm a textbot" llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01) chain: Runnable = prompt | {'chat': chat.bind(stop=['Thought:']), 'llm': llm, 'passthrough': RunnablePassthrough()} stream = chain.stream({'question': 'What is your name?'}) final_value = None streamed_chunks = [] for chunk in stream: streamed_chunks.append(chunk) if final_value is None: final_value = chunk else: final_value += chunk assert streamed_chunks[0] in [{'passthrough': prompt.invoke({'question': 'What is your name?'})}, {'llm': 'i'}, {'chat': AIMessageChunk(content= 'i')}] assert len(streamed_chunks) == len(chat_res) + len(llm_res) + 1 assert all(len(c.keys()) == 1 for c in streamed_chunks) assert final_value is not None assert final_value.get('chat').content == "i'm a chatbot" assert final_value.get('llm') == "i'm a textbot" assert final_value.get('passthrough') == prompt.invoke({'question': 'What is your name?'}) chain_pick_one = chain.pick('llm') assert chain_pick_one.output_schema.schema() == {'title': 'RunnableSequenceOutput', 'type': 'string'} stream = chain_pick_one.stream({'question': 'What is your name?'}) final_value = None streamed_chunks = [] for chunk in stream: streamed_chunks.append(chunk) if final_value is None: final_value = chunk else: final_value += chunk assert streamed_chunks[0] == 'i' assert len(streamed_chunks) == len(llm_res) chain_pick_two = chain.assign(hello=RunnablePick('llm').pipe(llm)).pick([ 'llm', 'hello']) assert chain_pick_two.output_schema.schema() == {'title': 'RunnableSequenceOutput', 'type': 'object', 'properties': {'hello': { 'title': 'Hello', 'type': 'string'}, 'llm': {'title': 'Llm', 'type': 'string'}}} stream = chain_pick_two.stream({'question': 'What is your name?'}) final_value = None streamed_chunks = [] for chunk in stream: streamed_chunks.append(chunk) if final_value is None: final_value = chunk else: final_value += chunk assert streamed_chunks[0] in [{'llm': 'i'}, {'chat': AIMessageChunk(content ='i')}] assert len(streamed_chunks) == len(llm_res) + len(chat_res)
def test_map_stream() ->None: prompt = SystemMessagePromptTemplate.from_template( 'You are a nice assistant.') + '{question}' chat_res = "i'm a chatbot" chat = FakeListChatModel(responses=[chat_res], sleep=0.01) llm_res = "i'm a textbot" llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01) chain: Runnable = prompt | {'chat': chat.bind(stop=['Thought:']), 'llm': llm, 'passthrough': RunnablePassthrough()} stream = chain.stream({'question': 'What is your name?'}) final_value = None streamed_chunks = [] for chunk in stream: streamed_chunks.append(chunk) if final_value is None: final_value = chunk else: final_value += chunk assert streamed_chunks[0] in [{'passthrough': prompt.invoke({'question': 'What is your name?'})}, {'llm': 'i'}, {'chat': AIMessageChunk( content='i')}] assert len(streamed_chunks) == len(chat_res) + len(llm_res) + 1 assert all(len(c.keys()) == 1 for c in streamed_chunks) assert final_value is not None assert final_value.get('chat').content == "i'm a chatbot" assert final_value.get('llm') == "i'm a textbot" assert final_value.get('passthrough') == prompt.invoke({'question': 'What is your name?'}) chain_pick_one = chain.pick('llm') assert chain_pick_one.output_schema.schema() == {'title': 'RunnableSequenceOutput', 'type': 'string'} stream = chain_pick_one.stream({'question': 'What is your name?'}) final_value = None streamed_chunks = [] for chunk in stream: streamed_chunks.append(chunk) if final_value is None: final_value = chunk else: final_value += chunk assert streamed_chunks[0] == 'i' assert len(streamed_chunks) == len(llm_res) chain_pick_two = chain.assign(hello=RunnablePick('llm').pipe(llm)).pick([ 'llm', 'hello']) assert chain_pick_two.output_schema.schema() == {'title': 'RunnableSequenceOutput', 'type': 'object', 'properties': {'hello': {'title': 'Hello', 'type': 'string'}, 'llm': {'title': 'Llm', 'type': 'string'}}} stream = chain_pick_two.stream({'question': 'What is your name?'}) final_value = None streamed_chunks = [] for chunk in stream: streamed_chunks.append(chunk) if final_value is None: final_value = chunk else: final_value += chunk assert streamed_chunks[0] in [{'llm': 'i'}, {'chat': AIMessageChunk( content='i')}] assert len(streamed_chunks) == len(llm_res) + len(chat_res)
null
_generate
generations: List[List[Generation]] = [] generation_config = {'stop_sequences': stop, 'temperature': self. temperature, 'top_p': self.top_p, 'top_k': self.top_k, 'max_output_tokens': self.max_output_tokens, 'candidate_count': self.n} for prompt in prompts: if self.is_gemini: res = _completion_with_retry(self, prompt=prompt, stream=False, is_gemini=True, run_manager=run_manager, generation_config= generation_config) candidates = [''.join([p.text for p in c.content.parts]) for c in res.candidates] generations.append([Generation(text=c) for c in candidates]) else: res = _completion_with_retry(self, model=self.model, prompt=prompt, stream=False, is_gemini=False, run_manager=run_manager, ** generation_config) prompt_generations = [] for candidate in res.candidates: raw_text = candidate['output'] stripped_text = _strip_erroneous_leading_spaces(raw_text) prompt_generations.append(Generation(text=stripped_text)) generations.append(prompt_generations) return LLMResult(generations=generations)
def _generate(self, prompts: List[str], stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any ) ->LLMResult: generations: List[List[Generation]] = [] generation_config = {'stop_sequences': stop, 'temperature': self. temperature, 'top_p': self.top_p, 'top_k': self.top_k, 'max_output_tokens': self.max_output_tokens, 'candidate_count': self.n} for prompt in prompts: if self.is_gemini: res = _completion_with_retry(self, prompt=prompt, stream=False, is_gemini=True, run_manager=run_manager, generation_config= generation_config) candidates = [''.join([p.text for p in c.content.parts]) for c in res.candidates] generations.append([Generation(text=c) for c in candidates]) else: res = _completion_with_retry(self, model=self.model, prompt= prompt, stream=False, is_gemini=False, run_manager= run_manager, **generation_config) prompt_generations = [] for candidate in res.candidates: raw_text = candidate['output'] stripped_text = _strip_erroneous_leading_spaces(raw_text) prompt_generations.append(Generation(text=stripped_text)) generations.append(prompt_generations) return LLMResult(generations=generations)
null
add_texts
"""Add more texts to the vectorstore index. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters """ max_id = self._connection.execute( f'SELECT max(rowid) as rowid FROM {self._table}').fetchone()['rowid'] if max_id is None: max_id = 0 embeds = self._embedding.embed_documents(list(texts)) if not metadatas: metadatas = [{} for _ in texts] data_input = [(text, json.dumps(metadata), json.dumps(embed)) for text, metadata, embed in zip(texts, metadatas, embeds)] self._connection.executemany( f'INSERT INTO {self._table}(text, metadata, text_embedding) VALUES (?,?,?)' , data_input) self._connection.commit() results = self._connection.execute( f'SELECT rowid FROM {self._table} WHERE rowid > {max_id}') return [row['rowid'] for row in results]
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]= None, **kwargs: Any) ->List[str]: """Add more texts to the vectorstore index. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters """ max_id = self._connection.execute( f'SELECT max(rowid) as rowid FROM {self._table}').fetchone()['rowid'] if max_id is None: max_id = 0 embeds = self._embedding.embed_documents(list(texts)) if not metadatas: metadatas = [{} for _ in texts] data_input = [(text, json.dumps(metadata), json.dumps(embed)) for text, metadata, embed in zip(texts, metadatas, embeds)] self._connection.executemany( f'INSERT INTO {self._table}(text, metadata, text_embedding) VALUES (?,?,?)' , data_input) self._connection.commit() results = self._connection.execute( f'SELECT rowid FROM {self._table} WHERE rowid > {max_id}') return [row['rowid'] for row in results]
Add more texts to the vectorstore index. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters
_reset_llm_token_stream
self._llm_token_stream = ''
self._llm_token_writer_idx = None

def _reset_llm_token_stream(self) -> None:
    self._llm_token_stream = ''
    self._llm_token_writer_idx = None
null
_import_bageldb
from langchain_community.vectorstores.bageldb import Bagel
return Bagel

def _import_bageldb() -> Any:
    from langchain_community.vectorstores.bageldb import Bagel
    return Bagel
null
format
"""Format the chat template into a string. Args: **kwargs: keyword arguments to use for filling in template variables in all the template messages in this chat template. Returns: formatted string """ return self.format_prompt(**kwargs).to_string()
def format(self, **kwargs: Any) ->str: """Format the chat template into a string. Args: **kwargs: keyword arguments to use for filling in template variables in all the template messages in this chat template. Returns: formatted string """ return self.format_prompt(**kwargs).to_string()
Format the chat template into a string. Args: **kwargs: keyword arguments to use for filling in template variables in all the template messages in this chat template. Returns: formatted string
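A short usage sketch of the format method above, assuming it belongs to a chat prompt template such as ChatPromptTemplate; the import path and the from_messages call are assumptions, not taken from this row.

from langchain_core.prompts import ChatPromptTemplate  # assumed import path

prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a {role}."),
    ("human", "{question}"),
])
text = prompt.format(role="pirate", question="Where is the treasure?")
# 'text' is a single string rendering of every message in the template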
__init__
"""Create engine from database URI.""" self._engine = engine self._schema = schema if include_tables and ignore_tables: raise ValueError('Cannot specify both include_tables and ignore_tables') self._inspector = inspect(self._engine) self._all_tables = set(self._inspector.get_table_names(schema=schema) + ( self._inspector.get_view_names(schema=schema) if view_support else [])) self._include_tables = set(include_tables) if include_tables else set() if self._include_tables: missing_tables = self._include_tables - self._all_tables if missing_tables: raise ValueError( f'include_tables {missing_tables} not found in database') self._ignore_tables = set(ignore_tables) if ignore_tables else set() if self._ignore_tables: missing_tables = self._ignore_tables - self._all_tables if missing_tables: raise ValueError( f'ignore_tables {missing_tables} not found in database') usable_tables = self.get_usable_table_names() self._usable_tables = set(usable_tables) if usable_tables else self._all_tables if not isinstance(sample_rows_in_table_info, int): raise TypeError('sample_rows_in_table_info must be an integer') self._sample_rows_in_table_info = sample_rows_in_table_info self._indexes_in_table_info = indexes_in_table_info self._custom_table_info = custom_table_info if self._custom_table_info: if not isinstance(self._custom_table_info, dict): raise TypeError( 'table_info must be a dictionary with table names as keys and the desired table info as values' ) intersection = set(self._custom_table_info).intersection(self._all_tables) self._custom_table_info = dict((table, self._custom_table_info[table]) for table in self._custom_table_info if table in intersection) self._max_string_length = max_string_length self._metadata = metadata or MetaData() self._metadata.reflect(views=view_support, bind=self._engine, only=list( self._usable_tables), schema=self._schema)
def __init__(self, engine: Engine, schema: Optional[str]=None, metadata: Optional[MetaData]=None, ignore_tables: Optional[List[str]]=None, include_tables: Optional[List[str]]=None, sample_rows_in_table_info: int=3, indexes_in_table_info: bool=False, custom_table_info: Optional[ dict]=None, view_support: bool=False, max_string_length: int=300): """Create engine from database URI.""" self._engine = engine self._schema = schema if include_tables and ignore_tables: raise ValueError('Cannot specify both include_tables and ignore_tables' ) self._inspector = inspect(self._engine) self._all_tables = set(self._inspector.get_table_names(schema=schema) + (self._inspector.get_view_names(schema=schema) if view_support else []) ) self._include_tables = set(include_tables) if include_tables else set() if self._include_tables: missing_tables = self._include_tables - self._all_tables if missing_tables: raise ValueError( f'include_tables {missing_tables} not found in database') self._ignore_tables = set(ignore_tables) if ignore_tables else set() if self._ignore_tables: missing_tables = self._ignore_tables - self._all_tables if missing_tables: raise ValueError( f'ignore_tables {missing_tables} not found in database') usable_tables = self.get_usable_table_names() self._usable_tables = set(usable_tables ) if usable_tables else self._all_tables if not isinstance(sample_rows_in_table_info, int): raise TypeError('sample_rows_in_table_info must be an integer') self._sample_rows_in_table_info = sample_rows_in_table_info self._indexes_in_table_info = indexes_in_table_info self._custom_table_info = custom_table_info if self._custom_table_info: if not isinstance(self._custom_table_info, dict): raise TypeError( 'table_info must be a dictionary with table names as keys and the desired table info as values' ) intersection = set(self._custom_table_info).intersection(self. _all_tables) self._custom_table_info = dict((table, self._custom_table_info[ table]) for table in self._custom_table_info if table in intersection) self._max_string_length = max_string_length self._metadata = metadata or MetaData() self._metadata.reflect(views=view_support, bind=self._engine, only=list (self._usable_tables), schema=self._schema)
Create engine from database URI.
test_get_dimension_values
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {'data': [{'test_dimension': 'value1'}]}
mock_request.return_value = mock_response
values = self.loader._get_dimension_values('test_dimension')
self.assertEqual(values, ['value1'])

@patch('requests.request')
def test_get_dimension_values(self, mock_request: MagicMock) -> None:
    mock_response = Mock()
    mock_response.status_code = 200
    mock_response.json.return_value = {'data': [{'test_dimension': 'value1'}]}
    mock_request.return_value = mock_response
    values = self.loader._get_dimension_values('test_dimension')
    self.assertEqual(values, ['value1'])
null
clean_results
"""Clean results from Tavily Search API.""" clean_results = [] for result in results: clean_results.append({'url': result['url'], 'content': result['content']}) return clean_results
def clean_results(self, results: List[Dict]) ->List[Dict]: """Clean results from Tavily Search API.""" clean_results = [] for result in results: clean_results.append({'url': result['url'], 'content': result[ 'content']}) return clean_results
Clean results from Tavily Search API.
get_format_instructions
return self.parser.get_format_instructions()
def get_format_instructions(self) -> str:
    return self.parser.get_format_instructions()
null
_is_method_unchanged
return base_method.__qualname__ == derived_method.__qualname__
def _is_method_unchanged(self, base_method: Callable, derived_method: Callable) -> bool:
    return base_method.__qualname__ == derived_method.__qualname__
null
on_chat_model_start_common
self.chat_model_starts += 1
self.starts += 1

def on_chat_model_start_common(self) -> None:
    self.chat_model_starts += 1
    self.starts += 1
null
generate
"""Top Level call""" params = self._get_invocation_params(stop=stop, **kwargs) options = {'stop': stop} callback_manager = CallbackManager.configure(callbacks, self.callbacks, self.verbose, tags, self.tags, metadata, self.metadata) run_managers = callback_manager.on_chat_model_start(dumpd(self), messages, invocation_params=params, options=options, name=run_name, batch_size= len(messages)) results = [] for i, m in enumerate(messages): try: results.append(self._generate_with_cache(m, stop=stop, run_manager= run_managers[i] if run_managers else None, **kwargs)) except BaseException as e: if run_managers: run_managers[i].on_llm_error(e, response=LLMResult(generations=[])) raise e flattened_outputs = [LLMResult(generations=[res.generations], llm_output= res.llm_output) for res in results] llm_output = self._combine_llm_outputs([res.llm_output for res in results]) generations = [res.generations for res in results] output = LLMResult(generations=generations, llm_output=llm_output) if run_managers: run_infos = [] for manager, flattened_output in zip(run_managers, flattened_outputs): manager.on_llm_end(flattened_output) run_infos.append(RunInfo(run_id=manager.run_id)) output.run = run_infos return output
def generate(self, messages: List[List[BaseMessage]], stop: Optional[List[ str]]=None, callbacks: Callbacks=None, *, tags: Optional[List[str]]= None, metadata: Optional[Dict[str, Any]]=None, run_name: Optional[str]= None, **kwargs: Any) ->LLMResult: """Top Level call""" params = self._get_invocation_params(stop=stop, **kwargs) options = {'stop': stop} callback_manager = CallbackManager.configure(callbacks, self.callbacks, self.verbose, tags, self.tags, metadata, self.metadata) run_managers = callback_manager.on_chat_model_start(dumpd(self), messages, invocation_params=params, options=options, name=run_name, batch_size=len(messages)) results = [] for i, m in enumerate(messages): try: results.append(self._generate_with_cache(m, stop=stop, run_manager=run_managers[i] if run_managers else None, ** kwargs)) except BaseException as e: if run_managers: run_managers[i].on_llm_error(e, response=LLMResult( generations=[])) raise e flattened_outputs = [LLMResult(generations=[res.generations], llm_output=res.llm_output) for res in results] llm_output = self._combine_llm_outputs([res.llm_output for res in results]) generations = [res.generations for res in results] output = LLMResult(generations=generations, llm_output=llm_output) if run_managers: run_infos = [] for manager, flattened_output in zip(run_managers, flattened_outputs): manager.on_llm_end(flattened_output) run_infos.append(RunInfo(run_id=manager.run_id)) output.run = run_infos return output
Top Level call
_to_langchain_compatible_metadata
"""Convert a dictionary to a compatible with langchain.""" result = {} for key, value in metadata.items(): if type(value) in {str, int, float}: result[key] = value else: result[key] = str(value) return result
def _to_langchain_compatible_metadata(self, metadata: dict) ->dict: """Convert a dictionary to a compatible with langchain.""" result = {} for key, value in metadata.items(): if type(value) in {str, int, float}: result[key] = value else: result[key] = str(value) return result
Convert a dictionary to a compatible with langchain.
test_mmr
texts = ['foo', 'foo', 'fou', 'foy']
vectorstore = MongoDBAtlasVectorSearch.from_texts(
    texts, embedding_openai, collection=collection, index_name=INDEX_NAME
)
sleep(1)
query = 'foo'
output = vectorstore.max_marginal_relevance_search(query, k=10, lambda_mult=0.1)
assert len(output) == len(texts)
assert output[0].page_content == 'foo'
assert output[1].page_content != 'foo'

def test_mmr(self, embedding_openai: Embeddings, collection: Any) -> None:
    texts = ['foo', 'foo', 'fou', 'foy']
    vectorstore = MongoDBAtlasVectorSearch.from_texts(
        texts, embedding_openai, collection=collection, index_name=INDEX_NAME
    )
    sleep(1)
    query = 'foo'
    output = vectorstore.max_marginal_relevance_search(query, k=10, lambda_mult=0.1)
    assert len(output) == len(texts)
    assert output[0].page_content == 'foo'
    assert output[1].page_content != 'foo'
null
encode_image
"""Get base64 string from image URI.""" with open(uri, 'rb') as image_file: return base64.b64encode(image_file.read()).decode('utf-8')
def encode_image(self, uri: str) ->str: """Get base64 string from image URI.""" with open(uri, 'rb') as image_file: return base64.b64encode(image_file.read()).decode('utf-8')
Get base64 string from image URI.
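A hedged usage sketch for encode_image; the owning object and the file path here are hypothetical.

b64 = loader.encode_image('./photo.jpg')        # 'loader' stands in for whatever object defines encode_image
image_url = f'data:image/jpeg;base64,{b64}'     # typical use: embed the encoded image in a data-URI payload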
_create_index_if_not_exists
"""Create the index if it doesn't already exist. Args: dims_length: Length of the embedding vectors. """ if self.client.indices.exists(index=self.index_name): logger.info(f'Index {self.index_name} already exists. Skipping creation.') else: if dims_length is None: raise ValueError( 'Cannot create index without specifying dims_length ' + "when the index doesn't already exist. ") indexMapping = self._index_mapping(dims_length=dims_length) logger.debug( f'Creating index {self.index_name} with mappings {indexMapping}') self.client.indices.create(index=self.index_name, body={'settings': { 'index': {'knn': True}}, 'mappings': {'properties': indexMapping}})
def _create_index_if_not_exists(self, dims_length: Optional[int]=None) ->None: """Create the index if it doesn't already exist. Args: dims_length: Length of the embedding vectors. """ if self.client.indices.exists(index=self.index_name): logger.info( f'Index {self.index_name} already exists. Skipping creation.') else: if dims_length is None: raise ValueError( 'Cannot create index without specifying dims_length ' + "when the index doesn't already exist. ") indexMapping = self._index_mapping(dims_length=dims_length) logger.debug( f'Creating index {self.index_name} with mappings {indexMapping}') self.client.indices.create(index=self.index_name, body={'settings': {'index': {'knn': True}}, 'mappings': {'properties': indexMapping}} )
Create the index if it doesn't already exist. Args: dims_length: Length of the embedding vectors.
check_jsonformer_installation
import_jsonformer()
return values

@root_validator
def check_jsonformer_installation(cls, values: dict) -> dict:
    import_jsonformer()
    return values
null
_llm_type
"""Return type of model.""" return 'together'
@property def _llm_type(self) ->str: """Return type of model.""" return 'together'
Return type of model.
test_forefrontai_uses_actual_secret_value_from_secretstr
"""Test that the actual secret value is correctly retrieved.""" llm = ForefrontAI(forefrontai_api_key='secret-api-key', temperature=0.2) assert cast(SecretStr, llm.forefrontai_api_key).get_secret_value( ) == 'secret-api-key'
def test_forefrontai_uses_actual_secret_value_from_secretstr() ->None: """Test that the actual secret value is correctly retrieved.""" llm = ForefrontAI(forefrontai_api_key='secret-api-key', temperature=0.2) assert cast(SecretStr, llm.forefrontai_api_key).get_secret_value( ) == 'secret-api-key'
Test that the actual secret value is correctly retrieved.
set_cache_and_teardown
cache_instance = request.param set_llm_cache(cache_instance()) if get_llm_cache(): get_llm_cache().clear() else: raise ValueError('Cache not set. This should never happen.') yield if get_llm_cache(): get_llm_cache().clear() set_llm_cache(None) else: raise ValueError('Cache not set. This should never happen.')
@pytest.fixture(autouse=True, params=CACHE_OPTIONS) def set_cache_and_teardown(request: FixtureRequest) ->Generator[None, None, None]: cache_instance = request.param set_llm_cache(cache_instance()) if get_llm_cache(): get_llm_cache().clear() else: raise ValueError('Cache not set. This should never happen.') yield if get_llm_cache(): get_llm_cache().clear() set_llm_cache(None) else: raise ValueError('Cache not set. This should never happen.')
null
similarity_search_by_vector
return [doc for doc, _ in self.similarity_search_with_score_by_vector(embedding, k, filter=filter)]

def similarity_search_by_vector(self, embedding: List[float], k: int = 4,
        filter: Optional[Dict[str, str]] = None, **kwargs: Any) -> List[Document]:
    return [doc for doc, _ in self.similarity_search_with_score_by_vector(embedding, k, filter=filter)]
null
_call
"""Run get_relevant_text and llm on input query. If chain has 'return_source_documents' as 'True', returns the retrieved documents as well under the key 'source_documents'. Example: .. code-block:: python res = indexqa({'query': 'This is my query'}) answer, docs = res['result'], res['source_documents'] """ _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() question = inputs[self.input_key] accepts_run_manager = 'run_manager' in inspect.signature(self._get_docs ).parameters if accepts_run_manager: docs = self._get_docs(question, run_manager=_run_manager) else: docs = self._get_docs(question) answer = self.combine_documents_chain.run(input_documents=docs, question= question, callbacks=_run_manager.get_child()) if self.return_source_documents: return {self.output_key: answer, 'source_documents': docs} else: return {self.output_key: answer}
def _call(self, inputs: Dict[str, Any], run_manager: Optional[ CallbackManagerForChainRun]=None) ->Dict[str, Any]: """Run get_relevant_text and llm on input query. If chain has 'return_source_documents' as 'True', returns the retrieved documents as well under the key 'source_documents'. Example: .. code-block:: python res = indexqa({'query': 'This is my query'}) answer, docs = res['result'], res['source_documents'] """ _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() question = inputs[self.input_key] accepts_run_manager = 'run_manager' in inspect.signature(self._get_docs ).parameters if accepts_run_manager: docs = self._get_docs(question, run_manager=_run_manager) else: docs = self._get_docs(question) answer = self.combine_documents_chain.run(input_documents=docs, question=question, callbacks=_run_manager.get_child()) if self.return_source_documents: return {self.output_key: answer, 'source_documents': docs} else: return {self.output_key: answer}
Run get_relevant_text and llm on input query. If chain has 'return_source_documents' as 'True', returns the retrieved documents as well under the key 'source_documents'. Example: .. code-block:: python res = indexqa({'query': 'This is my query'}) answer, docs = res['result'], res['source_documents']
_run
"""Use the tool.""" try: if self.sanitize_input: query = sanitize_input(query) tree = ast.parse(query) module = ast.Module(tree.body[:-1], type_ignores=[]) exec(ast.unparse(module), self.globals, self.locals) module_end = ast.Module(tree.body[-1:], type_ignores=[]) module_end_str = ast.unparse(module_end) io_buffer = StringIO() try: with redirect_stdout(io_buffer): ret = eval(module_end_str, self.globals, self.locals) if ret is None: return io_buffer.getvalue() else: return ret except Exception: with redirect_stdout(io_buffer): exec(module_end_str, self.globals, self.locals) return io_buffer.getvalue() except Exception as e: return '{}: {}'.format(type(e).__name__, str(e))
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] =None) ->str: """Use the tool.""" try: if self.sanitize_input: query = sanitize_input(query) tree = ast.parse(query) module = ast.Module(tree.body[:-1], type_ignores=[]) exec(ast.unparse(module), self.globals, self.locals) module_end = ast.Module(tree.body[-1:], type_ignores=[]) module_end_str = ast.unparse(module_end) io_buffer = StringIO() try: with redirect_stdout(io_buffer): ret = eval(module_end_str, self.globals, self.locals) if ret is None: return io_buffer.getvalue() else: return ret except Exception: with redirect_stdout(io_buffer): exec(module_end_str, self.globals, self.locals) return io_buffer.getvalue() except Exception as e: return '{}: {}'.format(type(e).__name__, str(e))
Use the tool.
test_csv_loader_load_single_row_file
file_path = self._get_csv_file_path('test_one_row.csv')
expected_docs = [Document(
    page_content='column1: value1\ncolumn2: value2\ncolumn3: value3',
    metadata={'source': file_path, 'row': 0})]
loader = CSVLoader(file_path=file_path)
result = loader.load()
assert result == expected_docs

def test_csv_loader_load_single_row_file(self) -> None:
    file_path = self._get_csv_file_path('test_one_row.csv')
    expected_docs = [Document(
        page_content='column1: value1\ncolumn2: value2\ncolumn3: value3',
        metadata={'source': file_path, 'row': 0})]
    loader = CSVLoader(file_path=file_path)
    result = loader.load()
    assert result == expected_docs
null
_format_chat_history
buffer = []
for human, ai in chat_history:
    buffer.append(HumanMessage(content=human))
    buffer.append(AIMessage(content=ai))
return buffer

def _format_chat_history(chat_history: List[Tuple[str, str]]):
    buffer = []
    for human, ai in chat_history:
        buffer.append(HumanMessage(content=human))
        buffer.append(AIMessage(content=ai))
    return buffer
null
redis_client
"""Yield redis client.""" import redis port = 6379 password = os.environ.get('REDIS_PASSWORD') or str(uuid.uuid4()) client = redis.Redis(host='localhost', port=port, password=password, db=0) try: client.ping() except redis.exceptions.ConnectionError: pytest.skip( 'Redis server is not running or is not accessible. Verify that credentials are correct. ' ) client.flushdb() return client
@pytest.fixture def redis_client() ->Redis: """Yield redis client.""" import redis port = 6379 password = os.environ.get('REDIS_PASSWORD') or str(uuid.uuid4()) client = redis.Redis(host='localhost', port=port, password=password, db=0) try: client.ping() except redis.exceptions.ConnectionError: pytest.skip( 'Redis server is not running or is not accessible. Verify that credentials are correct. ' ) client.flushdb() return client
Yield redis client.
lc_secrets
return {'google_api_key': 'GOOGLE_API_KEY'}
@property
def lc_secrets(self) -> Dict[str, str]:
    return {'google_api_key': 'GOOGLE_API_KEY'}
null
load
"""Load from file path.""" text = '' try: with open(self.file_path, encoding=self.encoding) as f: text = f.read() except UnicodeDecodeError as e: if self.autodetect_encoding: detected_encodings = detect_file_encodings(self.file_path) for encoding in detected_encodings: logger.debug(f'Trying encoding: {encoding.encoding}') try: with open(self.file_path, encoding=encoding.encoding) as f: text = f.read() break except UnicodeDecodeError: continue else: raise RuntimeError(f'Error loading {self.file_path}') from e except Exception as e: raise RuntimeError(f'Error loading {self.file_path}') from e metadata = {'source': self.file_path} return [Document(page_content=text, metadata=metadata)]
def load(self) ->List[Document]: """Load from file path.""" text = '' try: with open(self.file_path, encoding=self.encoding) as f: text = f.read() except UnicodeDecodeError as e: if self.autodetect_encoding: detected_encodings = detect_file_encodings(self.file_path) for encoding in detected_encodings: logger.debug(f'Trying encoding: {encoding.encoding}') try: with open(self.file_path, encoding=encoding.encoding) as f: text = f.read() break except UnicodeDecodeError: continue else: raise RuntimeError(f'Error loading {self.file_path}') from e except Exception as e: raise RuntimeError(f'Error loading {self.file_path}') from e metadata = {'source': self.file_path} return [Document(page_content=text, metadata=metadata)]
Load from file path.
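A hedged usage sketch for a loader with this load method; TextLoader, its import path, and its constructor arguments are assumptions based on how file loaders are typically used, not taken from this row.

from langchain_community.document_loaders import TextLoader  # assumed import path

loader = TextLoader('notes.txt', autodetect_encoding=True)   # falls back to detected encodings on decode errors
docs = loader.load()
print(docs[0].metadata)   # expected to look like {'source': 'notes.txt'}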
mocked_requests_post
assert url.startswith(_GRADIENT_BASE_URL)
assert _MODEL_ID in url
assert json
assert headers
assert headers.get('authorization') == f'Bearer {_GRADIENT_SECRET}'
assert headers.get('x-gradient-workspace-id') == f'{_GRADIENT_WORKSPACE_ID}'
query = json.get('query')
assert query and isinstance(query, str)
output = 'bar' if 'foo' in query else 'baz'
return MockResponse(json_data={'generatedOutput': output}, status_code=200)

def mocked_requests_post(url: str, headers: dict, json: dict) -> MockResponse:
    assert url.startswith(_GRADIENT_BASE_URL)
    assert _MODEL_ID in url
    assert json
    assert headers
    assert headers.get('authorization') == f'Bearer {_GRADIENT_SECRET}'
    assert headers.get('x-gradient-workspace-id') == f'{_GRADIENT_WORKSPACE_ID}'
    query = json.get('query')
    assert query and isinstance(query, str)
    output = 'bar' if 'foo' in query else 'baz'
    return MockResponse(json_data={'generatedOutput': output}, status_code=200)
null
fake_llm_symbolic_math_chain
"""Fake LLM Math chain for testing.""" queries = {_PROMPT_TEMPLATE.format(question='What is 1 plus 1?'): 'Answer: 2', _PROMPT_TEMPLATE.format(question= 'What is the square root of 2?'): """```text sqrt(2) ```""", _PROMPT_TEMPLATE.format(question= 'What is the limit of sin(x) / x as x goes to 0?'): """```text limit(sin(x)/x,x,0) ```""", _PROMPT_TEMPLATE.format(question ='What is the integral of e^-x from 0 to infinity?'): """```text integrate(exp(-x), (x, 0, oo)) ```""", _PROMPT_TEMPLATE. format(question='What are the solutions to this equation x**2 - x?'): """```text solveset(x**2 - x, x) ```""", _PROMPT_TEMPLATE.format( question='foo'): 'foo'} fake_llm = FakeLLM(queries=queries) return LLMSymbolicMathChain.from_llm(fake_llm, input_key='q', output_key='a')
@pytest.fixture def fake_llm_symbolic_math_chain() ->LLMSymbolicMathChain: """Fake LLM Math chain for testing.""" queries = {_PROMPT_TEMPLATE.format(question='What is 1 plus 1?'): 'Answer: 2', _PROMPT_TEMPLATE.format(question= 'What is the square root of 2?'): """```text sqrt(2) ```""", _PROMPT_TEMPLATE.format(question= 'What is the limit of sin(x) / x as x goes to 0?'): """```text limit(sin(x)/x,x,0) ```""", _PROMPT_TEMPLATE.format( question='What is the integral of e^-x from 0 to infinity?'): """```text integrate(exp(-x), (x, 0, oo)) ```""", _PROMPT_TEMPLATE. format(question='What are the solutions to this equation x**2 - x?' ): """```text solveset(x**2 - x, x) ```""", _PROMPT_TEMPLATE.format (question='foo'): 'foo'} fake_llm = FakeLLM(queries=queries) return LLMSymbolicMathChain.from_llm(fake_llm, input_key='q', output_key='a')
Fake LLM Math chain for testing.
acall_func_with_variable_args
"""Call function that may optionally accept a run_manager and/or config. Args: func (Union[Callable[[Input], Awaitable[Output]], Callable[[Input, AsyncCallbackManagerForChainRun], Awaitable[Output]], Callable[[Input, AsyncCallbackManagerForChainRun, RunnableConfig], Awaitable[Output]]]): The function to call. input (Input): The input to the function. run_manager (AsyncCallbackManagerForChainRun): The run manager to pass to the function. config (RunnableConfig): The config to pass to the function. **kwargs (Any): The keyword arguments to pass to the function. Returns: Output: The output of the function. """ if accepts_config(func): if run_manager is not None: kwargs['config'] = patch_config(config, callbacks=run_manager. get_child()) else: kwargs['config'] = config if run_manager is not None and accepts_run_manager(func): kwargs['run_manager'] = run_manager return func(input, **kwargs)
def acall_func_with_variable_args(func: Union[Callable[[Input], Awaitable[ Output]], Callable[[Input, RunnableConfig], Awaitable[Output]], Callable[[Input, AsyncCallbackManagerForChainRun], Awaitable[Output]], Callable[[Input, AsyncCallbackManagerForChainRun, RunnableConfig], Awaitable[Output]]], input: Input, config: RunnableConfig, run_manager: Optional[AsyncCallbackManagerForChainRun]=None, **kwargs: Any) ->Awaitable[ Output]: """Call function that may optionally accept a run_manager and/or config. Args: func (Union[Callable[[Input], Awaitable[Output]], Callable[[Input, AsyncCallbackManagerForChainRun], Awaitable[Output]], Callable[[Input, AsyncCallbackManagerForChainRun, RunnableConfig], Awaitable[Output]]]): The function to call. input (Input): The input to the function. run_manager (AsyncCallbackManagerForChainRun): The run manager to pass to the function. config (RunnableConfig): The config to pass to the function. **kwargs (Any): The keyword arguments to pass to the function. Returns: Output: The output of the function. """ if accepts_config(func): if run_manager is not None: kwargs['config'] = patch_config(config, callbacks=run_manager. get_child()) else: kwargs['config'] = config if run_manager is not None and accepts_run_manager(func): kwargs['run_manager'] = run_manager return func(input, **kwargs)
Call function that may optionally accept a run_manager and/or config. Args: func (Union[Callable[[Input], Awaitable[Output]], Callable[[Input, AsyncCallbackManagerForChainRun], Awaitable[Output]], Callable[[Input, AsyncCallbackManagerForChainRun, RunnableConfig], Awaitable[Output]]]): The function to call. input (Input): The input to the function. run_manager (AsyncCallbackManagerForChainRun): The run manager to pass to the function. config (RunnableConfig): The config to pass to the function. **kwargs (Any): The keyword arguments to pass to the function. Returns: Output: The output of the function.
requires_input
""" This evaluator does not require input. """ return False
@property def requires_input(self) ->bool: """ This evaluator does not require input. """ return False
This evaluator does not require input.
from_rail_string
try: from guardrails import Guard except ImportError: raise ImportError( 'guardrails-ai package not installed. Install it by running `pip install guardrails-ai`.' ) return cls(guard=Guard.from_rail_string(rail_str, num_reasks=num_reasks), api=api, args=args, kwargs=kwargs)
@classmethod def from_rail_string(cls, rail_str: str, num_reasks: int=1, api: Optional[ Callable]=None, *args: Any, **kwargs: Any) ->GuardrailsOutputParser: try: from guardrails import Guard except ImportError: raise ImportError( 'guardrails-ai package not installed. Install it by running `pip install guardrails-ai`.' ) return cls(guard=Guard.from_rail_string(rail_str, num_reasks=num_reasks ), api=api, args=args, kwargs=kwargs)
null
on_chain_end
"""Run when traced chain group ends. Args: outputs (Union[Dict[str, Any], Any]): The outputs of the chain. """ self.ended = True return self.parent_run_manager.on_chain_end(outputs, **kwargs)
def on_chain_end(self, outputs: Union[Dict[str, Any], Any], **kwargs: Any ) ->None: """Run when traced chain group ends. Args: outputs (Union[Dict[str, Any], Any]): The outputs of the chain. """ self.ended = True return self.parent_run_manager.on_chain_end(outputs, **kwargs)
Run when traced chain group ends. Args: outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
on_agent_action
"""Do nothing when agent takes a specific action.""" pass
def on_agent_action(self, action: AgentAction, **kwargs: Any) ->Any: """Do nothing when agent takes a specific action.""" pass
Do nothing when agent takes a specific action.
on_llm_error
self.on_llm_error_common()
def on_llm_error(self, *args: Any, **kwargs: Any) -> Any:
    self.on_llm_error_common()
null
test_merge_config_callbacks
manager: RunnableConfig = {'callbacks': CallbackManager(handlers=[ StdOutCallbackHandler()])} handlers: RunnableConfig = {'callbacks': [ConsoleCallbackHandler()]} other_handlers: RunnableConfig = {'callbacks': [ StreamingStdOutCallbackHandler()]} merged = merge_configs(manager, handlers)['callbacks'] assert isinstance(merged, CallbackManager) assert len(merged.handlers) == 2 assert isinstance(merged.handlers[0], StdOutCallbackHandler) assert isinstance(merged.handlers[1], ConsoleCallbackHandler) merged = merge_configs(handlers, manager)['callbacks'] assert isinstance(merged, CallbackManager) assert len(merged.handlers) == 2 assert isinstance(merged.handlers[0], StdOutCallbackHandler) assert isinstance(merged.handlers[1], ConsoleCallbackHandler) merged = merge_configs(handlers, other_handlers)['callbacks'] assert isinstance(merged, list) assert len(merged) == 2 assert isinstance(merged[0], ConsoleCallbackHandler) assert isinstance(merged[1], StreamingStdOutCallbackHandler)
def test_merge_config_callbacks() ->None: manager: RunnableConfig = {'callbacks': CallbackManager(handlers=[ StdOutCallbackHandler()])} handlers: RunnableConfig = {'callbacks': [ConsoleCallbackHandler()]} other_handlers: RunnableConfig = {'callbacks': [ StreamingStdOutCallbackHandler()]} merged = merge_configs(manager, handlers)['callbacks'] assert isinstance(merged, CallbackManager) assert len(merged.handlers) == 2 assert isinstance(merged.handlers[0], StdOutCallbackHandler) assert isinstance(merged.handlers[1], ConsoleCallbackHandler) merged = merge_configs(handlers, manager)['callbacks'] assert isinstance(merged, CallbackManager) assert len(merged.handlers) == 2 assert isinstance(merged.handlers[0], StdOutCallbackHandler) assert isinstance(merged.handlers[1], ConsoleCallbackHandler) merged = merge_configs(handlers, other_handlers)['callbacks'] assert isinstance(merged, list) assert len(merged) == 2 assert isinstance(merged[0], ConsoleCallbackHandler) assert isinstance(merged[1], StreamingStdOutCallbackHandler)
null
requires_reference
""" This evaluator requires a reference. """ return True
@property def requires_reference(self) ->bool: """ This evaluator requires a reference. """ return True
This evaluator requires a reference.
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() -> None:
    assert set(__all__) == set(EXPECTED_ALL)
null
is_lc_serializable
"""Return whether or not the class is serializable.""" return False
@classmethod def is_lc_serializable(cls) ->bool: """Return whether or not the class is serializable.""" return False
Return whether or not the class is serializable.
_chain_type
raise NotImplementedError('Saving not supported for this chain type.')
@property
def _chain_type(self) -> str:
    raise NotImplementedError('Saving not supported for this chain type.')
null
similarity_search
""" Return docs most similar to query. Examples: >>> # Search using an embedding >>> data = vector_store.similarity_search( ... query=<your_query>, ... k=<num_items>, ... exec_option=<preferred_exec_option>, ... ) >>> # Run tql search: >>> data = vector_store.similarity_search( ... query=None, ... tql="SELECT * WHERE id == <id>", ... exec_option="compute_engine", ... ) Args: k (int): Number of Documents to return. Defaults to 4. query (str): Text to look up similar documents. **kwargs: Additional keyword arguments include: embedding (Callable): Embedding function to use. Defaults to None. distance_metric (str): 'L2' for Euclidean, 'L1' for Nuclear, 'max' for L-infinity, 'cos' for cosine, 'dot' for dot product. Defaults to 'L2'. filter (Union[Dict, Callable], optional): Additional filter before embedding search. - Dict: Key-value search on tensors of htype json, (sample must satisfy all key-value filters) Dict = {"tensor_1": {"key": value}, "tensor_2": {"key": value}} - Function: Compatible with `deeplake.filter`. Defaults to None. exec_option (str): Supports 3 ways to perform searching. 'python', 'compute_engine', or 'tensor_db'. Defaults to 'python'. - 'python': Pure-python implementation for the client. WARNING: not recommended for big datasets. - 'compute_engine': C++ implementation of the Compute Engine for the client. Not for in-memory or local datasets. - 'tensor_db': Managed Tensor Database for storage and query. Only for data in Deep Lake Managed Database. Use `runtime = {"db_engine": True}` during dataset creation. deep_memory (bool): Whether to use the Deep Memory model for improving search results. Defaults to False if deep_memory is not specified in the Vector Store initialization. If True, the distance metric is set to "deepmemory_distance", which represents the metric with which the model was trained. The search is performed using the Deep Memory model. If False, the distance metric is set to "COS" or whatever distance metric user specifies. Returns: List[Document]: List of Documents most similar to the query vector. """ return self._search(query=query, k=k, use_maximal_marginal_relevance=False, return_score=False, **kwargs)
def similarity_search(self, query: str, k: int=4, **kwargs: Any) ->List[ Document]: """ Return docs most similar to query. Examples: >>> # Search using an embedding >>> data = vector_store.similarity_search( ... query=<your_query>, ... k=<num_items>, ... exec_option=<preferred_exec_option>, ... ) >>> # Run tql search: >>> data = vector_store.similarity_search( ... query=None, ... tql="SELECT * WHERE id == <id>", ... exec_option="compute_engine", ... ) Args: k (int): Number of Documents to return. Defaults to 4. query (str): Text to look up similar documents. **kwargs: Additional keyword arguments include: embedding (Callable): Embedding function to use. Defaults to None. distance_metric (str): 'L2' for Euclidean, 'L1' for Nuclear, 'max' for L-infinity, 'cos' for cosine, 'dot' for dot product. Defaults to 'L2'. filter (Union[Dict, Callable], optional): Additional filter before embedding search. - Dict: Key-value search on tensors of htype json, (sample must satisfy all key-value filters) Dict = {"tensor_1": {"key": value}, "tensor_2": {"key": value}} - Function: Compatible with `deeplake.filter`. Defaults to None. exec_option (str): Supports 3 ways to perform searching. 'python', 'compute_engine', or 'tensor_db'. Defaults to 'python'. - 'python': Pure-python implementation for the client. WARNING: not recommended for big datasets. - 'compute_engine': C++ implementation of the Compute Engine for the client. Not for in-memory or local datasets. - 'tensor_db': Managed Tensor Database for storage and query. Only for data in Deep Lake Managed Database. Use `runtime = {"db_engine": True}` during dataset creation. deep_memory (bool): Whether to use the Deep Memory model for improving search results. Defaults to False if deep_memory is not specified in the Vector Store initialization. If True, the distance metric is set to "deepmemory_distance", which represents the metric with which the model was trained. The search is performed using the Deep Memory model. If False, the distance metric is set to "COS" or whatever distance metric user specifies. Returns: List[Document]: List of Documents most similar to the query vector. """ return self._search(query=query, k=k, use_maximal_marginal_relevance= False, return_score=False, **kwargs)
Return docs most similar to query. Examples: >>> # Search using an embedding >>> data = vector_store.similarity_search( ... query=<your_query>, ... k=<num_items>, ... exec_option=<preferred_exec_option>, ... ) >>> # Run tql search: >>> data = vector_store.similarity_search( ... query=None, ... tql="SELECT * WHERE id == <id>", ... exec_option="compute_engine", ... ) Args: k (int): Number of Documents to return. Defaults to 4. query (str): Text to look up similar documents. **kwargs: Additional keyword arguments include: embedding (Callable): Embedding function to use. Defaults to None. distance_metric (str): 'L2' for Euclidean, 'L1' for Nuclear, 'max' for L-infinity, 'cos' for cosine, 'dot' for dot product. Defaults to 'L2'. filter (Union[Dict, Callable], optional): Additional filter before embedding search. - Dict: Key-value search on tensors of htype json, (sample must satisfy all key-value filters) Dict = {"tensor_1": {"key": value}, "tensor_2": {"key": value}} - Function: Compatible with `deeplake.filter`. Defaults to None. exec_option (str): Supports 3 ways to perform searching. 'python', 'compute_engine', or 'tensor_db'. Defaults to 'python'. - 'python': Pure-python implementation for the client. WARNING: not recommended for big datasets. - 'compute_engine': C++ implementation of the Compute Engine for the client. Not for in-memory or local datasets. - 'tensor_db': Managed Tensor Database for storage and query. Only for data in Deep Lake Managed Database. Use `runtime = {"db_engine": True}` during dataset creation. deep_memory (bool): Whether to use the Deep Memory model for improving search results. Defaults to False if deep_memory is not specified in the Vector Store initialization. If True, the distance metric is set to "deepmemory_distance", which represents the metric with which the model was trained. The search is performed using the Deep Memory model. If False, the distance metric is set to "COS" or whatever distance metric user specifies. Returns: List[Document]: List of Documents most similar to the query vector.
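A minimal usage sketch for the search options documented above, assuming the Deep Lake vector store integration from langchain_community; the dataset path, query text, and metadata filter are illustrative placeholders, not values taken from this record.

from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import DeepLake

# Assumed setup: a local Deep Lake dataset built with an embedding function.
db = DeepLake(dataset_path='./my_deeplake', embedding=OpenAIEmbeddings(), read_only=True)

# Plain text query with the default 'python' execution option.
docs = db.similarity_search('What did the speaker say about climate?', k=4)

# Same query, restricted by a metadata key/value filter and cosine distance.
docs = db.similarity_search('What did the speaker say about climate?', k=4,
    filter={'metadata': {'source': 'speech.txt'}}, distance_metric='cos')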
pending
return [item for idx, item in enumerate(iterable) if idx not in results_map]
def pending(iterable: List[U]) ->List[U]: return [item for idx, item in enumerate(iterable) if idx not in results_map ]
null
on_retriever_end_common
self.ends += 1 self.retriever_ends += 1
def on_retriever_end_common(self) ->None: self.ends += 1 self.retriever_ends += 1
null
get_salient_docs
"""Return documents that are salient to the query.""" docs_and_scores: List[Tuple[Document, float]] docs_and_scores = self.vectorstore.similarity_search_with_relevance_scores( query, **self.search_kwargs) results = {} for fetched_doc, relevance in docs_and_scores: if 'buffer_idx' in fetched_doc.metadata: buffer_idx = fetched_doc.metadata['buffer_idx'] doc = self.memory_stream[buffer_idx] results[buffer_idx] = doc, relevance return results
def get_salient_docs(self, query: str) ->Dict[int, Tuple[Document, float]]: """Return documents that are salient to the query.""" docs_and_scores: List[Tuple[Document, float]] docs_and_scores = self.vectorstore.similarity_search_with_relevance_scores( query, **self.search_kwargs) results = {} for fetched_doc, relevance in docs_and_scores: if 'buffer_idx' in fetched_doc.metadata: buffer_idx = fetched_doc.metadata['buffer_idx'] doc = self.memory_stream[buffer_idx] results[buffer_idx] = doc, relevance return results
Return documents that are salient to the query.
test_memory_ttl
"""Test time-to-live feature of the memory.""" message_history = _chat_message_history(ttl_seconds=5) memory = ConversationBufferMemory(memory_key='baz', chat_memory= message_history, return_messages=True) assert memory.chat_memory.messages == [] memory.chat_memory.add_ai_message('Nothing special here.') time.sleep(2) assert memory.chat_memory.messages != [] time.sleep(5) assert memory.chat_memory.messages == []
def test_memory_ttl() ->None: """Test time-to-live feature of the memory.""" message_history = _chat_message_history(ttl_seconds=5) memory = ConversationBufferMemory(memory_key='baz', chat_memory= message_history, return_messages=True) assert memory.chat_memory.messages == [] memory.chat_memory.add_ai_message('Nothing special here.') time.sleep(2) assert memory.chat_memory.messages != [] time.sleep(5) assert memory.chat_memory.messages == []
Test time-to-live feature of the memory.
is_lc_serializable
return True
@classmethod
def is_lc_serializable(cls) ->bool:
    return True
null
before_index_setup
""" Executes before the index is created. Used for setting up any required Elasticsearch resources like a pipeline. Args: client: The Elasticsearch client. text_field: The field containing the text data in the index. vector_query_field: The field containing the vector representations in the index. """
def before_index_setup(self, client: 'Elasticsearch', text_field: str, vector_query_field: str) ->None: """ Executes before the index is created. Used for setting up any required Elasticsearch resources like a pipeline. Args: client: The Elasticsearch client. text_field: The field containing the text data in the index. vector_query_field: The field containing the vector representations in the index. """
Executes before the index is created. Used for setting up any required Elasticsearch resources like a pipeline. Args: client: The Elasticsearch client. text_field: The field containing the text data in the index. vector_query_field: The field containing the vector representations in the index.
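As a hedged illustration of what this hook is for, a custom retrieval strategy might override it to register an ingest pipeline before the index exists; the pipeline id and processor below are made-up placeholders, not part of the original code.

def before_index_setup(self, client, text_field, vector_query_field):
    # Sketch only: create a simple ingest pipeline the index can reference.
    client.ingest.put_pipeline(
        id='my_text_pipeline',
        description='Lowercase the text field before indexing',
        processors=[{'lowercase': {'field': text_field}}],
    )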
lazy_query
with self.client.execute_sql(query).open_reader() as reader: if reader.count == 0: raise ValueError('Table contains no data.') for record in reader: yield {k: v for k, v in record}
def lazy_query(self, query: str) ->Iterator[dict]: with self.client.execute_sql(query).open_reader() as reader: if reader.count == 0: raise ValueError('Table contains no data.') for record in reader: yield {k: v for k, v in record}
null
test_multiple_messages
"""Tests multiple messages works.""" chat = ChatTongyi() message = HumanMessage(content='Hi, how are you.') response = chat.generate([[message], [message]]) assert isinstance(response, LLMResult) assert len(response.generations) == 2 for generations in response.generations: assert len(generations) == 1 for generation in generations: assert isinstance(generation, ChatGeneration) assert isinstance(generation.text, str) assert generation.text == generation.message.content
def test_multiple_messages() ->None:
    """Tests that multiple messages work."""
    chat = ChatTongyi()
    message = HumanMessage(content='Hi, how are you.')
    response = chat.generate([[message], [message]])
    assert isinstance(response, LLMResult)
    assert len(response.generations) == 2
    for generations in response.generations:
        assert len(generations) == 1
        for generation in generations:
            assert isinstance(generation, ChatGeneration)
            assert isinstance(generation.text, str)
            assert generation.text == generation.message.content
Tests that multiple messages work.
test_loader_detect_encoding_csv
"""Test csv loader.""" path = Path(__file__).parent.parent / 'examples' files = path.glob('**/*.csv') row_count = 0 for file in files: encodings = detect_file_encodings(str(file)) for encoding in encodings: try: row_count += sum(1 for line in open(file, encoding=encoding. encoding)) break except UnicodeDecodeError: continue row_count -= 1 loader = DirectoryLoader(str(path), glob='**/*.csv', loader_cls=CSVLoader) loader_detect_encoding = DirectoryLoader(str(path), glob='**/*.csv', loader_kwargs={'autodetect_encoding': True}, loader_cls=CSVLoader) with pytest.raises((UnicodeDecodeError, RuntimeError)): loader.load() docs = loader_detect_encoding.load() assert len(docs) == row_count
@pytest.mark.requires('chardet') def test_loader_detect_encoding_csv() ->None: """Test csv loader.""" path = Path(__file__).parent.parent / 'examples' files = path.glob('**/*.csv') row_count = 0 for file in files: encodings = detect_file_encodings(str(file)) for encoding in encodings: try: row_count += sum(1 for line in open(file, encoding=encoding .encoding)) break except UnicodeDecodeError: continue row_count -= 1 loader = DirectoryLoader(str(path), glob='**/*.csv', loader_cls=CSVLoader) loader_detect_encoding = DirectoryLoader(str(path), glob='**/*.csv', loader_kwargs={'autodetect_encoding': True}, loader_cls=CSVLoader) with pytest.raises((UnicodeDecodeError, RuntimeError)): loader.load() docs = loader_detect_encoding.load() assert len(docs) == row_count
Test csv loader.
__init__
"""Initializes the loader. Args: config: The config to pass to the source connector. stream_name: The name of the stream to load. record_handler: A function that takes in a record and an optional id and returns a Document. If None, the record will be used as the document. Defaults to None. state: The state to pass to the source connector. Defaults to None. """ source_class = guard_import('source_stripe', pip_name='airbyte-source-stripe' ).SourceStripe super().__init__(config=config, source_class=source_class, stream_name= stream_name, record_handler=record_handler, state=state)
def __init__(self, config: Mapping[str, Any], stream_name: str, record_handler: Optional[RecordHandler]=None, state: Optional[Any]=None ) ->None: """Initializes the loader. Args: config: The config to pass to the source connector. stream_name: The name of the stream to load. record_handler: A function that takes in a record and an optional id and returns a Document. If None, the record will be used as the document. Defaults to None. state: The state to pass to the source connector. Defaults to None. """ source_class = guard_import('source_stripe', pip_name= 'airbyte-source-stripe').SourceStripe super().__init__(config=config, source_class=source_class, stream_name= stream_name, record_handler=record_handler, state=state)
Initializes the loader. Args: config: The config to pass to the source connector. stream_name: The name of the stream to load. record_handler: A function that takes in a record and an optional id and returns a Document. If None, the record will be used as the document. Defaults to None. state: The state to pass to the source connector. Defaults to None.
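A hedged usage sketch, assuming this constructor belongs to the community AirbyteStripeLoader; the config keys follow the airbyte-source-stripe spec and every value shown is a placeholder.

from langchain_community.document_loaders.airbyte import AirbyteStripeLoader

config = {
    'client_secret': '<stripe_api_key>',   # placeholder
    'account_id': '<stripe_account_id>',   # placeholder
    'start_date': '2023-01-01T00:00:00Z',
}
loader = AirbyteStripeLoader(config=config, stream_name='invoices')
docs = loader.load()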
func
return call_func_with_variable_args(self.func, input, config, run_manager. get_sync(), **kwargs)
def func(input: Input, run_manager: AsyncCallbackManagerForChainRun, config: RunnableConfig) ->Output: return call_func_with_variable_args(self.func, input, config, run_manager.get_sync(), **kwargs)
null
_default_params
"""Get the default parameters.""" return {'max_length': self.max_length, 'sampling_topk': self.sampling_topk, 'sampling_topp': self.sampling_topp, 'sampling_temperature': self. sampling_temperature}
@property def _default_params(self) ->Dict[str, Any]: """Get the default parameters.""" return {'max_length': self.max_length, 'sampling_topk': self. sampling_topk, 'sampling_topp': self.sampling_topp, 'sampling_temperature': self.sampling_temperature}
Get the default parameters.
lazy_parse
"""Lazy parsing interface.""" yield Document(page_content='foo')
def lazy_parse(self, blob: Blob) ->Iterator[Document]: """Lazy parsing interface.""" yield Document(page_content='foo')
Lazy parsing interface.
point
"""Create a point on ASCII canvas. Args: x (int): x coordinate. Should be >= 0 and < number of columns in the canvas. y (int): y coordinate. Should be >= 0 an < number of lines in the canvas. char (str): character to place in the specified point on the canvas. """ assert len(char) == 1 assert x >= 0 assert x < self.cols assert y >= 0 assert y < self.lines self.canvas[y][x] = char
def point(self, x: int, y: int, char: str) ->None:
    """Create a point on ASCII canvas.

    Args:
        x (int): x coordinate. Should be >= 0 and < number of columns in
            the canvas.
        y (int): y coordinate. Should be >= 0 and < number of lines in the
            canvas.
        char (str): character to place in the specified point on the
            canvas.
    """
    assert len(char) == 1
    assert x >= 0
    assert x < self.cols
    assert y >= 0
    assert y < self.lines
    self.canvas[y][x] = char
Create a point on ASCII canvas.

Args:
    x (int): x coordinate. Should be >= 0 and < number of columns in
        the canvas.
    y (int): y coordinate. Should be >= 0 and < number of lines in the
        canvas.
    char (str): character to place in the specified point on the
        canvas.
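A tiny sketch of how this method would be called; the canvas class name and constructor signature are assumed from context, not taken from this record.

canvas = AsciiCanvas(cols=10, lines=3)  # assumed constructor
canvas.point(0, 0, '+')   # top-left corner
canvas.point(9, 2, '*')   # bottom-right corner; must stay within cols/lines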
get_final_answer
final_answer_str = "Here's a comprehensive answer:\n\n" for i, el in enumerate(expanded_list): final_answer_str += f'{i + 1}. {el}\n\n' return final_answer_str
def get_final_answer(expanded_list): final_answer_str = "Here's a comprehensive answer:\n\n" for i, el in enumerate(expanded_list): final_answer_str += f'{i + 1}. {el}\n\n' return final_answer_str
null
test_parse_nested_json_with_escaped_quotes
parsed = parse_json_markdown(json_string) assert parsed == {'action': 'Final Answer', 'action_input': '{"foo": "bar", "bar": "foo"}'}
@pytest.mark.parametrize('json_string', TEST_CASES_ESCAPED_QUOTES) def test_parse_nested_json_with_escaped_quotes(json_string: str) ->None: parsed = parse_json_markdown(json_string) assert parsed == {'action': 'Final Answer', 'action_input': '{"foo": "bar", "bar": "foo"}'}
null
test_numeric_filter
nf = Num('numeric_field') assert str(getattr(nf, operation)(value)) == expected
@pytest.mark.parametrize('operation, value, expected', [('__eq__', 5, '@numeric_field:[5 5]'), ('__ne__', 5, '(-@numeric_field:[5 5])'), ( '__gt__', 5, '@numeric_field:[(5 +inf]'), ('__ge__', 5, '@numeric_field:[5 +inf]'), ('__lt__', 5.55, '@numeric_field:[-inf (5.55]'), ('__le__', 5, '@numeric_field:[-inf 5]' ), ('__le__', None, '*'), ('__eq__', None, '*'), ('__ne__', None, '*')], ids=['eq', 'ne', 'gt', 'ge', 'lt', 'le', 'le_none', 'eq_none', 'ne_none']) def test_numeric_filter(operation: str, value: Any, expected: str) ->None: nf = Num('numeric_field') assert str(getattr(nf, operation)(value)) == expected
null
on_llm_end
tags = ['langchain']
user_id = None
session_id = None
metadata: dict = {'langchain_run_id': run_id}
if self.messages:
    metadata['messages'] = self.messages
if self.trubrics_kwargs:
    if self.trubrics_kwargs.get('tags'):
        tags.extend(self.trubrics_kwargs.pop('tags'))
    user_id = self.trubrics_kwargs.pop('user_id', None)
    session_id = self.trubrics_kwargs.pop('session_id', None)
    metadata.update(self.trubrics_kwargs)
for generation in response.generations:
    self.trubrics.log_prompt(config_model={'model': response.llm_output.get
        ('model_name') if response.llm_output else 'NA'}, prompt=self.
        prompt, generation=generation[0].text, user_id=user_id, session_id=
        session_id, tags=tags, metadata=metadata)
def on_llm_end(self, response: LLMResult, run_id: UUID, **kwargs: Any) ->None:
    tags = ['langchain']
    user_id = None
    session_id = None
    metadata: dict = {'langchain_run_id': run_id}
    if self.messages:
        metadata['messages'] = self.messages
    if self.trubrics_kwargs:
        if self.trubrics_kwargs.get('tags'):
            tags.extend(self.trubrics_kwargs.pop('tags'))
        user_id = self.trubrics_kwargs.pop('user_id', None)
        session_id = self.trubrics_kwargs.pop('session_id', None)
        metadata.update(self.trubrics_kwargs)
    for generation in response.generations:
        self.trubrics.log_prompt(config_model={'model': response.llm_output
            .get('model_name') if response.llm_output else 'NA'}, prompt=
            self.prompt, generation=generation[0].text, user_id=user_id,
            session_id=session_id, tags=tags, metadata=metadata)
null
test_forefrontai_api_key_masked_when_passed_via_constructor
"""Test that the API key is masked when passed via the constructor.""" llm = ForefrontAI(forefrontai_api_key='secret-api-key', temperature=0.2) print(llm.forefrontai_api_key, end='') captured = capsys.readouterr() assert captured.out == '**********'
def test_forefrontai_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture) ->None: """Test that the API key is masked when passed via the constructor.""" llm = ForefrontAI(forefrontai_api_key='secret-api-key', temperature=0.2) print(llm.forefrontai_api_key, end='') captured = capsys.readouterr() assert captured.out == '**********'
Test that the API key is masked when passed via the constructor.
from_texts
"""Construct a TileDB index from raw documents. Args: texts: List of documents to index. embedding: Embedding function to use. metadatas: List of metadata dictionaries to associate with documents. ids: Optional ids of each text object. metric: Metric to use for indexing. Defaults to "euclidean". index_uri: The URI to write the TileDB arrays index_type: Optional, Vector index type ("FLAT", IVF_FLAT") config: Optional, TileDB config index_timestamp: Optional, timestamp to write new texts with. Example: .. code-block:: python from langchain_community import TileDB from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() index = TileDB.from_texts(texts, embeddings) """ embeddings = [] embeddings = embedding.embed_documents(texts) return cls.__from(texts=texts, embeddings=embeddings, embedding=embedding, metadatas=metadatas, ids=ids, metric=metric, index_uri=index_uri, index_type=index_type, config=config, index_timestamp=index_timestamp, **kwargs)
@classmethod
def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas:
    Optional[List[dict]]=None, ids: Optional[List[str]]=None, metric: str=
    DEFAULT_METRIC, index_uri: str='/tmp/tiledb_array', index_type: str=
    'FLAT', config: Optional[Mapping[str, Any]]=None, index_timestamp: int=
    0, **kwargs: Any) ->TileDB:
    """Construct a TileDB index from raw documents.

    Args:
        texts: List of documents to index.
        embedding: Embedding function to use.
        metadatas: List of metadata dictionaries to associate with documents.
        ids: Optional ids of each text object.
        metric: Metric to use for indexing. Defaults to "euclidean".
        index_uri: The URI to write the TileDB arrays to.
        index_type: Optional, Vector index type ("FLAT", "IVF_FLAT").
        config: Optional, TileDB config.
        index_timestamp: Optional, timestamp to write new texts with.

    Example:
        .. code-block:: python

            from langchain_community.vectorstores import TileDB
            from langchain_community.embeddings import OpenAIEmbeddings
            embeddings = OpenAIEmbeddings()
            index = TileDB.from_texts(texts, embeddings)
    """
    embeddings = embedding.embed_documents(texts)
    return cls.__from(texts=texts, embeddings=embeddings, embedding=
        embedding, metadatas=metadatas, ids=ids, metric=metric, index_uri=
        index_uri, index_type=index_type, config=config, index_timestamp=
        index_timestamp, **kwargs)
Construct a TileDB index from raw documents.

Args:
    texts: List of documents to index.
    embedding: Embedding function to use.
    metadatas: List of metadata dictionaries to associate with documents.
    ids: Optional ids of each text object.
    metric: Metric to use for indexing. Defaults to "euclidean".
    index_uri: The URI to write the TileDB arrays to.
    index_type: Optional, Vector index type ("FLAT", "IVF_FLAT").
    config: Optional, TileDB config.
    index_timestamp: Optional, timestamp to write new texts with.

Example:
    .. code-block:: python

        from langchain_community.vectorstores import TileDB
        from langchain_community.embeddings import OpenAIEmbeddings
        embeddings = OpenAIEmbeddings()
        index = TileDB.from_texts(texts, embeddings)
test_integration_initialization
"""Test chat model initialization.""" GoogleGenerativeAIEmbeddings(model='models/embedding-001', google_api_key='...' ) GoogleGenerativeAIEmbeddings(model='models/embedding-001', google_api_key= '...', task_type='retrieval_document')
def test_integration_initialization() ->None: """Test chat model initialization.""" GoogleGenerativeAIEmbeddings(model='models/embedding-001', google_api_key='...') GoogleGenerativeAIEmbeddings(model='models/embedding-001', google_api_key='...', task_type='retrieval_document')
Test chat model initialization.
_headers
"""Return headers for requests to OneNote API""" return {'Authorization': f'Bearer {self.access_token}'}
@property def _headers(self) ->Dict[str, str]: """Return headers for requests to OneNote API""" return {'Authorization': f'Bearer {self.access_token}'}
Return headers for requests to OneNote API
memory_variables
"""Will always return list of memory variables. :meta private: """ return [self.memory_key]
@property def memory_variables(self) ->List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key]
Will always return list of memory variables. :meta private:
on_chain_error
"""Run when chain errors.""" self.step += 1 self.errors += 1
def on_chain_error(self, error: BaseException, **kwargs: Any) ->None: """Run when chain errors.""" self.step += 1 self.errors += 1
Run when chain errors.
on_tool_end
"""Run when tool ends running.""" self.step += 1 self.tool_ends += 1 self.ends += 1 resp = self._init_resp() resp.update({'action': 'on_tool_end', 'output': output}) resp.update(self.get_custom_callback_meta()) self.on_tool_end_records.append(resp) self.action_records.append(resp) if self.stream_logs: self.run.log(resp)
def on_tool_end(self, output: str, **kwargs: Any) ->None: """Run when tool ends running.""" self.step += 1 self.tool_ends += 1 self.ends += 1 resp = self._init_resp() resp.update({'action': 'on_tool_end', 'output': output}) resp.update(self.get_custom_callback_meta()) self.on_tool_end_records.append(resp) self.action_records.append(resp) if self.stream_logs: self.run.log(resp)
Run when tool ends running.
_import_edenai_EdenAiSpeechToTextTool
from langchain_community.tools.edenai import EdenAiSpeechToTextTool return EdenAiSpeechToTextTool
def _import_edenai_EdenAiSpeechToTextTool() ->Any: from langchain_community.tools.edenai import EdenAiSpeechToTextTool return EdenAiSpeechToTextTool
null
max_marginal_relevance_search
"""Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ if self._embedding_function is None: raise ValueError( 'For MMR search, you must specify an embedding function on creation.') embedding = self._embedding_function.embed_query(query) docs = self.max_marginal_relevance_search_by_vector(embedding, k, fetch_k, lambda_mul=lambda_mult) return docs
def max_marginal_relevance_search(self, query: str, k: int=DEFAULT_K,
    fetch_k: int=DEFAULT_FETCH_K, lambda_mult: float=0.5, **kwargs: Any
    ) ->List[Document]:
    """Return docs selected using the maximal marginal relevance.

    Maximal marginal relevance optimizes for similarity to query AND diversity
    among selected documents.

    Args:
        query: Text to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        fetch_k: Number of Documents to fetch to pass to MMR algorithm.
        lambda_mult: Number between 0 and 1 that determines the degree
            of diversity among the results with 0 corresponding
            to maximum diversity and 1 to minimum diversity.
            Defaults to 0.5.
    Returns:
        List of Documents selected by maximal marginal relevance.
    """
    if self._embedding_function is None:
        raise ValueError(
            'For MMR search, you must specify an embedding function on creation.'
            )
    embedding = self._embedding_function.embed_query(query)
    docs = self.max_marginal_relevance_search_by_vector(embedding, k,
        fetch_k, lambda_mult=lambda_mult)
    return docs
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance.
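A short usage sketch of the relevance/diversity trade-off described above; vector_store stands in for any store created with an embedding function, and the query string is a placeholder.

# Fetch 20 candidates by similarity, then keep the 5 that best balance
# relevance and diversity; lambda_mult closer to 0 favours diversity,
# closer to 1 favours pure similarity.
docs = vector_store.max_marginal_relevance_search(
    'renewable energy policy', k=5, fetch_k=20, lambda_mult=0.25)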
test_create_tool_positional_args
"""Test that positional arguments are allowed.""" test_tool = Tool('test_name', lambda x: x, 'test_description') assert test_tool('foo') == 'foo' assert test_tool.name == 'test_name' assert test_tool.description == 'test_description' assert test_tool.is_single_input
def test_create_tool_positional_args() ->None: """Test that positional arguments are allowed.""" test_tool = Tool('test_name', lambda x: x, 'test_description') assert test_tool('foo') == 'foo' assert test_tool.name == 'test_name' assert test_tool.description == 'test_description' assert test_tool.is_single_input
Test that positional arguments are allowed.
similarity_search_by_vector
"""Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query vector. """ docs_and_scores = self.similarity_search_with_score_by_vector(embedding= embedding, k=k, **kwargs) return [doc for doc, _ in docs_and_scores]
def similarity_search_by_vector(self, embedding: List[float], k: int=4, ** kwargs: Any) ->List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query vector. """ docs_and_scores = self.similarity_search_with_score_by_vector(embedding =embedding, k=k, **kwargs) return [doc for doc, _ in docs_and_scores]
Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query vector.
_get_prefixed_key
"""Get the key with the namespace prefix. Args: key (str): The original key. Returns: str: The key with the namespace prefix. """ delimiter = '/' if self.namespace: return f'{self.namespace}{delimiter}{key}' return key
def _get_prefixed_key(self, key: str) ->str: """Get the key with the namespace prefix. Args: key (str): The original key. Returns: str: The key with the namespace prefix. """ delimiter = '/' if self.namespace: return f'{self.namespace}{delimiter}{key}' return key
Get the key with the namespace prefix. Args: key (str): The original key. Returns: str: The key with the namespace prefix.
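Illustrative behaviour only, assuming a store instance whose namespace is 'cache'.

store._get_prefixed_key('user:42')   # -> 'cache/user:42'
# With an empty or unset namespace, the key is returned unchanged: 'user:42'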
__init__
self.name = name self.keys = keys
def __init__(self, name: str, keys: Set[str]) ->None: self.name = name self.keys = keys
null
lazy_load
"""Lazy load the messages from the chat file and yield them in as chat sessions. Yields: ChatSession: The loaded chat session. """ for file_path in self._iterate_files(self.path): if file_path.endswith('.html'): yield self._load_single_chat_session_html(file_path) elif file_path.endswith('.json'): yield self._load_single_chat_session_json(file_path)
def lazy_load(self) ->Iterator[ChatSession]: """Lazy load the messages from the chat file and yield them in as chat sessions. Yields: ChatSession: The loaded chat session. """ for file_path in self._iterate_files(self.path): if file_path.endswith('.html'): yield self._load_single_chat_session_html(file_path) elif file_path.endswith('.json'): yield self._load_single_chat_session_json(file_path)
Lazy load the messages from the chat file and yield them in as chat sessions. Yields: ChatSession: The loaded chat session.
_import_bing_search_tool_BingSearchResults
from langchain_community.tools.bing_search.tool import BingSearchResults return BingSearchResults
def _import_bing_search_tool_BingSearchResults() ->Any: from langchain_community.tools.bing_search.tool import BingSearchResults return BingSearchResults
null
is_lc_serializable
return True
@classmethod
def is_lc_serializable(cls) ->bool:
    return True
null
test_initialization
loader = GitHubIssuesLoader(repo='repo', access_token='access_token') assert loader.repo == 'repo' assert loader.access_token == 'access_token' assert loader.headers == {'Accept': 'application/vnd.github+json', 'Authorization': 'Bearer access_token'}
def test_initialization() ->None: loader = GitHubIssuesLoader(repo='repo', access_token='access_token') assert loader.repo == 'repo' assert loader.access_token == 'access_token' assert loader.headers == {'Accept': 'application/vnd.github+json', 'Authorization': 'Bearer access_token'}
null
reset_callback_meta
"""Reset the callback metadata.""" self.step = 0 self.starts = 0 self.ends = 0 self.errors = 0 self.text_ctr = 0 self.ignore_llm_ = False self.ignore_chain_ = False self.ignore_agent_ = False self.always_verbose_ = False self.chain_starts = 0 self.chain_ends = 0 self.llm_starts = 0 self.llm_ends = 0 self.llm_streams = 0 self.tool_starts = 0 self.tool_ends = 0 self.agent_ends = 0 self.on_llm_start_records = [] self.on_llm_token_records = [] self.on_llm_end_records = [] self.on_chain_start_records = [] self.on_chain_end_records = [] self.on_tool_start_records = [] self.on_tool_end_records = [] self.on_text_records = [] self.on_agent_finish_records = [] self.on_agent_action_records = [] return None
def reset_callback_meta(self) ->None: """Reset the callback metadata.""" self.step = 0 self.starts = 0 self.ends = 0 self.errors = 0 self.text_ctr = 0 self.ignore_llm_ = False self.ignore_chain_ = False self.ignore_agent_ = False self.always_verbose_ = False self.chain_starts = 0 self.chain_ends = 0 self.llm_starts = 0 self.llm_ends = 0 self.llm_streams = 0 self.tool_starts = 0 self.tool_ends = 0 self.agent_ends = 0 self.on_llm_start_records = [] self.on_llm_token_records = [] self.on_llm_end_records = [] self.on_chain_start_records = [] self.on_chain_end_records = [] self.on_tool_start_records = [] self.on_tool_end_records = [] self.on_text_records = [] self.on_agent_finish_records = [] self.on_agent_action_records = [] return None
Reset the callback metadata.
_make_request_headers
headers = headers or {} if not isinstance(self.arcee_api_key, SecretStr): raise TypeError( f'arcee_api_key must be a SecretStr. Got {type(self.arcee_api_key)}') api_key = self.arcee_api_key.get_secret_value() internal_headers = {'X-Token': api_key, 'Content-Type': 'application/json'} headers.update(internal_headers) return headers
def _make_request_headers(self, headers: Optional[Dict]=None) ->Dict: headers = headers or {} if not isinstance(self.arcee_api_key, SecretStr): raise TypeError( f'arcee_api_key must be a SecretStr. Got {type(self.arcee_api_key)}' ) api_key = self.arcee_api_key.get_secret_value() internal_headers = {'X-Token': api_key, 'Content-Type': 'application/json'} headers.update(internal_headers) return headers
null
run
"""Run query through GoogleSearchScholar and parse result""" total_results = [] page = 0 while page < max(self.top_k_results - 20, 1): results = self.google_scholar_engine({'q': query, 'start': page, 'hl': self.hl, 'num': min(self.top_k_results, 20), 'lr': self.lr}).get_dict( ).get('organic_results', []) total_results.extend(results) if not results: break page += 20 if self.top_k_results % 20 != 0 and page > 20 and total_results: results = self.google_scholar_engine({'q': query, 'start': page, 'num': self.top_k_results % 20, 'hl': self.hl, 'lr': self.lr}).get_dict().get( 'organic_results', []) total_results.extend(results) if not total_results: return 'No good Google Scholar Result was found' docs = [ f"""Title: {result.get('title', '')} Authors: {','.join([author.get('name') for author in result.get('publication_info', {}).get('authors', [])])} Summary: {result.get('publication_info', {}).get('summary', '')} Total-Citations: {result.get('inline_links', {}).get('cited_by', {}).get('total', '')}""" for result in total_results] return '\n\n'.join(docs)
def run(self, query: str) ->str: """Run query through GoogleSearchScholar and parse result""" total_results = [] page = 0 while page < max(self.top_k_results - 20, 1): results = self.google_scholar_engine({'q': query, 'start': page, 'hl': self.hl, 'num': min(self.top_k_results, 20), 'lr': self.lr} ).get_dict().get('organic_results', []) total_results.extend(results) if not results: break page += 20 if self.top_k_results % 20 != 0 and page > 20 and total_results: results = self.google_scholar_engine({'q': query, 'start': page, 'num': self.top_k_results % 20, 'hl': self.hl, 'lr': self.lr} ).get_dict().get('organic_results', []) total_results.extend(results) if not total_results: return 'No good Google Scholar Result was found' docs = [ f"""Title: {result.get('title', '')} Authors: {','.join([author.get('name') for author in result.get('publication_info', {}).get('authors', [])])} Summary: {result.get('publication_info', {}).get('summary', '')} Total-Citations: {result.get('inline_links', {}).get('cited_by', {}).get('total', '')}""" for result in total_results] return '\n\n'.join(docs)
Run query through GoogleSearchScholar and parse result
actual_decorator
if condition: return decorator(func) return func
def actual_decorator(func: Callable[[Any], Any]) ->Callable[[Any], Any]: if condition: return decorator(func) return func
null
_completion_with_retry
return self.client.completion(**kwargs)
@retry_decorator def _completion_with_retry(**kwargs: Any) ->Any: return self.client.completion(**kwargs)
null
test_results_empty_query
"""Test that results gives the correct output with empty query.""" search = api_client.results(query='', sort='relevance', time_filter='all', subreddit='all', limit=10) assert search == []
@pytest.mark.requires('praw') def test_results_empty_query(api_client: RedditSearchAPIWrapper) ->None: """Test that results gives the correct output with empty query.""" search = api_client.results(query='', sort='relevance', time_filter= 'all', subreddit='all', limit=10) assert search == []
Test that results gives the correct output with empty query.
test_run_no_result
output = api_client.run( 'NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL' ) assert 'No good Wikipedia Search Result was found' == output
def test_run_no_result(api_client: WikipediaAPIWrapper) ->None: output = api_client.run( 'NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL' ) assert 'No good Wikipedia Search Result was found' == output
null
create_knn_index
""" Create a new k-NN index in Elasticsearch. Args: mapping (Dict): The mapping to use for the new index. Returns: None """ self.client.indices.create(index=self.index_name, mappings=mapping)
def create_knn_index(self, mapping: Dict) ->None: """ Create a new k-NN index in Elasticsearch. Args: mapping (Dict): The mapping to use for the new index. Returns: None """ self.client.indices.create(index=self.index_name, mappings=mapping)
Create a new k-NN index in Elasticsearch. Args: mapping (Dict): The mapping to use for the new index. Returns: None
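A hedged example of the mapping argument for an Elasticsearch dense-vector (k-NN) index; the field names, dimension, and similarity are placeholders and would need to match the embedding model and the fields used by the surrounding class.

mapping = {
    'properties': {
        'vector_field': {
            'type': 'dense_vector',
            'dims': 384,              # must equal the embedding dimension
            'index': True,
            'similarity': 'cosine',
        },
        'text_field': {'type': 'text'},
    }
}
retriever.create_knn_index(mapping)   # 'retriever' is a placeholder instance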