Dataset columns: method_name (string, length 1–78), method_body (string, length 3–9.66k), full_code (string, length 31–10.7k), docstring (string, length 4–4.74k).
test_messages_to_prompt_dict_raises_with_mismatched_examples
pytest.importorskip('google.generativeai') with pytest.raises(ChatGooglePalmError) as e: _messages_to_prompt_dict([HumanMessage(example=True, content='Human example #1'), AIMessage(example=False, content='AI example #1')]) assert 'Human example message must be immediately followed' in str(e)
def test_messages_to_prompt_dict_raises_with_mismatched_examples() ->None: pytest.importorskip('google.generativeai') with pytest.raises(ChatGooglePalmError) as e: _messages_to_prompt_dict([HumanMessage(example=True, content='Human example #1'), AIMessage(example=False, content='AI example #1')]) assert 'Human example message must be immediately followed' in str(e)
null
_insert_texts
if not texts: return [] embeddings = self._embedding.embed_documents(texts) to_insert = [{self._text_key: t, self._embedding_key: embedding, **m} for t, m, embedding in zip(texts, metadatas, embeddings)] insert_result = self._collection.insert_many(to_insert) return insert_result.inserted_ids
def _insert_texts(self, texts: List[str], metadatas: List[Dict[str, Any]] ) ->List: if not texts: return [] embeddings = self._embedding.embed_documents(texts) to_insert = [{self._text_key: t, self._embedding_key: embedding, **m} for t, m, embedding in zip(texts, metadatas, embeddings)] insert_result = self._collection.insert_many(to_insert) return insert_result.inserted_ids
null
add_texts
"""Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ embeddings = self.embedding.embed_documents(list(texts)) return self.add_embeddings(texts=texts, embeddings=embeddings, metadatas= metadatas, ids=ids, **kwargs)
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]= None, ids: Optional[List[str]]=None, **kwargs: Any) ->List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ embeddings = self.embedding.embed_documents(list(texts)) return self.add_embeddings(texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs)
Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore.
get_format_instructions
return XML_FORMAT_INSTRUCTIONS.format(tags=self.tags)
def get_format_instructions(self) ->str: return XML_FORMAT_INSTRUCTIONS.format(tags=self.tags)
null
load
text = '' for line in open(self.file_path, 'r'): data = json.loads(line)['_airbyte_data'] text += stringify_dict(data) metadata = {'source': self.file_path} return [Document(page_content=text, metadata=metadata)]
def load(self) ->List[Document]: text = '' for line in open(self.file_path, 'r'): data = json.loads(line)['_airbyte_data'] text += stringify_dict(data) metadata = {'source': self.file_path} return [Document(page_content=text, metadata=metadata)]
null
prepare_cosmos
"""Prepare the CosmosDB client. Use this function or the context manager to make sure your database is ready. """ try: from azure.cosmos import PartitionKey except ImportError as exc: raise ImportError( 'You must install the azure-cosmos package to use the CosmosDBChatMessageHistory.Please install it with `pip install azure-cosmos`.' ) from exc database = self._client.create_database_if_not_exists(self.cosmos_database) self._container = database.create_container_if_not_exists(self. cosmos_container, partition_key=PartitionKey('/user_id'), default_ttl= self.ttl) self.load_messages()
def prepare_cosmos(self) ->None: """Prepare the CosmosDB client. Use this function or the context manager to make sure your database is ready. """ try: from azure.cosmos import PartitionKey except ImportError as exc: raise ImportError( 'You must install the azure-cosmos package to use the CosmosDBChatMessageHistory.Please install it with `pip install azure-cosmos`.' ) from exc database = self._client.create_database_if_not_exists(self.cosmos_database) self._container = database.create_container_if_not_exists(self. cosmos_container, partition_key=PartitionKey('/user_id'), default_ttl=self.ttl) self.load_messages()
Prepare the CosmosDB client. Use this function or the context manager to make sure your database is ready.
_call
return self.transform_cb(inputs)
def _call(self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun]=None) ->Dict[str, str]: return self.transform_cb(inputs)
null
test_clear_messages
sql_history, other_history = sql_histories sql_history.add_user_message('Hello!') sql_history.add_ai_message('Hi there!') assert len(sql_history.messages) == 2 other_history.add_user_message('Hellox') assert len(other_history.messages) == 1 assert len(sql_history.messages) == 2 sql_history.clear() assert len(sql_history.messages) == 0 assert len(other_history.messages) == 1
def test_clear_messages(sql_histories: Tuple[SQLChatMessageHistory, SQLChatMessageHistory]) ->None: sql_history, other_history = sql_histories sql_history.add_user_message('Hello!') sql_history.add_ai_message('Hi there!') assert len(sql_history.messages) == 2 other_history.add_user_message('Hellox') assert len(other_history.messages) == 1 assert len(sql_history.messages) == 2 sql_history.clear() assert len(sql_history.messages) == 0 assert len(other_history.messages) == 1
null
from_llm
"""Load and use LLMChain with either a specific prompt key or custom prompt.""" if custom_prompt is not None: prompt = custom_prompt elif prompt_key is not None and prompt_key in PROMPT_MAP: prompt = PROMPT_MAP[prompt_key] else: raise ValueError( f'Must specify prompt_key if custom_prompt not provided. Should be one of {list(PROMPT_MAP.keys())}.' ) llm_chain = LLMChain(llm=llm, prompt=prompt) return cls(base_embeddings=base_embeddings, llm_chain=llm_chain, **kwargs)
@classmethod def from_llm(cls, llm: BaseLanguageModel, base_embeddings: Embeddings, prompt_key: Optional[str]=None, custom_prompt: Optional[ BasePromptTemplate]=None, **kwargs: Any) ->HypotheticalDocumentEmbedder: """Load and use LLMChain with either a specific prompt key or custom prompt.""" if custom_prompt is not None: prompt = custom_prompt elif prompt_key is not None and prompt_key in PROMPT_MAP: prompt = PROMPT_MAP[prompt_key] else: raise ValueError( f'Must specify prompt_key if custom_prompt not provided. Should be one of {list(PROMPT_MAP.keys())}.' ) llm_chain = LLMChain(llm=llm, prompt=prompt) return cls(base_embeddings=base_embeddings, llm_chain=llm_chain, **kwargs)
Load and use LLMChain with either a specific prompt key or custom prompt.
from_components
""" Create a structured query output parser from components. Args: allowed_comparators: allowed comparators allowed_operators: allowed operators Returns: a structured query output parser """ ast_parse: Callable if fix_invalid: def ast_parse(raw_filter: str) ->Optional[FilterDirective]: filter = cast(Optional[FilterDirective], get_parser().parse_folder( raw_filter)) fixed = fix_filter_directive(filter, allowed_comparators= allowed_comparators, allowed_operators=allowed_operators, allowed_attributes=allowed_attributes) return fixed else: ast_parse = get_parser(allowed_comparators=allowed_comparators, allowed_operators=allowed_operators, allowed_attributes= allowed_attributes).parse_folder return cls(ast_parse=ast_parse)
@classmethod def from_components(cls, allowed_comparators: Optional[Sequence[Comparator] ]=None, allowed_operators: Optional[Sequence[Operator]]=None, allowed_attributes: Optional[Sequence[str]]=None, fix_invalid: bool=False ) ->StructuredQueryOutputParser: """ Create a structured query output parser from components. Args: allowed_comparators: allowed comparators allowed_operators: allowed operators Returns: a structured query output parser """ ast_parse: Callable if fix_invalid: def ast_parse(raw_filter: str) ->Optional[FilterDirective]: filter = cast(Optional[FilterDirective], get_parser(). parse_folder(raw_filter)) fixed = fix_filter_directive(filter, allowed_comparators= allowed_comparators, allowed_operators=allowed_operators, allowed_attributes=allowed_attributes) return fixed else: ast_parse = get_parser(allowed_comparators=allowed_comparators, allowed_operators=allowed_operators, allowed_attributes= allowed_attributes).parse_folder return cls(ast_parse=ast_parse)
Create a structured query output parser from components. Args: allowed_comparators: allowed comparators allowed_operators: allowed operators Returns: a structured query output parser
f
"""Return 2.""" return 2
def f(x: int) ->int: """Return 2.""" return 2
Return 2.
test_correct_get_tracer_project
cases = [self.SetProperTracerProjectTestCase(test_name= "default to 'default' when no project provided", envvars={}, expected_project_name='default'), self.SetProperTracerProjectTestCase( test_name='use session_name for legacy tracers', envvars={ 'LANGCHAIN_SESSION': 'old_timey_session'}, expected_project_name= 'old_timey_session'), self.SetProperTracerProjectTestCase(test_name= 'use LANGCHAIN_PROJECT over SESSION_NAME', envvars={'LANGCHAIN_SESSION': 'old_timey_session', 'LANGCHAIN_PROJECT': 'modern_session'}, expected_project_name='modern_session')] for case in cases: with self.subTest(msg=case.test_name): with pytest.MonkeyPatch.context() as mp: for k, v in case.envvars.items(): mp.setenv(k, v) client = unittest.mock.MagicMock(spec=Client) tracer = LangChainTracer(client=client) projects = [] def mock_create_run(**kwargs: Any) ->Any: projects.append(kwargs.get('project_name')) return unittest.mock.MagicMock() client.create_run = mock_create_run tracer.on_llm_start({'name': 'example_1'}, ['foo'], run_id=UUID ('9d878ab3-e5ca-4218-aef6-44cbdc90160a')) tracer.wait_for_futures() assert len(projects) == 1 and projects[0 ] == case.expected_project_name
def test_correct_get_tracer_project(self) ->None: cases = [self.SetProperTracerProjectTestCase(test_name= "default to 'default' when no project provided", envvars={}, expected_project_name='default'), self. SetProperTracerProjectTestCase(test_name= 'use session_name for legacy tracers', envvars={'LANGCHAIN_SESSION': 'old_timey_session'}, expected_project_name='old_timey_session'), self.SetProperTracerProjectTestCase(test_name= 'use LANGCHAIN_PROJECT over SESSION_NAME', envvars={ 'LANGCHAIN_SESSION': 'old_timey_session', 'LANGCHAIN_PROJECT': 'modern_session'}, expected_project_name='modern_session')] for case in cases: with self.subTest(msg=case.test_name): with pytest.MonkeyPatch.context() as mp: for k, v in case.envvars.items(): mp.setenv(k, v) client = unittest.mock.MagicMock(spec=Client) tracer = LangChainTracer(client=client) projects = [] def mock_create_run(**kwargs: Any) ->Any: projects.append(kwargs.get('project_name')) return unittest.mock.MagicMock() client.create_run = mock_create_run tracer.on_llm_start({'name': 'example_1'}, ['foo'], run_id= UUID('9d878ab3-e5ca-4218-aef6-44cbdc90160a')) tracer.wait_for_futures() assert len(projects) == 1 and projects[0 ] == case.expected_project_name
null
raise_deprecation
warnings.warn( '`VectorDBQAWithSourcesChain` is deprecated - please use `from langchain.chains import RetrievalQAWithSourcesChain`' ) return values
@root_validator() def raise_deprecation(cls, values: Dict) ->Dict: warnings.warn( '`VectorDBQAWithSourcesChain` is deprecated - please use `from langchain.chains import RetrievalQAWithSourcesChain`' ) return values
null
input_keys
"""Expect input key. :meta private: """ return [self.input_key]
@property def input_keys(self) ->List[str]: """Expect input key. :meta private: """ return [self.input_key]
Expect input key. :meta private:
embeddings
return None
@property def embeddings(self) ->Optional[Embeddings]: return None
null
line
"""Create a line on ASCII canvas. Args: x0 (int): x coordinate where the line should start. y0 (int): y coordinate where the line should start. x1 (int): x coordinate where the line should end. y1 (int): y coordinate where the line should end. char (str): character to draw the line with. """ if x0 > x1: x1, x0 = x0, x1 y1, y0 = y0, y1 dx = x1 - x0 dy = y1 - y0 if dx == 0 and dy == 0: self.point(x0, y0, char) elif abs(dx) >= abs(dy): for x in range(x0, x1 + 1): if dx == 0: y = y0 else: y = y0 + int(round((x - x0) * dy / float(dx))) self.point(x, y, char) elif y0 < y1: for y in range(y0, y1 + 1): if dy == 0: x = x0 else: x = x0 + int(round((y - y0) * dx / float(dy))) self.point(x, y, char) else: for y in range(y1, y0 + 1): if dy == 0: x = x0 else: x = x1 + int(round((y - y1) * dx / float(dy))) self.point(x, y, char)
def line(self, x0: int, y0: int, x1: int, y1: int, char: str) ->None: """Create a line on ASCII canvas. Args: x0 (int): x coordinate where the line should start. y0 (int): y coordinate where the line should start. x1 (int): x coordinate where the line should end. y1 (int): y coordinate where the line should end. char (str): character to draw the line with. """ if x0 > x1: x1, x0 = x0, x1 y1, y0 = y0, y1 dx = x1 - x0 dy = y1 - y0 if dx == 0 and dy == 0: self.point(x0, y0, char) elif abs(dx) >= abs(dy): for x in range(x0, x1 + 1): if dx == 0: y = y0 else: y = y0 + int(round((x - x0) * dy / float(dx))) self.point(x, y, char) elif y0 < y1: for y in range(y0, y1 + 1): if dy == 0: x = x0 else: x = x0 + int(round((y - y0) * dx / float(dy))) self.point(x, y, char) else: for y in range(y1, y0 + 1): if dy == 0: x = x0 else: x = x1 + int(round((y - y1) * dx / float(dy))) self.point(x, y, char)
Create a line on ASCII canvas. Args: x0 (int): x coordinate where the line should start. y0 (int): y coordinate where the line should start. x1 (int): x coordinate where the line should end. y1 (int): y coordinate where the line should end. char (str): character to draw the line with.
get_num_tokens
"""Count approximate number of tokens""" return round(len(text) / 4.6)
def get_num_tokens(self, text: str) ->int: """Count approximate number of tokens""" return round(len(text) / 4.6)
Count approximate number of tokens
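The 4.6-characters-per-token ratio used above is a rough heuristic for English text; a minimal standalone sketch of the same estimate (the sample sentence is invented):

def approx_token_count(text: str) -> int:
    # Rough heuristic: English text averages about 4.6 characters per token.
    return round(len(text) / 4.6)

print(approx_token_count('LangChain makes it easy to compose LLM calls.'))  # 10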
func
return call_func_with_variable_args(self.func, input, config, run_manager.get_sync(), **kwargs)
def func(input: Input, run_manager: AsyncCallbackManagerForChainRun, config: RunnableConfig) ->Output: return call_func_with_variable_args(self.func, input, config, run_manager.get_sync(), **kwargs)
null
transform_documents
"""Translate text documents using Google Translate. Arguments: source_language_code: ISO 639 language code of the input document. target_language_code: ISO 639 language code of the output document. For supported languages, refer to: https://cloud.google.com/translate/docs/languages mime_type: (Optional) Media Type of input text. Options: `text/plain`, `text/html` """ try: from google.cloud import translate except ImportError as exc: raise ImportError( 'Install Google Cloud Translate to use this parser.(pip install google-cloud-translate)' ) from exc response = self._client.translate_text(request=translate. TranslateTextRequest(contents=[doc.page_content for doc in documents], parent=self._parent_path, model=self._model_path, glossary_config= translate.TranslateTextGlossaryConfig(glossary=self._glossary_path), source_language_code=kwargs.get('source_language_code', None), target_language_code=kwargs.get('target_language_code'), mime_type= kwargs.get('mime_type', 'text/plain'))) translations = response.glossary_translations or response.translations return [Document(page_content=translation.translated_text, metadata={**doc. metadata, 'model': translation.model, 'detected_language_code': translation.detected_language_code}) for doc, translation in zip( documents, translations)]
def transform_documents(self, documents: Sequence[Document], **kwargs: Any ) ->Sequence[Document]: """Translate text documents using Google Translate. Arguments: source_language_code: ISO 639 language code of the input document. target_language_code: ISO 639 language code of the output document. For supported languages, refer to: https://cloud.google.com/translate/docs/languages mime_type: (Optional) Media Type of input text. Options: `text/plain`, `text/html` """ try: from google.cloud import translate except ImportError as exc: raise ImportError( 'Install Google Cloud Translate to use this parser.(pip install google-cloud-translate)' ) from exc response = self._client.translate_text(request=translate. TranslateTextRequest(contents=[doc.page_content for doc in documents], parent=self._parent_path, model=self._model_path, glossary_config=translate.TranslateTextGlossaryConfig(glossary=self ._glossary_path), source_language_code=kwargs.get( 'source_language_code', None), target_language_code=kwargs.get( 'target_language_code'), mime_type=kwargs.get('mime_type', 'text/plain'))) translations = response.glossary_translations or response.translations return [Document(page_content=translation.translated_text, metadata={** doc.metadata, 'model': translation.model, 'detected_language_code': translation.detected_language_code}) for doc, translation in zip( documents, translations)]
Translate text documents using Google Translate. Arguments: source_language_code: ISO 639 language code of the input document. target_language_code: ISO 639 language code of the output document. For supported languages, refer to: https://cloud.google.com/translate/docs/languages mime_type: (Optional) Media Type of input text. Options: `text/plain`, `text/html`
_load_few_shot_prompt
"""Load the "few shot" prompt from the config.""" config = _load_template('suffix', config) config = _load_template('prefix', config) if 'example_prompt_path' in config: if 'example_prompt' in config: raise ValueError( 'Only one of example_prompt and example_prompt_path should be specified.' ) config['example_prompt'] = load_prompt(config.pop('example_prompt_path')) else: config['example_prompt'] = load_prompt_from_config(config['example_prompt'] ) config = _load_examples(config) config = _load_output_parser(config) return FewShotPromptTemplate(**config)
def _load_few_shot_prompt(config: dict) ->FewShotPromptTemplate: """Load the "few shot" prompt from the config.""" config = _load_template('suffix', config) config = _load_template('prefix', config) if 'example_prompt_path' in config: if 'example_prompt' in config: raise ValueError( 'Only one of example_prompt and example_prompt_path should be specified.' ) config['example_prompt'] = load_prompt(config.pop( 'example_prompt_path')) else: config['example_prompt'] = load_prompt_from_config(config[ 'example_prompt']) config = _load_examples(config) config = _load_output_parser(config) return FewShotPromptTemplate(**config)
Load the "few shot" prompt from the config.
filter_complex_metadata
"""Filter out metadata types that are not supported for a vector store.""" updated_documents = [] for document in documents: filtered_metadata = {} for key, value in document.metadata.items(): if not isinstance(value, allowed_types): continue filtered_metadata[key] = value document.metadata = filtered_metadata updated_documents.append(document) return updated_documents
def filter_complex_metadata(documents: List[Document], *, allowed_types: Tuple[Type, ...]=(str, bool, int, float)) ->List[Document]: """Filter out metadata types that are not supported for a vector store.""" updated_documents = [] for document in documents: filtered_metadata = {} for key, value in document.metadata.items(): if not isinstance(value, allowed_types): continue filtered_metadata[key] = value document.metadata = filtered_metadata updated_documents.append(document) return updated_documents
Filter out metadata types that are not supported for a vector store.
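A short sketch of what the filter above does to a document whose metadata mixes simple and complex values; Doc is a throwaway stand-in for the real Document class and the metadata is invented:

from dataclasses import dataclass, field
from typing import Any, Dict

@dataclass
class Doc:  # illustrative stand-in for langchain's Document
    page_content: str
    metadata: Dict[str, Any] = field(default_factory=dict)

allowed_types = (str, bool, int, float)
doc = Doc('hello', {'source': 'a.txt', 'page': 3, 'tags': ['x', 'y'], 'score': 0.7})
doc.metadata = {k: v for k, v in doc.metadata.items() if isinstance(v, allowed_types)}
print(doc.metadata)  # {'source': 'a.txt', 'page': 3, 'score': 0.7} - the list value is dropped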
get_cassandra_connection
contact_points = [cp.strip() for cp in os.environ.get('CASSANDRA_CONTACT_POINTS', '').split(',') if cp.strip()] CASSANDRA_KEYSPACE = os.environ['CASSANDRA_KEYSPACE'] CASSANDRA_USERNAME = os.environ.get('CASSANDRA_USERNAME') CASSANDRA_PASSWORD = os.environ.get('CASSANDRA_PASSWORD') if CASSANDRA_USERNAME and CASSANDRA_PASSWORD: auth_provider = PlainTextAuthProvider(CASSANDRA_USERNAME, CASSANDRA_PASSWORD) else: auth_provider = None c_cluster = Cluster(contact_points if contact_points else None, auth_provider=auth_provider) session = c_cluster.connect() return session, CASSANDRA_KEYSPACE
def get_cassandra_connection(): contact_points = [cp.strip() for cp in os.environ.get('CASSANDRA_CONTACT_POINTS', '').split(',') if cp.strip()] CASSANDRA_KEYSPACE = os.environ['CASSANDRA_KEYSPACE'] CASSANDRA_USERNAME = os.environ.get('CASSANDRA_USERNAME') CASSANDRA_PASSWORD = os.environ.get('CASSANDRA_PASSWORD') if CASSANDRA_USERNAME and CASSANDRA_PASSWORD: auth_provider = PlainTextAuthProvider(CASSANDRA_USERNAME, CASSANDRA_PASSWORD) else: auth_provider = None c_cluster = Cluster(contact_points if contact_points else None, auth_provider=auth_provider) session = c_cluster.connect() return session, CASSANDRA_KEYSPACE
null
test_loads_llmchain
llm = OpenAI(model='davinci', temperature=0.5, openai_api_key='hello') prompt = PromptTemplate.from_template('hello {name}!') chain = LLMChain(llm=llm, prompt=prompt) chain_string = dumps(chain) chain2 = loads(chain_string, secrets_map={'OPENAI_API_KEY': 'hello'}) assert chain2 == chain assert dumps(chain2) == chain_string assert isinstance(chain2, LLMChain) assert isinstance(chain2.llm, OpenAI) assert isinstance(chain2.prompt, PromptTemplate)
@pytest.mark.requires('openai') def test_loads_llmchain() ->None: llm = OpenAI(model='davinci', temperature=0.5, openai_api_key='hello') prompt = PromptTemplate.from_template('hello {name}!') chain = LLMChain(llm=llm, prompt=prompt) chain_string = dumps(chain) chain2 = loads(chain_string, secrets_map={'OPENAI_API_KEY': 'hello'}) assert chain2 == chain assert dumps(chain2) == chain_string assert isinstance(chain2, LLMChain) assert isinstance(chain2.llm, OpenAI) assert isinstance(chain2.prompt, PromptTemplate)
null
__init__
""" Args: collection: MongoDB collection to add the texts to. embedding: Text embedding model to use. text_key: MongoDB field that will contain the text for each document. embedding_key: MongoDB field that will contain the embedding for each document. index_name: Name of the Atlas Search index. relevance_score_fn: The similarity score used for the index. Currently supported: Euclidean, cosine, and dot product. """ self._collection = collection self._embedding = embedding self._index_name = index_name self._text_key = text_key self._embedding_key = embedding_key self._relevance_score_fn = relevance_score_fn
def __init__(self, collection: Collection[MongoDBDocumentType], embedding: Embeddings, *, index_name: str='default', text_key: str='text', embedding_key: str='embedding', relevance_score_fn: str='cosine'): """ Args: collection: MongoDB collection to add the texts to. embedding: Text embedding model to use. text_key: MongoDB field that will contain the text for each document. embedding_key: MongoDB field that will contain the embedding for each document. index_name: Name of the Atlas Search index. relevance_score_fn: The similarity score used for the index. Currently supported: Euclidean, cosine, and dot product. """ self._collection = collection self._embedding = embedding self._index_name = index_name self._text_key = text_key self._embedding_key = embedding_key self._relevance_score_fn = relevance_score_fn
Args: collection: MongoDB collection to add the texts to. embedding: Text embedding model to use. text_key: MongoDB field that will contain the text for each document. embedding_key: MongoDB field that will contain the embedding for each document. index_name: Name of the Atlas Search index. relevance_score_fn: The similarity score used for the index. Currently supported: Euclidean, cosine, and dot product.
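Constructing the store above against a live collection might look roughly like this; only the keyword names come from the signature, while the class name MongoDBAtlasVectorSearch, the import paths, and the connection URI are assumptions:

from pymongo import MongoClient
from langchain_community.embeddings import OpenAIEmbeddings  # any Embeddings implementation works
from langchain_community.vectorstores import MongoDBAtlasVectorSearch  # assumed class name

client = MongoClient('mongodb+srv://user:pass@cluster.example.net')  # placeholder URI
collection = client['my_db']['my_collection']

store = MongoDBAtlasVectorSearch(
    collection,
    OpenAIEmbeddings(),
    index_name='default',       # name of the Atlas Search index
    text_key='text',            # field holding the raw text
    embedding_key='embedding',  # field holding the vector
)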
format_docs
return '\n\n'.join(f'Wikipedia {i + 1}:\n{doc.page_content}' for i, doc in enumerate(docs))
def format_docs(docs): return '\n\n'.join(f'Wikipedia {i + 1}:\n{doc.page_content}' for i, doc in enumerate(docs))
null
memory_variables
"""Will always return list of memory variables. :meta private: """ return [self.memory_key]
@property def memory_variables(self) ->List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key]
Will always return list of memory variables. :meta private:
_prep_texts
"""Embed and create the documents""" _ids = ids or (str(uuid.uuid4()) for _ in texts) _metadatas: Iterable[dict] = metadatas or ({} for _ in texts) embedded_texts = self._embedding.embed_documents(list(texts)) return [{'id': _id, 'vec': vec, f'{self._text_key}': text, 'metadata': metadata} for _id, vec, text, metadata in zip(_ids, embedded_texts, texts, _metadatas)]
def _prep_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]], ids: Optional[List[str]]) ->List[dict]: """Embed and create the documents""" _ids = ids or (str(uuid.uuid4()) for _ in texts) _metadatas: Iterable[dict] = metadatas or ({} for _ in texts) embedded_texts = self._embedding.embed_documents(list(texts)) return [{'id': _id, 'vec': vec, f'{self._text_key}': text, 'metadata': metadata} for _id, vec, text, metadata in zip(_ids, embedded_texts, texts, _metadatas)]
Embed and create the documents
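The id and metadata defaulting in _prep_texts is easy to trace in isolation; everything in this sketch (the texts and the fake vectors standing in for embed_documents) is invented for illustration:

import uuid

texts = ['alpha', 'beta']
ids = None
metadatas = None

_ids = ids or [str(uuid.uuid4()) for _ in texts]
_metadatas = metadatas or [{} for _ in texts]
fake_vectors = [[0.0, 1.0], [1.0, 0.0]]  # stand-in for self._embedding.embed_documents(texts)

docs = [{'id': i, 'vec': v, 'text': t, 'metadata': m}
        for i, v, t, m in zip(_ids, fake_vectors, texts, _metadatas)]
print(docs[0]['text'], len(docs))  # alpha 2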
on_llm_start
"""Run when LLM starts.""" self.step += 1 self.llm_starts += 1 self.starts += 1 resp = self._init_resp() resp.update({'action': 'on_llm_start'}) resp.update(flatten_dict(serialized)) resp.update(self.get_custom_callback_meta()) for prompt in prompts: prompt_resp = deepcopy(resp) prompt_resp['prompts'] = prompt self.on_llm_start_records.append(prompt_resp) self.action_records.append(prompt_resp) if self.stream_logs: self.logger.report_text(prompt_resp)
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], ** kwargs: Any) ->None: """Run when LLM starts.""" self.step += 1 self.llm_starts += 1 self.starts += 1 resp = self._init_resp() resp.update({'action': 'on_llm_start'}) resp.update(flatten_dict(serialized)) resp.update(self.get_custom_callback_meta()) for prompt in prompts: prompt_resp = deepcopy(resp) prompt_resp['prompts'] = prompt self.on_llm_start_records.append(prompt_resp) self.action_records.append(prompt_resp) if self.stream_logs: self.logger.report_text(prompt_resp)
Run when LLM starts.
_detect_pii
analyzer_results = analyzer.analyze(text=inputs['text'], language='en') return bool(analyzer_results)
def _detect_pii(inputs: dict) ->bool: analyzer_results = analyzer.analyze(text=inputs['text'], language='en') return bool(analyzer_results)
null
test_confluence_pagination
loader = ConfluenceLoader(url='https://templates.atlassian.net/wiki/') docs = loader.load(space_key='RD', limit=3, max_pages=5) assert len(docs) == 5 assert docs[0].page_content is not None
@pytest.mark.skipif(not confluence_installed, reason= 'Atlassian package not installed') def test_confluence_pagination() ->None: loader = ConfluenceLoader(url='https://templates.atlassian.net/wiki/') docs = loader.load(space_key='RD', limit=3, max_pages=5) assert len(docs) == 5 assert docs[0].page_content is not None
null
handle_starttag
"""Hook when a new tag is encountered.""" self.depth += 1 self.stack.append(defaultdict(list)) self.data = None
def handle_starttag(self, tag: str, attrs: Any) ->None: """Hook when a new tag is encountered.""" self.depth += 1 self.stack.append(defaultdict(list)) self.data = None
Hook when a new tag is encountered.
test_simple_action_strlist_w_some_emb
str1 = 'test1' str2 = 'test2' str3 = 'test3' encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2)) encoded_str3 = base.stringify_embedding(list(encoded_keyword + str3)) expected = [{'a_namespace': str1}, {'a_namespace': encoded_str2}, { 'a_namespace': encoded_str3}] assert base.embed([str1, base.Embed(str2), base.Embed(str3)], MockEncoder(), 'a_namespace') == expected expected_embed_and_keep = [{'a_namespace': str1}, {'a_namespace': str2 + ' ' + encoded_str2}, {'a_namespace': str3 + ' ' + encoded_str3}] assert base.embed([str1, base.EmbedAndKeep(str2), base.EmbedAndKeep(str3)], MockEncoder(), 'a_namespace') == expected_embed_and_keep
@pytest.mark.requires('vowpal_wabbit_next') def test_simple_action_strlist_w_some_emb() ->None: str1 = 'test1' str2 = 'test2' str3 = 'test3' encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2)) encoded_str3 = base.stringify_embedding(list(encoded_keyword + str3)) expected = [{'a_namespace': str1}, {'a_namespace': encoded_str2}, { 'a_namespace': encoded_str3}] assert base.embed([str1, base.Embed(str2), base.Embed(str3)], MockEncoder(), 'a_namespace') == expected expected_embed_and_keep = [{'a_namespace': str1}, {'a_namespace': str2 + ' ' + encoded_str2}, {'a_namespace': str3 + ' ' + encoded_str3}] assert base.embed([str1, base.EmbedAndKeep(str2), base.EmbedAndKeep( str3)], MockEncoder(), 'a_namespace') == expected_embed_and_keep
null
transform_output
return response.json()[0]['generated_text']
@classmethod def transform_output(cls, response: Any) ->str: return response.json()[0]['generated_text']
null
__init__
self.connection_string = connection_string self.embedding_function = embedding_function self.collection_name = collection_name self.collection_metadata = collection_metadata self._distance_strategy = distance_strategy self.pre_delete_collection = pre_delete_collection self.logger = logger or logging.getLogger(__name__) self.override_relevance_score_fn = relevance_score_fn self.engine_args = engine_args or {} self._bind = connection if connection else self._create_engine() self.__post_init__()
def __init__(self, connection_string: str, embedding_function: Embeddings, collection_name: str=_LANGCHAIN_DEFAULT_COLLECTION_NAME, collection_metadata: Optional[dict]=None, distance_strategy: DistanceStrategy=DEFAULT_DISTANCE_STRATEGY, pre_delete_collection: bool=False, logger: Optional[logging.Logger]=None, relevance_score_fn: Optional[Callable[[float], float]]=None, *, connection: Optional[sqlalchemy.engine.Connection]=None, engine_args: Optional[dict[str, Any]]=None) ->None: self.connection_string = connection_string self.embedding_function = embedding_function self.collection_name = collection_name self.collection_metadata = collection_metadata self._distance_strategy = distance_strategy self.pre_delete_collection = pre_delete_collection self.logger = logger or logging.getLogger(__name__) self.override_relevance_score_fn = relevance_score_fn self.engine_args = engine_args or {} self._bind = connection if connection else self._create_engine() self.__post_init__()
null
__init__
"""Initialize with a path.""" self.file_path = path
def __init__(self, path: str): """Initialize with a path.""" self.file_path = path
Initialize with a path.
__init__
""" Initialize the IMessageChatLoader. Args: path (str or Path, optional): Path to the chat.db SQLite file. Defaults to None, in which case the default path ~/Library/Messages/chat.db will be used. """ if path is None: path = Path.home() / 'Library' / 'Messages' / 'chat.db' self.db_path = path if isinstance(path, Path) else Path(path) if not self.db_path.exists(): raise FileNotFoundError(f'File {self.db_path} not found') try: import sqlite3 except ImportError as e: raise ImportError( """The sqlite3 module is required to load iMessage chats. Please install it with `pip install pysqlite3`""" ) from e
def __init__(self, path: Optional[Union[str, Path]]=None): """ Initialize the IMessageChatLoader. Args: path (str or Path, optional): Path to the chat.db SQLite file. Defaults to None, in which case the default path ~/Library/Messages/chat.db will be used. """ if path is None: path = Path.home() / 'Library' / 'Messages' / 'chat.db' self.db_path = path if isinstance(path, Path) else Path(path) if not self.db_path.exists(): raise FileNotFoundError(f'File {self.db_path} not found') try: import sqlite3 except ImportError as e: raise ImportError( """The sqlite3 module is required to load iMessage chats. Please install it with `pip install pysqlite3`""" ) from e
Initialize the IMessageChatLoader. Args: path (str or Path, optional): Path to the chat.db SQLite file. Defaults to None, in which case the default path ~/Library/Messages/chat.db will be used.
load
"""Load from a list of image data or file paths""" try: from transformers import BlipForConditionalGeneration, BlipProcessor except ImportError: raise ImportError( '`transformers` package not found, please install with `pip install transformers`.' ) processor = BlipProcessor.from_pretrained(self.blip_processor) model = BlipForConditionalGeneration.from_pretrained(self.blip_model) results = [] for image in self.images: caption, metadata = self._get_captions_and_metadata(model=model, processor=processor, image=image) doc = Document(page_content=caption, metadata=metadata) results.append(doc) return results
def load(self) ->List[Document]: """Load from a list of image data or file paths""" try: from transformers import BlipForConditionalGeneration, BlipProcessor except ImportError: raise ImportError( '`transformers` package not found, please install with `pip install transformers`.' ) processor = BlipProcessor.from_pretrained(self.blip_processor) model = BlipForConditionalGeneration.from_pretrained(self.blip_model) results = [] for image in self.images: caption, metadata = self._get_captions_and_metadata(model=model, processor=processor, image=image) doc = Document(page_content=caption, metadata=metadata) results.append(doc) return results
Load from a list of image data or file paths
create_structured_chat_agent
"""Create an agent aimed at supporting tools with multiple inputs. Examples: .. code-block:: python from langchain import hub from langchain_community.chat_models import ChatOpenAI from langchain.agents import AgentExecutor, create_structured_chat_agent prompt = hub.pull("hwchase17/structured-chat-agent") model = ChatOpenAI() tools = ... agent = create_structured_chat_agent(model, tools, prompt) agent_executor = AgentExecutor(agent=agent, tools=tools) agent_executor.invoke({"input": "hi"}) # Using with chat history from langchain_core.messages import AIMessage, HumanMessage agent_executor.invoke( { "input": "what's my name?", "chat_history": [ HumanMessage(content="hi! my name is bob"), AIMessage(content="Hello Bob! How can I assist you today?"), ], } ) Args: llm: LLM to use as the agent. tools: Tools this agent has access to. prompt: The prompt to use, must have input keys of `tools`, `tool_names`, and `agent_scratchpad`. Returns: A runnable sequence representing an agent. It takes as input all the same input variables as the prompt passed in does. It returns as output either an AgentAction or AgentFinish. """ missing_vars = {'tools', 'tool_names', 'agent_scratchpad'}.difference(prompt .input_variables) if missing_vars: raise ValueError(f'Prompt missing required variables: {missing_vars}') prompt = prompt.partial(tools=render_text_description_and_args(list(tools)), tool_names=', '.join([t.name for t in tools])) llm_with_stop = llm.bind(stop=['Observation']) agent = RunnablePassthrough.assign(agent_scratchpad=lambda x: format_log_to_str(x['intermediate_steps']) ) | prompt | llm_with_stop | JSONAgentOutputParser() return agent
def create_structured_chat_agent(llm: BaseLanguageModel, tools: Sequence[ BaseTool], prompt: ChatPromptTemplate) ->Runnable: """Create an agent aimed at supporting tools with multiple inputs. Examples: .. code-block:: python from langchain import hub from langchain_community.chat_models import ChatOpenAI from langchain.agents import AgentExecutor, create_structured_chat_agent prompt = hub.pull("hwchase17/structured-chat-agent") model = ChatOpenAI() tools = ... agent = create_structured_chat_agent(model, tools, prompt) agent_executor = AgentExecutor(agent=agent, tools=tools) agent_executor.invoke({"input": "hi"}) # Using with chat history from langchain_core.messages import AIMessage, HumanMessage agent_executor.invoke( { "input": "what's my name?", "chat_history": [ HumanMessage(content="hi! my name is bob"), AIMessage(content="Hello Bob! How can I assist you today?"), ], } ) Args: llm: LLM to use as the agent. tools: Tools this agent has access to. prompt: The prompt to use, must have input keys of `tools`, `tool_names`, and `agent_scratchpad`. Returns: A runnable sequence representing an agent. It takes as input all the same input variables as the prompt passed in does. It returns as output either an AgentAction or AgentFinish. """ missing_vars = {'tools', 'tool_names', 'agent_scratchpad'}.difference( prompt.input_variables) if missing_vars: raise ValueError(f'Prompt missing required variables: {missing_vars}') prompt = prompt.partial(tools=render_text_description_and_args(list( tools)), tool_names=', '.join([t.name for t in tools])) llm_with_stop = llm.bind(stop=['Observation']) agent = RunnablePassthrough.assign(agent_scratchpad=lambda x: format_log_to_str(x['intermediate_steps']) ) | prompt | llm_with_stop | JSONAgentOutputParser() return agent
Create an agent aimed at supporting tools with multiple inputs. Examples: .. code-block:: python from langchain import hub from langchain_community.chat_models import ChatOpenAI from langchain.agents import AgentExecutor, create_structured_chat_agent prompt = hub.pull("hwchase17/structured-chat-agent") model = ChatOpenAI() tools = ... agent = create_structured_chat_agent(model, tools, prompt) agent_executor = AgentExecutor(agent=agent, tools=tools) agent_executor.invoke({"input": "hi"}) # Using with chat history from langchain_core.messages import AIMessage, HumanMessage agent_executor.invoke( { "input": "what's my name?", "chat_history": [ HumanMessage(content="hi! my name is bob"), AIMessage(content="Hello Bob! How can I assist you today?"), ], } ) Args: llm: LLM to use as the agent. tools: Tools this agent has access to. prompt: The prompt to use, must have input keys of `tools`, `tool_names`, and `agent_scratchpad`. Returns: A runnable sequence representing an agent. It takes as input all the same input variables as the prompt passed in does. It returns as output either an AgentAction or AgentFinish.
_create_retry_decorator
min_seconds = 4 max_seconds = 10 max_retries = llm.max_retries if llm.max_retries is not None else 3 return retry(reraise=True, stop=stop_after_attempt(max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=retry_if_exception_type((RequestException, ConnectTimeout, ReadTimeout)), before_sleep=before_sleep_log(logger, logging.WARNING))
def _create_retry_decorator(llm: Nebula) ->Callable[[Any], Any]: min_seconds = 4 max_seconds = 10 max_retries = llm.max_retries if llm.max_retries is not None else 3 return retry(reraise=True, stop=stop_after_attempt(max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=retry_if_exception_type((RequestException, ConnectTimeout, ReadTimeout)), before_sleep=before_sleep_log(logger, logging.WARNING))
null
test_from_texts
vs = zep_vectorstore.from_texts(**texts_metadatas, collection_name=mock_collection_config.name, api_url='http://localhost:8000') vs._collection.add_documents.assert_called_once_with(texts_metadatas_as_zep_documents)
@pytest.mark.requires('zep_python') def test_from_texts(zep_vectorstore: ZepVectorStore, mock_collection_config: CollectionConfig, mock_collection: 'DocumentCollection', texts_metadatas: Dict[str, Any], texts_metadatas_as_zep_documents: List['ZepDocument']) ->None: vs = zep_vectorstore.from_texts(**texts_metadatas, collection_name=mock_collection_config.name, api_url='http://localhost:8000') vs._collection.add_documents.assert_called_once_with(texts_metadatas_as_zep_documents)
null
test_logging
logger = logging.getLogger('test_logging') logger.setLevel(logging.INFO) logger.addHandler(logging.StreamHandler(sys.stdout)) handler = LoggingCallbackHandler(logger, extra={'test': 'test_extra'}) handler.on_text('test', run_id=uuid.uuid4()) assert len(caplog.record_tuples) == 1 record = caplog.records[0] assert record.name == logger.name assert record.levelno == logging.INFO assert record.msg == """[text] New text: test""" assert record.test == 'test_extra' cap_result = capsys.readouterr() assert cap_result.out == """[text] New text: test """
def test_logging(caplog: pytest.LogCaptureFixture, capsys: pytest. CaptureFixture[str]) ->None: logger = logging.getLogger('test_logging') logger.setLevel(logging.INFO) logger.addHandler(logging.StreamHandler(sys.stdout)) handler = LoggingCallbackHandler(logger, extra={'test': 'test_extra'}) handler.on_text('test', run_id=uuid.uuid4()) assert len(caplog.record_tuples) == 1 record = caplog.records[0] assert record.name == logger.name assert record.levelno == logging.INFO assert record.msg == '\x1b[36;1m\x1b[1;3m[text]\x1b[0m \x1b[1mNew text:\x1b[0m\ntest' assert record.test == 'test_extra' cap_result = capsys.readouterr() assert cap_result.out == '\x1b[36;1m\x1b[1;3m[text]\x1b[0m \x1b[1mNew text:\x1b[0m\ntest\n'
null
fn
return 'fake_uuid'
def fn(self: Any, **kwargs: Any) ->str: return 'fake_uuid'
null
_num_thought_containers
"""The number of 'thought containers' we're currently showing: the number of completed thought containers, the history container (if it exists), and the current thought container (if it exists). """ count = len(self._completed_thoughts) if self._history_container is not None: count += 1 if self._current_thought is not None: count += 1 return count
@property def _num_thought_containers(self) ->int: """The number of 'thought containers' we're currently showing: the number of completed thought containers, the history container (if it exists), and the current thought container (if it exists). """ count = len(self._completed_thoughts) if self._history_container is not None: count += 1 if self._current_thought is not None: count += 1 return count
The number of 'thought containers' we're currently showing: the number of completed thought containers, the history container (if it exists), and the current thought container (if it exists).
test_load_success_multiple_arxiv_identifiers
"""Test a query of arxiv identifiers that returns the correct answer""" docs = api_client.load('1605.08386v1 2212.00794v2 2308.07912') assert len(docs) == 3 assert_docs(docs)
def test_load_success_multiple_arxiv_identifiers(api_client: ArxivAPIWrapper ) ->None: """Test a query of arxiv identifiers that returns the correct answer""" docs = api_client.load('1605.08386v1 2212.00794v2 2308.07912') assert len(docs) == 3 assert_docs(docs)
Test a query of arxiv identifiers that returns the correct answer
_get_document_for_video_id
captions = self._get_transcripe_for_video_id(video_id) video_response = self.youtube_client.videos().list(part='id,snippet', id=video_id).execute() return Document(page_content=captions, metadata=video_response.get('items')[0])
def _get_document_for_video_id(self, video_id: str, **kwargs: Any) ->Document: captions = self._get_transcripe_for_video_id(video_id) video_response = self.youtube_client.videos().list(part='id,snippet', id=video_id).execute() return Document(page_content=captions, metadata=video_response.get('items')[0])
null
test_octoai_endpoint_text_generation
"""Test valid call to OctoAI text generation model.""" llm = OctoAIEndpoint(endpoint_url= 'https://mpt-7b-demo-f1kzsig6xes9.octoai.run/generate', octoai_api_token='<octoai_api_token>', model_kwargs={'max_new_tokens': 200, 'temperature': 0.75, 'top_p': 0.95, 'repetition_penalty': 1, 'seed': None, 'stop': []}) output = llm('Which state is Los Angeles in?') print(output) assert isinstance(output, str)
def test_octoai_endpoint_text_generation() ->None: """Test valid call to OctoAI text generation model.""" llm = OctoAIEndpoint(endpoint_url= 'https://mpt-7b-demo-f1kzsig6xes9.octoai.run/generate', octoai_api_token='<octoai_api_token>', model_kwargs={ 'max_new_tokens': 200, 'temperature': 0.75, 'top_p': 0.95, 'repetition_penalty': 1, 'seed': None, 'stop': []}) output = llm('Which state is Los Angeles in?') print(output) assert isinstance(output, str)
Test valid call to OctoAI text generation model.
test_similarity_search_exact_search_unknown_distance_strategy
"""Test end to end construction and search with unknown distance strategy.""" with pytest.raises(KeyError): texts = ['foo', 'bar', 'baz'] ElasticsearchStore.from_texts(texts, FakeEmbeddings(), ** elasticsearch_connection, index_name=index_name, strategy= ElasticsearchStore.ExactRetrievalStrategy(), distance_strategy= 'NOT_A_STRATEGY')
def test_similarity_search_exact_search_unknown_distance_strategy(self, elasticsearch_connection: dict, index_name: str) ->None: """Test end to end construction and search with unknown distance strategy.""" with pytest.raises(KeyError): texts = ['foo', 'bar', 'baz'] ElasticsearchStore.from_texts(texts, FakeEmbeddings(), ** elasticsearch_connection, index_name=index_name, strategy= ElasticsearchStore.ExactRetrievalStrategy(), distance_strategy= 'NOT_A_STRATEGY')
Test end to end construction and search with unknown distance strategy.
add_message
"""Append the message to the record in SingleStoreDB""" self._create_table_if_not_exists() conn = self.connection_pool.connect() try: cur = conn.cursor() try: cur.execute('INSERT INTO {} ({}, {}) VALUES (%s, %s)'.format(self. table_name, self.session_id_field, self.message_field), (self. session_id, json.dumps(message_to_dict(message)))) finally: cur.close() finally: conn.close()
def add_message(self, message: BaseMessage) ->None: """Append the message to the record in SingleStoreDB""" self._create_table_if_not_exists() conn = self.connection_pool.connect() try: cur = conn.cursor() try: cur.execute('INSERT INTO {} ({}, {}) VALUES (%s, %s)'.format( self.table_name, self.session_id_field, self.message_field), (self.session_id, json.dumps(message_to_dict(message)))) finally: cur.close() finally: conn.close()
Append the message to the record in SingleStoreDB
_import_koboldai
from langchain_community.llms.koboldai import KoboldApiLLM return KoboldApiLLM
def _import_koboldai() ->Any: from langchain_community.llms.koboldai import KoboldApiLLM return KoboldApiLLM
null
__init__
self._approve = approve self._should_check = should_check
def __init__(self, approve: Callable[[Any], bool]=_default_approve, should_check: Callable[[Dict[str, Any]], bool]=_default_true): self._approve = approve self._should_check = should_check
null
serialize_chat_messages
"""Extract the input messages from the run.""" if isinstance(messages, list) and messages: if isinstance(messages[0], dict): chat_messages = _get_messages_from_run_dict(messages) elif isinstance(messages[0], list): chat_messages = _get_messages_from_run_dict(messages[0]) else: raise ValueError(f'Could not extract messages to evaluate {messages}') return get_buffer_string(chat_messages) raise ValueError(f'Could not extract messages to evaluate {messages}')
def serialize_chat_messages(self, messages: List[Dict]) ->str: """Extract the input messages from the run.""" if isinstance(messages, list) and messages: if isinstance(messages[0], dict): chat_messages = _get_messages_from_run_dict(messages) elif isinstance(messages[0], list): chat_messages = _get_messages_from_run_dict(messages[0]) else: raise ValueError( f'Could not extract messages to evaluate {messages}') return get_buffer_string(chat_messages) raise ValueError(f'Could not extract messages to evaluate {messages}')
Extract the input messages from the run.
_make_id
return f'{_hash(prompt)}#{_hash(llm_string)}'
@staticmethod def _make_id(prompt: str, llm_string: str) ->str: return f'{_hash(prompt)}#{_hash(llm_string)}'
null
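The cache id above is simply two hashes joined by '#'; a self-contained sketch with a stand-in for the module's _hash helper (assumed here to be an MD5 hex digest, which may differ from the real one):

import hashlib

def _hash(value: str) -> str:
    # Stand-in for the module-level helper; the real implementation may differ.
    return hashlib.md5(value.encode()).hexdigest()

def make_id(prompt: str, llm_string: str) -> str:
    return f'{_hash(prompt)}#{_hash(llm_string)}'

print(make_id('What is 2 + 2?', 'openai/gpt-3.5-turbo'))  # '<prompt hash>#<llm hash>'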
config_specs
mapper_config_specs = [s for mapper in self.keys.values() if mapper is not None for s in mapper.config_specs] for spec in mapper_config_specs: if spec.id.endswith(CONTEXT_CONFIG_SUFFIX_GET): getter_key = spec.id.split('/')[1] if getter_key in self.keys: raise ValueError( f'Circular reference in context setter for key {getter_key}') return super().config_specs + [ConfigurableFieldSpec(id=id_, annotation=Callable[[], Any]) for id_ in self.ids]
@property def config_specs(self) ->List[ConfigurableFieldSpec]: mapper_config_specs = [s for mapper in self.keys.values() if mapper is not None for s in mapper.config_specs] for spec in mapper_config_specs: if spec.id.endswith(CONTEXT_CONFIG_SUFFIX_GET): getter_key = spec.id.split('/')[1] if getter_key in self.keys: raise ValueError( f'Circular reference in context setter for key {getter_key}') return super().config_specs + [ConfigurableFieldSpec(id=id_, annotation=Callable[[], Any]) for id_ in self.ids]
null
_run
"""Use the Clickup API to run an operation.""" return self.api_wrapper.run(self.mode, instructions)
def _run(self, instructions: str, run_manager: Optional[CallbackManagerForToolRun]=None) ->str: """Use the Clickup API to run an operation.""" return self.api_wrapper.run(self.mode, instructions)
Use the Clickup API to run an operation.
from_llm
"""Create a SQLDatabaseChain from an LLM and a database connection. *Security note*: Make sure that the database connection uses credentials that are narrowly-scoped to only include the permissions this chain needs. Failure to do so may result in data corruption or loss, since this chain may attempt commands like `DROP TABLE` or `INSERT` if appropriately prompted. The best way to guard against such negative outcomes is to (as appropriate) limit the permissions granted to the credentials used with this chain. This issue shows an example negative outcome if these steps are not taken: https://github.com/langchain-ai/langchain/issues/5923 """ prompt = prompt or SQL_PROMPTS.get(db.dialect, PROMPT) llm_chain = LLMChain(llm=llm, prompt=prompt) return cls(llm_chain=llm_chain, database=db, **kwargs)
@classmethod def from_llm(cls, llm: BaseLanguageModel, db: SQLDatabase, prompt: Optional [BasePromptTemplate]=None, **kwargs: Any) ->SQLDatabaseChain: """Create a SQLDatabaseChain from an LLM and a database connection. *Security note*: Make sure that the database connection uses credentials that are narrowly-scoped to only include the permissions this chain needs. Failure to do so may result in data corruption or loss, since this chain may attempt commands like `DROP TABLE` or `INSERT` if appropriately prompted. The best way to guard against such negative outcomes is to (as appropriate) limit the permissions granted to the credentials used with this chain. This issue shows an example negative outcome if these steps are not taken: https://github.com/langchain-ai/langchain/issues/5923 """ prompt = prompt or SQL_PROMPTS.get(db.dialect, PROMPT) llm_chain = LLMChain(llm=llm, prompt=prompt) return cls(llm_chain=llm_chain, database=db, **kwargs)
Create a SQLDatabaseChain from an LLM and a database connection. *Security note*: Make sure that the database connection uses credentials that are narrowly-scoped to only include the permissions this chain needs. Failure to do so may result in data corruption or loss, since this chain may attempt commands like `DROP TABLE` or `INSERT` if appropriately prompted. The best way to guard against such negative outcomes is to (as appropriate) limit the permissions granted to the credentials used with this chain. This issue shows an example negative outcome if these steps are not taken: https://github.com/langchain-ai/langchain/issues/5923
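A typical call to the classmethod above, kept deliberately small; the import paths and the sqlite URI are assumptions and should be adapted to the installed packages, and per the security note the connection should use narrowly scoped, ideally read-only credentials:

# Import paths vary across langchain versions; these are assumptions.
from langchain_community.utilities import SQLDatabase
from langchain_community.llms import OpenAI
from langchain_experimental.sql import SQLDatabaseChain

db = SQLDatabase.from_uri('sqlite:///example.db')  # placeholder database
chain = SQLDatabaseChain.from_llm(OpenAI(temperature=0), db)
chain.run('How many rows are in the users table?')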
__init__
"""Initialize the LLMThought. Args: parent_container: The container we're writing into. labeler: The labeler to use for this thought. expanded: Whether the thought should be expanded by default. collapse_on_complete: Whether the thought should be collapsed. """ self._container = MutableExpander(parent_container=parent_container, label= labeler.get_initial_label(), expanded=expanded) self._state = LLMThoughtState.THINKING self._llm_token_stream = '' self._llm_token_writer_idx: Optional[int] = None self._last_tool: Optional[ToolRecord] = None self._collapse_on_complete = collapse_on_complete self._labeler = labeler
def __init__(self, parent_container: DeltaGenerator, labeler: LLMThoughtLabeler, expanded: bool, collapse_on_complete: bool): """Initialize the LLMThought. Args: parent_container: The container we're writing into. labeler: The labeler to use for this thought. expanded: Whether the thought should be expanded by default. collapse_on_complete: Whether the thought should be collapsed. """ self._container = MutableExpander(parent_container=parent_container, label=labeler.get_initial_label(), expanded=expanded) self._state = LLMThoughtState.THINKING self._llm_token_stream = '' self._llm_token_writer_idx: Optional[int] = None self._last_tool: Optional[ToolRecord] = None self._collapse_on_complete = collapse_on_complete self._labeler = labeler
Initialize the LLMThought. Args: parent_container: The container we're writing into. labeler: The labeler to use for this thought. expanded: Whether the thought should be expanded by default. collapse_on_complete: Whether the thought should be collapsed.
exists
"""Check if the provided keys exist in the database. Args: keys: A list of keys to check. Returns: A list of boolean values indicating the existence of each key. """
@abstractmethod def exists(self, keys: Sequence[str]) ->List[bool]: """Check if the provided keys exist in the database. Args: keys: A list of keys to check. Returns: A list of boolean values indicating the existence of each key. """
Check if the provided keys exist in the database. Args: keys: A list of keys to check. Returns: A list of boolean values indicating the existence of each key.
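A trivial in-memory implementation of the abstract method above, purely to illustrate the contract (one boolean per key, in input order); the class name is invented:

from typing import List, Sequence

class InMemoryRecordStore:
    """Illustrative stand-in; not a real langchain class."""

    def __init__(self) -> None:
        self._keys: set = set()

    def add(self, key: str) -> None:
        self._keys.add(key)

    def exists(self, keys: Sequence[str]) -> List[bool]:
        # One boolean per input key, preserving order.
        return [key in self._keys for key in keys]

store = InMemoryRecordStore()
store.add('doc-1')
print(store.exists(['doc-1', 'doc-2']))  # [True, False]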
test_bs_html_loader
"""Test unstructured loader.""" file_path = EXAMPLES / 'example.html' blob = Blob.from_path(file_path) parser = BS4HTMLParser(get_text_separator='|') docs = list(parser.lazy_parse(blob)) assert isinstance(docs, list) assert len(docs) == 1 metadata = docs[0].metadata content = docs[0].page_content assert metadata['title'] == "Chew dad's slippers" assert metadata['source'] == str(file_path) assert content[:2] == '\n|'
@pytest.mark.requires('bs4', 'lxml') def test_bs_html_loader() ->None: """Test unstructured loader.""" file_path = EXAMPLES / 'example.html' blob = Blob.from_path(file_path) parser = BS4HTMLParser(get_text_separator='|') docs = list(parser.lazy_parse(blob)) assert isinstance(docs, list) assert len(docs) == 1 metadata = docs[0].metadata content = docs[0].page_content assert metadata['title'] == "Chew dad's slippers" assert metadata['source'] == str(file_path) assert content[:2] == '\n|'
Test unstructured loader.
test_unstructured_xml_loader
"""Test unstructured loader.""" file_path = os.path.join(EXAMPLE_DIRECTORY, 'factbook.xml') loader = UnstructuredXMLLoader(str(file_path)) docs = loader.load() assert len(docs) == 1
def test_unstructured_xml_loader() ->None: """Test unstructured loader.""" file_path = os.path.join(EXAMPLE_DIRECTORY, 'factbook.xml') loader = UnstructuredXMLLoader(str(file_path)) docs = loader.load() assert len(docs) == 1
Test unstructured loader.
format_prompt
""" Format prompt. Should return a PromptValue. Args: **kwargs: Keyword arguments to use for formatting. Returns: PromptValue. """ messages = self.format_messages(**kwargs) return ChatPromptValue(messages=messages)
def format_prompt(self, **kwargs: Any) ->PromptValue: """ Format prompt. Should return a PromptValue. Args: **kwargs: Keyword arguments to use for formatting. Returns: PromptValue. """ messages = self.format_messages(**kwargs) return ChatPromptValue(messages=messages)
Format prompt. Should return a PromptValue. Args: **kwargs: Keyword arguments to use for formatting. Returns: PromptValue.
delete
"""Delete by vector IDs. Args: ids: List of ids to delete. """ if ids is None: raise ValueError('No ids provided to delete.') rows: List[Dict[str, Any]] = [{'id': id} for id in ids] for row in rows: self._client.from_(self.table_name).delete().eq('id', row['id']).execute()
def delete(self, ids: Optional[List[str]]=None, **kwargs: Any) ->None: """Delete by vector IDs. Args: ids: List of ids to delete. """ if ids is None: raise ValueError('No ids provided to delete.') rows: List[Dict[str, Any]] = [{'id': id} for id in ids] for row in rows: self._client.from_(self.table_name).delete().eq('id', row['id']).execute()
Delete by vector IDs. Args: ids: List of ids to delete.
set_db
from arango.database import Database if not isinstance(db, Database): msg = '**db** parameter must inherit from arango.database.Database' raise TypeError(msg) self.__db: Database = db self.set_schema()
def set_db(self, db: Any) ->None: from arango.database import Database if not isinstance(db, Database): msg = '**db** parameter must inherit from arango.database.Database' raise TypeError(msg) self.__db: Database = db self.set_schema()
null
on_chain_start
"""Run when chain starts running."""
def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any], *, run_id: UUID, parent_run_id: Optional[UUID]=None, tags: Optional[ List[str]]=None, metadata: Optional[Dict[str, Any]]=None, **kwargs: Any ) ->Any: """Run when chain starts running."""
Run when chain starts running.
test_vertexai_single_call
if model_name: model = ChatVertexAI(model_name=model_name) else: model = ChatVertexAI() message = HumanMessage(content='Hello') response = model([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str)
@pytest.mark.scheduled @pytest.mark.parametrize('model_name', model_names_to_test) def test_vertexai_single_call(model_name: str) ->None: if model_name: model = ChatVertexAI(model_name=model_name) else: model = ChatVertexAI() message = HumanMessage(content='Hello') response = model([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str)
null
test_google_vertex_ai_search_get_relevant_documents
"""Test the get_relevant_documents() method.""" retriever = GoogleVertexAISearchRetriever() documents = retriever.get_relevant_documents("What are Alphabet's Other Bets?") assert len(documents) > 0 for doc in documents: assert isinstance(doc, Document) assert doc.page_content assert doc.metadata['id'] assert doc.metadata['source']
@pytest.mark.requires('google.api_core') def test_google_vertex_ai_search_get_relevant_documents() ->None: """Test the get_relevant_documents() method.""" retriever = GoogleVertexAISearchRetriever() documents = retriever.get_relevant_documents( "What are Alphabet's Other Bets?") assert len(documents) > 0 for doc in documents: assert isinstance(doc, Document) assert doc.page_content assert doc.metadata['id'] assert doc.metadata['source']
Test the get_relevant_documents() method.
output_keys
"""Expect input key. :meta private: """ _output_keys = super().output_keys if self.return_intermediate_steps: _output_keys = _output_keys + ['intermediate_steps'] if self.metadata_keys is not None: _output_keys += self.metadata_keys return _output_keys
@property def output_keys(self) ->List[str]: """Expect input key. :meta private: """ _output_keys = super().output_keys if self.return_intermediate_steps: _output_keys = _output_keys + ['intermediate_steps'] if self.metadata_keys is not None: _output_keys += self.metadata_keys return _output_keys
Expect input key. :meta private:
_import_serpapi
from langchain_community.utilities.serpapi import SerpAPIWrapper return SerpAPIWrapper
def _import_serpapi() ->Any: from langchain_community.utilities.serpapi import SerpAPIWrapper return SerpAPIWrapper
null
test_hyde_from_llm
"""Test loading HyDE from all prompts.""" for key in PROMPT_MAP: embedding = HypotheticalDocumentEmbedder.from_llm(FakeLLM(), FakeEmbeddings(), key) embedding.embed_query('foo')
def test_hyde_from_llm() ->None: """Test loading HyDE from all prompts.""" for key in PROMPT_MAP: embedding = HypotheticalDocumentEmbedder.from_llm(FakeLLM(), FakeEmbeddings(), key) embedding.embed_query('foo')
Test loading HyDE from all prompts.
build_extra_kwargs
"""Build extra kwargs from values and extra_kwargs. Args: extra_kwargs: Extra kwargs passed in by user. values: Values passed in by user. all_required_field_names: All required field names for the pydantic class. """ for field_name in list(values): if field_name in extra_kwargs: raise ValueError(f'Found {field_name} supplied twice.') if field_name not in all_required_field_names: warnings.warn( f"""WARNING! {field_name} is not default parameter. {field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra_kwargs[field_name] = values.pop(field_name) invalid_model_kwargs = all_required_field_names.intersection(extra_kwargs. keys()) if invalid_model_kwargs: raise ValueError( f'Parameters {invalid_model_kwargs} should be specified explicitly. Instead they were passed in as part of `model_kwargs` parameter.' ) return extra_kwargs
def build_extra_kwargs(extra_kwargs: Dict[str, Any], values: Dict[str, Any], all_required_field_names: Set[str]) ->Dict[str, Any]: """Build extra kwargs from values and extra_kwargs. Args: extra_kwargs: Extra kwargs passed in by user. values: Values passed in by user. all_required_field_names: All required field names for the pydantic class. """ for field_name in list(values): if field_name in extra_kwargs: raise ValueError(f'Found {field_name} supplied twice.') if field_name not in all_required_field_names: warnings.warn( f"""WARNING! {field_name} is not default parameter. {field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra_kwargs[field_name] = values.pop(field_name) invalid_model_kwargs = all_required_field_names.intersection(extra_kwargs .keys()) if invalid_model_kwargs: raise ValueError( f'Parameters {invalid_model_kwargs} should be specified explicitly. Instead they were passed in as part of `model_kwargs` parameter.' ) return extra_kwargs
Build extra kwargs from values and extra_kwargs. Args: extra_kwargs: Extra kwargs passed in by user. values: Values passed in by user. all_required_field_names: All required field names for the pydantic class.
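A small illustrative call, assuming a model class whose only declared field is temperature (the argument values below are invented to show the behaviour):

# top_k is not a declared field, so it triggers the warning and is moved
# out of values and into the returned extra kwargs.
extra = build_extra_kwargs(
    extra_kwargs={},
    values={'temperature': 0.2, 'top_k': 40},
    all_required_field_names={'temperature'},
)
# extra == {'top_k': 40}; 'temperature' stays in values.
# Passing 'temperature' inside extra_kwargs instead would raise ValueError,
# because declared parameters must be specified explicitly.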
_generate
llm_input = self._to_chat_prompt(messages) llm_result = self.llm._generate(prompts=[llm_input], stop=stop, run_manager =run_manager, **kwargs) return self._to_chat_result(llm_result)
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]= None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any ) ->ChatResult: llm_input = self._to_chat_prompt(messages) llm_result = self.llm._generate(prompts=[llm_input], stop=stop, run_manager=run_manager, **kwargs) return self._to_chat_result(llm_result)
null
on_chain_end
"""If either the `parent_run_id` or the `run_id` is in `self.prompts`, then log the outputs to Argilla, and pop the run from `self.prompts`. The behavior differs if the output is a list or not. """ if not any(key in self.prompts for key in [str(kwargs['parent_run_id']), str(kwargs['run_id'])]): return prompts = self.prompts.get(str(kwargs['parent_run_id'])) or self.prompts.get( str(kwargs['run_id'])) for chain_output_key, chain_output_val in outputs.items(): if isinstance(chain_output_val, list): self.dataset.add_records(records=[{'fields': {'prompt': prompt, 'response': output['text'].strip()}} for prompt, output in zip( prompts, chain_output_val)]) else: self.dataset.add_records(records=[{'fields': {'prompt': ' '.join( prompts), 'response': chain_output_val.strip()}}]) if str(kwargs['parent_run_id']) in self.prompts: self.prompts.pop(str(kwargs['parent_run_id'])) if str(kwargs['run_id']) in self.prompts: self.prompts.pop(str(kwargs['run_id'])) if parse(self.ARGILLA_VERSION) < parse('1.14.0'): self.dataset.push_to_argilla()
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) ->None: """If either the `parent_run_id` or the `run_id` is in `self.prompts`, then log the outputs to Argilla, and pop the run from `self.prompts`. The behavior differs if the output is a list or not. """ if not any(key in self.prompts for key in [str(kwargs['parent_run_id']), str(kwargs['run_id'])]): return prompts = self.prompts.get(str(kwargs['parent_run_id']) ) or self.prompts.get(str(kwargs['run_id'])) for chain_output_key, chain_output_val in outputs.items(): if isinstance(chain_output_val, list): self.dataset.add_records(records=[{'fields': {'prompt': prompt, 'response': output['text'].strip()}} for prompt, output in zip(prompts, chain_output_val)]) else: self.dataset.add_records(records=[{'fields': {'prompt': ' '. join(prompts), 'response': chain_output_val.strip()}}]) if str(kwargs['parent_run_id']) in self.prompts: self.prompts.pop(str(kwargs['parent_run_id'])) if str(kwargs['run_id']) in self.prompts: self.prompts.pop(str(kwargs['run_id'])) if parse(self.ARGILLA_VERSION) < parse('1.14.0'): self.dataset.push_to_argilla()
If either the `parent_run_id` or the `run_id` is in `self.prompts`, then log the outputs to Argilla, and pop the run from `self.prompts`. The behavior differs if the output is a list or not.
get_num_tokens_from_messages
"""Calculate num tokens with tiktoken package. Official documentation: https://github.com/openai/openai-cookbook/blob/ main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb""" if sys.version_info[1] <= 7: return super().get_num_tokens_from_messages(messages) model, encoding = self._get_encoding_model() tokens_per_message = 3 tokens_per_name = 1 num_tokens = 0 messages_dict = [convert_message_to_dict(m) for m in messages] for message in messages_dict: num_tokens += tokens_per_message for key, value in message.items(): num_tokens += len(encoding.encode(str(value))) if key == 'name': num_tokens += tokens_per_name num_tokens += 3 return num_tokens
def get_num_tokens_from_messages(self, messages: list[BaseMessage]) ->int: """Calculate num tokens with tiktoken package. Official documentation: https://github.com/openai/openai-cookbook/blob/ main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb""" if sys.version_info[1] <= 7: return super().get_num_tokens_from_messages(messages) model, encoding = self._get_encoding_model() tokens_per_message = 3 tokens_per_name = 1 num_tokens = 0 messages_dict = [convert_message_to_dict(m) for m in messages] for message in messages_dict: num_tokens += tokens_per_message for key, value in message.items(): num_tokens += len(encoding.encode(str(value))) if key == 'name': num_tokens += tokens_per_name num_tokens += 3 return num_tokens
Calculate num tokens with tiktoken package. Official documentation: https://github.com/openai/openai-cookbook/blob/ main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb
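To make the bookkeeping concrete with invented numbers: with tokens_per_message = 3 and the final reply-priming constant of 3, a two-message conversation whose role/content values encode to 1 + 5 and 1 + 12 tokens respectively counts as (3 + 1 + 5) + (3 + 1 + 12) + 3 = 28 tokens; any message that also carries a name field adds tokens_per_name = 1 on top.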
test_promptlayer_openai_chat_stop_valid
"""Test promptlayer openai stop logic on valid configuration.""" query = 'write an ordered list of five items' first_llm = PromptLayerOpenAIChat(stop='3', temperature=0) first_output = first_llm(query) second_llm = PromptLayerOpenAIChat(temperature=0) second_output = second_llm(query, stop=['3']) assert first_output == second_output
def test_promptlayer_openai_chat_stop_valid() ->None: """Test promptlayer openai stop logic on valid configuration.""" query = 'write an ordered list of five items' first_llm = PromptLayerOpenAIChat(stop='3', temperature=0) first_output = first_llm(query) second_llm = PromptLayerOpenAIChat(temperature=0) second_output = second_llm(query, stop=['3']) assert first_output == second_output
Test promptlayer openai stop logic on valid configuration.
test_json_spec_from_file
"""Test JsonSpec can be constructed from a file.""" path = tmp_path / 'test.json' path.write_text('{"foo": "bar"}') spec = JsonSpec.from_file(path) assert spec.dict_ == {'foo': 'bar'}
def test_json_spec_from_file(tmp_path: Path) ->None: """Test JsonSpec can be constructed from a file.""" path = tmp_path / 'test.json' path.write_text('{"foo": "bar"}') spec = JsonSpec.from_file(path) assert spec.dict_ == {'foo': 'bar'}
Test JsonSpec can be constructed from a file.
_invocation_params
params = {**self._default_params, **kwargs} if stop is not None: params['stop'] = stop if params.get('stream'): params['incremental_output'] = True message_dicts = [convert_message_to_dict(m) for m in messages] if message_dicts[-1]['role'] != 'user': raise ValueError('Last message should be user message.') system_message_indices = [i for i, m in enumerate(message_dicts) if m[ 'role'] == 'system'] if len(system_message_indices) != 1 or system_message_indices[0] != 0: raise ValueError('System message can only be the first message.') params['messages'] = message_dicts return params
def _invocation_params(self, messages: List[BaseMessage], stop: Any, ** kwargs: Any) ->Dict[str, Any]: params = {**self._default_params, **kwargs} if stop is not None: params['stop'] = stop if params.get('stream'): params['incremental_output'] = True message_dicts = [convert_message_to_dict(m) for m in messages] if message_dicts[-1]['role'] != 'user': raise ValueError('Last message should be user message.') system_message_indices = [i for i, m in enumerate(message_dicts) if m[ 'role'] == 'system'] if len(system_message_indices) != 1 or system_message_indices[0] != 0: raise ValueError('System message can only be the first message.') params['messages'] = message_dicts return params
null
embeddings
return self._embeddings
@property def embeddings(self) ->Embeddings: return self._embeddings
null
_build_istr
ks = ','.join(column_names) _data = [] for n in transac: n = ','.join([f"'{self.escape_str(str(_n))}'" for _n in n]) _data.append(f'({n})') i_str = f""" INSERT INTO TABLE {self.config.database}.{self.config.table}({ks}) VALUES {','.join(_data)} """ return i_str
def _build_istr(self, transac: Iterable, column_names: Iterable[str]) ->str: ks = ','.join(column_names) _data = [] for n in transac: n = ','.join([f"'{self.escape_str(str(_n))}'" for _n in n]) _data.append(f'({n})') i_str = f""" INSERT INTO TABLE {self.config.database}.{self.config.table}({ks}) VALUES {','.join(_data)} """ return i_str
null
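As a rough illustration (the database, table and row values are invented), calling this with column_names = ['id', 'text'] and transac = [(1, 'foo'), (2, 'bar')] would produce an INSERT of the form:

INSERT INTO TABLE mydb.mytable(id,text) VALUES ('1','foo'),('2','bar')

Every value is stringified, passed through escape_str and single-quoted regardless of its original type, and the database and table names come from self.config.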
test_exception_handling_str
expected = 'foo bar' _tool = _FakeExceptionTool(handle_tool_error=expected) actual = _tool.run({}) assert expected == actual
def test_exception_handling_str() ->None: expected = 'foo bar' _tool = _FakeExceptionTool(handle_tool_error=expected) actual = _tool.run({}) assert expected == actual
null
results
results = self._search_api_results(query, **kwargs) return results
def results(self, query: str, **kwargs: Any) ->dict: results = self._search_api_results(query, **kwargs) return results
null
InputType
return self.runnable.InputType
@property def InputType(self) ->Type[Input]: return self.runnable.InputType
null
save
"""Raise error - saving not supported for Agent Executors.""" raise ValueError( 'Saving not supported for agent executors. If you are trying to save the agent, please use the `.save_agent(...)`' )
def save(self, file_path: Union[Path, str]) ->None: """Raise error - saving not supported for Agent Executors.""" raise ValueError( 'Saving not supported for agent executors. If you are trying to save the agent, please use the `.save_agent(...)`' )
Raise error - saving not supported for Agent Executors.
concatenate_rows
"""Combine message information in a readable format ready to be used.""" return f'{sender} on {date}: {text}\n\n'
def concatenate_rows(date: str, sender: str, text: str) ->str: """Combine message information in a readable format ready to be used.""" return f'{sender} on {date}: {text}\n\n'
Combine message information in a readable format ready to be used.
test_test_group_dependencies
"""Check if someone is attempting to add additional test dependencies. Only dependencies associated with test running infrastructure should be added to the test group; e.g., pytest, pytest-cov etc. Examples of dependencies that should NOT be included: boto3, azure, postgres, etc. """ test_group_deps = sorted(poetry_conf['group']['test']['dependencies']) assert test_group_deps == sorted(['duckdb-engine', 'freezegun', 'langchain-core', 'lark', 'pandas', 'pytest', 'pytest-asyncio', 'pytest-cov', 'pytest-dotenv', 'pytest-mock', 'pytest-socket', 'pytest-watcher', 'responses', 'syrupy', 'requests-mock'])
def test_test_group_dependencies(poetry_conf: Mapping[str, Any]) ->None: """Check if someone is attempting to add additional test dependencies. Only dependencies associated with test running infrastructure should be added to the test group; e.g., pytest, pytest-cov etc. Examples of dependencies that should NOT be included: boto3, azure, postgres, etc. """ test_group_deps = sorted(poetry_conf['group']['test']['dependencies']) assert test_group_deps == sorted(['duckdb-engine', 'freezegun', 'langchain-core', 'lark', 'pandas', 'pytest', 'pytest-asyncio', 'pytest-cov', 'pytest-dotenv', 'pytest-mock', 'pytest-socket', 'pytest-watcher', 'responses', 'syrupy', 'requests-mock'])
Check if someone is attempting to add additional test dependencies. Only dependencies associated with test running infrastructure should be added to the test group; e.g., pytest, pytest-cov etc. Examples of dependencies that should NOT be included: boto3, azure, postgres, etc.
_convert_chunk_to_message_message
data = json.loads(chunk.encode('utf-8')) return AIMessageChunk(content=data.get('response', ''))
def _convert_chunk_to_message_message(self, chunk: str) ->AIMessageChunk: data = json.loads(chunk.encode('utf-8')) return AIMessageChunk(content=data.get('response', ''))
null
load_embedding_model
"""Load the embedding model.""" if not instruct: import sentence_transformers client = sentence_transformers.SentenceTransformer(model_id) else: from InstructorEmbedding import INSTRUCTOR client = INSTRUCTOR(model_id) if importlib.util.find_spec('torch') is not None: import torch cuda_device_count = torch.cuda.device_count() if device < -1 or device >= cuda_device_count: raise ValueError( f'Got device=={device}, device is required to be within [-1, {cuda_device_count})' ) if device < 0 and cuda_device_count > 0: logger.warning( 'Device has %d GPUs available. Provide device={deviceId} to `from_model_id` to use availableGPUs for execution. deviceId is -1 for CPU and can be a positive integer associated with CUDA device id.' , cuda_device_count) client = client.to(device) return client
def load_embedding_model(model_id: str, instruct: bool=False, device: int=0 ) ->Any: """Load the embedding model.""" if not instruct: import sentence_transformers client = sentence_transformers.SentenceTransformer(model_id) else: from InstructorEmbedding import INSTRUCTOR client = INSTRUCTOR(model_id) if importlib.util.find_spec('torch') is not None: import torch cuda_device_count = torch.cuda.device_count() if device < -1 or device >= cuda_device_count: raise ValueError( f'Got device=={device}, device is required to be within [-1, {cuda_device_count})' ) if device < 0 and cuda_device_count > 0: logger.warning( 'Device has %d GPUs available. Provide device={deviceId} to `from_model_id` to use availableGPUs for execution. deviceId is -1 for CPU and can be a positive integer associated with CUDA device id.' , cuda_device_count) client = client.to(device) return client
Load the embedding model.
test_hub_runnable_configurable_fields
mock_pull.side_effect = repo_lookup original: HubRunnable = HubRunnable('efriis/my-prompt-1') obj_configurable = original.configurable_fields(owner_repo_commit= ConfigurableField(id='owner_repo_commit', name='Hub ID')) templated_1 = obj_configurable.invoke({}) assert templated_1.messages[1].content == '1' templated_2 = obj_configurable.with_config(configurable={ 'owner_repo_commit': 'efriis/my-prompt-2'}).invoke({}) assert templated_2.messages[1].content == '2'
@patch('langchain.hub.pull') def test_hub_runnable_configurable_fields(mock_pull: Mock) ->None: mock_pull.side_effect = repo_lookup original: HubRunnable = HubRunnable('efriis/my-prompt-1') obj_configurable = original.configurable_fields(owner_repo_commit= ConfigurableField(id='owner_repo_commit', name='Hub ID')) templated_1 = obj_configurable.invoke({}) assert templated_1.messages[1].content == '1' templated_2 = obj_configurable.with_config(configurable={ 'owner_repo_commit': 'efriis/my-prompt-2'}).invoke({}) assert templated_2.messages[1].content == '2'
null
run_no_throw
"""Execute a SQL command and return a string representing the results. If the statement returns rows, a string of the results is returned. If the statement returns no rows, an empty string is returned. If the statement throws an error, the error message is returned. """ try: return self.run(command, fetch) except Exception as e: """Format the error message""" return f'Error: {e}'
def run_no_throw(self, command: str, fetch: str='all') ->str: """Execute a SQL command and return a string representing the results. If the statement returns rows, a string of the results is returned. If the statement returns no rows, an empty string is returned. If the statement throws an error, the error message is returned. """ try: return self.run(command, fetch) except Exception as e: """Format the error message""" return f'Error: {e}'
Execute a SQL command and return a string representing the results. If the statement returns rows, a string of the results is returned. If the statement returns no rows, an empty string is returned. If the statement throws an error, the error message is returned.
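A brief usage sketch (the query string and the db object are illustrative):

result = db.run_no_throw('SELECT count(*) FROM users')
# On success this is the stringified result rows; on any exception it is a
# string beginning with 'Error: ' rather than a raised exception.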
test_blob_initialized_with_binary_data
"""Test reading blob IO if blob content hasn't been read yet.""" data = b'Hello, World!' blob = Blob(data=data) assert blob.as_string() == 'Hello, World!' assert blob.as_bytes() == data assert blob.source is None with blob.as_bytes_io() as bytes_io: assert bytes_io.read() == data
def test_blob_initialized_with_binary_data() ->None: """Test reading blob IO if blob content hasn't been read yet.""" data = b'Hello, World!' blob = Blob(data=data) assert blob.as_string() == 'Hello, World!' assert blob.as_bytes() == data assert blob.source is None with blob.as_bytes_io() as bytes_io: assert bytes_io.read() == data
Test reading blob IO if blob content hasn't been read yet.
test_batch
"""Test batch tokens from Chat__ModuleName__.""" llm = Chat__ModuleName__() result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"]) for token in result: assert isinstance(token.content, str)
def test_batch() ->None: """Test batch tokens from Chat__ModuleName__.""" llm = Chat__ModuleName__() result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"]) for token in result: assert isinstance(token.content, str)
Test batch tokens from Chat__ModuleName__.
_type
return 'self_ask'
@property def _type(self) ->str: return 'self_ask'
null
similarity_search
""" Return docs most similar to query. """ if self.embedding_func is None: raise ValueError('embedding_func is None!!!') embeddings = self.embedding_func.embed_query(query) docs = self.similarity_search_by_vector(embeddings, k) return docs
def similarity_search(self, query: str, k: int=DEFAULT_TOPN, **kwargs: Any ) ->List[Document]: """ Return docs most similar to query. """ if self.embedding_func is None: raise ValueError('embedding_func is None!!!') embeddings = self.embedding_func.embed_query(query) docs = self.similarity_search_by_vector(embeddings, k) return docs
Return docs most similar to query.
load
"""Makes a call to Cube's REST API metadata endpoint. Returns: A list of documents with attributes: - page_content=column_title + column_description - metadata - table_name - column_name - column_data_type - column_member_type - column_title - column_description - column_values - cube_data_obj_type """ headers = {'Content-Type': 'application/json', 'Authorization': self. cube_api_token} logger.info(f'Loading metadata from {self.cube_api_url}...') response = requests.get(f'{self.cube_api_url}/meta', headers=headers) response.raise_for_status() raw_meta_json = response.json() cube_data_objects = raw_meta_json.get('cubes', []) logger.info(f'Found {len(cube_data_objects)} cube data objects in metadata.') if not cube_data_objects: raise ValueError('No cubes found in metadata.') docs = [] for cube_data_obj in cube_data_objects: cube_data_obj_name = cube_data_obj.get('name') cube_data_obj_type = cube_data_obj.get('type') cube_data_obj_is_public = cube_data_obj.get('public') measures = cube_data_obj.get('measures', []) dimensions = cube_data_obj.get('dimensions', []) logger.info(f'Processing {cube_data_obj_name}...') if not cube_data_obj_is_public: logger.info(f'Skipping {cube_data_obj_name} because it is not public.') continue for item in (measures + dimensions): column_member_type = 'measure' if item in measures else 'dimension' dimension_values = [] item_name = str(item.get('name')) item_type = str(item.get('type')) if (self.load_dimension_values and column_member_type == 'dimension' and item_type == 'string'): dimension_values = self._get_dimension_values(item_name) metadata = dict(table_name=str(cube_data_obj_name), column_name= item_name, column_data_type=item_type, column_title=str(item. get('title')), column_description=str(item.get('description')), column_member_type=column_member_type, column_values= dimension_values, cube_data_obj_type=cube_data_obj_type) page_content = f"{str(item.get('title'))}, " page_content += f"{str(item.get('description'))}" docs.append(Document(page_content=page_content, metadata=metadata)) return docs
def load(self) ->List[Document]: """Makes a call to Cube's REST API metadata endpoint. Returns: A list of documents with attributes: - page_content=column_title + column_description - metadata - table_name - column_name - column_data_type - column_member_type - column_title - column_description - column_values - cube_data_obj_type """ headers = {'Content-Type': 'application/json', 'Authorization': self. cube_api_token} logger.info(f'Loading metadata from {self.cube_api_url}...') response = requests.get(f'{self.cube_api_url}/meta', headers=headers) response.raise_for_status() raw_meta_json = response.json() cube_data_objects = raw_meta_json.get('cubes', []) logger.info( f'Found {len(cube_data_objects)} cube data objects in metadata.') if not cube_data_objects: raise ValueError('No cubes found in metadata.') docs = [] for cube_data_obj in cube_data_objects: cube_data_obj_name = cube_data_obj.get('name') cube_data_obj_type = cube_data_obj.get('type') cube_data_obj_is_public = cube_data_obj.get('public') measures = cube_data_obj.get('measures', []) dimensions = cube_data_obj.get('dimensions', []) logger.info(f'Processing {cube_data_obj_name}...') if not cube_data_obj_is_public: logger.info( f'Skipping {cube_data_obj_name} because it is not public.') continue for item in (measures + dimensions): column_member_type = 'measure' if item in measures else 'dimension' dimension_values = [] item_name = str(item.get('name')) item_type = str(item.get('type')) if (self.load_dimension_values and column_member_type == 'dimension' and item_type == 'string'): dimension_values = self._get_dimension_values(item_name) metadata = dict(table_name=str(cube_data_obj_name), column_name =item_name, column_data_type=item_type, column_title=str( item.get('title')), column_description=str(item.get( 'description')), column_member_type=column_member_type, column_values=dimension_values, cube_data_obj_type= cube_data_obj_type) page_content = f"{str(item.get('title'))}, " page_content += f"{str(item.get('description'))}" docs.append(Document(page_content=page_content, metadata=metadata)) return docs
Makes a call to Cube's REST API metadata endpoint. Returns: A list of documents with attributes: - page_content=column_title + column_description - metadata - table_name - column_name - column_data_type - column_member_type - column_title - column_description - column_values - cube_data_obj_type
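A hedged instantiation sketch, assuming this load() belongs to a loader class that exposes cube_api_url, cube_api_token and load_dimension_values as constructor arguments (the class name, URL and token below are placeholders):

loader = CubeSemanticLoader(
    cube_api_url='https://example.cubecloud.dev/cubejs-api/v1',
    cube_api_token='<CUBE_API_TOKEN>',
    load_dimension_values=True,
)
docs = loader.load()  # one Document per public measure/dimension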
_invocation_params
"""Get the parameters used to invoke the model.""" openai_creds: Dict[str, Any] = {'api_key': cast(SecretStr, self. anyscale_api_key).get_secret_value(), 'api_base': self.anyscale_api_base} return {**openai_creds, **{'model': self.model_name}, **super()._default_params }
@property def _invocation_params(self) ->Dict[str, Any]: """Get the parameters used to invoke the model.""" openai_creds: Dict[str, Any] = {'api_key': cast(SecretStr, self. anyscale_api_key).get_secret_value(), 'api_base': self. anyscale_api_base} return {**openai_creds, **{'model': self.model_name}, **super(). _default_params}
Get the parameters used to invoke the model.
test_add_documents_with_ids
ids = [uuid.uuid4().hex for _ in range(len(texts))] Pinecone.from_texts(texts=texts, ids=ids, embedding=embedding_openai, index_name=index_name, namespace=index_name) index_stats = self.index.describe_index_stats() assert index_stats['namespaces'][index_name]['vector_count'] == len(texts) ids_1 = [uuid.uuid4().hex for _ in range(len(texts))] Pinecone.from_texts(texts=texts, ids=ids_1, embedding=embedding_openai, index_name=index_name, namespace=index_name) index_stats = self.index.describe_index_stats() assert index_stats['namespaces'][index_name]['vector_count'] == len(texts) * 2 assert index_stats['total_vector_count'] == len(texts) * 2
def test_add_documents_with_ids(self, texts: List[str], embedding_openai: OpenAIEmbeddings) ->None: ids = [uuid.uuid4().hex for _ in range(len(texts))] Pinecone.from_texts(texts=texts, ids=ids, embedding=embedding_openai, index_name=index_name, namespace=index_name) index_stats = self.index.describe_index_stats() assert index_stats['namespaces'][index_name]['vector_count'] == len(texts) ids_1 = [uuid.uuid4().hex for _ in range(len(texts))] Pinecone.from_texts(texts=texts, ids=ids_1, embedding=embedding_openai, index_name=index_name, namespace=index_name) index_stats = self.index.describe_index_stats() assert index_stats['namespaces'][index_name]['vector_count'] == len(texts ) * 2 assert index_stats['total_vector_count'] == len(texts) * 2
null
set_model
"""Set the model used for embedding. The default model used is all-mpnet-base-v2 Args: model_name: A string which represents the name of model. """ self.model = model_name self.client.model_name = model_name
def set_model(self, model_name: str) ->None: """Set the model used for embedding. The default model used is all-mpnet-base-v2 Args: model_name: A string which represents the name of model. """ self.model = model_name self.client.model_name = model_name
Set the model used for embedding. The default model used is all-mpnet-base-v2 Args: model_name: A string which represents the name of model.
on_chat_model_start
self.messages = [_convert_message_to_dict(message) for message in messages[0]] self.prompt = self.messages[-1]['content']
def on_chat_model_start(self, serialized: Dict[str, Any], messages: List[ List[BaseMessage]], **kwargs: Any) ->None: self.messages = [_convert_message_to_dict(message) for message in messages[0]] self.prompt = self.messages[-1]['content']
null
test_openai_embedding_with_empty_string
"""Test openai embeddings with empty string.""" import openai document = ['', 'abc'] embedding = OpenAIEmbeddings() output = embedding.embed_documents(document) assert len(output) == 2 assert len(output[0]) == 1536 expected_output = openai.Embedding.create(input='', model= 'text-embedding-ada-002')['data'][0]['embedding'] assert np.allclose(output[0], expected_output) assert len(output[1]) == 1536
@pytest.mark.skip(reason='Unblock scheduled testing. TODO: fix.') @pytest.mark.scheduled def test_openai_embedding_with_empty_string() ->None: """Test openai embeddings with empty string.""" import openai document = ['', 'abc'] embedding = OpenAIEmbeddings() output = embedding.embed_documents(document) assert len(output) == 2 assert len(output[0]) == 1536 expected_output = openai.Embedding.create(input='', model= 'text-embedding-ada-002')['data'][0]['embedding'] assert np.allclose(output[0], expected_output) assert len(output[1]) == 1536
Test openai embeddings with empty string.
test_saving_loading_round_trip
"""Test saving/loading a Fake LLM.""" fake_llm = FakeLLM() fake_llm.save(file_path=tmp_path / 'fake_llm.yaml') loaded_llm = load_llm(tmp_path / 'fake_llm.yaml') assert loaded_llm == fake_llm
@patch('langchain_community.llms.loading.get_type_to_cls_dict', lambda : { 'fake': lambda : FakeLLM}) def test_saving_loading_round_trip(tmp_path: Path) ->None: """Test saving/loading a Fake LLM.""" fake_llm = FakeLLM() fake_llm.save(file_path=tmp_path / 'fake_llm.yaml') loaded_llm = load_llm(tmp_path / 'fake_llm.yaml') assert loaded_llm == fake_llm
Test saving/loading a Fake LLM.
initialize
""" Initialize a vector store with a set of documents. By default, the documents will be compatible with the default metadata field info. You can override these defaults by passing in your own values. :param embeddings: an Embeddings to use for generating queries :param collection_name: name of the Qdrant collection to use :param documents: a list of documents to initialize the vector store with :return: """ embeddings = embeddings or OpenAIEmbeddings() Qdrant.from_documents(documents, embedding=embeddings, collection_name= collection_name)
def initialize(embeddings: Optional[Embeddings]=None, collection_name: str= defaults.DEFAULT_COLLECTION_NAME, documents: List[Document]=defaults. DEFAULT_DOCUMENTS): """ Initialize a vector store with a set of documents. By default, the documents will be compatible with the default metadata field info. You can override these defaults by passing in your own values. :param embeddings: an Embeddings to use for generating queries :param collection_name: name of the Qdrant collection to use :param documents: a list of documents to initialize the vector store with :return: """ embeddings = embeddings or OpenAIEmbeddings() Qdrant.from_documents(documents, embedding=embeddings, collection_name= collection_name)
Initialize a vector store with a set of documents. By default, the documents will be compatible with the default metadata field info. You can override these defaults by passing in your own values. :param embeddings: an Embeddings to use for generating queries :param collection_name: name of the Qdrant collection to use :param documents: a list of documents to initialize the vector store with :return:
test_invoke
"""Test invoke tokens from Chat__ModuleName__.""" llm = Chat__ModuleName__() result = llm.invoke("I'm Pickle Rick", config=dict(tags=['foo'])) assert isinstance(result.content, str)
def test_invoke() ->None: """Test invoke tokens from Chat__ModuleName__.""" llm = Chat__ModuleName__() result = llm.invoke("I'm Pickle Rick", config=dict(tags=['foo'])) assert isinstance(result.content, str)
Test invoke tokens from Chat__ModuleName__.