Dataset schema:
    method_name: string, length 1 to 78
    method_body: string, length 3 to 9.66k
    full_code: string, length 31 to 10.7k
    docstring: string, length 4 to 4.74k (nullable)
get_docs_message
def get_docs_message(message):
    docs = rag.get_relevant_documents(message)
    message_doc = next((x for x in docs if x.metadata.get('type') == 'model_response'), None)
    # Return None when no 'model_response' document is retrieved, instead of dereferencing None
    return message_doc.page_content if message_doc is not None else None
null
api_client
@pytest.fixture
def api_client() -> OutlineAPIWrapper:
    return OutlineAPIWrapper(outline_api_key='api_key', outline_instance_url=OUTLINE_INSTANCE_TEST_URL)
null
_llm_type
"""Return type of llm.""" return 'fireworks'
@property def _llm_type(self) ->str: """Return type of llm.""" return 'fireworks'
Return type of llm.
_log_stream
def _log_stream(self, prompt: str, metadata: dict, step: int) -> None:
    self.experiment.log_text(prompt, metadata=metadata, step=step)
null
test_from_documents_cosine_distance
"""Test end to end construction and search.""" documents = [Document(page_content='Dogs are tough.', metadata={'a': 1}), Document(page_content='Cats have fluff.', metadata={'b': 1}), Document( page_content='What is a sandwich?', metadata={'c': 1}), Document( page_content='That fence is purple.', metadata={'d': 1, 'e': 2})] vectorstore = AzureCosmosDBVectorSearch.from_documents(documents, azure_openai_embeddings, collection=collection, index_name=INDEX_NAME) sleep(1) vectorstore.create_index(num_lists, dimensions, similarity_algorithm) sleep(2) output = vectorstore.similarity_search('Sandwich', k=1) assert output assert output[0].page_content == 'What is a sandwich?' assert output[0].metadata['c'] == 1 vectorstore.delete_index()
def test_from_documents_cosine_distance(self, azure_openai_embeddings: OpenAIEmbeddings, collection: Any) ->None: """Test end to end construction and search.""" documents = [Document(page_content='Dogs are tough.', metadata={'a': 1} ), Document(page_content='Cats have fluff.', metadata={'b': 1}), Document(page_content='What is a sandwich?', metadata={'c': 1}), Document(page_content='That fence is purple.', metadata={'d': 1, 'e': 2})] vectorstore = AzureCosmosDBVectorSearch.from_documents(documents, azure_openai_embeddings, collection=collection, index_name=INDEX_NAME) sleep(1) vectorstore.create_index(num_lists, dimensions, similarity_algorithm) sleep(2) output = vectorstore.similarity_search('Sandwich', k=1) assert output assert output[0].page_content == 'What is a sandwich?' assert output[0].metadata['c'] == 1 vectorstore.delete_index()
Test end to end construction and search.
identify
"""Builds an LLMonitor UserContextManager Parameters: - `user_id`: The user id. - `user_props`: The user properties. Returns: A context manager that sets the user context. """ return UserContextManager(user_id, user_props)
def identify(user_id: str, user_props: Any=None) ->UserContextManager: """Builds an LLMonitor UserContextManager Parameters: - `user_id`: The user id. - `user_props`: The user properties. Returns: A context manager that sets the user context. """ return UserContextManager(user_id, user_props)
Builds an LLMonitor UserContextManager Parameters: - `user_id`: The user id. - `user_props`: The user properties. Returns: A context manager that sets the user context.
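By way of illustration, a hedged usage sketch for the function above; the import path and user details are assumptions, and `chain` stands in for any Runnable wired to the LLMonitor callback:

    from langchain_community.callbacks.llmonitor_callback import identify  # import path may differ by version

    with identify('user-123', user_props={'email': 'user@example.com'}):
        chain.invoke({'input': 'hello'})  # calls inside the block are attributed to user-123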
message_to_dict
"""Convert a Message to a dictionary. Args: message: Message to convert. Returns: Message as a dict. """ return {'type': message.type, 'data': message.dict()}
def message_to_dict(message: BaseMessage) ->dict: """Convert a Message to a dictionary. Args: message: Message to convert. Returns: Message as a dict. """ return {'type': message.type, 'data': message.dict()}
Convert a Message to a dictionary. Args: message: Message to convert. Returns: Message as a dict.
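A quick round-trip check of the conversion above (the exact keys inside 'data' vary by langchain-core version):

    from langchain_core.messages import HumanMessage

    d = message_to_dict(HumanMessage(content='hi'))
    assert d['type'] == 'human'          # the message class determines the 'type' tag
    assert d['data']['content'] == 'hi'  # .dict() serializes the message fields under 'data'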
_create_collection
"""Creates a collection for this message history""" self.client.Collections.create_s3_collection(name=self.collection, workspace=self.workspace)
def _create_collection(self) ->None: """Creates a collection for this message history""" self.client.Collections.create_s3_collection(name=self.collection, workspace=self.workspace)
Creates a collection for this message history
_get_trace_callbacks
def _get_trace_callbacks(project_name: Optional[str] = None, example_id: Optional[Union[str, UUID]] = None,
                         callback_manager: Optional[Union[CallbackManager, AsyncCallbackManager]] = None) -> Callbacks:
    if _tracing_v2_is_enabled():
        project_name_ = project_name or _get_tracer_project()
        tracer = tracing_v2_callback_var.get() or LangChainTracer(project_name=project_name_, example_id=example_id)
        if callback_manager is None:
            from langchain_core.callbacks.base import Callbacks
            cb = cast(Callbacks, [tracer])
        else:
            if not any(isinstance(handler, LangChainTracer) for handler in callback_manager.handlers):
                callback_manager.add_handler(tracer, True)
            cb = callback_manager
    else:
        cb = None
    return cb
null
_get_firestore_client
def _get_firestore_client() -> Client:
    try:
        import firebase_admin
        from firebase_admin import firestore
    except ImportError:
        raise ImportError(
            'Could not import firebase-admin python package. Please install it with `pip install firebase-admin`.')
    try:
        firebase_admin.get_app()
    except ValueError as e:
        logger.debug('Initializing Firebase app: %s', e)
        firebase_admin.initialize_app()
    return firestore.client()
null
test_structured_tool_lambda_multi_args_schema
"""Test args schema inference when the tool argument is a lambda function.""" tool = StructuredTool.from_function(name='tool', description='A tool', func =lambda tool_input, other_arg: f'{tool_input}{other_arg}') assert tool.args_schema is not None expected_args = {'tool_input': {'title': 'Tool Input'}, 'other_arg': { 'title': 'Other Arg'}} assert tool.args == expected_args
def test_structured_tool_lambda_multi_args_schema() ->None: """Test args schema inference when the tool argument is a lambda function.""" tool = StructuredTool.from_function(name='tool', description='A tool', func=lambda tool_input, other_arg: f'{tool_input}{other_arg}') assert tool.args_schema is not None expected_args = {'tool_input': {'title': 'Tool Input'}, 'other_arg': { 'title': 'Other Arg'}} assert tool.args == expected_args
Test args schema inference when the tool argument is a lambda function.
add_texts
"""Add texts to the index. Only support direct-access index. Args: texts: List of texts to add. metadatas: List of metadata for each text. Defaults to None. ids: List of ids for each text. Defaults to None. If not provided, a random uuid will be generated for each text. Returns: List of ids from adding the texts into the index. """ self._op_require_direct_access_index('add_texts') assert self.embeddings is not None, 'embedding model is required.' if isinstance(texts, str): texts = [texts] texts = list(texts) vectors = self.embeddings.embed_documents(texts) ids = ids or [str(uuid.uuid4()) for _ in texts] metadatas = metadatas or [{} for _ in texts] updates = [{self.primary_key: id_, self.text_column: text, self. _embedding_vector_column_name(): vector, **metadata} for text, vector, id_, metadata in zip(texts, vectors, ids, metadatas)] upsert_resp = self.index.upsert(updates) if upsert_resp.get('status') in ('PARTIAL_SUCCESS', 'FAILURE'): failed_ids = upsert_resp.get('result', dict()).get('failed_primary_keys', []) if upsert_resp.get('status') == 'FAILURE': logger.error('Failed to add texts to the index.') else: logger.warning('Some texts failed to be added to the index.') return [id_ for id_ in ids if id_ not in failed_ids] return ids
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]= None, ids: Optional[List[Any]]=None, **kwargs: Any) ->List[str]: """Add texts to the index. Only support direct-access index. Args: texts: List of texts to add. metadatas: List of metadata for each text. Defaults to None. ids: List of ids for each text. Defaults to None. If not provided, a random uuid will be generated for each text. Returns: List of ids from adding the texts into the index. """ self._op_require_direct_access_index('add_texts') assert self.embeddings is not None, 'embedding model is required.' if isinstance(texts, str): texts = [texts] texts = list(texts) vectors = self.embeddings.embed_documents(texts) ids = ids or [str(uuid.uuid4()) for _ in texts] metadatas = metadatas or [{} for _ in texts] updates = [{self.primary_key: id_, self.text_column: text, self. _embedding_vector_column_name(): vector, **metadata} for text, vector, id_, metadata in zip(texts, vectors, ids, metadatas)] upsert_resp = self.index.upsert(updates) if upsert_resp.get('status') in ('PARTIAL_SUCCESS', 'FAILURE'): failed_ids = upsert_resp.get('result', dict()).get( 'failed_primary_keys', []) if upsert_resp.get('status') == 'FAILURE': logger.error('Failed to add texts to the index.') else: logger.warning('Some texts failed to be added to the index.') return [id_ for id_ in ids if id_ not in failed_ids] return ids
Add texts to the index. Only support direct-access index. Args: texts: List of texts to add. metadatas: List of metadata for each text. Defaults to None. ids: List of ids for each text. Defaults to None. If not provided, a random uuid will be generated for each text. Returns: List of ids from adding the texts into the index.
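A hedged usage sketch of the method above; `dvs` is assumed to be a DatabricksVectorSearch instance bound to a direct-access index:

    new_ids = dvs.add_texts(['alpha', 'beta'], metadatas=[{'src': 'a'}, {'src': 'b'}])
    # One id per text is returned; on PARTIAL_SUCCESS the failed primary keys are filtered out.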
test_parse_issue
def test_parse_issue() -> None:
    issue = {'html_url': 'https://github.com/repo/issue/1', 'title': 'Example Issue 1',
             'user': {'login': 'username1'}, 'created_at': '2023-01-01T00:00:00Z',
             'comments': 1, 'state': 'open', 'labels': [{'name': 'bug'}],
             'assignee': {'login': 'username2'}, 'milestone': {'title': 'v1.0'},
             'locked': 'False', 'number': '1', 'body': 'This is an example issue 1'}
    expected_document = Document(page_content=issue['body'], metadata={
        'url': issue['html_url'], 'title': issue['title'], 'creator': issue['user']['login'],
        'created_at': issue['created_at'], 'comments': issue['comments'], 'state': issue['state'],
        'labels': [label['name'] for label in issue['labels']],
        'assignee': issue['assignee']['login'], 'milestone': issue['milestone']['title'],
        'locked': issue['locked'], 'number': issue['number'], 'is_pull_request': False})
    loader = GitHubIssuesLoader(repo='repo', access_token='access_token')
    document = loader.parse_issue(issue)
    assert document == expected_document
null
test_create_chat_prompt_template_from_template
"""Create a chat prompt template.""" prompt = ChatPromptTemplate.from_template('hi {foo} {bar}') assert prompt.messages == [HumanMessagePromptTemplate.from_template( 'hi {foo} {bar}')]
def test_create_chat_prompt_template_from_template() ->None: """Create a chat prompt template.""" prompt = ChatPromptTemplate.from_template('hi {foo} {bar}') assert prompt.messages == [HumanMessagePromptTemplate.from_template( 'hi {foo} {bar}')]
Create a chat prompt template.
test_load_success
def test_load_success() -> None:
    loader = WikipediaLoader(query='HUNTER X HUNTER')
    docs = loader.load()
    assert len(docs) > 1
    assert len(docs) <= 25
    assert_docs(docs, all_meta=False)
null
_build_framework_chunk
def _build_framework_chunk(dg_chunk: Chunk) -> Document:
    _hashed_id = hashlib.md5(dg_chunk.text.encode()).hexdigest()
    metadata = {XPATH_KEY: dg_chunk.xpath, ID_KEY: _hashed_id,
                DOCUMENT_NAME_KEY: document_name, DOCUMENT_SOURCE_KEY: document_name,
                STRUCTURE_KEY: dg_chunk.structure, TAG_KEY: dg_chunk.tag}
    text = dg_chunk.text
    if additional_doc_metadata:
        if self.include_project_metadata_in_doc_metadata:
            metadata.update(additional_doc_metadata)
    return Document(page_content=text[:self.max_text_length], metadata=metadata)
null
test_ifixit_loader_answers
def test_ifixit_loader_answers() -> None:
    web_path = 'https://www.ifixit.com/Answers/View/318583/My+iPhone+6+is+typing+and+opening+apps+by+itself'
    loader = IFixitLoader(web_path)
    assert loader.page_type == 'Answers'
    assert loader.id == '318583'
null
_hamming_distance
"""Compute the Hamming distance between two vectors. Args: a (np.ndarray): The first vector. b (np.ndarray): The second vector. Returns: np.floating: The Hamming distance. """ return np.mean(a != b)
@staticmethod def _hamming_distance(a: np.ndarray, b: np.ndarray) ->np.floating: """Compute the Hamming distance between two vectors. Args: a (np.ndarray): The first vector. b (np.ndarray): The second vector. Returns: np.floating: The Hamming distance. """ return np.mean(a != b)
Compute the Hamming distance between two vectors. Args: a (np.ndarray): The first vector. b (np.ndarray): The second vector. Returns: np.floating: The Hamming distance.
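A worked instance of the formula above: np.mean(a != b) yields the fraction of mismatched positions (scipy's normalized Hamming convention), not the raw count:

    import numpy as np

    a = np.array([1, 0, 1, 1])
    b = np.array([1, 1, 1, 1])
    assert np.mean(a != b) == 0.25  # 1 mismatch out of 4 positions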
__init__
def __init__(self, account_address: str, api_key: str = 'docs-demo', filter: str = 'normal_transaction',
             page: int = 1, offset: int = 10, start_block: int = 0, end_block: int = 99999999, sort: str = 'desc'):
    self.account_address = account_address
    self.api_key = os.environ.get('ETHERSCAN_API_KEY') or api_key
    self.filter = filter
    self.page = page
    self.offset = offset
    self.start_block = start_block
    self.end_block = end_block
    self.sort = sort
    if not self.api_key:
        raise ValueError('Etherscan API key not provided')
    if not re.match('^0x[a-fA-F0-9]{40}$', self.account_address):
        raise ValueError(f'Invalid contract address {self.account_address}')
    if filter not in ['normal_transaction', 'internal_transaction', 'erc20_transaction',
                      'eth_balance', 'erc721_transaction', 'erc1155_transaction']:
        raise ValueError(f'Invalid filter {filter}')
null
from_params
"""Instantiate whylogs Logger from params. Args: api_key (Optional[str]): WhyLabs API key. Optional because the preferred way to specify the API key is with environment variable WHYLABS_API_KEY. org_id (Optional[str]): WhyLabs organization id to write profiles to. If not set must be specified in environment variable WHYLABS_DEFAULT_ORG_ID. dataset_id (Optional[str]): The model or dataset this callback is gathering telemetry for. If not set must be specified in environment variable WHYLABS_DEFAULT_DATASET_ID. sentiment (bool): If True will initialize a model to perform sentiment analysis compound score. Defaults to False and will not gather this metric. toxicity (bool): If True will initialize a model to score toxicity. Defaults to False and will not gather this metric. themes (bool): If True will initialize a model to calculate distance to configured themes. Defaults to None and will not gather this metric. logger (Optional[Logger]): If specified will bind the configured logger as the telemetry gathering agent. Defaults to LangKit schema with periodic WhyLabs writer. """ import_langkit(sentiment=sentiment, toxicity=toxicity, themes=themes) import whylogs as why from langkit.callback_handler import get_callback_instance from whylogs.api.writer.whylabs import WhyLabsWriter from whylogs.experimental.core.udf_schema import udf_schema if logger is None: api_key = api_key or get_from_env('api_key', 'WHYLABS_API_KEY') org_id = org_id or get_from_env('org_id', 'WHYLABS_DEFAULT_ORG_ID') dataset_id = dataset_id or get_from_env('dataset_id', 'WHYLABS_DEFAULT_DATASET_ID') whylabs_writer = WhyLabsWriter(api_key=api_key, org_id=org_id, dataset_id=dataset_id) whylabs_logger = why.logger(mode='rolling', interval=5, when='M', schema=udf_schema()) whylabs_logger.append_writer(writer=whylabs_writer) else: diagnostic_logger.info('Using passed in whylogs logger {logger}') whylabs_logger = logger callback_handler_cls = get_callback_instance(logger=whylabs_logger, impl=cls) diagnostic_logger.info( 'Started whylogs Logger with WhyLabsWriter and initialized LangKit. ๐Ÿ“') return callback_handler_cls
@classmethod def from_params(cls, *, api_key: Optional[str]=None, org_id: Optional[str]= None, dataset_id: Optional[str]=None, sentiment: bool=False, toxicity: bool=False, themes: bool=False, logger: Optional[Logger]=None ) ->WhyLabsCallbackHandler: """Instantiate whylogs Logger from params. Args: api_key (Optional[str]): WhyLabs API key. Optional because the preferred way to specify the API key is with environment variable WHYLABS_API_KEY. org_id (Optional[str]): WhyLabs organization id to write profiles to. If not set must be specified in environment variable WHYLABS_DEFAULT_ORG_ID. dataset_id (Optional[str]): The model or dataset this callback is gathering telemetry for. If not set must be specified in environment variable WHYLABS_DEFAULT_DATASET_ID. sentiment (bool): If True will initialize a model to perform sentiment analysis compound score. Defaults to False and will not gather this metric. toxicity (bool): If True will initialize a model to score toxicity. Defaults to False and will not gather this metric. themes (bool): If True will initialize a model to calculate distance to configured themes. Defaults to None and will not gather this metric. logger (Optional[Logger]): If specified will bind the configured logger as the telemetry gathering agent. Defaults to LangKit schema with periodic WhyLabs writer. """ import_langkit(sentiment=sentiment, toxicity=toxicity, themes=themes) import whylogs as why from langkit.callback_handler import get_callback_instance from whylogs.api.writer.whylabs import WhyLabsWriter from whylogs.experimental.core.udf_schema import udf_schema if logger is None: api_key = api_key or get_from_env('api_key', 'WHYLABS_API_KEY') org_id = org_id or get_from_env('org_id', 'WHYLABS_DEFAULT_ORG_ID') dataset_id = dataset_id or get_from_env('dataset_id', 'WHYLABS_DEFAULT_DATASET_ID') whylabs_writer = WhyLabsWriter(api_key=api_key, org_id=org_id, dataset_id=dataset_id) whylabs_logger = why.logger(mode='rolling', interval=5, when='M', schema=udf_schema()) whylabs_logger.append_writer(writer=whylabs_writer) else: diagnostic_logger.info('Using passed in whylogs logger {logger}') whylabs_logger = logger callback_handler_cls = get_callback_instance(logger=whylabs_logger, impl=cls) diagnostic_logger.info( 'Started whylogs Logger with WhyLabsWriter and initialized LangKit. ๐Ÿ“') return callback_handler_cls
Instantiate whylogs Logger from params. Args: api_key (Optional[str]): WhyLabs API key. Optional because the preferred way to specify the API key is with environment variable WHYLABS_API_KEY. org_id (Optional[str]): WhyLabs organization id to write profiles to. If not set must be specified in environment variable WHYLABS_DEFAULT_ORG_ID. dataset_id (Optional[str]): The model or dataset this callback is gathering telemetry for. If not set must be specified in environment variable WHYLABS_DEFAULT_DATASET_ID. sentiment (bool): If True will initialize a model to perform sentiment analysis compound score. Defaults to False and will not gather this metric. toxicity (bool): If True will initialize a model to score toxicity. Defaults to False and will not gather this metric. themes (bool): If True will initialize a model to calculate distance to configured themes. Defaults to None and will not gather this metric. logger (Optional[Logger]): If specified will bind the configured logger as the telemetry gathering agent. Defaults to LangKit schema with periodic WhyLabs writer.
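A hedged usage sketch for the constructor above; it assumes the WHYLABS_* environment variables are set and that langkit and whylogs are installed, and the OpenAI model is illustrative:

    handler = WhyLabsCallbackHandler.from_params(sentiment=True, toxicity=True)
    llm = OpenAI(callbacks=[handler])  # any LangChain LLM that accepts callbacks works here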
mock_create
def mock_create(*args: Any, **kwargs: Any) -> Any:
    nonlocal completed
    completed = True
    return mock_completion
null
_run
"""Run the tool.""" try: data = _parse_input(text) return self.requests_wrapper.put(_clean_url(data['url']), data['data']) except Exception as e: return repr(e)
def _run(self, text: str, run_manager: Optional[CallbackManagerForToolRun]=None ) ->str: """Run the tool.""" try: data = _parse_input(text) return self.requests_wrapper.put(_clean_url(data['url']), data['data']) except Exception as e: return repr(e)
Run the tool.
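For context, a sketch of the input this tool's _run expects, assuming _parse_input JSON-decodes a string with 'url' and 'data' keys (the URL is illustrative):

    tool._run('{"url": "https://example.com/items/1", "data": {"name": "renamed"}}')
    # On any exception the tool returns repr(e) instead of raising.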
on_llm_error
"""Do nothing when LLM outputs an error."""
def on_llm_error(self, error: BaseException, **kwargs: Any) ->None: """Do nothing when LLM outputs an error."""
Do nothing when LLM outputs an error.
on_tool_error_common
def on_tool_error_common(self) -> None:
    self.errors += 1
null
search_results
@pytest.fixture
def search_results() -> List['ZepDocument']:
    return [gen_mock_zep_document(collection_name='test_collection', embedding_dimensions=VECTOR_DIMS)
            for _ in range(2)]
null
test_visit_operation
def test_visit_operation() -> None:
    op = Operation(operator=Operator.AND, arguments=[
        Comparison(comparator=Comparator.LT, attribute='foo', value=2),
        Comparison(comparator=Comparator.EQ, attribute='bar', value='baz'),
        Comparison(comparator=Comparator.LT, attribute='abc', value=['1', '2'])])
    expected = "(metadata['foo'] < 2 and metadata['bar'] == 'baz' and (metadata['abc'] < 1 or metadata['abc'] < 2))"
    actual = DEFAULT_TRANSLATOR.visit_operation(op)
    assert expected == actual
null
_getter
def _getter(done: threading.Event, values: Values) -> Any:
    done.wait()
    return values[done]
null
messages
"""Retrieve the messages from MongoDB""" from pymongo import errors try: cursor = self.collection.find({'SessionId': self.session_id}) except errors.OperationFailure as error: logger.error(error) if cursor: items = [json.loads(document['History']) for document in cursor] else: items = [] messages = messages_from_dict(items) return messages
@property def messages(self) ->List[BaseMessage]: """Retrieve the messages from MongoDB""" from pymongo import errors try: cursor = self.collection.find({'SessionId': self.session_id}) except errors.OperationFailure as error: logger.error(error) if cursor: items = [json.loads(document['History']) for document in cursor] else: items = [] messages = messages_from_dict(items) return messages
Retrieve the messages from MongoDB
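A hedged usage sketch for the property above; the connection string and session id are illustrative:

    history = MongoDBChatMessageHistory(connection_string='mongodb://localhost:27017', session_id='s1')
    history.add_user_message('hi')
    print(history.messages)  # round-trips through the JSON-encoded 'History' field queried above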
_type
@property
def _type(self) -> str:
    return 'json_list'
null
validate_environment
"""Validate that api key and python package exists in environment.""" values['serp_api_key'] = convert_to_secret_str(get_from_dict_or_env(values, 'serp_api_key', 'SERPAPI_API_KEY')) try: from serpapi import SerpApiClient except ImportError: raise ImportError( 'google-search-results is not installed. Please install it with `pip install google-search-results>=2.4.2`' ) serp_search_engine = SerpApiClient values['serp_search_engine'] = serp_search_engine return values
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate that api key and python package exists in environment.""" values['serp_api_key'] = convert_to_secret_str(get_from_dict_or_env( values, 'serp_api_key', 'SERPAPI_API_KEY')) try: from serpapi import SerpApiClient except ImportError: raise ImportError( 'google-search-results is not installed. Please install it with `pip install google-search-results>=2.4.2`' ) serp_search_engine = SerpApiClient values['serp_search_engine'] = serp_search_engine return values
Validate that api key and python package exists in environment.
_embed
def _embed(self, texts: List[str]) -> List[List[float]]:
    resp = self.session.post(JINA_API_URL, json={'input': texts, 'model': self.model_name}).json()
    if 'data' not in resp:
        raise RuntimeError(resp['detail'])
    embeddings = resp['data']
    sorted_embeddings = sorted(embeddings, key=lambda e: e['index'])
    return [result['embedding'] for result in sorted_embeddings]
null
_identifying_params
@property
def _identifying_params(self) -> Dict[str, Any]:
    return {'key': 'fake'}
null
test_all_imports
def test_all_imports() -> None:
    assert set(__all__) == set(EXPECTED_ALL)
null
_stream
def _stream(self, messages: List[BaseMessage], stop: Optional[List[str]] = None,
            run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any) -> Iterator[ChatGenerationChunk]:
    message_dicts, params = self._create_message_dicts(messages, stop)
    params = {**params, **kwargs, 'stream': True}
    default_chunk_class = AIMessageChunk
    for chunk in self.completion_with_retry(messages=message_dicts, run_manager=run_manager, **params):
        if not isinstance(chunk, dict):
            chunk = chunk.dict()
        if len(chunk['choices']) == 0:
            continue
        choice = chunk['choices'][0]
        chunk = _convert_delta_to_message_chunk(choice['delta'], default_chunk_class)
        finish_reason = choice.get('finish_reason')
        generation_info = dict(finish_reason=finish_reason) if finish_reason is not None else None
        default_chunk_class = chunk.__class__
        chunk = ChatGenerationChunk(message=chunk, generation_info=generation_info)
        yield chunk
        if run_manager:
            run_manager.on_llm_new_token(chunk.text, chunk=chunk)
null
_import_ddg_search_tool_DuckDuckGoSearchRun
def _import_ddg_search_tool_DuckDuckGoSearchRun() -> Any:
    from langchain_community.tools.ddg_search.tool import DuckDuckGoSearchRun
    return DuckDuckGoSearchRun
null
_convert_dict_to_message
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> AIMessage:
    content = _dict.get('result', '') or ''
    if _dict.get('function_call'):
        additional_kwargs = {'function_call': dict(_dict['function_call'])}
        if 'thoughts' in additional_kwargs['function_call']:
            additional_kwargs['function_call'].pop('thoughts')
    else:
        additional_kwargs = {}
    return AIMessage(content=content, additional_kwargs={**_dict.get('body', {}), **additional_kwargs})
null
test_empty_token
def test_empty_token(self) -> None:
    assert len(_get_token_ids_default_method('')) == 0
null
_default_relevance_score
def _default_relevance_score(val: float) -> float:
    return 1 - val
null
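A one-line worked instance of this inversion, which maps a distance in [0, 1] to a relevance score:

    assert _default_relevance_score(0.25) == 0.75  # distance 0.25 becomes relevance 0.75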
parse_issues
""" Extracts title and number from each Issue and puts them in a dictionary Parameters: issues(List[Issue]): A list of gitlab Issue objects Returns: List[dict]: A dictionary of issue titles and numbers """ parsed = [] for issue in issues: title = issue.title number = issue.iid parsed.append({'title': title, 'number': number}) return parsed
def parse_issues(self, issues: List[Issue]) ->List[dict]: """ Extracts title and number from each Issue and puts them in a dictionary Parameters: issues(List[Issue]): A list of gitlab Issue objects Returns: List[dict]: A dictionary of issue titles and numbers """ parsed = [] for issue in issues: title = issue.title number = issue.iid parsed.append({'title': title, 'number': number}) return parsed
Extracts title and number from each Issue and puts them in a dictionary Parameters: issues(List[Issue]): A list of gitlab Issue objects Returns: List[dict]: A dictionary of issue titles and numbers
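A minimal sketch of the expected shape, using SimpleNamespace as a stand-in for gitlab Issue objects (`api` is an assumed GitLabAPIWrapper-like instance exposing parse_issues):

    from types import SimpleNamespace

    issues = [SimpleNamespace(title='Fix crash on startup', iid=12)]
    assert api.parse_issues(issues) == [{'title': 'Fix crash on startup', 'number': 12}]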
with_retry
def with_retry(self, **kwargs: Any) -> Runnable[Input, Output]:
    return self.__class__(bound=self.bound.with_retry(**kwargs), kwargs=self.kwargs, config=self.config)
null
add_message
"""Add a message to the session memory""" self._messages.append(message)
def add_message(self, message: BaseMessage) ->None: """Add a message to the session memory""" self._messages.append(message)
Add a message to the session memory
_generate
def _generate(self, prompts: List[str], stop: Optional[List[str]] = None,
              run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any) -> LLMResult:
    generations = []
    if self.streaming:
        if len(prompts) > 1:
            raise ValueError('Cannot stream results with multiple prompts.')
        generation: Optional[GenerationChunk] = None
        for chunk in self._stream(prompts[0], stop, run_manager, **kwargs):
            if generation is None:
                generation = chunk
            else:
                generation += chunk
        assert generation is not None
        generations.append([self._chunk_to_generation(generation)])
    else:
        params: Dict[str, Any] = self._invocation_params(stop=stop, **kwargs)
        for prompt in prompts:
            completion = generate_with_retry(self, prompt=prompt, **params)
            generations.append([Generation(**self._generation_from_qwen_resp(completion))])
    return LLMResult(generations=generations, llm_output={'model_name': self.model_name})
null
similarity_search
def similarity_search(self, query: str, k: int = 4, **kwargs: Any) -> List[Document]:
    res = self.store.get(query)
    if res is None:
        return []
    return [res]
null
from_math_prompt
"""Load PAL from math prompt. Args: llm (BaseLanguageModel): The language model to use for generating code. Returns: PALChain: An instance of PALChain. """ llm_chain = LLMChain(llm=llm, prompt=MATH_PROMPT) code_validations = PALValidation(solution_expression_name='solution', solution_expression_type=PALValidation.SOLUTION_EXPRESSION_TYPE_FUNCTION) return cls(llm_chain=llm_chain, stop='\n\n', get_answer_expr= 'print(solution())', code_validations=code_validations, **kwargs)
@classmethod def from_math_prompt(cls, llm: BaseLanguageModel, **kwargs: Any) ->PALChain: """Load PAL from math prompt. Args: llm (BaseLanguageModel): The language model to use for generating code. Returns: PALChain: An instance of PALChain. """ llm_chain = LLMChain(llm=llm, prompt=MATH_PROMPT) code_validations = PALValidation(solution_expression_name='solution', solution_expression_type=PALValidation. SOLUTION_EXPRESSION_TYPE_FUNCTION) return cls(llm_chain=llm_chain, stop='\n\n', get_answer_expr= 'print(solution())', code_validations=code_validations, **kwargs)
Load PAL from math prompt. Args: llm (BaseLanguageModel): The language model to use for generating code. Returns: PALChain: An instance of PALChain.
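A hedged usage sketch; PALChain ships in langchain_experimental, `llm` stands in for any BaseLanguageModel, and the question is illustrative:

    pal_chain = PALChain.from_math_prompt(llm)
    answer = pal_chain.run('If there are 3 boxes with 4 apples each, how many apples are there?')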
__init__
def __init__(self, session_id: Union[str, int], url: Optional[str] = None, username: Optional[str] = None,
             password: Optional[str] = None, database: str = 'neo4j', node_label: str = 'Session', window: int = 3):
    try:
        import neo4j
    except ImportError:
        raise ValueError('Could not import neo4j python package. Please install it with `pip install neo4j`.')
    if not session_id:
        raise ValueError('Please ensure that the session_id parameter is provided')
    url = get_from_env('url', 'NEO4J_URI', url)
    username = get_from_env('username', 'NEO4J_USERNAME', username)
    password = get_from_env('password', 'NEO4J_PASSWORD', password)
    database = get_from_env('database', 'NEO4J_DATABASE', database)
    self._driver = neo4j.GraphDatabase.driver(url, auth=(username, password))
    self._database = database
    self._session_id = session_id
    self._node_label = node_label
    self._window = window
    try:
        self._driver.verify_connectivity()
    except neo4j.exceptions.ServiceUnavailable:
        raise ValueError('Could not connect to Neo4j database. Please ensure that the url is correct')
    except neo4j.exceptions.AuthError:
        raise ValueError('Could not connect to Neo4j database. Please ensure that the username and password are correct')
    self._driver.execute_query(f'MERGE (s:`{self._node_label}` {{id:$session_id}})',
                               {'session_id': self._session_id}).summary
null
_seq_output_schema
def _seq_output_schema(steps: List[Runnable[Any, Any]], config: Optional[RunnableConfig]) -> Type[BaseModel]:
    from langchain_core.runnables.passthrough import RunnableAssign, RunnablePick
    last = steps[-1]
    if len(steps) == 1:
        # A single-step sequence's output schema is that step's output schema
        return last.get_output_schema(config)
    elif isinstance(last, RunnableAssign):
        mapper_output_schema = last.mapper.get_output_schema(config)
        prev_output_schema = _seq_output_schema(steps[:-1], config)
        if not prev_output_schema.__custom_root_type__:
            return create_model('RunnableSequenceOutput', **{
                **{k: (v.annotation, v.default) for k, v in prev_output_schema.__fields__.items()},
                **{k: (v.annotation, v.default) for k, v in mapper_output_schema.__fields__.items()},
            }, __config__=_SchemaConfig)
    elif isinstance(last, RunnablePick):
        prev_output_schema = _seq_output_schema(steps[:-1], config)
        if not prev_output_schema.__custom_root_type__:
            if isinstance(last.keys, list):
                return create_model('RunnableSequenceOutput', **{
                    k: (v.annotation, v.default)
                    for k, v in prev_output_schema.__fields__.items() if k in last.keys
                }, __config__=_SchemaConfig)
            else:
                field = prev_output_schema.__fields__[last.keys]
                return create_model('RunnableSequenceOutput',
                    __root__=(field.annotation, field.default), __config__=_SchemaConfig)
    return last.get_output_schema(config)
null
load
"""Load file.""" if kwargs: logger.warning( f'Received runtime arguments {kwargs}. Passing runtime args to `load` is deprecated. Please pass arguments during initialization instead.' ) text_kwargs = {**self.text_kwargs, **kwargs} parser = PyMuPDFParser(text_kwargs=text_kwargs, extract_images=self. extract_images) if self.web_path: blob = Blob.from_data(open(self.file_path, 'rb').read(), path=self.web_path ) else: blob = Blob.from_path(self.file_path) return parser.parse_folder(blob)
def load(self, **kwargs: Any) ->List[Document]: """Load file.""" if kwargs: logger.warning( f'Received runtime arguments {kwargs}. Passing runtime args to `load` is deprecated. Please pass arguments during initialization instead.' ) text_kwargs = {**self.text_kwargs, **kwargs} parser = PyMuPDFParser(text_kwargs=text_kwargs, extract_images=self. extract_images) if self.web_path: blob = Blob.from_data(open(self.file_path, 'rb').read(), path=self. web_path) else: blob = Blob.from_path(self.file_path) return parser.parse_folder(blob)
Load file.
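A hedged usage sketch for the loader method above (the file path is illustrative):

    docs = PyMuPDFLoader('paper.pdf').load()  # typically yields one Document per page with PyMuPDF metadata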
__init__
"""Initialize callback handler.""" self.color = color
def __init__(self, color: Optional[str]=None) ->None: """Initialize callback handler.""" self.color = color
Initialize callback handler.
_run
"""Use the tool.""" try: document_analysis_result = self._document_analysis(query) if not document_analysis_result: return 'No good document analysis result was found' return self._format_document_analysis_result(document_analysis_result) except Exception as e: raise RuntimeError(f'Error while running AzureCogsFormRecognizerTool: {e}')
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] =None) ->str: """Use the tool.""" try: document_analysis_result = self._document_analysis(query) if not document_analysis_result: return 'No good document analysis result was found' return self._format_document_analysis_result(document_analysis_result) except Exception as e: raise RuntimeError( f'Error while running AzureCogsFormRecognizerTool: {e}')
Use the tool.
test_playwright_url_loader
"""Test Playwright URL loader.""" urls = ['https://www.youtube.com/watch?v=dQw4w9WgXcQ', 'https://goo.gl/maps/NDSHwePEyaHMFGwh8', 'https://techmeme.com', 'https://techcrunch.com'] loader = PlaywrightURLLoader(urls=urls, remove_selectors=['header', 'footer'], continue_on_failure=False, headless=True) docs = loader.load() assert len(docs) > 0
def test_playwright_url_loader() ->None: """Test Playwright URL loader.""" urls = ['https://www.youtube.com/watch?v=dQw4w9WgXcQ', 'https://goo.gl/maps/NDSHwePEyaHMFGwh8', 'https://techmeme.com', 'https://techcrunch.com'] loader = PlaywrightURLLoader(urls=urls, remove_selectors=['header', 'footer'], continue_on_failure=False, headless=True) docs = loader.load() assert len(docs) > 0
Test Playwright URL loader.
validate_environment
"""Validate api key, python package exists, temperature, top_p, and top_k.""" google_api_key = convert_to_secret_str(get_from_dict_or_env(values, 'google_api_key', 'GOOGLE_API_KEY')) try: import google.generativeai as genai genai.configure(api_key=google_api_key.get_secret_value()) except ImportError: raise ChatGooglePalmError( 'Could not import google.generativeai python package. Please install it with `pip install google-generativeai`' ) values['client'] = genai if values['temperature'] is not None and not 0 <= values['temperature'] <= 1: raise ValueError('temperature must be in the range [0.0, 1.0]') if values['top_p'] is not None and not 0 <= values['top_p'] <= 1: raise ValueError('top_p must be in the range [0.0, 1.0]') if values['top_k'] is not None and values['top_k'] <= 0: raise ValueError('top_k must be positive') return values
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate api key, python package exists, temperature, top_p, and top_k.""" google_api_key = convert_to_secret_str(get_from_dict_or_env(values, 'google_api_key', 'GOOGLE_API_KEY')) try: import google.generativeai as genai genai.configure(api_key=google_api_key.get_secret_value()) except ImportError: raise ChatGooglePalmError( 'Could not import google.generativeai python package. Please install it with `pip install google-generativeai`' ) values['client'] = genai if values['temperature'] is not None and not 0 <= values['temperature' ] <= 1: raise ValueError('temperature must be in the range [0.0, 1.0]') if values['top_p'] is not None and not 0 <= values['top_p'] <= 1: raise ValueError('top_p must be in the range [0.0, 1.0]') if values['top_k'] is not None and values['top_k'] <= 0: raise ValueError('top_k must be positive') return values
Validate api key, python package exists, temperature, top_p, and top_k.
_UnaryOp
def _UnaryOp(self, t):
    self.write('(')
    self.write(self.unop[t.op.__class__.__name__])
    self.write(' ')
    self.dispatch(t.operand)
    self.write(')')
null
test_dashvector_search_with_filter
def test_dashvector_search_with_filter() -> None:
    metadatas = [{'meta': i} for i in range(len(texts))]
    dashvector = DashVector.from_texts(texts=texts, embedding=FakeEmbeddings(), metadatas=metadatas, ids=ids)
    sleep(0.5)
    output = dashvector.similarity_search('foo', filter='meta=2')
    assert output == [Document(page_content='baz', metadata={'meta': 2})]
null
get_custom_callback_meta
def get_custom_callback_meta(self) -> Dict[str, Any]:
    return {'step': self.step, 'starts': self.starts, 'ends': self.ends, 'errors': self.errors,
            'text_ctr': self.text_ctr, 'chain_starts': self.chain_starts, 'chain_ends': self.chain_ends,
            'llm_starts': self.llm_starts, 'llm_ends': self.llm_ends, 'llm_streams': self.llm_streams,
            'tool_starts': self.tool_starts, 'tool_ends': self.tool_ends, 'agent_ends': self.agent_ends}
null
test_similarity_search_with_score_by_vector_with_score_threshold
"""Test vector similarity with score by vector.""" texts = ['foo', 'bar', 'baz'] docsearch = TileDB.from_texts(texts=texts, embedding= ConsistentFakeEmbeddings(), index_uri=f'{str(tmp_path)}/flat', index_type='FLAT') query_vec = FakeEmbeddings().embed_query(text='foo') output = docsearch.similarity_search_with_score_by_vector(query_vec, k=2, score_threshold=0.2) assert len(output) == 1 assert output[0][0] == Document(page_content='foo') assert output[0][1] < 0.2 docsearch = TileDB.from_texts(texts=texts, embedding= ConsistentFakeEmbeddings(), index_uri=f'{str(tmp_path)}/ivf_flat', index_type='IVF_FLAT') query_vec = FakeEmbeddings().embed_query(text='foo') output = docsearch.similarity_search_with_score_by_vector(query_vec, k=2, score_threshold=0.2, nprobe=docsearch.vector_index.partitions) assert len(output) == 1 assert output[0][0] == Document(page_content='foo') assert output[0][1] < 0.2
@pytest.mark.requires('tiledb-vector-search') def test_similarity_search_with_score_by_vector_with_score_threshold(tmp_path: Path) ->None: """Test vector similarity with score by vector.""" texts = ['foo', 'bar', 'baz'] docsearch = TileDB.from_texts(texts=texts, embedding= ConsistentFakeEmbeddings(), index_uri=f'{str(tmp_path)}/flat', index_type='FLAT') query_vec = FakeEmbeddings().embed_query(text='foo') output = docsearch.similarity_search_with_score_by_vector(query_vec, k= 2, score_threshold=0.2) assert len(output) == 1 assert output[0][0] == Document(page_content='foo') assert output[0][1] < 0.2 docsearch = TileDB.from_texts(texts=texts, embedding= ConsistentFakeEmbeddings(), index_uri=f'{str(tmp_path)}/ivf_flat', index_type='IVF_FLAT') query_vec = FakeEmbeddings().embed_query(text='foo') output = docsearch.similarity_search_with_score_by_vector(query_vec, k= 2, score_threshold=0.2, nprobe=docsearch.vector_index.partitions) assert len(output) == 1 assert output[0][0] == Document(page_content='foo') assert output[0][1] < 0.2
Test vector similarity with score by vector.
test_loads_llmchain_env
@pytest.mark.requires('openai')
def test_loads_llmchain_env() -> None:
    import os
    has_env = 'OPENAI_API_KEY' in os.environ
    if not has_env:
        os.environ['OPENAI_API_KEY'] = 'env_variable'
    llm = OpenAI(model='davinci', temperature=0.5)
    prompt = PromptTemplate.from_template('hello {name}!')
    chain = LLMChain(llm=llm, prompt=prompt)
    chain_string = dumps(chain)
    chain2 = loads(chain_string)
    assert chain2 == chain
    assert dumps(chain2) == chain_string
    assert isinstance(chain2, LLMChain)
    assert isinstance(chain2.llm, OpenAI)
    assert isinstance(chain2.prompt, PromptTemplate)
    if not has_env:
        del os.environ['OPENAI_API_KEY']
null
OutputType
@property
def OutputType(self) -> Type[Output]:
    return self.default.OutputType
null
add_documents
"""Add the given documents to the store (insert behavior).""" if ids and len(ids) != len(documents): raise ValueError( f'Expected {len(ids)} ids, got {len(documents)} documents.') if not ids: raise NotImplementedError('This is not implemented yet.') for _id, document in zip(ids, documents): if _id in self.store and not self.permit_upserts: raise ValueError( f'Document with uid {_id} already exists in the store.') self.store[_id] = document return list(ids)
def add_documents(self, documents: Sequence[Document], *, ids: Optional[ Sequence[str]]=None, **kwargs: Any) ->List[str]: """Add the given documents to the store (insert behavior).""" if ids and len(ids) != len(documents): raise ValueError( f'Expected {len(ids)} ids, got {len(documents)} documents.') if not ids: raise NotImplementedError('This is not implemented yet.') for _id, document in zip(ids, documents): if _id in self.store and not self.permit_upserts: raise ValueError( f'Document with uid {_id} already exists in the store.') self.store[_id] = document return list(ids)
Add the given documents to the store (insert behavior).
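A hedged usage sketch for the insert-only store above (`store` construction is assumed):

    docs = [Document(page_content='a'), Document(page_content='b')]
    store.add_documents(docs, ids=['1', '2'])  # re-inserting an existing id raises unless upserts are permitted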
test_run_returns_no_result
"""Test that gives no result.""" output = api_client.run('1605.08386WWW') assert 'No good Arxiv Result was found' == output
def test_run_returns_no_result(api_client: ArxivAPIWrapper) ->None: """Test that gives no result.""" output = api_client.run('1605.08386WWW') assert 'No good Arxiv Result was found' == output
Test that gives no result.
test_json_loader
"""Test unstructured loader.""" file_path = Path(__file__).parent.parent / 'examples/example.json' loader = JSONLoader(str(file_path), '.messages[].content') docs = loader.load() assert len(docs) == 3 assert docs[-1].page_content == ''
def test_json_loader() ->None: """Test unstructured loader.""" file_path = Path(__file__).parent.parent / 'examples/example.json' loader = JSONLoader(str(file_path), '.messages[].content') docs = loader.load() assert len(docs) == 3 assert docs[-1].page_content == ''
Test unstructured loader.
test_socket_disabled
"""This test should fail.""" with pytest.raises(pytest_socket.SocketBlockedError): requests.get('https://www.example.com')
def test_socket_disabled() ->None: """This test should fail.""" with pytest.raises(pytest_socket.SocketBlockedError): requests.get('https://www.example.com')
This test should fail.
__init__
""" Initialize the cache with all relevant parameters. Args: session (cassandra.cluster.Session): an open Cassandra session keyspace (str): the keyspace to use for storing the cache embedding (Embedding): Embedding provider for semantic encoding and search. table_name (str): name of the Cassandra (vector) table to use as cache distance_metric (str, 'dot'): which measure to adopt for similarity searches score_threshold (optional float): numeric value to use as cutoff for the similarity searches ttl_seconds (optional int): time-to-live for cache entries (default: None, i.e. forever) The default score threshold is tuned to the default metric. Tune it carefully yourself if switching to another distance metric. """ try: from cassio.table import MetadataVectorCassandraTable except (ImportError, ModuleNotFoundError): raise ValueError( 'Could not import cassio python package. Please install it with `pip install cassio`.' ) self.session = session self.keyspace = keyspace self.embedding = embedding self.table_name = table_name self.distance_metric = distance_metric self.score_threshold = score_threshold self.ttl_seconds = ttl_seconds @lru_cache(maxsize=CASSANDRA_SEMANTIC_CACHE_EMBEDDING_CACHE_SIZE) def _cache_embedding(text: str) ->List[float]: return self.embedding.embed_query(text=text) self._get_embedding = _cache_embedding self.embedding_dimension = self._get_embedding_dimension() self.table = MetadataVectorCassandraTable(session=self.session, keyspace= self.keyspace, table=self.table_name, primary_key_type=['TEXT'], vector_dimension=self.embedding_dimension, ttl_seconds=self.ttl_seconds, metadata_indexing=('allow', {'_llm_string_hash'}), skip_provisioning= skip_provisioning)
def __init__(self, session: Optional[CassandraSession], keyspace: Optional[ str], embedding: Embeddings, table_name: str= CASSANDRA_SEMANTIC_CACHE_DEFAULT_TABLE_NAME, distance_metric: str= CASSANDRA_SEMANTIC_CACHE_DEFAULT_DISTANCE_METRIC, score_threshold: float=CASSANDRA_SEMANTIC_CACHE_DEFAULT_SCORE_THRESHOLD, ttl_seconds: Optional[int]=CASSANDRA_SEMANTIC_CACHE_DEFAULT_TTL_SECONDS, skip_provisioning: bool=False): """ Initialize the cache with all relevant parameters. Args: session (cassandra.cluster.Session): an open Cassandra session keyspace (str): the keyspace to use for storing the cache embedding (Embedding): Embedding provider for semantic encoding and search. table_name (str): name of the Cassandra (vector) table to use as cache distance_metric (str, 'dot'): which measure to adopt for similarity searches score_threshold (optional float): numeric value to use as cutoff for the similarity searches ttl_seconds (optional int): time-to-live for cache entries (default: None, i.e. forever) The default score threshold is tuned to the default metric. Tune it carefully yourself if switching to another distance metric. """ try: from cassio.table import MetadataVectorCassandraTable except (ImportError, ModuleNotFoundError): raise ValueError( 'Could not import cassio python package. Please install it with `pip install cassio`.' ) self.session = session self.keyspace = keyspace self.embedding = embedding self.table_name = table_name self.distance_metric = distance_metric self.score_threshold = score_threshold self.ttl_seconds = ttl_seconds @lru_cache(maxsize=CASSANDRA_SEMANTIC_CACHE_EMBEDDING_CACHE_SIZE) def _cache_embedding(text: str) ->List[float]: return self.embedding.embed_query(text=text) self._get_embedding = _cache_embedding self.embedding_dimension = self._get_embedding_dimension() self.table = MetadataVectorCassandraTable(session=self.session, keyspace=self.keyspace, table=self.table_name, primary_key_type=[ 'TEXT'], vector_dimension=self.embedding_dimension, ttl_seconds= self.ttl_seconds, metadata_indexing=('allow', {'_llm_string_hash'}), skip_provisioning=skip_provisioning)
Initialize the cache with all relevant parameters. Args: session (cassandra.cluster.Session): an open Cassandra session keyspace (str): the keyspace to use for storing the cache embedding (Embedding): Embedding provider for semantic encoding and search. table_name (str): name of the Cassandra (vector) table to use as cache distance_metric (str, 'dot'): which measure to adopt for similarity searches score_threshold (optional float): numeric value to use as cutoff for the similarity searches ttl_seconds (optional int): time-to-live for cache entries (default: None, i.e. forever) The default score threshold is tuned to the default metric. Tune it carefully yourself if switching to another distance metric.
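A hedged construction sketch for the cache above; the cassio session/keyspace setup is assumed and the embedding model is illustrative:

    cache = CassandraSemanticCache(session=session, keyspace='demo_ks', embedding=OpenAIEmbeddings())
    set_llm_cache(cache)  # from langchain.globals; older code assigned langchain.llm_cache instead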
_chain_type
return 'stuff_documents_chain'
@property def _chain_type(self) ->str: return 'stuff_documents_chain'
null
on_tool_error
self.on_tool_error_common()
def on_tool_error(self, *args: Any, **kwargs: Any) ->Any: self.on_tool_error_common()
null
test_memory_empty_on_new_session
memory = ConversationBufferMemory(
    memory_key='foo', chat_memory=message_history, return_messages=True
)
assert memory.chat_memory.messages == []
def test_memory_empty_on_new_session(message_history: MomentoChatMessageHistory) -> None:
    memory = ConversationBufferMemory(
        memory_key='foo', chat_memory=message_history, return_messages=True
    )
    assert memory.chat_memory.messages == []
null
validate_tools
"""Validate that tools are compatible with agent.""" agent = values['agent'] tools = values['tools'] allowed_tools = agent.get_allowed_tools() if allowed_tools is not None: if set(allowed_tools) != set([tool.name for tool in tools]): raise ValueError( f'Allowed tools ({allowed_tools}) different than provided tools ({[tool.name for tool in tools]})' ) return values
@root_validator() def validate_tools(cls, values: Dict) ->Dict: """Validate that tools are compatible with agent.""" agent = values['agent'] tools = values['tools'] allowed_tools = agent.get_allowed_tools() if allowed_tools is not None: if set(allowed_tools) != set([tool.name for tool in tools]): raise ValueError( f'Allowed tools ({allowed_tools}) different than provided tools ({[tool.name for tool in tools]})' ) return values
Validate that tools are compatible with agent.
get_generation
"""Call to client generate method with call scope""" payload = self.get_payload(inputs=inputs, stream=False, labels=labels, **kwargs ) out = self.client.get_req_generation(self.model, stop=stop, payload=payload) return out
def get_generation(self, inputs: Sequence[Dict], labels: Optional[dict]= None, stop: Optional[Sequence[str]]=None, **kwargs: Any) ->dict: """Call to client generate method with call scope""" payload = self.get_payload(inputs=inputs, stream=False, labels=labels, **kwargs) out = self.client.get_req_generation(self.model, stop=stop, payload=payload ) return out
Call to client generate method with call scope
on_agent_action
"""Do nothing when agent takes a specific action.""" pass
def on_agent_action(self, action: AgentAction, **kwargs: Any) ->Any: """Do nothing when agent takes a specific action.""" pass
Do nothing when agent takes a specific action.
validate_environment
"""Validate api key, python package exists.""" google_api_key = get_from_dict_or_env(values, 'google_api_key', 'GOOGLE_API_KEY') model_name = values['model'] if isinstance(google_api_key, SecretStr): google_api_key = google_api_key.get_secret_value() genai.configure(api_key=google_api_key) if _is_gemini_model(model_name): values['client'] = genai.GenerativeModel(model_name=model_name) else: values['client'] = genai if values['temperature'] is not None and not 0 <= values['temperature'] <= 1: raise ValueError('temperature must be in the range [0.0, 1.0]') if values['top_p'] is not None and not 0 <= values['top_p'] <= 1: raise ValueError('top_p must be in the range [0.0, 1.0]') if values['top_k'] is not None and values['top_k'] <= 0: raise ValueError('top_k must be positive') if values['max_output_tokens'] is not None and values['max_output_tokens' ] <= 0: raise ValueError('max_output_tokens must be greater than zero') return values
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate api key, python package exists.""" google_api_key = get_from_dict_or_env(values, 'google_api_key', 'GOOGLE_API_KEY') model_name = values['model'] if isinstance(google_api_key, SecretStr): google_api_key = google_api_key.get_secret_value() genai.configure(api_key=google_api_key) if _is_gemini_model(model_name): values['client'] = genai.GenerativeModel(model_name=model_name) else: values['client'] = genai if values['temperature'] is not None and not 0 <= values['temperature' ] <= 1: raise ValueError('temperature must be in the range [0.0, 1.0]') if values['top_p'] is not None and not 0 <= values['top_p'] <= 1: raise ValueError('top_p must be in the range [0.0, 1.0]') if values['top_k'] is not None and values['top_k'] <= 0: raise ValueError('top_k must be positive') if values['max_output_tokens'] is not None and values['max_output_tokens' ] <= 0: raise ValueError('max_output_tokens must be greater than zero') return values
Validate api key, python package exists.
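The range checks above are self-contained; here is a stand-alone sketch (function name hypothetical) that mirrors them and can be unit-tested without the genai client.

from typing import Optional

def check_sampling_params(temperature: Optional[float] = None,
                          top_p: Optional[float] = None,
                          top_k: Optional[int] = None) -> None:
    # Mirrors the validator's bounds checks, minus the genai setup.
    if temperature is not None and not 0 <= temperature <= 1:
        raise ValueError('temperature must be in the range [0.0, 1.0]')
    if top_p is not None and not 0 <= top_p <= 1:
        raise ValueError('top_p must be in the range [0.0, 1.0]')
    if top_k is not None and top_k <= 0:
        raise ValueError('top_k must be positive')

check_sampling_params(temperature=0.7, top_p=0.95, top_k=40)  # passes silently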
_import_requests_tool_RequestsDeleteTool
from langchain_community.tools.requests.tool import RequestsDeleteTool return RequestsDeleteTool
def _import_requests_tool_RequestsDeleteTool() ->Any: from langchain_community.tools.requests.tool import RequestsDeleteTool return RequestsDeleteTool
null
test_alibabacloud_opensearch_with_text_and_meta_query
opensearch = create_alibabacloud_opensearch()
output = opensearch.similarity_search(query='foo', search_filter={'string_field': 'value1'}, k=1)
assert output == [Document(page_content='foo', metadata={'string_field': 'value1', 'int_field': 1, 'float_field': 1.0, 'double_field': 2.0})]
output = opensearch.similarity_search(query='bar', search_filter={'int_field': 2}, k=1)
assert output == [Document(page_content='bar', metadata={'string_field': 'value2', 'int_field': 2, 'float_field': 3.0, 'double_field': 4.0})]
output = opensearch.similarity_search(query='baz', search_filter={'float_field': 5.0}, k=1)
assert output == [Document(page_content='baz', metadata={'string_field': 'value3', 'int_field': 3, 'float_field': 5.0, 'double_field': 6.0})]
output = opensearch.similarity_search(query='baz', search_filter={'float_field': 6.0}, k=1)
assert len(output) == 0
def test_alibabacloud_opensearch_with_text_and_meta_query() -> None:
    opensearch = create_alibabacloud_opensearch()
    output = opensearch.similarity_search(query='foo', search_filter={'string_field': 'value1'}, k=1)
    assert output == [Document(page_content='foo', metadata={'string_field': 'value1', 'int_field': 1, 'float_field': 1.0, 'double_field': 2.0})]
    output = opensearch.similarity_search(query='bar', search_filter={'int_field': 2}, k=1)
    assert output == [Document(page_content='bar', metadata={'string_field': 'value2', 'int_field': 2, 'float_field': 3.0, 'double_field': 4.0})]
    output = opensearch.similarity_search(query='baz', search_filter={'float_field': 5.0}, k=1)
    assert output == [Document(page_content='baz', metadata={'string_field': 'value3', 'int_field': 3, 'float_field': 5.0, 'double_field': 6.0})]
    output = opensearch.similarity_search(query='baz', search_filter={'float_field': 6.0}, k=1)
    assert len(output) == 0
null
test_init_delta_sync_with_managed_embeddings
index = mock_index(DELTA_SYNC_INDEX_MANAGED_EMBEDDINGS) vectorsearch = DatabricksVectorSearch(index) assert vectorsearch.index == index
@pytest.mark.requires('databricks', 'databricks.vector_search') def test_init_delta_sync_with_managed_embeddings() ->None: index = mock_index(DELTA_SYNC_INDEX_MANAGED_EMBEDDINGS) vectorsearch = DatabricksVectorSearch(index) assert vectorsearch.index == index
null
_load_stuff_chain
_prompt = prompt or stuff_prompt.PROMPT_SELECTOR.get_prompt(llm)
llm_chain = LLMChain(llm=llm, prompt=_prompt, verbose=verbose,
    callback_manager=callback_manager, callbacks=callbacks)
return StuffDocumentsChain(
    llm_chain=llm_chain,
    document_variable_name=document_variable_name,
    verbose=verbose,
    callback_manager=callback_manager,
    callbacks=callbacks,
    **kwargs,
)
def _load_stuff_chain(
    llm: BaseLanguageModel,
    prompt: Optional[BasePromptTemplate] = None,
    document_variable_name: str = 'context',
    verbose: Optional[bool] = None,
    callback_manager: Optional[BaseCallbackManager] = None,
    callbacks: Callbacks = None,
    **kwargs: Any,
) -> StuffDocumentsChain:
    _prompt = prompt or stuff_prompt.PROMPT_SELECTOR.get_prompt(llm)
    llm_chain = LLMChain(llm=llm, prompt=_prompt, verbose=verbose,
        callback_manager=callback_manager, callbacks=callbacks)
    return StuffDocumentsChain(
        llm_chain=llm_chain,
        document_variable_name=document_variable_name,
        verbose=verbose,
        callback_manager=callback_manager,
        callbacks=callbacks,
        **kwargs,
    )
null
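A hedged usage sketch for _load_stuff_chain: `llm` is assumed to be an instantiated language model, and the prompt selected by default is assumed to be the question-answering prompt, which also expects a `question` input.

from langchain_core.documents import Document

# llm is assumed to be an instantiated language model.
chain = _load_stuff_chain(llm, document_variable_name='context')
docs = [Document(page_content='LangChain composes LLM applications.')]
answer = chain.run(input_documents=docs, question='What does LangChain do?')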
_handle_sse_line
try: obj = json.loads(line) return GenerationChunk(text=obj.get('token', {}).get('text')) except Exception: return None
def _handle_sse_line(line: str) ->Optional[GenerationChunk]: try: obj = json.loads(line) return GenerationChunk(text=obj.get('token', {}).get('text')) except Exception: return None
null
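Concretely, _handle_sse_line turns a JSON payload shaped like {'token': {'text': ...}} into a GenerationChunk, and returns None for anything unparsable:

chunk = _handle_sse_line('{"token": {"text": "Hello"}}')
assert chunk is not None and chunk.text == 'Hello'
assert _handle_sse_line('not json') is None  # parse failure is swallowed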
test_do_not_remove_repeated_content
bs_transformer = BeautifulSoupTransformer() with_lines_html = '<p>1\n1\n1\n1</p>' documents = [Document(page_content=with_lines_html)] docs_transformed = bs_transformer.transform_documents(documents) assert docs_transformed[0].page_content == '1 1 1 1'
@pytest.mark.requires('bs4') def test_do_not_remove_repeated_content() ->None: bs_transformer = BeautifulSoupTransformer() with_lines_html = '<p>1\n1\n1\n1</p>' documents = [Document(page_content=with_lines_html)] docs_transformed = bs_transformer.transform_documents(documents) assert docs_transformed[0].page_content == '1 1 1 1'
null
test_chroma_with_include_parameter
"""Test end to end construction and include parameter.""" texts = ['foo', 'bar', 'baz'] docsearch = Chroma.from_texts(collection_name='test_collection', texts= texts, embedding=FakeEmbeddings()) output = docsearch.get(include=['embeddings']) assert output['embeddings'] is not None output = docsearch.get() assert output['embeddings'] is None
def test_chroma_with_include_parameter() ->None: """Test end to end construction and include parameter.""" texts = ['foo', 'bar', 'baz'] docsearch = Chroma.from_texts(collection_name='test_collection', texts= texts, embedding=FakeEmbeddings()) output = docsearch.get(include=['embeddings']) assert output['embeddings'] is not None output = docsearch.get() assert output['embeddings'] is None
Test end to end construction and include parameter.
translator
schema = RedisModel(
    text=[TextFieldSchema(name='bar')],
    numeric=[NumericFieldSchema(name='foo')],
    tag=[TagFieldSchema(name='tag')],
)
return RedisTranslator(schema)
@pytest.fixture
def translator() -> RedisTranslator:
    schema = RedisModel(
        text=[TextFieldSchema(name='bar')],
        numeric=[NumericFieldSchema(name='foo')],
        tag=[TagFieldSchema(name='tag')],
    )
    return RedisTranslator(schema)
null
test_embedding_query
"""Test embeddings for query.""" document = 'foo bar' embedding = VolcanoEmbeddings() output = embedding.embed_query(document) assert len(output) == 1024
def test_embedding_query() ->None: """Test embeddings for query.""" document = 'foo bar' embedding = VolcanoEmbeddings() output = embedding.embed_query(document) assert len(output) == 1024
Test embeddings for query.
on_chain_start_common
self.chain_starts += 1 self.starts += 1
def on_chain_start_common(self) ->None: self.chain_starts += 1 self.starts += 1
null
test_lancedb_add_texts
import lancedb embeddings = FakeEmbeddings() db = lancedb.connect('/tmp/lancedb') texts = ['text 1'] vectors = embeddings.embed_documents(texts) table = db.create_table('my_table', data=[{'vector': vectors[idx], 'id': text, 'text': text} for idx, text in enumerate(texts)], mode='overwrite') store = LanceDB(table, embeddings) store.add_texts(['text 2']) result = store.similarity_search('text 2') result_texts = [doc.page_content for doc in result] assert 'text 2' in result_texts
def test_lancedb_add_texts() -> None:
    import lancedb
    embeddings = FakeEmbeddings()
    db = lancedb.connect('/tmp/lancedb')
    texts = ['text 1']
    vectors = embeddings.embed_documents(texts)
    table = db.create_table(
        'my_table',
        data=[{'vector': vectors[idx], 'id': text, 'text': text}
              for idx, text in enumerate(texts)],
        mode='overwrite',
    )
    store = LanceDB(table, embeddings)
    store.add_texts(['text 2'])
    result = store.similarity_search('text 2')
    result_texts = [doc.page_content for doc in result]
    assert 'text 2' in result_texts
null
_call
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
question = inputs[self.question_key]
api_url = self.api_request_chain.predict(question=question,
    api_docs=self.api_docs, callbacks=_run_manager.get_child())
_run_manager.on_text(api_url, color='green', end='\n', verbose=self.verbose)
api_url = api_url.strip()
if self.limit_to_domains and not _check_in_allowed_domain(api_url, self.limit_to_domains):
    raise ValueError(
        f'{api_url} is not in the allowed domains: {self.limit_to_domains}')
api_response = self.requests_wrapper.get(api_url)
_run_manager.on_text(api_response, color='yellow', end='\n', verbose=self.verbose)
answer = self.api_answer_chain.predict(question=question,
    api_docs=self.api_docs, api_url=api_url, api_response=api_response,
    callbacks=_run_manager.get_child())
return {self.output_key: answer}
def _call(self, inputs: Dict[str, Any],
          run_manager: Optional[CallbackManagerForChainRun] = None) -> Dict[str, str]:
    _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
    question = inputs[self.question_key]
    api_url = self.api_request_chain.predict(question=question,
        api_docs=self.api_docs, callbacks=_run_manager.get_child())
    _run_manager.on_text(api_url, color='green', end='\n', verbose=self.verbose)
    api_url = api_url.strip()
    if self.limit_to_domains and not _check_in_allowed_domain(api_url, self.limit_to_domains):
        raise ValueError(
            f'{api_url} is not in the allowed domains: {self.limit_to_domains}')
    api_response = self.requests_wrapper.get(api_url)
    _run_manager.on_text(api_response, color='yellow', end='\n', verbose=self.verbose)
    answer = self.api_answer_chain.predict(question=question,
        api_docs=self.api_docs, api_url=api_url, api_response=api_response,
        callbacks=_run_manager.get_child())
    return {self.output_key: answer}
null
invoke
# Closure: `self`, `return_exceptions` and `kwargs` come from the
# enclosing scope; `self.invoke` is the bound method, not a recursive call.
if return_exceptions:
    try:
        return self.invoke(input, config, **kwargs)
    except Exception as e:
        return e
else:
    return self.invoke(input, config, **kwargs)
def invoke(input: Input, config: RunnableConfig) -> Union[Output, Exception]:
    # `self`, `return_exceptions` and `kwargs` are captured from the
    # enclosing scope; `self.invoke` is the bound method, not recursion.
    if return_exceptions:
        try:
            return self.invoke(input, config, **kwargs)
        except Exception as e:
            return e
    else:
        return self.invoke(input, config, **kwargs)
null
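Because it reads its surroundings from the enclosing scope, this `invoke` is most plausibly a closure defined inside a batch-style method. A hedged sketch of how such a wrapper is typically applied; `inputs` and `configs` are hypothetical names here:

# A batch implementation would pair each input with its config and map
# the closure over them; exceptions are returned inline only when
# return_exceptions is set.
outputs = [invoke(inp, cfg) for inp, cfg in zip(inputs, configs)]
errors = [out for out in outputs if isinstance(out, Exception)]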
get_tools
"""Get the tools in the toolkit.""" return [SlackGetChannel(), SlackGetMessage(), SlackScheduleMessage(), SlackSendMessage()]
def get_tools(self) ->List[BaseTool]: """Get the tools in the toolkit.""" return [SlackGetChannel(), SlackGetMessage(), SlackScheduleMessage(), SlackSendMessage()]
Get the tools in the toolkit.
__init__
validate_unstructured_version(min_unstructured_version='0.6.7') super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def __init__(self, file_path: str, mode: str = 'single', **unstructured_kwargs: Any):
    validate_unstructured_version(min_unstructured_version='0.6.7')
    super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
null
_enter_history
hist = config['configurable']['message_history']
if self.history_messages_key:
    return hist.messages.copy()
else:
    input_val = input if not self.input_messages_key else input[self.input_messages_key]
    return hist.messages.copy() + self._get_input_messages(input_val)
def _enter_history(self, input: Any, config: RunnableConfig) -> List[BaseMessage]:
    hist = config['configurable']['message_history']
    if self.history_messages_key:
        return hist.messages.copy()
    else:
        input_val = input if not self.input_messages_key else input[self.input_messages_key]
        return hist.messages.copy() + self._get_input_messages(input_val)
null
lc_attributes
"""List of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. """ return {}
@property def lc_attributes(self) ->Dict: """List of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. """ return {}
List of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor.
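A minimal sketch of overriding this property in a subclass, assuming Serializable from langchain_core.load.serializable as the base class; the attribute name is hypothetical:

from typing import Dict
from langchain_core.load.serializable import Serializable

class MyComponent(Serializable):
    region: str = 'us-east-1'  # hypothetical constructor kwarg

    @property
    def lc_attributes(self) -> Dict:
        # Merged into the serialized constructor kwargs on dumps().
        return {'region': self.region}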
_combine_documents
return '\n\n'.join(format_document(doc, prompt=DOCUMENT_PROMPT) for doc in docs)
def _combine_documents(docs: List) -> str:
    return '\n\n'.join(format_document(doc, prompt=DOCUMENT_PROMPT) for doc in docs)
null
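A usage sketch, assuming DOCUMENT_PROMPT is the conventional '{page_content}' prompt template (an assumption; it is not shown in this record):

from langchain_core.documents import Document

docs = [Document(page_content='first passage'),
        Document(page_content='second passage')]
print(_combine_documents(docs))
# first passage
#
# second passage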
_completion_with_retry
if is_gemini: return llm.client.generate_content(prompt, stream=stream, generation_config=kwargs) else: if stream: return llm.client.predict_streaming(prompt[0], **kwargs) return llm.client.predict(prompt[0], **kwargs)
@retry_decorator def _completion_with_retry(prompt: List[Union[str, 'Image']], is_gemini: bool=False, **kwargs: Any) ->Any: if is_gemini: return llm.client.generate_content(prompt, stream=stream, generation_config=kwargs) else: if stream: return llm.client.predict_streaming(prompt[0], **kwargs) return llm.client.predict(prompt[0], **kwargs)
null
test_tracing_v2_agent_with_metadata
from langchain.agents import AgentType, initialize_agent, load_tools
os.environ['LANGCHAIN_TRACING_V2'] = 'true'
llm = OpenAI(temperature=0)
chat = ChatOpenAI(temperature=0)
tools = load_tools(['llm-math', 'serpapi'], llm=llm)
agent = initialize_agent(tools, llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
chat_agent = initialize_agent(tools, chat,
    agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
agent.run(questions[0], tags=['a-tag'], metadata={'a': 'b', 'c': 'd'})
chat_agent.run(questions[0], tags=['a-tag'], metadata={'a': 'b', 'c': 'd'})
def test_tracing_v2_agent_with_metadata() -> None:
    from langchain.agents import AgentType, initialize_agent, load_tools
    os.environ['LANGCHAIN_TRACING_V2'] = 'true'
    llm = OpenAI(temperature=0)
    chat = ChatOpenAI(temperature=0)
    tools = load_tools(['llm-math', 'serpapi'], llm=llm)
    agent = initialize_agent(tools, llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
    chat_agent = initialize_agent(tools, chat,
        agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
    agent.run(questions[0], tags=['a-tag'], metadata={'a': 'b', 'c': 'd'})
    chat_agent.run(questions[0], tags=['a-tag'], metadata={'a': 'b', 'c': 'd'})
null
on_chat_model_start
"""Run when a chat model starts running.""" raise NotImplementedError( f'{self.__class__.__name__} does not implement `on_chat_model_start`')
def on_chat_model_start(
    self,
    serialized: Dict[str, Any],
    messages: List[List[BaseMessage]],
    *,
    run_id: UUID,
    parent_run_id: Optional[UUID] = None,
    tags: Optional[List[str]] = None,
    metadata: Optional[Dict[str, Any]] = None,
    **kwargs: Any,
) -> Any:
    """Run when a chat model starts running."""
    raise NotImplementedError(
        f'{self.__class__.__name__} does not implement `on_chat_model_start`'
    )
Run when a chat model starts running.
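Since the base implementation raises NotImplementedError, handlers that care about chat-model starts override the hook. A minimal sketch, assuming BaseCallbackHandler from langchain_core.callbacks as the base class:

from langchain_core.callbacks import BaseCallbackHandler

class PrintingHandler(BaseCallbackHandler):
    def on_chat_model_start(self, serialized, messages, **kwargs):
        # messages is a list of message batches, one per request.
        for batch in messages:
            print([m.content for m in batch])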
test_openai_model_param
llm = OpenAI(model='foo') assert llm.model_name == 'foo' llm = OpenAI(model_name='foo') assert llm.model_name == 'foo'
@pytest.mark.requires('openai') def test_openai_model_param() ->None: llm = OpenAI(model='foo') assert llm.model_name == 'foo' llm = OpenAI(model_name='foo') assert llm.model_name == 'foo'
null
ignore_llm
"""Whether to ignore LLM callbacks.""" return self.ignore_llm_
@property def ignore_llm(self) ->bool: """Whether to ignore LLM callbacks.""" return self.ignore_llm_
Whether to ignore LLM callbacks.
_execute
""" Executes SQL command through underlying engine. If the statement returns no rows, an empty list is returned. """ with self._engine.begin() as connection: if self._schema is not None: if self.dialect == 'snowflake': connection.exec_driver_sql('ALTER SESSION SET search_path = %s', (self._schema,)) elif self.dialect == 'bigquery': connection.exec_driver_sql('SET @@dataset_id=?', (self._schema,)) elif self.dialect == 'mssql': pass elif self.dialect == 'trino': connection.exec_driver_sql('USE ?', (self._schema,)) elif self.dialect == 'duckdb': connection.exec_driver_sql(f'SET search_path TO {self._schema}') elif self.dialect == 'oracle': connection.exec_driver_sql( f'ALTER SESSION SET CURRENT_SCHEMA = {self._schema}') elif self.dialect == 'sqlany': pass else: connection.exec_driver_sql('SET search_path TO %s', (self._schema,) ) cursor = connection.execute(text(command)) if cursor.returns_rows: if fetch == 'all': result = [x._asdict() for x in cursor.fetchall()] elif fetch == 'one': first_result = cursor.fetchone() result = [] if first_result is None else [first_result._asdict()] else: raise ValueError("Fetch parameter must be either 'one' or 'all'") return result return []
def _execute(self, command: str, fetch: Literal['all', 'one']='all' ) ->Sequence[Dict[str, Any]]: """ Executes SQL command through underlying engine. If the statement returns no rows, an empty list is returned. """ with self._engine.begin() as connection: if self._schema is not None: if self.dialect == 'snowflake': connection.exec_driver_sql('ALTER SESSION SET search_path = %s' , (self._schema,)) elif self.dialect == 'bigquery': connection.exec_driver_sql('SET @@dataset_id=?', (self. _schema,)) elif self.dialect == 'mssql': pass elif self.dialect == 'trino': connection.exec_driver_sql('USE ?', (self._schema,)) elif self.dialect == 'duckdb': connection.exec_driver_sql(f'SET search_path TO {self._schema}' ) elif self.dialect == 'oracle': connection.exec_driver_sql( f'ALTER SESSION SET CURRENT_SCHEMA = {self._schema}') elif self.dialect == 'sqlany': pass else: connection.exec_driver_sql('SET search_path TO %s', (self. _schema,)) cursor = connection.execute(text(command)) if cursor.returns_rows: if fetch == 'all': result = [x._asdict() for x in cursor.fetchall()] elif fetch == 'one': first_result = cursor.fetchone() result = [] if first_result is None else [first_result. _asdict()] else: raise ValueError( "Fetch parameter must be either 'one' or 'all'") return result return []
Executes SQL command through underlying engine. If the statement returns no rows, an empty list is returned.
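A hedged usage sketch; `db` is assumed to be an instance of the SQLDatabase-style class that defines the _execute method above:

rows = db._execute('SELECT 1 AS answer', fetch='all')   # -> [{'answer': 1}]
one = db._execute('SELECT 1 AS answer', fetch='one')    # -> [{'answer': 1}]
ddl = db._execute('CREATE TABLE t (x INTEGER)')         # -> [] (no rows returned)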
test_partial
"""Test prompt can be partialed.""" template = 'This is a {foo} test.' prompt = PromptTemplate(input_variables=['foo'], template=template) assert prompt.template == template assert prompt.input_variables == ['foo'] new_prompt = prompt.partial(foo='3') new_result = new_prompt.format() assert new_result == 'This is a 3 test.' result = prompt.format(foo='foo') assert result == 'This is a foo test.'
def test_partial() ->None: """Test prompt can be partialed.""" template = 'This is a {foo} test.' prompt = PromptTemplate(input_variables=['foo'], template=template) assert prompt.template == template assert prompt.input_variables == ['foo'] new_prompt = prompt.partial(foo='3') new_result = new_prompt.format() assert new_result == 'This is a 3 test.' result = prompt.format(foo='foo') assert result == 'This is a foo test.'
Test prompt can be partialed.
extract_functions_classes
tree = ast.parse(self.code) functions_classes = [] for node in ast.iter_child_nodes(tree): if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)): functions_classes.append(self._extract_code(node)) return functions_classes
def extract_functions_classes(self) -> List[str]:
    tree = ast.parse(self.code)
    functions_classes = []
    for node in ast.iter_child_nodes(tree):
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
            functions_classes.append(self._extract_code(node))
    return functions_classes
null
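A stand-alone sketch of the traversal above: ast.iter_child_nodes only visits top-level nodes, so nested functions are not extracted separately.

import ast

source = "def outer():\n    def inner():\n        pass\n\nclass C:\n    pass\n"
tree = ast.parse(source)
top_level = [
    node.name
    for node in ast.iter_child_nodes(tree)
    if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef))
]
assert top_level == ['outer', 'C']  # inner() is not visited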
embeddings
return self.embedding_function
@property def embeddings(self) ->Embeddings: return self.embedding_function
null
_embedding
base_url = f'{self.ernie_api_base}/rpc/2.0/ai_custom/v1/wenxinworkshop/embeddings'
resp = requests.post(
    f'{base_url}/embedding-v1',
    headers={'Content-Type': 'application/json'},
    params={'access_token': self.access_token},
    json=json,
)
return resp.json()
def _embedding(self, json: object) -> dict:
    base_url = f'{self.ernie_api_base}/rpc/2.0/ai_custom/v1/wenxinworkshop/embeddings'
    resp = requests.post(
        f'{base_url}/embedding-v1',
        headers={'Content-Type': 'application/json'},
        params={'access_token': self.access_token},
        json=json,
    )
    return resp.json()
null
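A hedged call sketch from inside the same class; the request and response shapes are assumed from the ERNIE embedding-v1 API ({'input': [...]} in, {'data': [{'embedding': [...]}]} out):

# Shapes assumed from the ERNIE embedding-v1 API, not shown in this record.
resp = self._embedding({'input': ['hello world']})
vector = resp['data'][0]['embedding']  # embedding for the first input text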
test_load_jsonlines_list
file_path = '/workspaces/langchain/test.json'
expected_docs = [
    Document(page_content='value1', metadata={'source': file_path, 'seq_num': 1}),
    Document(page_content='value2', metadata={'source': file_path, 'seq_num': 2}),
    Document(page_content='value3', metadata={'source': file_path, 'seq_num': 3}),
    Document(page_content='value4', metadata={'source': file_path, 'seq_num': 4}),
]
mocker.patch('pathlib.Path.open', return_value=io.StringIO(
    """
[{"text": "value1"}, {"text": "value2"}]
[{"text": "value3"}, {"text": "value4"}]
"""
))
loader = JSONLoader(file_path=file_path, json_lines=True, **params)
result = loader.load()
assert result == expected_docs
@pytest.mark.parametrize('params', ({'jq_schema': '.[].text'},
    {'jq_schema': '.[]', 'content_key': 'text'}))
def test_load_jsonlines_list(params: Dict, mocker: MockerFixture) -> None:
    file_path = '/workspaces/langchain/test.json'
    expected_docs = [
        Document(page_content='value1', metadata={'source': file_path, 'seq_num': 1}),
        Document(page_content='value2', metadata={'source': file_path, 'seq_num': 2}),
        Document(page_content='value3', metadata={'source': file_path, 'seq_num': 3}),
        Document(page_content='value4', metadata={'source': file_path, 'seq_num': 4}),
    ]
    mocker.patch('pathlib.Path.open', return_value=io.StringIO(
        """
[{"text": "value1"}, {"text": "value2"}]
[{"text": "value3"}, {"text": "value4"}]
"""
    ))
    loader = JSONLoader(file_path=file_path, json_lines=True, **params)
    result = loader.load()
    assert result == expected_docs
null
_mlflow_extras
return ''
@property def _mlflow_extras(self) ->str: return ''
null
_convert_structured_search_response
"""Converts a sequence of search results to a list of LangChain documents.""" import json from google.protobuf.json_format import MessageToDict documents: List[Document] = [] for result in results: document_dict = MessageToDict(result.document._pb, preserving_proto_field_name=True) documents.append(Document(page_content=json.dumps(document_dict.get( 'struct_data', {})), metadata={'id': document_dict['id'], 'name': document_dict['name']})) return documents
def _convert_structured_search_response(self, results: Sequence[SearchResult] ) ->List[Document]: """Converts a sequence of search results to a list of LangChain documents.""" import json from google.protobuf.json_format import MessageToDict documents: List[Document] = [] for result in results: document_dict = MessageToDict(result.document._pb, preserving_proto_field_name=True) documents.append(Document(page_content=json.dumps(document_dict.get ('struct_data', {})), metadata={'id': document_dict['id'], 'name': document_dict['name']})) return documents
Converts a sequence of search results to a list of LangChain documents.
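Downstream consumers can recover the original struct_data from each converted document; a short sketch using the `documents` list produced above:

import json

doc = documents[0]                       # a Document produced above
record = json.loads(doc.page_content)    # struct_data back as a dict
doc_id = doc.metadata['id']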