Dataset columns:
- method_name: string (length 1 to 78)
- method_body: string (length 3 to 9.66k)
- full_code: string (length 31 to 10.7k)
- docstring: string (length 4 to 4.74k)
save_agent
"""Save the underlying agent.""" return self.agent.save(file_path)
def save_agent(self, file_path: Union[Path, str]) ->None: """Save the underlying agent.""" return self.agent.save(file_path)
Save the underlying agent.
test_call
"""Test that call gives the correct answer.""" search = GoldenQueryAPIWrapper() output = json.loads(search.run('companies in nanotech')) assert len(output.get('results', [])) > 0
def test_call() ->None: """Test that call gives the correct answer.""" search = GoldenQueryAPIWrapper() output = json.loads(search.run('companies in nanotech')) assert len(output.get('results', [])) > 0
Test that call gives the correct answer.
_collection
return self._typesense_client.collections[self._typesense_collection_name]
@property def _collection(self) ->Collection: return self._typesense_client.collections[self._typesense_collection_name]
null
add_texts
"""Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional ids of each text object. timestamp: Optional timestamp to write new texts with. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ tiledb_vs, tiledb = dependable_tiledb_import() embeddings = self.embedding.embed_documents(list(texts)) if ids is None: ids = [str(random.randint(0, MAX_UINT64 - 1)) for _ in texts] external_ids = np.array(ids).astype(np.uint64) vectors = np.empty(len(embeddings), dtype='O') for i in range(len(embeddings)): vectors[i] = np.array(embeddings[i], dtype=np.float32) self.vector_index.update_batch(vectors=vectors, external_ids=external_ids, timestamp=timestamp if timestamp != 0 else None) docs = {} docs['text'] = np.array(texts) if metadatas is not None: metadata_attr = np.empty([len(metadatas)], dtype=object) i = 0 for metadata in metadatas: metadata_attr[i] = np.frombuffer(pickle.dumps(metadata), dtype=np.uint8 ) i += 1 docs['metadata'] = metadata_attr docs_array = tiledb.open(self.docs_array_uri, 'w', timestamp=timestamp if timestamp != 0 else None, config=self.config) docs_array[external_ids] = docs docs_array.close() return ids
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]= None, ids: Optional[List[str]]=None, timestamp: int=0, **kwargs: Any ) ->List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional ids of each text object. timestamp: Optional timestamp to write new texts with. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ tiledb_vs, tiledb = dependable_tiledb_import() embeddings = self.embedding.embed_documents(list(texts)) if ids is None: ids = [str(random.randint(0, MAX_UINT64 - 1)) for _ in texts] external_ids = np.array(ids).astype(np.uint64) vectors = np.empty(len(embeddings), dtype='O') for i in range(len(embeddings)): vectors[i] = np.array(embeddings[i], dtype=np.float32) self.vector_index.update_batch(vectors=vectors, external_ids= external_ids, timestamp=timestamp if timestamp != 0 else None) docs = {} docs['text'] = np.array(texts) if metadatas is not None: metadata_attr = np.empty([len(metadatas)], dtype=object) i = 0 for metadata in metadatas: metadata_attr[i] = np.frombuffer(pickle.dumps(metadata), dtype= np.uint8) i += 1 docs['metadata'] = metadata_attr docs_array = tiledb.open(self.docs_array_uri, 'w', timestamp=timestamp if timestamp != 0 else None, config=self.config) docs_array[external_ids] = docs docs_array.close() return ids
Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional ids of each text object. timestamp: Optional timestamp to write new texts with. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore.
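A minimal usage sketch for the method above, assuming `store` is an already-initialized TileDB-backed vector store instance (the variable name and prior setup are illustrative, not part of the source):

.. code-block:: python

    # hypothetical: `store` was constructed earlier from an embedding model and array URIs
    ids = store.add_texts(
        texts=["alpha", "beta"],
        metadatas=[{"lang": "en"}, {"lang": "en"}],
    )
    # when `ids` is not supplied, random uint64 identifiers are generated, as in the body above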
test_load_openai_llm
llm = OpenAI(model='davinci', temperature=0.5, openai_api_key='hello') llm_obj = dumpd(llm) llm2 = load(llm_obj, secrets_map={'OPENAI_API_KEY': 'hello'}) assert llm2 == llm assert dumpd(llm2) == llm_obj assert isinstance(llm2, OpenAI)
@pytest.mark.requires('openai') def test_load_openai_llm() ->None: llm = OpenAI(model='davinci', temperature=0.5, openai_api_key='hello') llm_obj = dumpd(llm) llm2 = load(llm_obj, secrets_map={'OPENAI_API_KEY': 'hello'}) assert llm2 == llm assert dumpd(llm2) == llm_obj assert isinstance(llm2, OpenAI)
null
embed_query
"""Embed a query using a Bookend deployed embeddings model. Args: text: The text to embed. Returns: Embeddings for the text. """ return self.embed_documents([text])[0]
def embed_query(self, text: str) ->List[float]: """Embed a query using a Bookend deployed embeddings model. Args: text: The text to embed. Returns: Embeddings for the text. """ return self.embed_documents([text])[0]
Embed a query using a Bookend deployed embeddings model. Args: text: The text to embed. Returns: Embeddings for the text.
input_keys
"""Will be whatever keys the LLM chain prompt expects. :meta private: """ return self.routing_keys
@property def input_keys(self) ->List[str]: """Will be whatever keys the LLM chain prompt expects. :meta private: """ return self.routing_keys
Will be whatever keys the LLM chain prompt expects. :meta private:
__init__
"""original doc""" pass
def __init__(self) ->None: """original doc""" pass
original doc
from_secrets
"""Create a TwitterTweetLoader from access tokens and secrets.""" tweepy = _dependable_tweepy_import() auth = tweepy.OAuthHandler(access_token=access_token, access_token_secret= access_token_secret, consumer_key=consumer_key, consumer_secret= consumer_secret) return cls(auth_handler=auth, twitter_users=twitter_users, number_tweets= number_tweets)
@classmethod def from_secrets(cls, access_token: str, access_token_secret: str, consumer_key: str, consumer_secret: str, twitter_users: Sequence[str], number_tweets: Optional[int]=100) ->TwitterTweetLoader: """Create a TwitterTweetLoader from access tokens and secrets.""" tweepy = _dependable_tweepy_import() auth = tweepy.OAuthHandler(access_token=access_token, access_token_secret=access_token_secret, consumer_key=consumer_key, consumer_secret=consumer_secret) return cls(auth_handler=auth, twitter_users=twitter_users, number_tweets=number_tweets)
Create a TwitterTweetLoader from access tokens and secrets.
parse_json_markdown
""" Parse a JSON string from a Markdown string. Args: json_string: The Markdown string. Returns: The parsed JSON object as a Python dictionary. """ match = re.search('```(json)?(.*)```', json_string, re.DOTALL) if match is None: json_str = json_string else: json_str = match.group(2) json_str = json_str.strip() json_str = _custom_parser(json_str) parsed = parser(json_str) return parsed
def parse_json_markdown(json_string: str, *, parser: Callable[[str], Any]= parse_partial_json) ->dict: """ Parse a JSON string from a Markdown string. Args: json_string: The Markdown string. Returns: The parsed JSON object as a Python dictionary. """ match = re.search('```(json)?(.*)```', json_string, re.DOTALL) if match is None: json_str = json_string else: json_str = match.group(2) json_str = json_str.strip() json_str = _custom_parser(json_str) parsed = parser(json_str) return parsed
Parse a JSON string from a Markdown string. Args: json_string: The Markdown string. Returns: The parsed JSON object as a Python dictionary.
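A small usage sketch of the parser defined above; the input strings are invented for illustration:

.. code-block:: python

    text = 'Here is the plan:\n```json\n{"action": "search", "action_input": "weather in SF"}\n```'
    parse_json_markdown(text)
    # -> {'action': 'search', 'action_input': 'weather in SF'}

    # strings without a fenced block fall through to plain JSON parsing:
    parse_json_markdown('{"ok": true}')
    # -> {'ok': True}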
from_embeddings
"""Construct Annoy wrapper from embeddings. Args: text_embeddings: List of tuples of (text, embedding) embedding: Embedding function to use. metadatas: List of metadata dictionaries to associate with documents. metric: Metric to use for indexing. Defaults to "angular". trees: Number of trees to use for indexing. Defaults to 100. n_jobs: Number of jobs to use for indexing. Defaults to -1 This is a user friendly interface that: 1. Creates an in memory docstore with provided embeddings 2. Initializes the Annoy database This is intended to be a quick way to get started. Example: .. code-block:: python from langchain_community.vectorstores import Annoy from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) db = Annoy.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from(texts, embeddings, embedding, metadatas, metric, trees, n_jobs, **kwargs)
@classmethod def from_embeddings(cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]]=None, metric: str=DEFAULT_METRIC, trees: int=100, n_jobs: int=-1, **kwargs: Any) ->Annoy: """Construct Annoy wrapper from embeddings. Args: text_embeddings: List of tuples of (text, embedding) embedding: Embedding function to use. metadatas: List of metadata dictionaries to associate with documents. metric: Metric to use for indexing. Defaults to "angular". trees: Number of trees to use for indexing. Defaults to 100. n_jobs: Number of jobs to use for indexing. Defaults to -1 This is a user friendly interface that: 1. Creates an in memory docstore with provided embeddings 2. Initializes the Annoy database This is intended to be a quick way to get started. Example: .. code-block:: python from langchain_community.vectorstores import Annoy from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) db = Annoy.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from(texts, embeddings, embedding, metadatas, metric, trees, n_jobs, **kwargs)
Construct Annoy wrapper from embeddings. Args: text_embeddings: List of tuples of (text, embedding) embedding: Embedding function to use. metadatas: List of metadata dictionaries to associate with documents. metric: Metric to use for indexing. Defaults to "angular". trees: Number of trees to use for indexing. Defaults to 100. n_jobs: Number of jobs to use for indexing. Defaults to -1 This is a user friendly interface that: 1. Creates an in memory docstore with provided embeddings 2. Initializes the Annoy database This is intended to be a quick way to get started. Example: .. code-block:: python from langchain_community.vectorstores import Annoy from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) db = Annoy.from_embeddings(text_embedding_pairs, embeddings)
_stream
generation_config = kwargs.get('generation_config', {}) if stop: generation_config['stop_sequences'] = stop for stream_resp in completion_with_retry(self, prompt, stream=True, is_gemini=True, run_manager=run_manager, generation_config=generation_config, **kwargs): chunk = GenerationChunk(text=stream_resp.text) yield chunk if run_manager: run_manager.on_llm_new_token(stream_resp.text, chunk=chunk, verbose=self.verbose)
def _stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[GenerationChunk]: generation_config = kwargs.get('generation_config', {}) if stop: generation_config['stop_sequences'] = stop for stream_resp in completion_with_retry(self, prompt, stream=True, is_gemini=True, run_manager=run_manager, generation_config=generation_config, **kwargs): chunk = GenerationChunk(text=stream_resp.text) yield chunk if run_manager: run_manager.on_llm_new_token(stream_resp.text, chunk=chunk, verbose=self.verbose)
null
parse_issues
parsed = [] for issue in issues['issues']: key = issue['key'] summary = issue['fields']['summary'] created = issue['fields']['created'][0:10] priority = issue['fields']['priority']['name'] status = issue['fields']['status']['name'] try: assignee = issue['fields']['assignee']['displayName'] except Exception: assignee = 'None' rel_issues = {} for related_issue in issue['fields']['issuelinks']: if 'inwardIssue' in related_issue.keys(): rel_type = related_issue['type']['inward'] rel_key = related_issue['inwardIssue']['key'] rel_summary = related_issue['inwardIssue']['fields']['summary'] if 'outwardIssue' in related_issue.keys(): rel_type = related_issue['type']['outward'] rel_key = related_issue['outwardIssue']['key'] rel_summary = related_issue['outwardIssue']['fields']['summary'] rel_issues = {'type': rel_type, 'key': rel_key, 'summary': rel_summary} parsed.append({'key': key, 'summary': summary, 'created': created, 'assignee': assignee, 'priority': priority, 'status': status, 'related_issues': rel_issues}) return parsed
def parse_issues(self, issues: Dict) ->List[dict]: parsed = [] for issue in issues['issues']: key = issue['key'] summary = issue['fields']['summary'] created = issue['fields']['created'][0:10] priority = issue['fields']['priority']['name'] status = issue['fields']['status']['name'] try: assignee = issue['fields']['assignee']['displayName'] except Exception: assignee = 'None' rel_issues = {} for related_issue in issue['fields']['issuelinks']: if 'inwardIssue' in related_issue.keys(): rel_type = related_issue['type']['inward'] rel_key = related_issue['inwardIssue']['key'] rel_summary = related_issue['inwardIssue']['fields']['summary'] if 'outwardIssue' in related_issue.keys(): rel_type = related_issue['type']['outward'] rel_key = related_issue['outwardIssue']['key'] rel_summary = related_issue['outwardIssue']['fields']['summary' ] rel_issues = {'type': rel_type, 'key': rel_key, 'summary': rel_summary} parsed.append({'key': key, 'summary': summary, 'created': created, 'assignee': assignee, 'priority': priority, 'status': status, 'related_issues': rel_issues}) return parsed
null
func_call
func = self._match_func_name(str(func_name)) if isinstance(func, Comparator): if self.allowed_attributes and args[0] not in self.allowed_attributes: raise ValueError( f'Received invalid attributes {args[0]}. Allowed attributes are {self.allowed_attributes}' ) return Comparison(comparator=func, attribute=args[0], value=args[1]) elif len(args) == 1 and func in (Operator.AND, Operator.OR): return args[0] else: return Operation(operator=func, arguments=args)
def func_call(self, func_name: Any, args: list) ->FilterDirective: func = self._match_func_name(str(func_name)) if isinstance(func, Comparator): if self.allowed_attributes and args[0] not in self.allowed_attributes: raise ValueError( f'Received invalid attributes {args[0]}. Allowed attributes are {self.allowed_attributes}' ) return Comparison(comparator=func, attribute=args[0], value=args[1]) elif len(args) == 1 and func in (Operator.AND, Operator.OR): return args[0] else: return Operation(operator=func, arguments=args)
null
from_texts
"""Return VectorStore initialized from texts and optional metadatas.""" sample_embedding = embedding.embed_query('Hello pgvecto_rs!') dimension = len(sample_embedding) if db_url is None: raise ValueError('db_url must be provided') _self: PGVecto_rs = cls(embedding=embedding, dimension=dimension, db_url= db_url, collection_name=collection_name, new_table=True) _self.add_texts(texts, metadatas, **kwargs) return _self
@classmethod def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]]=None, db_url: str='', collection_name: str=str( uuid.uuid4().hex), **kwargs: Any) ->PGVecto_rs: """Return VectorStore initialized from texts and optional metadatas.""" sample_embedding = embedding.embed_query('Hello pgvecto_rs!') dimension = len(sample_embedding) if db_url is None: raise ValueError('db_url must be provided') _self: PGVecto_rs = cls(embedding=embedding, dimension=dimension, db_url=db_url, collection_name=collection_name, new_table=True) _self.add_texts(texts, metadatas, **kwargs) return _self
Return VectorStore initialized from texts and optional metadatas.
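A usage sketch under stated assumptions: `embeddings` stands for any Embeddings implementation, and the connection URL format follows the usual SQLAlchemy/psycopg convention for pgvecto.rs (not confirmed by this snippet):

.. code-block:: python

    db = PGVecto_rs.from_texts(
        texts=["foo", "bar"],
        embedding=embeddings,  # assumed: any Embeddings implementation
        db_url="postgresql+psycopg://postgres:password@localhost:5432/postgres",  # assumed URL format
    )
    docs = db.similarity_search("foo", k=1)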
load
"""Loads the data from the persist_path"""
@abstractmethod def load(self) ->Any: """Loads the data from the persist_path"""
Loads the data from the persist_path
test_add_user_message
zep_chat.add_user_message('test message') zep_chat.zep_client.memory.add_memory.assert_called_once()
@pytest.mark.requires('zep_python') def test_add_user_message(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) ->None: zep_chat.add_user_message('test message') zep_chat.zep_client.memory.add_memory.assert_called_once()
null
max_marginal_relevance_search_by_vector
"""Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. text_in_page_content: Filter by the text in page_content of Document. meta_filter (Optional[dict]): Filter by metadata. Defaults to None. Returns: List of Documents selected by maximal marginal relevance. """ if self.awadb_client is None: raise ValueError('AwaDB client is None!!!') results: List[Document] = [] if embedding is None: return results not_include_fields: set = {'_id', 'score'} retrieved_docs = self.similarity_search_by_vector(embedding, fetch_k, text_in_page_content=text_in_page_content, meta_filter=meta_filter, not_include_fields_in_metadata=not_include_fields) top_embeddings = [] for doc in retrieved_docs: top_embeddings.append(doc.metadata['text_embedding']) selected_docs = maximal_marginal_relevance(np.array(embedding, dtype=np. float32), embedding_list=top_embeddings) for s_id in selected_docs: if 'text_embedding' in retrieved_docs[s_id].metadata: del retrieved_docs[s_id].metadata['text_embedding'] results.append(retrieved_docs[s_id]) return results
def max_marginal_relevance_search_by_vector(self, embedding: List[float], k: int=4, fetch_k: int=20, lambda_mult: float=0.5, text_in_page_content: Optional[str]=None, meta_filter: Optional[dict]=None, **kwargs: Any ) ->List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. text_in_page_content: Filter by the text in page_content of Document. meta_filter (Optional[dict]): Filter by metadata. Defaults to None. Returns: List of Documents selected by maximal marginal relevance. """ if self.awadb_client is None: raise ValueError('AwaDB client is None!!!') results: List[Document] = [] if embedding is None: return results not_include_fields: set = {'_id', 'score'} retrieved_docs = self.similarity_search_by_vector(embedding, fetch_k, text_in_page_content=text_in_page_content, meta_filter=meta_filter, not_include_fields_in_metadata=not_include_fields) top_embeddings = [] for doc in retrieved_docs: top_embeddings.append(doc.metadata['text_embedding']) selected_docs = maximal_marginal_relevance(np.array(embedding, dtype=np .float32), embedding_list=top_embeddings) for s_id in selected_docs: if 'text_embedding' in retrieved_docs[s_id].metadata: del retrieved_docs[s_id].metadata['text_embedding'] results.append(retrieved_docs[s_id]) return results
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. text_in_page_content: Filter by the text in page_content of Document. meta_filter (Optional[dict]): Filter by metadata. Defaults to None. Returns: List of Documents selected by maximal marginal relevance.
_chain_type
return 'sql_database_chain'
@property def _chain_type(self) ->str: return 'sql_database_chain'
null
_to_chatml_format
"""Convert LangChain message to ChatML format.""" if isinstance(message, SystemMessage): role = 'system' elif isinstance(message, AIMessage): role = 'assistant' elif isinstance(message, HumanMessage): role = 'user' else: raise ValueError(f'Unknown message type: {type(message)}') return {'role': role, 'content': message.content}
def _to_chatml_format(self, message: BaseMessage) ->dict: """Convert LangChain message to ChatML format.""" if isinstance(message, SystemMessage): role = 'system' elif isinstance(message, AIMessage): role = 'assistant' elif isinstance(message, HumanMessage): role = 'user' else: raise ValueError(f'Unknown message type: {type(message)}') return {'role': role, 'content': message.content}
Convert LangChain message to ChatML format.
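A sketch of the mapping, assuming `chat` is an instance of the chat wrapper that owns this method; the message classes are the standard ones from langchain_core.messages:

.. code-block:: python

    from langchain_core.messages import AIMessage, HumanMessage, SystemMessage

    chat._to_chatml_format(SystemMessage(content="You are terse."))
    # -> {'role': 'system', 'content': 'You are terse.'}
    chat._to_chatml_format(HumanMessage(content="Hi there"))
    # -> {'role': 'user', 'content': 'Hi there'}
    chat._to_chatml_format(AIMessage(content="Hello!"))
    # -> {'role': 'assistant', 'content': 'Hello!'}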
_load_module_members
"""Load all members of a module. Args: module_path: Path to the module. namespace: the namespace of the module. Returns: list: A list of loaded module objects. """ classes_: List[ClassInfo] = [] functions: List[FunctionInfo] = [] module = importlib.import_module(module_path) for name, type_ in inspect.getmembers(module): if not hasattr(type_, '__module__'): continue if type_.__module__ != module_path: continue if inspect.isclass(type_): if type(type_) == typing._TypedDictMeta: kind: ClassKind = 'TypedDict' elif issubclass(type_, Enum): kind = 'enum' elif issubclass(type_, BaseModel): kind = 'Pydantic' else: kind = 'Regular' classes_.append(ClassInfo(name=name, qualified_name= f'{namespace}.{name}', kind=kind, is_public=not name.startswith ('_'))) elif inspect.isfunction(type_): functions.append(FunctionInfo(name=name, qualified_name= f'{namespace}.{name}', is_public=not name.startswith('_'))) else: continue return ModuleMembers(classes_=classes_, functions=functions)
def _load_module_members(module_path: str, namespace: str) ->ModuleMembers: """Load all members of a module. Args: module_path: Path to the module. namespace: the namespace of the module. Returns: list: A list of loaded module objects. """ classes_: List[ClassInfo] = [] functions: List[FunctionInfo] = [] module = importlib.import_module(module_path) for name, type_ in inspect.getmembers(module): if not hasattr(type_, '__module__'): continue if type_.__module__ != module_path: continue if inspect.isclass(type_): if type(type_) == typing._TypedDictMeta: kind: ClassKind = 'TypedDict' elif issubclass(type_, Enum): kind = 'enum' elif issubclass(type_, BaseModel): kind = 'Pydantic' else: kind = 'Regular' classes_.append(ClassInfo(name=name, qualified_name= f'{namespace}.{name}', kind=kind, is_public=not name. startswith('_'))) elif inspect.isfunction(type_): functions.append(FunctionInfo(name=name, qualified_name= f'{namespace}.{name}', is_public=not name.startswith('_'))) else: continue return ModuleMembers(classes_=classes_, functions=functions)
Load all members of a module. Args: module_path: Path to the module. namespace: the namespace of the module. Returns: list: A list of loaded module objects.
max_marginal_relevance_search_by_vector
"""Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ indices_dists = self._similarity_index_search_with_score(embedding, k= fetch_k, **kwargs) indices, _ = zip(*indices_dists) result_embeddings = self._embeddings_np[indices,] mmr_selected = maximal_marginal_relevance(self._np.array(embedding, dtype= self._np.float32), result_embeddings, k=k, lambda_mult=lambda_mult) mmr_indices = [indices[i] for i in mmr_selected] return [Document(page_content=self._texts[idx], metadata={'id': self._ids[ idx], **self._metadatas[idx]}) for idx in mmr_indices]
def max_marginal_relevance_search_by_vector(self, embedding: List[float], k: int=DEFAULT_K, fetch_k: int=DEFAULT_FETCH_K, lambda_mult: float=0.5, ** kwargs: Any) ->List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ indices_dists = self._similarity_index_search_with_score(embedding, k= fetch_k, **kwargs) indices, _ = zip(*indices_dists) result_embeddings = self._embeddings_np[indices,] mmr_selected = maximal_marginal_relevance(self._np.array(embedding, dtype=self._np.float32), result_embeddings, k=k, lambda_mult= lambda_mult) mmr_indices = [indices[i] for i in mmr_selected] return [Document(page_content=self._texts[idx], metadata={'id': self. _ids[idx], **self._metadatas[idx]}) for idx in mmr_indices]
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance.
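A calling sketch; `store` and `embeddings` are placeholders for an already-constructed vector store exposing this method and its embedding model:

.. code-block:: python

    query_vec = embeddings.embed_query("how do I reset my password?")
    docs = store.max_marginal_relevance_search_by_vector(
        query_vec, k=4, fetch_k=20, lambda_mult=0.5
    )
    # lambda_mult closer to 0 favors diversity; closer to 1 favors similarity to the query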
create_llm_result
"""Create the LLMResult from the choices and prompts.""" generations = [] n = params.get('n', self.n) for i, _ in enumerate(prompts): sub_choices = choices[i * n:(i + 1) * n] generations.append([Generation(text=choice['text'], generation_info= dict(finish_reason=choice.get('finish_reason'), logprobs=choice.get ('logprobs'))) for choice in sub_choices]) llm_output = {'token_usage': token_usage, 'model_name': self.model_name} if system_fingerprint: llm_output['system_fingerprint'] = system_fingerprint return LLMResult(generations=generations, llm_output=llm_output)
def create_llm_result(self, choices: Any, prompts: List[str], params: Dict[ str, Any], token_usage: Dict[str, int], *, system_fingerprint: Optional [str]=None) ->LLMResult: """Create the LLMResult from the choices and prompts.""" generations = [] n = params.get('n', self.n) for i, _ in enumerate(prompts): sub_choices = choices[i * n:(i + 1) * n] generations.append([Generation(text=choice['text'], generation_info =dict(finish_reason=choice.get('finish_reason'), logprobs= choice.get('logprobs'))) for choice in sub_choices]) llm_output = {'token_usage': token_usage, 'model_name': self.model_name} if system_fingerprint: llm_output['system_fingerprint'] = system_fingerprint return LLMResult(generations=generations, llm_output=llm_output)
Create the LLMResult from the choices and prompts.
validate_runnable_agent
"""Convert runnable to agent if passed in.""" agent = values['agent'] if isinstance(agent, Runnable): try: output_type = agent.OutputType except Exception as _: multi_action = False else: multi_action = output_type == Union[List[AgentAction], AgentFinish] if multi_action: values['agent'] = RunnableMultiActionAgent(runnable=agent) else: values['agent'] = RunnableAgent(runnable=agent) return values
@root_validator(pre=True) def validate_runnable_agent(cls, values: Dict) ->Dict: """Convert runnable to agent if passed in.""" agent = values['agent'] if isinstance(agent, Runnable): try: output_type = agent.OutputType except Exception as _: multi_action = False else: multi_action = output_type == Union[List[AgentAction], AgentFinish] if multi_action: values['agent'] = RunnableMultiActionAgent(runnable=agent) else: values['agent'] = RunnableAgent(runnable=agent) return values
Convert runnable to agent if passed in.
test_action
"""Test standard parsing of action/action input.""" parser = ReActJsonSingleInputOutputParser() _input = """Thought: agent thought here ``` { "action": "search", "action_input": "what is the temperature in SF?" } ``` """ output = parser.invoke(_input) expected_output = AgentAction(tool='search', tool_input= 'what is the temperature in SF?', log=_input) assert output == expected_output
def test_action() ->None: """Test standard parsing of action/action input.""" parser = ReActJsonSingleInputOutputParser() _input = """Thought: agent thought here ``` { "action": "search", "action_input": "what is the temperature in SF?" } ``` """ output = parser.invoke(_input) expected_output = AgentAction(tool='search', tool_input= 'what is the temperature in SF?', log=_input) assert output == expected_output
Test standard parsing of action/action input.
test_confluence_loader_initialization_from_env
with unittest.mock.patch.dict('os.environ', {'CONFLUENCE_USERNAME': self.MOCK_USERNAME, 'CONFLUENCE_API_TOKEN': self.MOCK_API_TOKEN}): ConfluenceLoader(url=self.CONFLUENCE_URL) mock_confluence.assert_called_with(url=self.CONFLUENCE_URL, username=None, password=None, cloud=True)
def test_confluence_loader_initialization_from_env(self, mock_confluence: MagicMock) ->None: with unittest.mock.patch.dict('os.environ', {'CONFLUENCE_USERNAME': self.MOCK_USERNAME, 'CONFLUENCE_API_TOKEN': self.MOCK_API_TOKEN}): ConfluenceLoader(url=self.CONFLUENCE_URL) mock_confluence.assert_called_with(url=self.CONFLUENCE_URL, username=None, password=None, cloud=True)
null
test_metadata_with_template_vars_in_frontmatter
"""Verify frontmatter fields with template variables are loaded.""" doc = next(doc for doc in docs if doc.metadata['source'] == 'template_var_frontmatter.md') FRONTMATTER_FIELDS = {'aString', 'anArray', 'aDict', 'tags'} assert set(doc.metadata) == FRONTMATTER_FIELDS | STANDARD_METADATA_FIELDS assert doc.metadata['aString'] == '{{var}}' assert doc.metadata['anArray'] == "['element', '{{varElement}}']" assert doc.metadata['aDict'] == "{'dictId1': 'val', 'dictId2': '{{varVal}}'}" assert set(doc.metadata['tags'].split(',')) == {'tag', '{{varTag}}'}
def test_metadata_with_template_vars_in_frontmatter() ->None: """Verify frontmatter fields with template variables are loaded.""" doc = next(doc for doc in docs if doc.metadata['source'] == 'template_var_frontmatter.md') FRONTMATTER_FIELDS = {'aString', 'anArray', 'aDict', 'tags'} assert set(doc.metadata) == FRONTMATTER_FIELDS | STANDARD_METADATA_FIELDS assert doc.metadata['aString'] == '{{var}}' assert doc.metadata['anArray'] == "['element', '{{varElement}}']" assert doc.metadata['aDict'] == "{'dictId1': 'val', 'dictId2': '{{varVal}}'}" assert set(doc.metadata['tags'].split(',')) == {'tag', '{{varTag}}'}
Verify frontmatter fields with template variables are loaded.
create_extraction_chain_pydantic
"""Creates a chain that extracts information from a passage using pydantic schema. Args: pydantic_schema: The pydantic schema of the entities to extract. llm: The language model to use. prompt: The prompt to use for extraction. verbose: Whether to run in verbose mode. In verbose mode, some intermediate logs will be printed to the console. Defaults to the global `verbose` value, accessible via `langchain.globals.get_verbose()` Returns: Chain that can be used to extract information from a passage. """ class PydanticSchema(BaseModel): info: List[pydantic_schema] openai_schema = pydantic_schema.schema() openai_schema = _resolve_schema_references(openai_schema, openai_schema.get ('definitions', {})) function = _get_extraction_function(openai_schema) extraction_prompt = prompt or ChatPromptTemplate.from_template( _EXTRACTION_TEMPLATE) output_parser = PydanticAttrOutputFunctionsParser(pydantic_schema= PydanticSchema, attr_name='info') llm_kwargs = get_llm_kwargs(function) chain = LLMChain(llm=llm, prompt=extraction_prompt, llm_kwargs=llm_kwargs, output_parser=output_parser, verbose=verbose) return chain
def create_extraction_chain_pydantic(pydantic_schema: Any, llm: BaseLanguageModel, prompt: Optional[BasePromptTemplate]=None, verbose: bool=False) ->Chain: """Creates a chain that extracts information from a passage using pydantic schema. Args: pydantic_schema: The pydantic schema of the entities to extract. llm: The language model to use. prompt: The prompt to use for extraction. verbose: Whether to run in verbose mode. In verbose mode, some intermediate logs will be printed to the console. Defaults to the global `verbose` value, accessible via `langchain.globals.get_verbose()` Returns: Chain that can be used to extract information from a passage. """ class PydanticSchema(BaseModel): info: List[pydantic_schema] openai_schema = pydantic_schema.schema() openai_schema = _resolve_schema_references(openai_schema, openai_schema .get('definitions', {})) function = _get_extraction_function(openai_schema) extraction_prompt = prompt or ChatPromptTemplate.from_template( _EXTRACTION_TEMPLATE) output_parser = PydanticAttrOutputFunctionsParser(pydantic_schema= PydanticSchema, attr_name='info') llm_kwargs = get_llm_kwargs(function) chain = LLMChain(llm=llm, prompt=extraction_prompt, llm_kwargs= llm_kwargs, output_parser=output_parser, verbose=verbose) return chain
Creates a chain that extracts information from a passage using pydantic schema. Args: pydantic_schema: The pydantic schema of the entities to extract. llm: The language model to use. prompt: The prompt to use for extraction. verbose: Whether to run in verbose mode. In verbose mode, some intermediate logs will be printed to the console. Defaults to the global `verbose` value, accessible via `langchain.globals.get_verbose()` Returns: Chain that can be used to extract information from a passage.
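A usage sketch, assuming `llm` is bound to a function-calling-capable chat model; the schema class and input text are illustrative:

.. code-block:: python

    from pydantic import BaseModel  # the chain calls .schema() on this class (pydantic v1-style API)

    class Person(BaseModel):
        name: str
        age: int

    chain = create_extraction_chain_pydantic(pydantic_schema=Person, llm=llm)
    chain.run("Alex is 30 years old and Claudia is one year older than Alex.")
    # e.g. [Person(name='Alex', age=30), Person(name='Claudia', age=31)]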
create
""" Create a KayRetriever given a Kay dataset id and a list of datasources. Args: dataset_id: A dataset id category in Kay, like "company" data_types: A list of datasources present within a dataset. For "company" the corresponding datasources could be ["10-K", "10-Q", "8-K", "PressRelease"]. num_contexts: The number of documents to retrieve on each query. Defaults to 6. """ try: from kay.rag.retrievers import KayRetriever except ImportError: raise ImportError( 'Could not import kay python package. Please install it with `pip install kay`.' ) client = KayRetriever(dataset_id, data_types) return cls(client=client, num_contexts=num_contexts)
@classmethod def create(cls, dataset_id: str, data_types: List[str], num_contexts: int=6 ) ->KayAiRetriever: """ Create a KayRetriever given a Kay dataset id and a list of datasources. Args: dataset_id: A dataset id category in Kay, like "company" data_types: A list of datasources present within a dataset. For "company" the corresponding datasources could be ["10-K", "10-Q", "8-K", "PressRelease"]. num_contexts: The number of documents to retrieve on each query. Defaults to 6. """ try: from kay.rag.retrievers import KayRetriever except ImportError: raise ImportError( 'Could not import kay python package. Please install it with `pip install kay`.' ) client = KayRetriever(dataset_id, data_types) return cls(client=client, num_contexts=num_contexts)
Create a KayRetriever given a Kay dataset id and a list of datasources. Args: dataset_id: A dataset id category in Kay, like "company" data_types: A list of datasources present within a dataset. For "company" the corresponding datasources could be ["10-K", "10-Q", "8-K", "PressRelease"]. num_contexts: The number of documents to retrieve on each query. Defaults to 6.
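A usage sketch grounded in the docstring above; the query text is invented:

.. code-block:: python

    retriever = KayAiRetriever.create(
        dataset_id="company",
        data_types=["10-K", "10-Q", "PressRelease"],
        num_contexts=3,
    )
    docs = retriever.get_relevant_documents("What were the latest quarterly results?")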
lazy_parse
"""Lazily parse the blob.""" import pdfplumber with blob.as_bytes_io() as file_path: doc = pdfplumber.open(file_path) yield from [Document(page_content=self._process_page_content(page) + '\n' + self._extract_images_from_page(page), metadata=dict({ 'source': blob.source, 'file_path': blob.source, 'page': page. page_number - 1, 'total_pages': len(doc.pages)}, **{k: doc.metadata [k] for k in doc.metadata if type(doc.metadata[k]) in [str, int]})) for page in doc.pages]
def lazy_parse(self, blob: Blob) ->Iterator[Document]: """Lazily parse the blob.""" import pdfplumber with blob.as_bytes_io() as file_path: doc = pdfplumber.open(file_path) yield from [Document(page_content=self._process_page_content(page) + '\n' + self._extract_images_from_page(page), metadata=dict({ 'source': blob.source, 'file_path': blob.source, 'page': page. page_number - 1, 'total_pages': len(doc.pages)}, **{k: doc. metadata[k] for k in doc.metadata if type(doc.metadata[k]) in [ str, int]})) for page in doc.pages]
Lazily parse the blob.
_create_collection
from transwarp_hippo_api.hippo_client import HippoField from transwarp_hippo_api.hippo_type import HippoType dim = len(embeddings[0]) logger.debug(f'[_create_collection] dim: {dim}') fields = [] fields.append(HippoField(self._primary_field, True, HippoType.STRING)) fields.append(HippoField(self._text_field, False, HippoType.STRING)) fields.append(HippoField(self._vector_field, False, HippoType.FLOAT_VECTOR, type_params={'dimension': dim})) if metadatas: for key, value in metadatas[0].items(): if isinstance(value, list): value_dim = len(value) fields.append(HippoField(key, False, HippoType.FLOAT_VECTOR, type_params={'dimension': value_dim})) else: fields.append(HippoField(key, False, HippoType.STRING)) logger.debug(f'[_create_collection] fields: {fields}') self.hc.create_table(name=self.table_name, auto_id=True, fields=fields, database_name=self.database_name, number_of_shards=self. number_of_shards, number_of_replicas=self.number_of_replicas) self.col = self.hc.get_table(self.table_name, self.database_name) logger.info( f'[_create_collection] : create table {self.table_name} in {self.database_name} successfully' )
def _create_collection(self, embeddings: list, metadatas: Optional[List[ dict]]=None) ->None: from transwarp_hippo_api.hippo_client import HippoField from transwarp_hippo_api.hippo_type import HippoType dim = len(embeddings[0]) logger.debug(f'[_create_collection] dim: {dim}') fields = [] fields.append(HippoField(self._primary_field, True, HippoType.STRING)) fields.append(HippoField(self._text_field, False, HippoType.STRING)) fields.append(HippoField(self._vector_field, False, HippoType. FLOAT_VECTOR, type_params={'dimension': dim})) if metadatas: for key, value in metadatas[0].items(): if isinstance(value, list): value_dim = len(value) fields.append(HippoField(key, False, HippoType.FLOAT_VECTOR, type_params={'dimension': value_dim})) else: fields.append(HippoField(key, False, HippoType.STRING)) logger.debug(f'[_create_collection] fields: {fields}') self.hc.create_table(name=self.table_name, auto_id=True, fields=fields, database_name=self.database_name, number_of_shards=self. number_of_shards, number_of_replicas=self.number_of_replicas) self.col = self.hc.get_table(self.table_name, self.database_name) logger.info( f'[_create_collection] : create table {self.table_name} in {self.database_name} successfully' )
null
__post_init__
"""Initialize the store.""" self.check_database_utf8() self.create_table_if_not_exists()
def __post_init__(self) ->None: """Initialize the store.""" self.check_database_utf8() self.create_table_if_not_exists()
Initialize the store.
embed_documents
"""Return simple embeddings.""" return [([float(1.0)] * (OS_TOKEN_COUNT - 1) + [float(i)]) for i in range( len(embedding_texts))]
def embed_documents(self, embedding_texts: List[str]) ->List[List[float]]: """Return simple embeddings.""" return [([float(1.0)] * (OS_TOKEN_COUNT - 1) + [float(i)]) for i in range(len(embedding_texts))]
Return simple embeddings.
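What the stub above returns, sketched with a hypothetical holder class name `FakeEmbeddings`; OS_TOKEN_COUNT is taken from the surrounding test module:

.. code-block:: python

    emb = FakeEmbeddings()  # hypothetical name for the class that owns this method
    vectors = emb.embed_documents(["a", "b", "c"])
    len(vectors)              # 3
    len(vectors[0])           # OS_TOKEN_COUNT
    [v[-1] for v in vectors]  # [0.0, 1.0, 2.0] -- only the last component varies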
_get_top_tasks
"""Get the top k tasks based on the query.""" results = self.vectorstore.similarity_search(query, k=k) if not results: return [] return [str(item.metadata['task']) for item in results]
def _get_top_tasks(self, query: str, k: int) ->List[str]: """Get the top k tasks based on the query.""" results = self.vectorstore.similarity_search(query, k=k) if not results: return [] return [str(item.metadata['task']) for item in results]
Get the top k tasks based on the query.
is_lc_serializable
"""Return whether this model can be serialized by Langchain.""" return False
@classmethod def is_lc_serializable(cls) ->bool: """Return whether this model can be serialized by Langchain.""" return False
Return whether this model can be serialized by Langchain.
test_rss_loader
loader = RSSFeedLoader(urls=['https://www.engadget.com/rss.xml']) docs = loader.load() assert docs[0] is not None assert hasattr(docs[0], 'page_content') assert hasattr(docs[0], 'metadata') metadata = docs[0].metadata assert 'feed' in metadata assert 'title' in metadata assert 'link' in metadata assert 'authors' in metadata assert 'language' in metadata assert 'description' in metadata assert 'publish_date' in metadata
def test_rss_loader() ->None: loader = RSSFeedLoader(urls=['https://www.engadget.com/rss.xml']) docs = loader.load() assert docs[0] is not None assert hasattr(docs[0], 'page_content') assert hasattr(docs[0], 'metadata') metadata = docs[0].metadata assert 'feed' in metadata assert 'title' in metadata assert 'link' in metadata assert 'authors' in metadata assert 'language' in metadata assert 'description' in metadata assert 'publish_date' in metadata
null
test_cpp_code_splitter
splitter = RecursiveCharacterTextSplitter.from_language(Language.CPP, chunk_size=CHUNK_SIZE, chunk_overlap=0) code = """ #include <iostream> int main() { std::cout << "Hello, World!" << std::endl; return 0; } """ chunks = splitter.split_text(code) assert chunks == ['#include', '<iostream>', 'int main() {', 'std::cout', '<< "Hello,', 'World!" <<', 'std::endl;', 'return 0;\n}']
def test_cpp_code_splitter() ->None: splitter = RecursiveCharacterTextSplitter.from_language(Language.CPP, chunk_size=CHUNK_SIZE, chunk_overlap=0) code = """ #include <iostream> int main() { std::cout << "Hello, World!" << std::endl; return 0; } """ chunks = splitter.split_text(code) assert chunks == ['#include', '<iostream>', 'int main() {', 'std::cout', '<< "Hello,', 'World!" <<', 'std::endl;', 'return 0;\n}']
null
_import_metaphor_search
from langchain_community.tools.metaphor_search import MetaphorSearchResults return MetaphorSearchResults
def _import_metaphor_search() ->Any: from langchain_community.tools.metaphor_search import MetaphorSearchResults return MetaphorSearchResults
null
on_llm_end
self.on_llm_end_common()
def on_llm_end(self, *args: Any, **kwargs: Any) ->Any: self.on_llm_end_common()
null
input_keys
return [self.input_key]
@property def input_keys(self) ->List[str]: return [self.input_key]
null
test_saving_loading_endpoint_llm
"""Test saving/loading an OctoAIHub LLM.""" llm = OctoAIEndpoint(endpoint_url= 'https://mpt-7b-demo-f1kzsig6xes9.octoai.run/generate', octoai_api_token='<octoai_api_token>', model_kwargs={'max_new_tokens': 200, 'temperature': 0.75, 'top_p': 0.95, 'repetition_penalty': 1, 'seed': None, 'stop': []}) llm.save(file_path=tmp_path / 'octoai.yaml') loaded_llm = load_llm(tmp_path / 'octoai.yaml') assert_llm_equality(llm, loaded_llm)
def test_saving_loading_endpoint_llm(tmp_path: Path) ->None: """Test saving/loading an OctoAIHub LLM.""" llm = OctoAIEndpoint(endpoint_url= 'https://mpt-7b-demo-f1kzsig6xes9.octoai.run/generate', octoai_api_token='<octoai_api_token>', model_kwargs={ 'max_new_tokens': 200, 'temperature': 0.75, 'top_p': 0.95, 'repetition_penalty': 1, 'seed': None, 'stop': []}) llm.save(file_path=tmp_path / 'octoai.yaml') loaded_llm = load_llm(tmp_path / 'octoai.yaml') assert_llm_equality(llm, loaded_llm)
Test saving/loading an OctoAIHub LLM.
test_one_namespace_w_list_of_features_w_some_emb
str1 = 'test1' str2 = 'test2' encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2)) expected = [{'test_namespace': [str1, encoded_str2]}] assert base.embed({'test_namespace': [str1, base.Embed(str2)]}, MockEncoder()) == expected
@pytest.mark.requires('vowpal_wabbit_next') def test_one_namespace_w_list_of_features_w_some_emb() ->None: str1 = 'test1' str2 = 'test2' encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2)) expected = [{'test_namespace': [str1, encoded_str2]}] assert base.embed({'test_namespace': [str1, base.Embed(str2)]}, MockEncoder()) == expected
null
bes_client
try: import elasticsearch except ImportError: raise ImportError( 'Could not import elasticsearch python package. Please install it with `pip install elasticsearch`.' ) connection_params: Dict[str, Any] = {} connection_params['hosts'] = [bes_url] if username and password: connection_params['basic_auth'] = username, password es_client = elasticsearch.Elasticsearch(**connection_params) try: es_client.info() except Exception as e: logger.error(f'Error connecting to Elasticsearch: {e}') raise e return es_client
@staticmethod def bes_client(*, bes_url: Optional[str]=None, username: Optional[str]=None, password: Optional[str]=None) ->'Elasticsearch': try: import elasticsearch except ImportError: raise ImportError( 'Could not import elasticsearch python package. Please install it with `pip install elasticsearch`.' ) connection_params: Dict[str, Any] = {} connection_params['hosts'] = [bes_url] if username and password: connection_params['basic_auth'] = username, password es_client = elasticsearch.Elasticsearch(**connection_params) try: es_client.info() except Exception as e: logger.error(f'Error connecting to Elasticsearch: {e}') raise e return es_client
null
_call
"""Increment counter, and then return response in that index.""" self.i += 1 print(f'=== Mock Response #{self.i} ===') print(self.responses[self.i]) return self.responses[self.i]
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: """Increment counter, and then return response in that index.""" self.i += 1 print(f'=== Mock Response #{self.i} ===') print(self.responses[self.i]) return self.responses[self.i]
Increment counter, and then return response in that index.
_import_wolfram_alpha
from langchain_community.utilities.wolfram_alpha import WolframAlphaAPIWrapper return WolframAlphaAPIWrapper
def _import_wolfram_alpha() ->Any: from langchain_community.utilities.wolfram_alpha import WolframAlphaAPIWrapper return WolframAlphaAPIWrapper
null
test_tool_no_args_specified_assumes_str
"""Older tools could assume *args and **kwargs were passed in.""" def ambiguous_function(*args: Any, **kwargs: Any) ->str: """An ambiguously defined function.""" return args[0] some_tool = Tool(name='chain_run', description='Run the chain', func= ambiguous_function) expected_args = {'tool_input': {'type': 'string'}} assert some_tool.args == expected_args assert some_tool.run('foobar') == 'foobar' assert some_tool.run({'tool_input': 'foobar'}) == 'foobar' with pytest.raises(ToolException, match= 'Too many arguments to single-input tool'): some_tool.run({'tool_input': 'foobar', 'other_input': 'bar'})
def test_tool_no_args_specified_assumes_str() ->None: """Older tools could assume *args and **kwargs were passed in.""" def ambiguous_function(*args: Any, **kwargs: Any) ->str: """An ambiguously defined function.""" return args[0] some_tool = Tool(name='chain_run', description='Run the chain', func= ambiguous_function) expected_args = {'tool_input': {'type': 'string'}} assert some_tool.args == expected_args assert some_tool.run('foobar') == 'foobar' assert some_tool.run({'tool_input': 'foobar'}) == 'foobar' with pytest.raises(ToolException, match= 'Too many arguments to single-input tool'): some_tool.run({'tool_input': 'foobar', 'other_input': 'bar'})
Older tools could assume *args and **kwargs were passed in.
_validate_tools
pass
@classmethod def _validate_tools(cls, tools: Sequence[BaseTool]) ->None: pass
null
test_empty
loader = ReadTheDocsLoader(PARENT_DIR / 'custom') documents = loader.load() assert len(documents[0].page_content) == 0
@pytest.mark.requires('bs4') def test_empty() ->None: loader = ReadTheDocsLoader(PARENT_DIR / 'custom') documents = loader.load() assert len(documents[0].page_content) == 0
null
convert_python_function_to_openai_function
"""Convert a Python function to an OpenAI function-calling API compatible dict. Assumes the Python function has type hints and a docstring with a description. If the docstring has Google Python style argument descriptions, these will be included as well. """ description, arg_descriptions = _parse_python_function_docstring(function) return {'name': _get_python_function_name(function), 'description': description, 'parameters': {'type': 'object', 'properties': _get_python_function_arguments(function, arg_descriptions), 'required': _get_python_function_required_args(function)}}
def convert_python_function_to_openai_function(function: Callable) ->Dict[ str, Any]: """Convert a Python function to an OpenAI function-calling API compatible dict. Assumes the Python function has type hints and a docstring with a description. If the docstring has Google Python style argument descriptions, these will be included as well. """ description, arg_descriptions = _parse_python_function_docstring(function) return {'name': _get_python_function_name(function), 'description': description, 'parameters': {'type': 'object', 'properties': _get_python_function_arguments(function, arg_descriptions), 'required': _get_python_function_required_args(function)}}
Convert a Python function to an OpenAI function-calling API compatible dict. Assumes the Python function has type hints and a docstring with a description. If the docstring has Google Python style argument descriptions, these will be included as well.
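A sketch of input and approximate output; the exact JSON-schema details of the properties are produced by helper functions not shown here, so the result is indicated only roughly:

.. code-block:: python

    def multiply(a: int, b: int) -> int:
        """Multiply two integers.

        Args:
            a: The first factor.
            b: The second factor.
        """
        return a * b

    convert_python_function_to_openai_function(multiply)
    # roughly:
    # {'name': 'multiply',
    #  'description': 'Multiply two integers.',
    #  'parameters': {'type': 'object',
    #                 'properties': {'a': {...}, 'b': {...}},
    #                 'required': ['a', 'b']}}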
_llm_type
"""Return type of llm.""" return 'modal'
@property def _llm_type(self) ->str: """Return type of llm.""" return 'modal'
Return type of llm.
_load_file_from_id
"""Load a file from an ID.""" from io import BytesIO from googleapiclient.discovery import build from googleapiclient.http import MediaIoBaseDownload creds = self._load_credentials() service = build('drive', 'v3', credentials=creds) file = service.files().get(fileId=id, supportsAllDrives=True).execute() request = service.files().get_media(fileId=id) fh = BytesIO() downloader = MediaIoBaseDownload(fh, request) done = False while done is False: status, done = downloader.next_chunk() if self.file_loader_cls is not None: fh.seek(0) loader = self.file_loader_cls(file=fh, **self.file_loader_kwargs) docs = loader.load() for doc in docs: doc.metadata['source'] = f'https://drive.google.com/file/d/{id}/view' if 'title' not in doc.metadata: doc.metadata['title'] = f"{file.get('name')}" return docs else: from PyPDF2 import PdfReader content = fh.getvalue() pdf_reader = PdfReader(BytesIO(content)) return [Document(page_content=page.extract_text(), metadata={'source': f'https://drive.google.com/file/d/{id}/view', 'title': f"{file.get('name')}", 'page': i}) for i, page in enumerate( pdf_reader.pages)]
def _load_file_from_id(self, id: str) ->List[Document]: """Load a file from an ID.""" from io import BytesIO from googleapiclient.discovery import build from googleapiclient.http import MediaIoBaseDownload creds = self._load_credentials() service = build('drive', 'v3', credentials=creds) file = service.files().get(fileId=id, supportsAllDrives=True).execute() request = service.files().get_media(fileId=id) fh = BytesIO() downloader = MediaIoBaseDownload(fh, request) done = False while done is False: status, done = downloader.next_chunk() if self.file_loader_cls is not None: fh.seek(0) loader = self.file_loader_cls(file=fh, **self.file_loader_kwargs) docs = loader.load() for doc in docs: doc.metadata['source' ] = f'https://drive.google.com/file/d/{id}/view' if 'title' not in doc.metadata: doc.metadata['title'] = f"{file.get('name')}" return docs else: from PyPDF2 import PdfReader content = fh.getvalue() pdf_reader = PdfReader(BytesIO(content)) return [Document(page_content=page.extract_text(), metadata={ 'source': f'https://drive.google.com/file/d/{id}/view', 'title': f"{file.get('name')}", 'page': i}) for i, page in enumerate( pdf_reader.pages)]
Load a file from an ID.
test_array_metadata
"""Verify array metadata is loaded as a string""" doc = next(doc for doc in docs if doc.metadata['source'] == 'tags_and_frontmatter.md') assert doc.metadata['anArray'] == "['one', 'two', 'three']"
def test_array_metadata() ->None: """Verify array metadata is loaded as a string""" doc = next(doc for doc in docs if doc.metadata['source'] == 'tags_and_frontmatter.md') assert doc.metadata['anArray'] == "['one', 'two', 'three']"
Verify array metadata is loaded as a string
parse
steps = [Step(value=v) for v in re.split('\n\\s*\\d+\\. ', text)[1:]] return Plan(steps=steps)
def parse(self, text: str) ->Plan: steps = [Step(value=v) for v in re.split('\n\\s*\\d+\\. ', text)[1:]] return Plan(steps=steps)
null
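The regex split above drops everything before the first numbered item; a worked sketch of the splitting step (Step and Plan are the dataclasses used by the parser):

.. code-block:: python

    import re

    text = "Plan:\n1. Gather the data\n2. Clean the data\n3. Train the model"
    re.split('\n\\s*\\d+\\. ', text)[1:]
    # -> ['Gather the data', 'Clean the data', 'Train the model']
    # each remaining string becomes Step(value=...), and the steps are wrapped in a Plan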
add_texts
"""Add text to the datastore. Args: texts (List[str]): The text """ self.vectorstore.add_texts(texts)
def add_texts(self, texts: List[str]) ->None: """Add text to the datastore. Args: texts (List[str]): The text """ self.vectorstore.add_texts(texts)
Add text to the datastore. Args: texts (List[str]): The text
test_qdrant_from_texts_raises_error_on_different_vector_name
"""Test if Qdrant.from_texts raises an exception if vector name does not match""" collection_name = uuid.uuid4().hex with tempfile.TemporaryDirectory() as tmpdir: vec_store = Qdrant.from_texts(['lorem', 'ipsum', 'dolor', 'sit', 'amet' ], ConsistentFakeEmbeddings(dimensionality=10), collection_name= collection_name, path=str(tmpdir), vector_name=first_vector_name) del vec_store with pytest.raises(QdrantException): Qdrant.from_texts(['foo', 'bar'], ConsistentFakeEmbeddings( dimensionality=5), collection_name=collection_name, path=str( tmpdir), vector_name=second_vector_name)
@pytest.mark.parametrize(['first_vector_name', 'second_vector_name'], [( None, 'custom-vector'), ('custom-vector', None), ('my-first-vector', 'my-second_vector')]) def test_qdrant_from_texts_raises_error_on_different_vector_name( first_vector_name: Optional[str], second_vector_name: Optional[str] ) ->None: """Test if Qdrant.from_texts raises an exception if vector name does not match""" collection_name = uuid.uuid4().hex with tempfile.TemporaryDirectory() as tmpdir: vec_store = Qdrant.from_texts(['lorem', 'ipsum', 'dolor', 'sit', 'amet'], ConsistentFakeEmbeddings(dimensionality=10), collection_name=collection_name, path=str(tmpdir), vector_name= first_vector_name) del vec_store with pytest.raises(QdrantException): Qdrant.from_texts(['foo', 'bar'], ConsistentFakeEmbeddings( dimensionality=5), collection_name=collection_name, path= str(tmpdir), vector_name=second_vector_name)
Test if Qdrant.from_texts raises an exception if vector name does not match
create_index
""" Create an index of embeddings for a list of contexts. Args: contexts: List of contexts to embed. embeddings: Embeddings model to use. Returns: Index of embeddings. """ with concurrent.futures.ThreadPoolExecutor() as executor: return np.array(list(executor.map(embeddings.embed_query, contexts)))
def create_index(contexts: List[str], embeddings: Embeddings) ->np.ndarray: """ Create an index of embeddings for a list of contexts. Args: contexts: List of contexts to embed. embeddings: Embeddings model to use. Returns: Index of embeddings. """ with concurrent.futures.ThreadPoolExecutor() as executor: return np.array(list(executor.map(embeddings.embed_query, contexts)))
Create an index of embeddings for a list of contexts. Args: contexts: List of contexts to embed. embeddings: Embeddings model to use. Returns: Index of embeddings.
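A calling sketch; `embeddings` stands for any Embeddings implementation with an embed_query method:

.. code-block:: python

    index = create_index(["first passage", "second passage"], embeddings)
    index.shape  # (2, d), where d is the embedding dimension of the model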
prompt
"""Create LLM prompt with the question.""" prompt_template = PromptTemplate(input_variables=[Constant.narrative_input. value], template=self.template, partial_variables={ 'format_instructions': PydanticOutputParser(pydantic_object=self. data_model).get_format_instructions()}) prompt = prompt_template.format(narrative_input=self.question) return prompt
@property
def prompt(self) ->str:
    """Create LLM prompt with the question."""
    prompt_template = PromptTemplate(
        input_variables=[Constant.narrative_input.value],
        template=self.template,
        partial_variables={'format_instructions': PydanticOutputParser(
            pydantic_object=self.data_model).get_format_instructions()})
    prompt = prompt_template.format(narrative_input=self.question)
    return prompt
Create LLM prompt with the question.
preprocess_msg
if isinstance(msg, BaseMessage): role_convert = {'ai': 'assistant', 'human': 'user'} if isinstance(msg, ChatMessage): role = msg.role else: role = msg.type role = role_convert.get(role, role) content = self._process_content(msg.content) return {'role': role, 'content': content} raise ValueError(f'Invalid message: {repr(msg)} of type {type(msg)}')
def preprocess_msg(self, msg: BaseMessage) ->Dict[str, str]:
    if isinstance(msg, BaseMessage):
        role_convert = {'ai': 'assistant', 'human': 'user'}
        if isinstance(msg, ChatMessage):
            role = msg.role
        else:
            role = msg.type
        role = role_convert.get(role, role)
        content = self._process_content(msg.content)
        return {'role': role, 'content': content}
    raise ValueError(f'Invalid message: {repr(msg)} of type {type(msg)}')
null
test_main_id_main_content
loader = ReadTheDocsLoader(PARENT_DIR / 'main_id_main_content') documents = loader.load() assert len(documents[0].page_content) != 0
@pytest.mark.requires('bs4')
def test_main_id_main_content() ->None:
    loader = ReadTheDocsLoader(PARENT_DIR / 'main_id_main_content')
    documents = loader.load()
    assert len(documents[0].page_content) != 0
null
get_tokenizer
try: from transformers import GPT2TokenizerFast except ImportError: raise ImportError( 'Could not import transformers python package. This is needed in order to calculate get_token_ids. Please install it with `pip install transformers`.' ) return GPT2TokenizerFast.from_pretrained('gpt2')
@lru_cache(maxsize=None)
def get_tokenizer() ->Any:
    try:
        from transformers import GPT2TokenizerFast
    except ImportError:
        raise ImportError(
            'Could not import transformers python package. This is needed in order to calculate get_token_ids. Please install it with `pip install transformers`.'
        )
    return GPT2TokenizerFast.from_pretrained('gpt2')
null
_get_relevant_documents
response = self.client.retrieve(retrievalQuery={'text': query.strip()}, knowledgeBaseId=self.knowledge_base_id, retrievalConfiguration=self. retrieval_config.dict()) results = response['retrievalResults'] documents = [] for result in results: documents.append(Document(page_content=result['content']['text'], metadata={'location': result['location'], 'score': result['score'] if 'score' in result else 0})) return documents
def _get_relevant_documents(self, query: str, *, run_manager:
        CallbackManagerForRetrieverRun) ->List[Document]:
    response = self.client.retrieve(
        retrievalQuery={'text': query.strip()},
        knowledgeBaseId=self.knowledge_base_id,
        retrievalConfiguration=self.retrieval_config.dict())
    results = response['retrievalResults']
    documents = []
    for result in results:
        documents.append(Document(
            page_content=result['content']['text'],
            metadata={'location': result['location'],
                'score': result['score'] if 'score' in result else 0}))
    return documents
null
test_sklearn_with_persistence
"""Test end to end construction and search, with persistence.""" persist_path = tmpdir / 'foo.parquet' texts = ['foo', 'bar', 'baz'] docsearch = SKLearnVectorStore.from_texts(texts, FakeEmbeddings(), persist_path=str(persist_path), serializer='json') output = docsearch.similarity_search('foo', k=1) assert len(output) == 1 assert output[0].page_content == 'foo' docsearch.persist() docsearch = SKLearnVectorStore(FakeEmbeddings(), persist_path=str( persist_path), serializer='json') output = docsearch.similarity_search('foo', k=1) assert len(output) == 1 assert output[0].page_content == 'foo'
@pytest.mark.requires('numpy', 'sklearn')
def test_sklearn_with_persistence(tmpdir: Path) ->None:
    """Test end to end construction and search, with persistence."""
    persist_path = tmpdir / 'foo.parquet'
    texts = ['foo', 'bar', 'baz']
    docsearch = SKLearnVectorStore.from_texts(texts, FakeEmbeddings(),
        persist_path=str(persist_path), serializer='json')
    output = docsearch.similarity_search('foo', k=1)
    assert len(output) == 1
    assert output[0].page_content == 'foo'
    docsearch.persist()
    docsearch = SKLearnVectorStore(FakeEmbeddings(),
        persist_path=str(persist_path), serializer='json')
    output = docsearch.similarity_search('foo', k=1)
    assert len(output) == 1
    assert output[0].page_content == 'foo'
Test end to end construction and search, with persistence.
test_incorrect_command
"""Test handling of incorrect command.""" session = BashProcess() output = session.run(['invalid_command']) assert output == "Command 'invalid_command' returned non-zero exit status 127."
@pytest.mark.skipif(sys.platform.startswith('win'),
    reason='Test not supported on Windows')
def test_incorrect_command() ->None:
    """Test handling of incorrect command."""
    session = BashProcess()
    output = session.run(['invalid_command'])
    assert output == "Command 'invalid_command' returned non-zero exit status 127."
Test handling of incorrect command.
map
"""Maps the Example, or dataset row to a dictionary.""" if not example.outputs: raise ValueError( f'Example {example.id} has no outputs to use as a reference.') if self.reference_key is None: if len(example.outputs) > 1: raise ValueError( f'Example {example.id} has multiple outputs, so you must specify a reference_key.' ) else: output = list(example.outputs.values())[0] elif self.reference_key not in example.outputs: raise ValueError( f'Example {example.id} does not have reference key {self.reference_key}.' ) else: output = example.outputs[self.reference_key] return {'reference': self.serialize_chat_messages([output]) if isinstance( output, dict) and output.get('type') and output.get('data') else output}
def map(self, example: Example) ->Dict[str, str]:
    """Maps the Example, or dataset row to a dictionary."""
    if not example.outputs:
        raise ValueError(
            f'Example {example.id} has no outputs to use as a reference.')
    if self.reference_key is None:
        if len(example.outputs) > 1:
            raise ValueError(
                f'Example {example.id} has multiple outputs, so you must specify a reference_key.'
            )
        else:
            output = list(example.outputs.values())[0]
    elif self.reference_key not in example.outputs:
        raise ValueError(
            f'Example {example.id} does not have reference key {self.reference_key}.'
        )
    else:
        output = example.outputs[self.reference_key]
    return {'reference': self.serialize_chat_messages([output]) if
        isinstance(output, dict) and output.get('type') and
        output.get('data') else output}
Maps the Example, or dataset row to a dictionary.
check_bs4
"""Check if BeautifulSoup4 is installed. Raises: ImportError: If BeautifulSoup4 is not installed. """ try: import bs4 except ImportError: raise ImportError( 'BeautifulSoup4 is required for BlackboardLoader. Please install it with `pip install beautifulsoup4`.' )
def check_bs4(self) ->None:
    """Check if BeautifulSoup4 is installed.

    Raises:
        ImportError: If BeautifulSoup4 is not installed.
    """
    try:
        import bs4
    except ImportError:
        raise ImportError(
            'BeautifulSoup4 is required for BlackboardLoader. Please install it with `pip install beautifulsoup4`.'
        )
Check if BeautifulSoup4 is installed. Raises: ImportError: If BeautifulSoup4 is not installed.
_call
return 'fake response'
def _call(self, messages: List[BaseMessage], stop: Optional[List[str]]=None,
        run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
        ) ->str:
    return 'fake response'
null
test_google_generativeai_get_num_tokens
llm = GoogleGenerativeAI(model='models/text-bison-001') output = llm.get_num_tokens('How are you?') assert output == 4
def test_google_generativeai_get_num_tokens() ->None:
    llm = GoogleGenerativeAI(model='models/text-bison-001')
    output = llm.get_num_tokens('How are you?')
    assert output == 4
null
_run
try: try: multion.close_session(sessionId) except Exception as e: print(f'{e}, retrying...') except Exception as e: raise Exception(f'An error occurred: {e}')
def _run(self, sessionId: str, run_manager: Optional[
        CallbackManagerForToolRun]=None) ->None:
    try:
        try:
            multion.close_session(sessionId)
        except Exception as e:
            print(f'{e}, retrying...')
    except Exception as e:
        raise Exception(f'An error occurred: {e}')
null
similarity_search
"""Perform a similarity search against the query string.""" res = self.similarity_search_with_score(query=query, k=k, param=param, expr =expr, timeout=timeout, **kwargs) return [doc for doc, _ in res]
def similarity_search(self, query: str, k: int=4, param: Optional[dict]=None,
        expr: Optional[str]=None, timeout: Optional[int]=None, **kwargs: Any
        ) ->List[Document]:
    """Perform a similarity search against the query string."""
    res = self.similarity_search_with_score(query=query, k=k, param=param,
        expr=expr, timeout=timeout, **kwargs)
    return [doc for doc, _ in res]
Perform a similarity search against the query string.
test_huggingface_pipeline_text2text_generation
"""Test valid call to HuggingFace text2text generation model.""" llm = HuggingFacePipeline.from_model_id(model_id='google/flan-t5-small', task='text2text-generation') output = llm('Say foo:') assert isinstance(output, str)
def test_huggingface_pipeline_text2text_generation() ->None:
    """Test valid call to HuggingFace text2text generation model."""
    llm = HuggingFacePipeline.from_model_id(model_id='google/flan-t5-small',
        task='text2text-generation')
    output = llm('Say foo:')
    assert isinstance(output, str)
Test valid call to HuggingFace text2text generation model.
is_async
"""Return whether the handler is async.""" return True
@property
def is_async(self) ->bool:
    """Return whether the handler is async."""
    return True
Return whether the handler is async.
test_yield_keys
key_value_pairs = [('key1', b'value1'), ('subdir/key2', b'value2')] file_store.mset(key_value_pairs) keys = list(file_store.yield_keys()) expected_keys = ['key1', os.path.join('subdir', 'key2')] assert keys == expected_keys
def test_yield_keys(file_store: LocalFileStore) ->None:
    key_value_pairs = [('key1', b'value1'), ('subdir/key2', b'value2')]
    file_store.mset(key_value_pairs)
    keys = list(file_store.yield_keys())
    expected_keys = ['key1', os.path.join('subdir', 'key2')]
    assert keys == expected_keys
null
_is_valid_url
"""Check if the url is valid.""" parsed = urlparse(url) return bool(parsed.netloc) and bool(parsed.scheme)
@staticmethod
def _is_valid_url(url: str) ->bool:
    """Check if the url is valid."""
    parsed = urlparse(url)
    return bool(parsed.netloc) and bool(parsed.scheme)
Check if the url is valid.
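For a quick sanity check of the rule in _is_valid_url above, here is a standalone sketch using only the standard library (the example URLs are made up):

from urllib.parse import urlparse


def is_valid_url(url: str) ->bool:
    # Accept only URLs that have both a scheme and a network location.
    parsed = urlparse(url)
    return bool(parsed.netloc) and bool(parsed.scheme)


print(is_valid_url('https://example.com/docs'))  # True
print(is_valid_url('example.com/docs'))  # False: no scheme
print(is_valid_url('not a url'))  # False: neither scheme nor netloc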
_init_resp
return {k: None for k in self.callback_columns}
def _init_resp(self) ->Dict:
    return {k: None for k in self.callback_columns}
null
test_multiple_intermediate_steps_default_response
intermediate_steps = [(AgentAction(tool='Tool1', tool_input='input1', log= 'Log1'), 'Observation1'), (AgentAction(tool='Tool2', tool_input= 'input2', log='Log2'), 'Observation2'), (AgentAction(tool='Tool3', tool_input='input3', log='Log3'), 'Observation3')] expected_result = [AIMessage(content='Log1'), HumanMessage(content= 'Observation1'), AIMessage(content='Log2'), HumanMessage(content= 'Observation2'), AIMessage(content='Log3'), HumanMessage(content= 'Observation3')] assert format_log_to_messages(intermediate_steps) == expected_result
def test_multiple_intermediate_steps_default_response() ->None:
    intermediate_steps = [
        (AgentAction(tool='Tool1', tool_input='input1', log='Log1'), 'Observation1'),
        (AgentAction(tool='Tool2', tool_input='input2', log='Log2'), 'Observation2'),
        (AgentAction(tool='Tool3', tool_input='input3', log='Log3'), 'Observation3')]
    expected_result = [
        AIMessage(content='Log1'), HumanMessage(content='Observation1'),
        AIMessage(content='Log2'), HumanMessage(content='Observation2'),
        AIMessage(content='Log3'), HumanMessage(content='Observation3')]
    assert format_log_to_messages(intermediate_steps) == expected_result
null
_collect_user_input
"""Collects and returns user input as a single string.""" separator = separator or '\n' lines = [] while True: line = input() if not line: break lines.append(line) if stop and any(seq in line for seq in stop): break multi_line_input = separator.join(lines) return multi_line_input
def _collect_user_input(separator: Optional[str]=None,
        stop: Optional[List[str]]=None) ->str:
    """Collects and returns user input as a single string."""
    separator = separator or '\n'
    lines = []
    while True:
        line = input()
        if not line:
            break
        lines.append(line)
        if stop and any(seq in line for seq in stop):
            break
    multi_line_input = separator.join(lines)
    return multi_line_input
Collects and returns user input as a single string.
test_success
"""Test that a valid hub path is loaded correctly with and without a ref.""" path = 'chains/path/chain.json' lc_path_prefix = f"lc{'@' + ref if ref else ''}://" valid_suffixes = {'json'} body = json.dumps({'foo': 'bar'}) ref = ref or DEFAULT_REF file_contents = None def loader(file_path: str) ->None: nonlocal file_contents assert file_contents is None file_contents = Path(file_path).read_text() mocked_responses.get(urljoin(URL_BASE.format(ref=ref), path), body=body, status=200, content_type='application/json') try_load_from_hub(f'{lc_path_prefix}{path}', loader, 'chains', valid_suffixes) assert file_contents == body
@pytest.mark.parametrize('ref', [None, 'v0.3'])
def test_success(mocked_responses: responses.RequestsMock, ref: str) ->None:
    """Test that a valid hub path is loaded correctly with and without a ref."""
    path = 'chains/path/chain.json'
    lc_path_prefix = f"lc{'@' + ref if ref else ''}://"
    valid_suffixes = {'json'}
    body = json.dumps({'foo': 'bar'})
    ref = ref or DEFAULT_REF
    file_contents = None

    def loader(file_path: str) ->None:
        nonlocal file_contents
        assert file_contents is None
        file_contents = Path(file_path).read_text()

    mocked_responses.get(urljoin(URL_BASE.format(ref=ref), path), body=body,
        status=200, content_type='application/json')
    try_load_from_hub(f'{lc_path_prefix}{path}', loader, 'chains',
        valid_suffixes)
    assert file_contents == body
Test that a valid hub path is loaded correctly with and without a ref.
test_selector_valid
"""Test NGramOverlapExampleSelector can select examples.""" sentence = 'Spot can run.' output = selector.select_examples({'input': sentence}) assert output == [EXAMPLES[2], EXAMPLES[0], EXAMPLES[1]]
def test_selector_valid(selector: NGramOverlapExampleSelector) ->None:
    """Test NGramOverlapExampleSelector can select examples."""
    sentence = 'Spot can run.'
    output = selector.select_examples({'input': sentence})
    assert output == [EXAMPLES[2], EXAMPLES[0], EXAMPLES[1]]
Test NGramOverlapExampleSelector can select examples.
_cache_embedding
return self.embedding.embed_query(text=text)
@lru_cache(maxsize=CASSANDRA_SEMANTIC_CACHE_EMBEDDING_CACHE_SIZE)
def _cache_embedding(text: str) ->List[float]:
    return self.embedding.embed_query(text=text)
null
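The _cache_embedding entry above is a nested helper: it closes over self.embedding and a module-level cache-size constant so that identical query texts are embedded only once. A self-contained sketch of the same memoisation pattern, with hypothetical names and no Cassandra or embeddings dependency:

from functools import lru_cache


class ExpensiveLookup:
    def __init__(self, cache_size: int=128):
        self.calls = 0

        @lru_cache(maxsize=cache_size)
        def _cached(key: str) ->str:
            # The wrapped closure can read and mutate instance state while
            # lru_cache deduplicates repeated keys.
            self.calls += 1
            return key.upper()
        self._cached = _cached


lookup = ExpensiveLookup()
lookup._cached('foo')
lookup._cached('foo')
lookup._cached('bar')
print(lookup.calls)  # 2: the second 'foo' call hit the cache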
_get_custom_metrics
"""Compute Custom Metrics for an LLM Generated Output Args: generation (LLMResult): Output generation from an LLM prompt_idx (int): List index of the input prompt gen_idx (int): List index of the generated output Returns: dict: A dictionary containing the custom metrics. """ resp = {} if self.custom_metrics: custom_metrics = self.custom_metrics(generation, prompt_idx, gen_idx) resp.update(custom_metrics) return resp
def _get_custom_metrics(self, generation: Generation, prompt_idx: int,
        gen_idx: int) ->dict:
    """Compute Custom Metrics for an LLM Generated Output

    Args:
        generation (LLMResult): Output generation from an LLM
        prompt_idx (int): List index of the input prompt
        gen_idx (int): List index of the generated output

    Returns:
        dict: A dictionary containing the custom metrics.
    """
    resp = {}
    if self.custom_metrics:
        custom_metrics = self.custom_metrics(generation, prompt_idx, gen_idx)
        resp.update(custom_metrics)
    return resp
Compute Custom Metrics for an LLM Generated Output Args: generation (LLMResult): Output generation from an LLM prompt_idx (int): List index of the input prompt gen_idx (int): List index of the generated output Returns: dict: A dictionary containing the custom metrics.
_invocation_params
params = self._default_params if self.stop_sequences is not None and stop_sequences is not None: raise ValueError('`stop` found in both the input and default params.') elif self.stop_sequences is not None: params['stop_sequences'] = self.stop_sequences else: params['stop_sequences'] = stop_sequences return {**params, **kwargs}
def _invocation_params(self, stop_sequences: Optional[List[str]],
        **kwargs: Any) ->dict:
    params = self._default_params
    if self.stop_sequences is not None and stop_sequences is not None:
        raise ValueError('`stop` found in both the input and default params.')
    elif self.stop_sequences is not None:
        params['stop_sequences'] = self.stop_sequences
    else:
        params['stop_sequences'] = stop_sequences
    return {**params, **kwargs}
null
get_attribute_value
if not self.AdditionalAttributes: return '' if not self.AdditionalAttributes[0]: return '' else: return self.AdditionalAttributes[0].get_value_text()
def get_attribute_value(self) ->str:
    if not self.AdditionalAttributes:
        return ''
    if not self.AdditionalAttributes[0]:
        return ''
    else:
        return self.AdditionalAttributes[0].get_value_text()
null
connection_string_from_db_params
"""Return connection string from database parameters.""" return f'dbname={database} user={user} password={password} host={host} port={port}'
@classmethod
def connection_string_from_db_params(cls, host: str, port: int, database: str,
        user: str, password: str) ->str:
    """Return connection string from database parameters."""
    return (
        f'dbname={database} user={user} password={password} host={host} port={port}'
    )
Return connection string from database parameters.
test_returnsingledocument_loadnotebook_eachnoteiscombinedinto1document
loader = EverNoteLoader(self.example_notebook_path('sample_notebook.enex'), True) documents = loader.load() assert len(documents) == 1
def test_returnsingledocument_loadnotebook_eachnoteiscombinedinto1document(
        self) ->None:
    loader = EverNoteLoader(self.example_notebook_path(
        'sample_notebook.enex'), True)
    documents = loader.load()
    assert len(documents) == 1
null
_to_chat_result
chat_generations = [] for g in llm_result.generations[0]: chat_generation = ChatGeneration(message=AIMessage(content=g.text), generation_info=g.generation_info) chat_generations.append(chat_generation) return ChatResult(generations=chat_generations, llm_output=llm_result. llm_output)
@staticmethod
def _to_chat_result(llm_result: LLMResult) ->ChatResult:
    chat_generations = []
    for g in llm_result.generations[0]:
        chat_generation = ChatGeneration(message=AIMessage(content=g.text),
            generation_info=g.generation_info)
        chat_generations.append(chat_generation)
    return ChatResult(generations=chat_generations,
        llm_output=llm_result.llm_output)
null
test_embaas_embed_documents_response
"""Test embaas embeddings with multiple texts.""" responses.add(responses.POST, EMBAAS_API_URL, json={'data': [{'embedding': [0.0] * 1024}]}, status=200) text = 'asd' embeddings = EmbaasEmbeddings() output = embeddings.embed_query(text) assert len(output) == 1024
@responses.activate
def test_embaas_embed_documents_response() ->None:
    """Test embaas embeddings with multiple texts."""
    responses.add(responses.POST, EMBAAS_API_URL,
        json={'data': [{'embedding': [0.0] * 1024}]}, status=200)
    text = 'asd'
    embeddings = EmbaasEmbeddings()
    output = embeddings.embed_query(text)
    assert len(output) == 1024
Test embaas embeddings with multiple texts.
_create_action_payload
"""Create a payload for an action.""" data = params if params else {} data.update({'instructions': instructions}) if preview_only: data.update({'preview_only': True}) return data
def _create_action_payload(self, instructions: str, params: Optional[Dict]=None,
        preview_only=False) ->Dict:
    """Create a payload for an action."""
    data = params if params else {}
    data.update({'instructions': instructions})
    if preview_only:
        data.update({'preview_only': True})
    return data
Create a payload for an action.
invoke
runnable, config = self._prepare(config) return runnable.invoke(input, config, **kwargs)
def invoke(self, input: Input, config: Optional[RunnableConfig]=None,
        **kwargs: Any) ->Output:
    runnable, config = self._prepare(config)
    return runnable.invoke(input, config, **kwargs)
null
embed_documents
"""Return consistent embeddings for each text seen so far.""" out_vectors = [] for text in texts: if text not in self.known_texts: self.known_texts.append(text) vector = [float(1.0)] * (self.dimensionality - 1) + [float(self. known_texts.index(text))] out_vectors.append(vector) return out_vectors
def embed_documents(self, texts: List[str]) ->List[List[float]]:
    """Return consistent embeddings for each text seen so far."""
    out_vectors = []
    for text in texts:
        if text not in self.known_texts:
            self.known_texts.append(text)
        vector = [float(1.0)] * (self.dimensionality - 1) + [
            float(self.known_texts.index(text))]
        out_vectors.append(vector)
    return out_vectors
Return consistent embeddings for each text seen so far.
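For intuition, the embed_documents logic above yields vectors that are all ones except for a last component equal to the index at which a text was first seen, so repeated texts always map to identical vectors. A standalone sketch of that behaviour (hypothetical class name, no framework imports):

from typing import List


class TinyConsistentFakeEmbeddings:
    def __init__(self, dimensionality: int=4):
        self.known_texts: List[str] = []
        self.dimensionality = dimensionality

    def embed_documents(self, texts: List[str]) ->List[List[float]]:
        out_vectors = []
        for text in texts:
            if text not in self.known_texts:
                self.known_texts.append(text)
            # Ones everywhere, with the first-seen index as the last coordinate.
            vector = [1.0] * (self.dimensionality - 1) + [
                float(self.known_texts.index(text))]
            out_vectors.append(vector)
        return out_vectors


emb = TinyConsistentFakeEmbeddings()
print(emb.embed_documents(['foo', 'bar', 'foo']))
# [[1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 0.0]]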
test_wrapper_fails_no_api_key_or_access_token_initialization
"""Test Wrapper requires either an API Key or OAuth Access Token.""" with pytest.raises(ValueError): ZapierNLAWrapper()
def test_wrapper_fails_no_api_key_or_access_token_initialization() ->None:
    """Test Wrapper requires either an API Key or OAuth Access Token."""
    with pytest.raises(ValueError):
        ZapierNLAWrapper()
Test Wrapper requires either an API Key or OAuth Access Token.
similarity_search_by_vector
"""Accepts a query_embedding (vector), and returns documents with similar embeddings.""" docs_and_scores = self.similarity_search_by_vector_with_relevance_scores( embedding, k, distance_func, where_str, **kwargs) return [doc for doc, _ in docs_and_scores]
def similarity_search_by_vector(self, embedding: List[float], k: int=4,
        distance_func: DistanceFunction=DistanceFunction.COSINE_SIM,
        where_str: Optional[str]=None, **kwargs: Any) ->List[Document]:
    """Accepts a query_embedding (vector), and returns documents with similar embeddings."""
    docs_and_scores = self.similarity_search_by_vector_with_relevance_scores(
        embedding, k, distance_func, where_str, **kwargs)
    return [doc for doc, _ in docs_and_scores]
Accepts a query_embedding (vector), and returns documents with similar embeddings.
load
return list(self.lazy_load())
def load(self) ->List[Document]:
    return list(self.lazy_load())
null
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() ->None:
    assert set(__all__) == set(EXPECTED_ALL)
null
_import_aleph_alpha
from langchain_community.llms.aleph_alpha import AlephAlpha return AlephAlpha
def _import_aleph_alpha() ->Any:
    from langchain_community.llms.aleph_alpha import AlephAlpha
    return AlephAlpha
null
_reset
for k, v in self.metrics.items(): self.metrics[k] = 0 for k, v in self.records.items(): self.records[k] = []
def _reset(self) ->None:
    for k, v in self.metrics.items():
        self.metrics[k] = 0
    for k, v in self.records.items():
        self.records[k] = []
null
_import_gradient_ai
from langchain_community.llms.gradient_ai import GradientLLM return GradientLLM
def _import_gradient_ai() ->Any:
    from langchain_community.llms.gradient_ai import GradientLLM
    return GradientLLM
null
__init__
super().__init__(**data) try: from mlflow.deployments import get_deploy_client self.client = get_deploy_client(self.databricks_uri) except ImportError as e: raise ImportError( 'Failed to create the client. Please install mlflow with `pip install mlflow`.' ) from e endpoint = self.client.get_endpoint(self.endpoint_name) self.external_or_foundation = endpoint.get('endpoint_type', '').lower() in ( 'external_model', 'foundation_model_api') if self.task is None: self.task = endpoint.get('task')
def __init__(self, **data: Any):
    super().__init__(**data)
    try:
        from mlflow.deployments import get_deploy_client
        self.client = get_deploy_client(self.databricks_uri)
    except ImportError as e:
        raise ImportError(
            'Failed to create the client. Please install mlflow with `pip install mlflow`.'
        ) from e
    endpoint = self.client.get_endpoint(self.endpoint_name)
    self.external_or_foundation = endpoint.get('endpoint_type', '').lower() in (
        'external_model', 'foundation_model_api')
    if self.task is None:
        self.task = endpoint.get('task')
null
deserialize_from_bytes
"""Deserialize FAISS index, docstore, and index_to_docstore_id from bytes.""" index, docstore, index_to_docstore_id = pickle.loads(serialized) return cls(embeddings, index, docstore, index_to_docstore_id, **kwargs)
@classmethod
def deserialize_from_bytes(cls, serialized: bytes, embeddings: Embeddings,
        **kwargs: Any) ->FAISS:
    """Deserialize FAISS index, docstore, and index_to_docstore_id from bytes."""
    index, docstore, index_to_docstore_id = pickle.loads(serialized)
    return cls(embeddings, index, docstore, index_to_docstore_id, **kwargs)
Deserialize FAISS index, docstore, and index_to_docstore_id from bytes.
_get_env
logger.info('init ...') if embeddings is not None: logger.info('create collection') self._create_collection(embeddings, metadatas) self._extract_fields() self._create_index()
def _get_env(self, embeddings: Optional[list]=None,
        metadatas: Optional[List[dict]]=None) ->None:
    logger.info('init ...')
    if embeddings is not None:
        logger.info('create collection')
        self._create_collection(embeddings, metadatas)
    self._extract_fields()
    self._create_index()
null
get_fields
redis_fields: List['RedisField'] = [] if self.is_empty: return redis_fields for field_name in self.__fields__.keys(): if field_name not in ['content_key', 'content_vector_key', 'extra']: field_group = getattr(self, field_name) if field_group is not None: for field in field_group: redis_fields.append(field.as_field()) return redis_fields
def get_fields(self) ->List['RedisField']:
    redis_fields: List['RedisField'] = []
    if self.is_empty:
        return redis_fields
    for field_name in self.__fields__.keys():
        if field_name not in ['content_key', 'content_vector_key', 'extra']:
            field_group = getattr(self, field_name)
            if field_group is not None:
                for field in field_group:
                    redis_fields.append(field.as_field())
    return redis_fields
null