method_name | method_body | full_code | docstring
---|---|---|---|
_import_interaction_tool | from langchain_community.tools.interaction.tool import StdInInquireTool
return StdInInquireTool | def _import_interaction_tool() ->Any:
from langchain_community.tools.interaction.tool import StdInInquireTool
return StdInInquireTool | null |
assign | """Assigns new fields to the dict output of this runnable.
Returns a new runnable."""
from langchain_core.runnables.passthrough import RunnableAssign
return self | RunnableAssign(RunnableParallel(kwargs)) | def assign(self, **kwargs: Union[Runnable[Dict[str, Any], Any], Callable[[
Dict[str, Any]], Any], Mapping[str, Union[Runnable[Dict[str, Any], Any],
Callable[[Dict[str, Any]], Any]]]]) ->RunnableSerializable[Any, Any]:
"""Assigns new fields to the dict output of this runnable.
Returns a new runnable."""
from langchain_core.runnables.passthrough import RunnableAssign
return self | RunnableAssign(RunnableParallel(kwargs)) | Assigns new fields to the dict output of this runnable.
Returns a new runnable. |
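
The `assign` row above composes the current runnable with `RunnableAssign(RunnableParallel(kwargs))`, attaching computed fields to a dict output. A minimal sketch of the same pattern through the public `RunnablePassthrough.assign` entry point (the input dict and derived field are illustrative):

```python
from langchain_core.runnables import RunnablePassthrough

# Pass the input dict through unchanged and attach one derived field to it.
chain = RunnablePassthrough.assign(total=lambda d: d["a"] + d["b"])
print(chain.invoke({"a": 1, "b": 2}))  # {'a': 1, 'b': 2, 'total': 3}
```
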
test_pandas_output_parser_col_oob | try:
parser.parse('row:10')
assert False, 'Should have raised OutputParserException'
except OutputParserException:
assert True | def test_pandas_output_parser_col_oob() ->None:
try:
parser.parse('row:10')
assert False, 'Should have raised OutputParserException'
except OutputParserException:
assert True | null |
test_solve_sudoku_k_too_small | """Test simple question that should not need python."""
tot_chain = ToTChain(llm=fake_llm_sudoku, checker=SudokuChecker(), k=len(
solutions) - 1, c=4, tot_strategy_class=SampleCoTStrategy)
output = tot_chain.run({'problem_description': ''})
assert output != sudoku_solution | @pytest.mark.requires('jinja2')
def test_solve_sudoku_k_too_small(fake_llm_sudoku: FakeLLM) ->None:
"""Test simple question that should not need python."""
tot_chain = ToTChain(llm=fake_llm_sudoku, checker=SudokuChecker(), k=
len(solutions) - 1, c=4, tot_strategy_class=SampleCoTStrategy)
output = tot_chain.run({'problem_description': ''})
assert output != sudoku_solution | Test simple question that should not need python. |
set_llm_chain | llm_chain = values.get('llm_chain')
llm = values.get('llm')
few_shot_template = values.get('template')
if not llm_chain:
if llm is None or few_shot_template is None:
raise ValueError(
'Both llm and few_shot_template must be provided if llm_chain is not given.'
)
values['llm_chain'] = LLMChain(llm=llm, prompt=few_shot_template)
return values | @root_validator(pre=False, skip_on_failure=True)
def set_llm_chain(cls, values: Dict[str, Any]) ->Dict[str, Any]:
llm_chain = values.get('llm_chain')
llm = values.get('llm')
few_shot_template = values.get('template')
if not llm_chain:
if llm is None or few_shot_template is None:
raise ValueError(
'Both llm and few_shot_template must be provided if llm_chain is not given.'
)
values['llm_chain'] = LLMChain(llm=llm, prompt=few_shot_template)
return values | null |
ignore_chain | """Whether to ignore chain callbacks."""
return self.ignore_chain_ | @property
def ignore_chain(self) ->bool:
"""Whether to ignore chain callbacks."""
return self.ignore_chain_ | Whether to ignore chain callbacks. |
test_chat_ai_endpoints | """Test ChatNVIDIA wrapper."""
chat = ChatNVIDIA(model='llama2_13b', temperature=0.7)
message = HumanMessage(content='Hello')
response = chat([message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str) | def test_chat_ai_endpoints() ->None:
"""Test ChatNVIDIA wrapper."""
chat = ChatNVIDIA(model='llama2_13b', temperature=0.7)
message = HumanMessage(content='Hello')
response = chat([message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str) | Test ChatNVIDIA wrapper. |
create_review_request | """
Creates a review request on *THE* open pull request
that matches the current active_branch.
Parameters:
reviewer_username(str): The username of the person who is being requested
Returns:
str: A message confirming the creation of the review request
"""
pull_requests = self.github_repo_instance.get_pulls(state='open', sort=
'created')
pr = next((pr for pr in pull_requests if pr.head.ref == self.active_branch),
None)
if pr is None:
return (
f'No open pull request found for the current branch `{self.active_branch}`'
)
try:
pr.create_review_request(reviewers=[reviewer_username])
return (
f'Review request created for user {reviewer_username} on PR #{pr.number}'
)
except Exception as e:
return f'Failed to create a review request with error {e}' | def create_review_request(self, reviewer_username: str) ->str:
"""
Creates a review request on *THE* open pull request
that matches the current active_branch.
Parameters:
reviewer_username(str): The username of the person who is being requested
Returns:
str: A message confirming the creation of the review request
"""
pull_requests = self.github_repo_instance.get_pulls(state='open', sort=
'created')
pr = next((pr for pr in pull_requests if pr.head.ref == self.
active_branch), None)
if pr is None:
return (
f'No open pull request found for the current branch `{self.active_branch}`'
)
try:
pr.create_review_request(reviewers=[reviewer_username])
return (
f'Review request created for user {reviewer_username} on PR #{pr.number}'
)
except Exception as e:
return f'Failed to create a review request with error {e}' | Creates a review request on *THE* open pull request
that matches the current active_branch.
Parameters:
reviewer_username(str): The username of the person who is being requested
Returns:
str: A message confirming the creation of the review request |
from_llm_and_tools | prompt = AutoGPTPrompt(ai_name=ai_name, ai_role=ai_role, tools=tools,
input_variables=['memory', 'messages', 'goals', 'user_input'],
token_counter=llm.get_num_tokens)
human_feedback_tool = HumanInputRun() if human_in_the_loop else None
chain = LLMChain(llm=llm, prompt=prompt)
return cls(ai_name, memory, chain, output_parser or AutoGPTOutputParser(),
tools, feedback_tool=human_feedback_tool, chat_history_memory=
chat_history_memory) | @classmethod
def from_llm_and_tools(cls, ai_name: str, ai_role: str, memory:
VectorStoreRetriever, tools: List[BaseTool], llm: BaseChatModel,
human_in_the_loop: bool=False, output_parser: Optional[
BaseAutoGPTOutputParser]=None, chat_history_memory: Optional[
BaseChatMessageHistory]=None) ->AutoGPT:
prompt = AutoGPTPrompt(ai_name=ai_name, ai_role=ai_role, tools=tools,
input_variables=['memory', 'messages', 'goals', 'user_input'],
token_counter=llm.get_num_tokens)
human_feedback_tool = HumanInputRun() if human_in_the_loop else None
chain = LLMChain(llm=llm, prompt=prompt)
return cls(ai_name, memory, chain, output_parser or AutoGPTOutputParser
(), tools, feedback_tool=human_feedback_tool, chat_history_memory=
chat_history_memory) | null |
test_query | result = self.kuzu_graph.query('MATCH (n:Movie) RETURN n.name ORDER BY n.name')
expected_result = [{'n.name': 'The Godfather'}, {'n.name':
'The Godfather Coda: The Death of Michael Corleone'}, {'n.name':
'The Godfather: Part II'}]
self.assertEqual(result, expected_result) | def test_query(self) ->None:
result = self.kuzu_graph.query(
'MATCH (n:Movie) RETURN n.name ORDER BY n.name')
expected_result = [{'n.name': 'The Godfather'}, {'n.name':
'The Godfather Coda: The Death of Michael Corleone'}, {'n.name':
'The Godfather: Part II'}]
self.assertEqual(result, expected_result) | null |
build_extra | """Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get('model_kwargs', {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f'Found {field_name} supplied twice.')
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f'Parameters {invalid_model_kwargs} should be specified explicitly. Instead they were passed in as part of the `model_kwargs` parameter.'
)
values['model_kwargs'] = extra
return values | @root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) ->Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get('model_kwargs', {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f'Found {field_name} supplied twice.')
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f'Parameters {invalid_model_kwargs} should be specified explicitly. Instead they were passed in as part of the `model_kwargs` parameter.'
)
values['model_kwargs'] = extra
return values | Build extra kwargs from additional params that were passed in. |
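
The `build_extra` validator above sweeps any keyword the model class does not declare into `model_kwargs`, rejecting duplicates. A framework-free sketch of that bookkeeping, loosely mirroring the loop (the field names are made up):

```python
def sweep_into_model_kwargs(values: dict, declared_fields: set) -> dict:
    # Move every undeclared key into values['model_kwargs'], keeping declared fields in place.
    extra = dict(values.get("model_kwargs", {}))
    for name in list(values):
        if name == "model_kwargs" or name in declared_fields:
            continue
        if name in extra:
            raise ValueError(f"Found {name} supplied twice.")
        extra[name] = values.pop(name)
    values["model_kwargs"] = extra
    return values

print(sweep_into_model_kwargs({"temperature": 0.2, "top_p": 0.9}, {"temperature"}))
# {'temperature': 0.2, 'model_kwargs': {'top_p': 0.9}}
```
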
test_similarity_search_with_metadata_and_additional | """Test end to end construction and search with metadata and additional."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = Weaviate.from_texts(texts, embedding_openai, metadatas=
metadatas, weaviate_url=weaviate_url)
output = docsearch.similarity_search('foo', k=1, additional=['certainty'])
assert output == [Document(page_content='foo', metadata={'page': 0,
'_additional': {'certainty': 1}})] | @pytest.mark.vcr(ignore_localhost=True)
def test_similarity_search_with_metadata_and_additional(self, weaviate_url:
str, embedding_openai: OpenAIEmbeddings) ->None:
"""Test end to end construction and search with metadata and additional."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = Weaviate.from_texts(texts, embedding_openai, metadatas=
metadatas, weaviate_url=weaviate_url)
output = docsearch.similarity_search('foo', k=1, additional=['certainty'])
assert output == [Document(page_content='foo', metadata={'page': 0,
'_additional': {'certainty': 1}})] | Test end to end construction and search with metadata and additional. |
template_is_valid | """Check that prefix, suffix, and input variables are consistent."""
if values['validate_template']:
check_valid_template(values['prefix'] + values['suffix'], values[
'template_format'], values['input_variables'] + list(values[
'partial_variables']))
elif values.get('template_format'):
values['input_variables'] = [var for var in get_template_variables(
values['prefix'] + values['suffix'], values['template_format']) if
var not in values['partial_variables']]
return values | @root_validator()
def template_is_valid(cls, values: Dict) ->Dict:
"""Check that prefix, suffix, and input variables are consistent."""
if values['validate_template']:
check_valid_template(values['prefix'] + values['suffix'], values[
'template_format'], values['input_variables'] + list(values[
'partial_variables']))
elif values.get('template_format'):
values['input_variables'] = [var for var in get_template_variables(
values['prefix'] + values['suffix'], values['template_format']) if
var not in values['partial_variables']]
return values | Check that prefix, suffix, and input variables are consistent. |
route | """
Route inputs to a destination chain.
Args:
inputs: inputs to the chain
callbacks: callbacks to use for the chain
Returns:
a Route object
"""
result = self(inputs, callbacks=callbacks)
return Route(result['destination'], result['next_inputs']) | def route(self, inputs: Dict[str, Any], callbacks: Callbacks=None) ->Route:
"""
Route inputs to a destination chain.
Args:
inputs: inputs to the chain
callbacks: callbacks to use for the chain
Returns:
a Route object
"""
result = self(inputs, callbacks=callbacks)
return Route(result['destination'], result['next_inputs']) | Route inputs to a destination chain.
Args:
inputs: inputs to the chain
callbacks: callbacks to use for the chain
Returns:
a Route object |
validate_size | if 'size' in values:
size = values['size']
model_name = values['model_name']
if size not in SUPPORTED_IMAGE_SIZES[model_name]:
raise RuntimeError(f'size {size} is not supported by {model_name}')
return values | @root_validator(pre=True)
def validate_size(cls, values: Dict) ->Dict:
if 'size' in values:
size = values['size']
model_name = values['model_name']
if size not in SUPPORTED_IMAGE_SIZES[model_name]:
raise RuntimeError(f'size {size} is not supported by {model_name}')
return values | null |
embed_with_retry | """Use tenacity to retry the embedding call."""
retry_decorator = _create_retry_decorator(embeddings)
@retry_decorator
def _embed_with_retry(**kwargs: Any) ->Any:
result = []
i = 0
input_data = kwargs['input']
while i < len(input_data):
kwargs['input'] = input_data[i:i + 25]
resp = embeddings.client.call(**kwargs)
if resp.status_code == 200:
result += resp.output['embeddings']
elif resp.status_code in [400, 401]:
raise ValueError(
f"""status_code: {resp.status_code}
code: {resp.code}
message: {resp.message}"""
)
else:
raise HTTPError(
f"""HTTP error occurred: status_code: {resp.status_code}
code: {resp.code}
message: {resp.message}"""
, response=resp)
i += 25
return result
return _embed_with_retry(**kwargs) | def embed_with_retry(embeddings: DashScopeEmbeddings, **kwargs: Any) ->Any:
"""Use tenacity to retry the embedding call."""
retry_decorator = _create_retry_decorator(embeddings)
@retry_decorator
def _embed_with_retry(**kwargs: Any) ->Any:
result = []
i = 0
input_data = kwargs['input']
while i < len(input_data):
kwargs['input'] = input_data[i:i + 25]
resp = embeddings.client.call(**kwargs)
if resp.status_code == 200:
result += resp.output['embeddings']
elif resp.status_code in [400, 401]:
raise ValueError(
f"""status_code: {resp.status_code}
code: {resp.code}
message: {resp.message}"""
)
else:
raise HTTPError(
f"""HTTP error occurred: status_code: {resp.status_code}
code: {resp.code}
message: {resp.message}"""
, response=resp)
i += 25
return result
return _embed_with_retry(**kwargs) | Use tenacity to retry the embedding call. |
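
The inner loop above submits the DashScope input in slices of 25 items and concatenates the returned embeddings. The slicing itself is plain fixed-size batching; a standalone sketch of that piece:

```python
def batched(items: list, size: int = 25) -> list:
    # Split a list into consecutive chunks of at most `size` elements.
    return [items[i:i + size] for i in range(0, len(items), size)]

print([len(chunk) for chunk in batched(list(range(60)))])  # [25, 25, 10]
```
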
list | """Returns a list of all exposed (enabled) actions associated with
current user (associated with the set api_key). Change your exposed
actions here: https://nla.zapier.com/demo/start/
The return list can be empty if no actions exposed. Else will contain
a list of action objects:
[{
"id": str,
"description": str,
"params": Dict[str, str]
}]
`params` will always contain an `instructions` key, the only required
param. All others optional and if provided will override any AI guesses
(see "understanding the AI guessing flow" here:
https://nla.zapier.com/docs/using-the-api#ai-guessing)
"""
session = self._get_session()
try:
response = session.get(self.zapier_nla_api_base + 'exposed/')
response.raise_for_status()
except requests.HTTPError as http_err:
if response.status_code == 401:
if self.zapier_nla_oauth_access_token:
raise requests.HTTPError(
f"An unauthorized response occurred. Check that your access token is correct and doesn't need to be refreshed. Err: {http_err}"
, response=response)
raise requests.HTTPError(
f'An unauthorized response occurred. Check that your api key is correct. Err: {http_err}'
, response=response)
raise http_err
return response.json()['results'] | def list(self) ->List[Dict]:
"""Returns a list of all exposed (enabled) actions associated with
current user (associated with the set api_key). Change your exposed
actions here: https://nla.zapier.com/demo/start/
The return list can be empty if no actions exposed. Else will contain
a list of action objects:
[{
"id": str,
"description": str,
"params": Dict[str, str]
}]
`params` will always contain an `instructions` key, the only required
param. All others optional and if provided will override any AI guesses
(see "understanding the AI guessing flow" here:
https://nla.zapier.com/docs/using-the-api#ai-guessing)
"""
session = self._get_session()
try:
response = session.get(self.zapier_nla_api_base + 'exposed/')
response.raise_for_status()
except requests.HTTPError as http_err:
if response.status_code == 401:
if self.zapier_nla_oauth_access_token:
raise requests.HTTPError(
f"An unauthorized response occurred. Check that your access token is correct and doesn't need to be refreshed. Err: {http_err}"
, response=response)
raise requests.HTTPError(
f'An unauthorized response occurred. Check that your api key is correct. Err: {http_err}'
, response=response)
raise http_err
return response.json()['results'] | Returns a list of all exposed (enabled) actions associated with
current user (associated with the set api_key). Change your exposed
actions here: https://nla.zapier.com/demo/start/
The return list can be empty if no actions exposed. Else will contain
a list of action objects:
[{
"id": str,
"description": str,
"params": Dict[str, str]
}]
`params` will always contain an `instructions` key, the only required
param. All others optional and if provided will override any AI guesses
(see "understanding the AI guessing flow" here:
https://nla.zapier.com/docs/using-the-api#ai-guessing) |
on_llm_end | if self.__has_valid_config is False:
return
try:
token_usage = (response.llm_output or {}).get('token_usage', {})
parsed_output: Any = [(_parse_lc_message(generation.message) if hasattr
(generation, 'message') else generation.text) for generation in
response.generations[0]]
if len(parsed_output) == 1:
parsed_output = parsed_output[0]
self.__track_event('llm', 'end', run_id=str(run_id), parent_run_id=str(
parent_run_id) if parent_run_id else None, output=parsed_output,
token_usage={'prompt': token_usage.get('prompt_tokens'),
'completion': token_usage.get('completion_tokens')}, app_id=self.
__app_id)
except Exception as e:
logger.error(f'[LLMonitor] An error occurred in on_llm_end: {e}') | def on_llm_end(self, response: LLMResult, *, run_id: UUID, parent_run_id:
Union[UUID, None]=None, **kwargs: Any) ->None:
if self.__has_valid_config is False:
return
try:
token_usage = (response.llm_output or {}).get('token_usage', {})
parsed_output: Any = [(_parse_lc_message(generation.message) if
hasattr(generation, 'message') else generation.text) for
generation in response.generations[0]]
if len(parsed_output) == 1:
parsed_output = parsed_output[0]
self.__track_event('llm', 'end', run_id=str(run_id), parent_run_id=
str(parent_run_id) if parent_run_id else None, output=
parsed_output, token_usage={'prompt': token_usage.get(
'prompt_tokens'), 'completion': token_usage.get(
'completion_tokens')}, app_id=self.__app_id)
except Exception as e:
logger.error(f'[LLMonitor] An error occurred in on_llm_end: {e}') | null |
get_num_tokens | """Calculate number of tokens."""
if not self.count_tokens:
raise NameError('Please ensure the anthropic package is loaded')
return self.count_tokens(text) | def get_num_tokens(self, text: str) ->int:
"""Calculate number of tokens."""
if not self.count_tokens:
raise NameError('Please ensure the anthropic package is loaded')
return self.count_tokens(text) | Calculate number of tokens. |
test_on_llm_end_azure_openai | response = LLMResult(generations=[], llm_output={'token_usage': {
'prompt_tokens': 1000, 'completion_tokens': 1000, 'total_tokens': 2000},
'model_name': model_name})
handler.on_llm_end(response)
assert handler.total_cost == expected_cost | @pytest.mark.parametrize('model_name,expected_cost', [('gpt-35-turbo',
0.0035), ('gpt-35-turbo-0301', 0.0035), ('gpt-35-turbo-0613', 0.0035),
('gpt-35-turbo-16k-0613', 0.007), ('gpt-35-turbo-16k', 0.007), ('gpt-4',
0.09), ('gpt-4-0314', 0.09), ('gpt-4-0613', 0.09), ('gpt-4-32k', 0.18),
('gpt-4-32k-0314', 0.18), ('gpt-4-32k-0613', 0.18)])
def test_on_llm_end_azure_openai(handler: OpenAICallbackHandler, model_name:
str, expected_cost: float) ->None:
response = LLMResult(generations=[], llm_output={'token_usage': {
'prompt_tokens': 1000, 'completion_tokens': 1000, 'total_tokens':
2000}, 'model_name': model_name})
handler.on_llm_end(response)
assert handler.total_cost == expected_cost | null |
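
For reference, the expected costs are consistent with per-1K-token pricing of $0.03 prompt / $0.06 completion for `gpt-4` (1000 tokens each gives 0.03 + 0.06 = 0.09), doubled rates for the 32k variants (0.06 + 0.12 = 0.18), $0.0015 / $0.002 for `gpt-35-turbo` (0.0035), and $0.003 / $0.004 for the 16k variant (0.007).
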
test_promptlayer_chat_openai_streaming | """Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
chat = PromptLayerChatOpenAI(max_tokens=10, streaming=True, temperature=0,
callback_manager=callback_manager, verbose=True)
message = HumanMessage(content='Hello')
response = chat([message])
assert callback_handler.llm_streams > 0
assert isinstance(response, BaseMessage) | def test_promptlayer_chat_openai_streaming() ->None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
chat = PromptLayerChatOpenAI(max_tokens=10, streaming=True, temperature
=0, callback_manager=callback_manager, verbose=True)
message = HumanMessage(content='Hello')
response = chat([message])
assert callback_handler.llm_streams > 0
assert isinstance(response, BaseMessage) | Test that streaming correctly invokes on_llm_new_token callback. |
add_texts_with_embeddings | """Run more texts through the embeddings and add to the vectorstore.
Args:
texts: List of strings to add to the vectorstore.
embs: List of lists of floats with text embeddings for texts.
metadatas: Optional list of metadata associated with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
ids = [uuid.uuid4().hex for _ in texts]
values_dict: Dict[str, List[Any]] = {self.content_field: texts, self.
doc_id_field: ids}
if not metadatas:
metadatas = []
len_diff = len(ids) - len(metadatas)
add_meta = [None for _ in range(0, len_diff)]
metadatas = [(m if m is not None else {}) for m in metadatas + add_meta]
values_dict[self.metadata_field] = metadatas
values_dict[self.text_embedding_field] = embs
self._persist(values_dict)
return ids | def add_texts_with_embeddings(self, texts: List[str], embs: List[List[float
]], metadatas: Optional[List[dict]]=None, **kwargs: Any) ->List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: List of strings to add to the vectorstore.
embs: List of lists of floats with text embeddings for texts.
metadatas: Optional list of metadata associated with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
ids = [uuid.uuid4().hex for _ in texts]
values_dict: Dict[str, List[Any]] = {self.content_field: texts, self.
doc_id_field: ids}
if not metadatas:
metadatas = []
len_diff = len(ids) - len(metadatas)
add_meta = [None for _ in range(0, len_diff)]
metadatas = [(m if m is not None else {}) for m in metadatas + add_meta]
values_dict[self.metadata_field] = metadatas
values_dict[self.text_embedding_field] = embs
self._persist(values_dict)
return ids | Run more texts through the embeddings and add to the vectorstore.
Args:
texts: List of strings to add to the vectorstore.
embs: List of lists of floats with text embeddings for texts.
metadatas: Optional list of metadata associated with the texts.
Returns:
List of ids from adding the texts into the vectorstore. |
_identifying_params | return {} | @property
def _identifying_params(self) ->Dict[str, Any]:
return {} | null |
similarity_search_by_vector | result = self.similarity_search_by_vector_with_relevance_scores(embedding,
k=k, filter=filter, **kwargs)
documents = [doc for doc, _ in result]
return documents | def similarity_search_by_vector(self, embedding: List[float], k: int=4,
filter: Optional[Dict[str, Any]]=None, **kwargs: Any) ->List[Document]:
result = self.similarity_search_by_vector_with_relevance_scores(embedding,
k=k, filter=filter, **kwargs)
documents = [doc for doc, _ in result]
return documents | null |
check_if_answer_reached | if self.strip_tokens:
return self.last_tokens_stripped == self.answer_prefix_tokens_stripped
else:
return self.last_tokens == self.answer_prefix_tokens | def check_if_answer_reached(self) ->bool:
if self.strip_tokens:
return self.last_tokens_stripped == self.answer_prefix_tokens_stripped
else:
return self.last_tokens == self.answer_prefix_tokens | null |
_run | try:
source_path_ = self.get_relative_path(source_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name='source_path', value=
source_path)
try:
destination_path_ = self.get_relative_path(destination_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name='destination_path', value=
destination_path)
try:
shutil.copy2(source_path_, destination_path_, follow_symlinks=False)
return (
f'File copied successfully from {source_path} to {destination_path}.')
except Exception as e:
return 'Error: ' + str(e) | def _run(self, source_path: str, destination_path: str, run_manager:
Optional[CallbackManagerForToolRun]=None) ->str:
try:
source_path_ = self.get_relative_path(source_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name='source_path', value=
source_path)
try:
destination_path_ = self.get_relative_path(destination_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name='destination_path',
value=destination_path)
try:
shutil.copy2(source_path_, destination_path_, follow_symlinks=False)
return (
f'File copied successfully from {source_path} to {destination_path}.'
)
except Exception as e:
return 'Error: ' + str(e) | null |
__init__ | """
Args:
file_path: The path to the Microsoft Excel file.
mode: The mode to use when partitioning the file. See unstructured docs
for more info. Optional. Defaults to "single".
**unstructured_kwargs: Keyword arguments to pass to unstructured.
"""
validate_unstructured_version(min_unstructured_version='0.6.7')
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs) | def __init__(self, file_path: str, mode: str='single', **
unstructured_kwargs: Any):
"""
Args:
file_path: The path to the Microsoft Excel file.
mode: The mode to use when partitioning the file. See unstructured docs
for more info. Optional. Defaults to "single".
**unstructured_kwargs: Keyword arguments to pass to unstructured.
"""
validate_unstructured_version(min_unstructured_version='0.6.7')
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs) | Args:
file_path: The path to the Microsoft Excel file.
mode: The mode to use when partitioning the file. See unstructured docs
for more info. Optional. Defaults to "single".
**unstructured_kwargs: Keyword arguments to pass to unstructured. |
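
A minimal usage sketch for this loader; the spreadsheet path is hypothetical, and the `unstructured` package (>= 0.6.7, per the check above) must be installed:

```python
from langchain_community.document_loaders import UnstructuredExcelLoader

# "elements" mode yields one Document per partitioned element instead of a single Document.
loader = UnstructuredExcelLoader("data/example.xlsx", mode="elements")  # hypothetical path
docs = loader.load()
print(len(docs), docs[0].metadata)
```
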
_import_graphql_tool | from langchain_community.tools.graphql.tool import BaseGraphQLTool
return BaseGraphQLTool | def _import_graphql_tool() ->Any:
from langchain_community.tools.graphql.tool import BaseGraphQLTool
return BaseGraphQLTool | null |
_build_query_sql | q_emb_str = ','.join(map(str, q_emb))
if where_str:
where_str = f'PREWHERE {where_str}'
else:
where_str = ''
settings_strs = []
if self.config.index_query_params:
for k in self.config.index_query_params:
settings_strs.append(f'SETTING {k}={self.config.index_query_params[k]}'
)
q_str = f"""
SELECT {self.config.column_map['document']},
{self.config.column_map['metadata']}, dist
FROM {self.config.database}.{self.config.table}
{where_str}
ORDER BY L2Distance({self.config.column_map['embedding']}, [{q_emb_str}])
AS dist {self.dist_order}
LIMIT {topk} {' '.join(settings_strs)}
"""
return q_str | def _build_query_sql(self, q_emb: List[float], topk: int, where_str:
Optional[str]=None) ->str:
q_emb_str = ','.join(map(str, q_emb))
if where_str:
where_str = f'PREWHERE {where_str}'
else:
where_str = ''
settings_strs = []
if self.config.index_query_params:
for k in self.config.index_query_params:
settings_strs.append(
f'SETTING {k}={self.config.index_query_params[k]}')
q_str = f"""
SELECT {self.config.column_map['document']},
{self.config.column_map['metadata']}, dist
FROM {self.config.database}.{self.config.table}
{where_str}
ORDER BY L2Distance({self.config.column_map['embedding']}, [{q_emb_str}])
AS dist {self.dist_order}
LIMIT {topk} {' '.join(settings_strs)}
"""
return q_str | null |
test_similarity_search_with_approx_infer_instack | """test end to end with approx retrieval strategy and inference in-stack"""
docsearch = ElasticsearchStore(index_name=index_name, strategy=
ElasticsearchStore.ApproxRetrievalStrategy(query_model_id=
'sentence-transformers__all-minilm-l6-v2'), query_field='text_field',
vector_query_field='vector_query_field.predicted_value', **
elasticsearch_connection)
docsearch.client.ingest.put_pipeline(id='test_pipeline', processors=[{
'inference': {'model_id': 'sentence-transformers__all-minilm-l6-v2',
'field_map': {'query_field': 'text_field'}, 'target_field':
'vector_query_field'}}])
docsearch.client.indices.create(index=index_name, mappings={'properties': {
'text_field': {'type': 'text'}, 'vector_query_field': {'properties': {
'predicted_value': {'type': 'dense_vector', 'dims': 384, 'index': True,
'similarity': 'l2_norm'}}}}}, settings={'index': {'default_pipeline':
'test_pipeline'}})
texts = ['foo', 'bar', 'baz']
for i, text in enumerate(texts):
docsearch.client.create(index=index_name, id=str(i), document={
'text_field': text, 'metadata': {}})
docsearch.client.indices.refresh(index=index_name)
def assert_query(query_body: dict, query: str) ->dict:
assert query_body == {'knn': {'filter': [], 'field':
'vector_query_field.predicted_value', 'k': 1, 'num_candidates': 50,
'query_vector_builder': {'text_embedding': {'model_id':
'sentence-transformers__all-minilm-l6-v2', 'model_text': 'foo'}}}}
return query_body
output = docsearch.similarity_search('foo', k=1, custom_query=assert_query)
assert output == [Document(page_content='foo')]
output = docsearch.similarity_search('bar', k=1)
assert output == [Document(page_content='bar')] | @pytest.mark.skipif('sentence-transformers__all-minilm-l6-v2' not in
modelsDeployed, reason=
'Sentence Transformers model not deployed in ML Node, skipping test')
def test_similarity_search_with_approx_infer_instack(self,
elasticsearch_connection: dict, index_name: str) ->None:
"""test end to end with approx retrieval strategy and inference in-stack"""
docsearch = ElasticsearchStore(index_name=index_name, strategy=
ElasticsearchStore.ApproxRetrievalStrategy(query_model_id=
'sentence-transformers__all-minilm-l6-v2'), query_field=
'text_field', vector_query_field=
'vector_query_field.predicted_value', **elasticsearch_connection)
docsearch.client.ingest.put_pipeline(id='test_pipeline', processors=[{
'inference': {'model_id': 'sentence-transformers__all-minilm-l6-v2',
'field_map': {'query_field': 'text_field'}, 'target_field':
'vector_query_field'}}])
docsearch.client.indices.create(index=index_name, mappings={
'properties': {'text_field': {'type': 'text'}, 'vector_query_field':
{'properties': {'predicted_value': {'type': 'dense_vector', 'dims':
384, 'index': True, 'similarity': 'l2_norm'}}}}}, settings={'index':
{'default_pipeline': 'test_pipeline'}})
texts = ['foo', 'bar', 'baz']
for i, text in enumerate(texts):
docsearch.client.create(index=index_name, id=str(i), document={
'text_field': text, 'metadata': {}})
docsearch.client.indices.refresh(index=index_name)
def assert_query(query_body: dict, query: str) ->dict:
assert query_body == {'knn': {'filter': [], 'field':
'vector_query_field.predicted_value', 'k': 1, 'num_candidates':
50, 'query_vector_builder': {'text_embedding': {'model_id':
'sentence-transformers__all-minilm-l6-v2', 'model_text': 'foo'}}}}
return query_body
output = docsearch.similarity_search('foo', k=1, custom_query=assert_query)
assert output == [Document(page_content='foo')]
output = docsearch.similarity_search('bar', k=1)
assert output == [Document(page_content='bar')] | test end to end with approx retrieval strategy and inference in-stack |
extract_between_tags | ext_list = re.findall(f'<{tag}\\s?>(.+?)</{tag}\\s?>', string, re.DOTALL)
if strip:
ext_list = [e.strip() for e in ext_list]
if ext_list:
if len(ext_list) != 1:
raise ValueError
return ext_list[0] | def extract_between_tags(tag: str, string: str, strip: bool=True) ->str:
ext_list = re.findall(f'<{tag}\\s?>(.+?)</{tag}\\s?>', string, re.DOTALL)
if strip:
ext_list = [e.strip() for e in ext_list]
if ext_list:
if len(ext_list) != 1:
raise ValueError
return ext_list[0] | null |
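
The helper above depends only on `re`; restating it verbatim makes its behaviour easy to check in isolation (the tag and string below are illustrative):

```python
import re

def extract_between_tags(tag: str, string: str, strip: bool = True) -> str:
    # Capture the single <tag>...</tag> span and return its (optionally stripped) contents.
    ext_list = re.findall(f'<{tag}\\s?>(.+?)</{tag}\\s?>', string, re.DOTALL)
    if strip:
        ext_list = [e.strip() for e in ext_list]
    if ext_list:
        if len(ext_list) != 1:
            raise ValueError
        return ext_list[0]

print(extract_between_tags('answer', '<answer> 42 </answer>'))  # 42
```
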
test_all_imports | assert set(__all__) == set(EXPECTED_ALL) | def test_all_imports() ->None:
assert set(__all__) == set(EXPECTED_ALL) | null |
requires_reference | """Returns whether the evaluator requires reference."""
return True | @property
def requires_reference(self) ->bool:
"""Returns whether the evaluator requires reference."""
return True | Returns whether the evaluator requires reference. |
_get_summary | try:
response = self.client.get_propertygraph_summary()
except Exception as e:
raise NeptuneQueryException({'message':
'Summary API is not available for this instance of Neptune, ensure the engine version is >=1.2.1.0'
, 'details': str(e)})
try:
summary = response['payload']['graphSummary']
except Exception:
raise NeptuneQueryException({'message':
'Summary API did not return a valid response.', 'details': response
.content.decode()})
else:
return summary | def _get_summary(self) ->Dict:
try:
response = self.client.get_propertygraph_summary()
except Exception as e:
raise NeptuneQueryException({'message':
'Summary API is not available for this instance of Neptune, ensure the engine version is >=1.2.1.0'
, 'details': str(e)})
try:
summary = response['payload']['graphSummary']
except Exception:
raise NeptuneQueryException({'message':
'Summary API did not return a valid response.', 'details':
response.content.decode()})
else:
return summary | null |
_get_elements | """Get elements matching the given CSS selector."""
elements = page.query_selector_all(selector)
results = []
for element in elements:
result = {}
for attribute in attributes:
if attribute == 'innerText':
val: Optional[str] = element.inner_text()
else:
val = element.get_attribute(attribute)
if val is not None and val.strip() != '':
result[attribute] = val
if result:
results.append(result)
return results | def _get_elements(page: SyncPage, selector: str, attributes: Sequence[str]
) ->List[dict]:
"""Get elements matching the given CSS selector."""
elements = page.query_selector_all(selector)
results = []
for element in elements:
result = {}
for attribute in attributes:
if attribute == 'innerText':
val: Optional[str] = element.inner_text()
else:
val = element.get_attribute(attribute)
if val is not None and val.strip() != '':
result[attribute] = val
if result:
results.append(result)
return results | Get elements matching the given CSS selector. |
synthetic_data_generator | return create_openai_data_generator(output_schema=MedicalBilling, llm=
ChatOpenAI(temperature=1), prompt=prompt_template) | @pytest.fixture(scope='function')
def synthetic_data_generator() ->SyntheticDataGenerator:
return create_openai_data_generator(output_schema=MedicalBilling, llm=
ChatOpenAI(temperature=1), prompt=prompt_template) | null |
get_template_variables | """Get the variables from the template.
Args:
template: The template string.
template_format: The template format. Should be one of "f-string" or "jinja2".
Returns:
The variables from the template.
Raises:
ValueError: If the template format is not supported.
"""
if template_format == 'jinja2':
input_variables = _get_jinja2_variables_from_template(template)
elif template_format == 'f-string':
input_variables = {v for _, v, _, _ in Formatter().parse(template) if v
is not None}
else:
raise ValueError(f'Unsupported template format: {template_format}')
return sorted(input_variables) | def get_template_variables(template: str, template_format: str) ->List[str]:
"""Get the variables from the template.
Args:
template: The template string.
template_format: The template format. Should be one of "f-string" or "jinja2".
Returns:
The variables from the template.
Raises:
ValueError: If the template format is not supported.
"""
if template_format == 'jinja2':
input_variables = _get_jinja2_variables_from_template(template)
elif template_format == 'f-string':
input_variables = {v for _, v, _, _ in Formatter().parse(template) if
v is not None}
else:
raise ValueError(f'Unsupported template format: {template_format}')
return sorted(input_variables) | Get the variables from the template.
Args:
template: The template string.
template_format: The template format. Should be one of "f-string" or "jinja2".
Returns:
The variables from the template.
Raises:
ValueError: If the template format is not supported. |
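
For the f-string branch above, `string.Formatter` from the standard library does the parsing; a self-contained sketch of that path with an illustrative template:

```python
from string import Formatter

template = "Tell me a {adjective} joke about {topic}."
variables = sorted({v for _, v, _, _ in Formatter().parse(template) if v is not None})
print(variables)  # ['adjective', 'topic']
```
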
input_keys | """Return the input keys.
:meta private:
"""
return list(set(self.llm_chain.input_keys) - {'agent_scratchpad'}) | @property
def input_keys(self) ->List[str]:
"""Return the input keys.
:meta private:
"""
return list(set(self.llm_chain.input_keys) - {'agent_scratchpad'}) | Return the input keys.
:meta private: |
return_node_list | """
Returns the nodes as a list of Node objects.
Each Node object will have its ID, type, and properties populated.
Returns:
List[Node]: A list of Node objects.
"""
nodes = [Node(id=key[0], type=key[1], properties=self.nodes[key]) for key in
self.nodes]
return nodes | def return_node_list(self) ->List[Node]:
"""
Returns the nodes as a list of Node objects.
Each Node object will have its ID, type, and properties populated.
Returns:
List[Node]: A list of Node objects.
"""
nodes = [Node(id=key[0], type=key[1], properties=self.nodes[key]) for
key in self.nodes]
return nodes | Returns the nodes as a list of Node objects.
Each Node object will have its ID, type, and properties populated.
Returns:
List[Node]: A list of Node objects. |
roundtrip | """Parse a file and pretty-print it to output.
The output is formatted as valid Python source code.
Args:
filename: The name of the file to parse.
output: The output stream to write to.
"""
with open(filename, 'rb') as pyfile:
encoding = tokenize.detect_encoding(pyfile.readline)[0]
with open(filename, 'r', encoding=encoding) as pyfile:
source = pyfile.read()
tree = compile(source, filename, 'exec', ast.PyCF_ONLY_AST)
Unparser(tree, output) | def roundtrip(filename, output=sys.stdout):
"""Parse a file and pretty-print it to output.
The output is formatted as valid Python source code.
Args:
filename: The name of the file to parse.
output: The output stream to write to.
"""
with open(filename, 'rb') as pyfile:
encoding = tokenize.detect_encoding(pyfile.readline)[0]
with open(filename, 'r', encoding=encoding) as pyfile:
source = pyfile.read()
tree = compile(source, filename, 'exec', ast.PyCF_ONLY_AST)
Unparser(tree, output) | Parse a file and pretty-print it to output.
The output is formatted as valid Python source code.
Args:
filename: The name of the file to parse.
output: The output stream to write to. |
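
On Python 3.9+ the standard library's `ast.unparse` performs the same parse-and-pretty-print round trip without a custom `Unparser`; a minimal sketch:

```python
import ast

source = "x = (1 +   2)\nprint( x )\n"
tree = ast.parse(source)   # equivalent to compile(..., 'exec', ast.PyCF_ONLY_AST)
print(ast.unparse(tree))   # re-emitted as valid, normalised Python source
```
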
persist | """Persist the collection.
This can be used to explicitly persist the data to disk.
It will also be called automatically when the object is destroyed.
"""
if self._persist_directory is None:
raise ValueError(
'You must specify a persist_directory on creation to persist the collection.'
)
import chromadb
major, minor, _ = chromadb.__version__.split('.')
if int(major) == 0 and int(minor) < 4:
self._client.persist() | def persist(self) ->None:
"""Persist the collection.
This can be used to explicitly persist the data to disk.
It will also be called automatically when the object is destroyed.
"""
if self._persist_directory is None:
raise ValueError(
'You must specify a persist_directory on creation to persist the collection.'
)
import chromadb
major, minor, _ = chromadb.__version__.split('.')
if int(major) == 0 and int(minor) < 4:
self._client.persist() | Persist the collection.
This can be used to explicitly persist the data to disk.
It will also be called automatically when the object is destroyed. |
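
The version gate above only calls the client-level `persist()` on pre-0.4 chromadb releases, where persistence is not automatic. The gate itself is simple string parsing; an illustrative check with a made-up version string:

```python
version = "0.3.29"  # hypothetical installed chromadb version
major, minor, _ = version.split(".")
print(int(major) == 0 and int(minor) < 4)  # True -> explicit persist() is still required
```
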
_completion_with_retry | return _make_request(llm, **_kwargs) | @retry_decorator
def _completion_with_retry(**_kwargs: Any) ->Any:
return _make_request(llm, **_kwargs) | null |
get_lc_namespace | """Get the namespace of the langchain object."""
return ['langchain', 'llms', 'openai'] | @classmethod
def get_lc_namespace(cls) ->List[str]:
"""Get the namespace of the langchain object."""
return ['langchain', 'llms', 'openai'] | Get the namespace of the langchain object. |
similarity_search | """Return Vectara documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
any other querying variable in VectaraQueryConfig
Returns:
List of Documents most similar to the query
"""
docs_and_scores = self.similarity_search_with_score(query, **kwargs)
return [doc for doc, _ in docs_and_scores] | def similarity_search(self, query: str, **kwargs: Any) ->List[Document]:
"""Return Vectara documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
any other querying variable in VectaraQueryConfig
Returns:
List of Documents most similar to the query
"""
docs_and_scores = self.similarity_search_with_score(query, **kwargs)
return [doc for doc, _ in docs_and_scores] | Return Vectara documents most similar to query.
Args:
query: Text to look up documents similar to.
any other querying variable in VectaraQueryConfig
Returns:
List of Documents most similar to the query |
mock_now | """Context manager for mocking out datetime.now() in unit tests.
Example:
with mock_now(datetime.datetime(2011, 2, 3, 10, 11)):
assert datetime.datetime.now() == datetime.datetime(2011, 2, 3, 10, 11)
"""
class MockDateTime(datetime.datetime):
"""Mock datetime.datetime.now() with a fixed datetime."""
@classmethod
def now(cls):
return datetime.datetime(dt_value.year, dt_value.month, dt_value.
day, dt_value.hour, dt_value.minute, dt_value.second, dt_value.
microsecond, dt_value.tzinfo)
real_datetime = datetime.datetime
datetime.datetime = MockDateTime
try:
yield datetime.datetime
finally:
datetime.datetime = real_datetime | @contextlib.contextmanager
def mock_now(dt_value):
"""Context manager for mocking out datetime.now() in unit tests.
Example:
with mock_now(datetime.datetime(2011, 2, 3, 10, 11)):
assert datetime.datetime.now() == datetime.datetime(2011, 2, 3, 10, 11)
"""
class MockDateTime(datetime.datetime):
"""Mock datetime.datetime.now() with a fixed datetime."""
@classmethod
def now(cls):
return datetime.datetime(dt_value.year, dt_value.month,
dt_value.day, dt_value.hour, dt_value.minute, dt_value.
second, dt_value.microsecond, dt_value.tzinfo)
real_datetime = datetime.datetime
datetime.datetime = MockDateTime
try:
yield datetime.datetime
finally:
datetime.datetime = real_datetime | Context manager for mocking out datetime.now() in unit tests.
Example:
with mock_now(datetime.datetime(2011, 2, 3, 10, 11)):
assert datetime.datetime.now() == datetime.datetime(2011, 2, 3, 10, 11) |
inputs | return self._inputs | @property
def inputs(self) ->Dict[str, str]:
return self._inputs | null |
_ddgs_text | """Run query through DuckDuckGo text search and return results."""
from duckduckgo_search import DDGS
with DDGS() as ddgs:
ddgs_gen = ddgs.text(query, region=self.region, safesearch=self.
safesearch, timelimit=self.time, max_results=max_results or self.
max_results, backend=self.backend)
if ddgs_gen:
return [r for r in ddgs_gen]
return [] | def _ddgs_text(self, query: str, max_results: Optional[int]=None) ->List[Dict
[str, str]]:
"""Run query through DuckDuckGo text search and return results."""
from duckduckgo_search import DDGS
with DDGS() as ddgs:
ddgs_gen = ddgs.text(query, region=self.region, safesearch=self.
safesearch, timelimit=self.time, max_results=max_results or
self.max_results, backend=self.backend)
if ddgs_gen:
return [r for r in ddgs_gen]
return [] | Run query through DuckDuckGo text search and return results. |
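
A usage sketch for the wrapper that owns `_ddgs_text`; it assumes the `duckduckgo-search` package is installed and network access is available:

```python
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper

wrapper = DuckDuckGoSearchAPIWrapper(max_results=3)
for item in wrapper.results("LangChain", max_results=3):
    print(item["title"], item["link"])  # structured results built on top of _ddgs_text
```
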
output_keys | _output_keys = [self.output_key]
return _output_keys | @property
def output_keys(self) ->List[str]:
_output_keys = [self.output_key]
return _output_keys | null |
embed_documents | """Compute doc embeddings using a HuggingFace instruct model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
instruction_pairs = []
for text in texts:
instruction_pairs.append([self.embed_instruction, text])
embeddings = self.client(self.pipeline_ref, instruction_pairs)
return embeddings.tolist() | def embed_documents(self, texts: List[str]) ->List[List[float]]:
"""Compute doc embeddings using a HuggingFace instruct model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
instruction_pairs = []
for text in texts:
instruction_pairs.append([self.embed_instruction, text])
embeddings = self.client(self.pipeline_ref, instruction_pairs)
return embeddings.tolist() | Compute doc embeddings using a HuggingFace instruct model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text. |
__init__ | """Create a new NebulaGraph wrapper instance."""
try:
import nebula3
import pandas
except ImportError:
raise ValueError(
'Please install NebulaGraph Python client and pandas first: `pip install nebula3-python pandas`'
)
self.username = username
self.password = password
self.address = address
self.port = port
self.space = space
self.session_pool_size = session_pool_size
self.session_pool = self._get_session_pool()
self.schema = ''
try:
self.refresh_schema()
except Exception as e:
raise ValueError(f'Could not refresh schema. Error: {e}') | def __init__(self, space: str, username: str='root', password: str='nebula',
address: str='127.0.0.1', port: int=9669, session_pool_size: int=30
) ->None:
"""Create a new NebulaGraph wrapper instance."""
try:
import nebula3
import pandas
except ImportError:
raise ValueError(
'Please install NebulaGraph Python client and pandas first: `pip install nebula3-python pandas`'
)
self.username = username
self.password = password
self.address = address
self.port = port
self.space = space
self.session_pool_size = session_pool_size
self.session_pool = self._get_session_pool()
self.schema = ''
try:
self.refresh_schema()
except Exception as e:
raise ValueError(f'Could not refresh schema. Error: {e}') | Create a new NebulaGraph wrapper instance. |
uppercase_strings | return v.upper() | @validator('algorithm', 'datatype', 'distance_metric', pre=True, each_item=True
)
def uppercase_strings(cls, v: str) ->str:
return v.upper() | null |
test_sim_search_by_vector | """Test end to end construction and similarity search by vector."""
in_memory_vec_store = DocArrayInMemorySearch.from_texts(texts=texts,
embedding=FakeEmbeddings(), metric=metric)
embedding = [1.0] * 10
output = in_memory_vec_store.similarity_search_by_vector(embedding, k=1)
assert output == [Document(page_content='bar')] | @pytest.mark.parametrize('metric', ['cosine_sim', 'euclidean_dist',
'sqeuclidean_dist'])
def test_sim_search_by_vector(metric: str, texts: List[str]) ->None:
"""Test end to end construction and similarity search by vector."""
in_memory_vec_store = DocArrayInMemorySearch.from_texts(texts=texts,
embedding=FakeEmbeddings(), metric=metric)
embedding = [1.0] * 10
output = in_memory_vec_store.similarity_search_by_vector(embedding, k=1)
assert output == [Document(page_content='bar')] | Test end to end construction and similarity search by vector. |
add_prompts_generations | tasks = []
prompts = self.payload[run_id]['prompts']
model_version = self.payload[run_id]['kwargs'].get('invocation_params', {}
).get('model_name')
for prompt, generation in zip(prompts, generations):
tasks.append({'data': {self.value: prompt, 'run_id': run_id},
'predictions': [{'result': [{'from_name': self.from_name, 'to_name':
self.to_name, 'type': 'textarea', 'value': {'text': [g.text for g in
generation]}}], 'model_version': model_version}]})
self.ls_project.import_tasks(tasks) | def add_prompts_generations(self, run_id: str, generations: List[List[
Generation]]) ->None:
tasks = []
prompts = self.payload[run_id]['prompts']
model_version = self.payload[run_id]['kwargs'].get('invocation_params', {}
).get('model_name')
for prompt, generation in zip(prompts, generations):
tasks.append({'data': {self.value: prompt, 'run_id': run_id},
'predictions': [{'result': [{'from_name': self.from_name,
'to_name': self.to_name, 'type': 'textarea', 'value': {'text':
[g.text for g in generation]}}], 'model_version': model_version}]})
self.ls_project.import_tasks(tasks) | null |
_get_length_based | return len(re.split('\n| ', text)) | def _get_length_based(text: str) ->int:
return len(re.split('\n| ', text)) | null |
get_lc_namespace | """Get the namespace of the langchain object."""
return ['langchain', 'schema', 'runnable'] | @classmethod
def get_lc_namespace(cls) ->List[str]:
"""Get the namespace of the langchain object."""
return ['langchain', 'schema', 'runnable'] | Get the namespace of the langchain object. |
arcgis_mocks | sys_modules = {'arcgis': MagicMock(), 'arcgis.features.FeatureLayer':
mock_feature_layer, 'arcgis.gis.GIS': mock_gis}
with patch.dict('sys.modules', sys_modules):
yield | @pytest.fixture
def arcgis_mocks(mock_feature_layer, mock_gis):
sys_modules = {'arcgis': MagicMock(), 'arcgis.features.FeatureLayer':
mock_feature_layer, 'arcgis.gis.GIS': mock_gis}
with patch.dict('sys.modules', sys_modules):
yield | null |
format | """Format the prompt template.
Args:
**kwargs: Keyword arguments to use for formatting.
Returns:
Formatted message.
"""
text = self.prompt.format(**kwargs)
return ChatMessage(content=text, role=self.role, additional_kwargs=self.
additional_kwargs) | def format(self, **kwargs: Any) ->BaseMessage:
"""Format the prompt template.
Args:
**kwargs: Keyword arguments to use for formatting.
Returns:
Formatted message.
"""
text = self.prompt.format(**kwargs)
return ChatMessage(content=text, role=self.role, additional_kwargs=self
.additional_kwargs) | Format the prompt template.
Args:
**kwargs: Keyword arguments to use for formatting.
Returns:
Formatted message. |
test_structured_args | """Test functionality with structured arguments."""
structured_api = _MockStructuredTool()
assert isinstance(structured_api, BaseTool)
assert structured_api.name == 'structured_api'
expected_result = "1 True {'foo': 'bar'}"
args = {'arg1': 1, 'arg2': True, 'arg3': {'foo': 'bar'}}
assert structured_api.run(args) == expected_result | def test_structured_args() ->None:
"""Test functionality with structured arguments."""
structured_api = _MockStructuredTool()
assert isinstance(structured_api, BaseTool)
assert structured_api.name == 'structured_api'
expected_result = "1 True {'foo': 'bar'}"
args = {'arg1': 1, 'arg2': True, 'arg3': {'foo': 'bar'}}
assert structured_api.run(args) == expected_result | Test functionality with structured arguments. |
_log_conversation | """Log the conversation to the context API."""
if len(self.messages) == 0:
return
self.client.log.conversation_upsert(body={'conversation': self.
conversation_model(messages=self.messages, metadata=self.metadata)})
self.messages = []
self.metadata = {} | def _log_conversation(self) ->None:
"""Log the conversation to the context API."""
if len(self.messages) == 0:
return
self.client.log.conversation_upsert(body={'conversation': self.
conversation_model(messages=self.messages, metadata=self.metadata)})
self.messages = []
self.metadata = {} | Log the conversation to the context API. |
test_interfaces | history = ChatMessageHistory()
history.add_message(SystemMessage(content='system'))
history.add_user_message('human 1')
history.add_ai_message('ai')
history.add_message(HumanMessage(content='human 2'))
assert str(history) == """System: system
Human: human 1
AI: ai
Human: human 2""" | def test_interfaces() ->None:
history = ChatMessageHistory()
history.add_message(SystemMessage(content='system'))
history.add_user_message('human 1')
history.add_ai_message('ai')
history.add_message(HumanMessage(content='human 2'))
assert str(history
) == 'System: system\nHuman: human 1\nAI: ai\nHuman: human 2' | null |
validate_credentials_path | """Validate that credentials_path exists."""
if not v.exists():
raise ValueError(f'credentials_path {v} does not exist')
return v | @validator('credentials_path')
def validate_credentials_path(cls, v: Any, **kwargs: Any) ->Any:
"""Validate that credentials_path exists."""
if not v.exists():
raise ValueError(f'credentials_path {v} does not exist')
return v | Validate that credentials_path exists. |
__del__ | if self.cursor:
self.cursor.close()
if self.connection:
self.connection.close() | def __del__(self) ->None:
if self.cursor:
self.cursor.close()
if self.connection:
self.connection.close() | null |
OutputType | return T | @property
def OutputType(self) ->Type[T]:
return T | null |
_type | """Return the output parser type for serialization."""
raise NotImplementedError(
f'_type property is not implemented in class {self.__class__.__name__}. This is required for serialization.'
) | @property
def _type(self) ->str:
"""Return the output parser type for serialization."""
raise NotImplementedError(
f'_type property is not implemented in class {self.__class__.__name__}. This is required for serialization.'
) | Return the output parser type for serialization. |
test_tool_usage | parser = JSONAgentOutputParser()
_input = """ ```
{
"action": "search",
"action_input": "2+2"
}
```"""
output = parser.invoke(_input)
expected_output = AgentAction(tool='search', tool_input='2+2', log=_input)
assert output == expected_output | def test_tool_usage() ->None:
parser = JSONAgentOutputParser()
_input = (
' ```\n{\n "action": "search",\n "action_input": "2+2"\n}\n```')
output = parser.invoke(_input)
expected_output = AgentAction(tool='search', tool_input='2+2', log=_input)
assert output == expected_output | null |
_generate | lmformatenforcer = import_lmformatenforcer()
import lmformatenforcer.integrations.transformers as hf_integration
if 'prefix_allowed_tokens_fn' in self.pipeline._forward_params:
raise ValueError(
'prefix_allowed_tokens_fn param is forbidden with LMFormatEnforcer.')
has_json_schema = self.json_schema is not None
has_regex = self.regex is not None
if has_json_schema == has_regex:
raise ValueError(
'You must specify exactly one of json_schema or a regex, but not both.'
)
if has_json_schema:
parser = lmformatenforcer.JsonSchemaParser(self.json_schema)
else:
parser = lmformatenforcer.RegexParser(self.regex)
prefix_function = hf_integration.build_transformers_prefix_allowed_tokens_fn(
self.pipeline.tokenizer, parser)
self.pipeline._forward_params['prefix_allowed_tokens_fn'] = prefix_function
result = super()._generate(prompts, stop=stop, run_manager=run_manager, **
kwargs)
del self.pipeline._forward_params['prefix_allowed_tokens_fn']
return result | def _generate(self, prompts: List[str], stop: Optional[List[str]]=None,
run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->LLMResult:
lmformatenforcer = import_lmformatenforcer()
import lmformatenforcer.integrations.transformers as hf_integration
if 'prefix_allowed_tokens_fn' in self.pipeline._forward_params:
raise ValueError(
'prefix_allowed_tokens_fn param is forbidden with LMFormatEnforcer.'
)
has_json_schema = self.json_schema is not None
has_regex = self.regex is not None
if has_json_schema == has_regex:
raise ValueError(
'You must specify exactly one of json_schema or a regex, but not both.'
)
if has_json_schema:
parser = lmformatenforcer.JsonSchemaParser(self.json_schema)
else:
parser = lmformatenforcer.RegexParser(self.regex)
prefix_function = (hf_integration.
build_transformers_prefix_allowed_tokens_fn(self.pipeline.tokenizer,
parser))
self.pipeline._forward_params['prefix_allowed_tokens_fn'] = prefix_function
result = super()._generate(prompts, stop=stop, run_manager=run_manager,
**kwargs)
del self.pipeline._forward_params['prefix_allowed_tokens_fn']
return result | null |
from_texts | """
Return Neo4jVector initialized from texts and embeddings.
Neo4j credentials are required in the form of `url`, `username`,
and `password` and optional `database` parameters.
"""
embeddings = embedding.embed_documents(list(texts))
return cls.__from(texts, embeddings, embedding, metadatas=metadatas, ids=
ids, distance_strategy=distance_strategy, **kwargs) | @classmethod
def from_texts(cls: Type[Neo4jVector], texts: List[str], embedding:
Embeddings, metadatas: Optional[List[dict]]=None, distance_strategy:
DistanceStrategy=DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]]=
None, **kwargs: Any) ->Neo4jVector:
"""
Return Neo4jVector initialized from texts and embeddings.
Neo4j credentials are required in the form of `url`, `username`,
and `password` and optional `database` parameters.
"""
embeddings = embedding.embed_documents(list(texts))
return cls.__from(texts, embeddings, embedding, metadatas=metadatas,
ids=ids, distance_strategy=distance_strategy, **kwargs) | Return Neo4jVector initialized from texts and embeddings.
Neo4j credentials are required in the form of `url`, `username`,
and `password`; the `database` parameter is optional.
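A minimal usage sketch for the factory above, assuming a locally running Neo4j instance; the connection details, texts, and embedding model below are placeholders, not values from the source.
# Hypothetical usage of Neo4jVector.from_texts; URL and credentials are assumptions.
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import Neo4jVector
store = Neo4jVector.from_texts(
    texts=['alpha', 'beta'],
    embedding=FakeEmbeddings(size=8),
    url='bolt://localhost:7687',
    username='neo4j',
    password='password',
)
docs = store.similarity_search('alpha', k=1)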
disable_ssl_warnings | """Disable SSL warnings."""
if v:
try:
import urllib3
urllib3.disable_warnings()
except ImportError as e:
print(e)
return v | @validator('unsecure')
def disable_ssl_warnings(cls, v: bool) ->bool:
"""Disable SSL warnings."""
if v:
try:
import urllib3
urllib3.disable_warnings()
except ImportError as e:
print(e)
return v | Disable SSL warnings. |
_import_office365_create_draft_message | from langchain_community.tools.office365.create_draft_message import O365CreateDraftMessage
return O365CreateDraftMessage | def _import_office365_create_draft_message() ->Any:
from langchain_community.tools.office365.create_draft_message import O365CreateDraftMessage
return O365CreateDraftMessage | null |
drop_tables | self.sync_client.drop_table() | def drop_tables(self) ->None:
self.sync_client.drop_table() | null |
web_search | results = ddg_search.results(query, num_results)
return [r['link'] for r in results] | def web_search(query: str, num_results: int):
results = ddg_search.results(query, num_results)
return [r['link'] for r in results] | null |
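A hedged sketch of the setup this helper appears to rely on: `ddg_search` would be a DuckDuckGo search wrapper whose `results(query, num_results)` call returns dictionaries containing a `link` key; the wrapper choice is an assumption, not taken from the source.
# Assumed setup; DuckDuckGoSearchAPIWrapper.results returns dicts with a 'link' key.
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
ddg_search = DuckDuckGoSearchAPIWrapper()
urls = web_search('LangChain retrieval', num_results=3)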
similarity_search | """Perform a similarity search with StarRocks
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Please do not let the end user fill this and always be aware
of SQL injection. When dealing with metadatas, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
List[Document]: List of Documents
"""
return self.similarity_search_by_vector(self.embedding_function.embed_query
(query), k, where_str, **kwargs) | def similarity_search(self, query: str, k: int=4, where_str: Optional[str]=
None, **kwargs: Any) ->List[Document]:
"""Perform a similarity search with StarRocks
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Please do not let the end user fill this and always be aware
of SQL injection. When dealing with metadatas, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
List[Document]: List of Documents
"""
return self.similarity_search_by_vector(self.embedding_function.
embed_query(query), k, where_str, **kwargs) | Perform a similarity search with StarRocks
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Please do not let the end user fill this and always be aware
of SQL injection. When dealing with metadatas, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
List[Document]: List of Documents |
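As a hedged illustration of the `where_str` note above, a metadata filter should qualify attributes with the metadata column; `docsearch` and the `source` attribute are assumptions, not values from the source.
# Hypothetical call; assumes an initialized StarRocks store whose documents
# carry a 'source' key in their metadata.
docs = docsearch.similarity_search(
    'what is a vector database?',
    k=4,
    where_str="metadata.source = 'docs'",
)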
from_documents | texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
connection_string = cls.get_connection_string(kwargs)
kwargs['connection_string'] = connection_string
return cls.from_texts(texts=texts, pre_delete_collection=
pre_delete_collection, embedding=embedding, metadatas=metadatas, ids=
ids, collection_name=collection_name, **kwargs) | @classmethod
def from_documents(cls: Type[PGEmbedding], documents: List[Document],
embedding: Embeddings, collection_name: str=
_LANGCHAIN_DEFAULT_COLLECTION_NAME, ids: Optional[List[str]]=None,
pre_delete_collection: bool=False, **kwargs: Any) ->PGEmbedding:
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
connection_string = cls.get_connection_string(kwargs)
kwargs['connection_string'] = connection_string
return cls.from_texts(texts=texts, pre_delete_collection=
pre_delete_collection, embedding=embedding, metadatas=metadatas,
ids=ids, collection_name=collection_name, **kwargs) | null |
bulk_similarity_search_with_score | """Return documents from Marqo that are similar to the query as well as
their scores using a batch of queries.
Args:
queries (Iterable[Union[str, Dict[str, float]]]): An iterable of queries
to execute in bulk; queries in the list can be strings or dictionaries
of weighted queries.
k (int, optional): The number of documents to return. Defaults to 4.
Returns:
List[List[Tuple[Document, float]]]: A list of lists of the matching
documents and their scores for each query
"""
bulk_results = self.marqo_bulk_similarity_search(queries=queries, k=k)
bulk_documents: List[List[Tuple[Document, float]]] = []
for results in bulk_results['result']:
documents = self._construct_documents_from_results_with_score(results)
bulk_documents.append(documents)
return bulk_documents | def bulk_similarity_search_with_score(self, queries: Iterable[Union[str,
Dict[str, float]]], k: int=4, **kwargs: Any) ->List[List[Tuple[Document,
float]]]:
"""Return documents from Marqo that are similar to the query as well as
their scores using a batch of queries.
Args:
queries (Iterable[Union[str, Dict[str, float]]]): An iterable of queries
to execute in bulk; queries in the list can be strings or dictionaries
of weighted queries.
k (int, optional): The number of documents to return. Defaults to 4.
Returns:
List[List[Tuple[Document, float]]]: A list of lists of the matching
documents and their scores for each query
"""
bulk_results = self.marqo_bulk_similarity_search(queries=queries, k=k)
bulk_documents: List[List[Tuple[Document, float]]] = []
for results in bulk_results['result']:
documents = self._construct_documents_from_results_with_score(results)
bulk_documents.append(documents)
return bulk_documents | Return documents from Marqo that are similar to the query as well as
their scores using a batch of queries.
Args:
queries (Iterable[Union[str, Dict[str, float]]]): An iterable of queries
to execute in bulk; queries in the list can be strings or dictionaries
of weighted queries.
k (int, optional): The number of documents to return. Defaults to 4.
Returns:
List[List[Tuple[Document, float]]]: A list of lists of the matching
documents and their scores for each query |
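A sketch of how the bulk call above might be used, mixing a plain string query with a weighted-query dictionary; `marqo_store` is an assumed, already-initialized Marqo vector store.
# Hypothetical usage; each inner list holds (Document, score) pairs for one query.
results = marqo_store.bulk_similarity_search_with_score(
    queries=['red shoes', {'running shoes': 1.0, 'sandals': -0.5}],
    k=3,
)
for per_query in results:
    for doc, score in per_query:
        print(score, doc.page_content[:60])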
requires_input | return True | @property
def requires_input(self) ->bool:
return True | null |
test_list_directory | """Test the DirectoryListing tool."""
with TemporaryDirectory() as temp_dir:
tool = ListDirectoryTool()
file_1 = Path(temp_dir) / 'file1.txt'
file_2 = Path(temp_dir) / 'file2.txt'
file_1.write_text('File 1 content')
file_2.write_text('File 2 content')
entries = tool.run({'dir_path': temp_dir}).split('\n')
assert set(entries) == {'file1.txt', 'file2.txt'} | def test_list_directory() ->None:
"""Test the DirectoryListing tool."""
with TemporaryDirectory() as temp_dir:
tool = ListDirectoryTool()
file_1 = Path(temp_dir) / 'file1.txt'
file_2 = Path(temp_dir) / 'file2.txt'
file_1.write_text('File 1 content')
file_2.write_text('File 2 content')
entries = tool.run({'dir_path': temp_dir}).split('\n')
assert set(entries) == {'file1.txt', 'file2.txt'} | Test the DirectoryListing tool. |
_generate | """Run the LLM on the given prompt and input."""
from vllm import SamplingParams
params = {**self._default_params, **kwargs, 'stop': stop}
sampling_params = SamplingParams(**params)
outputs = self.client.generate(prompts, sampling_params)
generations = []
for output in outputs:
text = output.outputs[0].text
generations.append([Generation(text=text)])
return LLMResult(generations=generations) | def _generate(self, prompts: List[str], stop: Optional[List[str]]=None,
run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->LLMResult:
"""Run the LLM on the given prompt and input."""
from vllm import SamplingParams
params = {**self._default_params, **kwargs, 'stop': stop}
sampling_params = SamplingParams(**params)
outputs = self.client.generate(prompts, sampling_params)
generations = []
for output in outputs:
text = output.outputs[0].text
generations.append([Generation(text=text)])
return LLMResult(generations=generations) | Run the LLM on the given prompt and input. |
__delete__ | if instance is not None:
emit_warning()
return super().__delete__(instance) | def __delete__(self, instance):
if instance is not None:
emit_warning()
return super().__delete__(instance) | null |
test_all_imports | assert set(__all__) == set(EXPECTED_ALL) | def test_all_imports() ->None:
assert set(__all__) == set(EXPECTED_ALL) | null |
__init__ | self.dimension = dimension
self.shard = shard
self.replicas = replicas
self.index_type = index_type
self.metric_type = metric_type
self.params = params | def __init__(self, dimension: int, shard: int=1, replicas: int=2,
index_type: str='HNSW', metric_type: str='L2', params: Optional[Dict]=None
):
self.dimension = dimension
self.shard = shard
self.replicas = replicas
self.index_type = index_type
self.metric_type = metric_type
self.params = params | null |
test_on_llm_end_no_cost_invalid_model | response = LLMResult(generations=[], llm_output={'token_usage': {
'prompt_tokens': 1000, 'completion_tokens': 1000, 'total_tokens': 2000},
'model_name': model_name})
handler.on_llm_end(response)
assert handler.total_cost == 0 | @pytest.mark.parametrize('model_name', ['gpt-35-turbo-16k-0301',
'gpt-4-0301', 'gpt-4-32k-0301'])
def test_on_llm_end_no_cost_invalid_model(handler: OpenAICallbackHandler,
model_name: str) ->None:
response = LLMResult(generations=[], llm_output={'token_usage': {
'prompt_tokens': 1000, 'completion_tokens': 1000, 'total_tokens':
2000}, 'model_name': model_name})
handler.on_llm_end(response)
assert handler.total_cost == 0 | null |
test___module_name___embedding_documents | """Test __ModuleName__ embeddings."""
documents = ['foo bar']
embedding = __ModuleName__Embeddings()
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) > 0 | def test___module_name___embedding_documents() ->None:
"""Test cohere embeddings."""
documents = ['foo bar']
embedding = __ModuleName__Embeddings()
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) > 0 | Test __ModuleName__ embeddings.
marqo_similarity_search | """Return documents from Marqo exposing Marqo's output directly
Args:
query (str): The query to search with.
k (int, optional): The number of documents to return. Defaults to 4.
Returns:
Dict[str, List[Dict[str, str]]]: The hits from Marqo.
"""
results = self._client.index(self._index_name).search(q=query,
searchable_attributes=self._searchable_attributes, limit=k)
return results | def marqo_similarity_search(self, query: Union[str, Dict[str, float]], k: int=4
) ->Dict[str, List[Dict[str, str]]]:
"""Return documents from Marqo exposing Marqo's output directly
Args:
query (str): The query to search with.
k (int, optional): The number of documents to return. Defaults to 4.
Returns:
Dict[str, List[Dict[str, str]]]: The hits from Marqo.
"""
results = self._client.index(self._index_name).search(q=query,
searchable_attributes=self._searchable_attributes, limit=k)
return results | Return documents from Marqo exposing Marqo's output directly
Args:
query (str): The query to search with.
k (int, optional): The number of documents to return. Defaults to 4.
Returns:
Dict[str, List[Dict[str, str]]]: The hits from Marqo.
test_vertex_initialization | llm = VertexAI(model_name=model_name) if model_name else VertexAI()
assert llm._llm_type == 'vertexai'
try:
assert llm.model_name == llm.client._model_id
except AttributeError:
assert llm.model_name == llm.client._model_name.split('/')[-1] | @pytest.mark.parametrize('model_name', model_names_to_test_with_default)
def test_vertex_initialization(model_name: str) ->None:
llm = VertexAI(model_name=model_name) if model_name else VertexAI()
assert llm._llm_type == 'vertexai'
try:
assert llm.model_name == llm.client._model_id
except AttributeError:
assert llm.model_name == llm.client._model_name.split('/')[-1] | null |
_import_awslambda | from langchain_community.utilities.awslambda import LambdaWrapper
return LambdaWrapper | def _import_awslambda() ->Any:
from langchain_community.utilities.awslambda import LambdaWrapper
return LambdaWrapper | null |
_make_session | """Create a session and close it after use."""
if isinstance(self.session_factory, async_sessionmaker):
raise AssertionError('This method is not supported for async engines.')
session = self.session_factory()
try:
yield session
finally:
session.close() | @contextlib.contextmanager
def _make_session(self) ->Generator[Session, None, None]:
"""Create a session and close it after use."""
if isinstance(self.session_factory, async_sessionmaker):
raise AssertionError('This method is not supported for async engines.')
session = self.session_factory()
try:
yield session
finally:
session.close() | Create a session and close it after use. |
validate_environment | if isinstance(values.get('model'), str):
values['model'] = _model_default_factory(model_name=values['model'])
return values | @root_validator(pre=True)
def validate_environment(cls, values: dict) ->dict:
if isinstance(values.get('model'), str):
values['model'] = _model_default_factory(model_name=values['model'])
return values | null |
__init__ | """Initialize the BibtexLoader.
Args:
file_path: Path to the bibtex file.
parser: The parser to use. If None, a default parser is used.
max_docs: Max number of associated documents to load. Use -1 for
no limit.
max_content_chars: Maximum number of characters to load from the PDF.
load_extra_metadata: Whether to load extra metadata from the PDF.
file_pattern: Regex pattern to match the file name in the bibtex.
"""
self.file_path = file_path
self.parser = parser or BibtexparserWrapper()
self.max_docs = max_docs
self.max_content_chars = max_content_chars
self.load_extra_metadata = load_extra_metadata
self.file_regex = re.compile(file_pattern) | def __init__(self, file_path: str, *, parser: Optional[BibtexparserWrapper]
=None, max_docs: Optional[int]=None, max_content_chars: Optional[int]=
4000, load_extra_metadata: bool=False, file_pattern: str='[^:]+\\.pdf'):
"""Initialize the BibtexLoader.
Args:
file_path: Path to the bibtex file.
parser: The parser to use. If None, a default parser is used.
max_docs: Max number of associated documents to load. Use -1 for
no limit.
max_content_chars: Maximum number of characters to load from the PDF.
load_extra_metadata: Whether to load extra metadata from the PDF.
file_pattern: Regex pattern to match the file name in the bibtex.
"""
self.file_path = file_path
self.parser = parser or BibtexparserWrapper()
self.max_docs = max_docs
self.max_content_chars = max_content_chars
self.load_extra_metadata = load_extra_metadata
self.file_regex = re.compile(file_pattern) | Initialize the BibtexLoader.
Args:
file_path: Path to the bibtex file.
parser: The parser to use. If None, a default parser is used.
max_docs: Max number of associated documents to load. Use -1 for
no limit.
max_content_chars: Maximum number of characters to load from the PDF.
load_extra_metadata: Whether to load extra metadata from the PDF.
file_pattern: Regex pattern to match the file name in the bibtex. |
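A minimal usage sketch for the loader above; the file path is a placeholder and the remaining defaults are left as shipped.
# Hypothetical usage; 'references.bib' is a placeholder path.
from langchain_community.document_loaders import BibtexLoader
loader = BibtexLoader('references.bib', max_docs=2, max_content_chars=2000)
docs = loader.load()
for doc in docs:
    print(doc.metadata.get('title'), len(doc.page_content))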
_generate_with_retry | resp = llm.client.call(**_kwargs)
return check_response(resp) | @retry_decorator
def _generate_with_retry(**_kwargs: Any) ->Any:
resp = llm.client.call(**_kwargs)
return check_response(resp) | null |
artifact | """To upload the file from given path as artifact."""
with self.mlflow.start_run(run_id=self.run.info.run_id, experiment_id=self.
mlf_expid):
self.mlflow.log_artifact(path) | def artifact(self, path: str) ->None:
"""To upload the file from given path as artifact."""
with self.mlflow.start_run(run_id=self.run.info.run_id, experiment_id=
self.mlf_expid):
self.mlflow.log_artifact(path) | Upload the file at the given path as an artifact.
_make_request_url | return f'{self.arcee_api_url}/{self.arcee_api_version}/{route}' | def _make_request_url(self, route: Union[ArceeRoute, str]) ->str:
return f'{self.arcee_api_url}/{self.arcee_api_version}/{route}' | null |
from_pydantic | try:
from guardrails import Guard
except ImportError:
raise ImportError(
'guardrails-ai package not installed. Install it by running `pip install guardrails-ai`.'
)
return cls(guard=Guard.from_pydantic(output_class, '', num_reasks=
num_reasks), api=api, args=args, kwargs=kwargs) | @classmethod
def from_pydantic(cls, output_class: Any, num_reasks: int=1, api: Optional[
Callable]=None, *args: Any, **kwargs: Any) ->GuardrailsOutputParser:
try:
from guardrails import Guard
except ImportError:
raise ImportError(
'guardrails-ai package not installed. Install it by running `pip install guardrails-ai`.'
)
return cls(guard=Guard.from_pydantic(output_class, '', num_reasks=
num_reasks), api=api, args=args, kwargs=kwargs) | null |
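A sketch of how the classmethod above could be exercised, assuming guardrails-ai is installed; `Person` and the sample output string are illustrative placeholders, and `GuardrailsOutputParser` refers to the class this method is defined on.
# Hypothetical schema and parse call; the JSON string stands in for LLM output.
from pydantic import BaseModel
class Person(BaseModel):
    name: str
    age: int
parser = GuardrailsOutputParser.from_pydantic(Person, num_reasks=1)
validated = parser.parse('{"name": "Ada", "age": 36}')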
__init__ | """Initialize callback handler."""
flytekit, renderer = import_flytekit()
self.pandas = import_pandas()
self.textstat = None
try:
self.textstat = import_textstat()
except ImportError:
logger.warning(
'Textstat library is not installed. It may result in the inability to log certain metrics that can be captured with Textstat.'
)
spacy = None
try:
spacy = import_spacy()
except ImportError:
logger.warning(
'Spacy library is not installed. It may result in the inability to log certain metrics that can be captured with Spacy.'
)
super().__init__()
self.nlp = None
if spacy:
try:
self.nlp = spacy.load('en_core_web_sm')
except OSError:
logger.warning(
"FlyteCallbackHandler uses spacy's en_core_web_sm model for certain metrics. To download, run the following command in your terminal: `python -m spacy download en_core_web_sm`"
)
self.table_renderer = renderer.TableRenderer
self.markdown_renderer = renderer.MarkdownRenderer
self.deck = flytekit.Deck('LangChain Metrics', self.markdown_renderer().
to_html('## LangChain Metrics')) | def __init__(self) ->None:
"""Initialize callback handler."""
flytekit, renderer = import_flytekit()
self.pandas = import_pandas()
self.textstat = None
try:
self.textstat = import_textstat()
except ImportError:
logger.warning(
'Textstat library is not installed. It may result in the inability to log certain metrics that can be captured with Textstat.'
)
spacy = None
try:
spacy = import_spacy()
except ImportError:
logger.warning(
'Spacy library is not installed. It may result in the inability to log certain metrics that can be captured with Spacy.'
)
super().__init__()
self.nlp = None
if spacy:
try:
self.nlp = spacy.load('en_core_web_sm')
except OSError:
logger.warning(
"FlyteCallbackHandler uses spacy's en_core_web_sm model for certain metrics. To download, run the following command in your terminal: `python -m spacy download en_core_web_sm`"
)
self.table_renderer = renderer.TableRenderer
self.markdown_renderer = renderer.MarkdownRenderer
self.deck = flytekit.Deck('LangChain Metrics', self.markdown_renderer()
.to_html('## LangChain Metrics')) | Initialize callback handler. |
_agent_type | raise ValueError | @property
def _agent_type(self) ->str:
raise ValueError | null |
card_list_to_objects | """Helper to convert dict cards into trello weird mix of objects and dictionaries"""
for card in cards:
card['checklists'] = list_to_objects(card.get('checklists'))
card['labels'] = list_to_objects(card.get('labels'))
return list_to_objects(cards) | def card_list_to_objects(cards: list) ->list:
"""Helper to convert dict cards into trello weird mix of objects and dictionaries"""
for card in cards:
card['checklists'] = list_to_objects(card.get('checklists'))
card['labels'] = list_to_objects(card.get('labels'))
return list_to_objects(cards) | Helper to convert dict cards into Trello's mix of objects and dictionaries.
test_tencent_vector_db_add_extra | """Test end to end construction and similarity search with extra texts added."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _tencent_vector_db_from_texts(metadatas=metadatas)
docsearch.add_texts(texts, metadatas)
time.sleep(3)
output = docsearch.similarity_search('foo', k=10)
assert len(output) == 6 | def test_tencent_vector_db_add_extra() ->None:
"""Test end to end construction and MRR search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _tencent_vector_db_from_texts(metadatas=metadatas)
docsearch.add_texts(texts, metadatas)
time.sleep(3)
output = docsearch.similarity_search('foo', k=10)
assert len(output) == 6 | Test end to end construction and similarity search with extra texts added.
test_chat_ai_endpoints_system_message | """Test wrapper with system message."""
chat = ChatNVIDIA(model='llama2_13b', max_tokens=36)
system_message = SystemMessage(content='You are to chat with the user.')
human_message = HumanMessage(content='Hello')
response = chat([system_message, human_message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str) | def test_chat_ai_endpoints_system_message() ->None:
"""Test wrapper with system message."""
chat = ChatNVIDIA(model='llama2_13b', max_tokens=36)
system_message = SystemMessage(content='You are to chat with the user.')
human_message = HumanMessage(content='Hello')
response = chat([system_message, human_message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str) | Test wrapper with system message. |
_default_params | """Get the default parameters for calling the API."""
defaults = {'model': self.model, 'temperature': self.temperature,
'max_tokens': self.max_tokens, 'top_p': self.top_p, 'random_seed': self
.random_seed, 'safe_mode': self.safe_mode}
filtered = {k: v for k, v in defaults.items() if v is not None}
return filtered | @property
def _default_params(self) ->Dict[str, Any]:
"""Get the default parameters for calling the API."""
defaults = {'model': self.model, 'temperature': self.temperature,
'max_tokens': self.max_tokens, 'top_p': self.top_p, 'random_seed':
self.random_seed, 'safe_mode': self.safe_mode}
filtered = {k: v for k, v in defaults.items() if v is not None}
return filtered | Get the default parameters for calling the API. |
from_template | """Create a class from a string template.
Args:
template: a template.
template_format: format of the template.
partial_variables: A dictionary of variables that can be used to partially
fill in the template. For example, if the template is
`"{variable1} {variable2}"`, and `partial_variables` is
`{"variable1": "foo"}`, then the final prompt will be
`"foo {variable2}"`.
**kwargs: keyword arguments to pass to the constructor.
Returns:
A new instance of this class.
"""
prompt = PromptTemplate.from_template(template, template_format=
template_format, partial_variables=partial_variables)
return cls(prompt=prompt, **kwargs) | @classmethod
def from_template(cls: Type[MessagePromptTemplateT], template: str,
template_format: str='f-string', partial_variables: Optional[Dict[str,
Any]]=None, **kwargs: Any) ->MessagePromptTemplateT:
"""Create a class from a string template.
Args:
template: a template.
template_format: format of the template.
partial_variables: A dictionary of variables that can be used to partially
fill in the template. For example, if the template is
`"{variable1} {variable2}"`, and `partial_variables` is
`{"variable1": "foo"}`, then the final prompt will be
`"foo {variable2}"`.
**kwargs: keyword arguments to pass to the constructor.
Returns:
A new instance of this class.
"""
prompt = PromptTemplate.from_template(template, template_format=
template_format, partial_variables=partial_variables)
return cls(prompt=prompt, **kwargs) | Create a class from a string template.
Args:
template: a template.
template_format: format of the template.
partial_variables: A dictionary of variables that can be used to partially
fill in the template. For example, if the template is
`"{variable1} {variable2}"`, and `partial_variables` is
`{"variable1": "foo"}`, then the final prompt will be
`"foo {variable2}"`.
**kwargs: keyword arguments to pass to the constructor.
Returns:
A new instance of this class. |
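A brief sketch of the partial-variables behaviour described above, using HumanMessagePromptTemplate as an illustrative subclass; the template text and variable names are placeholders.
# Hypothetical usage; 'language' is pre-filled, 'text' is supplied at format time.
from langchain_core.prompts import HumanMessagePromptTemplate
msg_template = HumanMessagePromptTemplate.from_template(
    'Translate {text} into {language}.',
    partial_variables={'language': 'French'},
)
message = msg_template.format(text='good morning')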
get_schema | return db.get_table_info() | def get_schema(_):
return db.get_table_info() | null |
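This helper is typically wired into a runnable chain so the table info is injected alongside the question; a hedged sketch, assuming `db` is a SQLDatabase instance created elsewhere.
# Hypothetical wiring; RunnablePassthrough.assign adds a 'schema' key to the input dict.
from langchain_core.runnables import RunnablePassthrough
inputs = RunnablePassthrough.assign(schema=get_schema)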