method_name (string, 1–78 chars) | method_body (string, 3–9.66k chars) | full_code (string, 31–10.7k chars) | docstring (string, 4–4.74k chars; ⌀ = null)
---|---|---|---|
__enter__ | """Open file to pipe stdout to."""
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w') | def __enter__(self) ->None:
"""Open file to pipe stdout to."""
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w') | Open file to pipe stdout to. |
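The `__enter__` above is only half of a stdout-suppressing context manager. A minimal sketch of the matching `__exit__`, assuming the `_original_stdout` attribute set above (the class name here is hypothetical):

```python
import os
import sys


class SuppressStdout:
    """Silence stdout for the duration of a `with` block."""

    def __enter__(self) -> None:
        """Open file to pipe stdout to."""
        self._original_stdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')

    def __exit__(self, exc_type, exc_value, traceback) -> None:
        # Close the devnull handle and restore the real stream,
        # even if the block raised an exception.
        sys.stdout.close()
        sys.stdout = self._original_stdout


with SuppressStdout():
    print('this output is swallowed by os.devnull')
```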
_create_chat_result | generations = []
for res in response.choices:
message = convert_dict_to_message(res.message)
gen = ChatGeneration(message=message, generation_info=dict(
finish_reason=res.finish_reason))
generations.append(gen)
llm_output = {'model': self.model}
return ChatResult(generations=generations, llm_output=llm_output) | def _create_chat_result(self, response: Any) ->ChatResult:
generations = []
for res in response.choices:
message = convert_dict_to_message(res.message)
gen = ChatGeneration(message=message, generation_info=dict(
finish_reason=res.finish_reason))
generations.append(gen)
llm_output = {'model': self.model}
return ChatResult(generations=generations, llm_output=llm_output) | null |
assert_docs | for doc in docs:
assert doc.metadata
assert set(doc.metadata) == {'Copyright Information', 'uid', 'Title',
'Published'} | def assert_docs(docs: List[Document]) ->None:
for doc in docs:
assert doc.metadata
assert set(doc.metadata) == {'Copyright Information', 'uid',
'Title', 'Published'} | null |
_import_arcee | from langchain_community.utilities.arcee import ArceeWrapper
return ArceeWrapper | def _import_arcee() ->Any:
from langchain_community.utilities.arcee import ArceeWrapper
return ArceeWrapper | null |
_completion_with_retry | ordered_generation_requests = get_ordered_generation_requests(
models_priority_list, **kwargs)
return llm.client.generate(ordered_generation_requests=
ordered_generation_requests, is_stream=kwargs.get('stream', False)) | @retry_decorator
def _completion_with_retry(**kwargs: Any) ->Any:
ordered_generation_requests = get_ordered_generation_requests(
models_priority_list, **kwargs)
return llm.client.generate(ordered_generation_requests=
ordered_generation_requests, is_stream=kwargs.get('stream', False)) | null |
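The row above assumes an outer `retry_decorator` defined elsewhere in the module. LangChain typically builds such decorators with the `tenacity` package; a sketch under that assumption (the retry limits here are illustrative, not the module's actual values):

```python
from tenacity import retry, stop_after_attempt, wait_exponential

# Retries the wrapped call up to 6 times with exponential backoff,
# re-raising the last error if every attempt fails.
retry_decorator = retry(
    reraise=True,
    stop=stop_after_attempt(6),
    wait=wait_exponential(multiplier=1, min=4, max=10),
)
```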
on_agent_finish | """Run when agent ends running."""
self.step += 1
self.agent_ends += 1
self.ends += 1
resp = self._init_resp()
output = finish.return_values['output']
log = finish.log
resp.update({'action': 'on_agent_finish', 'log': log})
resp.update(self.get_custom_callback_meta())
if self.stream_logs:
self._log_stream(output, resp, self.step)
resp.update({'output': output})
self.action_records.append(resp) | def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) ->None:
"""Run when agent ends running."""
self.step += 1
self.agent_ends += 1
self.ends += 1
resp = self._init_resp()
output = finish.return_values['output']
log = finish.log
resp.update({'action': 'on_agent_finish', 'log': log})
resp.update(self.get_custom_callback_meta())
if self.stream_logs:
self._log_stream(output, resp, self.step)
resp.update({'output': output})
self.action_records.append(resp) | Run when agent ends running. |
test_fireworks_invoke | """Tests completion with invoke"""
output = llm.invoke('How is the weather in New York today?', stop=[','])
assert isinstance(output, str)
assert output[-1] == ',' | @pytest.mark.scheduled
def test_fireworks_invoke(llm: Fireworks) ->None:
"""Tests completion with invoke"""
output = llm.invoke('How is the weather in New York today?', stop=[','])
assert isinstance(output, str)
assert output[-1] == ',' | Tests completion with invoke |
test_similarity_search_with_metadata | """Test end to end construction and search with metadata."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = Weaviate.from_texts(texts, embedding_openai, metadatas=
metadatas, weaviate_url=weaviate_url)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo', metadata={'page': 0})] | @pytest.mark.vcr(ignore_localhost=True)
def test_similarity_search_with_metadata(self, weaviate_url: str,
embedding_openai: OpenAIEmbeddings) ->None:
"""Test end to end construction and search with metadata."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = Weaviate.from_texts(texts, embedding_openai, metadatas=
metadatas, weaviate_url=weaviate_url)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo', metadata={'page': 0})] | Test end to end construction and search with metadata. |
_insert | _insert_query = self._build_insert_sql(transac, column_names)
self.client.command(_insert_query) | def _insert(self, transac: Iterable, column_names: Iterable[str]) ->None:
_insert_query = self._build_insert_sql(transac, column_names)
self.client.command(_insert_query) | null |
test_schemas | fake = FakeRunnable()
assert fake.input_schema.schema() == {'title': 'FakeRunnableInput', 'type':
'string'}
assert fake.output_schema.schema() == {'title': 'FakeRunnableOutput',
'type': 'integer'}
assert fake.config_schema(include=['tags', 'metadata', 'run_name']).schema(
) == {'title': 'FakeRunnableConfig', 'type': 'object', 'properties': {
'metadata': {'title': 'Metadata', 'type': 'object'}, 'run_name': {
'title': 'Run Name', 'type': 'string'}, 'tags': {'items': {'type':
'string'}, 'title': 'Tags', 'type': 'array'}}}
fake_bound = FakeRunnable().bind(a='b')
assert fake_bound.input_schema.schema() == {'title': 'FakeRunnableInput',
'type': 'string'}
assert fake_bound.output_schema.schema() == {'title': 'FakeRunnableOutput',
'type': 'integer'}
fake_w_fallbacks = FakeRunnable().with_fallbacks((fake,))
assert fake_w_fallbacks.input_schema.schema() == {'title':
'FakeRunnableInput', 'type': 'string'}
assert fake_w_fallbacks.output_schema.schema() == {'title':
'FakeRunnableOutput', 'type': 'integer'}
def typed_lambda_impl(x: str) ->int:
return len(x)
typed_lambda = RunnableLambda(typed_lambda_impl)
assert typed_lambda.input_schema.schema() == {'title':
'typed_lambda_impl_input', 'type': 'string'}
assert typed_lambda.output_schema.schema() == {'title':
'typed_lambda_impl_output', 'type': 'integer'}
async def typed_async_lambda_impl(x: str) ->int:
return len(x)
typed_async_lambda: Runnable = RunnableLambda(typed_async_lambda_impl)
assert typed_async_lambda.input_schema.schema() == {'title':
'typed_async_lambda_impl_input', 'type': 'string'}
assert typed_async_lambda.output_schema.schema() == {'title':
'typed_async_lambda_impl_output', 'type': 'integer'}
fake_ret = FakeRetriever()
assert fake_ret.input_schema.schema() == {'title': 'FakeRetrieverInput',
'type': 'string'}
assert fake_ret.output_schema.schema() == {'title': 'FakeRetrieverOutput',
'type': 'array', 'items': {'$ref': '#/definitions/Document'},
'definitions': {'Document': {'title': 'Document', 'description':
'Class for storing a piece of text and associated metadata.', 'type':
'object', 'properties': {'page_content': {'title': 'Page Content',
'type': 'string'}, 'metadata': {'title': 'Metadata', 'type': 'object'},
'type': {'title': 'Type', 'enum': ['Document'], 'default': 'Document',
'type': 'string'}}, 'required': ['page_content']}}}
fake_llm = FakeListLLM(responses=['a'])
assert fake_llm.input_schema.schema() == snapshot
assert fake_llm.output_schema.schema() == {'title': 'FakeListLLMOutput',
'type': 'string'}
fake_chat = FakeListChatModel(responses=['a'])
assert fake_chat.input_schema.schema() == snapshot
assert fake_chat.output_schema.schema() == snapshot
chat_prompt = ChatPromptTemplate.from_messages([MessagesPlaceholder(
variable_name='history'), ('human', 'Hello, how are you?')])
assert chat_prompt.input_schema.schema() == {'title': 'PromptInput', 'type':
'object', 'properties': {'history': {'title': 'History', 'type':
'array', 'items': {'anyOf': [{'$ref': '#/definitions/AIMessage'}, {
'$ref': '#/definitions/HumanMessage'}, {'$ref':
'#/definitions/ChatMessage'}, {'$ref': '#/definitions/SystemMessage'},
{'$ref': '#/definitions/FunctionMessage'}, {'$ref':
'#/definitions/ToolMessage'}]}}}, 'definitions': {'AIMessage': {'title':
'AIMessage', 'description': 'A Message from an AI.', 'type': 'object',
'properties': {'content': {'title': 'Content', 'anyOf': [{'type':
'string'}, {'type': 'array', 'items': {'anyOf': [{'type': 'string'}, {
'type': 'object'}]}}]}, 'additional_kwargs': {'title':
'Additional Kwargs', 'type': 'object'}, 'type': {'title': 'Type',
'default': 'ai', 'enum': ['ai'], 'type': 'string'}, 'example': {'title':
'Example', 'default': False, 'type': 'boolean'}}, 'required': [
'content']}, 'HumanMessage': {'title': 'HumanMessage', 'description':
'A Message from a human.', 'type': 'object', 'properties': {'content':
{'title': 'Content', 'anyOf': [{'type': 'string'}, {'type': 'array',
'items': {'anyOf': [{'type': 'string'}, {'type': 'object'}]}}]},
'additional_kwargs': {'title': 'Additional Kwargs', 'type': 'object'},
'type': {'title': 'Type', 'default': 'human', 'enum': ['human'], 'type':
'string'}, 'example': {'title': 'Example', 'default': False, 'type':
'boolean'}}, 'required': ['content']}, 'ChatMessage': {'title':
'ChatMessage', 'description':
'A Message that can be assigned an arbitrary speaker (i.e. role).',
'type': 'object', 'properties': {'content': {'title': 'Content',
'anyOf': [{'type': 'string'}, {'type': 'array', 'items': {'anyOf': [{
'type': 'string'}, {'type': 'object'}]}}]}, 'additional_kwargs': {
'title': 'Additional Kwargs', 'type': 'object'}, 'type': {'title':
'Type', 'default': 'chat', 'enum': ['chat'], 'type': 'string'}, 'role':
{'title': 'Role', 'type': 'string'}}, 'required': ['content', 'role']},
'SystemMessage': {'title': 'SystemMessage', 'description':
"""A Message for priming AI behavior, usually passed in as the first of a sequence
of input messages."""
, 'type': 'object', 'properties': {'content': {'title': 'Content',
'anyOf': [{'type': 'string'}, {'type': 'array', 'items': {'anyOf': [{
'type': 'string'}, {'type': 'object'}]}}]}, 'additional_kwargs': {
'title': 'Additional Kwargs', 'type': 'object'}, 'type': {'title':
'Type', 'default': 'system', 'enum': ['system'], 'type': 'string'}},
'required': ['content']}, 'FunctionMessage': {'title':
'FunctionMessage', 'description':
'A Message for passing the result of executing a function back to a model.'
, 'type': 'object', 'properties': {'content': {'title': 'Content',
'anyOf': [{'type': 'string'}, {'type': 'array', 'items': {'anyOf': [{
'type': 'string'}, {'type': 'object'}]}}]}, 'additional_kwargs': {
'title': 'Additional Kwargs', 'type': 'object'}, 'type': {'title':
'Type', 'default': 'function', 'enum': ['function'], 'type': 'string'},
'name': {'title': 'Name', 'type': 'string'}}, 'required': ['content',
'name']}, 'ToolMessage': {'title': 'ToolMessage', 'description':
'A Message for passing the result of executing a tool back to a model.',
'type': 'object', 'properties': {'content': {'title': 'Content',
'anyOf': [{'type': 'string'}, {'type': 'array', 'items': {'anyOf': [{
'type': 'string'}, {'type': 'object'}]}}]}, 'additional_kwargs': {
'title': 'Additional Kwargs', 'type': 'object'}, 'type': {'title':
'Type', 'default': 'tool', 'enum': ['tool'], 'type': 'string'},
'tool_call_id': {'title': 'Tool Call Id', 'type': 'string'}},
'required': ['content', 'tool_call_id']}}}
assert chat_prompt.output_schema.schema() == snapshot
prompt = PromptTemplate.from_template('Hello, {name}!')
assert prompt.input_schema.schema() == {'title': 'PromptInput', 'type':
'object', 'properties': {'name': {'title': 'Name', 'type': 'string'}}}
assert prompt.output_schema.schema() == snapshot
prompt_mapper = PromptTemplate.from_template('Hello, {name}!').map()
assert prompt_mapper.input_schema.schema() == {'definitions': {
'PromptInput': {'properties': {'name': {'title': 'Name', 'type':
'string'}}, 'title': 'PromptInput', 'type': 'object'}}, 'items': {
'$ref': '#/definitions/PromptInput'}, 'type': 'array', 'title':
'RunnableEach<PromptTemplate>Input'}
assert prompt_mapper.output_schema.schema() == snapshot
list_parser = CommaSeparatedListOutputParser()
assert list_parser.input_schema.schema() == snapshot
assert list_parser.output_schema.schema() == {'title':
'CommaSeparatedListOutputParserOutput', 'type': 'array', 'items': {
'type': 'string'}}
seq = prompt | fake_llm | list_parser
assert seq.input_schema.schema() == {'title': 'PromptInput', 'type':
'object', 'properties': {'name': {'title': 'Name', 'type': 'string'}}}
assert seq.output_schema.schema() == {'type': 'array', 'items': {'type':
'string'}, 'title': 'CommaSeparatedListOutputParserOutput'}
router: Runnable = RouterRunnable({})
assert router.input_schema.schema() == {'title': 'RouterRunnableInput',
'$ref': '#/definitions/RouterInput', 'definitions': {'RouterInput': {
'title': 'RouterInput', 'type': 'object', 'properties': {'key': {
'title': 'Key', 'type': 'string'}, 'input': {'title': 'Input'}},
'required': ['key', 'input']}}}
assert router.output_schema.schema() == {'title': 'RouterRunnableOutput'}
seq_w_map: Runnable = prompt | fake_llm | {'original': RunnablePassthrough(
input_type=str), 'as_list': list_parser, 'length': typed_lambda_impl}
assert seq_w_map.input_schema.schema() == {'title': 'PromptInput', 'type':
'object', 'properties': {'name': {'title': 'Name', 'type': 'string'}}}
assert seq_w_map.output_schema.schema() == {'title':
'RunnableParallel<original,as_list,length>Output', 'type': 'object',
'properties': {'original': {'title': 'Original', 'type': 'string'},
'length': {'title': 'Length', 'type': 'integer'}, 'as_list': {'title':
'As List', 'type': 'array', 'items': {'type': 'string'}}}} | def test_schemas(snapshot: SnapshotAssertion) ->None:
fake = FakeRunnable()
assert fake.input_schema.schema() == {'title': 'FakeRunnableInput',
'type': 'string'}
assert fake.output_schema.schema() == {'title': 'FakeRunnableOutput',
'type': 'integer'}
assert fake.config_schema(include=['tags', 'metadata', 'run_name']).schema(
) == {'title': 'FakeRunnableConfig', 'type': 'object', 'properties':
{'metadata': {'title': 'Metadata', 'type': 'object'}, 'run_name': {
'title': 'Run Name', 'type': 'string'}, 'tags': {'items': {'type':
'string'}, 'title': 'Tags', 'type': 'array'}}}
fake_bound = FakeRunnable().bind(a='b')
assert fake_bound.input_schema.schema() == {'title':
'FakeRunnableInput', 'type': 'string'}
assert fake_bound.output_schema.schema() == {'title':
'FakeRunnableOutput', 'type': 'integer'}
fake_w_fallbacks = FakeRunnable().with_fallbacks((fake,))
assert fake_w_fallbacks.input_schema.schema() == {'title':
'FakeRunnableInput', 'type': 'string'}
assert fake_w_fallbacks.output_schema.schema() == {'title':
'FakeRunnableOutput', 'type': 'integer'}
def typed_lambda_impl(x: str) ->int:
return len(x)
typed_lambda = RunnableLambda(typed_lambda_impl)
assert typed_lambda.input_schema.schema() == {'title':
'typed_lambda_impl_input', 'type': 'string'}
assert typed_lambda.output_schema.schema() == {'title':
'typed_lambda_impl_output', 'type': 'integer'}
async def typed_async_lambda_impl(x: str) ->int:
return len(x)
typed_async_lambda: Runnable = RunnableLambda(typed_async_lambda_impl)
assert typed_async_lambda.input_schema.schema() == {'title':
'typed_async_lambda_impl_input', 'type': 'string'}
assert typed_async_lambda.output_schema.schema() == {'title':
'typed_async_lambda_impl_output', 'type': 'integer'}
fake_ret = FakeRetriever()
assert fake_ret.input_schema.schema() == {'title': 'FakeRetrieverInput',
'type': 'string'}
assert fake_ret.output_schema.schema() == {'title':
'FakeRetrieverOutput', 'type': 'array', 'items': {'$ref':
'#/definitions/Document'}, 'definitions': {'Document': {'title':
'Document', 'description':
'Class for storing a piece of text and associated metadata.',
'type': 'object', 'properties': {'page_content': {'title':
'Page Content', 'type': 'string'}, 'metadata': {'title': 'Metadata',
'type': 'object'}, 'type': {'title': 'Type', 'enum': ['Document'],
'default': 'Document', 'type': 'string'}}, 'required': [
'page_content']}}}
fake_llm = FakeListLLM(responses=['a'])
assert fake_llm.input_schema.schema() == snapshot
assert fake_llm.output_schema.schema() == {'title': 'FakeListLLMOutput',
'type': 'string'}
fake_chat = FakeListChatModel(responses=['a'])
assert fake_chat.input_schema.schema() == snapshot
assert fake_chat.output_schema.schema() == snapshot
chat_prompt = ChatPromptTemplate.from_messages([MessagesPlaceholder(
variable_name='history'), ('human', 'Hello, how are you?')])
assert chat_prompt.input_schema.schema() == {'title': 'PromptInput',
'type': 'object', 'properties': {'history': {'title': 'History',
'type': 'array', 'items': {'anyOf': [{'$ref':
'#/definitions/AIMessage'}, {'$ref': '#/definitions/HumanMessage'},
{'$ref': '#/definitions/ChatMessage'}, {'$ref':
'#/definitions/SystemMessage'}, {'$ref':
'#/definitions/FunctionMessage'}, {'$ref':
'#/definitions/ToolMessage'}]}}}, 'definitions': {'AIMessage': {
'title': 'AIMessage', 'description': 'A Message from an AI.',
'type': 'object', 'properties': {'content': {'title': 'Content',
'anyOf': [{'type': 'string'}, {'type': 'array', 'items': {'anyOf':
[{'type': 'string'}, {'type': 'object'}]}}]}, 'additional_kwargs':
{'title': 'Additional Kwargs', 'type': 'object'}, 'type': {'title':
'Type', 'default': 'ai', 'enum': ['ai'], 'type': 'string'},
'example': {'title': 'Example', 'default': False, 'type': 'boolean'
}}, 'required': ['content']}, 'HumanMessage': {'title':
'HumanMessage', 'description': 'A Message from a human.', 'type':
'object', 'properties': {'content': {'title': 'Content', 'anyOf': [
{'type': 'string'}, {'type': 'array', 'items': {'anyOf': [{'type':
'string'}, {'type': 'object'}]}}]}, 'additional_kwargs': {'title':
'Additional Kwargs', 'type': 'object'}, 'type': {'title': 'Type',
'default': 'human', 'enum': ['human'], 'type': 'string'}, 'example':
{'title': 'Example', 'default': False, 'type': 'boolean'}},
'required': ['content']}, 'ChatMessage': {'title': 'ChatMessage',
'description':
'A Message that can be assigned an arbitrary speaker (i.e. role).',
'type': 'object', 'properties': {'content': {'title': 'Content',
'anyOf': [{'type': 'string'}, {'type': 'array', 'items': {'anyOf':
[{'type': 'string'}, {'type': 'object'}]}}]}, 'additional_kwargs':
{'title': 'Additional Kwargs', 'type': 'object'}, 'type': {'title':
'Type', 'default': 'chat', 'enum': ['chat'], 'type': 'string'},
'role': {'title': 'Role', 'type': 'string'}}, 'required': [
'content', 'role']}, 'SystemMessage': {'title': 'SystemMessage',
'description':
"""A Message for priming AI behavior, usually passed in as the first of a sequence
of input messages."""
, 'type': 'object', 'properties': {'content': {'title': 'Content',
'anyOf': [{'type': 'string'}, {'type': 'array', 'items': {'anyOf':
[{'type': 'string'}, {'type': 'object'}]}}]}, 'additional_kwargs':
{'title': 'Additional Kwargs', 'type': 'object'}, 'type': {'title':
'Type', 'default': 'system', 'enum': ['system'], 'type': 'string'}},
'required': ['content']}, 'FunctionMessage': {'title':
'FunctionMessage', 'description':
'A Message for passing the result of executing a function back to a model.'
, 'type': 'object', 'properties': {'content': {'title': 'Content',
'anyOf': [{'type': 'string'}, {'type': 'array', 'items': {'anyOf':
[{'type': 'string'}, {'type': 'object'}]}}]}, 'additional_kwargs':
{'title': 'Additional Kwargs', 'type': 'object'}, 'type': {'title':
'Type', 'default': 'function', 'enum': ['function'], 'type':
'string'}, 'name': {'title': 'Name', 'type': 'string'}}, 'required':
['content', 'name']}, 'ToolMessage': {'title': 'ToolMessage',
'description':
'A Message for passing the result of executing a tool back to a model.'
, 'type': 'object', 'properties': {'content': {'title': 'Content',
'anyOf': [{'type': 'string'}, {'type': 'array', 'items': {'anyOf':
[{'type': 'string'}, {'type': 'object'}]}}]}, 'additional_kwargs':
{'title': 'Additional Kwargs', 'type': 'object'}, 'type': {'title':
'Type', 'default': 'tool', 'enum': ['tool'], 'type': 'string'},
'tool_call_id': {'title': 'Tool Call Id', 'type': 'string'}},
'required': ['content', 'tool_call_id']}}}
assert chat_prompt.output_schema.schema() == snapshot
prompt = PromptTemplate.from_template('Hello, {name}!')
assert prompt.input_schema.schema() == {'title': 'PromptInput', 'type':
'object', 'properties': {'name': {'title': 'Name', 'type': 'string'}}}
assert prompt.output_schema.schema() == snapshot
prompt_mapper = PromptTemplate.from_template('Hello, {name}!').map()
assert prompt_mapper.input_schema.schema() == {'definitions': {
'PromptInput': {'properties': {'name': {'title': 'Name', 'type':
'string'}}, 'title': 'PromptInput', 'type': 'object'}}, 'items': {
'$ref': '#/definitions/PromptInput'}, 'type': 'array', 'title':
'RunnableEach<PromptTemplate>Input'}
assert prompt_mapper.output_schema.schema() == snapshot
list_parser = CommaSeparatedListOutputParser()
assert list_parser.input_schema.schema() == snapshot
assert list_parser.output_schema.schema() == {'title':
'CommaSeparatedListOutputParserOutput', 'type': 'array', 'items': {
'type': 'string'}}
seq = prompt | fake_llm | list_parser
assert seq.input_schema.schema() == {'title': 'PromptInput', 'type':
'object', 'properties': {'name': {'title': 'Name', 'type': 'string'}}}
assert seq.output_schema.schema() == {'type': 'array', 'items': {'type':
'string'}, 'title': 'CommaSeparatedListOutputParserOutput'}
router: Runnable = RouterRunnable({})
assert router.input_schema.schema() == {'title': 'RouterRunnableInput',
'$ref': '#/definitions/RouterInput', 'definitions': {'RouterInput':
{'title': 'RouterInput', 'type': 'object', 'properties': {'key': {
'title': 'Key', 'type': 'string'}, 'input': {'title': 'Input'}},
'required': ['key', 'input']}}}
assert router.output_schema.schema() == {'title': 'RouterRunnableOutput'}
seq_w_map: Runnable = prompt | fake_llm | {'original':
RunnablePassthrough(input_type=str), 'as_list': list_parser,
'length': typed_lambda_impl}
assert seq_w_map.input_schema.schema() == {'title': 'PromptInput',
'type': 'object', 'properties': {'name': {'title': 'Name', 'type':
'string'}}}
assert seq_w_map.output_schema.schema() == {'title':
'RunnableParallel<original,as_list,length>Output', 'type': 'object',
'properties': {'original': {'title': 'Original', 'type': 'string'},
'length': {'title': 'Length', 'type': 'integer'}, 'as_list': {
'title': 'As List', 'type': 'array', 'items': {'type': 'string'}}}} | null |
embed_query | """Compute query embeddings using a modelscope embedding model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace('\n', ' ')
inputs = {'source_sentence': [text]}
embedding = self.embed(input=inputs)['text_embedding'][0]
return embedding.tolist() | def embed_query(self, text: str) ->List[float]:
"""Compute query embeddings using a modelscope embedding model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace('\n', ' ')
inputs = {'source_sentence': [text]}
embedding = self.embed(input=inputs)['text_embedding'][0]
return embedding.tolist() | Compute query embeddings using a modelscope embedding model.
Args:
text: The text to embed.
Returns:
Embeddings for the text. |
fetch_page_result | res = self.collection.find(filter=self.filter, options=self.find_options,
projection=self.projection, sort=None)
self.find_options['pageState'] = res['data'].get('nextPageState')
for doc in res['data']['documents']:
queue.put(Document(page_content=self.extraction_function(doc), metadata
={'namespace': self.collection.astra_db.namespace, 'api_endpoint':
self.collection.astra_db.base_url, 'collection': self.collection.
collection_name})) | def fetch_page_result(self, queue: Queue):
res = self.collection.find(filter=self.filter, options=self.
find_options, projection=self.projection, sort=None)
self.find_options['pageState'] = res['data'].get('nextPageState')
for doc in res['data']['documents']:
queue.put(Document(page_content=self.extraction_function(doc),
metadata={'namespace': self.collection.astra_db.namespace,
'api_endpoint': self.collection.astra_db.base_url, 'collection':
self.collection.collection_name})) | null |
add_message | """Append the message to the record in Upstash Redis"""
self.redis_client.lpush(self.key, json.dumps(message_to_dict(message)))
if self.ttl:
self.redis_client.expire(self.key, self.ttl) | def add_message(self, message: BaseMessage) ->None:
"""Append the message to the record in Upstash Redis"""
self.redis_client.lpush(self.key, json.dumps(message_to_dict(message)))
if self.ttl:
self.redis_client.expire(self.key, self.ttl) | Append the message to the record in Upstash Redis |
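Because `lpush` prepends, the Upstash list stores the newest message at index 0. A minimal sketch of the matching read path, assuming the same key layout (in LangChain the resulting dicts would normally be passed through `messages_from_dict` to rebuild `BaseMessage` objects):

```python
import json


def get_message_dicts(redis_client, key: str) -> list:
    # LPUSH puts the newest item first, so reverse to recover
    # chronological order before deserializing each entry.
    items = redis_client.lrange(key, 0, -1)
    return [json.loads(item) for item in reversed(items)]
```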
_stream | """Call Anthropic completion_stream and return the resulting generator.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens from Anthropic.
Example:
.. code-block:: python
prompt = "Write a poem about a stream."
prompt = f"\\n\\nHuman: {prompt}\\n\\nAssistant:"
generator = anthropic.stream(prompt)
for token in generator:
yield token
"""
stop = self._get_anthropic_stop(stop)
params = {**self._default_params, **kwargs}
for token in self.client.completions.create(prompt=self._wrap_prompt(prompt
), stop_sequences=stop, stream=True, **params):
chunk = GenerationChunk(text=token.completion)
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk) | def _stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[
GenerationChunk]:
"""Call Anthropic completion_stream and return the resulting generator.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens from Anthropic.
Example:
.. code-block:: python
prompt = "Write a poem about a stream."
prompt = f"\\n\\nHuman: {prompt}\\n\\nAssistant:"
generator = anthropic.stream(prompt)
for token in generator:
yield token
"""
stop = self._get_anthropic_stop(stop)
params = {**self._default_params, **kwargs}
for token in self.client.completions.create(prompt=self._wrap_prompt(
prompt), stop_sequences=stop, stream=True, **params):
chunk = GenerationChunk(text=token.completion)
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk) | Call Anthropic completion_stream and return the resulting generator.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens from Anthropic.
Example:
.. code-block:: python
prompt = "Write a poem about a stream."
prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
generator = anthropic.stream(prompt)
for token in generator:
yield token |
_persist | """Saves documents and embeddings to BigQuery."""
from google.cloud import bigquery
data_len = len(data[list(data.keys())[0]])
if data_len == 0:
return
list_of_dicts = [dict(zip(data, t)) for t in zip(*data.values())]
job_config = bigquery.LoadJobConfig()
job_config.schema = self.vectors_table.schema
job_config.schema_update_options = (bigquery.SchemaUpdateOption.
ALLOW_FIELD_ADDITION)
job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND
job = self.bq_client.load_table_from_json(list_of_dicts, self.vectors_table,
job_config=job_config)
job.result() | def _persist(self, data: Dict[str, Any]) ->None:
"""Saves documents and embeddings to BigQuery."""
from google.cloud import bigquery
data_len = len(data[list(data.keys())[0]])
if data_len == 0:
return
list_of_dicts = [dict(zip(data, t)) for t in zip(*data.values())]
job_config = bigquery.LoadJobConfig()
job_config.schema = self.vectors_table.schema
job_config.schema_update_options = (bigquery.SchemaUpdateOption.
ALLOW_FIELD_ADDITION)
job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND
job = self.bq_client.load_table_from_json(list_of_dicts, self.
vectors_table, job_config=job_config)
job.result() | Saves documents and embeddings to BigQuery. |
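The one non-obvious line in `_persist` is the transposition that turns a column-oriented dict into the row dicts BigQuery's JSON loader expects. A self-contained illustration of that idiom:

```python
data = {'id': [1, 2], 'text': ['foo', 'bar']}
# zip(*data.values()) yields one tuple per row; zip(data, t) pairs each
# column name with that row's value.
rows = [dict(zip(data, t)) for t in zip(*data.values())]
assert rows == [{'id': 1, 'text': 'foo'}, {'id': 2, 'text': 'bar'}]
```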
_parse_lc_message | keys = ['function_call', 'tool_calls', 'tool_call_id', 'name']
parsed = {'text': message.content, 'role': _parse_lc_role(message.type)}
parsed.update({key: cast(Any, message.additional_kwargs.get(key)) for key in
keys if message.additional_kwargs.get(key) is not None})
return parsed | def _parse_lc_message(message: BaseMessage) ->Dict[str, Any]:
keys = ['function_call', 'tool_calls', 'tool_call_id', 'name']
parsed = {'text': message.content, 'role': _parse_lc_role(message.type)}
parsed.update({key: cast(Any, message.additional_kwargs.get(key)) for
key in keys if message.additional_kwargs.get(key) is not None})
return parsed | null |
test_edenai_embedding_query | """Test eden ai embeddings with google."""
document = 'foo bar'
embedding = EdenAiEmbeddings(provider='google')
output = embedding.embed_query(document)
assert len(output) == 768 | def test_edenai_embedding_query() ->None:
"""Test eden ai embeddings with google."""
document = 'foo bar'
embedding = EdenAiEmbeddings(provider='google')
output = embedding.embed_query(document)
assert len(output) == 768 | Test eden ai embeddings with google. |
on_chain_end_common | self.chain_ends += 1
self.ends += 1 | def on_chain_end_common(self) ->None:
self.chain_ends += 1
self.ends += 1 | null |
validate_environment | """Validate that the python package exists in environment."""
try:
import wikipedia
wikipedia.set_lang(values['lang'])
values['wiki_client'] = wikipedia
except ImportError:
raise ImportError(
'Could not import wikipedia python package. Please install it with `pip install wikipedia`.'
)
return values | @root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that the python package exists in environment."""
try:
import wikipedia
wikipedia.set_lang(values['lang'])
values['wiki_client'] = wikipedia
except ImportError:
raise ImportError(
'Could not import wikipedia python package. Please install it with `pip install wikipedia`.'
)
return values | Validate that the python package exists in environment. |
_add_record | """Add a ChildRecord to self._children. If `index` is specified, replace
the existing record at that index. Otherwise, append the record to the
end of the list.
Return the index of the added record.
"""
if index is not None:
self._child_records[index] = record
return index
self._child_records.append(record)
return len(self._child_records) - 1 | def _add_record(self, record: ChildRecord, index: Optional[int]) ->int:
"""Add a ChildRecord to self._children. If `index` is specified, replace
the existing record at that index. Otherwise, append the record to the
end of the list.
Return the index of the added record.
"""
if index is not None:
self._child_records[index] = record
return index
self._child_records.append(record)
return len(self._child_records) - 1 | Add a ChildRecord to self._children. If `index` is specified, replace
the existing record at that index. Otherwise, append the record to the
end of the list.
Return the index of the added record. |
test_unstructured_odt_loader | """Test unstructured loader."""
file_path = Path(__file__).parent.parent / 'examples/fake.odt'
loader = UnstructuredODTLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1 | def test_unstructured_odt_loader() ->None:
"""Test unstructured loader."""
file_path = Path(__file__).parent.parent / 'examples/fake.odt'
loader = UnstructuredODTLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1 | Test unstructured loader. |
list_keys | """List records in the SQLite database based on the provided date range."""
with self._make_session() as session:
query = session.query(UpsertionRecord).filter(UpsertionRecord.namespace ==
self.namespace)
if after:
query = query.filter(UpsertionRecord.updated_at > after)
if before:
query = query.filter(UpsertionRecord.updated_at < before)
if group_ids:
query = query.filter(UpsertionRecord.group_id.in_(group_ids))
if limit:
query = query.limit(limit)
records = query.all()
return [r.key for r in records] | def list_keys(self, *, before: Optional[float]=None, after: Optional[float]
=None, group_ids: Optional[Sequence[str]]=None, limit: Optional[int]=None
) ->List[str]:
"""List records in the SQLite database based on the provided date range."""
with self._make_session() as session:
query = session.query(UpsertionRecord).filter(UpsertionRecord.
namespace == self.namespace)
if after:
query = query.filter(UpsertionRecord.updated_at > after)
if before:
query = query.filter(UpsertionRecord.updated_at < before)
if group_ids:
query = query.filter(UpsertionRecord.group_id.in_(group_ids))
if limit:
query = query.limit(limit)
records = query.all()
return [r.key for r in records] | List records in the SQLite database based on the provided date range. |
__getattr__ | """Get attr name."""
if name in DEPRECATED_AGENTS:
relative_path = as_import_path(Path(__file__).parent, suffix=name)
old_path = 'langchain.' + relative_path
new_path = 'langchain_experimental.' + relative_path
raise ImportError(
f"""{name} has been moved to langchain experimental. See https://github.com/langchain-ai/langchain/discussions/11680for more information.
Please update your import statement from: `{old_path}` to `{new_path}`."""
)
raise AttributeError(f'{name} does not exist') | def __getattr__(name: str) ->Any:
"""Get attr name."""
if name in DEPRECATED_AGENTS:
relative_path = as_import_path(Path(__file__).parent, suffix=name)
old_path = 'langchain.' + relative_path
new_path = 'langchain_experimental.' + relative_path
raise ImportError(
f"""{name} has been moved to langchain experimental. See https://github.com/langchain-ai/langchain/discussions/11680for more information.
Please update your import statement from: `{old_path}` to `{new_path}`."""
)
raise AttributeError(f'{name} does not exist') | Get attr name. |
_call | """
Generate SPARQL query, use it to retrieve a response from the gdb and answer
the question.
"""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
prompt = inputs[self.input_key]
_intent = self.sparql_intent_chain.run({'prompt': prompt}, callbacks=callbacks)
intent = _intent.strip()
if 'SELECT' in intent and 'UPDATE' not in intent:
sparql_generation_chain = self.sparql_generation_select_chain
intent = 'SELECT'
elif 'UPDATE' in intent and 'SELECT' not in intent:
sparql_generation_chain = self.sparql_generation_update_chain
intent = 'UPDATE'
else:
raise ValueError(
'I am sorry, but this prompt seems to fit none of the currently supported SPARQL query types, i.e., SELECT and UPDATE.'
)
_run_manager.on_text('Identified intent:', end='\n', verbose=self.verbose)
_run_manager.on_text(intent, color='green', end='\n', verbose=self.verbose)
generated_sparql = sparql_generation_chain.run({'prompt': prompt, 'schema':
self.graph.get_schema}, callbacks=callbacks)
_run_manager.on_text('Generated SPARQL:', end='\n', verbose=self.verbose)
_run_manager.on_text(generated_sparql, color='green', end='\n', verbose=
self.verbose)
if intent == 'SELECT':
context = self.graph.query(generated_sparql)
_run_manager.on_text('Full Context:', end='\n', verbose=self.verbose)
_run_manager.on_text(str(context), color='green', end='\n', verbose=
self.verbose)
result = self.qa_chain({'prompt': prompt, 'context': context},
callbacks=callbacks)
res = result[self.qa_chain.output_key]
elif intent == 'UPDATE':
self.graph.update(generated_sparql)
res = 'Successfully inserted triples into the graph.'
else:
raise ValueError('Unsupported SPARQL query type.')
return {self.output_key: res} | def _call(self, inputs: Dict[str, Any], run_manager: Optional[
CallbackManagerForChainRun]=None) ->Dict[str, str]:
"""
Generate SPARQL query, use it to retrieve a response from the gdb and answer
the question.
"""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
prompt = inputs[self.input_key]
_intent = self.sparql_intent_chain.run({'prompt': prompt}, callbacks=
callbacks)
intent = _intent.strip()
if 'SELECT' in intent and 'UPDATE' not in intent:
sparql_generation_chain = self.sparql_generation_select_chain
intent = 'SELECT'
elif 'UPDATE' in intent and 'SELECT' not in intent:
sparql_generation_chain = self.sparql_generation_update_chain
intent = 'UPDATE'
else:
raise ValueError(
'I am sorry, but this prompt seems to fit none of the currently supported SPARQL query types, i.e., SELECT and UPDATE.'
)
_run_manager.on_text('Identified intent:', end='\n', verbose=self.verbose)
_run_manager.on_text(intent, color='green', end='\n', verbose=self.verbose)
generated_sparql = sparql_generation_chain.run({'prompt': prompt,
'schema': self.graph.get_schema}, callbacks=callbacks)
_run_manager.on_text('Generated SPARQL:', end='\n', verbose=self.verbose)
_run_manager.on_text(generated_sparql, color='green', end='\n', verbose
=self.verbose)
if intent == 'SELECT':
context = self.graph.query(generated_sparql)
_run_manager.on_text('Full Context:', end='\n', verbose=self.verbose)
_run_manager.on_text(str(context), color='green', end='\n', verbose
=self.verbose)
result = self.qa_chain({'prompt': prompt, 'context': context},
callbacks=callbacks)
res = result[self.qa_chain.output_key]
elif intent == 'UPDATE':
self.graph.update(generated_sparql)
res = 'Successfully inserted triples into the graph.'
else:
raise ValueError('Unsupported SPARQL query type.')
return {self.output_key: res} | Generate SPARQL query, use it to retrieve a response from the gdb and answer
the question. |
_identifying_params | """Get the identifying parameters."""
return {**{'model': self.model}, **self._default_params} | @property
def _identifying_params(self) ->Dict[str, Any]:
"""Get the identifying parameters."""
return {**{'model': self.model}, **self._default_params} | Get the identifying parameters. |
parse | try:
if self.output_fixing_parser is not None:
parsed_obj: Union[AgentAction, AgentFinish
] = self.output_fixing_parser.parse(text)
else:
parsed_obj = self.base_parser.parse(text)
return parsed_obj
except Exception as e:
raise OutputParserException(f'Could not parse LLM output: {text}') from e | def parse(self, text: str) ->Union[AgentAction, AgentFinish]:
try:
if self.output_fixing_parser is not None:
parsed_obj: Union[AgentAction, AgentFinish
] = self.output_fixing_parser.parse(text)
else:
parsed_obj = self.base_parser.parse(text)
return parsed_obj
except Exception as e:
raise OutputParserException(f'Could not parse LLM output: {text}'
) from e | null |
_evaluate_string_pairs | """Evaluate the output string pairs.
Args:
prediction (str): The output string from the first model.
prediction_b (str): The output string from the second model.
reference (Optional[str], optional): The expected output / reference string.
input (Optional[str], optional): The input string.
**kwargs: Additional keyword arguments, such as callbacks and optional reference strings.
Returns:
dict: A dictionary containing the preference, scores, and/or other information.
""" | @abstractmethod
def _evaluate_string_pairs(self, *, prediction: str, prediction_b: str,
reference: Optional[str]=None, input: Optional[str]=None, **kwargs: Any
) ->dict:
"""Evaluate the output string pairs.
Args:
prediction (str): The output string from the first model.
prediction_b (str): The output string from the second model.
reference (Optional[str], optional): The expected output / reference string.
input (Optional[str], optional): The input string.
**kwargs: Additional keyword arguments, such as callbacks and optional reference strings.
Returns:
dict: A dictionary containing the preference, scores, and/or other information.
""" | Evaluate the output string pairs.
Args:
prediction (str): The output string from the first model.
prediction_b (str): The output string from the second model.
reference (Optional[str], optional): The expected output / reference string.
input (Optional[str], optional): The input string.
**kwargs: Additional keyword arguments, such as callbacks and optional reference strings.
Returns:
dict: A dictionary containing the preference, scores, and/or other information. |
test_iterative_text_splitter | """Test iterative text splitter."""
text = """Hi.
I'm Harrison.
How? Are? You?
Okay then f f f f.
This is a weird text to write, but gotta test the splittingggg some how.
Bye!
-H."""
splitter = RecursiveCharacterTextSplitter(chunk_size=10, chunk_overlap=1)
output = splitter.split_text(text)
expected_output = ['Hi.', "I'm", 'Harrison.', 'How? Are?', 'You?',
'Okay then', 'f f f f.', 'This is a', 'weird', 'text to', 'write,',
'but gotta', 'test the', 'splitting', 'gggg', 'some how.', 'Bye!', '-H.']
assert output == expected_output | def test_iterative_text_splitter() ->None:
"""Test iterative text splitter."""
text = """Hi.
I'm Harrison.
How? Are? You?
Okay then f f f f.
This is a weird text to write, but gotta test the splittingggg some how.
Bye!
-H."""
splitter = RecursiveCharacterTextSplitter(chunk_size=10, chunk_overlap=1)
output = splitter.split_text(text)
expected_output = ['Hi.', "I'm", 'Harrison.', 'How? Are?', 'You?',
'Okay then', 'f f f f.', 'This is a', 'weird', 'text to', 'write,',
'but gotta', 'test the', 'splitting', 'gggg', 'some how.', 'Bye!',
'-H.']
assert output == expected_output | Test iterative text splitter. |
_hash | """Use a deterministic hashing approach."""
return hashlib.md5(_input.encode()).hexdigest() | def _hash(_input: str) ->str:
"""Use a deterministic hashing approach."""
return hashlib.md5(_input.encode()).hexdigest() | Use a deterministic hashing approach. |
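The point of `_hash` is a stable cache key, not security: Python's built-in `hash()` is salted per process, while `hashlib.md5` gives the same digest everywhere. A quick check of that property:

```python
import hashlib

key_a = hashlib.md5('What is 2+2?'.encode()).hexdigest()
key_b = hashlib.md5('What is 2+2?'.encode()).hexdigest()
# Identical 32-character hex strings, across runs and across machines,
# which is what makes the digest usable as a cache key.
assert key_a == key_b and len(key_a) == 32
```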
moderate | from langchain_experimental.comprehend_moderation.base_moderation_config import ModerationPiiConfig, ModerationPromptSafetyConfig, ModerationToxicityConfig
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import ModerationPiiError, ModerationPromptSafetyError, ModerationToxicityError
try:
input_text = self._convert_prompt_to_text(prompt=prompt)
output_text = str()
filter_functions = {'pii': ComprehendPII, 'toxicity':
ComprehendToxicity, 'prompt_safety': ComprehendPromptSafety}
filters = self.config.filters
for _filter in filters:
filter_name = 'pii' if isinstance(_filter, ModerationPiiConfig
) else 'toxicity' if isinstance(_filter, ModerationToxicityConfig
) else 'prompt_safety' if isinstance(_filter,
ModerationPromptSafetyConfig) else None
if filter_name in filter_functions:
self._log_message_for_verbose(
f'Running {filter_name} Validation...\n')
validation_fn = self._moderation_class(moderation_class=
filter_functions[filter_name])
input_text = input_text if not output_text else output_text
output_text = validation_fn(prompt_value=input_text, config=
_filter.dict())
return self._convert_text_to_prompt(prompt=prompt, text=output_text)
except ModerationPiiError as e:
self._log_message_for_verbose(f'Found PII content..stopping..\n{str(e)}\n')
raise e
except ModerationToxicityError as e:
self._log_message_for_verbose(
f'Found Toxic content..stopping..\n{str(e)}\n')
raise e
except ModerationPromptSafetyError as e:
self._log_message_for_verbose(
f'Found Harmful intention..stopping..\n{str(e)}\n')
raise e
except Exception as e:
raise e | def moderate(self, prompt: Any) ->str:
from langchain_experimental.comprehend_moderation.base_moderation_config import ModerationPiiConfig, ModerationPromptSafetyConfig, ModerationToxicityConfig
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import ModerationPiiError, ModerationPromptSafetyError, ModerationToxicityError
try:
input_text = self._convert_prompt_to_text(prompt=prompt)
output_text = str()
filter_functions = {'pii': ComprehendPII, 'toxicity':
ComprehendToxicity, 'prompt_safety': ComprehendPromptSafety}
filters = self.config.filters
for _filter in filters:
filter_name = 'pii' if isinstance(_filter, ModerationPiiConfig
) else 'toxicity' if isinstance(_filter,
ModerationToxicityConfig) else 'prompt_safety' if isinstance(
_filter, ModerationPromptSafetyConfig) else None
if filter_name in filter_functions:
self._log_message_for_verbose(
f'Running {filter_name} Validation...\n')
validation_fn = self._moderation_class(moderation_class=
filter_functions[filter_name])
input_text = input_text if not output_text else output_text
output_text = validation_fn(prompt_value=input_text, config
=_filter.dict())
return self._convert_text_to_prompt(prompt=prompt, text=output_text)
except ModerationPiiError as e:
self._log_message_for_verbose(
f'Found PII content..stopping..\n{str(e)}\n')
raise e
except ModerationToxicityError as e:
self._log_message_for_verbose(
f'Found Toxic content..stopping..\n{str(e)}\n')
raise e
except ModerationPromptSafetyError as e:
self._log_message_for_verbose(
f'Found Harmful intention..stopping..\n{str(e)}\n')
raise e
except Exception as e:
raise e | null |
test_deprecated_method | """Test deprecated method."""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter('always')
obj = ClassWithDeprecatedMethods()
assert obj.deprecated_method() == 'This is a deprecated method.'
assert len(warning_list) == 1
warning = warning_list[0].message
assert str(warning
) == 'The function `deprecated_method` was deprecated in LangChain 2.0.0 and will be removed in 3.0.0'
doc = obj.deprecated_method.__doc__
assert isinstance(doc, str)
assert doc.startswith('[*Deprecated*] original doc') | def test_deprecated_method() ->None:
"""Test deprecated method."""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter('always')
obj = ClassWithDeprecatedMethods()
assert obj.deprecated_method() == 'This is a deprecated method.'
assert len(warning_list) == 1
warning = warning_list[0].message
assert str(warning
) == 'The function `deprecated_method` was deprecated in LangChain 2.0.0 and will be removed in 3.0.0'
doc = obj.deprecated_method.__doc__
assert isinstance(doc, str)
assert doc.startswith('[*Deprecated*] original doc') | Test deprecated method. |
test__convert_dict_to_message_ai | message = AIMessage(content='foo')
result = _convert_message_to_dict(message)
expected_output = {'role': 'assistant', 'content': 'foo'}
assert result == expected_output | def test__convert_dict_to_message_ai() ->None:
message = AIMessage(content='foo')
result = _convert_message_to_dict(message)
expected_output = {'role': 'assistant', 'content': 'foo'}
assert result == expected_output | null |
__del__ | if hasattr(self, 'temp_dir'):
self.temp_dir.cleanup() | def __del__(self) ->None:
if hasattr(self, 'temp_dir'):
self.temp_dir.cleanup() | null |
test_init | assert isinstance(clickup_wrapper, ClickupAPIWrapper) | def test_init(clickup_wrapper: ClickupAPIWrapper) ->None:
assert isinstance(clickup_wrapper, ClickupAPIWrapper) | null |
_import_beam | from langchain_community.llms.beam import Beam
return Beam | def _import_beam() ->Any:
from langchain_community.llms.beam import Beam
return Beam | null |
from_llm_and_tools | """Construct an agent from an LLM and tools."""
cls._validate_tools(tools)
prompt = cls.create_prompt(tools, system_message_prefix=
system_message_prefix, system_message_suffix=system_message_suffix,
human_message=human_message, format_instructions=format_instructions,
input_variables=input_variables)
llm_chain = LLMChain(llm=llm, prompt=prompt, callback_manager=callback_manager)
tool_names = [tool.name for tool in tools]
_output_parser = output_parser or cls._get_default_output_parser()
return cls(llm_chain=llm_chain, allowed_tools=tool_names, output_parser=
_output_parser, **kwargs) | @classmethod
def from_llm_and_tools(cls, llm: BaseLanguageModel, tools: Sequence[
BaseTool], callback_manager: Optional[BaseCallbackManager]=None,
output_parser: Optional[AgentOutputParser]=None, system_message_prefix:
str=SYSTEM_MESSAGE_PREFIX, system_message_suffix: str=
SYSTEM_MESSAGE_SUFFIX, human_message: str=HUMAN_MESSAGE,
format_instructions: str=FORMAT_INSTRUCTIONS, input_variables: Optional
[List[str]]=None, **kwargs: Any) ->Agent:
"""Construct an agent from an LLM and tools."""
cls._validate_tools(tools)
prompt = cls.create_prompt(tools, system_message_prefix=
system_message_prefix, system_message_suffix=system_message_suffix,
human_message=human_message, format_instructions=
format_instructions, input_variables=input_variables)
llm_chain = LLMChain(llm=llm, prompt=prompt, callback_manager=
callback_manager)
tool_names = [tool.name for tool in tools]
_output_parser = output_parser or cls._get_default_output_parser()
return cls(llm_chain=llm_chain, allowed_tools=tool_names, output_parser
=_output_parser, **kwargs) | Construct an agent from an LLM and tools. |
hash_string | """Hash a string using sha1.
Parameters:
s (str): The string to hash.
Returns:
(str): The hashed string.
"""
return hashlib.sha1(s.encode('utf-8')).hexdigest() | def hash_string(s: str) ->str:
"""Hash a string using sha1.
Parameters:
s (str): The string to hash.
Returns:
(str): The hashed string.
"""
return hashlib.sha1(s.encode('utf-8')).hexdigest() | Hash a string using sha1.
Parameters:
s (str): The string to hash.
Returns:
(str): The hashed string. |
test_resolver | response = 'Test resolution'
llm = FakeListLLM(responses=[response])
prompt = PromptTemplate(input_variables=['product'], template=
'What is a good name for a company that makes {product}?')
chain = SmartLLMChain(llm=llm, prompt=prompt, n_ideas=2)
prompt_value, _ = chain.prep_prompts({'product': 'socks'})
chain.history.question = prompt_value.to_string()
chain.history.ideas = ['Test Idea 1', 'Test Idea 2']
chain.history.critique = 'Test Critique'
result = chain._resolve()
assert result == response | def test_resolver() ->None:
response = 'Test resolution'
llm = FakeListLLM(responses=[response])
prompt = PromptTemplate(input_variables=['product'], template=
'What is a good name for a company that makes {product}?')
chain = SmartLLMChain(llm=llm, prompt=prompt, n_ideas=2)
prompt_value, _ = chain.prep_prompts({'product': 'socks'})
chain.history.question = prompt_value.to_string()
chain.history.ideas = ['Test Idea 1', 'Test Idea 2']
chain.history.critique = 'Test Critique'
result = chain._resolve()
assert result == response | null |
search | """Lookup things online."""
return 'foo' | @tool
def search(query: str) ->str:
"""Lookup things online."""
return 'foo' | Lookup things online. |
get_documents_array_uri | """Get the URI of the documents array."""
return f'{uri}/{DOCUMENTS_ARRAY_NAME}' | def get_documents_array_uri(uri: str) ->str:
"""Get the URI of the documents array."""
return f'{uri}/{DOCUMENTS_ARRAY_NAME}' | Get the URI of the documents array. |
delete | """Delete documents from the Elasticsearch index.
Args:
ids: List of ids of documents to delete.
refresh_indices: Whether to refresh the index
after deleting documents. Defaults to True.
"""
try:
from elasticsearch.helpers import BulkIndexError, bulk
except ImportError:
raise ImportError(
'Could not import elasticsearch python package. Please install it with `pip install elasticsearch`.'
)
body = []
if ids is None:
raise ValueError('ids must be provided.')
for _id in ids:
body.append({'_op_type': 'delete', '_index': self.index_name, '_id': _id})
if len(body) > 0:
try:
bulk(self.client, body, refresh=refresh_indices, ignore_status=404)
logger.debug(f'Deleted {len(body)} texts from index')
return True
except BulkIndexError as e:
logger.error(f'Error deleting texts: {e}')
firstError = e.errors[0].get('index', {}).get('error', {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise e
else:
logger.debug('No texts to delete from index')
return False | def delete(self, ids: Optional[List[str]]=None, refresh_indices: Optional[
bool]=True, **kwargs: Any) ->Optional[bool]:
"""Delete documents from the Elasticsearch index.
Args:
ids: List of ids of documents to delete.
refresh_indices: Whether to refresh the index
after deleting documents. Defaults to True.
"""
try:
from elasticsearch.helpers import BulkIndexError, bulk
except ImportError:
raise ImportError(
'Could not import elasticsearch python package. Please install it with `pip install elasticsearch`.'
)
body = []
if ids is None:
raise ValueError('ids must be provided.')
for _id in ids:
body.append({'_op_type': 'delete', '_index': self.index_name, '_id':
_id})
if len(body) > 0:
try:
bulk(self.client, body, refresh=refresh_indices, ignore_status=404)
logger.debug(f'Deleted {len(body)} texts from index')
return True
except BulkIndexError as e:
logger.error(f'Error deleting texts: {e}')
firstError = e.errors[0].get('index', {}).get('error', {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise e
else:
logger.debug('No texts to delete from index')
return False | Delete documents from the Elasticsearch index.
Args:
ids: List of ids of documents to delete.
refresh_indices: Whether to refresh the index
after deleting documents. Defaults to True. |
ignore_func | return [f for f in files if f == '.git'] | def ignore_func(_, files):
return [f for f in files if f == '.git'] | null |
_llm_type | return 'google-palm-chat' | @property
def _llm_type(self) ->str:
return 'google-palm-chat' | null |
json_distance_evaluator | return JsonEditDistanceEvaluator() | @pytest.fixture
def json_distance_evaluator() ->JsonEditDistanceEvaluator:
return JsonEditDistanceEvaluator() | null |
check_valid_template | """Check that template string is valid.
Args:
template: The template string.
template_format: The template format. Should be one of "f-string" or "jinja2".
input_variables: The input variables.
Raises:
ValueError: If the template format is not supported.
"""
try:
validator_func = DEFAULT_VALIDATOR_MAPPING[template_format]
except KeyError as exc:
raise ValueError(
f'Invalid template format {template_format!r}, should be one of {list(DEFAULT_FORMATTER_MAPPING)}.'
) from exc
try:
validator_func(template, input_variables)
except (KeyError, IndexError) as exc:
raise ValueError(
f'Invalid prompt schema; check for mismatched or missing input parameters from {input_variables}.'
) from exc | def check_valid_template(template: str, template_format: str,
input_variables: List[str]) ->None:
"""Check that template string is valid.
Args:
template: The template string.
template_format: The template format. Should be one of "f-string" or "jinja2".
input_variables: The input variables.
Raises:
ValueError: If the template format is not supported.
"""
try:
validator_func = DEFAULT_VALIDATOR_MAPPING[template_format]
except KeyError as exc:
raise ValueError(
f'Invalid template format {template_format!r}, should be one of {list(DEFAULT_FORMATTER_MAPPING)}.'
) from exc
try:
validator_func(template, input_variables)
except (KeyError, IndexError) as exc:
raise ValueError(
f'Invalid prompt schema; check for mismatched or missing input parameters from {input_variables}.'
) from exc | Check that template string is valid.
Args:
template: The template string.
template_format: The template format. Should be one of "f-string" or "jinja2".
input_variables: The input variables.
Raises:
ValueError: If the template format is not supported. |
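A short usage sketch of `check_valid_template`, assuming the f-string validator formats the template with dummy values as the implementation suggests:

```python
# Passes silently: the template mentions exactly the declared variable.
check_valid_template('Hello, {name}!', 'f-string', ['name'])

# Raises ValueError: {name} is referenced but not declared.
try:
    check_valid_template('Hello, {name}!', 'f-string', [])
except ValueError as err:
    print(err)

# Raises ValueError: the format is not in the validator mapping.
try:
    check_valid_template('Hello!', 'not-a-format', [])
except ValueError as err:
    print(err)
```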
_import_gmail_GmailSendMessage | from langchain_community.tools.gmail import GmailSendMessage
return GmailSendMessage | def _import_gmail_GmailSendMessage() ->Any:
from langchain_community.tools.gmail import GmailSendMessage
return GmailSendMessage | null |
test_tracing_context_manager | from langchain.agents import AgentType, initialize_agent, load_tools
llm = OpenAI(temperature=0)
tools = load_tools(['llm-math', 'serpapi'], llm=llm)
agent = initialize_agent(tools, llm, agent=AgentType.
ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
if 'LANGCHAIN_WANDB_TRACING' in os.environ:
del os.environ['LANGCHAIN_WANDB_TRACING']
with wandb_tracing_enabled():
agent.run(questions[0])
agent.run(questions[0]) | def test_tracing_context_manager() ->None:
from langchain.agents import AgentType, initialize_agent, load_tools
llm = OpenAI(temperature=0)
tools = load_tools(['llm-math', 'serpapi'], llm=llm)
agent = initialize_agent(tools, llm, agent=AgentType.
ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
if 'LANGCHAIN_WANDB_TRACING' in os.environ:
del os.environ['LANGCHAIN_WANDB_TRACING']
with wandb_tracing_enabled():
agent.run(questions[0])
agent.run(questions[0]) | null |
__init__ | """Initialize the johnsnowlabs model."""
super().__init__(**kwargs)
try:
from johnsnowlabs import nlp
from nlu.pipe.pipeline import NLUPipeline
except ImportError as exc:
raise ImportError(
'Could not import johnsnowlabs python package. Please install it with `pip install johnsnowlabs`.'
) from exc
try:
os.environ['PYSPARK_PYTHON'] = sys.executable
os.environ['PYSPARK_DRIVER_PYTHON'] = sys.executable
nlp.start(hardware_target=hardware_target)
except Exception as exc:
raise Exception('Failure starting Spark Session') from exc
try:
if isinstance(model, str):
self.model = nlp.load(model)
elif isinstance(model, NLUPipeline):
self.model = model
else:
self.model = nlp.to_nlu_pipe(model)
except Exception as exc:
raise Exception('Failure loading model') from exc | def __init__(self, model: Any='embed_sentence.bert', hardware_target: str=
'cpu', **kwargs: Any):
"""Initialize the johnsnowlabs model."""
super().__init__(**kwargs)
try:
from johnsnowlabs import nlp
from nlu.pipe.pipeline import NLUPipeline
except ImportError as exc:
raise ImportError(
'Could not import johnsnowlabs python package. Please install it with `pip install johnsnowlabs`.'
) from exc
try:
os.environ['PYSPARK_PYTHON'] = sys.executable
os.environ['PYSPARK_DRIVER_PYTHON'] = sys.executable
nlp.start(hardware_target=hardware_target)
except Exception as exc:
raise Exception('Failure starting Spark Session') from exc
try:
if isinstance(model, str):
self.model = nlp.load(model)
elif isinstance(model, NLUPipeline):
self.model = model
else:
self.model = nlp.to_nlu_pipe(model)
except Exception as exc:
raise Exception('Failure loading model') from exc | Initialize the johnsnowlabs model. |
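A hedged usage sketch; the JohnSnowLabsEmbeddings class name and import path are assumptions, and running it needs `pip install johnsnowlabs` plus a Spark-capable environment:

from langchain_community.embeddings import JohnSnowLabsEmbeddings  # assumed import path

emb = JohnSnowLabsEmbeddings(model='embed_sentence.bert', hardware_target='cpu')
vectors = emb.embed_documents(['foo bar'])  # one embedding vector per input text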
validate_environment | """Validate that api key exists in environment."""
values['edenai_api_key'] = get_from_dict_or_env(values, 'edenai_api_key',
'EDENAI_API_KEY')
return values | @root_validator(allow_reuse=True)
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key exists in environment."""
values['edenai_api_key'] = get_from_dict_or_env(values,
'edenai_api_key', 'EDENAI_API_KEY')
return values | Validate that api key exists in environment. |
generate_from_stream | """Generate from a stream."""
generation: Optional[ChatGenerationChunk] = None
for chunk in stream:
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
return ChatResult(generations=[ChatGeneration(message=
message_chunk_to_message(generation.message), generation_info=
generation.generation_info)]) | def generate_from_stream(stream: Iterator[ChatGenerationChunk]) ->ChatResult:
"""Generate from a stream."""
generation: Optional[ChatGenerationChunk] = None
for chunk in stream:
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
return ChatResult(generations=[ChatGeneration(message=
message_chunk_to_message(generation.message), generation_info=
generation.generation_info)]) | Generate from a stream. |
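A small sketch of the aggregation, assuming the chunk and message types from langchain_core; successive chunks are folded together with `+=` before being converted back to a plain message:

from langchain_core.messages import AIMessageChunk
from langchain_core.outputs import ChatGenerationChunk

chunks = [ChatGenerationChunk(message=AIMessageChunk(content=part))
          for part in ('Hel', 'lo', '!')]
result = generate_from_stream(iter(chunks))
print(result.generations[0].message.content)  # 'Hello!'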
_user_posts_loader | user = reddit.redditor(search_query)
method = getattr(user.submissions, category)
cat_posts = method(limit=self.number_posts)
"""Format reddit posts into a string."""
for post in cat_posts:
metadata = {'post_subreddit': post.subreddit_name_prefixed,
'post_category': category, 'post_title': post.title, 'post_score':
post.score, 'post_id': post.id, 'post_url': post.url, 'post_author':
post.author}
yield Document(page_content=post.selftext, metadata=metadata) | def _user_posts_loader(self, search_query: str, category: str, reddit: praw
.reddit.Reddit) ->Iterable[Document]:
user = reddit.redditor(search_query)
method = getattr(user.submissions, category)
cat_posts = method(limit=self.number_posts)
"""Format reddit posts into a string."""
for post in cat_posts:
metadata = {'post_subreddit': post.subreddit_name_prefixed,
'post_category': category, 'post_title': post.title,
'post_score': post.score, 'post_id': post.id, 'post_url': post.
url, 'post_author': post.author}
yield Document(page_content=post.selftext, metadata=metadata) | null |
test_causal_chain | """Test CausalChain can translate a narrative's plot into a causal model
containing operations linked by a DAG."""
llm = OpenAI(temperature=0, max_tokens=512)
causal_chain = CausalChain.from_univariate_prompt(llm)
narrative_plot = (
'Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy. '
)
output = causal_chain(narrative_plot)
expected_output = {'chain_answer': None, 'chain_data': CausalModel(
attribute='pet_count', entities=[EntityModel(name='cindy', code='pass',
value=0.0, depends_on=[]), EntityModel(name='marcia', code=
'marcia.value = cindy.value + 2', value=0.0, depends_on=['cindy']),
EntityModel(name='jan', code='jan.value = marcia.value * 3', value=0.0,
depends_on=['marcia'])]), 'narrative_input':
'Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy. '
}
self.assertDictEqual(output, expected_output)
self.assertEqual(isinstance(output[Constant.chain_data.value], CausalModel),
True) | def test_causal_chain(self) ->None:
"""Test CausalChain can translate a narrative's plot into a causal model
containing operations linked by a DAG."""
llm = OpenAI(temperature=0, max_tokens=512)
    causal_chain = CausalChain.from_univariate_prompt(llm)
narrative_plot = (
'Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy. '
)
    output = causal_chain(narrative_plot)
expected_output = {'chain_answer': None, 'chain_data': CausalModel(
attribute='pet_count', entities=[EntityModel(name='cindy', code=
'pass', value=0.0, depends_on=[]), EntityModel(name='marcia', code=
'marcia.value = cindy.value + 2', value=0.0, depends_on=['cindy']),
EntityModel(name='jan', code='jan.value = marcia.value * 3', value=
0.0, depends_on=['marcia'])]), 'narrative_input':
'Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy. '
}
self.assertDictEqual(output, expected_output)
self.assertEqual(isinstance(output[Constant.chain_data.value],
CausalModel), True) | Test CausalChain can translate a narrative's plot into a causal model
containing operations linked by a DAG. |
test_load_fail_no_func | """Test that fails to load"""
with pytest.raises(ValidationError) as exc_info:
TensorflowDatasets(dataset_name='mlqa/en', split_name='test',
load_max_docs=MAX_DOCS)
assert 'Please provide a function' in str(exc_info.value) | def test_load_fail_no_func() ->None:
"""Test that fails to load"""
with pytest.raises(ValidationError) as exc_info:
TensorflowDatasets(dataset_name='mlqa/en', split_name='test',
load_max_docs=MAX_DOCS)
assert 'Please provide a function' in str(exc_info.value) | Test that fails to load |
_parse_content | try:
import html2text
return html2text.html2text(content).strip()
except ImportError as e:
raise ImportError(
'Could not import `html2text`. Although it is not a required package to use Langchain, using the EverNote loader requires `html2text`. Please install `html2text` via `pip install html2text` and try again.'
) from e | @staticmethod
def _parse_content(content: str) ->str:
try:
import html2text
return html2text.html2text(content).strip()
except ImportError as e:
raise ImportError(
'Could not import `html2text`. Although it is not a required package to use Langchain, using the EverNote loader requires `html2text`. Please install `html2text` via `pip install html2text` and try again.'
) from e | null |
on_text | try:
crumbs_str = f'[{self.get_breadcrumbs(run=self._get_run(run_id=run_id))}] '
except TracerException:
crumbs_str = ''
self.function_callback(
f"""{get_colored_text('[text]', color='blue')} {get_bolded_text(f'{crumbs_str}New text:')}
{text}"""
) | def on_text(self, text: str, *, run_id: UUID, parent_run_id: Optional[UUID]
=None, **kwargs: Any) ->None:
try:
crumbs_str = (
f'[{self.get_breadcrumbs(run=self._get_run(run_id=run_id))}] ')
except TracerException:
crumbs_str = ''
self.function_callback(
f"""{get_colored_text('[text]', color='blue')} {get_bolded_text(f'{crumbs_str}New text:')}
{text}"""
) | null |
similarity_search | """Run similarity search with Neo4jVector.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding.embed_query(text=query)
return self.similarity_search_by_vector(embedding=embedding, k=k, query=query) | def similarity_search(self, query: str, k: int=4, **kwargs: Any) ->List[
Document]:
"""Run similarity search with Neo4jVector.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding.embed_query(text=query)
return self.similarity_search_by_vector(embedding=embedding, k=k, query
=query) | Run similarity search with Neo4jVector.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
Returns:
List of Documents most similar to the query. |
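Hypothetical usage, assuming `store` is an already-initialized Neo4jVector with an embedding model attached:

docs = store.similarity_search('What is a vector index?', k=2)
for doc in docs:
    print(doc.page_content)  # the two most similar documents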
on_text | """Do nothing""" | def on_text(self, text: str, **kwargs: Any) ->None:
"""Do nothing""" | Do nothing |
index_name | """Return the index name."""
return f'test_{uuid.uuid4().hex}' | @pytest.fixture(scope='function')
def index_name(self) ->str:
"""Return the index name."""
return f'test_{uuid.uuid4().hex}' | Return the index name. |
_llm_type | """Return type of llm."""
return 'databricks' | @property
def _llm_type(self) ->str:
"""Return type of llm."""
return 'databricks' | Return type of llm. |
test_modal_call | """Test valid call to Modal."""
llm = Modal()
output = llm('Say foo:')
assert isinstance(output, str) | def test_modal_call() ->None:
"""Test valid call to Modal."""
llm = Modal()
output = llm('Say foo:')
assert isinstance(output, str) | Test valid call to Modal. |
_run | """Use the tool."""
if self.func:
new_argument_supported = signature(self.func).parameters.get('callbacks')
return self.func(*args, callbacks=run_manager.get_child() if
run_manager else None, **kwargs
) if new_argument_supported else self.func(*args, **kwargs)
raise NotImplementedError('Tool does not support sync') | def _run(self, *args: Any, run_manager: Optional[CallbackManagerForToolRun]
=None, **kwargs: Any) ->Any:
"""Use the tool."""
if self.func:
new_argument_supported = signature(self.func).parameters.get(
'callbacks')
return self.func(*args, callbacks=run_manager.get_child() if
run_manager else None, **kwargs
) if new_argument_supported else self.func(*args, **kwargs)
raise NotImplementedError('Tool does not support sync') | Use the tool. |
_get_relevant_documents | search_results = self._search(query)
return [Document(page_content=result.pop(self.content_key), metadata=result
) for result in search_results] | def _get_relevant_documents(self, query: str, *, run_manager:
CallbackManagerForRetrieverRun) ->List[Document]:
search_results = self._search(query)
return [Document(page_content=result.pop(self.content_key), metadata=
result) for result in search_results] | null |
_merge_partial_and_user_variables | intermediate_steps = kwargs.pop('intermediate_steps')
kwargs['agent_scratchpad'] = self._construct_agent_scratchpad(
intermediate_steps)
return kwargs | def _merge_partial_and_user_variables(self, **kwargs: Any) ->Dict[str, Any]:
intermediate_steps = kwargs.pop('intermediate_steps')
kwargs['agent_scratchpad'] = self._construct_agent_scratchpad(
intermediate_steps)
return kwargs | null |
test_failure | """Test that call that doesn't run."""
stackexchange = StackExchangeAPIWrapper()
output = stackexchange.run('sjefbsmnf')
assert output == "No relevant results found for 'sjefbsmnf' on Stack Overflow" | def test_failure() ->None:
"""Test that call that doesn't run."""
stackexchange = StackExchangeAPIWrapper()
output = stackexchange.run('sjefbsmnf')
assert output == "No relevant results found for 'sjefbsmnf' on Stack Overflow" | Test that call that doesn't run. |
test_konko_token_streaming_test | """Check token streaming for ChatKonko."""
chat_instance = ChatKonko(max_tokens=10)
for token in chat_instance.stream('Just a test'):
assert isinstance(token.content, str) | def test_konko_token_streaming_test() ->None:
"""Check token streaming for ChatKonko."""
chat_instance = ChatKonko(max_tokens=10)
for token in chat_instance.stream('Just a test'):
assert isinstance(token.content, str) | Check token streaming for ChatKonko. |
test_mrkl_serialization | agent = initialize_agent([Tool(name='Test tool', func=lambda x: x,
description='Test description')], FakeListLLM(responses=[]), agent=
AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
with TemporaryDirectory() as tempdir:
file = Path(tempdir) / 'agent.json'
agent.save_agent(file)
load_agent(file) | def test_mrkl_serialization() ->None:
agent = initialize_agent([Tool(name='Test tool', func=lambda x: x,
description='Test description')], FakeListLLM(responses=[]), agent=
AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
with TemporaryDirectory() as tempdir:
file = Path(tempdir) / 'agent.json'
agent.save_agent(file)
load_agent(file) | null |
_llm_type | return 'baichuan-chat' | @property
def _llm_type(self) ->str:
return 'baichuan-chat' | null |
_convert_message_chunk_to_delta | _dict = _convert_message_chunk(chunk, i)
return {'choices': [{'delta': _dict}]} | def _convert_message_chunk_to_delta(chunk: BaseMessageChunk, i: int) ->Dict[
str, Any]:
_dict = _convert_message_chunk(chunk, i)
return {'choices': [{'delta': _dict}]} | null |
observation_prefix | """Prefix to append the observation with."""
return 'Observation: ' | @property
def observation_prefix(self) ->str:
"""Prefix to append the observation with."""
return 'Observation: ' | Prefix to append the observation with. |
_identifying_params | """Get the identifying parameters."""
set_model_value = self.model
if self.model_name is not None:
set_model_value = self.model_name
return {'model': set_model_value, 'temperature': self.temperature, 'top_p':
self.top_p, 'top_k': self.top_k, 'n': self.n} | @property
def _identifying_params(self) ->Dict[str, Any]:
"""Get the identifying parameters."""
set_model_value = self.model
if self.model_name is not None:
set_model_value = self.model_name
return {'model': set_model_value, 'temperature': self.temperature,
'top_p': self.top_p, 'top_k': self.top_k, 'n': self.n} | Get the identifying parameters. |
_is_openai_parts_format | return 'type' in part | def _is_openai_parts_format(part: dict) ->bool:
return 'type' in part | null |
validate_channel_or_videoIds_is_set | """Validate that either credentials_path or service_account_path is set."""
if not values.get('credentials_path') and not values.get('service_account_path'
):
raise ValueError('Must specify either channel_name or video_ids')
return values | @root_validator
def validate_channel_or_videoIds_is_set(cls, values: Dict[str, Any]) ->Dict[
str, Any]:
"""Validate that either folder_id or document_ids is set, but not both."""
if not values.get('credentials_path') and not values.get(
'service_account_path'):
raise ValueError('Must specify either channel_name or video_ids')
    return values | Validate that either credentials_path or service_account_path is set.
test_transform_chain | """Test basic transform chain."""
transform_chain = TransformChain(input_variables=['first_name', 'last_name'
], output_variables=['greeting'], transform=dummy_transform)
input_dict = {'first_name': 'Leroy', 'last_name': 'Jenkins'}
response = transform_chain(input_dict)
expected_response = {'greeting': 'Leroy Jenkins says hello'}
assert response == expected_response | def test_transform_chain() ->None:
"""Test basic transform chain."""
transform_chain = TransformChain(input_variables=['first_name',
'last_name'], output_variables=['greeting'], transform=dummy_transform)
input_dict = {'first_name': 'Leroy', 'last_name': 'Jenkins'}
response = transform_chain(input_dict)
expected_response = {'greeting': 'Leroy Jenkins says hello'}
assert response == expected_response | Test basic transform chain. |
__call__ | """Maps the Run to a dictionary."""
if not run.outputs:
raise ValueError(f'Run {run.id} has no outputs to evaluate.')
return self.map(run) | def __call__(self, run: Run) ->Dict[str, str]:
"""Maps the Run to a dictionary."""
if not run.outputs:
raise ValueError(f'Run {run.id} has no outputs to evaluate.')
return self.map(run) | Maps the Run to a dictionary. |
test_prompt_missing_input_variables | """Test error is raised when input variables are not provided."""
template = 'This is a {foo} test.'
with pytest.raises(ValueError):
FewShotPromptTemplate(input_variables=[], suffix=template, examples=[],
example_prompt=EXAMPLE_PROMPT, validate_template=True)
assert FewShotPromptTemplate(input_variables=[], suffix=template, examples=
[], example_prompt=EXAMPLE_PROMPT).input_variables == ['foo']
template = 'This is a {foo} test.'
with pytest.raises(ValueError):
FewShotPromptTemplate(input_variables=[], suffix='foo', examples=[],
prefix=template, example_prompt=EXAMPLE_PROMPT, validate_template=True)
assert FewShotPromptTemplate(input_variables=[], suffix='foo', examples=[],
prefix=template, example_prompt=EXAMPLE_PROMPT).input_variables == ['foo'] | def test_prompt_missing_input_variables() ->None:
"""Test error is raised when input variables are not provided."""
template = 'This is a {foo} test.'
with pytest.raises(ValueError):
FewShotPromptTemplate(input_variables=[], suffix=template, examples
=[], example_prompt=EXAMPLE_PROMPT, validate_template=True)
assert FewShotPromptTemplate(input_variables=[], suffix=template,
examples=[], example_prompt=EXAMPLE_PROMPT).input_variables == ['foo']
template = 'This is a {foo} test.'
with pytest.raises(ValueError):
FewShotPromptTemplate(input_variables=[], suffix='foo', examples=[],
prefix=template, example_prompt=EXAMPLE_PROMPT,
validate_template=True)
assert FewShotPromptTemplate(input_variables=[], suffix='foo', examples
=[], prefix=template, example_prompt=EXAMPLE_PROMPT
).input_variables == ['foo'] | Test error is raised when input variables are not provided. |
texts | return ['foo', 'bar', 'baz'] | @pytest.fixture
def texts() ->List[str]:
return ['foo', 'bar', 'baz'] | null |
_metadata_extractor | """Extract metadata from raw html using BeautifulSoup."""
metadata = {'source': url}
try:
from bs4 import BeautifulSoup
except ImportError:
logger.warning(
'The bs4 package is required for default metadata extraction. Please install it with `pip install bs4`.'
)
return metadata
soup = BeautifulSoup(raw_html, 'html.parser')
if (title := soup.find('title')):
metadata['title'] = title.get_text()
if (description := soup.find('meta', attrs={'name': 'description'})):
metadata['description'] = description.get('content', None)
if (html := soup.find('html')):
metadata['language'] = html.get('lang', None)
return metadata | def _metadata_extractor(raw_html: str, url: str) ->dict:
"""Extract metadata from raw html using BeautifulSoup."""
metadata = {'source': url}
try:
from bs4 import BeautifulSoup
except ImportError:
logger.warning(
'The bs4 package is required for default metadata extraction. Please install it with `pip install bs4`.'
)
return metadata
soup = BeautifulSoup(raw_html, 'html.parser')
if (title := soup.find('title')):
metadata['title'] = title.get_text()
if (description := soup.find('meta', attrs={'name': 'description'})):
metadata['description'] = description.get('content', None)
if (html := soup.find('html')):
metadata['language'] = html.get('lang', None)
return metadata | Extract metadata from raw html using BeautifulSoup. |
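A quick sketch of the extractor on a toy page (bs4 must be installed, otherwise only the source URL survives, per the warning above):

raw = "<html lang='en'><head><title>Example</title></head><body>hi</body></html>"
print(_metadata_extractor(raw, 'https://example.com'))
# {'source': 'https://example.com', 'title': 'Example', 'language': 'en'}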
_hash_string_to_uuid | """Hashes a string and returns the corresponding UUID."""
hash_value = hashlib.sha1(input_string.encode('utf-8')).hexdigest()
return uuid.uuid5(NAMESPACE_UUID, hash_value) | def _hash_string_to_uuid(input_string: str) ->uuid.UUID:
"""Hashes a string and returns the corresponding UUID."""
hash_value = hashlib.sha1(input_string.encode('utf-8')).hexdigest()
return uuid.uuid5(NAMESPACE_UUID, hash_value) | Hashes a string and returns the corresponding UUID. |
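The hash is deterministic, so repeated calls on the same text give the same UUID; a tiny sketch:

assert _hash_string_to_uuid('hello') == _hash_string_to_uuid('hello')
assert _hash_string_to_uuid('hello') != _hash_string_to_uuid('world')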
_is_visible | parts = p.parts
for _p in parts:
if _p.startswith('.'):
return False
return True | def _is_visible(p: Path) ->bool:
parts = p.parts
for _p in parts:
if _p.startswith('.'):
return False
return True | null |
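A short sketch: a path counts as visible only when no component starts with a dot:

from pathlib import Path

assert _is_visible(Path('docs/readme.md'))
assert not _is_visible(Path('.git/config'))
assert not _is_visible(Path('src/.hidden/notes.txt'))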
test_self_hosted_huggingface_instructor_embedding_documents | """Test self-hosted huggingface instruct embeddings."""
documents = ['foo bar']
gpu = get_remote_instance()
embedding = SelfHostedHuggingFaceInstructEmbeddings(hardware=gpu)
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768 | def test_self_hosted_huggingface_instructor_embedding_documents() ->None:
"""Test self-hosted huggingface instruct embeddings."""
documents = ['foo bar']
gpu = get_remote_instance()
embedding = SelfHostedHuggingFaceInstructEmbeddings(hardware=gpu)
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768 | Test self-hosted huggingface instruct embeddings. |
replace_brackets | words_inside_brackets = match.group(1).split(', ')
embedded_words = [str(embeddings_model.embed_query(word)) for word in
words_inside_brackets]
return "', '".join(embedded_words) | def replace_brackets(match):
words_inside_brackets = match.group(1).split(', ')
embedded_words = [str(embeddings_model.embed_query(word)) for word in
words_inside_brackets]
return "', '".join(embedded_words) | null |
_query_body | query_vector_body = {'vector': query_vector, 'k': search_params.get('k', 2)}
if filter is not None and len(filter) != 0:
query_vector_body['filter'] = filter
if 'linear' == self.index_type:
query_vector_body['linear'] = True
else:
query_vector_body['ef'] = search_params.get('ef', 10)
return {'size': search_params.get('size', 4), 'query': {'knn': {self.
vector_query_field: query_vector_body}}} | def _query_body(self, query_vector: Union[List[float], None], filter:
Optional[dict]=None, search_params: Dict={}) ->Dict:
query_vector_body = {'vector': query_vector, 'k': search_params.get('k', 2)
}
if filter is not None and len(filter) != 0:
query_vector_body['filter'] = filter
if 'linear' == self.index_type:
query_vector_body['linear'] = True
else:
query_vector_body['ef'] = search_params.get('ef', 10)
return {'size': search_params.get('size', 4), 'query': {'knn': {self.
vector_query_field: query_vector_body}}} | null |
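A hedged sketch of the produced body, assuming `store` is an instance whose index_type is 'linear' and whose vector_query_field is 'vector':

body = store._query_body([0.1, 0.2], filter=None,
                         search_params={'k': 5, 'size': 3})
# {'size': 3, 'query': {'knn': {'vector': {'vector': [0.1, 0.2],
#                                          'k': 5, 'linear': True}}}}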
similarity_search | from nuclia.sdk import NucliaSearch
from nucliadb_models.search import FindRequest, ResourceProperties
request = FindRequest(query=query, page_size=k, show=[ResourceProperties.
VALUES, ResourceProperties.EXTRA])
search = NucliaSearch()
results = search.find(query=request, url=self.kb_url, api_key=self._config[
'TOKEN'])
paragraphs = []
for resource in results.resources.values():
for field in resource.fields.values():
for paragraph_id, paragraph in field.paragraphs.items():
info = paragraph_id.split('/')
field_type = FIELD_TYPES.get(info[1], None)
field_id = info[2]
if not field_type:
continue
value = getattr(resource.data, field_type, {}).get(field_id, None)
paragraphs.append({'text': paragraph.text, 'metadata': {'extra':
getattr(getattr(resource, 'extra', {}), 'metadata', None),
'value': value}, 'order': paragraph.order})
sorted_paragraphs = sorted(paragraphs, key=lambda x: x['order'])
return [Document(page_content=paragraph['text'], metadata=paragraph[
'metadata']) for paragraph in sorted_paragraphs] | def similarity_search(self, query: str, k: int=4, **kwargs: Any) ->List[
Document]:
from nuclia.sdk import NucliaSearch
from nucliadb_models.search import FindRequest, ResourceProperties
request = FindRequest(query=query, page_size=k, show=[
ResourceProperties.VALUES, ResourceProperties.EXTRA])
search = NucliaSearch()
results = search.find(query=request, url=self.kb_url, api_key=self.
_config['TOKEN'])
paragraphs = []
for resource in results.resources.values():
for field in resource.fields.values():
for paragraph_id, paragraph in field.paragraphs.items():
info = paragraph_id.split('/')
field_type = FIELD_TYPES.get(info[1], None)
field_id = info[2]
if not field_type:
continue
value = getattr(resource.data, field_type, {}).get(field_id,
None)
paragraphs.append({'text': paragraph.text, 'metadata': {
'extra': getattr(getattr(resource, 'extra', {}),
'metadata', None), 'value': value}, 'order': paragraph.
order})
sorted_paragraphs = sorted(paragraphs, key=lambda x: x['order'])
return [Document(page_content=paragraph['text'], metadata=paragraph[
'metadata']) for paragraph in sorted_paragraphs] | null |
_fake_docs_len_func | return len(_fake_combine_docs_func(docs)) | def _fake_docs_len_func(docs: List[Document]) ->int:
return len(_fake_combine_docs_func(docs)) | null |
test_from_texts_passed_optimizers_config_and_on_disk_payload | from qdrant_client import models
collection_name = uuid.uuid4().hex
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
optimizers_config = models.OptimizersConfigDiff(memmap_threshold=1000)
vec_store = Qdrant.from_texts(texts, ConsistentFakeEmbeddings(), metadatas=
metadatas, optimizers_config=optimizers_config, on_disk_payload=True,
on_disk=True, collection_name=collection_name)
collection_info = vec_store.client.get_collection(collection_name)
assert collection_info.config.params.vectors.on_disk is True
assert collection_info.config.optimizer_config.memmap_threshold == 1000
assert collection_info.config.params.on_disk_payload is True | @pytest.mark.skipif(qdrant_is_not_running(), reason='Qdrant is not running')
def test_from_texts_passed_optimizers_config_and_on_disk_payload() ->None:
from qdrant_client import models
collection_name = uuid.uuid4().hex
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
optimizers_config = models.OptimizersConfigDiff(memmap_threshold=1000)
vec_store = Qdrant.from_texts(texts, ConsistentFakeEmbeddings(),
metadatas=metadatas, optimizers_config=optimizers_config,
on_disk_payload=True, on_disk=True, collection_name=collection_name)
collection_info = vec_store.client.get_collection(collection_name)
assert collection_info.config.params.vectors.on_disk is True
assert collection_info.config.optimizer_config.memmap_threshold == 1000
assert collection_info.config.params.on_disk_payload is True | null |
_import_gigachat | from langchain_community.llms.gigachat import GigaChat
return GigaChat | def _import_gigachat() ->Any:
from langchain_community.llms.gigachat import GigaChat
return GigaChat | null |
_get_col_range_str | if request.num_cols:
return f'from 1 to {request.num_cols}'
else:
return '' | def _get_col_range_str(request: FileProcessingRequest):
if request.num_cols:
return f'from 1 to {request.num_cols}'
else:
return '' | null |
similarity_search_with_score | """Run similarity search with score using Clarifai.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filters (Optional[dict]): Filter by metadata.
Defaults to None.
Returns:
List[Document]: List of documents most similar to the query text.
"""
try:
from clarifai.client.search import Search
from clarifai_grpc.grpc.api import resources_pb2
from google.protobuf import json_format
except ImportError as e:
raise ImportError(
'Could not import clarifai python package. Please install it with `pip install clarifai`.'
) from e
if self._number_of_docs is not None:
k = self._number_of_docs
search_obj = Search(user_id=self._user_id, app_id=self._app_id, top_k=k)
rank = [{'text_raw': query}]
if filters is not None:
search_metadata = {'metadata': filters}
search_response = search_obj.query(ranks=rank, filters=[search_metadata])
else:
search_response = search_obj.query(ranks=rank)
hits = [hit for data in search_response for hit in data.hits]
executor = ThreadPoolExecutor(max_workers=10)
def hit_to_document(hit: resources_pb2.Hit) ->Tuple[Document, float]:
metadata = json_format.MessageToDict(hit.input.data.metadata)
h = {'Authorization': f'Key {self._pat}'}
request = requests.get(hit.input.data.text.url, headers=h)
request.encoding = request.apparent_encoding
requested_text = request.text
logger.debug(
f'\tScore {hit.score:.2f} for annotation: {hit.annotation.id} off input: {hit.input.id}, text: {requested_text[:125]}'
)
return Document(page_content=requested_text, metadata=metadata), hit.score
futures = [executor.submit(hit_to_document, hit) for hit in hits]
docs_and_scores = [future.result() for future in futures]
return docs_and_scores | def similarity_search_with_score(self, query: str, k: int=4, filters:
Optional[dict]=None, **kwargs: Any) ->List[Tuple[Document, float]]:
"""Run similarity search with score using Clarifai.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
        filters (Optional[dict]): Filter by metadata.
Defaults to None.
Returns:
List[Document]: List of documents most similar to the query text.
"""
try:
from clarifai.client.search import Search
from clarifai_grpc.grpc.api import resources_pb2
from google.protobuf import json_format
except ImportError as e:
raise ImportError(
'Could not import clarifai python package. Please install it with `pip install clarifai`.'
) from e
if self._number_of_docs is not None:
k = self._number_of_docs
search_obj = Search(user_id=self._user_id, app_id=self._app_id, top_k=k)
rank = [{'text_raw': query}]
if filters is not None:
search_metadata = {'metadata': filters}
search_response = search_obj.query(ranks=rank, filters=[
search_metadata])
else:
search_response = search_obj.query(ranks=rank)
hits = [hit for data in search_response for hit in data.hits]
executor = ThreadPoolExecutor(max_workers=10)
def hit_to_document(hit: resources_pb2.Hit) ->Tuple[Document, float]:
metadata = json_format.MessageToDict(hit.input.data.metadata)
h = {'Authorization': f'Key {self._pat}'}
request = requests.get(hit.input.data.text.url, headers=h)
request.encoding = request.apparent_encoding
requested_text = request.text
logger.debug(
f'\tScore {hit.score:.2f} for annotation: {hit.annotation.id} off input: {hit.input.id}, text: {requested_text[:125]}'
)
return Document(page_content=requested_text, metadata=metadata
), hit.score
futures = [executor.submit(hit_to_document, hit) for hit in hits]
docs_and_scores = [future.result() for future in futures]
return docs_and_scores | Run similarity search with score using Clarifai.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filters (Optional[dict]): Filter by metadata.
Defaults to None.
Returns:
List[Document]: List of documents most similar to the query text. |
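Hypothetical usage, assuming `store` is an initialized Clarifai vector store with PAT, user and app IDs configured:

results = store.similarity_search_with_score('neural networks', k=2)
for doc, score in results:
    print(f'{score:.2f}  {doc.page_content[:60]}')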
transform_run | """Transforms a run dictionary to be compatible with WBTraceTree.
:param run: The run dictionary to transform.
:return: The transformed run dictionary.
"""
transformed_dict = transform_serialized(run)
serialized = transformed_dict.pop('serialized')
for k, v in serialized.items():
transformed_dict[k] = v
_kind = transformed_dict.get('_kind', None)
name = transformed_dict.pop('name', None)
exec_ord = transformed_dict.pop('execution_order', None)
if not name:
name = _kind
output_dict = {f'{exec_ord}_{name}': transformed_dict}
return output_dict | def transform_run(run: Dict[str, Any]) ->Dict[str, Any]:
"""Transforms a run dictionary to be compatible with WBTraceTree.
:param run: The run dictionary to transform.
:return: The transformed run dictionary.
"""
transformed_dict = transform_serialized(run)
serialized = transformed_dict.pop('serialized')
for k, v in serialized.items():
transformed_dict[k] = v
_kind = transformed_dict.get('_kind', None)
name = transformed_dict.pop('name', None)
exec_ord = transformed_dict.pop('execution_order', None)
if not name:
name = _kind
output_dict = {f'{exec_ord}_{name}': transformed_dict}
return output_dict | Transforms a run dictionary to be compatible with WBTraceTree.
:param run: The run dictionary to transform.
:return: The transformed run dictionary. |
load | """Load from the dataframe."""
if self.df.count() > self.max_num_rows:
logger.warning(
f'The number of DataFrame rows is {self.df.count()}, but we will only include the amount of rows that can reasonably fit in memory: {self.num_rows}.'
)
lazy_load_iterator = self.lazy_load()
return list(itertools.islice(lazy_load_iterator, self.num_rows)) | def load(self) ->List[Document]:
"""Load from the dataframe."""
if self.df.count() > self.max_num_rows:
logger.warning(
f'The number of DataFrame rows is {self.df.count()}, but we will only include the amount of rows that can reasonably fit in memory: {self.num_rows}.'
)
lazy_load_iterator = self.lazy_load()
return list(itertools.islice(lazy_load_iterator, self.num_rows)) | Load from the dataframe. |
test_continue_on_failure_true | """Test exception is not raised when continue_on_failure=True."""
loader = UnstructuredURLLoader(['badurl.foobar'])
loader.load() | def test_continue_on_failure_true() ->None:
"""Test exception is not raised when continue_on_failure=True."""
loader = UnstructuredURLLoader(['badurl.foobar'])
loader.load() | Test exception is not raised when continue_on_failure=True. |
_import_google_search_tool_GoogleSearchRun | from langchain_community.tools.google_search.tool import GoogleSearchRun
return GoogleSearchRun | def _import_google_search_tool_GoogleSearchRun() ->Any:
from langchain_community.tools.google_search.tool import GoogleSearchRun
return GoogleSearchRun | null |
lazy_parse | """Extract the first character of a blob."""
yield Document(page_content=blob.as_string()[0]) | def lazy_parse(self, blob: Blob) ->Iterator[Document]:
"""Extract the first character of a blob."""
yield Document(page_content=blob.as_string()[0]) | Extract the first character of a blob. |
_evaluate_expression | import numexpr
try:
local_dict = {'pi': math.pi, 'e': math.e}
output = str(numexpr.evaluate(expression.strip(), global_dict={},
local_dict=local_dict))
except Exception as e:
raise ValueError(
f'LLMMathChain._evaluate("{expression}") raised error: {e}. Please try again with a valid numerical expression'
)
return re.sub('^\\[|\\]$', '', output) | def _evaluate_expression(self, expression: str) ->str:
import numexpr
try:
local_dict = {'pi': math.pi, 'e': math.e}
output = str(numexpr.evaluate(expression.strip(), global_dict={},
local_dict=local_dict))
except Exception as e:
raise ValueError(
f'LLMMathChain._evaluate("{expression}") raised error: {e}. Please try again with a valid numerical expression'
)
return re.sub('^\\[|\\]$', '', output) | null |
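A sketch of the numexpr evaluation, assuming `chain` is an LLMMathChain instance; pi and e are the only names bound in the local dict, so anything else fails with the ValueError above:

print(chain._evaluate_expression('2 ** 10'))  # '1024'
print(chain._evaluate_expression('pi * e'))   # approximately '8.539734222673566'
chain._evaluate_expression('import os')       # raises ValueError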
FakeDelete | def fn(self: Any, **kwargs: Any) ->None:
return None
return fn | def FakeDelete(**args: Any) ->Any:
def fn(self: Any, **kwargs: Any) ->None:
return None
return fn | null |
is_lc_serializable | return True | @classmethod
def is_lc_serializable(cls) ->bool:
return True | null |
validate_environment | """Validate that the python package exists in environment."""
try:
from google.api_core.client_options import ClientOptions
from google.cloud.aiplatform.gapic import PredictionServiceAsyncClient, PredictionServiceClient
except ImportError:
raise_vertex_import_error()
if not values['project']:
raise ValueError(
'A GCP project should be provided to run inference on Model Garden!')
client_options = ClientOptions(api_endpoint=
f"{values['location']}-aiplatform.googleapis.com")
client_info = get_client_info(module='vertex-ai-model-garden')
values['client'] = PredictionServiceClient(client_options=client_options,
client_info=client_info)
values['async_client'] = PredictionServiceAsyncClient(client_options=
client_options, client_info=client_info)
return values | @root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that the python package exists in environment."""
try:
from google.api_core.client_options import ClientOptions
from google.cloud.aiplatform.gapic import PredictionServiceAsyncClient, PredictionServiceClient
except ImportError:
raise_vertex_import_error()
if not values['project']:
raise ValueError(
'A GCP project should be provided to run inference on Model Garden!'
)
client_options = ClientOptions(api_endpoint=
f"{values['location']}-aiplatform.googleapis.com")
client_info = get_client_info(module='vertex-ai-model-garden')
values['client'] = PredictionServiceClient(client_options=
client_options, client_info=client_info)
values['async_client'] = PredictionServiceAsyncClient(client_options=
client_options, client_info=client_info)
return values | Validate that the python package exists in environment. |
test_visit_comparison_lt | comp = Comparison(comparator=Comparator.LT, attribute='qty', value=20)
expected = {'qty': {'$lt': 20}}
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual | def test_visit_comparison_lt() ->None:
comp = Comparison(comparator=Comparator.LT, attribute='qty', value=20)
expected = {'qty': {'$lt': 20}}
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual | null |
_chat_generation_from_qwen_resp | choice = resp['output']['choices'][0]
message = convert_dict_to_message(choice['message'], is_chunk=is_chunk)
return dict(message=message, generation_info=dict(finish_reason=choice[
'finish_reason'], request_id=resp['request_id'], token_usage=dict(resp[
'usage']))) | @staticmethod
def _chat_generation_from_qwen_resp(resp: Any, is_chunk: bool=False) ->Dict[
str, Any]:
choice = resp['output']['choices'][0]
message = convert_dict_to_message(choice['message'], is_chunk=is_chunk)
return dict(message=message, generation_info=dict(finish_reason=choice[
'finish_reason'], request_id=resp['request_id'], token_usage=dict(
resp['usage']))) | null |
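A sketch with a hand-built response dict whose field names are taken from the accesses above; the ChatTongyi class name is an assumption about where this staticmethod lives:

resp = {
    'output': {'choices': [{'message': {'role': 'assistant', 'content': 'Hi!'},
                            'finish_reason': 'stop'}]},
    'request_id': 'req-123',
    'usage': {'input_tokens': 3, 'output_tokens': 2},
}
gen = ChatTongyi._chat_generation_from_qwen_resp(resp)  # class name assumed
# gen['message'] is the converted AIMessage; gen['generation_info'] carries
# finish_reason, request_id and token_usage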
click | js = """
links = document.getElementsByTagName("a");
for (var i = 0; i < links.length; i++) {
links[i].removeAttribute("target");
}
"""
self.page.evaluate(js)
element = self.page_element_buffer.get(int(id))
if element:
x: float = element['center_x']
y: float = element['center_y']
self.page.mouse.click(x, y)
else:
print('Could not find element') | def click(self, id: Union[str, int]) ->None:
js = """
links = document.getElementsByTagName("a");
for (var i = 0; i < links.length; i++) {
links[i].removeAttribute("target");
}
"""
self.page.evaluate(js)
element = self.page_element_buffer.get(int(id))
if element:
x: float = element['center_x']
y: float = element['center_y']
self.page.mouse.click(x, y)
else:
print('Could not find element') | null |