method_name (string, 1–78 chars) | method_body (string, 3–9.66k chars) | full_code (string, 31–10.7k chars) | docstring (string, 4–4.74k chars) |
---|---|---|---|
draw | """
CPAL chain can draw its resulting DAG.
Usage in a jupyter notebook:
>>> from IPython.display import SVG
>>> cpal_chain.draw(path="graph.svg")
>>> SVG('graph.svg')
"""
self._story._networkx_wrapper.draw_graphviz(**kwargs) | def draw(self, **kwargs: Any) ->None:
"""
CPAL chain can draw its resulting DAG.
Usage in a jupyter notebook:
>>> from IPython.display import SVG
>>> cpal_chain.draw(path="graph.svg")
>>> SVG('graph.svg')
"""
self._story._networkx_wrapper.draw_graphviz(**kwargs) | CPAL chain can draw its resulting DAG.
Usage in a jupyter notebook:
>>> from IPython.display import SVG
>>> cpal_chain.draw(path="graph.svg")
>>> SVG('graph.svg') |
similarity_search | """Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, Any]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
Returns:
List of Documents most similar to the query.
"""
docs_and_scores = self.similarity_search_with_score(query, k, filter=filter,
fetch_k=fetch_k, **kwargs)
return [doc for doc, _ in docs_and_scores] | def similarity_search(self, query: str, k: int=4, filter: Optional[Dict[str,
Any]]=None, fetch_k: int=20, **kwargs: Any) ->List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, Any]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
Returns:
List of Documents most similar to the query.
"""
docs_and_scores = self.similarity_search_with_score(query, k, filter=
filter, fetch_k=fetch_k, **kwargs)
return [doc for doc, _ in docs_and_scores] | Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, Any]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
Returns:
List of Documents most similar to the query. |
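For orientation, a hedged usage sketch of this signature: FAISS is just one vectorstore that exposes it, and FakeEmbeddings keeps the example offline (assumes `faiss-cpu` and `langchain-community` are installed; the texts and metadata are invented).

# Hedged sketch: any vectorstore implementing this signature is called the same way.
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import FAISS

store = FAISS.from_texts(
    ["foo", "bar", "baz"],
    FakeEmbeddings(size=8),
    metadatas=[{"page": i} for i in range(3)],
)
# k limits the final hits; fetch_k controls how many candidates are pulled before filtering.
hits = store.similarity_search("foo", k=2, filter={"page": 0}, fetch_k=10)
print([doc.page_content for doc in hits])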
on_llm_start | """Run when LLM starts running.""" | def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **
kwargs: Any) ->None:
"""Run when LLM starts running.""" | Run when LLM starts running. |
cosine_similarity_top_k | """Row-wise cosine similarity with optional top-k and score threshold filtering.
Args:
X: Matrix.
Y: Matrix, same width as X.
top_k: Max number of results to return.
score_threshold: Minimum cosine similarity of results.
Returns:
Tuple of two lists. First contains two-tuples of indices (X_idx, Y_idx),
second contains corresponding cosine similarities.
"""
if len(X) == 0 or len(Y) == 0:
return [], []
score_array = cosine_similarity(X, Y)
score_threshold = score_threshold or -1.0
score_array[score_array < score_threshold] = 0
top_k = min(top_k or len(score_array), np.count_nonzero(score_array))
top_k_idxs = np.argpartition(score_array, -top_k, axis=None)[-top_k:]
top_k_idxs = top_k_idxs[np.argsort(score_array.ravel()[top_k_idxs])][::-1]
ret_idxs = np.unravel_index(top_k_idxs, score_array.shape)
scores = score_array.ravel()[top_k_idxs].tolist()
return list(zip(*ret_idxs)), scores | def cosine_similarity_top_k(X: Matrix, Y: Matrix, top_k: Optional[int]=5,
score_threshold: Optional[float]=None) ->Tuple[List[Tuple[int, int]],
List[float]]:
"""Row-wise cosine similarity with optional top-k and score threshold filtering.
Args:
X: Matrix.
Y: Matrix, same width as X.
top_k: Max number of results to return.
score_threshold: Minimum cosine similarity of results.
Returns:
Tuple of two lists. First contains two-tuples of indices (X_idx, Y_idx),
second contains corresponding cosine similarities.
"""
if len(X) == 0 or len(Y) == 0:
return [], []
score_array = cosine_similarity(X, Y)
score_threshold = score_threshold or -1.0
score_array[score_array < score_threshold] = 0
top_k = min(top_k or len(score_array), np.count_nonzero(score_array))
top_k_idxs = np.argpartition(score_array, -top_k, axis=None)[-top_k:]
top_k_idxs = top_k_idxs[np.argsort(score_array.ravel()[top_k_idxs])][::-1]
ret_idxs = np.unravel_index(top_k_idxs, score_array.shape)
scores = score_array.ravel()[top_k_idxs].tolist()
return list(zip(*ret_idxs)), scores | Row-wise cosine similarity with optional top-k and score threshold filtering.
Args:
X: Matrix.
Y: Matrix, same width as X.
top_k: Max number of results to return.
score_threshold: Minimum cosine similarity of results.
Returns:
Tuple of two lists. First contains two-tuples of indices (X_idx, Y_idx),
second contains corresponding cosine similarities. |
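To make the flat argpartition/unravel_index indexing above concrete, here is a small self-contained sketch; plain numpy and a local cosine_similarity stand-in replace the imported helper, and the tiny matrices are invented for illustration.

import numpy as np

def cosine_similarity(X, Y):
    # Stand-in for the imported helper: row-wise cosine similarity.
    X, Y = np.asarray(X, dtype=float), np.asarray(Y, dtype=float)
    return (X @ Y.T) / (np.linalg.norm(X, axis=1)[:, None] * np.linalg.norm(Y, axis=1)[None, :])

X = [[1.0, 0.0], [0.0, 1.0]]
Y = [[1.0, 0.0], [0.5, 1.0]]
score_array = cosine_similarity(X, Y)
top_k = 2
# Partition the flattened scores, keep the top_k, then sort them descending.
top_k_idxs = np.argpartition(score_array, -top_k, axis=None)[-top_k:]
top_k_idxs = top_k_idxs[np.argsort(score_array.ravel()[top_k_idxs])][::-1]
ret_idxs = np.unravel_index(top_k_idxs, score_array.shape)
print(list(zip(*ret_idxs)), score_array.ravel()[top_k_idxs].tolist())
# top hits: (0, 0) then (1, 1), with similarities 1.0 and ~0.894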
_import_zapier_tool_ZapierNLAListActions | from langchain_community.tools.zapier.tool import ZapierNLAListActions
return ZapierNLAListActions | def _import_zapier_tool_ZapierNLAListActions() ->Any:
from langchain_community.tools.zapier.tool import ZapierNLAListActions
return ZapierNLAListActions | null |
test_model_response | msg = AIMessage(content='Model response.')
result = _parse_ai_message(msg)
assert isinstance(result, AgentFinish)
assert result.return_values == {'output': 'Model response.'}
assert result.log == 'Model response.' | def test_model_response(self) ->None:
msg = AIMessage(content='Model response.')
result = _parse_ai_message(msg)
assert isinstance(result, AgentFinish)
assert result.return_values == {'output': 'Model response.'}
assert result.log == 'Model response.' | null |
__init__ | super().__init__(**kwargs)
self.function_callback = function | def __init__(self, function: Callable[[str], None], **kwargs: Any) ->None:
super().__init__(**kwargs)
self.function_callback = function | null |
_import_requests_tool_RequestsPatchTool | from langchain_community.tools.requests.tool import RequestsPatchTool
return RequestsPatchTool | def _import_requests_tool_RequestsPatchTool() ->Any:
from langchain_community.tools.requests.tool import RequestsPatchTool
return RequestsPatchTool | null |
__repr__ | map_for_repr = ',\n '.join(
f"{k}: {indent_lines_after_first(repr(v), ' ' + k + ': ')}" for k, v in
self.steps.items())
return '{\n ' + map_for_repr + '\n}' | def __repr__(self) ->str:
map_for_repr = ',\n '.join(
f"{k}: {indent_lines_after_first(repr(v), ' ' + k + ': ')}" for k,
v in self.steps.items())
return '{\n ' + map_for_repr + '\n}' | null |
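As a rough illustration of the layout this __repr__ produces, the sketch below uses a local stand-in for indent_lines_after_first and an invented steps mapping; the names and values are assumptions, not taken from the library.

def indent_lines_after_first(text, prefix):
    # Stand-in: pad every line after the first so values align under their key.
    lines = text.splitlines()
    pad = " " * len(prefix)
    return "\n".join([lines[0]] + [pad + line for line in lines[1:]])

steps = {"prompt": "PromptTemplate(...)", "llm": "FakeLLM(...)"}  # invented example values
map_for_repr = ",\n  ".join(
    f"{k}: {indent_lines_after_first(repr(v), '  ' + k + ': ')}" for k, v in steps.items()
)
print("{\n  " + map_for_repr + "\n}")
# {
#   prompt: 'PromptTemplate(...)',
#   llm: 'FakeLLM(...)'
# }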
__init__ | self.json_data = json_data
self.status_code = status_code | def __init__(self, json_data: Dict, status_code: int):
self.json_data = json_data
self.status_code = status_code | null |
call | """call."""
headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' +
self.endpoint_api_key}
if self.deployment_name != '':
headers['azureml-model-deployment'] = self.deployment_name
req = urllib.request.Request(self.endpoint_url, body, headers)
response = urllib.request.urlopen(req, timeout=kwargs.get('timeout', 50))
result = response.read()
return result | def call(self, body: bytes, **kwargs: Any) ->bytes:
"""call."""
headers = {'Content-Type': 'application/json', 'Authorization':
'Bearer ' + self.endpoint_api_key}
if self.deployment_name != '':
headers['azureml-model-deployment'] = self.deployment_name
req = urllib.request.Request(self.endpoint_url, body, headers)
response = urllib.request.urlopen(req, timeout=kwargs.get('timeout', 50))
result = response.read()
return result | call. |
test_scann_vector_sim_with_score_threshold | """Test vector similarity."""
texts = ['foo', 'bar', 'baz']
docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore({index_to_id[0]: Document(page_content
='foo'), index_to_id[1]: Document(page_content='bar'), index_to_id[2]:
Document(page_content='baz')})
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
query_vec = FakeEmbeddings().embed_query(text='foo')
output = docsearch.similarity_search_by_vector(query_vec, k=2,
score_threshold=0.2)
assert output == [Document(page_content='foo')] | def test_scann_vector_sim_with_score_threshold() ->None:
"""Test vector similarity."""
texts = ['foo', 'bar', 'baz']
docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore({index_to_id[0]: Document(
page_content='foo'), index_to_id[1]: Document(page_content='bar'),
index_to_id[2]: Document(page_content='baz')})
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
query_vec = FakeEmbeddings().embed_query(text='foo')
output = docsearch.similarity_search_by_vector(query_vec, k=2,
score_threshold=0.2)
assert output == [Document(page_content='foo')] | Test vector similarity. |
test_qdrant_similarity_search_with_relevance_score_no_threshold | """Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i, 'metadata': {'page': i + 1, 'pages': [i + 2, -1]}} for
i in range(len(texts))]
docsearch = Qdrant.from_texts(texts, ConsistentFakeEmbeddings(), metadatas=
metadatas, location=':memory:', vector_name=vector_name)
output = docsearch.similarity_search_with_relevance_scores('foo', k=3,
score_threshold=None)
assert len(output) == 3
for i in range(len(output)):
assert round(output[i][1], 2) >= 0
assert round(output[i][1], 2) <= 1 | @pytest.mark.parametrize('vector_name', [None, 'my-vector'])
def test_qdrant_similarity_search_with_relevance_score_no_threshold(vector_name
: Optional[str]) ->None:
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i, 'metadata': {'page': i + 1, 'pages': [i + 2, -
1]}} for i in range(len(texts))]
docsearch = Qdrant.from_texts(texts, ConsistentFakeEmbeddings(),
metadatas=metadatas, location=':memory:', vector_name=vector_name)
output = docsearch.similarity_search_with_relevance_scores('foo', k=3,
score_threshold=None)
assert len(output) == 3
for i in range(len(output)):
assert round(output[i][1], 2) >= 0
assert round(output[i][1], 2) <= 1 | Test end to end construction and search. |
full_table_id | return self._full_table_id | @property
def full_table_id(self) ->str:
return self._full_table_id | null |
test_results_with_custom_params | """Test that call gives correct answer with custom params."""
search = SearchApiAPIWrapper()
output = search.results('cafeteria', hl='es', gl='es', google_domain=
'google.es', location='Madrid, Spain')
assert 'Madrid' in output['search_information']['detected_location'] | def test_results_with_custom_params() ->None:
"""Test that call gives correct answer with custom params."""
search = SearchApiAPIWrapper()
output = search.results('cafeteria', hl='es', gl='es', google_domain=
'google.es', location='Madrid, Spain')
assert 'Madrid' in output['search_information']['detected_location'] | Test that call gives correct answer with custom params. |
test_pgvector_max_marginal_relevance_search_with_score | """Test max marginal relevance search with relevance scores."""
texts = ['foo', 'bar', 'baz']
docsearch = PGVector.from_texts(texts=texts, collection_name=
'test_collection', embedding=FakeEmbeddingsWithAdaDimension(),
connection_string=CONNECTION_STRING, pre_delete_collection=True)
output = docsearch.max_marginal_relevance_search_with_score('foo', k=1,
fetch_k=3)
assert output == [(Document(page_content='foo'), 0.0)] | def test_pgvector_max_marginal_relevance_search_with_score() ->None:
"""Test max marginal relevance search with relevance scores."""
texts = ['foo', 'bar', 'baz']
docsearch = PGVector.from_texts(texts=texts, collection_name=
'test_collection', embedding=FakeEmbeddingsWithAdaDimension(),
connection_string=CONNECTION_STRING, pre_delete_collection=True)
output = docsearch.max_marginal_relevance_search_with_score('foo', k=1,
fetch_k=3)
assert output == [(Document(page_content='foo'), 0.0)] | Test max marginal relevance search with relevance scores. |
search_results | from zep_python import MemorySearchResult, Message
search_result = [{'message': {'uuid':
'66830914-19f5-490b-8677-1ba06bcd556b', 'created_at':
'2023-05-18T20:40:42.743773Z', 'role': 'user', 'content':
"I'm looking to plan a trip to Iceland. Can you help me?",
'token_count': 17}, 'summary': None, 'dist': 0.8734284910450115}, {
'message': {'uuid': '015e618c-ba9d-45b6-95c3-77a8e611570b',
'created_at': '2023-05-18T20:40:42.743773Z', 'role': 'user', 'content':
'How much does a trip to Iceland typically cost?', 'token_count': 12},
'summary': None, 'dist': 0.8554048017463456}]
return [MemorySearchResult(message=Message.parse_obj(result['message']),
summary=result['summary'], dist=result['dist']) for result in search_result
] | @pytest.fixture
def search_results() ->List[MemorySearchResult]:
from zep_python import MemorySearchResult, Message
search_result = [{'message': {'uuid':
'66830914-19f5-490b-8677-1ba06bcd556b', 'created_at':
'2023-05-18T20:40:42.743773Z', 'role': 'user', 'content':
"I'm looking to plan a trip to Iceland. Can you help me?",
'token_count': 17}, 'summary': None, 'dist': 0.8734284910450115}, {
'message': {'uuid': '015e618c-ba9d-45b6-95c3-77a8e611570b',
'created_at': '2023-05-18T20:40:42.743773Z', 'role': 'user',
'content': 'How much does a trip to Iceland typically cost?',
'token_count': 12}, 'summary': None, 'dist': 0.8554048017463456}]
return [MemorySearchResult(message=Message.parse_obj(result['message']),
summary=result['summary'], dist=result['dist']) for result in
search_result] | null |
_get_dataforseo_api_search_json | return DataForSeoAPISearchResults(api_wrapper=DataForSeoAPIWrapper(**kwargs)) | def _get_dataforseo_api_search_json(**kwargs: Any) ->BaseTool:
return DataForSeoAPISearchResults(api_wrapper=DataForSeoAPIWrapper(**
kwargs)) | null |
test_placeholder | """Used for compiling integration tests without running any real tests."""
pass | @pytest.mark.compile
def test_placeholder() ->None:
"""Used for compiling integration tests without running any real tests."""
pass | Used for compiling integration tests without running any real tests. |
__init__ | """Initialize with access token, ids, and key.
Args:
access_token: The access token for the Figma REST API.
ids: The ids of the Figma file.
key: The key for the Figma file
"""
self.access_token = access_token
self.ids = ids
self.key = key | def __init__(self, access_token: str, ids: str, key: str):
"""Initialize with access token, ids, and key.
Args:
access_token: The access token for the Figma REST API.
ids: The ids of the Figma file.
key: The key for the Figma file
"""
self.access_token = access_token
self.ids = ids
self.key = key | Initialize with access token, ids, and key.
Args:
access_token: The access token for the Figma REST API.
ids: The ids of the Figma file.
key: The key for the Figma file |
_identifying_params | """Get the identifying parameters."""
return {**{'model_path': self.model_path}, **self._default_params} | @property
def _identifying_params(self) ->Dict[str, Any]:
"""Get the identifying parameters."""
return {**{'model_path': self.model_path}, **self._default_params} | Get the identifying parameters. |
__init__ | """Initialize the parser.
Args:
text_kwargs: Keyword arguments to pass to ``pdfplumber.Page.extract_text()``
dedupe: Avoid duplicated characters in the extracted text if `dedupe=True`.
"""
self.text_kwargs = text_kwargs or {}
self.dedupe = dedupe
self.extract_images = extract_images | def __init__(self, text_kwargs: Optional[Mapping[str, Any]]=None, dedupe:
bool=False, extract_images: bool=False) ->None:
"""Initialize the parser.
Args:
text_kwargs: Keyword arguments to pass to ``pdfplumber.Page.extract_text()``
dedupe: Avoid duplicated characters in the extracted text if `dedupe=True`.
"""
self.text_kwargs = text_kwargs or {}
self.dedupe = dedupe
self.extract_images = extract_images | Initialize the parser.
Args:
text_kwargs: Keyword arguments to pass to ``pdfplumber.Page.extract_text()``
dedupe: Avoid duplicated characters in the extracted text if `dedupe=True`. |
test_mget | store = UpstashRedisByteStore(client=redis_client, ttl=None)
keys = ['key1', 'key2']
redis_client.mset({'key1': 'value1', 'key2': 'value2'})
result = store.mget(keys)
assert result == [b'value1', b'value2'] | def test_mget(redis_client: Redis) ->None:
store = UpstashRedisByteStore(client=redis_client, ttl=None)
keys = ['key1', 'key2']
redis_client.mset({'key1': 'value1', 'key2': 'value2'})
result = store.mget(keys)
assert result == [b'value1', b'value2'] | null |
_identifying_params | """Gets the identifying parameters."""
return {**{'model_name': self.model_name}, **self._default_params} | @property
def _identifying_params(self) ->Dict[str, Any]:
"""Gets the identifying parameters."""
return {**{'model_name': self.model_name}, **self._default_params} | Gets the identifying parameters. |
_identifying_params | """Get the identifying parameters."""
return {**{'model_name': self.model_name}, **self._default_params} | @property
def _identifying_params(self) ->Dict[str, Any]:
"""Get the identifying parameters."""
return {**{'model_name': self.model_name}, **self._default_params} | Get the identifying parameters. |
__getitem__ | return getattr(self, item) | def __getitem__(self, item: str) ->Any:
return getattr(self, item) | null |
_import_supabase | from langchain_community.vectorstores.supabase import SupabaseVectorStore
return SupabaseVectorStore | def _import_supabase() ->Any:
from langchain_community.vectorstores.supabase import SupabaseVectorStore
return SupabaseVectorStore | null |
assert_docs | for doc in docs:
assert doc.metadata
assert set(doc.metadata) == {'Copyright Information', 'uid', 'Title',
'Published'} | def assert_docs(docs: List[Document]) ->None:
for doc in docs:
assert doc.metadata
assert set(doc.metadata) == {'Copyright Information', 'uid',
'Title', 'Published'} | null |
__init__ | """Initialize with bagel client"""
try:
import bagel
import bagel.config
except ImportError:
raise ImportError('Please install bagel `pip install betabageldb`.')
if client is not None:
self._client_settings = client_settings
self._client = client
else:
if client_settings:
_client_settings = client_settings
else:
_client_settings = bagel.config.Settings(bagel_api_impl='rest',
bagel_server_host='api.bageldb.ai')
self._client_settings = _client_settings
self._client = bagel.Client(_client_settings)
self._cluster = self._client.get_or_create_cluster(name=cluster_name,
metadata=cluster_metadata)
self.override_relevance_score_fn = relevance_score_fn
self._embedding_function = embedding_function | def __init__(self, cluster_name: str=_LANGCHAIN_DEFAULT_CLUSTER_NAME,
client_settings: Optional[bagel.config.Settings]=None,
embedding_function: Optional[Embeddings]=None, cluster_metadata:
Optional[Dict]=None, client: Optional[bagel.Client]=None,
relevance_score_fn: Optional[Callable[[float], float]]=None) ->None:
"""Initialize with bagel client"""
try:
import bagel
import bagel.config
except ImportError:
raise ImportError('Please install bagel `pip install betabageldb`.')
if client is not None:
self._client_settings = client_settings
self._client = client
else:
if client_settings:
_client_settings = client_settings
else:
_client_settings = bagel.config.Settings(bagel_api_impl='rest',
bagel_server_host='api.bageldb.ai')
self._client_settings = _client_settings
self._client = bagel.Client(_client_settings)
self._cluster = self._client.get_or_create_cluster(name=cluster_name,
metadata=cluster_metadata)
self.override_relevance_score_fn = relevance_score_fn
self._embedding_function = embedding_function | Initialize with bagel client |
__from | if metric not in INDEX_METRICS:
raise ValueError(
f'Unsupported distance metric: {metric}. Expected one of {list(INDEX_METRICS)}'
)
tiledb_vs, tiledb = dependable_tiledb_import()
input_vectors = np.array(embeddings).astype(np.float32)
cls.create(index_uri=index_uri, index_type=index_type, dimensions=
input_vectors.shape[1], vector_type=input_vectors.dtype, metadatas=
metadatas is not None, config=config)
with tiledb.scope_ctx(ctx_or_config=config):
if not embeddings:
raise ValueError('embeddings must be provided to build a TileDB index')
vector_index_uri = get_vector_index_uri(index_uri)
docs_uri = get_documents_array_uri(index_uri)
if ids is None:
ids = [str(random.randint(0, MAX_UINT64 - 1)) for _ in texts]
external_ids = np.array(ids).astype(np.uint64)
tiledb_vs.ingestion.ingest(index_type=index_type, index_uri=
vector_index_uri, input_vectors=input_vectors, external_ids=
external_ids, index_timestamp=index_timestamp if index_timestamp !=
0 else None, config=config, **kwargs)
with tiledb.open(docs_uri, 'w') as A:
if external_ids is None:
external_ids = np.zeros(len(texts), dtype=np.uint64)
for i in range(len(texts)):
external_ids[i] = i
data = {}
data['text'] = np.array(texts)
if metadatas is not None:
metadata_attr = np.empty([len(metadatas)], dtype=object)
i = 0
for metadata in metadatas:
metadata_attr[i] = np.frombuffer(pickle.dumps(metadata),
dtype=np.uint8)
i += 1
data['metadata'] = metadata_attr
A[external_ids] = data
return cls(embedding=embedding, index_uri=index_uri, metric=metric, config=
config, **kwargs) | @classmethod
def __from(cls, texts: List[str], embeddings: List[List[float]], embedding:
Embeddings, index_uri: str, *, metadatas: Optional[List[dict]]=None,
ids: Optional[List[str]]=None, metric: str=DEFAULT_METRIC, index_type:
str='FLAT', config: Optional[Mapping[str, Any]]=None, index_timestamp:
int=0, **kwargs: Any) ->TileDB:
if metric not in INDEX_METRICS:
raise ValueError(
f'Unsupported distance metric: {metric}. Expected one of {list(INDEX_METRICS)}'
)
tiledb_vs, tiledb = dependable_tiledb_import()
input_vectors = np.array(embeddings).astype(np.float32)
cls.create(index_uri=index_uri, index_type=index_type, dimensions=
input_vectors.shape[1], vector_type=input_vectors.dtype, metadatas=
metadatas is not None, config=config)
with tiledb.scope_ctx(ctx_or_config=config):
if not embeddings:
raise ValueError(
'embeddings must be provided to build a TileDB index')
vector_index_uri = get_vector_index_uri(index_uri)
docs_uri = get_documents_array_uri(index_uri)
if ids is None:
ids = [str(random.randint(0, MAX_UINT64 - 1)) for _ in texts]
external_ids = np.array(ids).astype(np.uint64)
tiledb_vs.ingestion.ingest(index_type=index_type, index_uri=
vector_index_uri, input_vectors=input_vectors, external_ids=
external_ids, index_timestamp=index_timestamp if
index_timestamp != 0 else None, config=config, **kwargs)
with tiledb.open(docs_uri, 'w') as A:
if external_ids is None:
external_ids = np.zeros(len(texts), dtype=np.uint64)
for i in range(len(texts)):
external_ids[i] = i
data = {}
data['text'] = np.array(texts)
if metadatas is not None:
metadata_attr = np.empty([len(metadatas)], dtype=object)
i = 0
for metadata in metadatas:
metadata_attr[i] = np.frombuffer(pickle.dumps(metadata),
dtype=np.uint8)
i += 1
data['metadata'] = metadata_attr
A[external_ids] = data
return cls(embedding=embedding, index_uri=index_uri, metric=metric,
config=config, **kwargs) | null |
visit_comparison | if isinstance(comparison.value, list):
return self.visit_operation(Operation(operator=Operator.AND, arguments=
(Comparison(comparator=comparison.comparator, attribute=comparison.
attribute, value=value) for value in comparison.value)))
return '.'.join([
f'{self.metadata_column}{self._get_json_operator(comparison.value)}{comparison.attribute}'
, f'{self._map_comparator(comparison.comparator)}', f'{comparison.value}']) | def visit_comparison(self, comparison: Comparison) ->str:
if isinstance(comparison.value, list):
return self.visit_operation(Operation(operator=Operator.AND,
arguments=(Comparison(comparator=comparison.comparator,
attribute=comparison.attribute, value=value) for value in
comparison.value)))
return '.'.join([
f'{self.metadata_column}{self._get_json_operator(comparison.value)}{comparison.attribute}'
, f'{self._map_comparator(comparison.comparator)}',
f'{comparison.value}']) | null |
get_named_result | """
Get a named result from a query.
Args:
connection: The connection to the database
query: The query to execute
Returns:
List[dict[str, Any]]: The result of the query
"""
cursor = connection.cursor()
cursor.execute(query)
columns = cursor.description
result = []
for value in cursor.fetchall():
r = {}
for idx, datum in enumerate(value):
k = columns[idx][0]
r[k] = datum
result.append(r)
debug_output(result)
cursor.close()
return result | def get_named_result(connection: Any, query: str) ->List[dict[str, Any]]:
"""
Get a named result from a query.
Args:
connection: The connection to the database
query: The query to execute
Returns:
List[dict[str, Any]]: The result of the query
"""
cursor = connection.cursor()
cursor.execute(query)
columns = cursor.description
result = []
for value in cursor.fetchall():
r = {}
for idx, datum in enumerate(value):
k = columns[idx][0]
r[k] = datum
result.append(r)
debug_output(result)
cursor.close()
return result | Get a named result from a query.
Args:
connection: The connection to the database
query: The query to execute
Returns:
List[dict[str, Any]]: The result of the query |
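The cursor.description pattern above is easy to exercise against an in-memory SQLite database; this is a hedged, self-contained sketch (sqlite3 stands in for whatever DB-API connection the function normally receives, and the debug_output call is dropped).

import sqlite3

def get_named_result(connection, query):
    # Same shape as above: map each row to a dict keyed by the column names.
    cursor = connection.cursor()
    cursor.execute(query)
    columns = cursor.description
    result = []
    for value in cursor.fetchall():
        result.append({columns[idx][0]: datum for idx, datum in enumerate(value)})
    cursor.close()
    return result

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE t (id INTEGER, name TEXT)")
conn.execute("INSERT INTO t VALUES (1, 'foo'), (2, 'bar')")
print(get_named_result(conn, "SELECT * FROM t"))
# [{'id': 1, 'name': 'foo'}, {'id': 2, 'name': 'bar'}]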
input_keys | extra_keys = [k for k in self.llm_chain.input_keys if k != self.
document_variable_name]
return super().input_keys + extra_keys | @property
def input_keys(self) ->List[str]:
extra_keys = [k for k in self.llm_chain.input_keys if k != self.
document_variable_name]
return super().input_keys + extra_keys | null |
_import_promptlayer_chat | from langchain_community.llms.promptlayer_openai import PromptLayerOpenAIChat
return PromptLayerOpenAIChat | def _import_promptlayer_chat() ->Any:
from langchain_community.llms.promptlayer_openai import PromptLayerOpenAIChat
return PromptLayerOpenAIChat | null |
elasticsearch_connection | from elasticsearch import Elasticsearch
es_url = os.environ.get('ES_URL', 'http://localhost:9200')
cloud_id = os.environ.get('ES_CLOUD_ID')
es_username = os.environ.get('ES_USERNAME', 'elastic')
es_password = os.environ.get('ES_PASSWORD', 'changeme')
if cloud_id:
es = Elasticsearch(cloud_id=cloud_id, basic_auth=(es_username, es_password)
)
yield {'es_cloud_id': cloud_id, 'es_user': es_username, 'es_password':
es_password}
else:
es = Elasticsearch(hosts=es_url)
yield {'es_url': es_url}
index_names = es.indices.get(index='_all').keys()
for index_name in index_names:
if index_name.startswith('test_'):
es.indices.delete(index=index_name)
es.indices.refresh(index='_all')
try:
response = es.ingest.get_pipeline(id='test_*,*_sparse_embedding')
for pipeline_id, _ in response.items():
try:
es.ingest.delete_pipeline(id=pipeline_id)
print(f'Deleted pipeline: {pipeline_id}')
except Exception as e:
print(f'Pipeline error: {e}')
except Exception:
pass | @pytest.fixture(scope='class', autouse=True)
def elasticsearch_connection(self) ->Union[dict, Generator[dict, None, None]]:
from elasticsearch import Elasticsearch
es_url = os.environ.get('ES_URL', 'http://localhost:9200')
cloud_id = os.environ.get('ES_CLOUD_ID')
es_username = os.environ.get('ES_USERNAME', 'elastic')
es_password = os.environ.get('ES_PASSWORD', 'changeme')
if cloud_id:
es = Elasticsearch(cloud_id=cloud_id, basic_auth=(es_username,
es_password))
yield {'es_cloud_id': cloud_id, 'es_user': es_username,
'es_password': es_password}
else:
es = Elasticsearch(hosts=es_url)
yield {'es_url': es_url}
index_names = es.indices.get(index='_all').keys()
for index_name in index_names:
if index_name.startswith('test_'):
es.indices.delete(index=index_name)
es.indices.refresh(index='_all')
try:
response = es.ingest.get_pipeline(id='test_*,*_sparse_embedding')
for pipeline_id, _ in response.items():
try:
es.ingest.delete_pipeline(id=pipeline_id)
print(f'Deleted pipeline: {pipeline_id}')
except Exception as e:
print(f'Pipeline error: {e}')
except Exception:
pass | null |
on_llm_start | """Run when LLM starts."""
self.step += 1
self.llm_starts += 1
self.starts += 1
metadata = self._init_resp()
metadata.update({'action': 'on_llm_start'})
metadata.update(flatten_dict(serialized))
metadata.update(self.get_custom_callback_meta())
for prompt in prompts:
prompt_resp = deepcopy(metadata)
prompt_resp['prompts'] = prompt
self.on_llm_start_records.append(prompt_resp)
self.action_records.append(prompt_resp)
if self.stream_logs:
self._log_stream(prompt, metadata, self.step) | def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **
kwargs: Any) ->None:
"""Run when LLM starts."""
self.step += 1
self.llm_starts += 1
self.starts += 1
metadata = self._init_resp()
metadata.update({'action': 'on_llm_start'})
metadata.update(flatten_dict(serialized))
metadata.update(self.get_custom_callback_meta())
for prompt in prompts:
prompt_resp = deepcopy(metadata)
prompt_resp['prompts'] = prompt
self.on_llm_start_records.append(prompt_resp)
self.action_records.append(prompt_resp)
if self.stream_logs:
self._log_stream(prompt, metadata, self.step) | Run when LLM starts. |
test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emakeep | feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False,
model=MockEncoder())
str1 = '0'
str2 = '1'
str3 = '2'
encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1))
encoded_str3 = rl_chain.stringify_embedding(list(encoded_keyword + str3))
ctx_str_1 = 'context1'
ctx_str_2 = 'context2'
encoded_ctx_str_2 = rl_chain.stringify_embedding(list(encoded_keyword +
ctx_str_2))
named_actions = {'action1': [{'a': str1, 'b': rl_chain.EmbedAndKeep(str1)},
str2, rl_chain.EmbedAndKeep(str3)]}
context = {'context1': ctx_str_1, 'context2': rl_chain.EmbedAndKeep(ctx_str_2)}
expected = f"""shared |context1 {ctx_str_1} |context2 {ctx_str_2 + ' ' + encoded_ctx_str_2}
0:-0.0:1.0 |a {str1} |b {str1 + ' ' + encoded_str1}
|action1 {str2}
|action1 {str3 + ' ' + encoded_str3} """
selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, score=0.0
)
event = pick_best_chain.PickBestEvent(inputs={}, to_select_from=
named_actions, based_on=context, selected=selected)
vw_ex_str = feature_embedder.format(event)
assert vw_ex_str == expected | @pytest.mark.requires('vowpal_wabbit_next')
def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emakeep(
) ->None:
feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=
False, model=MockEncoder())
str1 = '0'
str2 = '1'
str3 = '2'
encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1))
encoded_str3 = rl_chain.stringify_embedding(list(encoded_keyword + str3))
ctx_str_1 = 'context1'
ctx_str_2 = 'context2'
encoded_ctx_str_2 = rl_chain.stringify_embedding(list(encoded_keyword +
ctx_str_2))
named_actions = {'action1': [{'a': str1, 'b': rl_chain.EmbedAndKeep(
str1)}, str2, rl_chain.EmbedAndKeep(str3)]}
context = {'context1': ctx_str_1, 'context2': rl_chain.EmbedAndKeep(
ctx_str_2)}
expected = f"""shared |context1 {ctx_str_1} |context2 {ctx_str_2 + ' ' + encoded_ctx_str_2}
0:-0.0:1.0 |a {str1} |b {str1 + ' ' + encoded_str1}
|action1 {str2}
|action1 {str3 + ' ' + encoded_str3} """
selected = pick_best_chain.PickBestSelected(index=0, probability=1.0,
score=0.0)
event = pick_best_chain.PickBestEvent(inputs={}, to_select_from=
named_actions, based_on=context, selected=selected)
vw_ex_str = feature_embedder.format(event)
assert vw_ex_str == expected | null |
test_get_nfts_valid_contract | max_alchemy_tokens = 100
contract_address = '0x1a92f7381b9f03921564a437210bb9396471050c'
result = BlockchainDocumentLoader(contract_address).load()
print('Tokens returned for valid contract: ', len(result))
assert len(result
) == max_alchemy_tokens, f'Wrong number of NFTs returned. Expected {max_alchemy_tokens}, got {len(result)}' | @pytest.mark.skipif(not alchemyKeySet, reason='Alchemy API key not provided.')
def test_get_nfts_valid_contract() ->None:
max_alchemy_tokens = 100
contract_address = '0x1a92f7381b9f03921564a437210bb9396471050c'
result = BlockchainDocumentLoader(contract_address).load()
print('Tokens returned for valid contract: ', len(result))
assert len(result
) == max_alchemy_tokens, f'Wrong number of NFTs returned. Expected {max_alchemy_tokens}, got {len(result)}' | null |
_format_func | self._validate_func(func)
comp_operator_map = {Comparator.EQ: 'term', Comparator.LT: 'lt', Comparator
.LTE: 'lte', Comparator.GT: 'gt', Comparator.GTE: 'gte', Comparator.
CONTAIN: 'match', Comparator.LIKE: 'fuzzy', Operator.AND: 'must',
Operator.OR: 'should', Operator.NOT: 'must_not'}
return comp_operator_map[func] | def _format_func(self, func: Union[Operator, Comparator]) ->str:
self._validate_func(func)
comp_operator_map = {Comparator.EQ: 'term', Comparator.LT: 'lt',
Comparator.LTE: 'lte', Comparator.GT: 'gt', Comparator.GTE: 'gte',
Comparator.CONTAIN: 'match', Comparator.LIKE: 'fuzzy', Operator.AND:
'must', Operator.OR: 'should', Operator.NOT: 'must_not'}
return comp_operator_map[func] | null |
on_chain_error | if parent_run_id is None:
self.increment() | def on_chain_error(self, error: BaseException, *, run_id: UUID,
parent_run_id: Optional[UUID]=None, **kwargs: Any) ->Any:
if parent_run_id is None:
self.increment() | null |
endpoint_path | return self.client.endpoint_path(project=self.project, location=self.
location, endpoint=self.endpoint_id) | @property
def endpoint_path(self) ->str:
return self.client.endpoint_path(project=self.project, location=self.
location, endpoint=self.endpoint_id) | null |
wait_for_futures | """Wait for all futures to complete."""
wait(self.futures) | def wait_for_futures(self) ->None:
"""Wait for all futures to complete."""
wait(self.futures) | Wait for all futures to complete. |
test__convert_message_to_dict_system | message = SystemMessage(content='foo')
with pytest.raises(TypeError) as e:
_convert_message_to_dict(message)
assert 'Got unknown type' in str(e) | def test__convert_message_to_dict_system() ->None:
message = SystemMessage(content='foo')
with pytest.raises(TypeError) as e:
_convert_message_to_dict(message)
assert 'Got unknown type' in str(e) | null |
import_mlflow | """Import the mlflow python package and raise an error if it is not installed."""
try:
import mlflow
except ImportError:
raise ImportError(
'To use the mlflow callback manager you need to have the `mlflow` python package installed. Please install it with `pip install mlflow>=2.3.0`'
)
return mlflow | def import_mlflow() ->Any:
"""Import the mlflow python package and raise an error if it is not installed."""
try:
import mlflow
except ImportError:
raise ImportError(
'To use the mlflow callback manager you need to have the `mlflow` python package installed. Please install it with `pip install mlflow>=2.3.0`'
)
return mlflow | Import the mlflow python package and raise an error if it is not installed. |
combine_documents | """
Combine a list of documents into a single string that might be passed further down
to a language model.
:param documents: list of documents to combine
:return:
"""
formatter = Formatter()
return '\n\n'.join(formatter.format(document_template, page_content=
document.page_content, metadata=document.metadata) for document in
documents) | def combine_documents(documents: List[Document]) ->str:
"""
Combine a list of documents into a single string that might be passed further down
to a language model.
:param documents: list of documents to combine
:return:
"""
formatter = Formatter()
return '\n\n'.join(formatter.format(document_template, page_content=
document.page_content, metadata=document.metadata) for document in
documents) | Combine a list of documents into a single string that might be passed further down
to a language model.
:param documents: list of documents to combine
:return: |
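Since document_template is defined outside the snippet, here is a hedged, standalone sketch with an assumed template and a lightweight Document stand-in so it runs on its own.

from string import Formatter
from dataclasses import dataclass, field

# Lightweight stand-ins for illustration; the real code uses langchain's Document
# and a module-level document_template string.
@dataclass
class Document:
    page_content: str
    metadata: dict = field(default_factory=dict)

document_template = "Content: {page_content}\nMetadata: {metadata}"
formatter = Formatter()
docs = [Document("foo", {"page": 1}), Document("bar", {"page": 2})]
print("\n\n".join(
    formatter.format(document_template, page_content=d.page_content, metadata=d.metadata)
    for d in docs
))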
on_chain_error | """Run when chain errors."""
self.step += 1
self.errors += 1 | def on_chain_error(self, error: BaseException, **kwargs: Any) ->None:
"""Run when chain errors."""
self.step += 1
self.errors += 1 | Run when chain errors. |
step | """Take step."""
response = self.chain.run(**inputs, callbacks=callbacks)
return StepResponse(response=response) | def step(self, inputs: dict, callbacks: Callbacks=None, **kwargs: Any
) ->StepResponse:
"""Take step."""
response = self.chain.run(**inputs, callbacks=callbacks)
return StepResponse(response=response) | Take step. |
from_texts | """
Create and initialize a Bagel instance from list of texts.
Args:
texts (List[str]): List of text content to be added.
cluster_name (str): The name of the BagelDB cluster.
client_settings (Optional[bagel.config.Settings]): Client settings.
cluster_metadata (Optional[Dict]): Metadata of the cluster.
embeddings (Optional[Embeddings]): List of embedding.
metadatas (Optional[List[dict]]): List of metadata.
ids (Optional[List[str]]): List of unique ID. Defaults to None.
client (Optional[bagel.Client]): Bagel client instance.
Returns:
Bagel: Bagel vectorstore.
"""
bagel_cluster = cls(cluster_name=cluster_name, embedding_function=embedding,
client_settings=client_settings, client=client, cluster_metadata=
cluster_metadata, **kwargs)
_ = bagel_cluster.add_texts(texts=texts, embeddings=text_embeddings,
metadatas=metadatas, ids=ids)
return bagel_cluster | @classmethod
def from_texts(cls: Type[Bagel], texts: List[str], embedding: Optional[
Embeddings]=None, metadatas: Optional[List[dict]]=None, ids: Optional[
List[str]]=None, cluster_name: str=_LANGCHAIN_DEFAULT_CLUSTER_NAME,
client_settings: Optional[bagel.config.Settings]=None, cluster_metadata:
Optional[Dict]=None, client: Optional[bagel.Client]=None,
text_embeddings: Optional[List[List[float]]]=None, **kwargs: Any) ->Bagel:
"""
Create and initialize a Bagel instance from list of texts.
Args:
texts (List[str]): List of text content to be added.
cluster_name (str): The name of the BagelDB cluster.
client_settings (Optional[bagel.config.Settings]): Client settings.
cluster_metadata (Optional[Dict]): Metadata of the cluster.
embeddings (Optional[Embeddings]): List of embedding.
metadatas (Optional[List[dict]]): List of metadata.
ids (Optional[List[str]]): List of unique ID. Defaults to None.
client (Optional[bagel.Client]): Bagel client instance.
Returns:
Bagel: Bagel vectorstore.
"""
bagel_cluster = cls(cluster_name=cluster_name, embedding_function=
embedding, client_settings=client_settings, client=client,
cluster_metadata=cluster_metadata, **kwargs)
_ = bagel_cluster.add_texts(texts=texts, embeddings=text_embeddings,
metadatas=metadatas, ids=ids)
return bagel_cluster | Create and initialize a Bagel instance from list of texts.
Args:
texts (List[str]): List of text content to be added.
cluster_name (str): The name of the BagelDB cluster.
client_settings (Optional[bagel.config.Settings]): Client settings.
cluster_metadata (Optional[Dict]): Metadata of the cluster.
embeddings (Optional[Embeddings]): List of embedding.
metadatas (Optional[List[dict]]): List of metadata.
ids (Optional[List[str]]): List of unique ID. Defaults to None.
client (Optional[bagel.Client]): Bagel client instance.
Returns:
Bagel: Bagel vectorstore. |
test_extract_paragraphs | html2text_transformer = Html2TextTransformer()
paragraphs_html = (
'<html><h1>Header</h1><p>First paragraph.</p><p>Second paragraph.</p><h1>Ignore at end</h1></html>'
)
documents = [Document(page_content=paragraphs_html)]
docs_transformed = html2text_transformer.transform_documents(documents)
assert docs_transformed[0].page_content == """# Header
First paragraph.
Second paragraph.
# Ignore at end
""" | @pytest.mark.requires('html2text')
def test_extract_paragraphs() ->None:
html2text_transformer = Html2TextTransformer()
paragraphs_html = (
'<html><h1>Header</h1><p>First paragraph.</p><p>Second paragraph.</p><h1>Ignore at end</h1></html>'
)
documents = [Document(page_content=paragraphs_html)]
docs_transformed = html2text_transformer.transform_documents(documents)
assert docs_transformed[0].page_content == """# Header
First paragraph.
Second paragraph.
# Ignore at end
""" | null |
test_all_imports | assert set(__all__) == set(EXPECTED_ALL) | def test_all_imports() ->None:
assert set(__all__) == set(EXPECTED_ALL) | null |
from_llm | """Initialize from llm using default template.
Args:
retriever: retriever to query documents from
llm: llm for query generation using DEFAULT_QUERY_PROMPT
include_original: Whether to include the original query in the list of
generated queries.
Returns:
MultiQueryRetriever
"""
output_parser = LineListOutputParser()
llm_chain = LLMChain(llm=llm, prompt=prompt, output_parser=output_parser)
return cls(retriever=retriever, llm_chain=llm_chain, parser_key=parser_key,
include_original=include_original) | @classmethod
def from_llm(cls, retriever: BaseRetriever, llm: BaseLLM, prompt:
PromptTemplate=DEFAULT_QUERY_PROMPT, parser_key: str='lines',
include_original: bool=False) ->'MultiQueryRetriever':
"""Initialize from llm using default template.
Args:
retriever: retriever to query documents from
llm: llm for query generation using DEFAULT_QUERY_PROMPT
include_original: Whether to include the original query in the list of
generated queries.
Returns:
MultiQueryRetriever
"""
output_parser = LineListOutputParser()
llm_chain = LLMChain(llm=llm, prompt=prompt, output_parser=output_parser)
return cls(retriever=retriever, llm_chain=llm_chain, parser_key=
parser_key, include_original=include_original) | Initialize from llm using default template.
Args:
retriever: retriever to query documents from
llm: llm for query generation using DEFAULT_QUERY_PROMPT
include_original: Whether to include the original query in the list of
generated queries.
Returns:
MultiQueryRetriever |
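A hedged wiring sketch: the fake LLM and fake embeddings below are stand-ins so no API key or real model is needed (assumes langchain, langchain-community and faiss-cpu are installed); in practice any BaseRetriever/BaseLLM pair slots in.

from langchain.retrievers.multi_query import MultiQueryRetriever
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.llms.fake import FakeListLLM
from langchain_community.vectorstores import FAISS

retriever = FAISS.from_texts(["foo", "bar", "baz"], FakeEmbeddings(size=8)).as_retriever()
# The fake LLM returns one canned "list of alternative queries" response.
llm = FakeListLLM(responses=["variant one\nvariant two\nvariant three"])
mqr = MultiQueryRetriever.from_llm(retriever=retriever, llm=llm)
print(mqr.get_relevant_documents("foo"))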
load_schemas | """
Args:
str_schemas: string of schemas
"""
values = str_schemas.replace('(', '').replace(')', '').split(',')
schemas = []
for i in range(len(values) // 3):
schemas.append(Schema(values[i * 3].strip(), values[i * 3 + 1].strip(),
values[i * 3 + 2].strip()))
return schemas | def load_schemas(str_schemas: str) ->List[Schema]:
"""
Args:
str_schemas: string of schemas
"""
values = str_schemas.replace('(', '').replace(')', '').split(',')
schemas = []
for i in range(len(values) // 3):
schemas.append(Schema(values[i * 3].strip(), values[i * 3 + 1].
strip(), values[i * 3 + 2].strip()))
return schemas | Args:
str_schemas: string of schemas |
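The triplet parsing is easiest to see on a small input; the sketch below reuses the same logic with a namedtuple standing in for the Schema class (its three field names are assumptions for illustration).

from collections import namedtuple

Schema = namedtuple("Schema", ["subject", "predicate", "object"])  # stand-in; field names assumed

def load_schemas(str_schemas):
    # Strip parentheses, split on commas, then regroup the values three at a time.
    values = str_schemas.replace("(", "").replace(")", "").split(",")
    return [
        Schema(values[i * 3].strip(), values[i * 3 + 1].strip(), values[i * 3 + 2].strip())
        for i in range(len(values) // 3)
    ]

print(load_schemas("(Person, knows, Person), (Person, wrote, Book)"))
# [Schema(subject='Person', predicate='knows', object='Person'),
#  Schema(subject='Person', predicate='wrote', object='Book')]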
validate_environment_override | """Validate that api key and python package exist in environment."""
values['openai_api_key'] = get_from_dict_or_env(values, 'anyscale_api_key',
'ANYSCALE_API_KEY')
values['anyscale_api_key'] = convert_to_secret_str(get_from_dict_or_env(
values, 'anyscale_api_key', 'ANYSCALE_API_KEY'))
values['openai_api_base'] = get_from_dict_or_env(values,
'anyscale_api_base', 'ANYSCALE_API_BASE', default=DEFAULT_API_BASE)
values['openai_proxy'] = get_from_dict_or_env(values, 'anyscale_proxy',
'ANYSCALE_PROXY', default='')
try:
import openai
except ImportError as e:
raise ValueError(
'Could not import openai python package. Please install it with `pip install openai`.'
) from e
try:
if is_openai_v1():
client_params = {'api_key': values['openai_api_key'], 'base_url':
values['openai_api_base']}
values['client'] = openai.OpenAI(**client_params).chat.completions
else:
values['client'] = openai.ChatCompletion
except AttributeError as exc:
raise ValueError(
'`openai` has no `ChatCompletion` attribute, this is likely due to an old version of the openai package. Try upgrading it with `pip install --upgrade openai`.'
) from exc
if 'model_name' not in values.keys():
values['model_name'] = DEFAULT_MODEL
model_name = values['model_name']
available_models = cls.get_available_models(values['openai_api_key'],
values['openai_api_base'])
if model_name not in available_models:
raise ValueError(
f'Model name {model_name} not found in available models: {available_models}.'
)
values['available_models'] = available_models
return values | @root_validator(pre=True)
def validate_environment_override(cls, values: dict) ->dict:
"""Validate that api key and python package exists in environment."""
values['openai_api_key'] = get_from_dict_or_env(values,
'anyscale_api_key', 'ANYSCALE_API_KEY')
values['anyscale_api_key'] = convert_to_secret_str(get_from_dict_or_env
(values, 'anyscale_api_key', 'ANYSCALE_API_KEY'))
values['openai_api_base'] = get_from_dict_or_env(values,
'anyscale_api_base', 'ANYSCALE_API_BASE', default=DEFAULT_API_BASE)
values['openai_proxy'] = get_from_dict_or_env(values, 'anyscale_proxy',
'ANYSCALE_PROXY', default='')
try:
import openai
except ImportError as e:
raise ValueError(
'Could not import openai python package. Please install it with `pip install openai`.'
) from e
try:
if is_openai_v1():
client_params = {'api_key': values['openai_api_key'],
'base_url': values['openai_api_base']}
values['client'] = openai.OpenAI(**client_params).chat.completions
else:
values['client'] = openai.ChatCompletion
except AttributeError as exc:
raise ValueError(
'`openai` has no `ChatCompletion` attribute, this is likely due to an old version of the openai package. Try upgrading it with `pip install --upgrade openai`.'
) from exc
if 'model_name' not in values.keys():
values['model_name'] = DEFAULT_MODEL
model_name = values['model_name']
available_models = cls.get_available_models(values['openai_api_key'],
values['openai_api_base'])
if model_name not in available_models:
raise ValueError(
f'Model name {model_name} not found in available models: {available_models}.'
)
values['available_models'] = available_models
return values | Validate that api key and python package exist in environment. |
on_chain_error | self.on_chain_error_common() | def on_chain_error(self, *args: Any, **kwargs: Any) ->Any:
self.on_chain_error_common() | null |
on_tool_start_common | self.tool_starts += 1
self.starts += 1 | def on_tool_start_common(self) ->None:
self.tool_starts += 1
self.starts += 1 | null |
_stop | """
Stop the iterator and raise a StopIteration exception with the stopped response.
"""
logger.warning('Stopping agent prematurely due to triggering stop condition')
output = self.agent_executor.agent.return_stopped_response(self.
agent_executor.early_stopping_method, self.intermediate_steps, **self.
inputs)
return self._return(output, run_manager=run_manager) | def _stop(self, run_manager: CallbackManagerForChainRun) ->AddableDict:
"""
Stop the iterator and raise a StopIteration exception with the stopped response.
"""
logger.warning(
'Stopping agent prematurely due to triggering stop condition')
output = self.agent_executor.agent.return_stopped_response(self.
agent_executor.early_stopping_method, self.intermediate_steps, **
self.inputs)
return self._return(output, run_manager=run_manager) | Stop the iterator and raise a StopIteration exception with the stopped response. |
test_json_validity_evaluator_evaluate_invalid_json | prediction = '{"name": "John", "age": 30, "city": "New York",}'
result = json_validity_evaluator.evaluate_strings(prediction=prediction)
assert result['score'] == 0
assert result['reasoning'].startswith(
'Expecting property name enclosed in double quotes') | def test_json_validity_evaluator_evaluate_invalid_json(json_validity_evaluator:
JsonValidityEvaluator) ->None:
prediction = '{"name": "John", "age": 30, "city": "New York",}'
result = json_validity_evaluator.evaluate_strings(prediction=prediction)
assert result['score'] == 0
assert result['reasoning'].startswith(
'Expecting property name enclosed in double quotes') | null |
_llm_type | return 'koboldai' | @property
def _llm_type(self) ->str:
return 'koboldai' | null |
is_lc_serializable | """Return whether this model can be serialized by Langchain."""
return False | @classmethod
def is_lc_serializable(cls) ->bool:
"""Return whether this model can be serialized by Langchain."""
return False | Return whether this model can be serialized by Langchain. |
run | """Convenience method for executing chain.
The main difference between this method and `Chain.__call__` is that this
method expects inputs to be passed directly in as positional arguments or
keyword arguments, whereas `Chain.__call__` expects a single input dictionary
with all the inputs
Args:
*args: If the chain expects a single input, it can be passed in as the
sole positional argument.
callbacks: Callbacks to use for this chain run. These will be called in
addition to callbacks passed to the chain during construction, but only
these runtime callbacks will propagate to calls to other objects.
tags: List of string tags to pass to all callbacks. These will be passed in
addition to tags passed to the chain during construction, but only
these runtime tags will propagate to calls to other objects.
**kwargs: If the chain expects multiple inputs, they can be passed in
directly as keyword arguments.
Returns:
The chain output.
Example:
.. code-block:: python
# Suppose we have a single-input chain that takes a 'question' string:
chain.run("What's the temperature in Boise, Idaho?")
# -> "The temperature in Boise is..."
# Suppose we have a multi-input chain that takes a 'question' string
# and 'context' string:
question = "What's the temperature in Boise, Idaho?"
context = "Weather report for Boise, Idaho on 07/03/23..."
chain.run(question=question, context=context)
# -> "The temperature in Boise is..."
"""
_output_key = self._run_output_key
if args and not kwargs:
if len(args) != 1:
raise ValueError('`run` supports only one positional argument.')
return self(args[0], callbacks=callbacks, tags=tags, metadata=metadata)[
_output_key]
if kwargs and not args:
return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[
_output_key]
if not kwargs and not args:
raise ValueError(
'`run` supported with either positional arguments or keyword arguments, but none were provided.'
)
else:
raise ValueError(
f'`run` supported with either positional arguments or keyword arguments but not both. Got args: {args} and kwargs: {kwargs}.'
) | def run(self, *args: Any, callbacks: Callbacks=None, tags: Optional[List[
str]]=None, metadata: Optional[Dict[str, Any]]=None, **kwargs: Any) ->Any:
"""Convenience method for executing chain.
The main difference between this method and `Chain.__call__` is that this
method expects inputs to be passed directly in as positional arguments or
keyword arguments, whereas `Chain.__call__` expects a single input dictionary
with all the inputs
Args:
*args: If the chain expects a single input, it can be passed in as the
sole positional argument.
callbacks: Callbacks to use for this chain run. These will be called in
addition to callbacks passed to the chain during construction, but only
these runtime callbacks will propagate to calls to other objects.
tags: List of string tags to pass to all callbacks. These will be passed in
addition to tags passed to the chain during construction, but only
these runtime tags will propagate to calls to other objects.
**kwargs: If the chain expects multiple inputs, they can be passed in
directly as keyword arguments.
Returns:
The chain output.
Example:
.. code-block:: python
# Suppose we have a single-input chain that takes a 'question' string:
chain.run("What's the temperature in Boise, Idaho?")
# -> "The temperature in Boise is..."
# Suppose we have a multi-input chain that takes a 'question' string
# and 'context' string:
question = "What's the temperature in Boise, Idaho?"
context = "Weather report for Boise, Idaho on 07/03/23..."
chain.run(question=question, context=context)
# -> "The temperature in Boise is..."
"""
_output_key = self._run_output_key
if args and not kwargs:
if len(args) != 1:
raise ValueError('`run` supports only one positional argument.')
return self(args[0], callbacks=callbacks, tags=tags, metadata=metadata
)[_output_key]
if kwargs and not args:
return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[
_output_key]
if not kwargs and not args:
raise ValueError(
'`run` supported with either positional arguments or keyword arguments, but none were provided.'
)
else:
raise ValueError(
f'`run` supported with either positional arguments or keyword arguments but not both. Got args: {args} and kwargs: {kwargs}.'
) | Convenience method for executing chain.
The main difference between this method and `Chain.__call__` is that this
method expects inputs to be passed directly in as positional arguments or
keyword arguments, whereas `Chain.__call__` expects a single input dictionary
with all the inputs
Args:
*args: If the chain expects a single input, it can be passed in as the
sole positional argument.
callbacks: Callbacks to use for this chain run. These will be called in
addition to callbacks passed to the chain during construction, but only
these runtime callbacks will propagate to calls to other objects.
tags: List of string tags to pass to all callbacks. These will be passed in
addition to tags passed to the chain during construction, but only
these runtime tags will propagate to calls to other objects.
**kwargs: If the chain expects multiple inputs, they can be passed in
directly as keyword arguments.
Returns:
The chain output.
Example:
.. code-block:: python
# Suppose we have a single-input chain that takes a 'question' string:
chain.run("What's the temperature in Boise, Idaho?")
# -> "The temperature in Boise is..."
# Suppose we have a multi-input chain that takes a 'question' string
# and 'context' string:
question = "What's the temperature in Boise, Idaho?"
context = "Weather report for Boise, Idaho on 07/03/23..."
chain.run(question=question, context=context)
# -> "The temperature in Boise is..." |
from_llm | llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(llm_chain=llm_chain, **kwargs) | @classmethod
def from_llm(cls, llm: BaseLanguageModel, prompt: BasePromptTemplate=PROMPT,
**kwargs: Any) ->LLMMathChain:
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(llm_chain=llm_chain, **kwargs) | null |
similarity_search | """Same as `similarity_search_with_relevance_scores` but
doesn't return the scores.
"""
return self.similarity_search_by_vector(self._embeddings.embed_query(query),
k, distance_func, where_str, **kwargs) | def similarity_search(self, query: str, k: int=4, distance_func:
DistanceFunction=DistanceFunction.COSINE_SIM, where_str: Optional[str]=
None, **kwargs: Any) ->List[Document]:
"""Same as `similarity_search_with_relevance_scores` but
doesn't return the scores.
"""
return self.similarity_search_by_vector(self._embeddings.embed_query(
query), k, distance_func, where_str, **kwargs) | Same as `similarity_search_with_relevance_scores` but
doesn't return the scores. |
test_chat_google_palm_generate | """Test Google PaLM Chat API wrapper with generate."""
chat = ChatGooglePalm(n=2, temperature=1.0)
message = HumanMessage(content='Hello')
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 2
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content | def test_chat_google_palm_generate() ->None:
"""Test Google PaLM Chat API wrapper with generate."""
chat = ChatGooglePalm(n=2, temperature=1.0)
message = HumanMessage(content='Hello')
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 2
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content | Test Google PaLM Chat API wrapper with generate. |
test_basic_functionality | """Test basic functionality of methods exposed by class"""
combined_memory = CombinedMemory(memories=[example_memory[0],
example_memory[1]])
assert combined_memory.memory_variables == ['foo', 'bar']
assert combined_memory.load_memory_variables({}) == {'foo': '', 'bar': ''}
combined_memory.save_context({'input': 'Hello there'}, {'output':
'Hello, how can I help you?'})
assert combined_memory.load_memory_variables({}) == {'foo':
"""Human: Hello there
AI: Hello, how can I help you?""", 'bar':
"""Human: Hello there
AI: Hello, how can I help you?"""}
combined_memory.clear()
assert combined_memory.load_memory_variables({}) == {'foo': '', 'bar': ''} | def test_basic_functionality(example_memory: List[ConversationBufferMemory]
) ->None:
"""Test basic functionality of methods exposed by class"""
combined_memory = CombinedMemory(memories=[example_memory[0],
example_memory[1]])
assert combined_memory.memory_variables == ['foo', 'bar']
assert combined_memory.load_memory_variables({}) == {'foo': '', 'bar': ''}
combined_memory.save_context({'input': 'Hello there'}, {'output':
'Hello, how can I help you?'})
assert combined_memory.load_memory_variables({}) == {'foo':
"""Human: Hello there
AI: Hello, how can I help you?""", 'bar':
"""Human: Hello there
AI: Hello, how can I help you?"""}
combined_memory.clear()
assert combined_memory.load_memory_variables({}) == {'foo': '', 'bar': ''} | Test basic functionality of methods exposed by class |
draw_graphviz | """
Provides better drawing
Usage in a jupyter notebook:
>>> from IPython.display import SVG
>>> self.draw_graphviz(prog="dot", path="web.svg")
>>> SVG('web.svg')
"""
from networkx.drawing.nx_agraph import to_agraph
try:
import pygraphviz
except ImportError as e:
if e.name == '_graphviz':
"""
>>> e.msg # pygraphviz throws this error
ImportError: libcgraph.so.6: cannot open shared object file
"""
raise ImportError(
'Could not import graphviz debian package. Please install it with: `sudo apt-get update` and `sudo apt-get install graphviz graphviz-dev`'
)
else:
raise ImportError(
'Could not import pygraphviz python package. Please install it with: `pip install pygraphviz`.'
)
graph = to_agraph(self._graph)
graph.layout(prog=kwargs.get('prog', 'dot'))
graph.draw(kwargs.get('path', 'graph.svg')) | def draw_graphviz(self, **kwargs: Any) ->None:
"""
Provides better drawing
Usage in a jupyter notebook:
>>> from IPython.display import SVG
>>> self.draw_graphviz(prog="dot", path="web.svg")
>>> SVG('web.svg')
"""
from networkx.drawing.nx_agraph import to_agraph
try:
import pygraphviz
except ImportError as e:
if e.name == '_graphviz':
"""
>>> e.msg # pygraphviz throws this error
ImportError: libcgraph.so.6: cannot open shared object file
"""
raise ImportError(
'Could not import graphviz debian package. Please install it with: `sudo apt-get update` and `sudo apt-get install graphviz graphviz-dev`'
)
else:
raise ImportError(
'Could not import pygraphviz python package. Please install it with: `pip install pygraphviz`.'
)
graph = to_agraph(self._graph)
graph.layout(prog=kwargs.get('prog', 'dot'))
graph.draw(kwargs.get('path', 'graph.svg')) | Provides better drawing
Usage in a jupyter notebook:
>>> from IPython.display import SVG
>>> self.draw_graphviz(prog="dot", path="web.svg")
>>> SVG('web.svg') |
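An illustrative call, assuming the graphviz system libraries and the pygraphviz package are installed and `wrapper` is an instance exposing this method; the kwargs match those read in the body above:
>>> wrapper.draw_graphviz(prog="dot", path="web.svg")
>>> from IPython.display import SVG
>>> SVG("web.svg")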
test_sqlitevss_with_score | """Test end to end construction and search with scores and IDs."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _sqlite_vss_from_texts(metadatas=metadatas)
output = docsearch.similarity_search_with_score('foo', k=3)
docs = [o[0] for o in output]
distances = [o[1] for o in output]
assert docs == [Document(page_content='foo', metadata={'page': 0}),
Document(page_content='bar', metadata={'page': 1}), Document(
page_content='baz', metadata={'page': 2})]
assert distances[0] < distances[1] < distances[2] | @pytest.mark.requires('sqlite-vss')
def test_sqlitevss_with_score() ->None:
"""Test end to end construction and search with scores and IDs."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _sqlite_vss_from_texts(metadatas=metadatas)
output = docsearch.similarity_search_with_score('foo', k=3)
docs = [o[0] for o in output]
distances = [o[1] for o in output]
assert docs == [Document(page_content='foo', metadata={'page': 0}),
Document(page_content='bar', metadata={'page': 1}), Document(
page_content='baz', metadata={'page': 2})]
assert distances[0] < distances[1] < distances[2] | Test end to end construction and search with scores and IDs. |
test_tool_with_kwargs | """Test functionality when only return direct is provided."""
@tool(return_direct=True)
def search_api(arg_0: str, arg_1: float=4.3, ping: str='hi') ->str:
"""Search the API for the query."""
return f'arg_0={arg_0}, arg_1={arg_1}, ping={ping}'
assert isinstance(search_api, BaseTool)
result = search_api.run(tool_input={'arg_0': 'foo', 'arg_1': 3.2, 'ping':
'pong'})
assert result == 'arg_0=foo, arg_1=3.2, ping=pong'
result = search_api.run(tool_input={'arg_0': 'foo'})
assert result == 'arg_0=foo, arg_1=4.3, ping=hi'
result = search_api.run('foobar')
assert result == 'arg_0=foobar, arg_1=4.3, ping=hi' | def test_tool_with_kwargs() ->None:
"""Test functionality when only return direct is provided."""
@tool(return_direct=True)
def search_api(arg_0: str, arg_1: float=4.3, ping: str='hi') ->str:
"""Search the API for the query."""
return f'arg_0={arg_0}, arg_1={arg_1}, ping={ping}'
assert isinstance(search_api, BaseTool)
result = search_api.run(tool_input={'arg_0': 'foo', 'arg_1': 3.2,
'ping': 'pong'})
assert result == 'arg_0=foo, arg_1=3.2, ping=pong'
result = search_api.run(tool_input={'arg_0': 'foo'})
assert result == 'arg_0=foo, arg_1=4.3, ping=hi'
result = search_api.run('foobar')
assert result == 'arg_0=foobar, arg_1=4.3, ping=hi' | Test functionality when only return direct is provided. |
get_lc_namespace | """Get the namespace of the langchain object."""
return ['langchain', 'prompts', 'chat'] | @classmethod
def get_lc_namespace(cls) ->List[str]:
"""Get the namespace of the langchain object."""
return ['langchain', 'prompts', 'chat'] | Get the namespace of the langchain object. |
_on_chain_start | """Process the Chain Run upon start.""" | def _on_chain_start(self, run: Run) ->None:
"""Process the Chain Run upon start.""" | Process the Chain Run upon start. |
_get_relevant_documents | try:
if self.is_arxiv_identifier(query):
results = self.arxiv_search(id_list=query.split(), max_results=self
.top_k_results).results()
else:
results = self.arxiv_search(query[:self.ARXIV_MAX_QUERY_LENGTH],
max_results=self.top_k_results).results()
except self.arxiv_exceptions as ex:
return [Document(page_content=f'Arxiv exception: {ex}')]
docs = [Document(page_content=result.summary, metadata={'Published': result
.updated.date(), 'Title': result.title, 'Authors': ', '.join(a.name for
a in result.authors)}) for result in results]
return docs | def _get_relevant_documents(self, query: str, *, run_manager:
CallbackManagerForRetrieverRun) ->List[Document]:
try:
if self.is_arxiv_identifier(query):
results = self.arxiv_search(id_list=query.split(), max_results=
self.top_k_results).results()
else:
results = self.arxiv_search(query[:self.ARXIV_MAX_QUERY_LENGTH],
max_results=self.top_k_results).results()
except self.arxiv_exceptions as ex:
return [Document(page_content=f'Arxiv exception: {ex}')]
docs = [Document(page_content=result.summary, metadata={'Published':
result.updated.date(), 'Title': result.title, 'Authors': ', '.join(
a.name for a in result.authors)}) for result in results]
return docs | null |
__init__ | """Initialize by creating all tables."""
self.engine = engine
self.cache_schema = cache_schema
self.cache_schema.metadata.create_all(self.engine) | def __init__(self, engine: Engine, cache_schema: Type[FullMd5LLMCache]=
FullMd5LLMCache):
"""Initialize by creating all tables."""
self.engine = engine
self.cache_schema = cache_schema
self.cache_schema.metadata.create_all(self.engine) | Initialize by creating all tables. |
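A construction sketch assuming SQLAlchemy is installed; the SQLite URL is illustrative and `Md5Cache` is a placeholder for the enclosing class, which this row does not name:
>>> from sqlalchemy import create_engine
>>> engine = create_engine("sqlite:///llm_cache.db")   # illustrative local database
>>> cache = Md5Cache(engine)                            # placeholder class name; tables are created on construction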
test_redis_semantic_cache_chat | set_llm_cache(RedisSemanticCache(embedding=FakeEmbeddings(), redis_url=
REDIS_TEST_URL, score_threshold=0.1))
llm = FakeChatModel()
params = llm.dict()
params['stop'] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
prompt: List[BaseMessage] = [HumanMessage(content='foo')]
get_llm_cache().update(dumps(prompt), llm_string, [ChatGeneration(message=
AIMessage(content='fizz'))])
output = llm.generate([prompt])
expected_output = LLMResult(generations=[[ChatGeneration(message=AIMessage(
content='fizz'))]], llm_output={})
assert output == expected_output
get_llm_cache().clear(llm_string=llm_string) | def test_redis_semantic_cache_chat() ->None:
set_llm_cache(RedisSemanticCache(embedding=FakeEmbeddings(), redis_url=
REDIS_TEST_URL, score_threshold=0.1))
llm = FakeChatModel()
params = llm.dict()
params['stop'] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
prompt: List[BaseMessage] = [HumanMessage(content='foo')]
get_llm_cache().update(dumps(prompt), llm_string, [ChatGeneration(
message=AIMessage(content='fizz'))])
output = llm.generate([prompt])
expected_output = LLMResult(generations=[[ChatGeneration(message=
AIMessage(content='fizz'))]], llm_output={})
assert output == expected_output
get_llm_cache().clear(llm_string=llm_string) | null |
test_chroma_search_filter_with_scores | """Test end to end construction and scored search with metadata filtering."""
texts = ['far', 'bar', 'baz']
metadatas = [{'first_letter': '{}'.format(text[0])} for text in texts]
docsearch = Chroma.from_texts(collection_name='test_collection', texts=
texts, embedding=FakeEmbeddings(), metadatas=metadatas)
output = docsearch.similarity_search_with_score('far', k=1, filter={
'first_letter': 'f'})
assert output == [(Document(page_content='far', metadata={'first_letter':
'f'}), 0.0)]
output = docsearch.similarity_search_with_score('far', k=1, filter={
'first_letter': 'b'})
assert output == [(Document(page_content='bar', metadata={'first_letter':
'b'}), 1.0)] | def test_chroma_search_filter_with_scores() ->None:
"""Test end to end construction and scored search with metadata filtering."""
texts = ['far', 'bar', 'baz']
metadatas = [{'first_letter': '{}'.format(text[0])} for text in texts]
docsearch = Chroma.from_texts(collection_name='test_collection', texts=
texts, embedding=FakeEmbeddings(), metadatas=metadatas)
output = docsearch.similarity_search_with_score('far', k=1, filter={
'first_letter': 'f'})
assert output == [(Document(page_content='far', metadata={
'first_letter': 'f'}), 0.0)]
output = docsearch.similarity_search_with_score('far', k=1, filter={
'first_letter': 'b'})
assert output == [(Document(page_content='bar', metadata={
'first_letter': 'b'}), 1.0)] | Test end to end construction and scored search with metadata filtering. |
__call__ | """Correct the query to make it valid.
Args:
query: cypher query
"""
return self.correct_query(query) | def __call__(self, query: str) ->str:
    """Correct the query to make it valid.
Args:
query: cypher query
"""
return self.correct_query(query) | Correct the query to make it valid.
Args:
query: cypher query |
requires_reference | """Whether this evaluator requires a reference label."""
return False | @property
def requires_reference(self) ->bool:
"""Whether this evaluator requires a reference label."""
return False | Whether this evaluator requires a reference label. |
on_agent_action | """Do nothing."""
pass | def on_agent_action(self, action: AgentAction, **kwargs: Any) ->Any:
"""Do nothing."""
pass | Do nothing. |
__init__ | """
Initialize a new LangSmithRunChatLoader instance.
:param runs: List of LLM run IDs or run objects.
:param client: An instance of LangSmith client, if not provided,
a new client instance will be created.
"""
from langsmith.client import Client
self.runs = runs
self.client = client or Client() | def __init__(self, runs: Iterable[Union[str, Run]], client: Optional[
'Client']=None):
"""
Initialize a new LangSmithRunChatLoader instance.
:param runs: List of LLM run IDs or run objects.
:param client: An instance of LangSmith client, if not provided,
a new client instance will be created.
"""
from langsmith.client import Client
self.runs = runs
self.client = client or Client() | Initialize a new LangSmithRunChatLoader instance.
:param runs: List of LLM run IDs or run objects.
:param client: An instance of LangSmith client, if not provided,
a new client instance will be created. |
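A hedged construction example; the run IDs are placeholders, and `client` can be omitted to let the initializer create a default one (valid LangSmith credentials are assumed to be configured in the environment):
>>> from langsmith.client import Client
>>> loader = LangSmithRunChatLoader(runs=["<llm-run-id-1>", "<llm-run-id-2>"], client=Client())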
test_dashscope_embedding_documents | """Test dashscope embeddings."""
documents = ['foo bar']
embedding = DashScopeEmbeddings(model='text-embedding-v1')
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 1536 | def test_dashscope_embedding_documents() ->None:
"""Test dashscope embeddings."""
documents = ['foo bar']
embedding = DashScopeEmbeddings(model='text-embedding-v1')
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 1536 | Test dashscope embeddings. |
__init__ | """Create an Astra DB chat message history."""
try:
from astrapy.db import AstraDB as LibAstraDB
except (ImportError, ModuleNotFoundError):
raise ImportError(
'Could not import a recent astrapy python package. Please install it with `pip install --upgrade astrapy`.'
)
if astra_db_client is not None:
if token is not None or api_endpoint is not None:
raise ValueError(
"You cannot pass 'astra_db_client' to AstraDB if passing 'token' and 'api_endpoint'."
)
self.session_id = session_id
self.collection_name = collection_name
self.token = token
self.api_endpoint = api_endpoint
self.namespace = namespace
if astra_db_client is not None:
self.astra_db = astra_db_client
else:
self.astra_db = LibAstraDB(token=self.token, api_endpoint=self.
api_endpoint, namespace=self.namespace)
self.collection = self.astra_db.create_collection(self.collection_name) | def __init__(self, *, session_id: str, collection_name: str=
DEFAULT_COLLECTION_NAME, token: Optional[str]=None, api_endpoint:
Optional[str]=None, astra_db_client: Optional[LibAstraDB]=None,
namespace: Optional[str]=None) ->None:
"""Create an Astra DB chat message history."""
try:
from astrapy.db import AstraDB as LibAstraDB
except (ImportError, ModuleNotFoundError):
raise ImportError(
'Could not import a recent astrapy python package. Please install it with `pip install --upgrade astrapy`.'
)
if astra_db_client is not None:
if token is not None or api_endpoint is not None:
raise ValueError(
"You cannot pass 'astra_db_client' to AstraDB if passing 'token' and 'api_endpoint'."
)
self.session_id = session_id
self.collection_name = collection_name
self.token = token
self.api_endpoint = api_endpoint
self.namespace = namespace
if astra_db_client is not None:
self.astra_db = astra_db_client
else:
self.astra_db = LibAstraDB(token=self.token, api_endpoint=self.
api_endpoint, namespace=self.namespace)
self.collection = self.astra_db.create_collection(self.collection_name) | Create an Astra DB chat message history. |
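An illustrative construction using the token/endpoint path; the class name is inferred from the docstring, the credential and endpoint strings are placeholders, and per the validation above they must not be combined with `astra_db_client`:
>>> history = AstraDBChatMessageHistory(
...     session_id="my-session",                                           # placeholder
...     token="AstraCS:<application-token>",                               # placeholder
...     api_endpoint="https://<db-id>-<region>.apps.astra.datastax.com",   # placeholder
... )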
test_jinachat_extra_kwargs | """Test extra kwargs to chat openai."""
llm = JinaChat(foo=3, max_tokens=10)
assert llm.max_tokens == 10
assert llm.model_kwargs == {'foo': 3}
llm = JinaChat(foo=3, model_kwargs={'bar': 2})
assert llm.model_kwargs == {'foo': 3, 'bar': 2}
with pytest.raises(ValueError):
JinaChat(foo=3, model_kwargs={'foo': 2})
with pytest.raises(ValueError):
JinaChat(model_kwargs={'temperature': 0.2}) | def test_jinachat_extra_kwargs() ->None:
"""Test extra kwargs to chat openai."""
llm = JinaChat(foo=3, max_tokens=10)
assert llm.max_tokens == 10
assert llm.model_kwargs == {'foo': 3}
llm = JinaChat(foo=3, model_kwargs={'bar': 2})
assert llm.model_kwargs == {'foo': 3, 'bar': 2}
with pytest.raises(ValueError):
JinaChat(foo=3, model_kwargs={'foo': 2})
with pytest.raises(ValueError):
JinaChat(model_kwargs={'temperature': 0.2}) | Test extra kwargs to chat openai. |
test_cosine_similarity_score_threshold | expected_idxs = [(0, 0), (2, 2)]
expected_scores = [1.0, 0.93419873]
actual_idxs, actual_scores = cosine_similarity_top_k(X, Y, top_k=None,
score_threshold=0.9)
assert actual_idxs == expected_idxs
assert np.allclose(expected_scores, actual_scores) | def test_cosine_similarity_score_threshold(X: List[List[float]], Y: List[
List[float]]) ->None:
expected_idxs = [(0, 0), (2, 2)]
expected_scores = [1.0, 0.93419873]
actual_idxs, actual_scores = cosine_similarity_top_k(X, Y, top_k=None,
score_threshold=0.9)
assert actual_idxs == expected_idxs
assert np.allclose(expected_scores, actual_scores) | null |
get_lc_namespace | """Get the namespace of the langchain object."""
return ['langchain', 'schema', 'messages'] | @classmethod
def get_lc_namespace(cls) ->List[str]:
"""Get the namespace of the langchain object."""
return ['langchain', 'schema', 'messages'] | Get the namespace of the langchain object. |
_import_azure_cognitive_services_AzureCogsTextAnalyticsHealthTool | from langchain_community.tools.azure_cognitive_services import AzureCogsTextAnalyticsHealthTool
return AzureCogsTextAnalyticsHealthTool | def _import_azure_cognitive_services_AzureCogsTextAnalyticsHealthTool() ->Any:
from langchain_community.tools.azure_cognitive_services import AzureCogsTextAnalyticsHealthTool
return AzureCogsTextAnalyticsHealthTool | null |
_generate | should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(messages, stop=stop, run_manager=run_manager,
**kwargs)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = self.completion_with_retry(messages=message_dicts, run_manager=
run_manager, **params)
return self._create_chat_result(response) | def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
None, run_manager: Optional[CallbackManagerForLLMRun]=None, stream:
Optional[bool]=None, **kwargs: Any) ->ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(messages, stop=stop, run_manager=
run_manager, **kwargs)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = self.completion_with_retry(messages=message_dicts,
run_manager=run_manager, **params)
return self._create_chat_result(response) | null |
_parse_search_response | documents = []
for doc in response.matching_documents:
metadata = {'title': doc.document.title, 'source': doc.document.
raw_document_path}
documents.append(Document(page_content=doc.search_text_snippet,
metadata=metadata))
return documents | def _parse_search_response(self, response: 'SearchDocumentsPager') ->List[
Document]:
documents = []
for doc in response.matching_documents:
metadata = {'title': doc.document.title, 'source': doc.document.
raw_document_path}
documents.append(Document(page_content=doc.search_text_snippet,
metadata=metadata))
return documents | null |
get_prompt | """Get default prompt for a language model.""" | @abstractmethod
def get_prompt(self, llm: BaseLanguageModel) ->BasePromptTemplate:
"""Get default prompt for a language model.""" | Get default prompt for a language model. |
list | """
List all or search for available templates.
"""
from langchain_cli.utils.github import list_packages
packages = list_packages(contains=contains)
for package in packages:
typer.echo(package) | @package_cli.command()
def list(contains: Annotated[Optional[str], typer.Argument()]=None) ->None:
"""
List all or search for available templates.
"""
from langchain_cli.utils.github import list_packages
packages = list_packages(contains=contains)
for package in packages:
typer.echo(package) | List all or search for available templates. |
get_llm_kwargs | """Returns the kwargs for the LLMChain constructor.
Args:
function: The function to use.
Returns:
The kwargs for the LLMChain constructor.
"""
return {'functions': [function], 'function_call': {'name': function['name']}} | def get_llm_kwargs(function: dict) ->dict:
"""Returns the kwargs for the LLMChain constructor.
Args:
function: The function to use.
Returns:
The kwargs for the LLMChain constructor.
"""
return {'functions': [function], 'function_call': {'name': function[
'name']}} | Returns the kwargs for the LLMChain constructor.
Args:
function: The function to use.
Returns:
The kwargs for the LLMChain constructor. |
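A worked example that follows directly from the body above; the function name is illustrative:
>>> get_llm_kwargs({"name": "extract_person"})
{'functions': [{'name': 'extract_person'}], 'function_call': {'name': 'extract_person'}}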
__init__ | super().__init__(criteria=criteria, normalize_by=normalize_by, **kwargs) | def __init__(self, criteria: Optional[CRITERIA_TYPE]=None, normalize_by:
Optional[float]=None, **kwargs: Any) ->None:
super().__init__(criteria=criteria, normalize_by=normalize_by, **kwargs) | null |
from_llm | """Load chain from LLM."""
combine_docs_chain_kwargs = combine_docs_chain_kwargs or {}
doc_chain = load_qa_chain(llm, chain_type=chain_type, callbacks=callbacks,
**combine_docs_chain_kwargs)
condense_question_chain = LLMChain(llm=llm, prompt=condense_question_prompt,
callbacks=callbacks)
return cls(vectorstore=vectorstore, combine_docs_chain=doc_chain,
question_generator=condense_question_chain, callbacks=callbacks, **kwargs) | @classmethod
def from_llm(cls, llm: BaseLanguageModel, vectorstore: VectorStore,
condense_question_prompt: BasePromptTemplate=CONDENSE_QUESTION_PROMPT,
chain_type: str='stuff', combine_docs_chain_kwargs: Optional[Dict]=None,
callbacks: Callbacks=None, **kwargs: Any
) ->BaseConversationalRetrievalChain:
"""Load chain from LLM."""
combine_docs_chain_kwargs = combine_docs_chain_kwargs or {}
doc_chain = load_qa_chain(llm, chain_type=chain_type, callbacks=
callbacks, **combine_docs_chain_kwargs)
condense_question_chain = LLMChain(llm=llm, prompt=
condense_question_prompt, callbacks=callbacks)
return cls(vectorstore=vectorstore, combine_docs_chain=doc_chain,
question_generator=condense_question_chain, callbacks=callbacks, **
kwargs) | Load chain from LLM. |
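A hedged usage sketch; `llm` and `vectorstore` are assumed to exist, and `ChainClass` is a placeholder for whichever chain class defines this classmethod:
>>> qa = ChainClass.from_llm(llm, vectorstore, chain_type="stuff")
>>> # qa({"question": "What does the document say?", "chat_history": []})   # illustrative input keys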
test_get_relevant_documents | retriever.add_texts(['Hai there!', 'Hello world!', 'Foo bar baz!'])
expected = [Document(page_content='Hai there!')]
retriever.k = 1
results = retriever.get_relevant_documents('Hai there!')
assert len(results) == retriever.k
assert results == expected
assert retriever.get_relevant_documents('Hai there!') == expected | def test_get_relevant_documents(retriever: QdrantSparseVectorRetriever) ->None:
retriever.add_texts(['Hai there!', 'Hello world!', 'Foo bar baz!'])
expected = [Document(page_content='Hai there!')]
retriever.k = 1
results = retriever.get_relevant_documents('Hai there!')
assert len(results) == retriever.k
assert results == expected
assert retriever.get_relevant_documents('Hai there!') == expected | null |
test_intervention_chain | """
Test intervention chain correctly transforms
the LLM's text completion into a setting-like object.
"""
intervention_chain = InterventionChain.from_univariate_prompt(llm=self.fake_llm
)
output = intervention_chain('if cindy has ten pets')
expected_output = {'chain_answer': None, 'chain_data': InterventionModel(
entity_settings=[EntitySettingModel(name='cindy', attribute='pet_count',
value=10)]), 'narrative_input': 'if cindy has ten pets'}
assert output == expected_output | def test_intervention_chain(self) ->None:
"""
Test intervention chain correctly transforms
the LLM's text completion into a setting-like object.
"""
intervention_chain = InterventionChain.from_univariate_prompt(llm=self.
fake_llm)
output = intervention_chain('if cindy has ten pets')
expected_output = {'chain_answer': None, 'chain_data':
InterventionModel(entity_settings=[EntitySettingModel(name='cindy',
attribute='pet_count', value=10)]), 'narrative_input':
'if cindy has ten pets'}
assert output == expected_output | Test intervention chain correctly transforms
the LLM's text completion into a setting-like object. |
_import_google_search_tool_GoogleSearchResults | from langchain_community.tools.google_search.tool import GoogleSearchResults
return GoogleSearchResults | def _import_google_search_tool_GoogleSearchResults() ->Any:
from langchain_community.tools.google_search.tool import GoogleSearchResults
return GoogleSearchResults | null |
_call | """Split document into chunks and pass to CombineDocumentsChain."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
document = inputs[self.input_key]
docs = self.text_splitter.create_documents([document])
other_keys: Dict = {k: v for k, v in inputs.items() if k != self.input_key}
other_keys[self.combine_docs_chain.input_key] = docs
return self.combine_docs_chain(other_keys, return_only_outputs=True,
callbacks=_run_manager.get_child()) | def _call(self, inputs: Dict[str, str], run_manager: Optional[
CallbackManagerForChainRun]=None) ->Dict[str, str]:
"""Split document into chunks and pass to CombineDocumentsChain."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
document = inputs[self.input_key]
docs = self.text_splitter.create_documents([document])
other_keys: Dict = {k: v for k, v in inputs.items() if k != self.input_key}
other_keys[self.combine_docs_chain.input_key] = docs
return self.combine_docs_chain(other_keys, return_only_outputs=True,
callbacks=_run_manager.get_child()) | Split document into chunks and pass to CombineDocumentsChain. |
test_move_file_errs_outside_root_dir | """Test the FileMove tool when a root dir is specified."""
with TemporaryDirectory() as temp_dir:
tool = MoveFileTool(root_dir=temp_dir)
result = tool.run({'source_path': '../source.txt', 'destination_path':
'../destination.txt'})
assert result == INVALID_PATH_TEMPLATE.format(arg_name='source_path',
value='../source.txt') | def test_move_file_errs_outside_root_dir() ->None:
"""Test the FileMove tool when a root dir is specified."""
with TemporaryDirectory() as temp_dir:
tool = MoveFileTool(root_dir=temp_dir)
result = tool.run({'source_path': '../source.txt',
'destination_path': '../destination.txt'})
assert result == INVALID_PATH_TEMPLATE.format(arg_name=
'source_path', value='../source.txt') | Test the FileMove tool when a root dir is specified. |
get_chat_model_table | feat_table = {}
for cm in chat_models.__all__:
feat_table[cm] = {}
cls = getattr(chat_models, cm)
if issubclass(cls, SimpleChatModel):
comparison_cls = SimpleChatModel
else:
comparison_cls = BaseChatModel
for feat in ('_stream', '_astream', '_agenerate'):
feat_table[cm][feat] = getattr(cls, feat) != getattr(comparison_cls,
feat)
final_feats = {k: v for k, v in {**feat_table, **
CHAT_MODEL_FEAT_TABLE_CORRECTION}.items() if k not in CHAT_MODEL_IGNORE}
header = ['model', '_agenerate', '_stream', '_astream']
title = ['Model', 'Invoke', 'Async invoke', 'Stream', 'Async stream']
rows = [title, [':-'] + [':-:'] * (len(title) - 1)]
for llm, feats in sorted(final_feats.items()):
rows += [[llm, '✅'] + [('✅' if feats.get(h) else '❌') for h in header[1:]]]
return '\n'.join(['|'.join(row) for row in rows]) | def get_chat_model_table():
feat_table = {}
for cm in chat_models.__all__:
feat_table[cm] = {}
cls = getattr(chat_models, cm)
if issubclass(cls, SimpleChatModel):
comparison_cls = SimpleChatModel
else:
comparison_cls = BaseChatModel
for feat in ('_stream', '_astream', '_agenerate'):
feat_table[cm][feat] = getattr(cls, feat) != getattr(comparison_cls
, feat)
final_feats = {k: v for k, v in {**feat_table, **
CHAT_MODEL_FEAT_TABLE_CORRECTION}.items() if k not in CHAT_MODEL_IGNORE
}
header = ['model', '_agenerate', '_stream', '_astream']
title = ['Model', 'Invoke', 'Async invoke', 'Stream', 'Async stream']
rows = [title, [':-'] + [':-:'] * (len(title) - 1)]
for llm, feats in sorted(final_feats.items()):
        rows += [[llm, '✅'] + [('✅' if feats.get(h) else '❌') for h in
            header[1:]]]
return '\n'.join(['|'.join(row) for row in rows]) | null |
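The first two lines of the returned string are fixed by the code above (a pipe-delimited markdown header and its alignment row); the remaining rows depend on the installed chat models. Assuming the surrounding script's imports are available:
>>> table = get_chat_model_table()
>>> print("\n".join(table.splitlines()[:2]))
Model|Invoke|Async invoke|Stream|Async stream
:-|:-:|:-:|:-:|:-: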
get_format_instructions | """Instructions on how the LLM output should be formatted."""
initial = f'For your first output: {self.parsers[0].get_format_instructions()}'
subsequent = '\n'.join(
f'Complete that output fully. Then produce another output, separated by two newline characters: {p.get_format_instructions()}'
for p in self.parsers[1:])
return f"""{initial}
{subsequent}""" | def get_format_instructions(self) ->str:
"""Instructions on how the LLM output should be formatted."""
initial = (
f'For your first output: {self.parsers[0].get_format_instructions()}')
subsequent = '\n'.join(
f'Complete that output fully. Then produce another output, separated by two newline characters: {p.get_format_instructions()}'
for p in self.parsers[1:])
return f'{initial}\n{subsequent}' | Instructions on how the LLM output should be formatted. |
on_llm_end | self.saved_things['generation'] = args[0] | def on_llm_end(self, *args: Any, **kwargs: Any) ->Any:
self.saved_things['generation'] = args[0] | null |
_call | """Call the internal llm chain."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
return self.llm_chain(inputs, callbacks=_run_manager.get_child()) | def _call(self, inputs: Dict[str, Any], run_manager: Optional[
CallbackManagerForChainRun]=None) ->Dict[str, str]:
"""Call the internal llm chain."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
return self.llm_chain(inputs, callbacks=_run_manager.get_child()) | Call the internal llm chain. |
test_run_kwargs_error | """Test run method with kwargs errors as expected."""
chain = FakeChain(the_input_keys=['foo', 'bar'])
with pytest.raises(ValueError):
chain.run(foo='bar', baz='foo') | def test_run_kwargs_error() ->None:
"""Test run method with kwargs errors as expected."""
chain = FakeChain(the_input_keys=['foo', 'bar'])
with pytest.raises(ValueError):
chain.run(foo='bar', baz='foo') | Test run method with kwargs errors as expected. |