"""
Unit test for retrieve_utils.py
"""
from autogen.retrieve_utils import (
split_text_to_chunks,
extract_text_from_pdf,
split_files_to_chunks,
get_files_from_dir,
is_url,
create_vector_db_from_dir,
query_vector_db,
)
from autogen.token_count_utils import count_token
import os
import pytest
import chromadb
test_dir = os.path.join(os.path.dirname(__file__), "test_files")
expected_text = """AutoGen is an advanced tool designed to assist developers in harnessing the capabilities
of Large Language Models (LLMs) for various applications. The primary purpose of AutoGen is to automate and
simplify the process of building applications that leverage the power of LLMs, allowing for seamless
integration, testing, and deployment."""
class TestRetrieveUtils:
def test_split_text_to_chunks(self):
long_text = "A" * 10000
chunks = split_text_to_chunks(long_text, max_tokens=1000)
assert all(count_token(chunk) <= 1000 for chunk in chunks)
def test_split_text_to_chunks_raises_on_invalid_chunk_mode(self):
with pytest.raises(AssertionError):
split_text_to_chunks("A" * 10000, chunk_mode="bogus_chunk_mode")
def test_extract_text_from_pdf(self):
pdf_file_path = os.path.join(test_dir, "example.pdf")
assert "".join(expected_text.split()) == "".join(extract_text_from_pdf(pdf_file_path).strip().split())
def test_split_files_to_chunks(self):
pdf_file_path = os.path.join(test_dir, "example.pdf")
txt_file_path = os.path.join(test_dir, "example.txt")
chunks = split_files_to_chunks([pdf_file_path, txt_file_path])
assert all(isinstance(chunk, str) and chunk.strip() for chunk in chunks)
def test_get_files_from_dir(self):
files = get_files_from_dir(test_dir)
assert all(os.path.isfile(file) for file in files)
pdf_file_path = os.path.join(test_dir, "example.pdf")
txt_file_path = os.path.join(test_dir, "example.txt")
files = get_files_from_dir([pdf_file_path, txt_file_path])
assert all(os.path.isfile(file) for file in files)
def test_is_url(self):
assert is_url("https://www.example.com")
assert not is_url("not_a_url")
def test_create_vector_db_from_dir(self):
db_path = "/tmp/test_retrieve_utils_chromadb.db"
        db_exists = os.path.exists(db_path)
        client = chromadb.PersistentClient(path=db_path)
        if not db_exists:  # If the database does not exist, create and populate it first
            create_vector_db_from_dir(test_dir, client=client)
assert client.get_collection("all-my-documents")
def test_query_vector_db(self):
db_path = "/tmp/test_retrieve_utils_chromadb.db"
        db_exists = os.path.exists(db_path)
        client = chromadb.PersistentClient(path=db_path)
        if not db_exists:  # If the database does not exist, create it first
            create_vector_db_from_dir(test_dir, client=client)
results = query_vector_db(["autogen"], client=client)
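        # For reference, query_vector_db returns a Chroma-style mapping of
        # parallel lists, one inner list per query text, e.g. (a shape sketch):
        #   {"ids": [[...]], "documents": [["chunk text", ...]], ...}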
assert isinstance(results, dict) and any("autogen" in res[0].lower() for res in results.get("documents", []))
def test_custom_vector_db(self):
try:
import lancedb
except ImportError:
return
from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent
db_path = "/tmp/lancedb"
def create_lancedb():
db = lancedb.connect(db_path)
data = [
{"vector": [1.1, 1.2], "id": 1, "documents": "This is a test document spark"},
{"vector": [0.2, 1.8], "id": 2, "documents": "This is another test document"},
{"vector": [0.1, 0.3], "id": 3, "documents": "This is a third test document spark"},
{"vector": [0.5, 0.7], "id": 4, "documents": "This is a fourth test document"},
{"vector": [2.1, 1.3], "id": 5, "documents": "This is a fifth test document spark"},
{"vector": [5.1, 8.3], "id": 6, "documents": "This is a sixth test document"},
]
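            # LanceDB raises when "my_table" already exists; the except below
            # swallows that so repeated runs reuse the previously created table.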
try:
db.create_table("my_table", data)
except OSError:
pass
class MyRetrieveUserProxyAgent(RetrieveUserProxyAgent):
def query_vector_db(
self,
query_texts,
n_results=10,
search_string="",
):
if query_texts:
vector = [0.1, 0.3]
db = lancedb.connect(db_path)
table = db.open_table("my_table")
query = table.search(vector).where(f"documents LIKE '%{search_string}%'").limit(n_results).to_df()
return {"ids": [query["id"].tolist()], "documents": [query["documents"].tolist()]}
def retrieve_docs(self, problem: str, n_results: int = 20, search_string: str = ""):
results = self.query_vector_db(
query_texts=[problem],
n_results=n_results,
search_string=search_string,
)
self._results = results
print("doc_ids: ", results["ids"])
ragragproxyagent = MyRetrieveUserProxyAgent(
name="ragproxyagent",
human_input_mode="NEVER",
max_consecutive_auto_reply=2,
retrieve_config={
"task": "qa",
"chunk_token_size": 2000,
"client": "__",
"embedding_model": "all-mpnet-base-v2",
},
)
create_lancedb()
ragragproxyagent.retrieve_docs("This is a test document spark", n_results=10, search_string="spark")
assert ragragproxyagent._results["ids"] == [[3, 1, 5]]
def test_custom_text_split_function(self):
def custom_text_split_function(text):
return [text[: len(text) // 2], text[len(text) // 2 :]]
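        # custom_text_split_function above cuts each document into two equal
        # halves, so the first chunk of example.txt is exactly its first half;
        # the assertion at the end of this test pins that expected prefix.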
db_path = "/tmp/test_retrieve_utils_chromadb.db"
client = chromadb.PersistentClient(path=db_path)
create_vector_db_from_dir(
os.path.join(test_dir, "example.txt"),
client=client,
collection_name="mytestcollection",
custom_text_split_function=custom_text_split_function,
get_or_create=True,
)
results = query_vector_db(["autogen"], client=client, collection_name="mytestcollection", n_results=1)
assert (
results.get("documents")[0][0]
== "AutoGen is an advanced tool designed to assist developers in harnessing the capabilities\nof Large Language Models (LLMs) for various applications. The primary purpose o"
)
def test_retrieve_utils(self):
client = chromadb.PersistentClient(path="/tmp/chromadb")
create_vector_db_from_dir(dir_path="./website/docs", client=client, collection_name="autogen-docs")
results = query_vector_db(
query_texts=[
"How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?",
],
n_results=4,
client=client,
collection_name="autogen-docs",
search_string="AutoGen",
)
print(results["ids"][0])
assert len(results["ids"][0]) == 4
if __name__ == "__main__":
pytest.main()
db_path = "/tmp/test_retrieve_utils_chromadb.db"
if os.path.exists(db_path):
os.remove(db_path) # Delete the database file after tests are finished
import os
import lancedb
import shutil
import uvicorn
import openai
from fastapi import FastAPI, HTTPException, WebSocket, UploadFile, File
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.document_loaders import PyPDFLoader
from langchain.vectorstores import LanceDB
from langchain.text_splitter import RecursiveCharacterTextSplitter
from pydantic import BaseModel
# Initialize FastAPI app with metadata
app = FastAPI(
title="Chatbot RAG API",
description="This is a chatbot API template for RAG system.",
version="1.0.0",
)
# Pydantic model for chatbot request and response
class ChatRequest(BaseModel):
prompt: str
class ChatResponse(BaseModel):
response: str
# Global variable to store the path of the uploaded file
uploaded_file_path = None
# Endpoint to upload PDF
@app.post("/upload-pdf/")
async def upload_pdf(file: UploadFile = File(...)):
global uploaded_file_path
uploaded_file_path = f"uploaded_files/{file.filename}"
os.makedirs(os.path.dirname(uploaded_file_path), exist_ok=True)
with open(uploaded_file_path, "wb") as buffer:
shutil.copyfileobj(file.file, buffer)
return {"filename": file.filename}
# Setup LangChain
def setup_chain():
global uploaded_file_path
if not uploaded_file_path or not os.path.exists(uploaded_file_path):
raise HTTPException(
status_code=400, detail="No PDF file uploaded or file not found."
)
template = """Use the following pieces of context to answer the question at the end.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
Use three sentences maximum and keep the answer as concise as possible.
{context}
Question: {question}
Helpful Answer:"""
OPENAI_API_KEY = "sk-yourkey"
loader = PyPDFLoader(uploaded_file_path)
docs = loader.load_and_split()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=50)
documents = text_splitter.split_documents(docs)
prompt = PromptTemplate(input_variables=["context", "question"], template=template)
embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
db_lance = lancedb.connect("/tmp/lancedb")
table = db_lance.create_table(
"my_table",
data=[
{
"vector": embeddings.embed_query("Hello World"),
"text": "Hello World",
"id": "1",
}
],
mode="overwrite",
)
db = LanceDB.from_documents(documents, embeddings, connection=table)
retriever = db.as_retriever()
chain_type_kwargs = {"prompt": prompt}
llm = ChatOpenAI(openai_api_key=OPENAI_API_KEY)
chain = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
chain_type_kwargs=chain_type_kwargs,
verbose=True,
)
return chain
# WebSocket endpoint for chat interaction
@app.websocket("/ws/chat")
async def websocket_chat(websocket: WebSocket):
await websocket.accept()
try:
while True:
data = await websocket.receive_text()
try:
agent = (
setup_chain()
) # Setup agent for each request to use the latest uploaded file
response = agent.run(data)
await websocket.send_text(response)
except Exception as e:
await websocket.send_text(f"Error: {str(e)}")
except Exception as e:
await websocket.close(code=1001, reason=str(e))
# Endpoint for chatbot interaction
@app.post("/chat", response_model=ChatResponse)
async def chat(request: ChatRequest):
agent = setup_chain()
response = agent.run(request.prompt)
return {"response": response}
# Health check endpoint
@app.get("/", tags=["Health Check"])
async def read_root():
return {"message": "Chatbot API is running!"}
# Main function to run the app
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000)
import os
import typer
import pickle
import pandas as pd
from dotenv import load_dotenv
import openai
import pinecone
import lancedb
import pyarrow as pa
from collections import deque
TASK_CREATION_PROMPT = """
You are a task creation AI that uses the result of an execution agent to create new tasks with the following objective:
{objective}. The last completed task has the result: {result}. This result was based on this task description: {task_description}.
These are incomplete tasks: {task_list}. Based on the result, create new tasks to be completed by the AI system that
do not overlap with incomplete tasks. Return the tasks as an array."""
PRIORITIZATION_PROMPT = """
You are a task prioritization AI tasked with cleaning the formatting of and reprioritizing
the following tasks: {task_names}. Consider the ultimate objective of your team: {objective}. Do not remove any tasks.
Return the result as a numbered list, like:
#. First task
#. Second task
Start the task list with number {next_task_id}."""
EXECUTION_PROMPT = """
You are an AI who performs one task based on the following objective: {objective}. Your task: {task}\nResponse:
"""
class OpenAIService:
def __init__(self, api_key):
openai.api_key = api_key
def get_ada_embedding(self, text):
text = text.replace('\n', ' ')
return openai.Embedding.create(input=[text], model='text-embedding-ada-002')['data'][0]['embedding']
def create(self, prompt, max_tokens=100, temperature=0.5):
return (
openai.Completion.create(
engine='text-davinci-003',
prompt=prompt,
temperature=temperature,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
.choices[0]
.text.strip()
)
class TestAIService:
def __init__(self, ai_service, cache_file):
self.ai_service = ai_service
self.cache_file = cache_file
if os.path.isfile(cache_file):
self.cache = pickle.load(open(cache_file, 'rb'))
else:
self.cache = {'ada': {}, 'create': {}}
pickle.dump(self.cache, open(cache_file, 'wb'))
def get_ada_embedding(self, text):
if text not in self.cache['ada']:
self.cache['ada'][text] = self.ai_service.get_ada_embedding(text)
pickle.dump(self.cache, open(self.cache_file, 'wb'))
return self.cache['ada'][text]
def create(self, prompt, max_tokens=100, temperature=0.5):
key = (prompt, max_tokens, temperature)
if key not in self.cache['create']:
self.cache['create'][key] = self.ai_service.create(prompt, max_tokens, temperature)
pickle.dump(self.cache, open(self.cache_file, 'wb'))
return self.cache['create'][key]
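# Usage sketch for the caching wrapper (mirrors main() at the bottom of this
# file): repeated calls with identical arguments are served from the pickle
# cache instead of hitting the OpenAI API.
#
#   svc = TestAIService(OpenAIService(api_key='...'), 'babyagi_cache.pkl')
#   svc.get_ada_embedding('hello')  # first call goes to the API
#   svc.get_ada_embedding('hello')  # second call comes from the cache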
class PineconeService:
def __init__(self, api_key, environment, table_name, dimension, metric, pod_type):
self.table_name = table_name
pinecone.init(api_key=api_key, environment=environment)
if table_name not in pinecone.list_indexes():
pinecone.create_index(table_name, dimension=dimension, metric=metric, pod_type=pod_type)
self.index = pinecone.Index(table_name)
def query(self, query_embedding, top_k):
results = self.index.query(query_embedding, top_k=top_k, include_metadata=True)
sorted_results = sorted(results.matches, key=lambda x: x.score, reverse=True)
return [(str(item.metadata['task'])) for item in sorted_results]
def upsert(self, data):
self.index.upsert(data)
class LanceService:
def __init__(self, table_name, dimension):
self.db = lancedb.connect('.')
schema = pa.schema(
[
pa.field('result_id', pa.string()),
pa.field('vector', pa.list_(pa.float32(), dimension)),
pa.field('task', pa.string()),
pa.field('result', pa.string()), # TODO There is a fixed schema but we keep converting
]
)
        data = [{'result_id': '0', 'vector': [0.0] * dimension, 'task': 'asd', 'result': 'asd'}]  # result_id must be a string to match the schema above
self.table = self.db.create_table(table_name, mode='overwrite', data=data, schema=schema)
def query(self, query_embedding, top_k):
result = self.table.search(query_embedding).limit(top_k).to_df()
return [v for v in result['task']]
def upsert(self, data):
data = { # TODO This doesn't look good, why are we converting?
'result_id': data[0][0],
'vector': data[0][1],
'task': data[0][2]['task'],
'result': data[0][2]['result'],
}
self.table.add(pd.DataFrame([data]))
class BabyAGI:
def __init__(self, objective, ai_service, vector_service):
self.objective = objective
self.ai_service = ai_service
self.vector_service = vector_service
self.task_list = deque([])
def add_task(self, task):
self.task_list.append(task)
def task_creation_agent(self, result, task_description):
prompt = TASK_CREATION_PROMPT.format(
objective=self.objective,
result=result,
task_description=task_description,
task_list=', '.join([t['task_name'] for t in self.task_list]),
)
response = self.ai_service.create(prompt)
new_tasks = response.split('\n')
return [{'task_name': task_name} for task_name in new_tasks]
def prioritization_agent(self, this_task_id):
task_names = [t['task_name'] for t in self.task_list]
next_task_id = int(this_task_id) + 1
prompt = PRIORITIZATION_PROMPT.format(
task_names=task_names, objective=self.objective, next_task_id=next_task_id
)
response = self.ai_service.create(prompt, max_tokens=1000)
new_tasks = response.split('\n')
self.task_list = deque()
for task_string in new_tasks:
task_parts = task_string.strip().split('.', 1)
if len(task_parts) == 2:
task_id = task_parts[0].strip()
task_name = task_parts[1].strip()
self.task_list.append({'task_id': task_id, 'task_name': task_name})
def execution_agent(self, task) -> str:
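        # NOTE: context is retrieved below, but EXECUTION_PROMPT has no
        # {context} placeholder, so the retrieved context is never injected.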
context = self.context_agent(query=self.objective, n=5)
response = self.ai_service.create(
prompt=EXECUTION_PROMPT.format(objective=self.objective, task=task), max_tokens=2000, temperature=0.7
)
return response
def context_agent(self, query, n):
query_embedding = self.ai_service.get_ada_embedding(query)
return self.vector_service.query(query_embedding, n)
def run(self, first_task):
print(self.objective)
first_task = {'task_id': 1, 'task_name': first_task}
self.add_task(first_task)
task_id_counter = 1
for _ in range(4):
if self.task_list:
task = self.task_list.popleft()
print(task['task_name'])
result = self.execution_agent(task['task_name'])
print(result)
this_task_id = int(task['task_id'])
enriched_result = {'data': result}
result_id = f'result_{task["task_id"]}'
vector = enriched_result['data']
self.vector_service.upsert(
[
(
result_id,
self.ai_service.get_ada_embedding(vector),
{'task': task['task_name'], 'result': result},
)
]
)
new_tasks = self.task_creation_agent(enriched_result, task['task_name'])
for new_task in new_tasks:
task_id_counter += 1
new_task.update({'task_id': task_id_counter})
self.add_task(new_task)
self.prioritization_agent(this_task_id)
def main():
load_dotenv()
baby_agi = BabyAGI(
objective='Solve world hunger.',
ai_service=TestAIService(
ai_service=OpenAIService(api_key=os.getenv('OPENAI_API_KEY')),
cache_file='babyagi_cache.pkl',
),
vector_service=LanceService(
table_name='test-table',
dimension=1536,
)
# vector_service=PineconeService(
# api_key=os.getenv('PINECONE_API_KEY'),
# environment=os.getenv('PINECONE_ENVIRONMENT'),
# table_name='test-table',
# dimension=1536,
# metric='cosine',
# pod_type='p1',
# ),
)
baby_agi.run(first_task='Develop a task list.')
if __name__ == '__main__':
typer.run(main)
import openai
import os
import lancedb
import pickle
import requests
from pathlib import Path
from bs4 import BeautifulSoup
import re
from langchain.document_loaders import UnstructuredHTMLLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import LanceDB
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
# Function to fetch and save a page as an HTML file
def save_page(url, save_dir):
response = requests.get(url)
if response.status_code == 200:
soup = BeautifulSoup(response.content, 'html.parser')
title = soup.find('title').text
filename = f"{title}.html"
with open(os.path.join(save_dir, filename), 'w', encoding='utf-8') as file:
file.write(str(soup))
def get_document_title(document):
    m = str(document.metadata["source"])
    title = re.findall(r"(.*)\.html", m)
    if title:
        return title[0]
    return ''
# if "OPENAI_API_KEY" not in os.environ:
openai.api_key = "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"
assert len(openai.Model.list()["data"]) > 0
print("fetching data")
# Base URL of Wikivoyage
base_url = "https://en.wikivoyage.org/wiki/"
# List of page titles to download
page_titles = ["London", "Paris", "New_York_City"] # Add more as needed
# Directory to save the HTML files
save_directory = "./wikivoyage_pages"
# Create the save directory if it doesn't exist
if not os.path.exists(save_directory):
os.makedirs(save_directory)
# Loop through the page titles and download the pages
for title in page_titles:
url = f"{base_url}{title}"
save_page(url, save_directory)
docs_path = Path("cities.pkl")
docs = []
if not docs_path.exists():
for p in Path("./wikivoyage_pages").rglob("*.html"):
if p.is_dir():
continue
loader = UnstructuredHTMLLoader(p)
raw_document = loader.load()
m = {}
m["title"] = get_document_title(raw_document[0])
raw_document[0].metadata = raw_document[0].metadata | m
raw_document[0].metadata["source"] = str(raw_document[0].metadata["source"])
docs = docs + raw_document
with docs_path.open("wb") as fh:
pickle.dump(docs, fh)
else:
with docs_path.open("rb") as fh:
docs = pickle.load(fh)
#split text
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=500,
chunk_overlap=50,
)
documents = text_splitter.split_documents(docs)
embeddings = OpenAIEmbeddings(openai_api_key=openai.api_key)
db = lancedb.connect('/tmp/lancedb')
table = db.create_table("city_docs", data=[
{"vector": embeddings.embed_query("Hello World"), "text": "Hello World"}
], mode="overwrite")
print("generated embeddings!")
docsearch = LanceDB.from_documents(documents[5:], embeddings, connection=table)
qa = RetrievalQA.from_chain_type(llm=OpenAI(openai_api_key=openai.api_key), chain_type="stuff", retriever=docsearch.as_retriever())
query_file = open('query.pkl', 'wb')
pickle.dump(qa, query_file)
query_file.close()
print("returning query object") | [
"lancedb.connect"
] | [((1788, 1806), 'pathlib.Path', 'Path', (['"""cities.pkl"""'], {}), "('cities.pkl')\n", (1792, 1806), False, 'from pathlib import Path\n'), ((2462, 2526), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(50)'}), '(chunk_size=500, chunk_overlap=50)\n', (2492, 2526), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2601, 2692), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': '"""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"""'}), "(openai_api_key=\n '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')\n", (2617, 2692), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((2694, 2725), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (2709, 2725), False, 'import lancedb\n'), ((2913, 2980), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['documents[5:]', 'embeddings'], {'connection': 'table'}), '(documents[5:], embeddings, connection=table)\n', (2935, 2980), False, 'from langchain.vectorstores import LanceDB\n'), ((3190, 3217), 'pickle.dump', 'pickle.dump', (['qa', 'query_file'], {}), '(qa, query_file)\n', (3201, 3217), False, 'import pickle\n'), ((530, 547), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (542, 547), False, 'import requests\n'), ((931, 959), 're.findall', 're.findall', (['"""(.*)\\\\.html"""', 'm'], {}), "('(.*)\\\\.html', m)\n", (941, 959), False, 'import re\n'), ((1564, 1594), 'os.path.exists', 'os.path.exists', (['save_directory'], {}), '(save_directory)\n', (1578, 1594), False, 'import os\n'), ((1600, 1627), 'os.makedirs', 'os.makedirs', (['save_directory'], {}), '(save_directory)\n', (1611, 1627), False, 'import os\n'), ((599, 645), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.content', '"""html.parser"""'], {}), "(response.content, 'html.parser')\n", (612, 645), False, 'from bs4 import BeautifulSoup\n'), ((1963, 1988), 'langchain.document_loaders.UnstructuredHTMLLoader', 'UnstructuredHTMLLoader', (['p'], {}), '(p)\n', (1985, 1988), False, 'from langchain.document_loaders import UnstructuredHTMLLoader\n'), ((2337, 2358), 'pickle.dump', 'pickle.dump', (['docs', 'fh'], {}), '(docs, fh)\n', (2348, 2358), False, 'import pickle\n'), ((2417, 2432), 'pickle.load', 'pickle.load', (['fh'], {}), '(fh)\n', (2428, 2432), False, 'import pickle\n'), ((3018, 3094), 'langchain.llms.OpenAI', 'OpenAI', ([], {'openai_api_key': '"""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"""'}), "(openai_api_key='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')\n", (3024, 3094), False, 'from langchain.llms import OpenAI\n'), ((1199, 1218), 'openai.Model.list', 'openai.Model.list', ([], {}), '()\n', (1216, 1218), False, 'import openai\n'), ((1858, 1884), 'pathlib.Path', 'Path', (['"""./wikivoyage_pages"""'], {}), "('./wikivoyage_pages')\n", (1862, 1884), False, 'from pathlib import Path\n'), ((739, 771), 'os.path.join', 'os.path.join', (['save_dir', 'filename'], {}), '(save_dir, filename)\n', (751, 771), False, 'import os\n')] |
from flask import Flask, render_template, jsonify, request
from scripts.mock_llm_api import llm_api
import lancedb
import pandas as pd
uri = "data/lancedb"
db = lancedb.connect(uri)
# Set initial entries in items vector database
def _reset_tables():
items = ['Fire', 'Earth', 'Water', 'Wind']
descriptions = ["Strength: 10\nCost: 300\nCategory: Element",
"Strength: 7\nCost: 100\nCategory: Element",
"Strength: 3\nCost: 50\nCategory: Element",
"Strength: 1\nCost: 10\nCategory: Element"
]
vectors = [llm_api.embedding_request(item) for item in items]
df = pd.DataFrame({"item": items, "vector": vectors, "description": descriptions})
db.create_table("items", mode="overwrite", data=df)
if not db.table_names():
print("No DB set up, creating initial tables")
_reset_tables()
app = Flask(__name__)
@app.route("/")
def hello_world():
table = db.open_table("items")
return render_template('index.html', items=table.to_pandas()['item'].values.tolist())
@app.route('/generate')
def generate():
item_1 = request.args.get('item_1', type=str)
item_2 = request.args.get('item_2', type=str)
# Generate prompt embedding
with open("prompt_templates/basic_embedding_prompt.txt") as embedding_prompt_file:
embedding_prompt_template = embedding_prompt_file.read().strip()
embeddding_prompt = embedding_prompt_template.format(item_1=item_1, item_2=item_2)
prompt_embedding = llm_api.embedding_request(embeddding_prompt)
# Generate combination
with open("prompt_templates/basic_prompt.txt") as prompt_file:
prompt_template = prompt_file.read().strip()
prompt = prompt_template.format(item_1=item_1, item_2=item_2)
combination = llm_api.completion_request(prompt, max_tokens=30)
# Generate description for combination
with open("prompt_templates/basic_description_prompt.txt") as description_prompt_file:
prompt_template = description_prompt_file.read().strip()
prompt = prompt_template.format(item=combination)
description = llm_api.completion_request(prompt, max_tokens=100)
# Add result to vector database
table = db.open_table("items")
table.add(pd.DataFrame([{"item": combination, "vector": llm_api.embedding_request(combination), "description": description}]))
return jsonify(result=combination)
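# Example interaction with the /generate endpoint (a sketch; both item names
# must already exist in the "items" table, and the port assumes Flask's
# default development server):
#
#   import requests
#   r = requests.get(
#       "http://localhost:5000/generate",
#       params={"item_1": "Fire", "item_2": "Water"},
#   )
#   print(r.json()["result"])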
@app.route('/get_description')
def get_description():
item = request.args.get('item', type=str)
table = db.open_table("items")
# Get description
# TODO: Important! This retrieves the whole database, which is very inefficient
# You may be tempted to do something like
# df = table.search(llm_api.embedding_request(item)).limit(1).to_df()
# description = df['description'].values.tolist()[0]
# instead. However, LanceDB is a bit unstable and will occasionally crash if you do this
# The ideal setup would be to have a standard SQL database for this lookup
df = table.to_pandas()
description = df[df["item"] == item]["description"].values[0]
return jsonify(result=description)
import logging
import json
import gradio as gr
import numpy as np
import lancedb
import os
from huggingface_hub import AsyncInferenceClient
# Setting up the logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# db
TABLE_NAME = "docs"
TEXT_COLUMN = "text"
BATCH_SIZE = int(os.getenv("BATCH_SIZE"))
NPROBES = int(os.getenv("NPROBES"))
REFINE_FACTOR = int(os.getenv("REFINE_FACTOR"))
retriever = AsyncInferenceClient(model=os.getenv("EMBED_URL") + "/embed")
reranker = AsyncInferenceClient(model=os.getenv("RERANK_URL") + "/rerank")
db = lancedb.connect("/usr/src/.lancedb")
tbl = db.open_table(TABLE_NAME)
async def retrieve(query: str, k: int) -> list[str]:
"""
Retrieve top k items with RETRIEVER
"""
resp = await retriever.post(
json={
"inputs": query,
"truncate": True
}
)
try:
query_vec = json.loads(resp)[0]
    except Exception:  # surface the raw embedding-server response if it is not valid JSON
        raise gr.Error(resp.decode())
documents = tbl.search(
query=query_vec
).nprobes(NPROBES).refine_factor(REFINE_FACTOR).limit(k).to_list()
documents = [doc[TEXT_COLUMN] for doc in documents]
return documents
async def rerank(query: str, documents: list[str], k: int) -> list[str]:
"""
Rerank items returned by RETRIEVER and return top k
"""
scores = []
for i in range(int(np.ceil(len(documents) / BATCH_SIZE))):
resp = await reranker.post(
json={
"query": query,
"texts": documents[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
"truncate": True
}
)
try:
batch_scores = json.loads(resp)
batch_scores = [s["score"] for s in batch_scores]
scores.extend(batch_scores)
        except Exception:  # surface the raw reranker response if it is not valid JSON
            raise gr.Error(resp.decode())
    # keep the k highest-scoring documents (note: sorted() leaves them in ascending-score order)
    documents = [doc for _, doc in sorted(zip(scores, documents))[-k:]]
    return documents
| [
"lancedb.connect"
] | [((167, 206), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (186, 206), False, 'import logging\n'), ((216, 243), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (233, 243), False, 'import logging\n'), ((573, 609), 'lancedb.connect', 'lancedb.connect', (['"""/usr/src/.lancedb"""'], {}), "('/usr/src/.lancedb')\n", (588, 609), False, 'import lancedb\n'), ((308, 331), 'os.getenv', 'os.getenv', (['"""BATCH_SIZE"""'], {}), "('BATCH_SIZE')\n", (317, 331), False, 'import os\n'), ((347, 367), 'os.getenv', 'os.getenv', (['"""NPROBES"""'], {}), "('NPROBES')\n", (356, 367), False, 'import os\n'), ((389, 415), 'os.getenv', 'os.getenv', (['"""REFINE_FACTOR"""'], {}), "('REFINE_FACTOR')\n", (398, 415), False, 'import os\n'), ((457, 479), 'os.getenv', 'os.getenv', (['"""EMBED_URL"""'], {}), "('EMBED_URL')\n", (466, 479), False, 'import os\n'), ((530, 553), 'os.getenv', 'os.getenv', (['"""RERANK_URL"""'], {}), "('RERANK_URL')\n", (539, 553), False, 'import os\n'), ((904, 920), 'json.loads', 'json.loads', (['resp'], {}), '(resp)\n', (914, 920), False, 'import json\n'), ((1663, 1679), 'json.loads', 'json.loads', (['resp'], {}), '(resp)\n', (1673, 1679), False, 'import json\n')] |
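The record defines the two retrieval stages but stops before exposing them; a minimal sketch of how they could be chained behind a Gradio interface (the over-fetch factor of 20 and the single-textbox layout are assumptions, not part of the original).

async def search(query: str) -> str:
    # Over-fetch from the vector index, then let the reranker keep the best 5
    candidates = await retrieve(query, k=20)
    top = await rerank(query, candidates, k=5)
    return "\n\n---\n\n".join(top)

demo = gr.Interface(fn=search, inputs="text", outputs="text")  # Gradio accepts async fns
demo.launch()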
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import os
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import LanceDB
from langchain.embeddings import BedrockEmbeddings
from langchain.document_loaders import PyPDFDirectoryLoader
import lancedb as ldb
import pyarrow as pa
embeddings = BedrockEmbeddings()
# we split the data into chunks of 1,000 characters, with an overlap of
# 200 characters between chunks; the overlap improves retrieval quality
# because it preserves context across chunk boundaries
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
db = ldb.connect('/tmp/embeddings')
schema = pa.schema(
[
pa.field("vector", pa.list_(pa.float32(), 1536)), # document vector with 1.5k dimensions (TitanEmbedding)
pa.field("text", pa.string()), # langchain requires it
pa.field("id", pa.string()) # langchain requires it
])
tbl = db.create_table("doc_table", schema=schema)
# load the document as before
loader = PyPDFDirectoryLoader("./docs/")
docs = loader.load()
docs = text_splitter.split_documents(docs)
LanceDB.from_documents(docs, embeddings, connection=tbl)
| [
"lancedb.connect"
] | [((384, 403), 'langchain.embeddings.BedrockEmbeddings', 'BedrockEmbeddings', ([], {}), '()\n', (401, 403), False, 'from langchain.embeddings import BedrockEmbeddings\n'), ((625, 682), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(200)'}), '(chunk_size=1000, chunk_overlap=200)\n', (646, 682), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((689, 719), 'lancedb.connect', 'ldb.connect', (['"""/tmp/embeddings"""'], {}), "('/tmp/embeddings')\n", (700, 719), True, 'import lancedb as ldb\n'), ((1073, 1104), 'langchain.document_loaders.PyPDFDirectoryLoader', 'PyPDFDirectoryLoader', (['"""./docs/"""'], {}), "('./docs/')\n", (1093, 1104), False, 'from langchain.document_loaders import PyPDFDirectoryLoader\n'), ((1171, 1227), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['docs', 'embeddings'], {'connection': 'tbl'}), '(docs, embeddings, connection=tbl)\n', (1193, 1227), False, 'from langchain.vectorstores import LanceDB\n'), ((880, 891), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (889, 891), True, 'import pyarrow as pa\n'), ((939, 950), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (948, 950), True, 'import pyarrow as pa\n'), ((779, 791), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (789, 791), True, 'import pyarrow as pa\n')] |
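The sample writes embeddings but never reads them back. Assuming the LangChain wrapper's constructor accepts the already-open table (as its from_documents call above does), a follow-up similarity search could look like this; the query string is illustrative.

# Wrap the populated table without re-inserting the documents
vectorstore = LanceDB(connection=tbl, embedding=embeddings)
results = vectorstore.similarity_search("What does the document describe?", k=3)
for doc in results:
    print(doc.page_content[:200])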
# Ultralytics YOLO 🚀, AGPL-3.0 license
from io import BytesIO
from pathlib import Path
from typing import Any, List, Tuple, Union
import cv2
import numpy as np
import torch
from PIL import Image
from matplotlib import pyplot as plt
from pandas import DataFrame
from tqdm import tqdm
from engine.data.augment import Format
from engine.data.dataset import YOLODataset
from engine.data.utils import check_det_dataset
from engine.models.yolo.model import YOLO
from engine.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR
from .utils import get_sim_index_schema, get_table_schema, plot_query_result, prompt_sql_query, sanitize_batch
class ExplorerDataset(YOLODataset):
def __init__(self, *args, data: dict = None, **kwargs) -> None:
super().__init__(*args, data=data, **kwargs)
def load_image(self, i: int) -> Union[Tuple[np.ndarray, Tuple[int, int], Tuple[int, int]], Tuple[None, None, None]]:
"""Loads 1 image from dataset index 'i' without any resize ops."""
im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i]
if im is None: # not cached in RAM
if fn.exists(): # load npy
im = np.load(fn)
else: # read image
im = cv2.imread(f) # BGR
if im is None:
raise FileNotFoundError(f"Image Not Found {f}")
h0, w0 = im.shape[:2] # orig hw
return im, (h0, w0), im.shape[:2]
return self.ims[i], self.im_hw0[i], self.im_hw[i]
def build_transforms(self, hyp: IterableSimpleNamespace = None):
"""Creates transforms for dataset images without resizing."""
return Format(
bbox_format="xyxy",
normalize=False,
return_mask=self.use_segments,
return_keypoint=self.use_keypoints,
batch_idx=True,
mask_ratio=hyp.mask_ratio,
mask_overlap=hyp.overlap_mask,
)
class Explorer:
def __init__(
self,
data: Union[str, Path] = "coco128.yaml",
model: str = "yolov8n.pt",
uri: str = USER_CONFIG_DIR / "explorer",
) -> None:
# Note duckdb==0.10.0 bug https://github.com/ultralytics/ultralytics/pull/8181
checks.check_requirements(["lancedb>=0.4.3", "duckdb<=0.9.2"])
import lancedb
self.connection = lancedb.connect(uri)
self.table_name = Path(data).name.lower() + "_" + model.lower()
self.sim_idx_base_name = (
f"{self.table_name}_sim_idx".lower()
) # Use this name and append thres and top_k to reuse the table
self.model = YOLO(model)
self.data = data # None
self.choice_set = None
self.table = None
self.progress = 0
def create_embeddings_table(self, force: bool = False, split: str = "train") -> None:
"""
Create LanceDB table containing the embeddings of the images in the dataset. The table will be reused if it
already exists. Pass force=True to overwrite the existing table.
Args:
force (bool): Whether to overwrite the existing table or not. Defaults to False.
split (str): Split of the dataset to use. Defaults to 'train'.
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
```
"""
if self.table is not None and not force:
LOGGER.info("Table already exists. Reusing it. Pass force=True to overwrite it.")
return
if self.table_name in self.connection.table_names() and not force:
LOGGER.info(f"Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.")
self.table = self.connection.open_table(self.table_name)
self.progress = 1
return
if self.data is None:
raise ValueError("Data must be provided to create embeddings table")
data_info = check_det_dataset(self.data)
if split not in data_info:
raise ValueError(
f"Split {split} is not found in the dataset. Available keys in the dataset are {list(data_info.keys())}"
)
choice_set = data_info[split]
choice_set = choice_set if isinstance(choice_set, list) else [choice_set]
self.choice_set = choice_set
dataset = ExplorerDataset(img_path=choice_set, data=data_info, augment=False, cache=False, task=self.model.task)
# Create the table schema
batch = dataset[0]
vector_size = self.model.embed(batch["im_file"], verbose=False)[0].shape[0]
table = self.connection.create_table(self.table_name, schema=get_table_schema(vector_size), mode="overwrite")
table.add(
self._yield_batches(
dataset,
data_info,
self.model,
exclude_keys=["img", "ratio_pad", "resized_shape", "ori_shape", "batch_idx"],
)
)
self.table = table
def _yield_batches(self, dataset: ExplorerDataset, data_info: dict, model: YOLO, exclude_keys: List[str]):
"""Generates batches of data for embedding, excluding specified keys."""
for i in tqdm(range(len(dataset))):
self.progress = float(i + 1) / len(dataset)
batch = dataset[i]
for k in exclude_keys:
batch.pop(k, None)
batch = sanitize_batch(batch, data_info)
batch["vector"] = model.embed(batch["im_file"], verbose=False)[0].detach().tolist()
yield [batch]
def query(
self, imgs: Union[str, np.ndarray, List[str], List[np.ndarray]] = None, limit: int = 25
) -> Any: # pyarrow.Table
"""
Query the table for similar images. Accepts a single image or a list of images.
Args:
imgs (str or list): Path to the image or a list of paths to the images.
limit (int): Number of results to return.
Returns:
(pyarrow.Table): An arrow table containing the results. Supports converting to:
- pandas dataframe: `result.to_pandas()`
- dict of lists: `result.to_pydict()`
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
similar = exp.query(img='https://ultralytics.com/images/zidane.jpg')
```
"""
if self.table is None:
raise ValueError("Table is not created. Please create the table first.")
if isinstance(imgs, str):
imgs = [imgs]
assert isinstance(imgs, list), f"img must be a string or a list of strings. Got {type(imgs)}"
embeds = self.model.embed(imgs)
# Get avg if multiple images are passed (len > 1)
embeds = torch.mean(torch.stack(embeds), 0).cpu().numpy() if len(embeds) > 1 else embeds[0].cpu().numpy()
return self.table.search(embeds).limit(limit).to_arrow()
def sql_query(
self, query: str, return_type: str = "pandas"
) -> Union[DataFrame, Any, None]: # pandas.dataframe or pyarrow.Table
"""
Run a SQL-Like query on the table. Utilizes LanceDB predicate pushdown.
Args:
query (str): SQL query to run.
return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'.
Returns:
(pyarrow.Table): An arrow table containing the results.
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'"
result = exp.sql_query(query)
```
"""
assert return_type in {
"pandas",
"arrow",
}, f"Return type should be either `pandas` or `arrow`, but got {return_type}"
import duckdb
if self.table is None:
raise ValueError("Table is not created. Please create the table first.")
# Note: using filter pushdown would be a better long term solution. Temporarily using duckdb for this.
table = self.table.to_arrow() # noqa NOTE: Don't comment this. This line is used by DuckDB
if not query.startswith("SELECT") and not query.startswith("WHERE"):
raise ValueError(
f"Query must start with SELECT or WHERE. You can either pass the entire query or just the WHERE clause. found {query}"
)
if query.startswith("WHERE"):
query = f"SELECT * FROM 'table' {query}"
LOGGER.info(f"Running query: {query}")
rs = duckdb.sql(query)
if return_type == "arrow":
return rs.arrow()
elif return_type == "pandas":
return rs.df()
def plot_sql_query(self, query: str, labels: bool = True) -> Image.Image:
"""
Plot the results of a SQL-Like query on the table.
Args:
query (str): SQL query to run.
labels (bool): Whether to plot the labels or not.
Returns:
(PIL.Image): Image containing the plot.
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'"
result = exp.plot_sql_query(query)
```
"""
result = self.sql_query(query, return_type="arrow")
if len(result) == 0:
LOGGER.info("No results found.")
return None
img = plot_query_result(result, plot_labels=labels)
return Image.fromarray(img)
def get_similar(
self,
img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None,
idx: Union[int, List[int]] = None,
limit: int = 25,
return_type: str = "pandas",
) -> Union[DataFrame, Any]: # pandas.dataframe or pyarrow.Table
"""
Query the table for similar images. Accepts a single image or a list of images.
Args:
img (str or list): Path to the image or a list of paths to the images.
idx (int or list): Index of the image in the table or a list of indexes.
limit (int): Number of results to return. Defaults to 25.
return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'.
Returns:
(pandas.DataFrame): A dataframe containing the results.
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
similar = exp.get_similar(img='https://ultralytics.com/images/zidane.jpg')
```
"""
assert return_type in {
"pandas",
"arrow",
}, f"Return type should be either `pandas` or `arrow`, but got {return_type}"
img = self._check_imgs_or_idxs(img, idx)
similar = self.query(img, limit=limit)
if return_type == "arrow":
return similar
elif return_type == "pandas":
return similar.to_pandas()
def plot_similar(
self,
img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None,
idx: Union[int, List[int]] = None,
limit: int = 25,
labels: bool = True,
) -> Image.Image:
"""
Plot the similar images. Accepts images or indexes.
Args:
img (str or list): Path to the image or a list of paths to the images.
idx (int or list): Index of the image in the table or a list of indexes.
labels (bool): Whether to plot the labels or not.
limit (int): Number of results to return. Defaults to 25.
Returns:
(PIL.Image): Image containing the plot.
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
similar = exp.plot_similar(img='https://ultralytics.com/images/zidane.jpg')
```
"""
similar = self.get_similar(img, idx, limit, return_type="arrow")
if len(similar) == 0:
LOGGER.info("No results found.")
return None
img = plot_query_result(similar, plot_labels=labels)
return Image.fromarray(img)
def similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> DataFrame:
"""
Calculate the similarity index of all the images in the table. Here, the index will contain the data points that
are max_dist or closer to the image in the embedding space at a given index.
Args:
max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2.
top_k (float): Percentage of the closest data points to consider when counting. Used to apply limit when running
                vector search. Defaults to None.
            force (bool): Whether to overwrite the existing similarity index or not. Defaults to False.
Returns:
(pandas.DataFrame): A dataframe containing the similarity index. Each row corresponds to an image, and columns
include indices of similar images and their respective distances.
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
sim_idx = exp.similarity_index()
```
"""
if self.table is None:
raise ValueError("Table is not created. Please create the table first.")
sim_idx_table_name = f"{self.sim_idx_base_name}_thres_{max_dist}_top_{top_k}".lower()
if sim_idx_table_name in self.connection.table_names() and not force:
LOGGER.info("Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.")
return self.connection.open_table(sim_idx_table_name).to_pandas()
if top_k and not (1.0 >= top_k >= 0.0):
raise ValueError(f"top_k must be between 0.0 and 1.0. Got {top_k}")
if max_dist < 0.0:
raise ValueError(f"max_dist must be greater than 0. Got {max_dist}")
top_k = int(top_k * len(self.table)) if top_k else len(self.table)
top_k = max(top_k, 1)
features = self.table.to_lance().to_table(columns=["vector", "im_file"]).to_pydict()
im_files = features["im_file"]
embeddings = features["vector"]
sim_table = self.connection.create_table(sim_idx_table_name, schema=get_sim_index_schema(), mode="overwrite")
def _yield_sim_idx():
"""Generates a dataframe with similarity indices and distances for images."""
for i in tqdm(range(len(embeddings))):
sim_idx = self.table.search(embeddings[i]).limit(top_k).to_pandas().query(f"_distance <= {max_dist}")
yield [
{
"idx": i,
"im_file": im_files[i],
"count": len(sim_idx),
"sim_im_files": sim_idx["im_file"].tolist(),
}
]
sim_table.add(_yield_sim_idx())
self.sim_index = sim_table
return sim_table.to_pandas()
def plot_similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> Image:
"""
Plot the similarity index of all the images in the table. Here, the index will contain the data points that are
max_dist or closer to the image in the embedding space at a given index.
Args:
max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2.
top_k (float): Percentage of closest data points to consider when counting. Used to apply limit when
                running vector search. Defaults to None.
            force (bool): Whether to overwrite the existing similarity index or not. Defaults to False.
Returns:
(PIL.Image): Image containing the plot.
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
similarity_idx_plot = exp.plot_similarity_index()
similarity_idx_plot.show() # view image preview
similarity_idx_plot.save('path/to/save/similarity_index_plot.png') # save contents to file
```
"""
sim_idx = self.similarity_index(max_dist=max_dist, top_k=top_k, force=force)
sim_count = sim_idx["count"].tolist()
sim_count = np.array(sim_count)
indices = np.arange(len(sim_count))
# Create the bar plot
plt.bar(indices, sim_count)
# Customize the plot (optional)
plt.xlabel("data idx")
plt.ylabel("Count")
plt.title("Similarity Count")
buffer = BytesIO()
plt.savefig(buffer, format="png")
buffer.seek(0)
# Use Pillow to open the image from the buffer
return Image.fromarray(np.array(Image.open(buffer)))
def _check_imgs_or_idxs(
self, img: Union[str, np.ndarray, List[str], List[np.ndarray], None], idx: Union[None, int, List[int]]
) -> List[np.ndarray]:
if img is None and idx is None:
raise ValueError("Either img or idx must be provided.")
if img is not None and idx is not None:
raise ValueError("Only one of img or idx must be provided.")
if idx is not None:
idx = idx if isinstance(idx, list) else [idx]
img = self.table.to_lance().take(idx, columns=["im_file"]).to_pydict()["im_file"]
return img if isinstance(img, list) else [img]
def ask_ai(self, query):
"""
Ask AI a question.
Args:
query (str): Question to ask.
Returns:
(pandas.DataFrame): A dataframe containing filtered results to the SQL query.
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
answer = exp.ask_ai('Show images with 1 person and 2 dogs')
```
"""
result = prompt_sql_query(query)
try:
df = self.sql_query(result)
except Exception as e:
LOGGER.error("AI generated query is not valid. Please try again with a different prompt")
LOGGER.error(e)
return None
return df
def visualize(self, result):
"""
Visualize the results of a query. TODO.
Args:
result (pyarrow.Table): Table containing the results of a query.
"""
pass
def generate_report(self, result):
"""
Generate a report of the dataset.
TODO
"""
pass
| [
"lancedb.connect"
] | [((1672, 1865), 'engine.data.augment.Format', 'Format', ([], {'bbox_format': '"""xyxy"""', 'normalize': '(False)', 'return_mask': 'self.use_segments', 'return_keypoint': 'self.use_keypoints', 'batch_idx': '(True)', 'mask_ratio': 'hyp.mask_ratio', 'mask_overlap': 'hyp.overlap_mask'}), "(bbox_format='xyxy', normalize=False, return_mask=self.use_segments,\n return_keypoint=self.use_keypoints, batch_idx=True, mask_ratio=hyp.\n mask_ratio, mask_overlap=hyp.overlap_mask)\n", (1678, 1865), False, 'from engine.data.augment import Format\n'), ((2245, 2307), 'engine.utils.checks.check_requirements', 'checks.check_requirements', (["['lancedb>=0.4.3', 'duckdb<=0.9.2']"], {}), "(['lancedb>=0.4.3', 'duckdb<=0.9.2'])\n", (2270, 2307), False, 'from engine.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((2358, 2378), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2373, 2378), False, 'import lancedb\n'), ((2629, 2640), 'engine.models.yolo.model.YOLO', 'YOLO', (['model'], {}), '(model)\n', (2633, 2640), False, 'from engine.models.yolo.model import YOLO\n'), ((3972, 4000), 'engine.data.utils.check_det_dataset', 'check_det_dataset', (['self.data'], {}), '(self.data)\n', (3989, 4000), False, 'from engine.data.utils import check_det_dataset\n'), ((8607, 8645), 'engine.utils.LOGGER.info', 'LOGGER.info', (['f"""Running query: {query}"""'], {}), "(f'Running query: {query}')\n", (8618, 8645), False, 'from engine.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((8660, 8677), 'duckdb.sql', 'duckdb.sql', (['query'], {}), '(query)\n', (8670, 8677), False, 'import duckdb\n'), ((9639, 9659), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (9654, 9659), False, 'from PIL import Image\n'), ((12284, 12304), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (12299, 12304), False, 'from PIL import Image\n'), ((16556, 16575), 'numpy.array', 'np.array', (['sim_count'], {}), '(sim_count)\n', (16564, 16575), True, 'import numpy as np\n'), ((16660, 16687), 'matplotlib.pyplot.bar', 'plt.bar', (['indices', 'sim_count'], {}), '(indices, sim_count)\n', (16667, 16687), True, 'from matplotlib import pyplot as plt\n'), ((16737, 16759), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""data idx"""'], {}), "('data idx')\n", (16747, 16759), True, 'from matplotlib import pyplot as plt\n'), ((16768, 16787), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (16778, 16787), True, 'from matplotlib import pyplot as plt\n'), ((16796, 16825), 'matplotlib.pyplot.title', 'plt.title', (['"""Similarity Count"""'], {}), "('Similarity Count')\n", (16805, 16825), True, 'from matplotlib import pyplot as plt\n'), ((16843, 16852), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (16850, 16852), False, 'from io import BytesIO\n'), ((16861, 16894), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buffer'], {'format': '"""png"""'}), "(buffer, format='png')\n", (16872, 16894), True, 'from matplotlib import pyplot as plt\n'), ((3433, 3519), 'engine.utils.LOGGER.info', 'LOGGER.info', (['"""Table already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n 'Table already exists. Reusing it. Pass force=True to overwrite it.')\n", (3444, 3519), False, 'from engine.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((3621, 3731), 'engine.utils.LOGGER.info', 'LOGGER.info', (['f"""Table {self.table_name} already exists. Reusing it. 
Pass force=True to overwrite it."""'], {}), "(\n f'Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.'\n )\n", (3632, 3731), False, 'from engine.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((9507, 9539), 'engine.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (9518, 9539), False, 'from engine.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((12151, 12183), 'engine.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (12162, 12183), False, 'from engine.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((13761, 13864), 'engine.utils.LOGGER.info', 'LOGGER.info', (['"""Similarity matrix already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n 'Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.'\n )\n", (13772, 13864), False, 'from engine.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((1182, 1193), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (1189, 1193), True, 'import numpy as np\n'), ((1247, 1260), 'cv2.imread', 'cv2.imread', (['f'], {}), '(f)\n', (1257, 1260), False, 'import cv2\n'), ((17014, 17032), 'PIL.Image.open', 'Image.open', (['buffer'], {}), '(buffer)\n', (17024, 17032), False, 'from PIL import Image\n'), ((18250, 18349), 'engine.utils.LOGGER.error', 'LOGGER.error', (['"""AI generated query is not valid. Please try again with a different prompt"""'], {}), "(\n 'AI generated query is not valid. Please try again with a different prompt'\n )\n", (18262, 18349), False, 'from engine.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((18352, 18367), 'engine.utils.LOGGER.error', 'LOGGER.error', (['e'], {}), '(e)\n', (18364, 18367), False, 'from engine.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((2405, 2415), 'pathlib.Path', 'Path', (['data'], {}), '(data)\n', (2409, 2415), False, 'from pathlib import Path\n'), ((6832, 6851), 'torch.stack', 'torch.stack', (['embeds'], {}), '(embeds)\n', (6843, 6851), False, 'import torch\n')] |
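The docstrings above show each method in isolation; stitched together, a typical session with this Explorer variant looks like the sketch below (the arguments are the class defaults and the example image URL comes from the docstrings themselves).

exp = Explorer(data="coco128.yaml", model="yolov8n.pt")
exp.create_embeddings_table()

# Vector search with an example image
similar = exp.get_similar(img="https://ultralytics.com/images/zidane.jpg", limit=10)
print(similar.head())

# SQL-style filtering over the same table via predicate pushdown
df = exp.sql_query("SELECT * FROM 'table' WHERE labels LIKE '%person%'")
print(len(df))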
import pyarrow as pa
from typing import Union
from dryg.settings import DB_URI
import lancedb
def connection() -> lancedb.LanceDBConnection:
"""
Connect to the database
Returns:
lancedb.LanceDBConnection: LanceDBConnection object
"""
db = lancedb.connect(DB_URI)
return db
def open_table(table_name: str) -> Union[lancedb.table.LanceTable, None]:
"""
Open a table from the database
Args:
table_name (str): Name of the table
Returns:
lancedb.table.LanceTable: LanceTable object
"""
db = connection()
try:
table = db.open_table(table_name) if table_name in db.table_names() else None
return table
except ValueError:
return None
def create_table(table_name: str, table: pa.Table, mode: str = "overwrite") -> lancedb.LanceDBConnection:
"""
Create a table in the database
Args:
table_name (str): Name of the table
table (pa.Table): Table to be created
mode (str, optional): Mode to use when creating the table. Defaults to "overwrite".
Returns:
lancedb.LanceDBConnection: LanceDBConnection object
"""
db = connection()
db.create_table(table_name, table, mode=mode)
return db
| [
"lancedb.connect"
] | [((271, 294), 'lancedb.connect', 'lancedb.connect', (['DB_URI'], {}), '(DB_URI)\n', (286, 294), False, 'import lancedb\n')] |
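A minimal round trip through the helpers above; the table name and schema are illustrative (create_table accepts any pyarrow Table as its payload).

import pyarrow as pa

data = pa.table({
    "id": [1, 2],
    "vector": [[0.1, 0.2], [0.3, 0.4]],
    "text": ["first doc", "second doc"],
})
create_table("docs", data)  # mode="overwrite" by default
tbl = open_table("docs")
if tbl is not None:
    print(tbl.to_pandas())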
import os
import typer
import pickle
import pandas as pd
from dotenv import load_dotenv
import openai
import pinecone
import lancedb
import pyarrow as pa
from collections import deque
TASK_CREATION_PROMPT = """
You are a task creation AI that uses the result of an execution agent to create new tasks with the following objective:
{objective}, The last completed task has the result: {result}. This result was based on this task description: {task_description}.
These are incomplete tasks: {task_list}. Based on the result, create new tasks to be completed by the AI system that
do not overlap with incomplete tasks. Return the tasks as an array."""
PRIORITIZATION_PROMPT = """
You are a task prioritization AI tasked with cleaning the formatting of and reprioritizing
the following tasks: {task_names}. Consider the ultimate objective of your team:{objective}. Do not remove any tasks.
Return the result as a numbered list, like:
#. First task
#. Second task
Start the task list with number {next_task_id}."""
EXECUTION_PROMPT = """
You are an AI who performs one task based on the following objective: {objective}. Your task: {task}\nResponse:
"""
class Task:
def __init__(self, name, id=None, result=None, vector=None):
self.name = name
self.id = id
self.result = result
self.vector = vector
class OpenAIService:
def __init__(self, api_key):
openai.api_key = api_key
def get_ada_embedding(self, text):
return openai.Embedding.create(input=[text.replace('\n', ' ')], model='text-embedding-ada-002')['data'][0][
'embedding'
]
def create(self, prompt, max_tokens=100, temperature=0.5):
return (
openai.Completion.create(
engine='text-davinci-003',
prompt=prompt,
temperature=temperature,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
.choices[0]
.text.strip()
)
class TestAIService:
def __init__(self, ai_service, cache_file):
self.ai_service = ai_service
self.cache_file = cache_file
if os.path.isfile(cache_file):
self.cache = pickle.load(open(cache_file, 'rb'))
else:
self.cache = {'ada': {}, 'create': {}}
pickle.dump(self.cache, open(cache_file, 'wb'))
def get_ada_embedding(self, text):
if text not in self.cache['ada']:
self.cache['ada'][text] = self.ai_service.get_ada_embedding(text)
pickle.dump(self.cache, open(self.cache_file, 'wb'))
return self.cache['ada'][text]
def create(self, prompt, max_tokens=100, temperature=0.5):
key = (prompt, max_tokens, temperature)
if key not in self.cache['create']:
self.cache['create'][key] = self.ai_service.create(prompt, max_tokens, temperature)
pickle.dump(self.cache, open(self.cache_file, 'wb'))
return self.cache['create'][key]
class PineconeService:
def __init__(self, api_key, environment, table_name, dimension, metric, pod_type):
self.table_name = table_name
pinecone.init(api_key=api_key, environment=environment)
if table_name not in pinecone.list_indexes():
pinecone.create_index(table_name, dimension=dimension, metric=metric, pod_type=pod_type)
self.index = pinecone.Index(table_name)
def query(self, query_embedding, top_k):
results = self.index.query(query_embedding, top_k=top_k, include_metadata=True)
sorted_results = sorted(results.matches, key=lambda x: x.score, reverse=True)
return [Task(**item.metadata) for item in sorted_results]
def upsert(self, task):
self.index.upsert([(task.id, task.vector, task.__dict__)])
class LanceService:
def __init__(self, table_name, dimension):
self.db = lancedb.connect('.')
schema = pa.schema(
[
pa.field('id', pa.int32()),
pa.field('vector', pa.list_(pa.float32(), dimension)),
pa.field('name', pa.string()),
pa.field('result', pa.string()), # TODO There is a fixed schema but we keep converting
]
)
data = [{'id': 0, 'vector': [0.0] * dimension, 'name': 'asd', 'result': 'asd'}]
self.table = self.db.create_table(table_name, mode='overwrite', data=data, schema=schema)
def query(self, query_embedding, top_k):
result = self.table.search(query_embedding).limit(top_k).to_df().drop(columns=['score'])
return [Task(**v) for v in result.to_dict(orient="records")]
def upsert(self, task):
self.table.add(pd.DataFrame([task.__dict__]))
class BabyAGI:
def __init__(self, objective, ai_service, vector_service):
self.ai_service = ai_service
self.vector_service = vector_service
self.objective = objective
self.objective_embedding = self.ai_service.get_ada_embedding(self.objective)
self.task_list = deque([])
def add_task(self, task):
if task.id is None:
task.id = max([t.id for t in self.task_list], default=0) + 1
self.task_list.append(task)
def task_creation_agent(self, task):
prompt = TASK_CREATION_PROMPT.format(
objective=self.objective,
result=task.result,
task_description=task.name,
task_list=', '.join([t.name for t in self.task_list]),
)
return [{'task_name': task_name} for task_name in self.ai_service.create(prompt).split('\n')]
def prioritization_agent(self, this_task_id):
def to_task(value):
parts = value.strip().split('.', 1)
if len(parts) != 2:
return None
return Task(id=int(parts[0].strip()), name=parts[1].strip())
prompt = PRIORITIZATION_PROMPT.format(
task_names=', '.join([t.name for t in self.task_list]),
objective=self.objective,
next_task_id=int(this_task_id) + 1,
)
new_tasks = self.ai_service.create(prompt, max_tokens=1000)
self.task_list = deque([to_task(v) for v in new_tasks.split('\n') if to_task(v) is not None])
def run(self, first_task):
self.add_task(Task(name=first_task))
for _ in range(4):
if self.task_list:
context = self.vector_service.query(self.objective_embedding, 5)
task = self.task_list.popleft()
task.result = self.ai_service.create(
prompt=EXECUTION_PROMPT.format(objective=self.objective, task=task),
max_tokens=2000,
temperature=0.7,
)
task.vector = self.ai_service.get_ada_embedding(task.result)
self.vector_service.upsert(task)
new_tasks = self.task_creation_agent(task)
task_id_counter = 1
for new_task in new_tasks:
task_id_counter += 1
new_task.update({'task_id': task_id_counter})
self.add_task(Task(id=new_task['task_id'], name=new_task['task_name']))
self.prioritization_agent(task.id)
def main():
load_dotenv()
baby_agi = BabyAGI(
objective='Solve world hunger.',
ai_service=TestAIService(
ai_service=OpenAIService(api_key=os.getenv('OPENAI_API_KEY')),
cache_file='babyagi_cache.pkl',
),
vector_service=LanceService(
table_name='test-table',
dimension=1536,
)
# vector_service=PineconeService(
# api_key=os.getenv('PINECONE_API_KEY'),
# environment=os.getenv('PINECONE_ENVIRONMENT'),
# table_name='test-table',
# dimension=1536,
# metric='cosine',
# pod_type='p1',
# ),
)
baby_agi.run(first_task='Develop a task list.')
if __name__ == '__main__':
typer.run(main)
| [
"lancedb.connect"
] | [((7282, 7295), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (7293, 7295), False, 'from dotenv import load_dotenv\n'), ((8026, 8041), 'typer.run', 'typer.run', (['main'], {}), '(main)\n', (8035, 8041), False, 'import typer\n'), ((2219, 2245), 'os.path.isfile', 'os.path.isfile', (['cache_file'], {}), '(cache_file)\n', (2233, 2245), False, 'import os\n'), ((3212, 3267), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'api_key', 'environment': 'environment'}), '(api_key=api_key, environment=environment)\n', (3225, 3267), False, 'import pinecone\n'), ((3444, 3470), 'pinecone.Index', 'pinecone.Index', (['table_name'], {}), '(table_name)\n', (3458, 3470), False, 'import pinecone\n'), ((3940, 3960), 'lancedb.connect', 'lancedb.connect', (['"""."""'], {}), "('.')\n", (3955, 3960), False, 'import lancedb\n'), ((5081, 5090), 'collections.deque', 'deque', (['[]'], {}), '([])\n', (5086, 5090), False, 'from collections import deque\n'), ((3297, 3320), 'pinecone.list_indexes', 'pinecone.list_indexes', ([], {}), '()\n', (3318, 3320), False, 'import pinecone\n'), ((3334, 3426), 'pinecone.create_index', 'pinecone.create_index', (['table_name'], {'dimension': 'dimension', 'metric': 'metric', 'pod_type': 'pod_type'}), '(table_name, dimension=dimension, metric=metric,\n pod_type=pod_type)\n', (3355, 3426), False, 'import pinecone\n'), ((4743, 4772), 'pandas.DataFrame', 'pd.DataFrame', (['[task.__dict__]'], {}), '([task.__dict__])\n', (4755, 4772), True, 'import pandas as pd\n'), ((4034, 4044), 'pyarrow.int32', 'pa.int32', ([], {}), '()\n', (4042, 4044), True, 'import pyarrow as pa\n'), ((4151, 4162), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (4160, 4162), True, 'import pyarrow as pa\n'), ((4200, 4211), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (4209, 4211), True, 'import pyarrow as pa\n'), ((4091, 4103), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (4101, 4103), True, 'import pyarrow as pa\n'), ((1711, 1883), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': '"""text-davinci-003"""', 'prompt': 'prompt', 'temperature': 'temperature', 'max_tokens': 'max_tokens', 'top_p': '(1)', 'frequency_penalty': '(0)', 'presence_penalty': '(0)'}), "(engine='text-davinci-003', prompt=prompt,\n temperature=temperature, max_tokens=max_tokens, top_p=1,\n frequency_penalty=0, presence_penalty=0)\n", (1735, 1883), False, 'import openai\n'), ((7440, 7467), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (7449, 7467), False, 'import os\n')] |
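Because both vector services share the same two-method surface (upsert/query), a standalone smoke test of LanceService needs only a Task round trip. A sketch, using a tiny dimension to keep it cheap; note the class's query() drops a 'score' column, which matches older lancedb releases (newer ones name it '_distance').

svc = LanceService(table_name="smoke-test", dimension=4)
svc.upsert(Task(name="hello", id=1, result="world", vector=[0.1, 0.2, 0.3, 0.4]))
for task in svc.query([0.1, 0.2, 0.3, 0.4], top_k=1):
    print(task.id, task.name, task.result)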
import json
from generate_data import *
from create_embeddings import *
import lancedb
uri = "./sample-lancedb"
db = lancedb.connect(uri)
text_table = "table_from_df_text"
img_table = "table_from_df_images"
tbl_txt = db.open_table(text_table)
tbl_img = db.open_table(img_table)
with open('./test_data.json') as f:
test_user_scripts = json.loads(f.read())
customers = list(test_user_scripts.keys())
customer_id = 0 # change this to test for different customers
# Format user chat history
def build_customer_chat_history(customer_id):
outstring=""
for q,a in test_user_scripts[customers[customer_id]].items():
        outstring += f"Question- {q} : User Answer: {a}\n"  # newline keeps each Q/A pair distinct in the prompt
return outstring
# this function reformats the user inputs to a json structured output very similar to the one used to create embeddings
def get_reformatted_output(user_prompt):
response = client.chat.completions.create(
model="gpt-4-turbo-preview",
response_format={ "type": "json_object" },
messages=[
{"role": "system", "content": "You are a helpful assistant with deep expertise in real estate."},
{"role": "user", "content": user_prompt}
]
)
listings = json.loads(response.choices[0].message.content)["listings"]
# print(listings)
return listings
def format_response(response):
out_string=""
for item,val in response.items():
if val != 'None':
out_string+=f"{item}: {val} "
return out_string
def get_user_preference(customer_id, img_path=None):
# if user provides an image as reference, we shall also use that. The assumption is that the image has been
# loaded and placed in a path the application can access
image = img_path
# if img_path:
# try:
    #         image = Image.open(img_path)
# except:
# pass
chat_history = build_customer_chat_history(customer_id)
user_prompt= f"""
Please only use the customer chat history given below to create a desired listing for them.
Use the example given below and format the results in json format.
All the results should be saved inside a key called listings.
Each result should have the following keys: Neighborhood, Price, Bedrooms, Bathrooms, House Size, Description, Neighborhood Description.
    Use only information from the chat history. If any of the fields are unavailable, list them as None.
Customer Chat History: {chat_history}
Example:{example_listing}
"""
response = get_reformatted_output(user_prompt)[0]
formatted_response = format_response(response)
return formatted_response, image
def get_embeddings_user_prefs(resp):
text_resp, img_resp = resp[0],resp[1]
text_embs = get_embedding(text_resp)
img_embs = None
if img_resp:
try:
img_embs = create_clip_image_embeddings(img_resp, model_name)
except:
pass
return text_embs,img_embs
def search_tables(embeddings,num_responses=5):
text_embeddings = embeddings[0]
img_embeddings = embeddings[1]
df = tbl_txt.search(text_embeddings) \
.metric("cosine") \
.limit(num_responses) \
.to_pandas()
return df
resp = get_user_preference(customer_id)
embeddings = get_embeddings_user_prefs(resp)
print(search_tables(embeddings))
| [
"lancedb.connect"
] | [((119, 139), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (134, 139), False, 'import lancedb\n'), ((1192, 1239), 'json.loads', 'json.loads', (['response.choices[0].message.content'], {}), '(response.choices[0].message.content)\n', (1202, 1239), False, 'import json\n')] |
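search_tables computes image embeddings but only ever queries the text table; a sketch of the companion image-table lookup (it assumes tbl_img rows carry the same listing fields, and it is only meaningful when the user supplied a reference image).

def search_img_table(img_embeddings, num_responses=5):
    # Hypothetical extension: img_embeddings is None when no image was given
    if img_embeddings is None:
        return None
    return tbl_img.search(img_embeddings) \
        .metric("cosine") \
        .limit(num_responses) \
        .to_pandas()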
# Ultralytics YOLO 🚀, AGPL-3.0 license
from io import BytesIO
from pathlib import Path
from typing import Any, List, Tuple, Union
import cv2
import numpy as np
import torch
from matplotlib import pyplot as plt
from pandas import DataFrame
from PIL import Image
from tqdm import tqdm
from ultralytics.data.augment import Format
from ultralytics.data.dataset import YOLODataset
from ultralytics.data.utils import check_det_dataset
from ultralytics.models.yolo.model import YOLO
from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks
from .utils import get_sim_index_schema, get_table_schema, plot_query_result, prompt_sql_query, sanitize_batch
class ExplorerDataset(YOLODataset):
def __init__(self, *args, data: dict = None, **kwargs) -> None:
super().__init__(*args, data=data, **kwargs)
def load_image(self, i: int) -> Union[Tuple[np.ndarray, Tuple[int, int], Tuple[int, int]], Tuple[None, None, None]]:
"""Loads 1 image from dataset index 'i' without any resize ops."""
im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i]
if im is None: # not cached in RAM
if fn.exists(): # load npy
im = np.load(fn)
else: # read image
im = cv2.imread(f) # BGR
if im is None:
raise FileNotFoundError(f"Image Not Found {f}")
h0, w0 = im.shape[:2] # orig hw
return im, (h0, w0), im.shape[:2]
return self.ims[i], self.im_hw0[i], self.im_hw[i]
def build_transforms(self, hyp: IterableSimpleNamespace = None):
"""Creates transforms for dataset images without resizing."""
return Format(
bbox_format="xyxy",
normalize=False,
return_mask=self.use_segments,
return_keypoint=self.use_keypoints,
batch_idx=True,
mask_ratio=hyp.mask_ratio,
mask_overlap=hyp.overlap_mask,
)
class Explorer:
def __init__(
self, data: Union[str, Path] = "coco128.yaml", model: str = "yolov8n.pt", uri: str = "~/ultralytics/explorer"
) -> None:
checks.check_requirements(["lancedb>=0.4.3", "duckdb"])
import lancedb
self.connection = lancedb.connect(uri)
self.table_name = Path(data).name.lower() + "_" + model.lower()
self.sim_idx_base_name = (
f"{self.table_name}_sim_idx".lower()
) # Use this name and append thres and top_k to reuse the table
self.model = YOLO(model)
self.data = data # None
self.choice_set = None
self.table = None
self.progress = 0
def create_embeddings_table(self, force: bool = False, split: str = "train") -> None:
"""
Create LanceDB table containing the embeddings of the images in the dataset. The table will be reused if it
already exists. Pass force=True to overwrite the existing table.
Args:
force (bool): Whether to overwrite the existing table or not. Defaults to False.
split (str): Split of the dataset to use. Defaults to 'train'.
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
```
"""
if self.table is not None and not force:
LOGGER.info("Table already exists. Reusing it. Pass force=True to overwrite it.")
return
if self.table_name in self.connection.table_names() and not force:
LOGGER.info(f"Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.")
self.table = self.connection.open_table(self.table_name)
self.progress = 1
return
if self.data is None:
raise ValueError("Data must be provided to create embeddings table")
data_info = check_det_dataset(self.data)
if split not in data_info:
raise ValueError(
f"Split {split} is not found in the dataset. Available keys in the dataset are {list(data_info.keys())}"
)
choice_set = data_info[split]
choice_set = choice_set if isinstance(choice_set, list) else [choice_set]
self.choice_set = choice_set
dataset = ExplorerDataset(img_path=choice_set, data=data_info, augment=False, cache=False, task=self.model.task)
# Create the table schema
batch = dataset[0]
vector_size = self.model.embed(batch["im_file"], verbose=False)[0].shape[0]
table = self.connection.create_table(self.table_name, schema=get_table_schema(vector_size), mode="overwrite")
table.add(
self._yield_batches(
dataset,
data_info,
self.model,
exclude_keys=["img", "ratio_pad", "resized_shape", "ori_shape", "batch_idx"],
)
)
self.table = table
def _yield_batches(self, dataset: ExplorerDataset, data_info: dict, model: YOLO, exclude_keys: List[str]):
"""Generates batches of data for embedding, excluding specified keys."""
for i in tqdm(range(len(dataset))):
self.progress = float(i + 1) / len(dataset)
batch = dataset[i]
for k in exclude_keys:
batch.pop(k, None)
batch = sanitize_batch(batch, data_info)
batch["vector"] = model.embed(batch["im_file"], verbose=False)[0].detach().tolist()
yield [batch]
def query(
self, imgs: Union[str, np.ndarray, List[str], List[np.ndarray]] = None, limit: int = 25
) -> Any: # pyarrow.Table
"""
Query the table for similar images. Accepts a single image or a list of images.
Args:
imgs (str or list): Path to the image or a list of paths to the images.
limit (int): Number of results to return.
Returns:
(pyarrow.Table): An arrow table containing the results. Supports converting to:
- pandas dataframe: `result.to_pandas()`
- dict of lists: `result.to_pydict()`
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
similar = exp.query(img='https://ultralytics.com/images/zidane.jpg')
```
"""
if self.table is None:
raise ValueError("Table is not created. Please create the table first.")
if isinstance(imgs, str):
imgs = [imgs]
assert isinstance(imgs, list), f"img must be a string or a list of strings. Got {type(imgs)}"
embeds = self.model.embed(imgs)
# Get avg if multiple images are passed (len > 1)
embeds = torch.mean(torch.stack(embeds), 0).cpu().numpy() if len(embeds) > 1 else embeds[0].cpu().numpy()
return self.table.search(embeds).limit(limit).to_arrow()
def sql_query(
self, query: str, return_type: str = "pandas"
) -> Union[DataFrame, Any, None]: # pandas.dataframe or pyarrow.Table
"""
Run a SQL-Like query on the table. Utilizes LanceDB predicate pushdown.
Args:
query (str): SQL query to run.
return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'.
Returns:
(pyarrow.Table): An arrow table containing the results.
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'"
result = exp.sql_query(query)
```
"""
assert return_type in [
"pandas",
"arrow",
], f"Return type should be either `pandas` or `arrow`, but got {return_type}"
import duckdb
if self.table is None:
raise ValueError("Table is not created. Please create the table first.")
# Note: using filter pushdown would be a better long term solution. Temporarily using duckdb for this.
table = self.table.to_arrow() # noqa NOTE: Don't comment this. This line is used by DuckDB
if not query.startswith("SELECT") and not query.startswith("WHERE"):
raise ValueError(
f"Query must start with SELECT or WHERE. You can either pass the entire query or just the WHERE clause. found {query}"
)
if query.startswith("WHERE"):
query = f"SELECT * FROM 'table' {query}"
LOGGER.info(f"Running query: {query}")
rs = duckdb.sql(query)
if return_type == "pandas":
return rs.df()
elif return_type == "arrow":
return rs.arrow()
def plot_sql_query(self, query: str, labels: bool = True) -> Image.Image:
"""
Plot the results of a SQL-Like query on the table.
Args:
query (str): SQL query to run.
labels (bool): Whether to plot the labels or not.
Returns:
(PIL.Image): Image containing the plot.
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'"
result = exp.plot_sql_query(query)
```
"""
result = self.sql_query(query, return_type="arrow")
if len(result) == 0:
LOGGER.info("No results found.")
return None
img = plot_query_result(result, plot_labels=labels)
return Image.fromarray(img)
def get_similar(
self,
img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None,
idx: Union[int, List[int]] = None,
limit: int = 25,
return_type: str = "pandas",
) -> Union[DataFrame, Any]: # pandas.dataframe or pyarrow.Table
"""
Query the table for similar images. Accepts a single image or a list of images.
Args:
img (str or list): Path to the image or a list of paths to the images.
idx (int or list): Index of the image in the table or a list of indexes.
limit (int): Number of results to return. Defaults to 25.
return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'.
Returns:
(pandas.DataFrame): A dataframe containing the results.
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
similar = exp.get_similar(img='https://ultralytics.com/images/zidane.jpg')
```
"""
assert return_type in [
"pandas",
"arrow",
], f"Return type should be either `pandas` or `arrow`, but got {return_type}"
img = self._check_imgs_or_idxs(img, idx)
similar = self.query(img, limit=limit)
if return_type == "pandas":
return similar.to_pandas()
elif return_type == "arrow":
return similar
def plot_similar(
self,
img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None,
idx: Union[int, List[int]] = None,
limit: int = 25,
labels: bool = True,
) -> Image.Image:
"""
Plot the similar images. Accepts images or indexes.
Args:
img (str or list): Path to the image or a list of paths to the images.
idx (int or list): Index of the image in the table or a list of indexes.
labels (bool): Whether to plot the labels or not.
limit (int): Number of results to return. Defaults to 25.
Returns:
(PIL.Image): Image containing the plot.
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
similar = exp.plot_similar(img='https://ultralytics.com/images/zidane.jpg')
```
"""
similar = self.get_similar(img, idx, limit, return_type="arrow")
if len(similar) == 0:
LOGGER.info("No results found.")
return None
img = plot_query_result(similar, plot_labels=labels)
return Image.fromarray(img)
def similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> DataFrame:
"""
Calculate the similarity index of all the images in the table. Here, the index will contain the data points that
are max_dist or closer to the image in the embedding space at a given index.
Args:
max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2.
top_k (float): Percentage of the closest data points to consider when counting. Used to apply limit when running
                vector search. Defaults to None.
            force (bool): Whether to overwrite the existing similarity index or not. Defaults to False.
Returns:
(pandas.DataFrame): A dataframe containing the similarity index. Each row corresponds to an image, and columns
include indices of similar images and their respective distances.
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
sim_idx = exp.similarity_index()
```
"""
if self.table is None:
raise ValueError("Table is not created. Please create the table first.")
sim_idx_table_name = f"{self.sim_idx_base_name}_thres_{max_dist}_top_{top_k}".lower()
if sim_idx_table_name in self.connection.table_names() and not force:
LOGGER.info("Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.")
return self.connection.open_table(sim_idx_table_name).to_pandas()
if top_k and not (1.0 >= top_k >= 0.0):
raise ValueError(f"top_k must be between 0.0 and 1.0. Got {top_k}")
if max_dist < 0.0:
raise ValueError(f"max_dist must be greater than 0. Got {max_dist}")
top_k = int(top_k * len(self.table)) if top_k else len(self.table)
top_k = max(top_k, 1)
features = self.table.to_lance().to_table(columns=["vector", "im_file"]).to_pydict()
im_files = features["im_file"]
embeddings = features["vector"]
sim_table = self.connection.create_table(sim_idx_table_name, schema=get_sim_index_schema(), mode="overwrite")
def _yield_sim_idx():
"""Generates a dataframe with similarity indices and distances for images."""
for i in tqdm(range(len(embeddings))):
sim_idx = self.table.search(embeddings[i]).limit(top_k).to_pandas().query(f"_distance <= {max_dist}")
yield [
{
"idx": i,
"im_file": im_files[i],
"count": len(sim_idx),
"sim_im_files": sim_idx["im_file"].tolist(),
}
]
sim_table.add(_yield_sim_idx())
self.sim_index = sim_table
return sim_table.to_pandas()
def plot_similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> Image:
"""
Plot the similarity index of all the images in the table. Here, the index will contain the data points that are
max_dist or closer to the image in the embedding space at a given index.
Args:
max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2.
top_k (float): Percentage of closest data points to consider when counting. Used to apply limit when
                running vector search. Defaults to None.
            force (bool): Whether to overwrite the existing similarity index or not. Defaults to False.
Returns:
(PIL.Image): Image containing the plot.
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
similarity_idx_plot = exp.plot_similarity_index()
similarity_idx_plot.show() # view image preview
similarity_idx_plot.save('path/to/save/similarity_index_plot.png') # save contents to file
```
"""
sim_idx = self.similarity_index(max_dist=max_dist, top_k=top_k, force=force)
sim_count = sim_idx["count"].tolist()
sim_count = np.array(sim_count)
indices = np.arange(len(sim_count))
# Create the bar plot
plt.bar(indices, sim_count)
# Customize the plot (optional)
plt.xlabel("data idx")
plt.ylabel("Count")
plt.title("Similarity Count")
buffer = BytesIO()
plt.savefig(buffer, format="png")
buffer.seek(0)
# Use Pillow to open the image from the buffer
return Image.fromarray(np.array(Image.open(buffer)))
def _check_imgs_or_idxs(
self, img: Union[str, np.ndarray, List[str], List[np.ndarray], None], idx: Union[None, int, List[int]]
) -> List[np.ndarray]:
if img is None and idx is None:
raise ValueError("Either img or idx must be provided.")
if img is not None and idx is not None:
raise ValueError("Only one of img or idx must be provided.")
if idx is not None:
idx = idx if isinstance(idx, list) else [idx]
img = self.table.to_lance().take(idx, columns=["im_file"]).to_pydict()["im_file"]
return img if isinstance(img, list) else [img]
def ask_ai(self, query):
"""
Ask AI a question.
Args:
query (str): Question to ask.
Returns:
(pandas.DataFrame): A dataframe containing filtered results to the SQL query.
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
answer = exp.ask_ai('Show images with 1 person and 2 dogs')
```
"""
result = prompt_sql_query(query)
try:
df = self.sql_query(result)
except Exception as e:
LOGGER.error("AI generated query is not valid. Please try again with a different prompt")
LOGGER.error(e)
return None
return df
def visualize(self, result):
"""
Visualize the results of a query. TODO.
Args:
result (pyarrow.Table): Table containing the results of a query.
"""
pass
def generate_report(self, result):
"""
Generate a report of the dataset.
TODO
"""
pass
| [
"lancedb.connect"
] | [((1681, 1874), 'ultralytics.data.augment.Format', 'Format', ([], {'bbox_format': '"""xyxy"""', 'normalize': '(False)', 'return_mask': 'self.use_segments', 'return_keypoint': 'self.use_keypoints', 'batch_idx': '(True)', 'mask_ratio': 'hyp.mask_ratio', 'mask_overlap': 'hyp.overlap_mask'}), "(bbox_format='xyxy', normalize=False, return_mask=self.use_segments,\n return_keypoint=self.use_keypoints, batch_idx=True, mask_ratio=hyp.\n mask_ratio, mask_overlap=hyp.overlap_mask)\n", (1687, 1874), False, 'from ultralytics.data.augment import Format\n'), ((2138, 2193), 'ultralytics.utils.checks.check_requirements', 'checks.check_requirements', (["['lancedb>=0.4.3', 'duckdb']"], {}), "(['lancedb>=0.4.3', 'duckdb'])\n", (2163, 2193), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((2244, 2264), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2259, 2264), False, 'import lancedb\n'), ((2515, 2526), 'ultralytics.models.yolo.model.YOLO', 'YOLO', (['model'], {}), '(model)\n', (2519, 2526), False, 'from ultralytics.models.yolo.model import YOLO\n'), ((3858, 3886), 'ultralytics.data.utils.check_det_dataset', 'check_det_dataset', (['self.data'], {}), '(self.data)\n', (3875, 3886), False, 'from ultralytics.data.utils import check_det_dataset\n'), ((8493, 8531), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['f"""Running query: {query}"""'], {}), "(f'Running query: {query}')\n", (8504, 8531), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((8546, 8563), 'duckdb.sql', 'duckdb.sql', (['query'], {}), '(query)\n', (8556, 8563), False, 'import duckdb\n'), ((9525, 9545), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (9540, 9545), False, 'from PIL import Image\n'), ((12170, 12190), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (12185, 12190), False, 'from PIL import Image\n'), ((16442, 16461), 'numpy.array', 'np.array', (['sim_count'], {}), '(sim_count)\n', (16450, 16461), True, 'import numpy as np\n'), ((16546, 16573), 'matplotlib.pyplot.bar', 'plt.bar', (['indices', 'sim_count'], {}), '(indices, sim_count)\n', (16553, 16573), True, 'from matplotlib import pyplot as plt\n'), ((16623, 16645), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""data idx"""'], {}), "('data idx')\n", (16633, 16645), True, 'from matplotlib import pyplot as plt\n'), ((16654, 16673), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (16664, 16673), True, 'from matplotlib import pyplot as plt\n'), ((16682, 16711), 'matplotlib.pyplot.title', 'plt.title', (['"""Similarity Count"""'], {}), "('Similarity Count')\n", (16691, 16711), True, 'from matplotlib import pyplot as plt\n'), ((16729, 16738), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (16736, 16738), False, 'from io import BytesIO\n'), ((16747, 16780), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buffer'], {'format': '"""png"""'}), "(buffer, format='png')\n", (16758, 16780), True, 'from matplotlib import pyplot as plt\n'), ((3319, 3405), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Table already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n 'Table already exists. Reusing it. Pass force=True to overwrite it.')\n", (3330, 3405), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((3507, 3617), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['f"""Table {self.table_name} already exists. Reusing it. 
Pass force=True to overwrite it."""'], {}), "(\n f'Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.'\n )\n", (3518, 3617), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((9393, 9425), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (9404, 9425), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((12037, 12069), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (12048, 12069), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((13647, 13750), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Similarity matrix already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n 'Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.'\n )\n", (13658, 13750), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((1191, 1202), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (1198, 1202), True, 'import numpy as np\n'), ((1256, 1269), 'cv2.imread', 'cv2.imread', (['f'], {}), '(f)\n', (1266, 1269), False, 'import cv2\n'), ((16900, 16918), 'PIL.Image.open', 'Image.open', (['buffer'], {}), '(buffer)\n', (16910, 16918), False, 'from PIL import Image\n'), ((18136, 18235), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""AI generated query is not valid. Please try again with a different prompt"""'], {}), "(\n 'AI generated query is not valid. Please try again with a different prompt'\n )\n", (18148, 18235), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((18238, 18253), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['e'], {}), '(e)\n', (18250, 18253), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((2291, 2301), 'pathlib.Path', 'Path', (['data'], {}), '(data)\n', (2295, 2301), False, 'from pathlib import Path\n'), ((6718, 6737), 'torch.stack', 'torch.stack', (['embeds'], {}), '(embeds)\n', (6729, 6737), False, 'import torch\n')] |
from PIL import Image
import streamlit as st
import openai
#exercise 11
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
#exercise 12
from langchain.memory import ConversationBufferWindowMemory
#exercise 13
from langchain.document_loaders import TextLoader,PyPDFLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import LanceDB
import lancedb
import os
import tempfile
# os.environ["OPENAI_API_KEY"] = st.secrets["openapi_key"]
# openai.api_key = st.secrets["openapi_key"]
#Global ex 13
cwd = os.getcwd()
WORKING_DIRECTORY = os.path.join(cwd, "database")
if not os.path.exists(WORKING_DIRECTORY):
os.makedirs(WORKING_DIRECTORY)
def ex11a(): # change in ex11a
# langchain prompt template
prompt = PromptTemplate(
input_variables=["subject", "topic"],
template="""Design a lesson plan on {subject} on the topic of {topic} for primary 1 students""",
)
# openai_api_key = st.secrets["openapi_key"]
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.9)
# creating a LLM chain with the langchain call and prompt template
chain = LLMChain(llm=llm, prompt=prompt)
if st.button("Run my chain"):
input_prompt = prompt.format(subject="English", topic="Verbs")
# Showing what is sent to LLM Chain
st.write("Input prompt: ", input_prompt)
# Showing the output from LLM Chain
st.write(chain.run({"subject": "English", "topic": "Verbs"}))
def prompt_inputs_form(): #Using st.form, create the starting prompt to your prompt template, this is an expert on a topic that is talking to a user of a certain age
#langchain prompt template
with st.form("Prompt Template"):
occupation = st.text_input("Enter the occupation:")
topic = st.text_input("Enter the topic:")
age = st.text_input("Enter the age:")
# Every form must have a submit button.
submitted = st.form_submit_button("Submit")
#return a dictionary of the values
if submitted:
return {
'occupation': occupation,
'topic': topic,
'age': age
}
def ex11b():
# create your template
prompt_template = PromptTemplate(
input_variables=["occupation", "topic", "age"],
template="""Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information to the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up""",
)
# create a langchain function call to openai
llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
temperature=0.9,
)
# create a LLM chain with the langchain call and prompt template
chain = LLMChain(llm=llm, prompt=prompt_template)
# call the prompt_inputs_form()
dict_inputs = prompt_inputs_form()
if dict_inputs:
st.write(chain.run(dict_inputs))
def ex12():
memory = ConversationBufferWindowMemory(k=3)
memory.save_context({"input": "hi"}, {"output": "whats up?"})
memory.save_context({"input": "not much"}, {"output": "what can I help you with?"})
st.write(memory.load_memory_variables({}))
	memory = ConversationBufferWindowMemory(k=3, return_messages=True)
memory.save_context({"input": "hi"}, {"output": "whats up?"})
memory.save_context({"input": "not much"}, {"output": "what can I help you with?"})
st.write(memory.load_memory_variables({}))
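	# A hedged extension (not in the original exercise): with k=3 the window
	# keeps only the last three exchanges, so after two more turns the first
	# "hi"/"whats up?" pair falls out of load_memory_variables().
	memory.save_context({"input": "tell me a joke"}, {"output": "why did the chicken cross the road?"})
	memory.save_context({"input": "why?"}, {"output": "to get to the other side"})
	st.write(memory.load_memory_variables({}))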
#exercise 13 - loading
def upload_file_streamlit():
def get_file_extension(file_name):
return os.path.splitext(file_name)[1]
st.subheader("Upload your docs")
# Streamlit file uploader to accept file input
uploaded_file = st.file_uploader("Choose a file", type=["docx", "txt", "pdf"])
if uploaded_file:
# Reading file content
file_content = uploaded_file.read()
# Determine the suffix based on uploaded file's name
file_suffix = get_file_extension(uploaded_file.name)
# Saving the uploaded file temporarily to process it
with tempfile.NamedTemporaryFile(delete=False, suffix=file_suffix) as temp_file:
temp_file.write(file_content)
temp_file.flush() # Ensure the data is written to the file
temp_file_path = temp_file.name
return temp_file_path
#exercise 13 - split and chunk, embeddings and storing in vectorstores for reference
def vectorstore_creator():
# WORKING_DIRECTORY set above in the main.py
# Process the temporary file using UnstructuredFileLoader (or any other method you need)
embeddings = OpenAIEmbeddings()
db = lancedb.connect(WORKING_DIRECTORY)
table = db.create_table(
"my_table",
data=[
{
"vector": embeddings.embed_query("Query unsuccessful"),
"text": "Query unsuccessful",
"id": "1",
}
],
mode="overwrite",
)
# st.write(temp_file_path)
temp_file_path = upload_file_streamlit()
if temp_file_path:
loader = PyPDFLoader(temp_file_path)
documents = loader.load_and_split()
db = LanceDB.from_documents(documents, embeddings, connection=table)
return db
def ex13():
if "vectorstore" not in st.session_state:
st.session_state.vectorstore = False
db = vectorstore_creator()
st.session_state.vectorstore = db
if st.session_state.vectorstore:
query = st.text_input("Enter a query")
if query:
			# reuse the vectorstore kept in session state; the local `db` is not
			# defined on Streamlit reruns once the store has already been created
			docs = st.session_state.vectorstore.similarity_search(query)
st.write(docs[0].page_content)
def chat_completion_stream_prompt(prompt):
MODEL = "gpt-3.5-turbo" #consider changing this to session_state
response = openai.ChatCompletion.create(
model=MODEL,
messages=[
{"role": "system", "content": st.session_state.prompt_template},
{"role": "user", "content": prompt},
],
		temperature=0,  # deterministic output
		stream=True,  # stream the completion token by token
)
return response
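# A hedged usage sketch (not part of the exercises): the stream returned above
# can be consumed outside Streamlit too, provided st.session_state.prompt_template
# has been set first. This assumes the pre-1.0 openai client used in this file,
# where each streamed chunk exposes choices[0].delta.
# for chunk in chat_completion_stream_prompt("Tell me about verbs"):
# 	print(chunk.choices[0].delta.get("content", ""), end="", flush=True)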
# save the vectorstore in st.session_state
# add semantic search prompt into memory prompt
# integrate back into your chatbot
def ex14_basebot():
# Prompt_template form from ex11
prompt_template = PromptTemplate(
input_variables=["occupation", "topic", "age"],
template="""Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information
to the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up""",
)
dict_inputs = prompt_inputs_form()
if dict_inputs:
input_prompt = prompt_template.format(
occupation=dict_inputs["occupation"],
topic=dict_inputs["topic"],
age=dict_inputs["age"],
)
st.session_state.input_prompt = input_prompt
if "input_prompt" not in st.session_state:
st.session_state.input_prompt = "Speak like Yoda from Star Wars"
if "memory" not in st.session_state:
st.session_state.memory = ConversationBufferWindowMemory(k=5)
# step 1 save the memory from your chatbot
# step 2 integrate the memory in the prompt_template (st.session_state.prompt_template) show a hint
memory_data = st.session_state.memory.load_memory_variables({})
st.write(memory_data)
st.session_state.prompt_template = f"""
st.session_state.input_prompt: {st.session_state.input_prompt}
This is the last conversation history
{memory_data}
"""
st.write("new prompt template: ", st.session_state.prompt_template)
st.session_state.vectorstore = vectorstore_creator()
# Initialize chat history
if "msg" not in st.session_state:
st.session_state.msg = []
# Showing Chat history
for message in st.session_state.msg:
with st.chat_message(message["role"]):
st.markdown(message["content"])
try:
#
if prompt := st.chat_input("What is up?"):
# query information
if st.session_state.vectorstore:
docs = st.session_state.vectorstore.similarity_search(prompt)
docs = docs[0].page_content
# add your query prompt
vs_prompt = f"""You should reference this search result to help your answer,
{docs}
				if the search result does not answer the query, please say you are unable to answer; do not make up an answer"""
else:
vs_prompt = ""
# add query prompt to your memory prompt and send it to LLM
st.session_state.prompt_template = (
st.session_state.prompt_template + vs_prompt
)
# set user prompt in chat history
st.session_state.msg.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
# streaming function
for response in chat_completion_stream_prompt(prompt):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
st.session_state.msg.append({"role": "assistant", "content": full_response})
st.session_state.memory.save_context(
{"input": prompt}, {"output": full_response}
)
except Exception as e:
st.error(e) | [
"lancedb.connect"
] | [((649, 660), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (658, 660), False, 'import os\n'), ((681, 710), 'os.path.join', 'os.path.join', (['cwd', '"""database"""'], {}), "(cwd, 'database')\n", (693, 710), False, 'import os\n'), ((719, 752), 'os.path.exists', 'os.path.exists', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (733, 752), False, 'import os\n'), ((755, 785), 'os.makedirs', 'os.makedirs', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (766, 785), False, 'import os\n'), ((859, 1014), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['subject', 'topic']", 'template': '"""Design a lesson plan on {subject} on the topic of {topic} for primary 1 students"""'}), "(input_variables=['subject', 'topic'], template=\n 'Design a lesson plan on {subject} on the topic of {topic} for primary 1 students'\n )\n", (873, 1014), False, 'from langchain.prompts import PromptTemplate\n'), ((1071, 1126), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0.9)'}), "(model_name='gpt-3.5-turbo', temperature=0.9)\n", (1081, 1126), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1205, 1237), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (1213, 1237), False, 'from langchain.chains import LLMChain\n'), ((1242, 1267), 'streamlit.button', 'st.button', (['"""Run my chain"""'], {}), "('Run my chain')\n", (1251, 1267), True, 'import streamlit as st\n'), ((2158, 2497), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['occupation', 'topic', 'age']", 'template': '"""Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information to the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up"""'}), "(input_variables=['occupation', 'topic', 'age'], template=\n 'Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information to the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up'\n )\n", (2172, 2497), False, 'from langchain.prompts import PromptTemplate\n'), ((2553, 2608), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0.9)'}), "(model_name='gpt-3.5-turbo', temperature=0.9)\n", (2563, 2608), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2692, 2733), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt_template'}), '(llm=llm, prompt=prompt_template)\n', (2700, 2733), False, 'from langchain.chains import LLMChain\n'), ((2880, 2915), 'langchain.memory.ConversationBufferWindowMemory', 'ConversationBufferWindowMemory', ([], {'k': '(3)'}), '(k=3)\n', (2910, 2915), False, 'from langchain.memory import ConversationBufferWindowMemory\n'), ((3123, 3180), 'langchain.memory.ConversationBufferWindowMemory', 'ConversationBufferWindowMemory', ([], {'k': '(3)', 'return_messages': '(True)'}), '(k=3, return_messages=True)\n', (3153, 3180), False, 'from langchain.memory import ConversationBufferWindowMemory\n'), ((3506, 3538), 'streamlit.subheader', 'st.subheader', (['"""Upload your docs"""'], {}), "('Upload your docs')\n", (3518, 3538), True, 'import streamlit as st\n'), ((3605, 3667), 'streamlit.file_uploader', 'st.file_uploader', (['"""Choose a file"""'], {'type': 
"['docx', 'txt', 'pdf']"}), "('Choose a file', type=['docx', 'txt', 'pdf'])\n", (3621, 3667), True, 'import streamlit as st\n'), ((4420, 4438), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (4436, 4438), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((4445, 4479), 'lancedb.connect', 'lancedb.connect', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (4460, 4479), False, 'import lancedb\n'), ((5402, 5592), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {'model': 'MODEL', 'messages': "[{'role': 'system', 'content': st.session_state.prompt_template}, {'role':\n 'user', 'content': prompt}]", 'temperature': '(0)', 'stream': '(True)'}), "(model=MODEL, messages=[{'role': 'system',\n 'content': st.session_state.prompt_template}, {'role': 'user',\n 'content': prompt}], temperature=0, stream=True)\n", (5430, 5592), False, 'import openai\n'), ((5855, 6204), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['occupation', 'topic', 'age']", 'template': '"""Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information\n\t\t\t\t\t\tto the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up"""'}), '(input_variables=[\'occupation\', \'topic\', \'age\'], template=\n """Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information\n\t\t\t\t\t\tto the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up"""\n )\n', (5869, 6204), False, 'from langchain.prompts import PromptTemplate\n'), ((6823, 6872), 'streamlit.session_state.memory.load_memory_variables', 'st.session_state.memory.load_memory_variables', (['{}'], {}), '({})\n', (6868, 6872), True, 'import streamlit as st\n'), ((6874, 6895), 'streamlit.write', 'st.write', (['memory_data'], {}), '(memory_data)\n', (6882, 6895), True, 'import streamlit as st\n'), ((7059, 7126), 'streamlit.write', 'st.write', (['"""new prompt template: """', 'st.session_state.prompt_template'], {}), "('new prompt template: ', st.session_state.prompt_template)\n", (7067, 7126), True, 'import streamlit as st\n'), ((1374, 1414), 'streamlit.write', 'st.write', (['"""Input prompt: """', 'input_prompt'], {}), "('Input prompt: ', input_prompt)\n", (1382, 1414), True, 'import streamlit as st\n'), ((1719, 1745), 'streamlit.form', 'st.form', (['"""Prompt Template"""'], {}), "('Prompt Template')\n", (1726, 1745), True, 'import streamlit as st\n'), ((1762, 1800), 'streamlit.text_input', 'st.text_input', (['"""Enter the occupation:"""'], {}), "('Enter the occupation:')\n", (1775, 1800), True, 'import streamlit as st\n'), ((1811, 1844), 'streamlit.text_input', 'st.text_input', (['"""Enter the topic:"""'], {}), "('Enter the topic:')\n", (1824, 1844), True, 'import streamlit as st\n'), ((1853, 1884), 'streamlit.text_input', 'st.text_input', (['"""Enter the age:"""'], {}), "('Enter the age:')\n", (1866, 1884), True, 'import streamlit as st\n'), ((1941, 1972), 'streamlit.form_submit_button', 'st.form_submit_button', (['"""Submit"""'], {}), "('Submit')\n", (1962, 1972), True, 'import streamlit as st\n'), ((4777, 4804), 'langchain.document_loaders.PyPDFLoader', 'PyPDFLoader', (['temp_file_path'], {}), '(temp_file_path)\n', (4788, 4804), False, 'from langchain.document_loaders import TextLoader, PyPDFLoader\n'), ((4850, 
4913), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['documents', 'embeddings'], {'connection': 'table'}), '(documents, embeddings, connection=table)\n', (4872, 4913), False, 'from langchain.vectorstores import LanceDB\n'), ((5128, 5158), 'streamlit.text_input', 'st.text_input', (['"""Enter a query"""'], {}), "('Enter a query')\n", (5141, 5158), True, 'import streamlit as st\n'), ((6626, 6661), 'langchain.memory.ConversationBufferWindowMemory', 'ConversationBufferWindowMemory', ([], {'k': '(5)'}), '(k=5)\n', (6656, 6661), False, 'from langchain.memory import ConversationBufferWindowMemory\n'), ((3473, 3500), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (3489, 3500), False, 'import os\n'), ((3925, 3986), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)', 'suffix': 'file_suffix'}), '(delete=False, suffix=file_suffix)\n', (3952, 3986), False, 'import tempfile\n'), ((5249, 5279), 'streamlit.write', 'st.write', (['docs[0].page_content'], {}), '(docs[0].page_content)\n', (5257, 5279), True, 'import streamlit as st\n'), ((7343, 7375), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (7358, 7375), True, 'import streamlit as st\n'), ((7380, 7411), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (7391, 7411), True, 'import streamlit as st\n'), ((7437, 7465), 'streamlit.chat_input', 'st.chat_input', (['"""What is up?"""'], {}), "('What is up?')\n", (7450, 7465), True, 'import streamlit as st\n'), ((8093, 8157), 'streamlit.session_state.msg.append', 'st.session_state.msg.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (8120, 8157), True, 'import streamlit as st\n'), ((8571, 8647), 'streamlit.session_state.msg.append', 'st.session_state.msg.append', (["{'role': 'assistant', 'content': full_response}"], {}), "({'role': 'assistant', 'content': full_response})\n", (8598, 8647), True, 'import streamlit as st\n'), ((8651, 8737), 'streamlit.session_state.memory.save_context', 'st.session_state.memory.save_context', (["{'input': prompt}", "{'output': full_response}"], {}), "({'input': prompt}, {'output':\n full_response})\n", (8687, 8737), True, 'import streamlit as st\n'), ((8770, 8781), 'streamlit.error', 'st.error', (['e'], {}), '(e)\n', (8778, 8781), True, 'import streamlit as st\n'), ((7537, 7591), 'streamlit.session_state.vectorstore.similarity_search', 'st.session_state.vectorstore.similarity_search', (['prompt'], {}), '(prompt)\n', (7583, 7591), True, 'import streamlit as st\n'), ((8166, 8189), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (8181, 8189), True, 'import streamlit as st\n'), ((8195, 8214), 'streamlit.markdown', 'st.markdown', (['prompt'], {}), '(prompt)\n', (8206, 8214), True, 'import streamlit as st\n'), ((8224, 8252), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (8239, 8252), True, 'import streamlit as st\n'), ((8280, 8290), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (8288, 8290), True, 'import streamlit as st\n')] |
from PIL import Image
import streamlit as st
import openai
#exercise 11
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
#exercise 12
from langchain.memory import ConversationBufferWindowMemory
#exercise 13
from langchain.document_loaders import TextLoader,PyPDFLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import LanceDB
import lancedb
import os
import tempfile
#exercise 15
import sqlite3
import pandas as pd
from datetime import datetime
#exercise 16
from langchain.agents import ConversationalChatAgent, AgentExecutor
from langchain.callbacks import StreamlitCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
from langchain.tools import DuckDuckGoSearchRun
#Exercise 17
from langchain.agents import tool
import json
# os.environ["OPENAI_API_KEY"] = st.secrets["openapi_key"]
# openai.api_key = st.secrets["openapi_key"]
#Global ex 13
cwd = os.getcwd()
WORKING_DIRECTORY = os.path.join(cwd, "database")
if not os.path.exists(WORKING_DIRECTORY):
os.makedirs(WORKING_DIRECTORY)
#ex15
DB_NAME = os.path.join(WORKING_DIRECTORY, "default_db")
def ex15_initialise():
# Create or check for the 'database' directory in the current working directory
# Set DB_NAME to be within the 'database' directory at the top of main.py
# Connect to the SQLite database
conn = sqlite3.connect(DB_NAME)
cursor = conn.cursor()
# Conversation data table
cursor.execute(
"""
CREATE TABLE IF NOT EXISTS data_table (
id INTEGER PRIMARY KEY,
date TEXT NOT NULL UNIQUE,
username TEXT NOT NULL,
chatbot_ans TEXT NOT NULL,
user_prompt TEXT NOT NULL,
tokens TEXT
)
"""
)
conn.commit()
conn.close()
def ex15_collect(username, chatbot_response, prompt):
# collect data from bot
conn = sqlite3.connect(DB_NAME)
cursor = conn.cursor()
	now = datetime.now().isoformat()  # store an ISO-format timestamp string in the TEXT column
	tokens = len(chatbot_response) * 1.3  # crude token estimate from response length
cursor.execute(
"""
INSERT INTO data_table (date, username,chatbot_ans, user_prompt, tokens)
VALUES (?, ?, ?, ?, ?)
""",
(now, username, chatbot_response, prompt, tokens),
)
conn.commit()
conn.close()
# implementing data collection and displaying
def ex15():
# initialise database first
ex15_initialise()
# collect some data
ex15_collect("yoda", "I am Yoda. The Force is strong with you", "Who are you?")
# display data
# Connect to the specified database
conn = sqlite3.connect(DB_NAME)
cursor = conn.cursor()
# Fetch all data from data_table
cursor.execute("SELECT * FROM data_table")
rows = cursor.fetchall()
column_names = [description[0] for description in cursor.description]
df = pd.DataFrame(rows, columns=column_names)
st.dataframe(df)
conn.close()
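# A hedged follow-up sketch (not in the original exercises): aggregate the
# rows collected by ex15_collect() per user with a plain SQL GROUP BY.
def ex15_summary():
	conn = sqlite3.connect(DB_NAME)
	df = pd.read_sql_query(
		"SELECT username, COUNT(*) AS prompts, SUM(tokens) AS est_tokens "
		"FROM data_table GROUP BY username",
		conn,
	)
	conn.close()
	st.dataframe(df)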
# smart agents accessing the internet for free
# https://github.com/langchain-ai/streamlit-agent/blob/main/streamlit_agent/search_and_chat.py
def ex16_agent_bot():
st.title("🦜 LangChain: Chat with internet search")
msgs = StreamlitChatMessageHistory()
memory = ConversationBufferMemory(
chat_memory=msgs,
return_messages=True,
memory_key="chat_history",
output_key="output",
)
if len(msgs.messages) == 0 or st.sidebar.button("Reset chat history"):
msgs.clear()
msgs.add_ai_message("How can I help you?")
st.session_state.steps = {}
avatars = {"human": "user", "ai": "assistant"}
for idx, msg in enumerate(msgs.messages):
with st.chat_message(avatars[msg.type]):
# Render intermediate steps if any were saved
for step in st.session_state.steps.get(str(idx), []):
if step[0].tool == "_Exception":
continue
with st.status(
f"**{step[0].tool}**: {step[0].tool_input}", state="complete"
):
st.write(step[0].log)
st.write(step[1])
st.write(msg.content)
if prompt := st.chat_input(placeholder="Enter a query on the Internet"):
st.chat_message("user").write(prompt)
llm = ChatOpenAI(
model_name="gpt-3.5-turbo", openai_api_key=openai.api_key, streaming=True
)
tools = [DuckDuckGoSearchRun(name="Search")]
chat_agent = ConversationalChatAgent.from_llm_and_tools(llm=llm, tools=tools)
executor = AgentExecutor.from_agent_and_tools(
agent=chat_agent,
tools=tools,
memory=memory,
return_intermediate_steps=True,
handle_parsing_errors=True,
)
with st.chat_message("assistant"):
st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
response = executor(prompt, callbacks=[st_cb])
st.write(response["output"])
st.session_state.steps[str(len(msgs.messages) - 1)] = response[
"intermediate_steps"
]
def upload_file_streamlit():
def get_file_extension(file_name):
return os.path.splitext(file_name)[1]
st.subheader("Upload your docs")
# Streamlit file uploader to accept file input
uploaded_file = st.file_uploader("Choose a file", type=["docx", "txt", "pdf"])
if uploaded_file:
# Reading file content
file_content = uploaded_file.read()
# Determine the suffix based on uploaded file's name
file_suffix = get_file_extension(uploaded_file.name)
# Saving the uploaded file temporarily to process it
with tempfile.NamedTemporaryFile(delete=False, suffix=file_suffix) as temp_file:
temp_file.write(file_content)
temp_file.flush() # Ensure the data is written to the file
temp_file_path = temp_file.name
return temp_file_path
#exercise 13 - split and chunk, embeddings and storing in vectorstores for reference
def vectorstore_creator():
# WORKING_DIRECTORY set above in the main.py
# Process the temporary file using UnstructuredFileLoader (or any other method you need)
embeddings = OpenAIEmbeddings()
db = lancedb.connect(WORKING_DIRECTORY)
table = db.create_table(
"my_table",
data=[
{
"vector": embeddings.embed_query("Query unsuccessful"),
"text": "Query unsuccessful",
"id": "1",
}
],
mode="overwrite",
)
# st.write(temp_file_path)
temp_file_path = upload_file_streamlit()
if temp_file_path:
loader = PyPDFLoader(temp_file_path)
documents = loader.load_and_split()
db = LanceDB.from_documents(documents, embeddings, connection=table)
return db
# agents ,vectorstores, wiki
# https://python.langchain.com/docs/modules/agents/how_to/custom_agent_with_tool_retrieval
# note tool
@tool("Document search")
def document_search(query: str) -> str:
# this is the prompt to the tool itself
"Use this function first to search for documents pertaining to the query before going into the internet"
docs = st.session_state.vectorstore.similarity_search(query)
docs = docs[0].page_content
json_string = json.dumps(docs, ensure_ascii=False, indent=4)
return json_string
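# A minimal sketch (assumption, not from the tutorial): a @tool-decorated
# function can be exercised directly while debugging, before it is handed to
# the agent below; `.run` is the classic LangChain tool-call interface.
# st.session_state.vectorstore = vectorstore_creator()
# print(document_search.run("what is this document about?"))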
# combine vector store and internet search
def ex17_agent_bot():
st.title("🦜 LangChain: Chat with internet search")
st.session_state.vectorstore = vectorstore_creator()
msgs = StreamlitChatMessageHistory()
memory = ConversationBufferMemory(
chat_memory=msgs,
return_messages=True,
memory_key="chat_history",
output_key="output",
)
if len(msgs.messages) == 0 or st.sidebar.button("Reset chat history"):
msgs.clear()
msgs.add_ai_message("How can I help you?")
st.session_state.steps = {}
avatars = {"human": "user", "ai": "assistant"}
for idx, msg in enumerate(msgs.messages):
with st.chat_message(avatars[msg.type]):
# Render intermediate steps if any were saved
for step in st.session_state.steps.get(str(idx), []):
if step[0].tool == "_Exception":
continue
with st.status(
f"**{step[0].tool}**: {step[0].tool_input}", state="complete"
):
st.write(step[0].log)
st.write(step[1])
st.write(msg.content)
if prompt := st.chat_input(placeholder="Enter a query on the Internet"):
st.chat_message("user").write(prompt)
llm = ChatOpenAI(
model_name="gpt-3.5-turbo", openai_api_key=openai.api_key, streaming=True
)
tools = [document_search, DuckDuckGoSearchRun(name="Internet Search")]
chat_agent = ConversationalChatAgent.from_llm_and_tools(llm=llm, tools=tools)
executor = AgentExecutor.from_agent_and_tools(
agent=chat_agent,
tools=tools,
memory=memory,
return_intermediate_steps=True,
handle_parsing_errors=True,
)
with st.chat_message("assistant"):
st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
response = executor(prompt, callbacks=[st_cb])
st.write(response["output"])
st.session_state.steps[str(len(msgs.messages) - 1)] = response[
"intermediate_steps"
]
| [
"lancedb.connect"
] | [((1106, 1117), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1115, 1117), False, 'import os\n'), ((1138, 1167), 'os.path.join', 'os.path.join', (['cwd', '"""database"""'], {}), "(cwd, 'database')\n", (1150, 1167), False, 'import os\n'), ((1259, 1304), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', '"""default_db"""'], {}), "(WORKING_DIRECTORY, 'default_db')\n", (1271, 1304), False, 'import os\n'), ((6373, 6396), 'langchain.agents.tool', 'tool', (['"""Document search"""'], {}), "('Document search')\n", (6377, 6396), False, 'from langchain.agents import tool\n'), ((1176, 1209), 'os.path.exists', 'os.path.exists', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (1190, 1209), False, 'import os\n'), ((1212, 1242), 'os.makedirs', 'os.makedirs', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (1223, 1242), False, 'import os\n'), ((1527, 1551), 'sqlite3.connect', 'sqlite3.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (1542, 1551), False, 'import sqlite3\n'), ((1957, 1981), 'sqlite3.connect', 'sqlite3.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (1972, 1981), False, 'import sqlite3\n'), ((2013, 2027), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2025, 2027), False, 'from datetime import datetime\n'), ((2579, 2603), 'sqlite3.connect', 'sqlite3.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (2594, 2603), False, 'import sqlite3\n'), ((2810, 2850), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {'columns': 'column_names'}), '(rows, columns=column_names)\n', (2822, 2850), True, 'import pandas as pd\n'), ((2852, 2868), 'streamlit.dataframe', 'st.dataframe', (['df'], {}), '(df)\n', (2864, 2868), True, 'import streamlit as st\n'), ((3049, 3099), 'streamlit.title', 'st.title', (['"""🦜 LangChain: Chat with internet search"""'], {}), "('🦜 LangChain: Chat with internet search')\n", (3057, 3099), True, 'import streamlit as st\n'), ((3109, 3138), 'langchain.memory.chat_message_histories.StreamlitChatMessageHistory', 'StreamlitChatMessageHistory', ([], {}), '()\n', (3136, 3138), False, 'from langchain.memory.chat_message_histories import StreamlitChatMessageHistory\n'), ((3149, 3266), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'chat_memory': 'msgs', 'return_messages': '(True)', 'memory_key': '"""chat_history"""', 'output_key': '"""output"""'}), "(chat_memory=msgs, return_messages=True, memory_key\n ='chat_history', output_key='output')\n", (3173, 3266), False, 'from langchain.memory import ConversationBufferMemory\n'), ((4818, 4850), 'streamlit.subheader', 'st.subheader', (['"""Upload your docs"""'], {}), "('Upload your docs')\n", (4830, 4850), True, 'import streamlit as st\n'), ((4917, 4979), 'streamlit.file_uploader', 'st.file_uploader', (['"""Choose a file"""'], {'type': "['docx', 'txt', 'pdf']"}), "('Choose a file', type=['docx', 'txt', 'pdf'])\n", (4933, 4979), True, 'import streamlit as st\n'), ((5732, 5750), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (5748, 5750), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((5757, 5791), 'lancedb.connect', 'lancedb.connect', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (5772, 5791), False, 'import lancedb\n'), ((6592, 6645), 'streamlit.session_state.vectorstore.similarity_search', 'st.session_state.vectorstore.similarity_search', (['query'], {}), '(query)\n', (6638, 6645), True, 'import streamlit as st\n'), ((6690, 6736), 'json.dumps', 'json.dumps', (['docs'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(docs, 
ensure_ascii=False, indent=4)\n', (6700, 6736), False, 'import json\n'), ((6824, 6874), 'streamlit.title', 'st.title', (['"""🦜 LangChain: Chat with internet search"""'], {}), "('🦜 LangChain: Chat with internet search')\n", (6832, 6874), True, 'import streamlit as st\n'), ((6939, 6968), 'langchain.memory.chat_message_histories.StreamlitChatMessageHistory', 'StreamlitChatMessageHistory', ([], {}), '()\n', (6966, 6968), False, 'from langchain.memory.chat_message_histories import StreamlitChatMessageHistory\n'), ((6979, 7096), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'chat_memory': 'msgs', 'return_messages': '(True)', 'memory_key': '"""chat_history"""', 'output_key': '"""output"""'}), "(chat_memory=msgs, return_messages=True, memory_key\n ='chat_history', output_key='output')\n", (7003, 7096), False, 'from langchain.memory import ConversationBufferMemory\n'), ((3305, 3344), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Reset chat history"""'], {}), "('Reset chat history')\n", (3322, 3344), True, 'import streamlit as st\n'), ((3912, 3970), 'streamlit.chat_input', 'st.chat_input', ([], {'placeholder': '"""Enter a query on the Internet"""'}), "(placeholder='Enter a query on the Internet')\n", (3925, 3970), True, 'import streamlit as st\n'), ((4021, 4110), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'openai_api_key': 'openai.api_key', 'streaming': '(True)'}), "(model_name='gpt-3.5-turbo', openai_api_key=openai.api_key,\n streaming=True)\n", (4031, 4110), False, 'from langchain.chat_models import ChatOpenAI\n'), ((4176, 4240), 'langchain.agents.ConversationalChatAgent.from_llm_and_tools', 'ConversationalChatAgent.from_llm_and_tools', ([], {'llm': 'llm', 'tools': 'tools'}), '(llm=llm, tools=tools)\n', (4218, 4240), False, 'from langchain.agents import ConversationalChatAgent, AgentExecutor\n'), ((4254, 4399), 'langchain.agents.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', ([], {'agent': 'chat_agent', 'tools': 'tools', 'memory': 'memory', 'return_intermediate_steps': '(True)', 'handle_parsing_errors': '(True)'}), '(agent=chat_agent, tools=tools, memory=\n memory, return_intermediate_steps=True, handle_parsing_errors=True)\n', (4288, 4399), False, 'from langchain.agents import ConversationalChatAgent, AgentExecutor\n'), ((6089, 6116), 'langchain.document_loaders.PyPDFLoader', 'PyPDFLoader', (['temp_file_path'], {}), '(temp_file_path)\n', (6100, 6116), False, 'from langchain.document_loaders import TextLoader, PyPDFLoader\n'), ((6162, 6225), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['documents', 'embeddings'], {'connection': 'table'}), '(documents, embeddings, connection=table)\n', (6184, 6225), False, 'from langchain.vectorstores import LanceDB\n'), ((7135, 7174), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Reset chat history"""'], {}), "('Reset chat history')\n", (7152, 7174), True, 'import streamlit as st\n'), ((7742, 7800), 'streamlit.chat_input', 'st.chat_input', ([], {'placeholder': '"""Enter a query on the Internet"""'}), "(placeholder='Enter a query on the Internet')\n", (7755, 7800), True, 'import streamlit as st\n'), ((7851, 7940), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'openai_api_key': 'openai.api_key', 'streaming': '(True)'}), "(model_name='gpt-3.5-turbo', openai_api_key=openai.api_key,\n streaming=True)\n", (7861, 7940), False, 'from langchain.chat_models import 
ChatOpenAI\n'), ((8032, 8096), 'langchain.agents.ConversationalChatAgent.from_llm_and_tools', 'ConversationalChatAgent.from_llm_and_tools', ([], {'llm': 'llm', 'tools': 'tools'}), '(llm=llm, tools=tools)\n', (8074, 8096), False, 'from langchain.agents import ConversationalChatAgent, AgentExecutor\n'), ((8110, 8255), 'langchain.agents.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', ([], {'agent': 'chat_agent', 'tools': 'tools', 'memory': 'memory', 'return_intermediate_steps': '(True)', 'handle_parsing_errors': '(True)'}), '(agent=chat_agent, tools=tools, memory=\n memory, return_intermediate_steps=True, handle_parsing_errors=True)\n', (8144, 8255), False, 'from langchain.agents import ConversationalChatAgent, AgentExecutor\n'), ((3535, 3569), 'streamlit.chat_message', 'st.chat_message', (['avatars[msg.type]'], {}), '(avatars[msg.type])\n', (3550, 3569), True, 'import streamlit as st\n'), ((3875, 3896), 'streamlit.write', 'st.write', (['msg.content'], {}), '(msg.content)\n', (3883, 3896), True, 'import streamlit as st\n'), ((4125, 4159), 'langchain.tools.DuckDuckGoSearchRun', 'DuckDuckGoSearchRun', ([], {'name': '"""Search"""'}), "(name='Search')\n", (4144, 4159), False, 'from langchain.tools import DuckDuckGoSearchRun\n'), ((4422, 4450), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (4437, 4450), True, 'import streamlit as st\n'), ((4584, 4612), 'streamlit.write', 'st.write', (["response['output']"], {}), "(response['output'])\n", (4592, 4612), True, 'import streamlit as st\n'), ((4785, 4812), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (4801, 4812), False, 'import os\n'), ((5237, 5298), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)', 'suffix': 'file_suffix'}), '(delete=False, suffix=file_suffix)\n', (5264, 5298), False, 'import tempfile\n'), ((7365, 7399), 'streamlit.chat_message', 'st.chat_message', (['avatars[msg.type]'], {}), '(avatars[msg.type])\n', (7380, 7399), True, 'import streamlit as st\n'), ((7705, 7726), 'streamlit.write', 'st.write', (['msg.content'], {}), '(msg.content)\n', (7713, 7726), True, 'import streamlit as st\n'), ((7972, 8015), 'langchain.tools.DuckDuckGoSearchRun', 'DuckDuckGoSearchRun', ([], {'name': '"""Internet Search"""'}), "(name='Internet Search')\n", (7991, 8015), False, 'from langchain.tools import DuckDuckGoSearchRun\n'), ((8278, 8306), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (8293, 8306), True, 'import streamlit as st\n'), ((8440, 8468), 'streamlit.write', 'st.write', (["response['output']"], {}), "(response['output'])\n", (8448, 8468), True, 'import streamlit as st\n'), ((3974, 3997), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (3989, 3997), True, 'import streamlit as st\n'), ((4488, 4502), 'streamlit.container', 'st.container', ([], {}), '()\n', (4500, 4502), True, 'import streamlit as st\n'), ((7804, 7827), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (7819, 7827), True, 'import streamlit as st\n'), ((8344, 8358), 'streamlit.container', 'st.container', ([], {}), '()\n', (8356, 8358), True, 'import streamlit as st\n'), ((3737, 3809), 'streamlit.status', 'st.status', (['f"""**{step[0].tool}**: {step[0].tool_input}"""'], {'state': '"""complete"""'}), "(f'**{step[0].tool}**: {step[0].tool_input}', state='complete')\n", (3746, 3809), True, 'import streamlit as st\n'), ((3827, 3848), 
'streamlit.write', 'st.write', (['step[0].log'], {}), '(step[0].log)\n', (3835, 3848), True, 'import streamlit as st\n'), ((3854, 3871), 'streamlit.write', 'st.write', (['step[1]'], {}), '(step[1])\n', (3862, 3871), True, 'import streamlit as st\n'), ((7567, 7639), 'streamlit.status', 'st.status', (['f"""**{step[0].tool}**: {step[0].tool_input}"""'], {'state': '"""complete"""'}), "(f'**{step[0].tool}**: {step[0].tool_input}', state='complete')\n", (7576, 7639), True, 'import streamlit as st\n'), ((7657, 7678), 'streamlit.write', 'st.write', (['step[0].log'], {}), '(step[0].log)\n', (7665, 7678), True, 'import streamlit as st\n'), ((7684, 7701), 'streamlit.write', 'st.write', (['step[1]'], {}), '(step[1])\n', (7692, 7701), True, 'import streamlit as st\n')] |
import os
import glob
import tqdm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import KBinsDiscretizer, StandardScaler
from sklearn.pipeline import Pipeline
import joblib
# from sklearn.svm import SVC, LinearSVC
# from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support
from sklearn.preprocessing import StandardScaler
from sklearn.manifold import TSNE, Isomap
from .config import get_cfg
from .util.step_annotations import load_object_annotations, get_obj_anns
from IPython import embed
import warnings
warnings.simplefilter('once')
STATE = 'state'
def remap_labels(sdf, old_col, new_col):
RENAME = {
'[partial]': '',
'[full]': '',
'floss-underneath': 'ends-cut',
'floss-crossed': 'ends-cut',
'raisins[cooked]': 'raisins',
'oatmeal[cooked]+raisins': 'oatmeal+raisins',
'teabag': 'tea-bag',
'+stirrer': '',
'[stirred]': '',
'water+honey': 'water',
'with-quesadilla': 'with-food',
'with-pinwheels': 'with-food',
}
sdf[new_col] = sdf[old_col].copy()
for old, new in RENAME.items():
        sdf[new_col] = sdf[new_col].str.replace(old, new, regex=False)  # patterns contain regex metachars; replace literally
sdf = sdf[~sdf[new_col].isin(['folding', 'on-plate', 'rolling'])]
return sdf
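# Worked example of the remapping above (illustrative values): 'floss-crossed'
# becomes 'ends-cut', 'oatmeal[cooked]+raisins' collapses to 'oatmeal+raisins',
# and rows whose new label is 'folding', 'on-plate' or 'rolling' are dropped.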
# ---------------------------------------------------------------------------- #
# Data Loading #
# ---------------------------------------------------------------------------- #
class bc:
HEADER = '\033[95m'
BLUE = '\033[94m'
CYAN = '\033[96m'
GREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
END = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def load_data(cfg, data_file_pattern, include=None):
'''Load npz files (one per video) with embedding and label keys and concatenate
'''
# if os.path.isfile('dataset.pkl'):
# print('reading pickle')
# df = pd.read_pickle('dataset.pkl')
# print(df.head())
# return df
use_aug = cfg.EVAL.USE_AUGMENTATIONS
embeddings_list, df_list = [], []
class_map = {}
# steps_df, meta_df, object_names = load_annotations(cfg)
dfs = load_object_annotations(cfg)
fs = glob.glob(data_file_pattern)
# if cfg.EVAL.TRAIN_BASE_ROOT:
# fs += glob.glob(f'{cfg.EVAL.TRAIN_BASE_ROOT}/embeddings/{cfg.EVAL.DETECTION_NAME}/*/clip/*.npz')
if len(set(fs)) < len(fs):
print("Warning duplicate files in training set!\n\n")
input()
print(f"Found {len(fs)} files", fs[:1])
for f in tqdm.tqdm(fs, desc='loading data...'):
# if 'coffee_mit-eval' in f:
# embed()
if include and not any(fi in f for fi in include):
print("Skipping", f)
continue
data = np.load(f)
z = data['z']
z = z / np.linalg.norm(z, axis=-1, keepdims=True)
frame_idx = data['frame_index']
# maybe filter out augmentations
aug = data.get('augmented')
if aug is None or use_aug:
aug = np.zeros(len(z), dtype=bool)
z = z[~aug]
frame_idx = frame_idx[~aug]
# get video ID and track ID
video_id = data.get('video_name')
if video_id is None:
video_id = f.split('/')[-3]
else:
video_id = video_id.item()
video_id = os.path.splitext(video_id)[0]
track_id = data.get('track_id')
if track_id is None:
track_id = f.split('/')[-1].split('.')[0]
else:
track_id = track_id.item()
track_id = int(track_id)
if video_id not in dfs or track_id not in dfs[video_id]:
tqdm.tqdm.write(f"{bc.FAIL}Skipping{bc.END}: {video_id}: {track_id}")
continue
tqdm.tqdm.write(f"Using: {video_id}: {track_id}")
# get object state annotations
ann = get_obj_anns(remap_labels(dfs[video_id][track_id], 'state', 'state'), frame_idx)
embeddings_list.append(z)
df_list.append(pd.DataFrame({
'index': frame_idx,
'object': ann.object,
'state': ann.state,
'track_id': track_id,
'video_id': video_id,
}))
# print()
# print(df_list[-1][['object', 'state']].value_counts())
# print()
# if input(): embed()
    if input(): embed()  # pause: typing anything drops into an IPython shell
X = np.concatenate(embeddings_list)
df = pd.concat(df_list)
df['vector'] = list(X)
df.to_pickle('dataset.pkl')
return df
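# A hedged sketch of the per-track npz layout load_data() expects (keys taken
# from the reads above; the path shape matches the glob used in get_data below):
# np.savez(
#     '<root>/<detection_name>/<video_id>/clip/<track_id>.npz',
#     z=embs,                               # (n_frames, dim) per-frame embeddings
#     frame_index=np.arange(len(embs)),     # frame indices into the source video
#     augmented=np.zeros(len(embs), bool),  # True marks augmented copies
#     video_name='video_0001.mp4',          # optional; else parsed from the path
#     track_id=3,                           # optional; else parsed from the filename
# )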
def load_data_from_db(cfg, state_col, emb_type='clip'):
import lancedb
dfs = []
fs = cfg.EVAL.EMBEDDING_DBS
f = os.path.join(cfg.DATASET.ROOT, f'{emb_type}.lancedb')
    if not fs and os.path.isdir(f):  # a lancedb database is a directory, not a file
        fs = [f]
for db_fname in fs:
print(db_fname)
assert os.path.isdir(db_fname)
db = lancedb.connect(db_fname)
for object_name in tqdm.tqdm(db.table_names()):
dfs.append(db.open_table(object_name).to_pandas())
df = pd.concat(dfs) if dfs else pd.DataFrame({state_col: []})
if state_col:
df['state'] = df[state_col]
return df
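# A hedged sketch (names are assumptions) of a LanceDB layout this loader can
# read back: one table per object, each row carrying at least the columns used
# downstream -- a 'vector', a 'video_id', a 'track_id' and the state column.
# db = lancedb.connect(os.path.join(cfg.DATASET.ROOT, 'clip.lancedb'))
# db.create_table('mug', data=[
#     {'vector': z_row, 'video_id': 'vid_0001', 'track_id': 3, 'mod_state': 'empty'},
# ])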
def read_split_file(fname):
lines = open(fname).read().splitlines()
lines = [l.strip() for l in lines]
lines = [l for l in lines if l and not l.startswith('#')]
return lines
# ---------------------------------------------------------------------------- #
# Training #
# ---------------------------------------------------------------------------- #
def train_eval(run_name, model, X, y, i_train, i_test, video_ids, plot_dir='plots', **meta):
'''Train and evaluate a model'''
print(run_name, model)
# plot_dir = f'{plot_dir}/{run_name}'
# os.makedirs(plot_dir, exist_ok=True)
X_train, X_test = X[i_train], X[i_test]
y_train, y_test = y[i_train], y[i_test]
# print(set(y_train))
# print(set(y_test))
# if input(): embed()
# from imblearn.over_sampling import SMOTE
# from imblearn.under_sampling import RandomUnderSampler
# from imblearn.pipeline import Pipeline
# # Create a pipeline to balance the classes using SMOTE
# pipeline = Pipeline([
# # ('oversample', SMOTE(sampling_strategy='auto')), # You can adjust sampling_strategy
# ('undersample', RandomUnderSampler(sampling_strategy='auto')) # You can adjust sampling_strategy
# ])
# X_test2, y_test2 = X_test, y_test
# X_test, y_test = pipeline.fit_resample(X_test, y_test)
# print(X_test2.shape, y_test2.shape, X_test.shape, y_test.shape)
assert not (set(video_ids[i_train]) & set(video_ids[i_test])), "Being extra sure... this is a nono"
    # Standardize features. The Pipeline below only holds references to the
    # same scaler/model objects, so fitting them manually (as done here) also
    # leaves the saved pipeline fully fitted.
    scaler = StandardScaler()
    pipeline = Pipeline([
        ('scaler', scaler),
        ('model', model)
    ])
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# ----------------------------------- Train ---------------------------------- #
# Train the classifier
model.fit(X_train, y_train)
# Make predictions on the test set
y_pred = model.predict(X_test)
y_emis = model.predict_proba(X_test)
# with open(os.path.join(plot_dir, f'{run_name}.pkl'), 'rb') as f:
# pickle.dump(y_pred, f)
# Save the entire pipeline
with open(os.path.join(plot_dir, f'{run_name}_pipeline.pkl'), 'wb') as f:
joblib.dump(pipeline, f)
# ------------------------------- Visualization ------------------------------ #
# Generate plots
all_metrics = []
# compute vanilla metrics
meta['run_name'] = meta['metric_name'] = run_name
metrics = get_metrics(y_test, y_pred, **meta)
all_metrics.append(metrics)
tqdm.tqdm.write(f'Accuracy for {run_name}: {metrics["accuracy"]:.2f}')
# generate vanilla plots
video_ids_test = video_ids[i_test]
emission_plot(plot_dir, y_emis, y_test, model.classes_, f'{run_name}_ma0_', video_ids=video_ids_test)
emission_plot(plot_dir, y_emis, y_test, model.classes_, f'{run_name}_ma0_ypred_', show_ypred=True, video_ids=video_ids_test)
cm_plot(plot_dir, y_test, y_pred, model.classes_, f'{run_name}_')
# with moving average
for winsize in [2, 4, 8, 16]:
y_ = moving_average(y_emis, winsize)
y_pred_ = np.asarray(model.classes_)[np.argmax(y_, axis=1)]
# emission_plot(plot_dir, y_, y_test, model.classes_, f'{run_name}_ma{winsize}_', video_ids=video_ids_test)
emission_plot(plot_dir, y_, y_test, model.classes_, f'{run_name}_ma{winsize}_ypred_', show_ypred=True, video_ids=video_ids_test)
cm_plot(plot_dir, y_test, y_pred_, model.classes_, f'{run_name}_cm_ma{winsize}_')
meta = {**meta}
meta['metric_name'] = f'{run_name}_movingavg-{winsize}'
metrics = get_metrics(y_test, y_pred_, smoothing='ma', win_size=winsize, **meta)
all_metrics.append(metrics)
for alpha in [0.1, 0.2, 0.5]:
y_ = exponentially_decaying_average(y_emis, alpha)
y_pred_ = np.asarray(model.classes_)[np.argmax(y_, axis=1)]
emission_plot(plot_dir, y_, y_test, model.classes_, f'{run_name}_ema{alpha}_ypred_', show_ypred=True, video_ids=video_ids_test)
cm_plot(plot_dir, y_test, y_pred, model.classes_, f'{run_name}_')
meta = {**meta}
meta['metric_name'] = f'{run_name}_expmovingavg-{alpha}'
metrics = get_metrics(y_test, y_pred_, smoothing='ema', alpha=alpha, **meta)
all_metrics.append(metrics)
# y_hmm = hmm_forward(y_emis, len(model.classes_))
# emission_plot(plot_dir, y_hmm, y_test, model.classes_, f'{run_name}_trans_', video_ids=video_ids_test)
# emission_plot(plot_dir, y_hmm, y_test, model.classes_, f'{run_name}_trans_ypred_', show_ypred=True, video_ids=video_ids_test)
# # embed()
# get per class metrics
per_class_metrics = []
for c in np.unique(y):
per_class_metrics.append(get_metrics(
y_test[y_test==c], y_pred[y_test==c], label=c, **meta))
# tqdm.tqdm.write(f'F1 for {run_name}: {metrics["f1"]:.2f}')
return all_metrics, per_class_metrics
def get_metrics(y_test, y_pred, **meta):
precision, recall, f1_score, _ = precision_recall_fscore_support(y_test, y_pred, zero_division=np.nan, average='macro')
return {
'accuracy': accuracy_score(y_test, y_pred),
'f1': f1_score,
        'precision': precision,
        'avg_recall': recall,
**meta
}
# ---------------------------------------------------------------------------- #
# Visualization #
# ---------------------------------------------------------------------------- #
def emb_plot(plot_dir, X, y, prefix='', n=3000):
fname = f'{plot_dir}/{prefix}_proj.png'
if os.path.isfile(fname): return
print("creating emb plot", fname)
# Create a TSNE embedding plot (optional)
# tsne = TSNE(n_components=2)
m = Isomap()
i = np.random.choice(np.arange(len(X)), size=n)
X, y = X[i], y[i]
Z = m.fit_transform(X)
print(Z.shape)
plt.figure(figsize=(10, 8))
for c in np.unique(y):
plt.scatter(Z[y==c, 0], Z[y==c, 1], label=str(c), s=20, alpha=0.3)
plt.legend()
plt.title(f'Embedding Projection: {prefix}')
pltsave(fname)
def emission_plot(plot_dir, X, y, classes, prefix='', video_ids=None, show_ypred=False):
plt.figure(figsize=(10, 8))
plt.imshow(X.T, cmap='cubehelix', aspect='auto')
cs = {c: i for i, c in enumerate(classes)}
classes = list(classes)
for c in set(y) - set(cs):
cs[c] = len(cs)
classes.append(c)
plt.plot(np.array([cs[yi] for yi in y]), c='r')
if show_ypred:
plt.scatter(np.arange(len(X)), np.argmax(X, axis=1), c='white', s=5, alpha=0.2)
ic = range(len(classes))
plt.yticks(ic, [classes[i] for i in ic])
pltsave(f'{plot_dir}/{prefix}emissions.png')
os.makedirs(f'{plot_dir}/npzs', exist_ok=True)
np.savez(
f'{plot_dir}/npzs/{prefix}emissions.npz',
predictions=X, ground_truth=y,
video_ids=video_ids, classes=classes)
def cm_plot(plot_dir, y_test, y_pred, classes, prefix=''):
# classes = np.unique(y_test) if classes is None else classes
cm = confusion_matrix(y_test, y_pred, labels=classes, normalize='true')*100
# Plot and save the confusion matrix
plt.figure(figsize=(10, 8))
sns.heatmap(cm, annot=True, fmt='.0f', cmap='magma', cbar=False, square=True,
xticklabels=classes, yticklabels=classes)
plt.xlabel('Predicted')
plt.ylabel('Actual')
    plt.title('Confusion Matrix')
pltsave(f'{plot_dir}/{prefix}confusion_matrix.png')
def n_videos_metrics(plot_dir, all_metrics, prefix=''):
# Plot accuracy and F1-score vs. the number of videos
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(all_metrics.n_videos, all_metrics.accuracy, marker='o')
plt.title('Accuracy vs. Number of Videos')
plt.xlabel('Number of Videos')
plt.ylabel('Accuracy')
plt.subplot(1, 2, 2)
plt.plot(all_metrics.n_videos, all_metrics.f1, marker='o', color='orange')
plt.title('F1 Score vs. Number of Videos')
plt.xlabel('Number of Videos')
plt.ylabel('F1 Score')
plt.tight_layout()
pltsave(f'{plot_dir}/{prefix}accuracy_f1_vs_videos.png')
def cross_model_metrics(plot_dir, all_metrics, prefix=''):
# Plot accuracy and F1-score vs. the number of videos
plt.figure(figsize=(15, 6))
plt.subplot(1, 2, 1)
for name, mdf in all_metrics[all_metrics.smoothing == 'ma'].groupby("run_name"):
plt.plot(mdf.win_size, mdf.f1, label=name)
plt.legend()
plt.title('F1 Score vs. Number of Videos')
plt.xlabel('Moving Average Window Size')
plt.ylabel('F1 Score')
plt.tight_layout()
plt.subplot(1, 2, 2)
for name, mdf in all_metrics[all_metrics.smoothing == 'ema'].groupby("run_name"):
plt.plot(mdf.alpha, mdf.f1, label=name)
plt.legend()
    plt.title('F1 Score vs. EMA: y[t] = alpha * y[t-1] + (1 - alpha) * x[t]')
plt.xlabel('Exp Moving Average alpha')
plt.ylabel('F1 Score')
plt.tight_layout()
pltsave(f'{plot_dir}/{prefix}accuracy_f1_vs_smooth.png')
# def n_videos_class_metrics(plot_dir, all_metrics, prefix=''):
# # Plot accuracy and F1-score vs. the number of videos
# plt.figure(figsize=(12, 5))
# plt.subplot(1, 2, 1)
# for c, df in all_metrics.groupby('label'):
# # cc = df.class_count.mean()
# plt.plot(df.n_videos, df.accuracy, marker='o', label=c)#f'{c} {cc:.0f}'
# plt.legend()
# plt.title('Accuracy vs. Number of Videos')
# plt.xlabel('Number of Videos')
# plt.ylabel('Accuracy')
# plt.subplot(1, 2, 2)
# for c, df in all_metrics.groupby('label'):
# # cc = df.class_count.mean()
# plt.plot(df.n_videos, df.accuracy, marker='o', label=c)#f'{c} {cc:.0f}'
# plt.title('F1 Score vs. Number of Videos')
# plt.xlabel('Number of Videos')
# plt.ylabel('F1 Score')
# plt.legend()
# plt.tight_layout()
# pltsave(f'{plot_dir}/{prefix}accuracy_f1_vs_videos_per_class.png')
def pltsave(fname):
os.makedirs(os.path.dirname(fname) or '.', exist_ok=True)
plt.savefig(fname)
plt.close()
# ---------------------------------------------------------------------------- #
# HMM #
# ---------------------------------------------------------------------------- #
# def create_hmm(num_states, p_self=0.9):
# transition_matrix = np.eye(num_states) * p_self + (1.0 - p_self) / (num_states - 1)
# emission_matrix = np.eye(num_states)
# initial_prob = np.ones(num_states) / num_states
# return initial_prob, emission_matrix, transition_matrix
# Forward pass to compute the forward probabilities
def hmm_forward(sequence, num_states, p_self=0.9):
    '''Forward pass over per-frame emission probabilities using a sticky
    transition matrix: p_self on the diagonal, the remainder spread uniformly
    over the other states so that every row sums to 1.'''
    off_diag = (1.0 - p_self) / (num_states - 1)
    transition_matrix = np.full((num_states, num_states), off_diag)
    np.fill_diagonal(transition_matrix, p_self)
    forward_prob = np.zeros((len(sequence), num_states))
    forward_prob[0, :] = 1.0 / num_states  # uniform prior over states
    for t in range(1, len(sequence)):
        # predict through the transition model, then reweight by the emissions
        forward_prob[t] = (forward_prob[t - 1] @ transition_matrix) * sequence[t]
        forward_prob[t] /= np.sum(forward_prob[t])
    return forward_prob
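# Toy check for the forward pass above: with a sticky prior (p_self=0.9) a
# single flipped frame in otherwise confident emissions is heavily damped;
# here it only yields ~0.55 for state 1 instead of the raw emission's 0.9.
# emis = np.array([[0.9, 0.1], [0.9, 0.1], [0.1, 0.9], [0.9, 0.1]])
# print(hmm_forward(emis, num_states=2))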
def moving_average(a, n=3, axis=0):
ret = np.cumsum(a, dtype=float, axis=axis)
ret[n:] = (ret[n:] - ret[:-n]) / n
    ret[:n] = ret[:n] / np.arange(1, n + 1)[:, None]  # partial means; avoids dividing row 0 by zero
return ret
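# Worked example for moving_average above: a constant signal is a fixed point,
# e.g. moving_average(np.ones((5, 1)), n=3) returns all ones, with the first
# n-1 rows computed as partial (shorter-window) means.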
def exponentially_decaying_average(a, decay_rate):
assert 0 < decay_rate < 1, "Decay rate must be between 0 and 1."
result = a.copy()
result[0, :] = a[0, :]
for t in range(1, a.shape[0]):
result[t, :] = decay_rate * result[t - 1, :] + (1 - decay_rate) * a[t, :]
return result
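# Worked example for the EMA above with decay_rate=0.5 on the step signal
# a = [[0.], [1.], [1.]]: the result is [[0.], [0.5], [0.75]], since each step
# keeps half of the previous smoothed value and takes half of the new frame.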
# ---------------------------------------------------------------------------- #
# Training Meta Loop #
# ---------------------------------------------------------------------------- #
def get_data(cfg, STATE, full_split, emb_type='clip'):
emb_dirs = cfg.EVAL.EMBEDDING_DIRS or [os.path.join(cfg.DATASET.ROOT, 'embeddings-all', cfg.EVAL.DETECTION_NAME)]
ydf = load_data_from_db(cfg, state_col='mod_state')
db_train_split = ydf.video_id.unique().tolist()
ydf = pd.concat([
*[
load_data(cfg, f'{d}/{cfg.EVAL.DETECTION_NAME}/*/{emb_type}/*.npz', include=set(full_split) - set(ydf.video_id.unique()))
for d in emb_dirs
],
ydf
])
print(ydf.groupby('object').state.value_counts())
    # sample at most 15k rows per state to balance the classes
ydf = sample_random(ydf, STATE, 15000)
print(ydf.groupby('object').state.value_counts())
print('Nulls:', ydf[pd.isna(ydf.state)].video_id.value_counts())
assert None not in set(ydf.state)
return ydf, db_train_split
def sample_random(df, STATE, n):
df = df.groupby(STATE, group_keys=False).apply(lambda x: x.sample(min(len(x), n)))
return df
def get_models(cfg):
return [
# (KNeighborsClassifier, 'knn5', {'n_neighbors': 5}),
# (KNeighborsClassifier, 'knn11-50', {'n_neighbors': 11}, lambda df: sample_random(df, STATE, 50)),
# (KNeighborsClassifier, 'knn11-100', {'n_neighbors': 11}, lambda df: sample_random(df, STATE, 100)),
(KNeighborsClassifier, 'knn5-2000', {'n_neighbors': 5}, lambda df: sample_random(df, STATE, 2000)),
(KNeighborsClassifier, 'knn21-2000', {'n_neighbors': 21}, lambda df: sample_random(df, STATE, 2000)),
(KNeighborsClassifier, 'knn11-1000', {'n_neighbors': 11}, lambda df: sample_random(df, STATE, 1000)),
(KNeighborsClassifier, 'knn11-2000', {'n_neighbors': 11}, lambda df: sample_random(df, STATE, 2000)),
(KNeighborsClassifier, 'knn11-5000', {'n_neighbors': 11}, lambda df: sample_random(df, STATE, 5000)),
(KNeighborsClassifier, 'knn11-12000', {'n_neighbors': 11}, lambda df: sample_random(df, STATE, 12000)),
# (KNeighborsClassifier, 'knn50', {'n_neighbors': 50}),
(LogisticRegression, 'logreg', {}, lambda df: sample_random(df, STATE, 5000)),
(RandomForestClassifier, 'rf', {}, lambda df: sample_random(df, STATE, 5000)),
# (
# make_pipeline(
# StandardScaler(),
# KBinsDiscretizer(encode="onehot", random_state=0),
# LogisticRegression(random_state=0),
# ),
# 'kbins_logreg',
# {
# "kbinsdiscretizer__n_bins": np.arange(5, 8),
# "logisticregression__C": np.logspace(-1, 1, 3),
# },
# ),
]
def prepare_data(odf, STATE, sampler, train_split, val_split):
video_ids = odf['video_id'].values
unique_video_ids = np.unique(video_ids)
# obj_train_base_split = [f for f in train_base_split if f in video_ids and f not in val_split]
obj_train_split = [f for f in train_split if f in video_ids and f not in val_split]
obj_val_split = [f for f in val_split if f in video_ids]
# embed()
# obj_train_split = sorted(obj_train_split, key=lambda v: -len(odf[odf.video_id == v].state.unique()))
# print("Base Training split:", obj_train_base_split)
print("Training split:", obj_train_split)
print("Validation split:", obj_val_split)
print("Unused videos:", set(unique_video_ids) - set(obj_train_split+obj_val_split))
print("Missing videos:", set(obj_train_split+obj_val_split) - set(unique_video_ids))
i_train = np.isin(video_ids, obj_train_split)
# i_train = np.isin(video_ids, obj_train_base_split + obj_train_split[:nvids])
i_val = np.isin(video_ids, obj_val_split)
    if sampler is not None:
        # Subsample the training rows, then rebuild the boolean masks on the
        # concatenated frame (the masks must index the new row order).
        odf_train = sampler(odf.iloc[i_train])
        odf_val = odf.iloc[i_val]
        odf = pd.concat([odf_train, odf_val])
        video_ids = odf['video_id'].values
        i_train = np.isin(video_ids, obj_train_split)
        i_val = np.isin(video_ids, obj_val_split)
X = np.array(list(odf['vector'].values))
y = odf[STATE].values
print()
print("all data:")
print('X', X.shape)
print('y', y.shape)
print(odf[['video_id', 'track_id', STATE]].value_counts())
# embed()
return X, y, video_ids, i_train, i_val
import ipdb
@ipdb.iex
def run(config_name):
cfg = get_cfg(config_name)
root_plot_dir = root_plot_dir_ = cfg.EVAL.PLOT_DIR or 'plots'
# i=0
# while os.path.isdir(root_plot_dir_):
# root_plot_dir_ = root_plot_dir + f'_{i}'
# i+=1
# root_plot_dir=root_plot_dir_
if os.path.isdir(root_plot_dir):
raise RuntimeError(f"{root_plot_dir} exists")
os.makedirs(root_plot_dir, exist_ok=True)
    # STATE names the label column; the sampler lambdas in get_models()
    # resolve it as a module-level global at call time.
    global STATE
    STATE = 'state'  # coarser alternative: 'super_simple_state'
train_split = read_split_file(cfg.EVAL.TRAIN_CSV)
train_base_split = read_split_file(cfg.EVAL.TRAIN_BASE_CSV)
val_splits = [(f, read_split_file(f)) for f in cfg.EVAL.VAL_CSVS]
print(len(train_base_split), train_base_split[:5])
print(len(train_split), train_split[:5])
print(len(val_splits), val_splits[:5])
full_train_split = train_split + train_base_split
full_val_split = [x for f, xs in val_splits for x in xs]
full_split = full_train_split + full_val_split
print(full_split)
    for _, val_split in val_splits:
        assert not set(full_train_split) & set(val_split), \
            f"train/val overlap: {set(full_train_split) & set(val_split)}"
models = get_models(cfg)
    cfg.EVAL.EMBEDDING_TYPES = ['clip']  # restrict this run to CLIP embeddings
    for emb_type in tqdm.tqdm(cfg.EVAL.EMBEDDING_TYPES, desc='embedding type'):
        ydf, db_train_split = get_data(cfg, STATE, full_split, emb_type=emb_type)
emb_plot(f'{root_plot_dir}/{emb_type}', np.array(list(ydf['vector'].values)), ydf['object'].values, 'object')
# emb_plot(f'{root_plot_dir}/{emb_type}', np.array(list(ydf['vector'].values)), ydf[STATE].values, 'states')
for (val_split_fname, val_split) in val_splits:
val_split_name = val_split_fname.split('/')[-1].removesuffix('.txt')
for object_name, odf in ydf.groupby('object'):
plot_dir = f'{root_plot_dir}/{val_split_name}/{emb_type}/{object_name}'
os.makedirs(plot_dir, exist_ok=True)
all_metrics = []
all_per_class_metrics = []
for cls, name, kw, sampler in tqdm.tqdm(models, desc='models'):
X, y, video_ids, i_train, i_val = prepare_data(odf, STATE, sampler, db_train_split, val_split)
emb_plot(f'{root_plot_dir}/{emb_type}_{object_name}', X, y, 'states')
if not i_val.sum():
print(f"\n\n\n\nSKIPPING i_val is empty. {val_split}\n\n\n")
continue
print(f"Training with: train size: {len(i_train)} val size: {len(i_val)}")
print("Train Counts:")
train_counts = show_counts(y[i_train])
print(train_counts)
print("Val Counts:")
val_counts = show_counts(y[i_val])
print(val_counts)
model = cls(**kw)
metrics, per_class_metrics = train_eval(
f'{val_split_name}_{emb_type}_{name}', model,
X, y, i_train, i_val,
video_ids=video_ids,
plot_dir=plot_dir,
model_name=name,
# n_videos=nvids,
**kw)
all_metrics.extend(metrics)
all_per_class_metrics.extend(per_class_metrics)
all_metrics_df = pd.DataFrame(all_metrics)
all_per_class_metrics_df = pd.DataFrame(all_per_class_metrics)
all_metrics_df.to_csv(f'{plot_dir}/metrics.csv')
all_per_class_metrics_df.to_csv(f'{plot_dir}/class_metrics.csv')
# ---------- Show how it performs as a function of number of videos ---------- #
if len(all_metrics):
cross_model_metrics(plot_dir, all_metrics_df, f'{emb_type}_')
# n_videos_class_metrics(plot_dir, all_per_class_metrics_df, f'{emb_type}_')
def show_counts(y):
yu, counts = np.unique(y, return_counts=True)
for yui, c in zip(yu, counts):
print(yui, c)
return dict(zip(yu, counts))
@ipdb.iex
def show_data(config_name, emb_type='clip'):
cfg = get_cfg(config_name)
emb_dir = os.path.join(cfg.DATASET.ROOT, 'embeddings1', cfg.EVAL.DETECTION_NAME)
emb_types = cfg.EVAL.EMBEDDING_TYPES
data_file_pattern = f'{emb_dir}/*/{emb_type}/*.npz'
# dfs = load_object_annotations(cfg)
# # for vid, odfs in dfs.items():
# # print(vid)
# # print(set(odfs))
# # input()
# for vid, odfs in dfs.items():
# print(vid)
# print({k: odfs[k].shape for k in odfs})
# for vid, odfs in dfs.items():
# print(vid)
# for k in odfs:
# print(k)
# print(odfs[k])
# embed()
# X, y, video_ids, class_map = load_data(cfg, data_file_pattern, use_aug=False)
# df = pd.DataFrame({'vids': video_ids, 'y': y})
# df['label'] = df.y.apply(lambda y: class_map[y])
# for v, rows in df.groupby('vids'):
# print(v)
# print(rows.label.value_counts())
df = load_data(cfg, data_file_pattern, use_aug=False)
for object_name, odf in df.groupby('object'):
print(object_name)
print(odf[['state']].value_counts())
# print(odf[['state', 'video_id']].value_counts())
x = odf[['state', 'video_id']].value_counts().unstack().fillna(0)
print(x)
x.to_csv(f"{object_name}_video_counts.csv")
# embed()
if __name__ == '__main__':
import fire
fire.Fire() | [
"lancedb.connect"
] | [((938, 967), 'warnings.simplefilter', 'warnings.simplefilter', (['"""once"""'], {}), "('once')\n", (959, 967), False, 'import warnings\n'), ((2673, 2701), 'glob.glob', 'glob.glob', (['data_file_pattern'], {}), '(data_file_pattern)\n', (2682, 2701), False, 'import glob\n'), ((3011, 3048), 'tqdm.tqdm', 'tqdm.tqdm', (['fs'], {'desc': '"""loading data..."""'}), "(fs, desc='loading data...')\n", (3020, 3048), False, 'import tqdm\n'), ((4817, 4848), 'numpy.concatenate', 'np.concatenate', (['embeddings_list'], {}), '(embeddings_list)\n', (4831, 4848), True, 'import numpy as np\n'), ((4858, 4876), 'pandas.concat', 'pd.concat', (['df_list'], {}), '(df_list)\n', (4867, 4876), True, 'import pandas as pd\n'), ((5081, 5134), 'os.path.join', 'os.path.join', (['cfg.DATASET.ROOT', 'f"""{emb_type}.lancedb"""'], {}), "(cfg.DATASET.ROOT, f'{emb_type}.lancedb')\n", (5093, 5134), False, 'import os\n'), ((7191, 7207), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (7205, 7207), False, 'from sklearn.preprocessing import StandardScaler\n'), ((7223, 7271), 'sklearn.pipeline.Pipeline', 'Pipeline', (["[('scaler', scaler), ('model', model)]"], {}), "([('scaler', scaler), ('model', model)])\n", (7231, 7271), False, 'from sklearn.pipeline import Pipeline\n'), ((8186, 8256), 'tqdm.tqdm.write', 'tqdm.tqdm.write', (['f"""Accuracy for {run_name}: {metrics[\'accuracy\']:.2f}"""'], {}), '(f"Accuracy for {run_name}: {metrics[\'accuracy\']:.2f}")\n', (8201, 8256), False, 'import tqdm\n'), ((10331, 10343), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (10340, 10343), True, 'import numpy as np\n'), ((10652, 10742), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['y_test', 'y_pred'], {'zero_division': 'np.nan', 'average': '"""macro"""'}), "(y_test, y_pred, zero_division=np.nan,\n average='macro')\n", (10683, 10742), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support\n'), ((11250, 11271), 'os.path.isfile', 'os.path.isfile', (['fname'], {}), '(fname)\n', (11264, 11271), False, 'import os\n'), ((11406, 11414), 'sklearn.manifold.Isomap', 'Isomap', ([], {}), '()\n', (11412, 11414), False, 'from sklearn.manifold import TSNE, Isomap\n'), ((11539, 11566), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (11549, 11566), True, 'import matplotlib.pyplot as plt\n'), ((11580, 11592), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (11589, 11592), True, 'import numpy as np\n'), ((11673, 11685), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11683, 11685), True, 'import matplotlib.pyplot as plt\n'), ((11690, 11734), 'matplotlib.pyplot.title', 'plt.title', (['f"""Embedding Projection: {prefix}"""'], {}), "(f'Embedding Projection: {prefix}')\n", (11699, 11734), True, 'import matplotlib.pyplot as plt\n'), ((11849, 11876), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (11859, 11876), True, 'import matplotlib.pyplot as plt\n'), ((11881, 11929), 'matplotlib.pyplot.imshow', 'plt.imshow', (['X.T'], {'cmap': '"""cubehelix"""', 'aspect': '"""auto"""'}), "(X.T, cmap='cubehelix', aspect='auto')\n", (11891, 11929), True, 'import matplotlib.pyplot as plt\n'), ((12278, 12318), 'matplotlib.pyplot.yticks', 'plt.yticks', (['ic', '[classes[i] for i in ic]'], {}), '(ic, [classes[i] for i in ic])\n', (12288, 12318), True, 'import matplotlib.pyplot as plt\n'), ((12372, 12418), 'os.makedirs', 'os.makedirs', 
(['f"""{plot_dir}/npzs"""'], {'exist_ok': '(True)'}), "(f'{plot_dir}/npzs', exist_ok=True)\n", (12383, 12418), False, 'import os\n'), ((12423, 12546), 'numpy.savez', 'np.savez', (['f"""{plot_dir}/npzs/{prefix}emissions.npz"""'], {'predictions': 'X', 'ground_truth': 'y', 'video_ids': 'video_ids', 'classes': 'classes'}), "(f'{plot_dir}/npzs/{prefix}emissions.npz', predictions=X,\n ground_truth=y, video_ids=video_ids, classes=classes)\n", (12431, 12546), True, 'import numpy as np\n'), ((12822, 12849), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (12832, 12849), True, 'import matplotlib.pyplot as plt\n'), ((12854, 12978), 'seaborn.heatmap', 'sns.heatmap', (['cm'], {'annot': '(True)', 'fmt': '""".0f"""', 'cmap': '"""magma"""', 'cbar': '(False)', 'square': '(True)', 'xticklabels': 'classes', 'yticklabels': 'classes'}), "(cm, annot=True, fmt='.0f', cmap='magma', cbar=False, square=\n True, xticklabels=classes, yticklabels=classes)\n", (12865, 12978), True, 'import seaborn as sns\n'), ((12994, 13017), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted"""'], {}), "('Predicted')\n", (13004, 13017), True, 'import matplotlib.pyplot as plt\n'), ((13022, 13042), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Actual"""'], {}), "('Actual')\n", (13032, 13042), True, 'import matplotlib.pyplot as plt\n'), ((13047, 13077), 'matplotlib.pyplot.title', 'plt.title', (['f"""Confusion Matrix"""'], {}), "(f'Confusion Matrix')\n", (13056, 13077), True, 'import matplotlib.pyplot as plt\n'), ((13254, 13281), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 5)'}), '(figsize=(12, 5))\n', (13264, 13281), True, 'import matplotlib.pyplot as plt\n'), ((13286, 13306), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (13297, 13306), True, 'import matplotlib.pyplot as plt\n'), ((13311, 13375), 'matplotlib.pyplot.plot', 'plt.plot', (['all_metrics.n_videos', 'all_metrics.accuracy'], {'marker': '"""o"""'}), "(all_metrics.n_videos, all_metrics.accuracy, marker='o')\n", (13319, 13375), True, 'import matplotlib.pyplot as plt\n'), ((13380, 13422), 'matplotlib.pyplot.title', 'plt.title', (['"""Accuracy vs. Number of Videos"""'], {}), "('Accuracy vs. Number of Videos')\n", (13389, 13422), True, 'import matplotlib.pyplot as plt\n'), ((13427, 13457), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Videos"""'], {}), "('Number of Videos')\n", (13437, 13457), True, 'import matplotlib.pyplot as plt\n'), ((13462, 13484), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (13472, 13484), True, 'import matplotlib.pyplot as plt\n'), ((13490, 13510), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (13501, 13510), True, 'import matplotlib.pyplot as plt\n'), ((13515, 13589), 'matplotlib.pyplot.plot', 'plt.plot', (['all_metrics.n_videos', 'all_metrics.f1'], {'marker': '"""o"""', 'color': '"""orange"""'}), "(all_metrics.n_videos, all_metrics.f1, marker='o', color='orange')\n", (13523, 13589), True, 'import matplotlib.pyplot as plt\n'), ((13594, 13636), 'matplotlib.pyplot.title', 'plt.title', (['"""F1 Score vs. Number of Videos"""'], {}), "('F1 Score vs. 
Number of Videos')\n", (13603, 13636), True, 'import matplotlib.pyplot as plt\n'), ((13641, 13671), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Videos"""'], {}), "('Number of Videos')\n", (13651, 13671), True, 'import matplotlib.pyplot as plt\n'), ((13676, 13698), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""F1 Score"""'], {}), "('F1 Score')\n", (13686, 13698), True, 'import matplotlib.pyplot as plt\n'), ((13703, 13721), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13719, 13721), True, 'import matplotlib.pyplot as plt\n'), ((13906, 13933), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 6)'}), '(figsize=(15, 6))\n', (13916, 13933), True, 'import matplotlib.pyplot as plt\n'), ((13938, 13958), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (13949, 13958), True, 'import matplotlib.pyplot as plt\n'), ((14099, 14111), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (14109, 14111), True, 'import matplotlib.pyplot as plt\n'), ((14117, 14159), 'matplotlib.pyplot.title', 'plt.title', (['"""F1 Score vs. Number of Videos"""'], {}), "('F1 Score vs. Number of Videos')\n", (14126, 14159), True, 'import matplotlib.pyplot as plt\n'), ((14164, 14204), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Moving Average Window Size"""'], {}), "('Moving Average Window Size')\n", (14174, 14204), True, 'import matplotlib.pyplot as plt\n'), ((14209, 14231), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""F1 Score"""'], {}), "('F1 Score')\n", (14219, 14231), True, 'import matplotlib.pyplot as plt\n'), ((14236, 14254), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14252, 14254), True, 'import matplotlib.pyplot as plt\n'), ((14260, 14280), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (14271, 14280), True, 'import matplotlib.pyplot as plt\n'), ((14419, 14431), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (14429, 14431), True, 'import matplotlib.pyplot as plt\n'), ((14437, 14502), 'matplotlib.pyplot.title', 'plt.title', (['"""F1 Score vs. EMA alpha * x[t] + (1 - alpha) * x[t-1]"""'], {}), "('F1 Score vs. 
EMA alpha * x[t] + (1 - alpha) * x[t-1]')\n", (14446, 14502), True, 'import matplotlib.pyplot as plt\n'), ((14507, 14545), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Exp Moving Average alpha"""'], {}), "('Exp Moving Average alpha')\n", (14517, 14545), True, 'import matplotlib.pyplot as plt\n'), ((14550, 14572), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""F1 Score"""'], {}), "('F1 Score')\n", (14560, 14572), True, 'import matplotlib.pyplot as plt\n'), ((14577, 14595), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14593, 14595), True, 'import matplotlib.pyplot as plt\n'), ((15666, 15684), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (15677, 15684), True, 'import matplotlib.pyplot as plt\n'), ((15689, 15700), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15698, 15700), True, 'import matplotlib.pyplot as plt\n'), ((16849, 16885), 'numpy.cumsum', 'np.cumsum', (['a'], {'dtype': 'float', 'axis': 'axis'}), '(a, dtype=float, axis=axis)\n', (16858, 16885), True, 'import numpy as np\n'), ((20276, 20296), 'numpy.unique', 'np.unique', (['video_ids'], {}), '(video_ids)\n', (20285, 20296), True, 'import numpy as np\n'), ((21011, 21046), 'numpy.isin', 'np.isin', (['video_ids', 'obj_train_split'], {}), '(video_ids, obj_train_split)\n', (21018, 21046), True, 'import numpy as np\n'), ((21142, 21175), 'numpy.isin', 'np.isin', (['video_ids', 'obj_val_split'], {}), '(video_ids, obj_val_split)\n', (21149, 21175), True, 'import numpy as np\n'), ((21502, 21537), 'numpy.isin', 'np.isin', (['video_ids', 'obj_train_split'], {}), '(video_ids, obj_train_split)\n', (21509, 21537), True, 'import numpy as np\n'), ((21550, 21583), 'numpy.isin', 'np.isin', (['video_ids', 'obj_val_split'], {}), '(video_ids, obj_val_split)\n', (21557, 21583), True, 'import numpy as np\n'), ((22162, 22190), 'os.path.isdir', 'os.path.isdir', (['root_plot_dir'], {}), '(root_plot_dir)\n', (22175, 22190), False, 'import os\n'), ((22250, 22291), 'os.makedirs', 'os.makedirs', (['root_plot_dir'], {'exist_ok': '(True)'}), '(root_plot_dir, exist_ok=True)\n', (22261, 22291), False, 'import os\n'), ((23125, 23183), 'tqdm.tqdm', 'tqdm.tqdm', (['cfg.EVAL.EMBEDDING_TYPES'], {'desc': '"""embedding type"""'}), "(cfg.EVAL.EMBEDDING_TYPES, desc='embedding type')\n", (23134, 23183), False, 'import tqdm\n'), ((25907, 25939), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (25916, 25939), True, 'import numpy as np\n'), ((26143, 26213), 'os.path.join', 'os.path.join', (['cfg.DATASET.ROOT', '"""embeddings1"""', 'cfg.EVAL.DETECTION_NAME'], {}), "(cfg.DATASET.ROOT, 'embeddings1', cfg.EVAL.DETECTION_NAME)\n", (26155, 26213), False, 'import os\n'), ((27455, 27466), 'fire.Fire', 'fire.Fire', ([], {}), '()\n', (27464, 27466), False, 'import fire\n'), ((3237, 3247), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (3244, 3247), True, 'import numpy as np\n'), ((4220, 4269), 'tqdm.tqdm.write', 'tqdm.tqdm.write', (['f"""Using: {video_id}: {track_id}"""'], {}), "(f'Using: {video_id}: {track_id}')\n", (4235, 4269), False, 'import tqdm\n'), ((4801, 4808), 'IPython.embed', 'embed', ([], {}), '()\n', (4806, 4808), False, 'from IPython import embed\n'), ((5153, 5170), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (5167, 5170), False, 'import os\n'), ((5252, 5275), 'os.path.isdir', 'os.path.isdir', (['db_fname'], {}), '(db_fname)\n', (5265, 5275), False, 'import os\n'), ((5289, 5314), 'lancedb.connect', 'lancedb.connect', (['db_fname'], {}), 
'(db_fname)\n', (5304, 5314), False, 'import lancedb\n'), ((5443, 5457), 'pandas.concat', 'pd.concat', (['dfs'], {}), '(dfs)\n', (5452, 5457), True, 'import pandas as pd\n'), ((5470, 5499), 'pandas.DataFrame', 'pd.DataFrame', (['{state_col: []}'], {}), '({state_col: []})\n', (5482, 5499), True, 'import pandas as pd\n'), ((7860, 7884), 'joblib.dump', 'joblib.dump', (['pipeline', 'f'], {}), '(pipeline, f)\n', (7871, 7884), False, 'import joblib\n'), ((10772, 10802), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (10786, 10802), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support\n'), ((12099, 12129), 'numpy.array', 'np.array', (['[cs[yi] for yi in y]'], {}), '([cs[yi] for yi in y])\n', (12107, 12129), True, 'import numpy as np\n'), ((12706, 12772), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {'labels': 'classes', 'normalize': '"""true"""'}), "(y_test, y_pred, labels=classes, normalize='true')\n", (12722, 12772), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support\n'), ((14052, 14094), 'matplotlib.pyplot.plot', 'plt.plot', (['mdf.win_size', 'mdf.f1'], {'label': 'name'}), '(mdf.win_size, mdf.f1, label=name)\n', (14060, 14094), True, 'import matplotlib.pyplot as plt\n'), ((14375, 14414), 'matplotlib.pyplot.plot', 'plt.plot', (['mdf.alpha', 'mdf.f1'], {'label': 'name'}), '(mdf.alpha, mdf.f1, label=name)\n', (14383, 14414), True, 'import matplotlib.pyplot as plt\n'), ((21416, 21447), 'pandas.concat', 'pd.concat', (['[odf_train, odf_val]'], {}), '([odf_train, odf_val])\n', (21425, 21447), True, 'import pandas as pd\n'), ((3286, 3327), 'numpy.linalg.norm', 'np.linalg.norm', (['z'], {'axis': '(-1)', 'keepdims': '(True)'}), '(z, axis=-1, keepdims=True)\n', (3300, 3327), True, 'import numpy as np\n'), ((3804, 3830), 'os.path.splitext', 'os.path.splitext', (['video_id'], {}), '(video_id)\n', (3820, 3830), False, 'import os\n'), ((4121, 4190), 'tqdm.tqdm.write', 'tqdm.tqdm.write', (['f"""{bc.FAIL}Skipping{bc.END}: {video_id}: {track_id}"""'], {}), "(f'{bc.FAIL}Skipping{bc.END}: {video_id}: {track_id}')\n", (4136, 4190), False, 'import tqdm\n'), ((4462, 4586), 'pandas.DataFrame', 'pd.DataFrame', (["{'index': frame_idx, 'object': ann.object, 'state': ann.state, 'track_id':\n track_id, 'video_id': video_id}"], {}), "({'index': frame_idx, 'object': ann.object, 'state': ann.state,\n 'track_id': track_id, 'video_id': video_id})\n", (4474, 4586), True, 'import pandas as pd\n'), ((7788, 7838), 'os.path.join', 'os.path.join', (['plot_dir', 'f"""{run_name}_pipeline.pkl"""'], {}), "(plot_dir, f'{run_name}_pipeline.pkl')\n", (7800, 7838), False, 'import os\n'), ((8755, 8781), 'numpy.asarray', 'np.asarray', (['model.classes_'], {}), '(model.classes_)\n', (8765, 8781), True, 'import numpy as np\n'), ((8782, 8803), 'numpy.argmax', 'np.argmax', (['y_'], {'axis': '(1)'}), '(y_, axis=1)\n', (8791, 8803), True, 'import numpy as np\n'), ((9474, 9500), 'numpy.asarray', 'np.asarray', (['model.classes_'], {}), '(model.classes_)\n', (9484, 9500), True, 'import numpy as np\n'), ((9501, 9522), 'numpy.argmax', 'np.argmax', (['y_'], {'axis': '(1)'}), '(y_, axis=1)\n', (9510, 9522), True, 'import numpy as np\n'), ((12196, 12216), 'numpy.argmax', 'np.argmax', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (12205, 12216), True, 'import numpy as np\n'), ((15616, 15638), 'os.path.dirname', 'os.path.dirname', (['fname'], {}), '(fname)\n', (15631, 15638), 
False, 'import os\n'), ((16366, 16384), 'numpy.eye', 'np.eye', (['num_states'], {}), '(num_states)\n', (16372, 16384), True, 'import numpy as np\n'), ((16949, 16961), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (16958, 16961), True, 'import numpy as np\n'), ((17638, 17711), 'os.path.join', 'os.path.join', (['cfg.DATASET.ROOT', '"""embeddings-all"""', 'cfg.EVAL.DETECTION_NAME'], {}), "(cfg.DATASET.ROOT, 'embeddings-all', cfg.EVAL.DETECTION_NAME)\n", (17650, 17711), False, 'import os\n'), ((16752, 16775), 'numpy.sum', 'np.sum', (['forward_prob[t]'], {}), '(forward_prob[t])\n', (16758, 16775), True, 'import numpy as np\n'), ((23784, 23820), 'os.makedirs', 'os.makedirs', (['plot_dir'], {'exist_ok': '(True)'}), '(plot_dir, exist_ok=True)\n', (23795, 23820), False, 'import os\n'), ((23945, 23977), 'tqdm.tqdm', 'tqdm.tqdm', (['models'], {'desc': '"""models"""'}), "(models, desc='models')\n", (23954, 23977), False, 'import tqdm\n'), ((25301, 25326), 'pandas.DataFrame', 'pd.DataFrame', (['all_metrics'], {}), '(all_metrics)\n', (25313, 25326), True, 'import pandas as pd\n'), ((25370, 25405), 'pandas.DataFrame', 'pd.DataFrame', (['all_per_class_metrics'], {}), '(all_per_class_metrics)\n', (25382, 25405), True, 'import pandas as pd\n'), ((18253, 18271), 'pandas.isna', 'pd.isna', (['ydf.state'], {}), '(ydf.state)\n', (18260, 18271), True, 'import pandas as pd\n')] |
"""LanceDB vector store."""
from typing import Any, List, Optional
from llama_index.schema import MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.vector_stores.types import (
MetadataFilters,
NodeWithEmbedding,
VectorStore,
VectorStoreQuery,
VectorStoreQueryResult,
)
from llama_index.vector_stores.utils import node_to_metadata_dict
def _to_lance_filter(standard_filters: MetadataFilters) -> Any:
"""Translate standard metadata filters to Lance specific spec."""
filters = []
for filter in standard_filters.filters:
if isinstance(filter.value, str):
filters.append(filter.key + ' = "' + filter.value + '"')
else:
filters.append(filter.key + " = " + str(filter.value))
return " AND ".join(filters)
class LanceDBVectorStore(VectorStore):
"""The LanceDB Vector Store.
Stores text and embeddings in LanceDB. The vector store will open an existing
LanceDB dataset or create the dataset if it does not exist.
Args:
uri (str, required): Location where LanceDB will store its files.
table_name (str, optional): The table name where the embeddings will be stored.
Defaults to "vectors".
nprobes (int, optional): The number of probes used.
A higher number makes search more accurate but also slower.
Defaults to 20.
        refine_factor (int, optional): Refine the results by reading extra elements
            and re-ranking them in memory.
            Defaults to None.
Raises:
ImportError: Unable to import `lancedb`.
Returns:
LanceDBVectorStore: VectorStore that supports creating LanceDB datasets and
querying it.
"""
stores_text = True
flat_metadata: bool = True
def __init__(
self,
uri: str,
table_name: str = "vectors",
nprobes: int = 20,
refine_factor: Optional[int] = None,
**kwargs: Any,
) -> None:
"""Init params."""
import_err_msg = "`lancedb` package not found, please run `pip install lancedb`"
try:
import lancedb # noqa: F401
except ImportError:
raise ImportError(import_err_msg)
self.connection = lancedb.connect(uri)
self.uri = uri
self.table_name = table_name
self.nprobes = nprobes
self.refine_factor = refine_factor
@property
def client(self) -> None:
"""Get client."""
return None
def add(
self,
embedding_results: List[NodeWithEmbedding],
) -> List[str]:
data = []
ids = []
for result in embedding_results:
metadata = node_to_metadata_dict(
result.node, remove_text=True, flat_metadata=self.flat_metadata
)
append_data = {
"id": result.id,
"doc_id": result.ref_doc_id,
"vector": result.embedding,
"text": result.node.get_content(metadata_mode=MetadataMode.NONE),
}
append_data.update(metadata)
data.append(append_data)
ids.append(result.id)
if self.table_name in self.connection.table_names():
tbl = self.connection.open_table(self.table_name)
tbl.add(data)
else:
self.connection.create_table(self.table_name, data)
return ids
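    # Minimal usage sketch (hedged; assumes a llama_index version where
    # NodeWithEmbedding takes `node` and `embedding` keyword arguments):
    #
    #     store = LanceDBVectorStore(uri="/tmp/lancedb", table_name="vectors")
    #     node = TextNode(text="hello", id_="n1")
    #     store.add([NodeWithEmbedding(node=node, embedding=[0.1] * 1536)])
    #     res = store.query(VectorStoreQuery(query_embedding=[0.1] * 1536, similarity_top_k=1))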
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
        Delete nodes with the given ref_doc_id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
"""
table = self.connection.open_table(self.table_name)
        # add() stores the reference document id under the "doc_id" column.
        table.delete('doc_id = "' + ref_doc_id + '"')
def query(
self,
query: VectorStoreQuery,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes."""
if query.filters is not None:
if "where" in kwargs:
raise ValueError(
"Cannot specify filter via both query and kwargs. "
"Use kwargs only for lancedb specific items that are "
"not supported via the generic query interface."
)
where = _to_lance_filter(query.filters)
else:
where = kwargs.pop("where", None)
table = self.connection.open_table(self.table_name)
lance_query = (
table.search(query.query_embedding)
.limit(query.similarity_top_k)
.where(where)
.nprobes(self.nprobes)
)
if self.refine_factor is not None:
lance_query.refine_factor(self.refine_factor)
results = lance_query.to_df()
nodes = []
for _, item in results.iterrows():
node = TextNode(
text=item.text,
id_=item.id,
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id=item.doc_id),
},
)
nodes.append(node)
return VectorStoreQueryResult(
nodes=nodes,
similarities=results["score"].tolist(),
ids=results["id"].tolist(),
)
| [
"lancedb.connect"
] | [((2271, 2291), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2286, 2291), False, 'import lancedb\n'), ((2716, 2807), 'llama_index.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['result.node'], {'remove_text': '(True)', 'flat_metadata': 'self.flat_metadata'}), '(result.node, remove_text=True, flat_metadata=self.\n flat_metadata)\n', (2737, 2807), False, 'from llama_index.vector_stores.utils import node_to_metadata_dict\n'), ((5023, 5059), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'item.doc_id'}), '(node_id=item.doc_id)\n', (5038, 5059), False, 'from llama_index.schema import MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode\n')] |
from time import time_ns
import lancedb
uri = "./.lancedb"
db = lancedb.connect(uri)
tns = db.table_names()
print(tns)
tn = 'my_table'
now = time_ns()
if tn not in tns:
    # The table schema is fixed when the table is created;
    # adding new fields later through add() has no effect.
table = db.create_table(
tn,
data=[
{"vector": [3.1, 4.1], "item": "foo", "price": 10.0, "createAt": now},
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0, "createAt": now},
],
)
else:
table = db.open_table(tn)
table.add(
data=[
{"vector": [3.1, 4.1], "item": "foo", "price": 10.0, "createAt": now},
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0, "createAt": now},
],
)
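# Beyond dumping the full table below, a targeted vector search sketch
# (illustrative; exact result columns vary across lancedb versions):
#
#     hits = table.search([3.0, 4.0]).limit(1).to_pandas()
#     print(hits[["item", "price"]])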
df = table.to_pandas()
print(df) | [
"lancedb.connect"
] | [((65, 85), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (80, 85), False, 'import lancedb\n'), ((143, 152), 'time.time_ns', 'time_ns', ([], {}), '()\n', (150, 152), False, 'from time import time_ns\n')] |
from datetime import datetime
import lancedb
from langchain.embeddings.base import Embeddings
from langchain.vectorstores import VectorStore, LanceDB
from config import Config
from utils.files import get_root_path
def get_vectorstore(table_name: str, embedding: Embeddings) -> VectorStore:
config = Config()
db_path = get_root_path() / config.lancedb_url
db = lancedb.connect(db_path)
    if table_name not in db.table_names():
table = db.create_table(table_name, data=[
{
"vector": embedding.embed_query("Hello World"),
"text": "Hello World",
"url": "https://google.com/",
"time": datetime.now().timestamp()}
])
else:
table = db.open_table(table_name)
vectorstore = LanceDB(embedding=embedding, connection=table)
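    # Usage sketch (hedged; the embedding model and table name are
    # illustrative, and add_texts() must supply the metadata columns
    # defined in the seed row above):
    #
    #     store = get_vectorstore("docs", OpenAIEmbeddings())
    #     hits = store.similarity_search("Hello", k=1)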
return vectorstore | [
"lancedb.connect"
] | [((307, 315), 'config.Config', 'Config', ([], {}), '()\n', (313, 315), False, 'from config import Config\n'), ((376, 400), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (391, 400), False, 'import lancedb\n'), ((792, 838), 'langchain.vectorstores.LanceDB', 'LanceDB', ([], {'embedding': 'embedding', 'connection': 'table'}), '(embedding=embedding, connection=table)\n', (799, 838), False, 'from langchain.vectorstores import VectorStore, LanceDB\n'), ((330, 345), 'utils.files.get_root_path', 'get_root_path', ([], {}), '()\n', (343, 345), False, 'from utils.files import get_root_path\n'), ((682, 696), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (694, 696), False, 'from datetime import datetime\n')] |
# Answer questions about a PDF file using the RAG model
# TODO: Maintain the context of the conversation
import lancedb
from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.vectorstores import LanceDB
from langchain_community.document_loaders import PyPDFLoader
from langchain_openai import OpenAI
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
import random
import time
import os
import sys
debug=False
# Get the name of a PDF file from the command line
pdf_name = sys.argv[1]
if not os.path.exists(pdf_name):
print("The PDF file does not exist. Exiting program.")
    sys.exit(1)
# Vector DB connection
db = lancedb.connect("/tmp/lancedb")
embeddings = OpenAIEmbeddings()
# random.seed() returns None, so it cannot serve as a table name; seed the
# RNG and draw a random table name instead.
random.seed(time.time())
table_name = f"pdf_qa_{random.randint(0, 10 ** 6)}"
table = db.create_table(
    table_name,
    data=[
        {
            "vector": embeddings.embed_query("Hello World"),
            "text": "Hello World",
            "id": "1",
        }
    ],
    mode="overwrite",
)
# Load the document, split it into chunks, embed each chunk and load it into the vector store.
print(pdf_name)
loader = PyPDFLoader(pdf_name)
data = loader.load()
# Split docs
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
docs = text_splitter.split_documents(data)
vectorStore = LanceDB.from_documents(docs, OpenAIEmbeddings(), connection=table)
def answerQuestion(debug, vectorStore):
question = input("Please enter a question about the PDF: ")
# If question is blank, exit program
if not question:
print("No question entered. Exiting program.")
        sys.exit(1)
# Display results if debug is true
if debug:
results = vectorStore.similarity_search_with_score(
query=question,
k=5,)
for result in results:
# print just the page_content field
print(result[0].page_content )
qa_retriever = vectorStore.as_retriever(
search_type="similarity",
search_kwargs={"k": 25},
)
prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}
"""
    # PromptTemplate takes no `history_variables` argument; conversation
    # memory is still an open TODO (see the note at the top of this file).
    PROMPT = PromptTemplate(
        template=prompt_template, input_variables=["context", "question"]
    )
qa = RetrievalQA.from_chain_type(
llm=OpenAI(),
chain_type="stuff",
retriever=qa_retriever,
return_source_documents=True,
chain_type_kwargs={"prompt": PROMPT},
)
docs = qa.invoke({"query": question})
print(docs["result"])
print()
while True:
answerQuestion(debug, vectorStore)
| [
"lancedb.connect"
] | [((972, 1003), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (987, 1003), False, 'import lancedb\n'), ((1015, 1033), 'langchain_openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1031, 1033), False, 'from langchain_openai import OpenAIEmbeddings\n'), ((1412, 1433), 'langchain_community.document_loaders.PyPDFLoader', 'PyPDFLoader', (['pdf_name'], {}), '(pdf_name)\n', (1423, 1433), False, 'from langchain_community.document_loaders import PyPDFLoader\n'), ((1489, 1552), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(0)'}), '(chunk_size=500, chunk_overlap=0)\n', (1519, 1552), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((736, 760), 'os.path.exists', 'os.path.exists', (['pdf_name'], {}), '(pdf_name)\n', (750, 760), False, 'import os\n'), ((825, 836), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (833, 836), False, 'import os\n'), ((1052, 1063), 'time.time', 'time.time', ([], {}), '()\n', (1061, 1063), False, 'import time\n'), ((1639, 1657), 'langchain_openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1655, 1657), False, 'from langchain_openai import OpenAIEmbeddings\n'), ((2529, 2650), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'prompt_template', 'input_variables': "['context', 'question']", 'history_variables': "['chat_history']"}), "(template=prompt_template, input_variables=['context',\n 'question'], history_variables=['chat_history'])\n", (2543, 2650), False, 'from langchain.prompts import PromptTemplate\n'), ((1907, 1918), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (1915, 1918), False, 'import os\n'), ((2700, 2708), 'langchain_openai.OpenAI', 'OpenAI', ([], {}), '()\n', (2706, 2708), False, 'from langchain_openai import OpenAI\n')] |
import lancedb
import os
import gradio as gr
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
# For Text Similarity and Relevance Ranking:
# valhalla/distilbart-mnli-12-3
# sentence-transformers/cross-encoder/stsb-roberta-large
#
# For Question Answering:
# deepset/roberta-base-squad2
# cross-encoder/quora-distilroberta-base
CROSS_ENC_MODEL = os.getenv("CROSS_ENC_MODEL", "cross-encoder/ms-marco-MiniLM-L-6-v2")
# Initialize the tokenizer and model for reranking
tokenizer = AutoTokenizer.from_pretrained(CROSS_ENC_MODEL)
cross_encoder = AutoModelForSequenceClassification.from_pretrained(CROSS_ENC_MODEL)
cross_encoder.eval() # Put model in evaluation mode
db = lancedb.connect(".lancedb")
TABLE = db.open_table(os.getenv("TABLE_NAME"))
VECTOR_COLUMN = os.getenv("VECTOR_COLUMN", "vector")
TEXT_COLUMN = os.getenv("TEXT_COLUMN", "text")
BATCH_SIZE = int(os.getenv("BATCH_SIZE", 32))
retriever = SentenceTransformer(os.getenv("EMB_MODEL"))
def rerank(query, documents):
pairs = [[query, doc] for doc in documents] # Create pairs of query and each document
inputs = tokenizer(pairs, padding=True, truncation=True, return_tensors="pt")
with torch.no_grad():
scores = cross_encoder(**inputs).logits.squeeze() # Get scores for each pair
sorted_docs = [doc for _, doc in sorted(zip(scores, documents), key=lambda x: x[0], reverse=True)]
return sorted_docs
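# Example (illustrative):
#
#     docs = ["LanceDB is an embedded vector database.", "Bananas are yellow."]
#     rerank("what is lancedb?", docs)
#     # -> the LanceDB sentence should be ranked first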
def retrieve(query, k, rr=True):
query_vec = retriever.encode(query)
try:
documents = TABLE.search(query_vec, vector_column_name=VECTOR_COLUMN).limit(k).to_list()
documents = [doc[TEXT_COLUMN] for doc in documents]
# Rerank the retrieved documents if rr (rerank) is True
if rr:
documents = rerank(query, documents)
return documents
except Exception as e:
raise gr.Error(str(e))
| [
"lancedb.connect"
] | [((440, 508), 'os.getenv', 'os.getenv', (['"""CROSS_ENC_MODEL"""', '"""cross-encoder/ms-marco-MiniLM-L-6-v2"""'], {}), "('CROSS_ENC_MODEL', 'cross-encoder/ms-marco-MiniLM-L-6-v2')\n", (449, 508), False, 'import os\n'), ((573, 619), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['CROSS_ENC_MODEL'], {}), '(CROSS_ENC_MODEL)\n', (602, 619), False, 'from transformers import AutoTokenizer, AutoModelForSequenceClassification\n'), ((636, 703), 'transformers.AutoModelForSequenceClassification.from_pretrained', 'AutoModelForSequenceClassification.from_pretrained', (['CROSS_ENC_MODEL'], {}), '(CROSS_ENC_MODEL)\n', (686, 703), False, 'from transformers import AutoTokenizer, AutoModelForSequenceClassification\n'), ((763, 790), 'lancedb.connect', 'lancedb.connect', (['""".lancedb"""'], {}), "('.lancedb')\n", (778, 790), False, 'import lancedb\n'), ((854, 890), 'os.getenv', 'os.getenv', (['"""VECTOR_COLUMN"""', '"""vector"""'], {}), "('VECTOR_COLUMN', 'vector')\n", (863, 890), False, 'import os\n'), ((905, 937), 'os.getenv', 'os.getenv', (['"""TEXT_COLUMN"""', '"""text"""'], {}), "('TEXT_COLUMN', 'text')\n", (914, 937), False, 'import os\n'), ((813, 836), 'os.getenv', 'os.getenv', (['"""TABLE_NAME"""'], {}), "('TABLE_NAME')\n", (822, 836), False, 'import os\n'), ((955, 982), 'os.getenv', 'os.getenv', (['"""BATCH_SIZE"""', '(32)'], {}), "('BATCH_SIZE', 32)\n", (964, 982), False, 'import os\n'), ((1017, 1039), 'os.getenv', 'os.getenv', (['"""EMB_MODEL"""'], {}), "('EMB_MODEL')\n", (1026, 1039), False, 'import os\n'), ((1254, 1269), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1267, 1269), False, 'import torch\n')] |
from langchain_community.document_loaders import PyPDFDirectoryLoader
from langchain_text_splitters import TokenTextSplitter
from langchain_community.embeddings import OllamaEmbeddings
import lancedb
import pyarrow as pa
embedding_model = OllamaEmbeddings()
db_path = "./lancedb"
db = lancedb.connect(db_path)
placeholder_embedding = embedding_model.embed_query("Hello World")
if placeholder_embedding is not None:
placeholder_embedding_list = placeholder_embedding.tolist() if hasattr(placeholder_embedding, "tolist") else placeholder_embedding
placeholder_data = [{
"id": "placeholder",
"text": "Placeholder text",
"vector": placeholder_embedding_list,
}]
table = db.create_table(
"my_table",
data=placeholder_data,
mode="overwrite",
)
loader = PyPDFDirectoryLoader("data/")
docs = loader.load()
text_splitter = TokenTextSplitter()
for doc_index, doc in enumerate(docs):
text_content = getattr(doc, 'page_content', None)
if text_content:
tokenized_doc = text_splitter.split_text(text_content)
        for chunk_index, chunk_text in enumerate(tokenized_doc):
            # split_text() returns plain strings; joining a string with spaces
            # would insert a space between every character.
            embedding = embedding_model.embed_query(chunk_text)
            if embedding:
                embedding_list = embedding.tolist() if hasattr(embedding, "tolist") else embedding
                try:
                    # Table.add() expects a list of records; reuse the handle
                    # returned by create_table() above.
                    table.add([{
                        "id": f"{doc_index}_{chunk_index}",
                        "text": chunk_text,
                        "vector": embedding_list,
                    }])
except Exception as e:
print(f"Failed to insert data for chunk {chunk_index} of document {doc_index}: {e}")
else:
print(f"No embedding generated for chunk {chunk_index} of document {doc_index}.")
else:
print(f"Skipping a document {doc_index} due to missing text content.")
print("Embeddings inserted into LanceDB successfully.")
print(table.count_rows()) | [
"lancedb.connect"
] | [((246, 264), 'langchain_community.embeddings.OllamaEmbeddings', 'OllamaEmbeddings', ([], {}), '()\n', (262, 264), False, 'from langchain_community.embeddings import OllamaEmbeddings\n'), ((296, 320), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (311, 320), False, 'import lancedb\n'), ((844, 873), 'langchain_community.document_loaders.PyPDFDirectoryLoader', 'PyPDFDirectoryLoader', (['"""data/"""'], {}), "('data/')\n", (864, 873), False, 'from langchain_community.document_loaders import PyPDFDirectoryLoader\n'), ((913, 932), 'langchain_text_splitters.TokenTextSplitter', 'TokenTextSplitter', ([], {}), '()\n', (930, 932), False, 'from langchain_text_splitters import TokenTextSplitter\n')] |
import os
import pandas as pd
from datetime import datetime
import time
import subprocess
from docarray import DocumentArray, Document
import json
import pyarrow as pa
import lancedb
from google.cloud import bigquery
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "passculture-data-ehp")
ENV_SHORT_NAME = os.environ.get("ENV_SHORT_NAME", "dev")
BIGQUERY_CLEAN_DATASET = f"clean_{ENV_SHORT_NAME}"
BIGQUERY_ANALYTICS_DATASET = f"analytics_{ENV_SHORT_NAME}"
MODELS_RESULTS_TABLE_NAME = "mlflow_training_results"
item_columns = [
"vector",
"item_id",
"booking_number_desc",
"booking_trend_desc",
"booking_creation_trend_desc",
"booking_release_trend_desc",
"raw_embeddings",
"topic_id",
"cluster_id",
"category",
"subcategory_id",
"search_group_name",
"offer_type_label",
"offer_type_domain",
"gtl_id",
"gtl_l1",
"gtl_l2",
"gtl_l3",
"gtl_l4",
"is_numerical",
"is_national",
"is_geolocated",
"is_underage_recommendable",
"is_restrained",
"is_sensitive",
"offer_is_duo",
"booking_number",
"booking_number_last_7_days",
"booking_number_last_14_days",
"booking_number_last_28_days",
"stock_price",
"offer_creation_date",
"stock_beginning_date",
"total_offers",
"example_offer_id",
"example_offer_name",
"example_venue_id",
"example_venue_latitude",
"example_venue_longitude",
]
def save_experiment(experiment_name, model_name, serving_container, run_id):
log_results = {
"execution_date": datetime.now().isoformat(),
"experiment_name": experiment_name,
"model_name": model_name,
"model_type": "custom",
"run_id": run_id,
"run_start_time": int(time.time() * 1000.0),
"run_end_time": int(time.time() * 1000.0),
"artifact_uri": None,
"serving_container": serving_container,
}
client = bigquery.Client()
table_id = f"""{BIGQUERY_CLEAN_DATASET}.{MODELS_RESULTS_TABLE_NAME}"""
job_config = bigquery.LoadJobConfig(
schema=[
bigquery.SchemaField("execution_date", "STRING"),
bigquery.SchemaField("experiment_name", "STRING"),
bigquery.SchemaField("model_name", "STRING"),
bigquery.SchemaField("model_type", "STRING"),
bigquery.SchemaField("run_id", "STRING"),
bigquery.SchemaField("run_start_time", "INTEGER"),
bigquery.SchemaField("run_end_time", "INTEGER"),
bigquery.SchemaField("artifact_uri", "STRING"),
bigquery.SchemaField("serving_container", "STRING"),
]
)
job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND
df = pd.DataFrame.from_dict([log_results], orient="columns")
job = client.load_table_from_dataframe(df, table_id, job_config=job_config)
job.result()
def deploy_container(serving_container, workers):
command = f"sh ./deploy_to_docker_registery.sh {serving_container} {workers}"
results = subprocess.Popen(
command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
# TODO handle errors
for line in results.stdout:
print(line.rstrip().decode("utf-8"))
def get_items_metadata():
client = bigquery.Client()
sql = f"""
SELECT
*,
ROW_NUMBER() OVER (ORDER BY booking_number DESC) as booking_number_desc,
ROW_NUMBER() OVER (ORDER BY booking_trend DESC) as booking_trend_desc,
ROW_NUMBER() OVER (ORDER BY booking_creation_trend DESC) as booking_creation_trend_desc,
ROW_NUMBER() OVER (ORDER BY booking_release_trend DESC) as booking_release_trend_desc
FROM `{GCP_PROJECT_ID}.{BIGQUERY_ANALYTICS_DATASET}.recommendable_items_raw`
"""
return client.query(sql).to_dataframe()
def get_users_metadata():
client = bigquery.Client()
sql = f"""
SELECT
user_id,
user_total_deposit_amount,
user_current_deposit_type,
COALESCE(user_theoretical_remaining_credit, user_last_deposit_amount) as user_theoretical_remaining_credit
FROM `{GCP_PROJECT_ID}.{BIGQUERY_ANALYTICS_DATASET}.enriched_user_data`
"""
return client.query(sql).to_dataframe()
def to_ts(f):
try:
return float(f.timestamp())
    except (AttributeError, TypeError, ValueError):
return 0.0
def to_float(f):
try:
return float(f)
    except (TypeError, ValueError):
return None
def save_model_type(model_type):
with open("./metadata/model_type.json", "w") as file:
json.dump(model_type, file)
def get_table_batches(item_embedding_dict: dict, items_df, emb_size):
for row in items_df.itertuples():
embedding_id = item_embedding_dict.get(row.item_id, None)
if embedding_id is not None:
_item_id = row.item_id
yield pa.RecordBatch.from_arrays(
[
pa.array([embedding_id], pa.list_(pa.float32(), emb_size)),
pa.array([_item_id], pa.utf8()),
pa.array(
[[float(row.booking_number_desc)]], pa.list_(pa.float32(), 1)
),
pa.array(
[[float(row.booking_trend_desc)]],
pa.list_(pa.float32(), 1),
),
pa.array(
[[float(row.booking_creation_trend_desc)]],
pa.list_(pa.float32(), 1),
),
pa.array(
[[float(row.booking_release_trend_desc)]],
pa.list_(pa.float32(), 1),
),
pa.array([embedding_id], pa.list_(pa.float32(), emb_size)),
pa.array([str(row.topic_id or "")], pa.utf8()),
pa.array([str(row.cluster_id or "")], pa.utf8()),
pa.array([str(row.category or "")], pa.utf8()),
pa.array([str(row.subcategory_id or "")], pa.utf8()),
pa.array([str(row.search_group_name or "")], pa.utf8()),
pa.array([str(row.offer_type_label or "")], pa.utf8()),
pa.array([str(row.offer_type_domain or "")], pa.utf8()),
pa.array([str(row.gtl_id or "")], pa.utf8()),
pa.array([str(row.gtl_l1 or "")], pa.utf8()),
pa.array([str(row.gtl_l2 or "")], pa.utf8()),
pa.array([str(row.gtl_l3 or "")], pa.utf8()),
pa.array([str(row.gtl_l4 or "")], pa.utf8()),
pa.array([to_float(row.is_numerical)], pa.float32()),
pa.array([to_float(row.is_national)], pa.float32()),
pa.array([to_float(row.is_geolocated)], pa.float32()),
pa.array([to_float(row.is_underage_recommendable)], pa.float32()),
pa.array([to_float(row.is_restrained)], pa.float32()),
pa.array([to_float(row.is_sensitive)], pa.float32()),
pa.array([to_float(row.offer_is_duo)], pa.float32()),
pa.array([to_float(row.booking_number)], pa.float32()),
pa.array([to_float(row.booking_number_last_7_days)], pa.float32()),
pa.array([to_float(row.booking_number_last_14_days)], pa.float32()),
pa.array([to_float(row.booking_number_last_28_days)], pa.float32()),
pa.array([to_float(row.stock_price)], pa.float32()),
pa.array([to_ts(row.offer_creation_date)], pa.float32()),
pa.array([to_ts(row.stock_beginning_date)], pa.float32()),
# if unique
pa.array([to_float(row.total_offers)], pa.float32()),
pa.array([str(row.example_offer_id or "")], pa.utf8()),
pa.array([str(row.example_offer_name or "")], pa.utf8()),
pa.array([str(row.example_venue_id or "")], pa.utf8()),
pa.array(
[to_float(row.example_venue_latitude or 0.0)], pa.float32()
),
pa.array(
[to_float(row.example_venue_longitude or 0.0)], pa.float32()
),
],
item_columns,
)
def create_items_table(
item_embedding_dict, items_df, emb_size, uri="./metadata/vector"
):
data = pa.Table.from_batches(
get_table_batches(item_embedding_dict, items_df, emb_size)
)
db = lancedb.connect(uri)
db.drop_database()
table = db.create_table("items", data=data)
table.create_index(num_partitions=1024, num_sub_vectors=32)
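# Query sketch (illustrative; assumes an embedding vector `query_emb` with the
# same dimensionality used to build the table):
#
#     db = lancedb.connect("./metadata/vector")
#     hits = db.open_table("items").search(query_emb).limit(10).to_pandas()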
def get_item_docs(item_embedding_dict, items_df):
docs = DocumentArray()
for row in items_df.itertuples():
embedding_id = item_embedding_dict.get(row.item_id, None)
if embedding_id is not None:
_item_id = row.item_id
docs.append(Document(id=str(_item_id), embedding=embedding_id))
if len(docs) == 0:
raise Exception("Item Document is empty. Does the model match the query ?")
return docs
def get_user_docs(user_dict):
docs = DocumentArray()
for k, v in user_dict.items():
docs.append(Document(id=str(k), embedding=v))
return docs
| [
"lancedb.connect"
] | [((237, 293), 'os.environ.get', 'os.environ.get', (['"""GCP_PROJECT_ID"""', '"""passculture-data-ehp"""'], {}), "('GCP_PROJECT_ID', 'passculture-data-ehp')\n", (251, 293), False, 'import os\n'), ((311, 350), 'os.environ.get', 'os.environ.get', (['"""ENV_SHORT_NAME"""', '"""dev"""'], {}), "('ENV_SHORT_NAME', 'dev')\n", (325, 350), False, 'import os\n'), ((1925, 1942), 'google.cloud.bigquery.Client', 'bigquery.Client', ([], {}), '()\n', (1940, 1942), False, 'from google.cloud import bigquery\n'), ((2721, 2776), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['[log_results]'], {'orient': '"""columns"""'}), "([log_results], orient='columns')\n", (2743, 2776), True, 'import pandas as pd\n'), ((3023, 3115), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(command, shell=True, stdout=subprocess.PIPE, stderr=\n subprocess.STDOUT)\n', (3039, 3115), False, 'import subprocess\n'), ((3268, 3285), 'google.cloud.bigquery.Client', 'bigquery.Client', ([], {}), '()\n', (3283, 3285), False, 'from google.cloud import bigquery\n'), ((3859, 3876), 'google.cloud.bigquery.Client', 'bigquery.Client', ([], {}), '()\n', (3874, 3876), False, 'from google.cloud import bigquery\n'), ((8567, 8587), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (8582, 8587), False, 'import lancedb\n'), ((8786, 8801), 'docarray.DocumentArray', 'DocumentArray', ([], {}), '()\n', (8799, 8801), False, 'from docarray import DocumentArray, Document\n'), ((9222, 9237), 'docarray.DocumentArray', 'DocumentArray', ([], {}), '()\n', (9235, 9237), False, 'from docarray import DocumentArray, Document\n'), ((4537, 4564), 'json.dump', 'json.dump', (['model_type', 'file'], {}), '(model_type, file)\n', (4546, 4564), False, 'import json\n'), ((1559, 1573), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1571, 1573), False, 'from datetime import datetime\n'), ((1753, 1764), 'time.time', 'time.time', ([], {}), '()\n', (1762, 1764), False, 'import time\n'), ((1804, 1815), 'time.time', 'time.time', ([], {}), '()\n', (1813, 1815), False, 'import time\n'), ((2089, 2137), 'google.cloud.bigquery.SchemaField', 'bigquery.SchemaField', (['"""execution_date"""', '"""STRING"""'], {}), "('execution_date', 'STRING')\n", (2109, 2137), False, 'from google.cloud import bigquery\n'), ((2151, 2200), 'google.cloud.bigquery.SchemaField', 'bigquery.SchemaField', (['"""experiment_name"""', '"""STRING"""'], {}), "('experiment_name', 'STRING')\n", (2171, 2200), False, 'from google.cloud import bigquery\n'), ((2214, 2258), 'google.cloud.bigquery.SchemaField', 'bigquery.SchemaField', (['"""model_name"""', '"""STRING"""'], {}), "('model_name', 'STRING')\n", (2234, 2258), False, 'from google.cloud import bigquery\n'), ((2272, 2316), 'google.cloud.bigquery.SchemaField', 'bigquery.SchemaField', (['"""model_type"""', '"""STRING"""'], {}), "('model_type', 'STRING')\n", (2292, 2316), False, 'from google.cloud import bigquery\n'), ((2330, 2370), 'google.cloud.bigquery.SchemaField', 'bigquery.SchemaField', (['"""run_id"""', '"""STRING"""'], {}), "('run_id', 'STRING')\n", (2350, 2370), False, 'from google.cloud import bigquery\n'), ((2384, 2433), 'google.cloud.bigquery.SchemaField', 'bigquery.SchemaField', (['"""run_start_time"""', '"""INTEGER"""'], {}), "('run_start_time', 'INTEGER')\n", (2404, 2433), False, 'from google.cloud import bigquery\n'), ((2447, 2494), 'google.cloud.bigquery.SchemaField', 'bigquery.SchemaField', (['"""run_end_time"""', 
'"""INTEGER"""'], {}), "('run_end_time', 'INTEGER')\n", (2467, 2494), False, 'from google.cloud import bigquery\n'), ((2508, 2554), 'google.cloud.bigquery.SchemaField', 'bigquery.SchemaField', (['"""artifact_uri"""', '"""STRING"""'], {}), "('artifact_uri', 'STRING')\n", (2528, 2554), False, 'from google.cloud import bigquery\n'), ((2568, 2619), 'google.cloud.bigquery.SchemaField', 'bigquery.SchemaField', (['"""serving_container"""', '"""STRING"""'], {}), "('serving_container', 'STRING')\n", (2588, 2619), False, 'from google.cloud import bigquery\n'), ((4998, 5007), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (5005, 5007), True, 'import pyarrow as pa\n'), ((5791, 5800), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (5798, 5800), True, 'import pyarrow as pa\n'), ((5861, 5870), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (5868, 5870), True, 'import pyarrow as pa\n'), ((5929, 5938), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (5936, 5938), True, 'import pyarrow as pa\n'), ((6003, 6012), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (6010, 6012), True, 'import pyarrow as pa\n'), ((6080, 6089), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (6087, 6089), True, 'import pyarrow as pa\n'), ((6156, 6165), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (6163, 6165), True, 'import pyarrow as pa\n'), ((6233, 6242), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (6240, 6242), True, 'import pyarrow as pa\n'), ((6299, 6308), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (6306, 6308), True, 'import pyarrow as pa\n'), ((6365, 6374), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (6372, 6374), True, 'import pyarrow as pa\n'), ((6431, 6440), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (6438, 6440), True, 'import pyarrow as pa\n'), ((6497, 6506), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (6504, 6506), True, 'import pyarrow as pa\n'), ((6563, 6572), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (6570, 6572), True, 'import pyarrow as pa\n'), ((6634, 6646), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (6644, 6646), True, 'import pyarrow as pa\n'), ((6707, 6719), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (6717, 6719), True, 'import pyarrow as pa\n'), ((6782, 6794), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (6792, 6794), True, 'import pyarrow as pa\n'), ((6869, 6881), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (6879, 6881), True, 'import pyarrow as pa\n'), ((6944, 6956), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (6954, 6956), True, 'import pyarrow as pa\n'), ((7018, 7030), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (7028, 7030), True, 'import pyarrow as pa\n'), ((7092, 7104), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (7102, 7104), True, 'import pyarrow as pa\n'), ((7168, 7180), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (7178, 7180), True, 'import pyarrow as pa\n'), ((7256, 7268), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (7266, 7268), True, 'import pyarrow as pa\n'), ((7345, 7357), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (7355, 7357), True, 'import pyarrow as pa\n'), ((7434, 7446), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (7444, 7446), True, 'import pyarrow as pa\n'), ((7507, 7519), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (7517, 7519), True, 'import pyarrow as pa\n'), ((7585, 7597), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (7595, 7597), True, 'import pyarrow as pa\n'), ((7664, 7676), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (7674, 7676), True, 'import pyarrow as pa\n'), ((7770, 7782), 
'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (7780, 7782), True, 'import pyarrow as pa\n'), ((7849, 7858), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (7856, 7858), True, 'import pyarrow as pa\n'), ((7927, 7936), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (7934, 7936), True, 'import pyarrow as pa\n'), ((8003, 8012), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (8010, 8012), True, 'import pyarrow as pa\n'), ((8116, 8128), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (8126, 8128), True, 'import pyarrow as pa\n'), ((8254, 8266), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (8264, 8266), True, 'import pyarrow as pa\n'), ((4931, 4943), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (4941, 4943), True, 'import pyarrow as pa\n'), ((5109, 5121), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (5119, 5121), True, 'import pyarrow as pa\n'), ((5271, 5283), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (5281, 5283), True, 'import pyarrow as pa\n'), ((5443, 5455), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (5453, 5455), True, 'import pyarrow as pa\n'), ((5614, 5626), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (5624, 5626), True, 'import pyarrow as pa\n'), ((5709, 5721), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (5719, 5721), True, 'import pyarrow as pa\n')] |
from pgvector.psycopg import register_vector
from pgvector.sqlalchemy import Vector
import psycopg
from sqlalchemy import create_engine, Column, String, BIGINT, select, inspect, text
from sqlalchemy.orm import sessionmaker, mapped_column
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql import func
import re
from tqdm import tqdm
from typing import Optional, List, Iterator
import numpy as np
import pandas as pd
from memgpt.config import AgentConfig, MemGPTConfig
from memgpt.connectors.storage import StorageConnector, Passage
from memgpt.constants import MEMGPT_DIR
from memgpt.utils import printd
Base = declarative_base()
def get_db_model(table_name: str):
config = MemGPTConfig.load()
class PassageModel(Base):
"""Defines data model for storing Passages (consisting of text, embedding)"""
        __abstract__ = True  # required so SQLAlchemy does not map this abstract base to its own table
# Assuming passage_id is the primary key
id = Column(BIGINT, primary_key=True, nullable=False, autoincrement=True)
doc_id = Column(String)
text = Column(String, nullable=False)
embedding = mapped_column(Vector(config.embedding_dim))
# metadata_ = Column(JSON(astext_type=Text()))
def __repr__(self):
return f"<Passage(passage_id='{self.id}', text='{self.text}', embedding='{self.embedding})>"
"""Create database model for table_name"""
class_name = f"{table_name.capitalize()}Model"
Model = type(class_name, (PassageModel,), {"__tablename__": table_name, "__table_args__": {"extend_existing": True}})
return Model
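# Hedged usage sketch (not part of MemGPT): the URI below is an assumed placeholder
# and must point at a Postgres instance with the pgvector extension available,
# since the model declares a Vector column.
def _example_get_db_model_usage():
    DemoModel = get_db_model("memgpt_demo")  # hypothetical table name
    engine = create_engine("postgresql+psycopg://user:pass@localhost/db")  # assumed URI
    Base.metadata.create_all(engine)  # creates "memgpt_demo" if it does not exist
    return DemoModel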
class PostgresStorageConnector(StorageConnector):
"""Storage via Postgres"""
# TODO: this should probably eventually be moved into a parent DB class
def __init__(self, name: Optional[str] = None, agent_config: Optional[AgentConfig] = None):
config = MemGPTConfig.load()
# determine table name
if agent_config:
assert name is None, f"Cannot specify both agent config and name {name}"
self.table_name = self.generate_table_name_agent(agent_config)
elif name:
assert agent_config is None, f"Cannot specify both agent config and name {name}"
self.table_name = self.generate_table_name(name)
else:
raise ValueError("Must specify either agent config or name")
printd(f"Using table name {self.table_name}")
# create table
self.uri = config.archival_storage_uri
if config.archival_storage_uri is None:
raise ValueError(f"Must specifiy archival_storage_uri in config {config.config_path}")
self.db_model = get_db_model(self.table_name)
self.engine = create_engine(self.uri)
Base.metadata.create_all(self.engine) # Create the table if it doesn't exist
self.Session = sessionmaker(bind=self.engine)
self.Session().execute(text("CREATE EXTENSION IF NOT EXISTS vector")) # Enables the vector extension
def get_all_paginated(self, page_size: int) -> Iterator[List[Passage]]:
session = self.Session()
offset = 0
while True:
# Retrieve a chunk of records with the given page_size
db_passages_chunk = session.query(self.db_model).offset(offset).limit(page_size).all()
# If the chunk is empty, we've retrieved all records
if not db_passages_chunk:
break
# Yield a list of Passage objects converted from the chunk
yield [Passage(text=p.text, embedding=p.embedding, doc_id=p.doc_id, passage_id=p.id) for p in db_passages_chunk]
# Increment the offset to get the next chunk in the next iteration
offset += page_size
def get_all(self, limit=10) -> List[Passage]:
session = self.Session()
db_passages = session.query(self.db_model).limit(limit).all()
return [Passage(text=p.text, embedding=p.embedding, doc_id=p.doc_id, passage_id=p.id) for p in db_passages]
def get(self, id: str) -> Optional[Passage]:
session = self.Session()
db_passage = session.query(self.db_model).get(id)
if db_passage is None:
return None
        # The model's primary key is `id`, so use it as the passage_id
        return Passage(text=db_passage.text, embedding=db_passage.embedding, doc_id=db_passage.doc_id, passage_id=db_passage.id)
def size(self) -> int:
# return size of table
session = self.Session()
return session.query(self.db_model).count()
def insert(self, passage: Passage):
session = self.Session()
db_passage = self.db_model(doc_id=passage.doc_id, text=passage.text, embedding=passage.embedding)
session.add(db_passage)
session.commit()
def insert_many(self, passages: List[Passage], show_progress=True):
session = self.Session()
iterable = tqdm(passages) if show_progress else passages
for passage in iterable:
db_passage = self.db_model(doc_id=passage.doc_id, text=passage.text, embedding=passage.embedding)
session.add(db_passage)
session.commit()
def query(self, query: str, query_vec: List[float], top_k: int = 10) -> List[Passage]:
session = self.Session()
# Assuming PassageModel.embedding has the capability of computing l2_distance
results = session.scalars(select(self.db_model).order_by(self.db_model.embedding.l2_distance(query_vec)).limit(top_k)).all()
# Convert the results into Passage objects
passages = [
Passage(text=result.text, embedding=np.frombuffer(result.embedding), doc_id=result.doc_id, passage_id=result.id)
for result in results
]
return passages
def delete(self):
"""Drop the passage table from the database."""
# Bind the engine to the metadata of the base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = self.engine
# Drop the table specified by the PassageModel class
self.db_model.__table__.drop(self.engine)
def save(self):
return
@staticmethod
def list_loaded_data():
config = MemGPTConfig.load()
engine = create_engine(config.archival_storage_uri)
inspector = inspect(engine)
tables = inspector.get_table_names()
tables = [table for table in tables if table.startswith("memgpt_") and not table.startswith("memgpt_agent_")]
tables = [table.replace("memgpt_", "") for table in tables]
return tables
def sanitize_table_name(self, name: str) -> str:
# Remove leading and trailing whitespace
name = name.strip()
# Replace spaces and invalid characters with underscores
name = re.sub(r"\s+|\W+", "_", name)
# Truncate to the maximum identifier length (e.g., 63 for PostgreSQL)
max_length = 63
if len(name) > max_length:
name = name[:max_length].rstrip("_")
# Convert to lowercase
name = name.lower()
return name
def generate_table_name_agent(self, agent_config: AgentConfig):
return f"memgpt_agent_{self.sanitize_table_name(agent_config.name)}"
def generate_table_name(self, name: str):
return f"memgpt_{self.sanitize_table_name(name)}"
class LanceDBConnector(StorageConnector):
"""Storage via LanceDB"""
# TODO: this should probably eventually be moved into a parent DB class
def __init__(self, name: Optional[str] = None):
config = MemGPTConfig.load()
# determine table name
if name:
self.table_name = self.generate_table_name(name)
else:
self.table_name = "lancedb_tbl"
printd(f"Using table name {self.table_name}")
# create table
self.uri = config.archival_storage_uri
if config.archival_storage_uri is None:
raise ValueError(f"Must specifiy archival_storage_uri in config {config.config_path}")
import lancedb
self.db = lancedb.connect(self.uri)
self.table = None
def get_all_paginated(self, page_size: int) -> Iterator[List[Passage]]:
        # LanceDB needs no SQLAlchemy session; paginate directly over the table
        offset = 0
while True:
# Retrieve a chunk of records with the given page_size
db_passages_chunk = self.table.search().limit(page_size).to_list()
# If the chunk is empty, we've retrieved all records
if not db_passages_chunk:
break
# Yield a list of Passage objects converted from the chunk
yield [
Passage(text=p["text"], embedding=p["vector"], doc_id=p["doc_id"], passage_id=p["passage_id"]) for p in db_passages_chunk
]
# Increment the offset to get the next chunk in the next iteration
offset += page_size
def get_all(self, limit=10) -> List[Passage]:
db_passages = self.table.search().limit(limit).to_list()
return [Passage(text=p["text"], embedding=p["vector"], doc_id=p["doc_id"], passage_id=p["passage_id"]) for p in db_passages]
def get(self, id: str) -> Optional[Passage]:
        matches = self.table.search().where(f"passage_id = {id}").to_list()
        if len(matches) == 0:
            return None
        db_passage = matches[0]  # the filter returns a list of row dicts
        return Passage(
            text=db_passage["text"], embedding=db_passage["vector"], doc_id=db_passage["doc_id"], passage_id=db_passage["passage_id"]
        )
def size(self) -> int:
# return size of table
if self.table:
return len(self.table.search().to_list())
else:
print(f"Table with name {self.table_name} not present")
return 0
def insert(self, passage: Passage):
data = [{"doc_id": passage.doc_id, "text": passage.text, "passage_id": passage.passage_id, "vector": passage.embedding}]
if self.table:
self.table.add(data)
else:
self.table = self.db.create_table(self.table_name, data=data, mode="overwrite")
def insert_many(self, passages: List[Passage], show_progress=True):
data = []
iterable = tqdm(passages) if show_progress else passages
for passage in iterable:
temp_dict = {"doc_id": passage.doc_id, "text": passage.text, "passage_id": passage.passage_id, "vector": passage.embedding}
data.append(temp_dict)
if self.table:
self.table.add(data)
else:
self.table = self.db.create_table(self.table_name, data=data, mode="overwrite")
def query(self, query: str, query_vec: List[float], top_k: int = 10) -> List[Passage]:
# Assuming query_vec is of same length as embeddings inside table
        results = self.table.search(query_vec).limit(top_k).to_list()
        # Convert the results into Passage objects; embeddings are stored under "vector"
        passages = [
            Passage(text=result["text"], embedding=result["vector"], doc_id=result["doc_id"], passage_id=result["passage_id"])
            for result in results
        ]
return passages
def delete(self):
"""Drop the passage table from the database."""
# Drop the table specified by the PassageModel class
self.db.drop_table(self.table_name)
def save(self):
return
@staticmethod
def list_loaded_data():
config = MemGPTConfig.load()
import lancedb
db = lancedb.connect(config.archival_storage_uri)
tables = db.table_names()
tables = [table for table in tables if table.startswith("memgpt_")]
tables = [table.replace("memgpt_", "") for table in tables]
return tables
def sanitize_table_name(self, name: str) -> str:
# Remove leading and trailing whitespace
name = name.strip()
# Replace spaces and invalid characters with underscores
name = re.sub(r"\s+|\W+", "_", name)
# Truncate to the maximum identifier length
max_length = 63
if len(name) > max_length:
name = name[:max_length].rstrip("_")
# Convert to lowercase
name = name.lower()
return name
def generate_table_name(self, name: str):
return f"memgpt_{self.sanitize_table_name(name)}"
| [
"lancedb.connect"
] | [((702, 720), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (718, 720), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((771, 790), 'memgpt.config.MemGPTConfig.load', 'MemGPTConfig.load', ([], {}), '()\n', (788, 790), False, 'from memgpt.config import AgentConfig, MemGPTConfig\n'), ((1026, 1094), 'sqlalchemy.Column', 'Column', (['BIGINT'], {'primary_key': '(True)', 'nullable': '(False)', 'autoincrement': '(True)'}), '(BIGINT, primary_key=True, nullable=False, autoincrement=True)\n', (1032, 1094), False, 'from sqlalchemy import create_engine, Column, String, BIGINT, select, inspect, text\n'), ((1112, 1126), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (1118, 1126), False, 'from sqlalchemy import create_engine, Column, String, BIGINT, select, inspect, text\n'), ((1142, 1172), 'sqlalchemy.Column', 'Column', (['String'], {'nullable': '(False)'}), '(String, nullable=False)\n', (1148, 1172), False, 'from sqlalchemy import create_engine, Column, String, BIGINT, select, inspect, text\n'), ((1938, 1957), 'memgpt.config.MemGPTConfig.load', 'MemGPTConfig.load', ([], {}), '()\n', (1955, 1957), False, 'from memgpt.config import AgentConfig, MemGPTConfig\n'), ((2444, 2489), 'memgpt.utils.printd', 'printd', (['f"""Using table name {self.table_name}"""'], {}), "(f'Using table name {self.table_name}')\n", (2450, 2489), False, 'from memgpt.utils import printd\n'), ((2784, 2807), 'sqlalchemy.create_engine', 'create_engine', (['self.uri'], {}), '(self.uri)\n', (2797, 2807), False, 'from sqlalchemy import create_engine, Column, String, BIGINT, select, inspect, text\n'), ((2917, 2947), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'self.engine'}), '(bind=self.engine)\n', (2929, 2947), False, 'from sqlalchemy.orm import sessionmaker, mapped_column\n'), ((4289, 4415), 'memgpt.connectors.storage.Passage', 'Passage', ([], {'text': 'db_passage.text', 'embedding': 'db_passage.embedding', 'doc_id': 'db_passage.doc_id', 'passage_id': 'db_passage.passage_id'}), '(text=db_passage.text, embedding=db_passage.embedding, doc_id=\n db_passage.doc_id, passage_id=db_passage.passage_id)\n', (4296, 4415), False, 'from memgpt.connectors.storage import StorageConnector, Passage\n'), ((6249, 6268), 'memgpt.config.MemGPTConfig.load', 'MemGPTConfig.load', ([], {}), '()\n', (6266, 6268), False, 'from memgpt.config import AgentConfig, MemGPTConfig\n'), ((6286, 6328), 'sqlalchemy.create_engine', 'create_engine', (['config.archival_storage_uri'], {}), '(config.archival_storage_uri)\n', (6299, 6328), False, 'from sqlalchemy import create_engine, Column, String, BIGINT, select, inspect, text\n'), ((6349, 6364), 'sqlalchemy.inspect', 'inspect', (['engine'], {}), '(engine)\n', (6356, 6364), False, 'from sqlalchemy import create_engine, Column, String, BIGINT, select, inspect, text\n'), ((6830, 6860), 're.sub', 're.sub', (['"""\\\\s+|\\\\W+"""', '"""_"""', 'name'], {}), "('\\\\s+|\\\\W+', '_', name)\n", (6836, 6860), False, 'import re\n'), ((7600, 7619), 'memgpt.config.MemGPTConfig.load', 'MemGPTConfig.load', ([], {}), '()\n', (7617, 7619), False, 'from memgpt.config import AgentConfig, MemGPTConfig\n'), ((7797, 7842), 'memgpt.utils.printd', 'printd', (['f"""Using table name {self.table_name}"""'], {}), "(f'Using table name {self.table_name}')\n", (7803, 7842), False, 'from memgpt.utils import printd\n'), ((8103, 8128), 'lancedb.connect', 'lancedb.connect', (['self.uri'], {}), '(self.uri)\n', (8118, 8128), False, 'import lancedb\n'), ((9371, 
9509), 'memgpt.connectors.storage.Passage', 'Passage', ([], {'text': "db_passage['text']", 'embedding': "db_passage['embedding']", 'doc_id': "db_passage['doc_id']", 'passage_id': "db_passage['passage_id']"}), "(text=db_passage['text'], embedding=db_passage['embedding'], doc_id=\n db_passage['doc_id'], passage_id=db_passage['passage_id'])\n", (9378, 9509), False, 'from memgpt.connectors.storage import StorageConnector, Passage\n'), ((11403, 11422), 'memgpt.config.MemGPTConfig.load', 'MemGPTConfig.load', ([], {}), '()\n', (11420, 11422), False, 'from memgpt.config import AgentConfig, MemGPTConfig\n'), ((11460, 11504), 'lancedb.connect', 'lancedb.connect', (['config.archival_storage_uri'], {}), '(config.archival_storage_uri)\n', (11475, 11504), False, 'import lancedb\n'), ((11918, 11948), 're.sub', 're.sub', (['"""\\\\s+|\\\\W+"""', '"""_"""', 'name'], {}), "('\\\\s+|\\\\W+', '_', name)\n", (11924, 11948), False, 'import re\n'), ((1207, 1235), 'pgvector.sqlalchemy.Vector', 'Vector', (['config.embedding_dim'], {}), '(config.embedding_dim)\n', (1213, 1235), False, 'from pgvector.sqlalchemy import Vector\n'), ((2979, 3024), 'sqlalchemy.text', 'text', (['"""CREATE EXTENSION IF NOT EXISTS vector"""'], {}), "('CREATE EXTENSION IF NOT EXISTS vector')\n", (2983, 3024), False, 'from sqlalchemy import create_engine, Column, String, BIGINT, select, inspect, text\n'), ((3978, 4055), 'memgpt.connectors.storage.Passage', 'Passage', ([], {'text': 'p.text', 'embedding': 'p.embedding', 'doc_id': 'p.doc_id', 'passage_id': 'p.id'}), '(text=p.text, embedding=p.embedding, doc_id=p.doc_id, passage_id=p.id)\n', (3985, 4055), False, 'from memgpt.connectors.storage import StorageConnector, Passage\n'), ((4917, 4931), 'tqdm.tqdm', 'tqdm', (['passages'], {}), '(passages)\n', (4921, 4931), False, 'from tqdm import tqdm\n'), ((9064, 9162), 'memgpt.connectors.storage.Passage', 'Passage', ([], {'text': "p['text']", 'embedding': "p['vector']", 'doc_id': "p['doc_id']", 'passage_id': "p['passage_id']"}), "(text=p['text'], embedding=p['vector'], doc_id=p['doc_id'],\n passage_id=p['passage_id'])\n", (9071, 9162), False, 'from memgpt.connectors.storage import StorageConnector, Passage\n'), ((10209, 10223), 'tqdm.tqdm', 'tqdm', (['passages'], {}), '(passages)\n', (10213, 10223), False, 'from tqdm import tqdm\n'), ((10933, 11055), 'memgpt.connectors.storage.Passage', 'Passage', ([], {'text': "result['text']", 'embedding': "result['embedding']", 'doc_id': "result['doc_id']", 'passage_id': "result['passage_id']"}), "(text=result['text'], embedding=result['embedding'], doc_id=result[\n 'doc_id'], passage_id=result['passage_id'])\n", (10940, 11055), False, 'from memgpt.connectors.storage import StorageConnector, Passage\n'), ((3590, 3667), 'memgpt.connectors.storage.Passage', 'Passage', ([], {'text': 'p.text', 'embedding': 'p.embedding', 'doc_id': 'p.doc_id', 'passage_id': 'p.id'}), '(text=p.text, embedding=p.embedding, doc_id=p.doc_id, passage_id=p.id)\n', (3597, 3667), False, 'from memgpt.connectors.storage import StorageConnector, Passage\n'), ((5632, 5663), 'numpy.frombuffer', 'np.frombuffer', (['result.embedding'], {}), '(result.embedding)\n', (5645, 5663), True, 'import numpy as np\n'), ((8684, 8782), 'memgpt.connectors.storage.Passage', 'Passage', ([], {'text': "p['text']", 'embedding': "p['vector']", 'doc_id': "p['doc_id']", 'passage_id': "p['passage_id']"}), "(text=p['text'], embedding=p['vector'], doc_id=p['doc_id'],\n passage_id=p['passage_id'])\n", (8691, 8782), False, 'from memgpt.connectors.storage import 
StorageConnector, Passage\n'), ((5412, 5433), 'sqlalchemy.select', 'select', (['self.db_model'], {}), '(self.db_model)\n', (5418, 5433), False, 'from sqlalchemy import create_engine, Column, String, BIGINT, select, inspect, text\n')] |
from dotenv import load_dotenv
import os
import lancedb
import torch
from PIL import Image
import glob
import re
from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast
import concurrent.futures
# Set options for youtube_dl
ydl_opts = {
"quiet": True, # Silence youtube_dl output
"extract_flat": True, # Extract metadata only, no download
}
MODEL_ID = None
MODEL = None
TOKENIZER = None
PROCESSOR = None
def setup_clip_model(model_id):
global MODEL_ID, MODEL, TOKENIZER, PROCESSOR
MODEL_ID = model_id
TOKENIZER = CLIPTokenizerFast.from_pretrained(MODEL_ID)
MODEL = CLIPModel.from_pretrained(MODEL_ID)
PROCESSOR = CLIPProcessor.from_pretrained(MODEL_ID)
def embed_func(image):
    # Embed a single image with CLIP; the processor only needs the image inputs here
    inputs = PROCESSOR(images=image, return_tensors="pt")
    image_features = MODEL.get_image_features(**inputs)
    return image_features.detach().numpy()[0]
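# Hedged addition (not in the original script): a text-side counterpart to
# embed_func, so the same CLIP space can later be queried with natural language.
def embed_text(query):
    inputs = TOKENIZER([query], padding=True, return_tensors="pt")
    text_features = MODEL.get_text_features(**inputs)
    return text_features.detach().numpy()[0]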
db = lancedb.connect("data/video-lancedb")
setup_clip_model("openai/clip-vit-base-patch32")
def insert(video_ids, frames):
with torch.no_grad():
image_features = [
embed_func(Image.open(f"./videos/{vid}/frame-{frame}.jpg"))
for (vid, frame) in zip(video_ids, frames)
]
if "videos" in db.table_names():
table = db.open_table("videos")
table.add(
[
{
"vector": im,
"text": "",
"video_id": vid,
"start_time": (int(frame) - 1) * 30,
}
for (im, vid, frame) in zip(image_features, video_ids, frames)
]
)
else:
db.create_table(
"videos",
[
{
"vector": im,
"text": "",
"video_id": vid,
"start_time": (int(frame) - 1) * 30,
}
for (im, vid, frame) in zip(image_features, video_ids, frames)
],
)
videos = [
(
re.search("(?<=videos\/).*(?=\/)", name).group(),
re.search("(?<=frame-).*(?=.jpg)", name).group(),
)
for name in glob.glob("./videos/*/**")
]
print(videos[:5])
def process_video_chunk(chunk):
video_ids, frames = zip(*chunk)
insert(video_ids, frames)
def threaded_video_processing(videos, chunk_size, max_workers):
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
for i in range(0, len(videos), chunk_size):
chunk = videos[i : i + chunk_size]
executor.submit(process_video_chunk, chunk)
# Assuming you have defined the insert function and videos list
chunk_size = 500 # Number of videos to process in each chunk
max_workers = 10 # Number of concurrent threads
threaded_video_processing(videos, chunk_size, max_workers)
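# Hedged example (names assumed): once frames are inserted, the "videos" table can
# be searched with a CLIP text embedding to find matching 30-second segments.
def example_search(query="a person riding a bike", top_k=5):
    table = db.open_table("videos")
    return table.search(embed_text(query)).limit(top_k).to_list()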
| [
"lancedb.connect"
] | [((955, 992), 'lancedb.connect', 'lancedb.connect', (['"""data/video-lancedb"""'], {}), "('data/video-lancedb')\n", (970, 992), False, 'import lancedb\n'), ((553, 596), 'transformers.CLIPTokenizerFast.from_pretrained', 'CLIPTokenizerFast.from_pretrained', (['MODEL_ID'], {}), '(MODEL_ID)\n', (586, 596), False, 'from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast\n'), ((609, 644), 'transformers.CLIPModel.from_pretrained', 'CLIPModel.from_pretrained', (['MODEL_ID'], {}), '(MODEL_ID)\n', (634, 644), False, 'from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast\n'), ((661, 700), 'transformers.CLIPProcessor.from_pretrained', 'CLIPProcessor.from_pretrained', (['MODEL_ID'], {}), '(MODEL_ID)\n', (690, 700), False, 'from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast\n'), ((1084, 1099), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1097, 1099), False, 'import torch\n'), ((2202, 2228), 'glob.glob', 'glob.glob', (['"""./videos/*/**"""'], {}), "('./videos/*/**')\n", (2211, 2228), False, 'import glob\n'), ((1151, 1198), 'PIL.Image.open', 'Image.open', (['f"""./videos/{vid}/frame-{frame}.jpg"""'], {}), "(f'./videos/{vid}/frame-{frame}.jpg')\n", (1161, 1198), False, 'from PIL import Image\n'), ((2072, 2114), 're.search', 're.search', (['"""(?<=videos\\\\/).*(?=\\\\/)"""', 'name'], {}), "('(?<=videos\\\\/).*(?=\\\\/)', name)\n", (2081, 2114), False, 'import re\n'), ((2130, 2170), 're.search', 're.search', (['"""(?<=frame-).*(?=.jpg)"""', 'name'], {}), "('(?<=frame-).*(?=.jpg)', name)\n", (2139, 2170), False, 'import re\n')] |
import uvicorn
from fastapi import FastAPI, HTTPException, UploadFile, File
from pydantic import BaseModel
import openai
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.document_loaders import PyPDFLoader
from langchain.vectorstores import LanceDB
from langchain.text_splitter import RecursiveCharacterTextSplitter
import lancedb
import shutil
import os
# Initialize FastAPI app with metadata
app = FastAPI(
title="Chatbot RAG API",
description="This is a chatbot API template for RAG system.",
version="1.0.0",
)
# Pydantic model for chatbot request and response
class ChatRequest(BaseModel):
prompt: str
class ChatResponse(BaseModel):
response: str
# Global variable to store the path of the uploaded file
uploaded_file_path = None
# Endpoint to upload PDF
@app.post("/upload-pdf/")
async def upload_pdf(file: UploadFile = File(...)):
global uploaded_file_path
uploaded_file_path = f"uploaded_files/{file.filename}"
os.makedirs(os.path.dirname(uploaded_file_path), exist_ok=True)
with open(uploaded_file_path, "wb") as buffer:
shutil.copyfileobj(file.file, buffer)
return {"filename": file.filename}
# Setup LangChain
def setup_chain():
global uploaded_file_path
if not uploaded_file_path or not os.path.exists(uploaded_file_path):
raise HTTPException(
status_code=400, detail="No PDF file uploaded or file not found."
)
template = """Use the following pieces of context to answer the question at the end.
Given the context from the uploaded document, provide a concise and
accurate answer to the question. The document contains detailed and
specific information, so the answer should directly reflect the content of the document.
If the answer is not known, or if the document does not contain the information, state that the answer is not available in the document.
{context}
Question: {question}
Helpful Answer:"""
OPENAI_API_KEY = "sk-yorapikey"
loader = PyPDFLoader(uploaded_file_path)
docs = loader.load_and_split()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=50)
documents = text_splitter.split_documents(docs)
prompt = PromptTemplate(input_variables=["context", "question"], template=template)
embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
db_lance = lancedb.connect("/tmp/lancedb")
table = db_lance.create_table(
"my_table",
data=[
{
"vector": embeddings.embed_query("Hello World"),
"text": "Hello World",
"id": "1",
}
],
mode="overwrite",
)
db = LanceDB.from_documents(documents, embeddings, connection=table)
retriever = db.as_retriever()
chain_type_kwargs = {"prompt": prompt}
llm = ChatOpenAI(openai_api_key=OPENAI_API_KEY)
chain = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
chain_type_kwargs=chain_type_kwargs,
verbose=True,
)
return chain
# Endpoint for chatbot interaction
@app.post("/chat", response_model=ChatResponse)
async def chat(request: ChatRequest):
    # Setup the chain on each request so it always uses the latest uploaded file
    agent = setup_chain()
response = agent.run(request.prompt)
return {"response": response}
# Health check endpoint
@app.get("/", tags=["Health Check"])
async def read_root():
return {"message": "Chatbot API is running!"}
# Main function to run the app
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000)
| [
"lancedb.connect"
] | [((548, 664), 'fastapi.FastAPI', 'FastAPI', ([], {'title': '"""Chatbot RAG API"""', 'description': '"""This is a chatbot API template for RAG system."""', 'version': '"""1.0.0"""'}), "(title='Chatbot RAG API', description=\n 'This is a chatbot API template for RAG system.', version='1.0.0')\n", (555, 664), False, 'from fastapi import FastAPI, HTTPException, UploadFile, File\n'), ((1002, 1011), 'fastapi.File', 'File', (['...'], {}), '(...)\n', (1006, 1011), False, 'from fastapi import FastAPI, HTTPException, UploadFile, File\n'), ((2153, 2184), 'langchain.document_loaders.PyPDFLoader', 'PyPDFLoader', (['uploaded_file_path'], {}), '(uploaded_file_path)\n', (2164, 2184), False, 'from langchain.document_loaders import PyPDFLoader\n'), ((2241, 2305), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(200)', 'chunk_overlap': '(50)'}), '(chunk_size=200, chunk_overlap=50)\n', (2271, 2305), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2372, 2446), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['context', 'question']", 'template': 'template'}), "(input_variables=['context', 'question'], template=template)\n", (2386, 2446), False, 'from langchain.prompts import PromptTemplate\n'), ((2464, 2511), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'OPENAI_API_KEY'}), '(openai_api_key=OPENAI_API_KEY)\n', (2480, 2511), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((2528, 2559), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (2543, 2559), False, 'import lancedb\n'), ((2842, 2905), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['documents', 'embeddings'], {'connection': 'table'}), '(documents, embeddings, connection=table)\n', (2864, 2905), False, 'from langchain.vectorstores import LanceDB\n'), ((2994, 3035), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'OPENAI_API_KEY'}), '(openai_api_key=OPENAI_API_KEY)\n', (3004, 3035), False, 'from langchain.chat_models import ChatOpenAI\n'), ((3049, 3182), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'llm', 'chain_type': '"""stuff"""', 'retriever': 'retriever', 'chain_type_kwargs': 'chain_type_kwargs', 'verbose': '(True)'}), "(llm=llm, chain_type='stuff', retriever=\n retriever, chain_type_kwargs=chain_type_kwargs, verbose=True)\n", (3076, 3182), False, 'from langchain.chains import RetrievalQA\n'), ((3746, 3789), 'uvicorn.run', 'uvicorn.run', (['app'], {'host': '"""0.0.0.0"""', 'port': '(8000)'}), "(app, host='0.0.0.0', port=8000)\n", (3757, 3789), False, 'import uvicorn\n'), ((1119, 1154), 'os.path.dirname', 'os.path.dirname', (['uploaded_file_path'], {}), '(uploaded_file_path)\n', (1134, 1154), False, 'import os\n'), ((1230, 1267), 'shutil.copyfileobj', 'shutil.copyfileobj', (['file.file', 'buffer'], {}), '(file.file, buffer)\n', (1248, 1267), False, 'import shutil\n'), ((1463, 1548), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(400)', 'detail': '"""No PDF file uploaded or file not found."""'}), "(status_code=400, detail='No PDF file uploaded or file not found.'\n )\n", (1476, 1548), False, 'from fastapi import FastAPI, HTTPException, UploadFile, File\n'), ((1413, 1447), 'os.path.exists', 'os.path.exists', (['uploaded_file_path'], {}), '(uploaded_file_path)\n', (1427, 1447), False, 'import os\n')] |
import os
import typer
import pickle
import pandas as pd
from dotenv import load_dotenv
import openai
import pinecone
import lancedb
import pyarrow as pa
from collections import deque
TASK_CREATION_PROMPT = """
You are a task creation AI that uses the result of an execution agent to create new tasks with the following objective:
{objective}, The last completed task has the result: {result}. This result was based on this task description: {task_description}.
These are incomplete tasks: {task_list}. Based on the result, create new tasks to be completed by the AI system that
do not overlap with incomplete tasks. Return the tasks as an array."""
PRIORITIZATION_PROMPT = """
You are a task prioritization AI tasked with cleaning up the formatting of and reprioritizing
the following tasks: {task_names}. Consider the ultimate objective of your team:{objective}. Do not remove any tasks.
Return the result as a numbered list, like:
#. First task
#. Second task
Start the task list with number {next_task_id}."""
EXECUTION_PROMPT = """
You are an AI who performs one task based on the following objective: {objective}. Your task: {task}\nResponse:
"""
class OpenAIService:
def __init__(self, api_key):
openai.api_key = api_key
def get_ada_embedding(self, text):
return openai.Embedding.create(input=[text.replace('\n', ' ')], model='text-embedding-ada-002')['data'][0][
'embedding'
]
def create(self, prompt, max_tokens=100, temperature=0.5):
return (
openai.Completion.create(
engine='text-davinci-003',
prompt=prompt,
temperature=temperature,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
.choices[0]
.text.strip()
)
class TestAIService:
def __init__(self, ai_service, cache_file):
self.ai_service = ai_service
self.cache_file = cache_file
if os.path.isfile(cache_file):
self.cache = pickle.load(open(cache_file, 'rb'))
else:
self.cache = {'ada': {}, 'create': {}}
pickle.dump(self.cache, open(cache_file, 'wb'))
def get_ada_embedding(self, text):
if text not in self.cache['ada']:
self.cache['ada'][text] = self.ai_service.get_ada_embedding(text)
pickle.dump(self.cache, open(self.cache_file, 'wb'))
return self.cache['ada'][text]
def create(self, prompt, max_tokens=100, temperature=0.5):
key = (prompt, max_tokens, temperature)
if key not in self.cache['create']:
self.cache['create'][key] = self.ai_service.create(prompt, max_tokens, temperature)
pickle.dump(self.cache, open(self.cache_file, 'wb'))
return self.cache['create'][key]
class PineconeService:
def __init__(self, api_key, environment, table_name, dimension, metric, pod_type):
self.table_name = table_name
pinecone.init(api_key=api_key, environment=environment)
if table_name not in pinecone.list_indexes():
pinecone.create_index(table_name, dimension=dimension, metric=metric, pod_type=pod_type)
self.index = pinecone.Index(table_name)
def query(self, query_embedding, top_k):
results = self.index.query(query_embedding, top_k=top_k, include_metadata=True)
sorted_results = sorted(results.matches, key=lambda x: x.score, reverse=True)
return [(str(item.metadata['task'])) for item in sorted_results]
def upsert(self, data):
self.index.upsert(data)
class LanceService:
def __init__(self, table_name, dimension):
self.db = lancedb.connect('.')
schema = pa.schema(
[
pa.field('result_id', pa.string()),
pa.field('vector', pa.list_(pa.float32(), dimension)),
pa.field('task', pa.string()),
pa.field('result', pa.string()), # TODO There is a fixed schema but we keep converting
]
)
        data = [{'result_id': '0', 'vector': [0.0] * dimension, 'task': 'asd', 'result': 'asd'}]  # result_id must be a string to match the schema
self.table = self.db.create_table(table_name, mode='overwrite', data=data, schema=schema)
def query(self, query_embedding, top_k):
result = self.table.search(query_embedding).limit(top_k).to_df()
return [v for v in result['task']]
def upsert(self, data):
data = { # TODO This doesn't look good, why are we converting?
'result_id': data[0][0],
'vector': data[0][1],
'task': data[0][2]['task'],
'result': data[0][2]['result'],
}
self.table.add(pd.DataFrame([data]))
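# Hedged usage sketch for LanceService; the dimension and payload values are
# illustrative. upsert() expects a single (result_id, vector, metadata) tuple
# wrapped in a list, matching the conversion above.
def _example_lance_service_usage():
    svc = LanceService(table_name='demo-table', dimension=4)
    svc.upsert([('result_1', [0.1, 0.2, 0.3, 0.4], {'task': 'demo task', 'result': 'demo result'})])
    return svc.query([0.1, 0.2, 0.3, 0.4], top_k=1)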
class BabyAGI:
def __init__(self, objective, ai_service, vector_service):
self.ai_service = ai_service
self.vector_service = vector_service
self.objective = objective
self.objective_embedding = self.ai_service.get_ada_embedding(self.objective)
self.task_list = deque([])
def add_task(self, task):
self.task_list.append(task)
def task_creation_agent(self, result, task_description):
prompt = TASK_CREATION_PROMPT.format(
objective=self.objective,
result=result,
task_description=task_description,
task_list=', '.join([t['task_name'] for t in self.task_list]),
)
return [{'task_name': task_name} for task_name in self.ai_service.create(prompt).split('\n')]
def prioritization_agent(self, this_task_id):
prompt = PRIORITIZATION_PROMPT.format(
task_names=[t['task_name'] for t in self.task_list],
objective=self.objective,
next_task_id=int(this_task_id) + 1,
)
new_tasks = self.ai_service.create(prompt, max_tokens=1000).split('\n')
self.task_list = deque()
for task_string in new_tasks:
task_parts = task_string.strip().split('.', 1)
if len(task_parts) == 2:
task_id = task_parts[0].strip()
task_name = task_parts[1].strip()
self.task_list.append({'task_id': task_id, 'task_name': task_name})
def run(self, first_task):
print(self.objective)
self.add_task({'task_id': 1, 'task_name': first_task})
for _ in range(4):
if self.task_list:
context = self.vector_service.query(self.objective_embedding, 5)
task = self.task_list.popleft()
print(task['task_name'])
result = self.ai_service.create(
prompt=EXECUTION_PROMPT.format(objective=self.objective, task=task),
max_tokens=2000,
temperature=0.7,
)
print(result)
this_task_id = int(task['task_id'])
self.vector_service.upsert(
[
(
f'result_{task["task_id"]}',
self.ai_service.get_ada_embedding(result),
{'task': task['task_name'], 'result': result},
)
]
)
new_tasks = self.task_creation_agent({'data': result}, task['task_name'])
task_id_counter = 1
for new_task in new_tasks:
task_id_counter += 1
new_task.update({'task_id': task_id_counter})
self.add_task(new_task)
self.prioritization_agent(this_task_id)
def main():
load_dotenv()
baby_agi = BabyAGI(
objective='Solve world hunger.',
ai_service=TestAIService(
ai_service=OpenAIService(api_key=os.getenv('OPENAI_API_KEY')),
cache_file='babyagi_cache.pkl',
),
vector_service=LanceService(
table_name='test-table',
dimension=1536,
)
# vector_service=PineconeService(
# api_key=os.getenv('PINECONE_API_KEY'),
# environment=os.getenv('PINECONE_ENVIRONMENT'),
# table_name='test-table',
# dimension=1536,
# metric='cosine',
# pod_type='p1',
# ),
)
baby_agi.run(first_task='Develop a task list.')
if __name__ == '__main__':
typer.run(main)
| [
"lancedb.connect"
] | [((7628, 7641), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (7639, 7641), False, 'from dotenv import load_dotenv\n'), ((8372, 8387), 'typer.run', 'typer.run', (['main'], {}), '(main)\n', (8381, 8387), False, 'import typer\n'), ((2036, 2062), 'os.path.isfile', 'os.path.isfile', (['cache_file'], {}), '(cache_file)\n', (2050, 2062), False, 'import os\n'), ((3029, 3084), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'api_key', 'environment': 'environment'}), '(api_key=api_key, environment=environment)\n', (3042, 3084), False, 'import pinecone\n'), ((3261, 3287), 'pinecone.Index', 'pinecone.Index', (['table_name'], {}), '(table_name)\n', (3275, 3287), False, 'import pinecone\n'), ((3729, 3749), 'lancedb.connect', 'lancedb.connect', (['"""."""'], {}), "('.')\n", (3744, 3749), False, 'import lancedb\n'), ((5063, 5072), 'collections.deque', 'deque', (['[]'], {}), '([])\n', (5068, 5072), False, 'from collections import deque\n'), ((5911, 5918), 'collections.deque', 'deque', ([], {}), '()\n', (5916, 5918), False, 'from collections import deque\n'), ((3114, 3137), 'pinecone.list_indexes', 'pinecone.list_indexes', ([], {}), '()\n', (3135, 3137), False, 'import pinecone\n'), ((3151, 3243), 'pinecone.create_index', 'pinecone.create_index', (['table_name'], {'dimension': 'dimension', 'metric': 'metric', 'pod_type': 'pod_type'}), '(table_name, dimension=dimension, metric=metric,\n pod_type=pod_type)\n', (3172, 3243), False, 'import pinecone\n'), ((4734, 4754), 'pandas.DataFrame', 'pd.DataFrame', (['[data]'], {}), '([data])\n', (4746, 4754), True, 'import pandas as pd\n'), ((3830, 3841), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (3839, 3841), True, 'import pyarrow as pa\n'), ((3948, 3959), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (3957, 3959), True, 'import pyarrow as pa\n'), ((3997, 4008), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (4006, 4008), True, 'import pyarrow as pa\n'), ((3888, 3900), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (3898, 3900), True, 'import pyarrow as pa\n'), ((1528, 1700), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': '"""text-davinci-003"""', 'prompt': 'prompt', 'temperature': 'temperature', 'max_tokens': 'max_tokens', 'top_p': '(1)', 'frequency_penalty': '(0)', 'presence_penalty': '(0)'}), "(engine='text-davinci-003', prompt=prompt,\n temperature=temperature, max_tokens=max_tokens, top_p=1,\n frequency_penalty=0, presence_penalty=0)\n", (1552, 1700), False, 'import openai\n'), ((7786, 7813), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (7795, 7813), False, 'import os\n')] |
import argparse
import io
import PIL
import duckdb
import lancedb
import lance
import pyarrow.compute as pc
from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast
import gradio as gr
MODEL_ID = None
MODEL = None
TOKENIZER = None
PROCESSOR = None
def create_table(dataset):
db = lancedb.connect("~/datasets/demo")
if "diffusiondb" in db.table_names():
return db.open_table("diffusiondb")
data = lance.dataset(dataset).to_table()
tbl = db.create_table(
"diffusiondb", data.filter(~pc.field("prompt").is_null()), mode="overwrite"
)
tbl.create_fts_index(["prompt"])
return tbl
def setup_clip_model(model_id):
global MODEL_ID, MODEL, TOKENIZER, PROCESSOR
MODEL_ID = model_id
TOKENIZER = CLIPTokenizerFast.from_pretrained(MODEL_ID)
MODEL = CLIPModel.from_pretrained(MODEL_ID)
PROCESSOR = CLIPProcessor.from_pretrained(MODEL_ID)
def embed_func(query):
inputs = TOKENIZER([query], padding=True, return_tensors="pt")
text_features = MODEL.get_text_features(**inputs)
return text_features.detach().numpy()[0]
def find_image_vectors(query):
emb = embed_func(query)
code = (
"import lancedb\n"
"db = lancedb.connect('~/datasets/demo')\n"
"tbl = db.open_table('diffusiondb')\n\n"
f"embedding = embed_func('{query}')\n"
"tbl.search(embedding).limit(9).to_df()"
)
return (_extract(tbl.search(emb).limit(9).to_df()), code)
def find_image_keywords(query):
code = (
"import lancedb\n"
"db = lancedb.connect('~/datasets/demo')\n"
"tbl = db.open_table('diffusiondb')\n\n"
f"tbl.search('{query}').limit(9).to_df()"
)
return (_extract(tbl.search(query).limit(9).to_df()), code)
def find_image_sql(query):
code = (
"import lancedb\n"
"import duckdb\n"
"db = lancedb.connect('~/datasets/demo')\n"
"tbl = db.open_table('diffusiondb')\n\n"
"diffusiondb = tbl.to_lance()\n"
f"duckdb.sql('{query}').to_df()"
)
diffusiondb = tbl.to_lance()
return (_extract(duckdb.sql(query).to_df()), code)
def _extract(df):
image_col = "image"
return [
(PIL.Image.open(io.BytesIO(row[image_col])), row["prompt"])
for _, row in df.iterrows()
]
def _extract(df):
image_col = "image"
return [
(PIL.Image.open(io.BytesIO(row[image_col])), row["prompt"])
for _, row in df.iterrows()
]
def create_gradio_dash():
with gr.Blocks() as demo:
with gr.Row():
with gr.Tab("Embeddings"):
vector_query = gr.Textbox(
value="portraits of a person", show_label=False
)
b1 = gr.Button("Submit")
with gr.Tab("Keywords"):
keyword_query = gr.Textbox(value="ninja turtle", show_label=False)
b2 = gr.Button("Submit")
with gr.Tab("SQL"):
sql_query = gr.Textbox(
value="SELECT * from diffusiondb WHERE image_nsfw >= 2 LIMIT 9",
show_label=False,
)
b3 = gr.Button("Submit")
with gr.Row():
code = gr.Code(label="Code", language="python")
with gr.Row():
gallery = gr.Gallery(
label="Found images", show_label=False, elem_id="gallery"
).style(columns=[3], rows=[3], object_fit="contain", height="auto")
b1.click(find_image_vectors, inputs=vector_query, outputs=[gallery, code])
b2.click(find_image_keywords, inputs=keyword_query, outputs=[gallery, code])
b3.click(find_image_sql, inputs=sql_query, outputs=[gallery, code])
demo.launch()
def args_parse():
parser = argparse.ArgumentParser()
parser.add_argument("--model_id", type=str, default="openai/clip-vit-base-patch32")
parser.add_argument("--dataset", type=str, default="rawdata.lance")
return parser.parse_args()
if __name__ == "__main__":
args = args_parse()
setup_clip_model(args.model_id)
tbl = create_table(args.dataset)
create_gradio_dash()
| [
"lancedb.connect"
] | [((299, 333), 'lancedb.connect', 'lancedb.connect', (['"""~/datasets/demo"""'], {}), "('~/datasets/demo')\n", (314, 333), False, 'import lancedb\n'), ((759, 802), 'transformers.CLIPTokenizerFast.from_pretrained', 'CLIPTokenizerFast.from_pretrained', (['MODEL_ID'], {}), '(MODEL_ID)\n', (792, 802), False, 'from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast\n'), ((815, 850), 'transformers.CLIPModel.from_pretrained', 'CLIPModel.from_pretrained', (['MODEL_ID'], {}), '(MODEL_ID)\n', (840, 850), False, 'from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast\n'), ((867, 906), 'transformers.CLIPProcessor.from_pretrained', 'CLIPProcessor.from_pretrained', (['MODEL_ID'], {}), '(MODEL_ID)\n', (896, 906), False, 'from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast\n'), ((3761, 3786), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3784, 3786), False, 'import argparse\n'), ((2502, 2513), 'gradio.Blocks', 'gr.Blocks', ([], {}), '()\n', (2511, 2513), True, 'import gradio as gr\n'), ((432, 454), 'lance.dataset', 'lance.dataset', (['dataset'], {}), '(dataset)\n', (445, 454), False, 'import lance\n'), ((2536, 2544), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (2542, 2544), True, 'import gradio as gr\n'), ((3183, 3191), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (3189, 3191), True, 'import gradio as gr\n'), ((3212, 3252), 'gradio.Code', 'gr.Code', ([], {'label': '"""Code"""', 'language': '"""python"""'}), "(label='Code', language='python')\n", (3219, 3252), True, 'import gradio as gr\n'), ((3266, 3274), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (3272, 3274), True, 'import gradio as gr\n'), ((2212, 2238), 'io.BytesIO', 'io.BytesIO', (['row[image_col]'], {}), '(row[image_col])\n', (2222, 2238), False, 'import io\n'), ((2379, 2405), 'io.BytesIO', 'io.BytesIO', (['row[image_col]'], {}), '(row[image_col])\n', (2389, 2405), False, 'import io\n'), ((2563, 2583), 'gradio.Tab', 'gr.Tab', (['"""Embeddings"""'], {}), "('Embeddings')\n", (2569, 2583), True, 'import gradio as gr\n'), ((2616, 2675), 'gradio.Textbox', 'gr.Textbox', ([], {'value': '"""portraits of a person"""', 'show_label': '(False)'}), "(value='portraits of a person', show_label=False)\n", (2626, 2675), True, 'import gradio as gr\n'), ((2735, 2754), 'gradio.Button', 'gr.Button', (['"""Submit"""'], {}), "('Submit')\n", (2744, 2754), True, 'import gradio as gr\n'), ((2772, 2790), 'gradio.Tab', 'gr.Tab', (['"""Keywords"""'], {}), "('Keywords')\n", (2778, 2790), True, 'import gradio as gr\n'), ((2824, 2874), 'gradio.Textbox', 'gr.Textbox', ([], {'value': '"""ninja turtle"""', 'show_label': '(False)'}), "(value='ninja turtle', show_label=False)\n", (2834, 2874), True, 'import gradio as gr\n'), ((2896, 2915), 'gradio.Button', 'gr.Button', (['"""Submit"""'], {}), "('Submit')\n", (2905, 2915), True, 'import gradio as gr\n'), ((2933, 2946), 'gradio.Tab', 'gr.Tab', (['"""SQL"""'], {}), "('SQL')\n", (2939, 2946), True, 'import gradio as gr\n'), ((2976, 3073), 'gradio.Textbox', 'gr.Textbox', ([], {'value': '"""SELECT * from diffusiondb WHERE image_nsfw >= 2 LIMIT 9"""', 'show_label': '(False)'}), "(value='SELECT * from diffusiondb WHERE image_nsfw >= 2 LIMIT 9',\n show_label=False)\n", (2986, 3073), True, 'import gradio as gr\n'), ((3150, 3169), 'gradio.Button', 'gr.Button', (['"""Submit"""'], {}), "('Submit')\n", (3159, 3169), True, 'import gradio as gr\n'), ((2097, 2114), 'duckdb.sql', 'duckdb.sql', (['query'], {}), '(query)\n', (2107, 2114), False, 'import duckdb\n'), ((3298, 3367), 
'gradio.Gallery', 'gr.Gallery', ([], {'label': '"""Found images"""', 'show_label': '(False)', 'elem_id': '"""gallery"""'}), "(label='Found images', show_label=False, elem_id='gallery')\n", (3308, 3367), True, 'import gradio as gr\n'), ((529, 547), 'pyarrow.compute.field', 'pc.field', (['"""prompt"""'], {}), "('prompt')\n", (537, 547), True, 'import pyarrow.compute as pc\n')] |
import os
import typer
import pickle
import pandas as pd
from dotenv import load_dotenv
import openai
import pinecone
import lancedb
import pyarrow as pa
from collections import deque
TASK_CREATION_PROMPT = """
You are a task creation AI that uses the result of an execution agent to create new tasks with the following objective:
{objective}, The last completed task has the result: {result}. This result was based on this task description: {task_description}.
These are incomplete tasks: {task_list}. Based on the result, create new tasks to be completed by the AI system that
do not overlap with incomplete tasks. Return the tasks as an array."""
PRIORITIZATION_PROMPT = """
You are a task prioritization AI tasked with cleaning up the formatting of and reprioritizing
the following tasks: {task_names}. Consider the ultimate objective of your team:{objective}. Do not remove any tasks.
Return the result as a numbered list, like:
#. First task
#. Second task
Start the task list with number {next_task_id}."""
EXECUTION_PROMPT = """
You are an AI who performs one task based on the following objective: {objective}. Your task: {task}\nResponse:
"""
class Task:
def __init__(self, name, id=None, result=None, vector=None):
self.name = name
self.id = id
self.result = result
self.vector = vector
class OpenAIService:
def __init__(self, api_key):
openai.api_key = api_key
def get_ada_embedding(self, text):
return openai.Embedding.create(input=[text.replace('\n', ' ')], model='text-embedding-ada-002')['data'][0][
'embedding'
]
def create(self, prompt, max_tokens=100, temperature=0.5):
return (
openai.Completion.create(
engine='text-davinci-003',
prompt=prompt,
temperature=temperature,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
.choices[0]
.text.strip()
)
class TestAIService:
def __init__(self, ai_service, cache_file):
self.ai_service = ai_service
self.cache_file = cache_file
if os.path.isfile(cache_file):
self.cache = pickle.load(open(cache_file, 'rb'))
else:
self.cache = {'ada': {}, 'create': {}}
pickle.dump(self.cache, open(cache_file, 'wb'))
def get_ada_embedding(self, text):
if text not in self.cache['ada']:
self.cache['ada'][text] = self.ai_service.get_ada_embedding(text)
pickle.dump(self.cache, open(self.cache_file, 'wb'))
return self.cache['ada'][text]
def create(self, prompt, max_tokens=100, temperature=0.5):
key = (prompt, max_tokens, temperature)
if key not in self.cache['create']:
self.cache['create'][key] = self.ai_service.create(prompt, max_tokens, temperature)
pickle.dump(self.cache, open(self.cache_file, 'wb'))
return self.cache['create'][key]
class PineconeService:
def __init__(self, api_key, environment, table_name, dimension, metric, pod_type):
self.table_name = table_name
pinecone.init(api_key=api_key, environment=environment)
if table_name not in pinecone.list_indexes():
pinecone.create_index(table_name, dimension=dimension, metric=metric, pod_type=pod_type)
self.index = pinecone.Index(table_name)
def query(self, query_embedding, top_k):
results = self.index.query(query_embedding, top_k=top_k, include_metadata=True)
sorted_results = sorted(results.matches, key=lambda x: x.score, reverse=True)
return [Task(**item.metadata) for item in sorted_results]
def upsert(self, task):
self.index.upsert([(task.id, task.vector, task.__dict__)])
class LanceService:
def __init__(self, table_name, dimension):
self.db = lancedb.connect('.')
schema = pa.schema(
[
pa.field('id', pa.int32()),
pa.field('vector', pa.list_(pa.float32(), dimension)),
pa.field('name', pa.string()),
pa.field('result', pa.string()), # TODO There is a fixed schema but we keep converting
]
)
data = [{'id': 0, 'vector': [0.0] * dimension, 'name': 'asd', 'result': 'asd'}]
self.table = self.db.create_table(table_name, mode='overwrite', data=data, schema=schema)
def query(self, query_embedding, top_k):
result = self.table.search(query_embedding).limit(top_k).to_df().drop(columns=['score'])
return [Task(**v) for v in result.to_dict(orient="records")]
def upsert(self, task):
self.table.add(pd.DataFrame([task.__dict__]))
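# Hedged note: pd.DataFrame([task.__dict__]) only round-trips cleanly when the
# Task attributes line up with the Arrow schema above (id: int32, vector:
# list<float32>, name: string, result: string). A minimal self-check under that
# assumption (the dimension of 1536 is illustrative):
def _example_upsert_roundtrip(svc, dimension=1536):
    t = Task(name='demo', id=1, result='ok', vector=[0.0] * dimension)
    svc.upsert(t)
    return svc.query(t.vector, top_k=1)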
class BabyAGI:
def __init__(self, objective, ai_service, vector_service):
self.ai_service = ai_service
self.vector_service = vector_service
self.objective = objective
self.objective_embedding = self.ai_service.get_ada_embedding(self.objective)
self.task_list = deque([])
def add_task(self, task):
if task.id is None:
task.id = max([t.id for t in self.task_list], default=0) + 1
self.task_list.append(task)
def task_creation_agent(self, task):
prompt = TASK_CREATION_PROMPT.format(
objective=self.objective,
result=task.result,
task_description=task.name,
task_list=', '.join([t.name for t in self.task_list]),
)
for task_name in self.ai_service.create(prompt).split('\n'):
self.add_task(Task(name=task_name))
def task_prioritization_agent(self, this_task_id):
def to_task(value):
parts = value.strip().split('.', 1)
if len(parts) != 2:
return None
return Task(id=int(parts[0].strip()), name=parts[1].strip())
prompt = PRIORITIZATION_PROMPT.format(
task_names=', '.join([t.name for t in self.task_list]),
objective=self.objective,
next_task_id=int(this_task_id) + 1,
)
new_tasks = self.ai_service.create(prompt, max_tokens=1000)
self.task_list = deque([to_task(v) for v in new_tasks.split('\n') if to_task(v) is not None])
def task_execution_agent(self, task):
task.result = self.ai_service.create(
prompt=EXECUTION_PROMPT.format(objective=self.objective, task=task),
max_tokens=2000,
temperature=0.7,
)
task.vector = self.ai_service.get_ada_embedding(task.result)
self.vector_service.upsert(task)
def run(self, first_task):
self.add_task(Task(name=first_task))
for _ in range(4):
if len(self.task_list) == 0:
exit(0)
context = self.vector_service.query(self.objective_embedding, 5)
task = self.task_list.popleft()
self.task_execution_agent(task)
self.task_creation_agent(task)
self.task_prioritization_agent(task.id)
def main():
load_dotenv()
baby_agi = BabyAGI(
objective='Solve world hunger.',
ai_service=TestAIService(
ai_service=OpenAIService(api_key=os.getenv('OPENAI_API_KEY')),
cache_file='babyagi_cache.pkl',
),
vector_service=LanceService(
table_name='test-table',
dimension=1536,
)
# vector_service=PineconeService(
# api_key=os.getenv('PINECONE_API_KEY'),
# environment=os.getenv('PINECONE_ENVIRONMENT'),
# table_name='test-table',
# dimension=1536,
# metric='cosine',
# pod_type='p1',
# ),
)
baby_agi.run(first_task='Develop a task list.')
if __name__ == '__main__':
typer.run(main)
| [
"lancedb.connect"
] | [((7093, 7106), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (7104, 7106), False, 'from dotenv import load_dotenv\n'), ((7837, 7852), 'typer.run', 'typer.run', (['main'], {}), '(main)\n', (7846, 7852), False, 'import typer\n'), ((2219, 2245), 'os.path.isfile', 'os.path.isfile', (['cache_file'], {}), '(cache_file)\n', (2233, 2245), False, 'import os\n'), ((3212, 3267), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'api_key', 'environment': 'environment'}), '(api_key=api_key, environment=environment)\n', (3225, 3267), False, 'import pinecone\n'), ((3444, 3470), 'pinecone.Index', 'pinecone.Index', (['table_name'], {}), '(table_name)\n', (3458, 3470), False, 'import pinecone\n'), ((3940, 3960), 'lancedb.connect', 'lancedb.connect', (['"""."""'], {}), "('.')\n", (3955, 3960), False, 'import lancedb\n'), ((5081, 5090), 'collections.deque', 'deque', (['[]'], {}), '([])\n', (5086, 5090), False, 'from collections import deque\n'), ((3297, 3320), 'pinecone.list_indexes', 'pinecone.list_indexes', ([], {}), '()\n', (3318, 3320), False, 'import pinecone\n'), ((3334, 3426), 'pinecone.create_index', 'pinecone.create_index', (['table_name'], {'dimension': 'dimension', 'metric': 'metric', 'pod_type': 'pod_type'}), '(table_name, dimension=dimension, metric=metric,\n pod_type=pod_type)\n', (3355, 3426), False, 'import pinecone\n'), ((4743, 4772), 'pandas.DataFrame', 'pd.DataFrame', (['[task.__dict__]'], {}), '([task.__dict__])\n', (4755, 4772), True, 'import pandas as pd\n'), ((4034, 4044), 'pyarrow.int32', 'pa.int32', ([], {}), '()\n', (4042, 4044), True, 'import pyarrow as pa\n'), ((4151, 4162), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (4160, 4162), True, 'import pyarrow as pa\n'), ((4200, 4211), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (4209, 4211), True, 'import pyarrow as pa\n'), ((4091, 4103), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (4101, 4103), True, 'import pyarrow as pa\n'), ((1711, 1883), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': '"""text-davinci-003"""', 'prompt': 'prompt', 'temperature': 'temperature', 'max_tokens': 'max_tokens', 'top_p': '(1)', 'frequency_penalty': '(0)', 'presence_penalty': '(0)'}), "(engine='text-davinci-003', prompt=prompt,\n temperature=temperature, max_tokens=max_tokens, top_p=1,\n frequency_penalty=0, presence_penalty=0)\n", (1735, 1883), False, 'import openai\n'), ((7251, 7278), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (7260, 7278), False, 'import os\n')] |
from datasets import load_dataset
import lancedb
import pytest
import main
# ==================== TESTING ====================
@pytest.fixture
def mock_embed_func(monkeypatch):
def mock_api_call(*args, **kwargs):
return [0.5, 0.5]
monkeypatch.setattr(main, "embed", mock_api_call)
@pytest.fixture
def mock_embed_txt_func(monkeypatch):
def mock_api_call(*args, **kwargs):
return [0.5, 0.5]
monkeypatch.setattr(main, "embed_txt", mock_api_call)
def test_main(mock_embed_func, mock_embed_txt_func):
global dataset
dataset = load_dataset(
"CVdatasets/ImageNet15_animals_unbalanced_aug1", split="train"
)
main.dataset = dataset
db = lancedb.connect("./data/tables")
global tbl
try:
tbl = main.create_data(db)
main.tbl = tbl
    except Exception:
        # The table already exists from a previous run, so open it instead
        tbl = db.open_table("animal_images")
        main.tbl = tbl
print(tbl.to_pandas())
global test
test = load_dataset(
"CVdatasets/ImageNet15_animals_unbalanced_aug1", split="validation"
)
main.test = test
main.image_search(0)
main.text_search("a full white dog")
| [
"lancedb.connect"
] | [((570, 646), 'datasets.load_dataset', 'load_dataset', (['"""CVdatasets/ImageNet15_animals_unbalanced_aug1"""'], {'split': '"""train"""'}), "('CVdatasets/ImageNet15_animals_unbalanced_aug1', split='train')\n", (582, 646), False, 'from datasets import load_dataset\n'), ((698, 730), 'lancedb.connect', 'lancedb.connect', (['"""./data/tables"""'], {}), "('./data/tables')\n", (713, 730), False, 'import lancedb\n'), ((950, 1036), 'datasets.load_dataset', 'load_dataset', (['"""CVdatasets/ImageNet15_animals_unbalanced_aug1"""'], {'split': '"""validation"""'}), "('CVdatasets/ImageNet15_animals_unbalanced_aug1', split=\n 'validation')\n", (962, 1036), False, 'from datasets import load_dataset\n'), ((1072, 1092), 'main.image_search', 'main.image_search', (['(0)'], {}), '(0)\n', (1089, 1092), False, 'import main\n'), ((1097, 1133), 'main.text_search', 'main.text_search', (['"""a full white dog"""'], {}), "('a full white dog')\n", (1113, 1133), False, 'import main\n'), ((770, 790), 'main.create_data', 'main.create_data', (['db'], {}), '(db)\n', (786, 790), False, 'import main\n')] |
from datasets import load_dataset
import lancedb
import pytest
import main
# ==================== TESTING ====================
@pytest.fixture
def mock_embed(monkeypatch):
def mock_inference(audio_data):
return (None, [[0.5, 0.5]])
monkeypatch.setattr(main, "create_audio_embedding", mock_inference)
def test_main(mock_embed):
global dataset, db, table_name
dataset = load_dataset("ashraq/esc50", split="train")
db = lancedb.connect("data/audio-lancedb")
table_name = "audio-search"
main.dataset = dataset
main.db = db
main.table_name = table_name
main.insert_audio()
main.search_audio(500)
| [
"lancedb.connect"
] | [((417, 460), 'datasets.load_dataset', 'load_dataset', (['"""ashraq/esc50"""'], {'split': '"""train"""'}), "('ashraq/esc50', split='train')\n", (429, 460), False, 'from datasets import load_dataset\n'), ((471, 508), 'lancedb.connect', 'lancedb.connect', (['"""data/audio-lancedb"""'], {}), "('data/audio-lancedb')\n", (486, 508), False, 'import lancedb\n'), ((624, 643), 'main.insert_audio', 'main.insert_audio', ([], {}), '()\n', (641, 643), False, 'import main\n'), ((649, 671), 'main.search_audio', 'main.search_audio', (['(500)'], {}), '(500)\n', (666, 671), False, 'import main\n')] |
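The bodies of main.insert_audio and main.search_audio are not shown here; the sketch below assumes a typical LanceDB add/search flow under the mocked two-dimensional embedding. All names and data are illustrative.

import lancedb

db = lancedb.connect("data/audio-lancedb")
table = db.create_table(
    "audio-search",
    data=[{"vector": [0.5, 0.5], "category": "dog", "row_id": 0}],
    mode="overwrite",
)
# Search with the (mocked) embedding of a query clip and keep the closest row
hits = table.search([0.5, 0.5]).limit(1).to_list()
print(hits[0]["category"])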
# Ultralytics YOLO 🚀, AGPL-3.0 license
from io import BytesIO
from pathlib import Path
from typing import Any, List, Tuple, Union
import cv2
import numpy as np
import torch
from matplotlib import pyplot as plt
from pandas import DataFrame
from PIL import Image
from tqdm import tqdm
from ultralytics.data.augment import Format
from ultralytics.data.dataset import YOLODataset
from ultralytics.data.utils import check_det_dataset
from ultralytics.models.yolo.model import YOLO
from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks
from .utils import get_sim_index_schema, get_table_schema, plot_query_result, prompt_sql_query, sanitize_batch
class ExplorerDataset(YOLODataset):
def __init__(self, *args, data: dict = None, **kwargs) -> None:
super().__init__(*args, data=data, **kwargs)
def load_image(self, i: int) -> Union[Tuple[np.ndarray, Tuple[int, int], Tuple[int, int]], Tuple[None, None, None]]:
"""Loads 1 image from dataset index 'i' without any resize ops."""
im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i]
if im is None: # not cached in RAM
if fn.exists(): # load npy
im = np.load(fn)
else: # read image
im = cv2.imread(f) # BGR
if im is None:
raise FileNotFoundError(f"Image Not Found {f}")
h0, w0 = im.shape[:2] # orig hw
return im, (h0, w0), im.shape[:2]
return self.ims[i], self.im_hw0[i], self.im_hw[i]
def build_transforms(self, hyp: IterableSimpleNamespace = None):
"""Creates transforms for dataset images without resizing."""
return Format(
bbox_format="xyxy",
normalize=False,
return_mask=self.use_segments,
return_keypoint=self.use_keypoints,
batch_idx=True,
mask_ratio=hyp.mask_ratio,
mask_overlap=hyp.overlap_mask,
)
class Explorer:
def __init__(
self, data: Union[str, Path] = "coco128.yaml", model: str = "yolov8n.pt", uri: str = "~/ultralytics/explorer"
) -> None:
checks.check_requirements(["lancedb>=0.4.3", "duckdb"])
import lancedb
self.connection = lancedb.connect(uri)
self.table_name = Path(data).name.lower() + "_" + model.lower()
self.sim_idx_base_name = (
f"{self.table_name}_sim_idx".lower()
) # Use this name and append thres and top_k to reuse the table
self.model = YOLO(model)
self.data = data # None
self.choice_set = None
self.table = None
self.progress = 0
def create_embeddings_table(self, force: bool = False, split: str = "train") -> None:
"""
Create LanceDB table containing the embeddings of the images in the dataset. The table will be reused if it
already exists. Pass force=True to overwrite the existing table.
Args:
force (bool): Whether to overwrite the existing table or not. Defaults to False.
split (str): Split of the dataset to use. Defaults to 'train'.
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
```
"""
if self.table is not None and not force:
LOGGER.info("Table already exists. Reusing it. Pass force=True to overwrite it.")
return
if self.table_name in self.connection.table_names() and not force:
LOGGER.info(f"Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.")
self.table = self.connection.open_table(self.table_name)
self.progress = 1
return
if self.data is None:
raise ValueError("Data must be provided to create embeddings table")
data_info = check_det_dataset(self.data)
if split not in data_info:
raise ValueError(
f"Split {split} is not found in the dataset. Available keys in the dataset are {list(data_info.keys())}"
)
choice_set = data_info[split]
choice_set = choice_set if isinstance(choice_set, list) else [choice_set]
self.choice_set = choice_set
dataset = ExplorerDataset(img_path=choice_set, data=data_info, augment=False, cache=False, task=self.model.task)
# Create the table schema
batch = dataset[0]
vector_size = self.model.embed(batch["im_file"], verbose=False)[0].shape[0]
table = self.connection.create_table(self.table_name, schema=get_table_schema(vector_size), mode="overwrite")
table.add(
self._yield_batches(
dataset,
data_info,
self.model,
exclude_keys=["img", "ratio_pad", "resized_shape", "ori_shape", "batch_idx"],
)
)
self.table = table
def _yield_batches(self, dataset: ExplorerDataset, data_info: dict, model: YOLO, exclude_keys: List[str]):
"""Generates batches of data for embedding, excluding specified keys."""
for i in tqdm(range(len(dataset))):
self.progress = float(i + 1) / len(dataset)
batch = dataset[i]
for k in exclude_keys:
batch.pop(k, None)
batch = sanitize_batch(batch, data_info)
batch["vector"] = model.embed(batch["im_file"], verbose=False)[0].detach().tolist()
yield [batch]
def query(
self, imgs: Union[str, np.ndarray, List[str], List[np.ndarray]] = None, limit: int = 25
) -> Any: # pyarrow.Table
"""
Query the table for similar images. Accepts a single image or a list of images.
Args:
imgs (str or list): Path to the image or a list of paths to the images.
limit (int): Number of results to return.
Returns:
(pyarrow.Table): An arrow table containing the results. Supports converting to:
- pandas dataframe: `result.to_pandas()`
- dict of lists: `result.to_pydict()`
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
similar = exp.query(img='https://ultralytics.com/images/zidane.jpg')
```
"""
if self.table is None:
raise ValueError("Table is not created. Please create the table first.")
if isinstance(imgs, str):
imgs = [imgs]
assert isinstance(imgs, list), f"img must be a string or a list of strings. Got {type(imgs)}"
embeds = self.model.embed(imgs)
# Get avg if multiple images are passed (len > 1)
embeds = torch.mean(torch.stack(embeds), 0).cpu().numpy() if len(embeds) > 1 else embeds[0].cpu().numpy()
return self.table.search(embeds).limit(limit).to_arrow()
def sql_query(
self, query: str, return_type: str = "pandas"
) -> Union[DataFrame, Any, None]: # pandas.dataframe or pyarrow.Table
"""
Run a SQL-Like query on the table. Utilizes LanceDB predicate pushdown.
Args:
query (str): SQL query to run.
return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'.
Returns:
(pyarrow.Table): An arrow table containing the results.
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'"
result = exp.sql_query(query)
```
"""
assert return_type in [
"pandas",
"arrow",
], f"Return type should be either `pandas` or `arrow`, but got {return_type}"
import duckdb
if self.table is None:
raise ValueError("Table is not created. Please create the table first.")
# Note: using filter pushdown would be a better long term solution. Temporarily using duckdb for this.
table = self.table.to_arrow() # noqa NOTE: Don't comment this. This line is used by DuckDB
if not query.startswith("SELECT") and not query.startswith("WHERE"):
raise ValueError(
f"Query must start with SELECT or WHERE. You can either pass the entire query or just the WHERE clause. found {query}"
)
if query.startswith("WHERE"):
query = f"SELECT * FROM 'table' {query}"
LOGGER.info(f"Running query: {query}")
rs = duckdb.sql(query)
if return_type == "pandas":
return rs.df()
elif return_type == "arrow":
return rs.arrow()
def plot_sql_query(self, query: str, labels: bool = True) -> Image.Image:
"""
Plot the results of a SQL-Like query on the table.
Args:
query (str): SQL query to run.
labels (bool): Whether to plot the labels or not.
Returns:
(PIL.Image): Image containing the plot.
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'"
result = exp.plot_sql_query(query)
```
"""
result = self.sql_query(query, return_type="arrow")
if len(result) == 0:
LOGGER.info("No results found.")
return None
img = plot_query_result(result, plot_labels=labels)
return Image.fromarray(img)
def get_similar(
self,
img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None,
idx: Union[int, List[int]] = None,
limit: int = 25,
return_type: str = "pandas",
) -> Union[DataFrame, Any]: # pandas.dataframe or pyarrow.Table
"""
Query the table for similar images. Accepts a single image or a list of images.
Args:
img (str or list): Path to the image or a list of paths to the images.
idx (int or list): Index of the image in the table or a list of indexes.
limit (int): Number of results to return. Defaults to 25.
return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'.
Returns:
(pandas.DataFrame): A dataframe containing the results.
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
similar = exp.get_similar(img='https://ultralytics.com/images/zidane.jpg')
```
"""
assert return_type in [
"pandas",
"arrow",
], f"Return type should be either `pandas` or `arrow`, but got {return_type}"
img = self._check_imgs_or_idxs(img, idx)
similar = self.query(img, limit=limit)
if return_type == "pandas":
return similar.to_pandas()
elif return_type == "arrow":
return similar
def plot_similar(
self,
img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None,
idx: Union[int, List[int]] = None,
limit: int = 25,
labels: bool = True,
) -> Image.Image:
"""
Plot the similar images. Accepts images or indexes.
Args:
img (str or list): Path to the image or a list of paths to the images.
idx (int or list): Index of the image in the table or a list of indexes.
labels (bool): Whether to plot the labels or not.
limit (int): Number of results to return. Defaults to 25.
Returns:
(PIL.Image): Image containing the plot.
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
similar = exp.plot_similar(img='https://ultralytics.com/images/zidane.jpg')
```
"""
similar = self.get_similar(img, idx, limit, return_type="arrow")
if len(similar) == 0:
LOGGER.info("No results found.")
return None
img = plot_query_result(similar, plot_labels=labels)
return Image.fromarray(img)
def similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> DataFrame:
"""
Calculate the similarity index of all the images in the table. Here, the index will contain the data points that
are max_dist or closer to the image in the embedding space at a given index.
Args:
max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2.
top_k (float): Percentage of the closest data points to consider when counting. Used to apply limit when running
                vector search. Defaults to None.
            force (bool): Whether to overwrite the existing similarity index or not. Defaults to False.
Returns:
(pandas.DataFrame): A dataframe containing the similarity index. Each row corresponds to an image, and columns
include indices of similar images and their respective distances.
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
sim_idx = exp.similarity_index()
```
"""
if self.table is None:
raise ValueError("Table is not created. Please create the table first.")
sim_idx_table_name = f"{self.sim_idx_base_name}_thres_{max_dist}_top_{top_k}".lower()
if sim_idx_table_name in self.connection.table_names() and not force:
LOGGER.info("Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.")
return self.connection.open_table(sim_idx_table_name).to_pandas()
if top_k and not (1.0 >= top_k >= 0.0):
raise ValueError(f"top_k must be between 0.0 and 1.0. Got {top_k}")
if max_dist < 0.0:
raise ValueError(f"max_dist must be greater than 0. Got {max_dist}")
top_k = int(top_k * len(self.table)) if top_k else len(self.table)
top_k = max(top_k, 1)
features = self.table.to_lance().to_table(columns=["vector", "im_file"]).to_pydict()
im_files = features["im_file"]
embeddings = features["vector"]
sim_table = self.connection.create_table(sim_idx_table_name, schema=get_sim_index_schema(), mode="overwrite")
def _yield_sim_idx():
"""Generates a dataframe with similarity indices and distances for images."""
for i in tqdm(range(len(embeddings))):
sim_idx = self.table.search(embeddings[i]).limit(top_k).to_pandas().query(f"_distance <= {max_dist}")
yield [
{
"idx": i,
"im_file": im_files[i],
"count": len(sim_idx),
"sim_im_files": sim_idx["im_file"].tolist(),
}
]
sim_table.add(_yield_sim_idx())
self.sim_index = sim_table
return sim_table.to_pandas()
    def plot_similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> Image.Image:
"""
Plot the similarity index of all the images in the table. Here, the index will contain the data points that are
max_dist or closer to the image in the embedding space at a given index.
Args:
max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2.
top_k (float): Percentage of closest data points to consider when counting. Used to apply limit when
                running vector search. Defaults to None.
            force (bool): Whether to overwrite the existing similarity index or not. Defaults to False.
Returns:
(PIL.Image): Image containing the plot.
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
similarity_idx_plot = exp.plot_similarity_index()
similarity_idx_plot.show() # view image preview
similarity_idx_plot.save('path/to/save/similarity_index_plot.png') # save contents to file
```
"""
sim_idx = self.similarity_index(max_dist=max_dist, top_k=top_k, force=force)
sim_count = sim_idx["count"].tolist()
sim_count = np.array(sim_count)
indices = np.arange(len(sim_count))
# Create the bar plot
plt.bar(indices, sim_count)
# Customize the plot (optional)
plt.xlabel("data idx")
plt.ylabel("Count")
plt.title("Similarity Count")
buffer = BytesIO()
plt.savefig(buffer, format="png")
buffer.seek(0)
# Use Pillow to open the image from the buffer
return Image.fromarray(np.array(Image.open(buffer)))
def _check_imgs_or_idxs(
self, img: Union[str, np.ndarray, List[str], List[np.ndarray], None], idx: Union[None, int, List[int]]
) -> List[np.ndarray]:
if img is None and idx is None:
raise ValueError("Either img or idx must be provided.")
if img is not None and idx is not None:
raise ValueError("Only one of img or idx must be provided.")
if idx is not None:
idx = idx if isinstance(idx, list) else [idx]
img = self.table.to_lance().take(idx, columns=["im_file"]).to_pydict()["im_file"]
return img if isinstance(img, list) else [img]
def ask_ai(self, query):
"""
Ask AI a question.
Args:
query (str): Question to ask.
Returns:
(pandas.DataFrame): A dataframe containing filtered results to the SQL query.
Example:
```python
exp = Explorer()
exp.create_embeddings_table()
answer = exp.ask_ai('Show images with 1 person and 2 dogs')
```
"""
result = prompt_sql_query(query)
try:
df = self.sql_query(result)
except Exception as e:
LOGGER.error("AI generated query is not valid. Please try again with a different prompt")
LOGGER.error(e)
return None
return df
def visualize(self, result):
"""
Visualize the results of a query. TODO.
Args:
result (pyarrow.Table): Table containing the results of a query.
"""
pass
def generate_report(self, result):
"""
Generate a report of the dataset.
TODO
"""
pass
| [
"lancedb.connect"
] | [((1681, 1874), 'ultralytics.data.augment.Format', 'Format', ([], {'bbox_format': '"""xyxy"""', 'normalize': '(False)', 'return_mask': 'self.use_segments', 'return_keypoint': 'self.use_keypoints', 'batch_idx': '(True)', 'mask_ratio': 'hyp.mask_ratio', 'mask_overlap': 'hyp.overlap_mask'}), "(bbox_format='xyxy', normalize=False, return_mask=self.use_segments,\n return_keypoint=self.use_keypoints, batch_idx=True, mask_ratio=hyp.\n mask_ratio, mask_overlap=hyp.overlap_mask)\n", (1687, 1874), False, 'from ultralytics.data.augment import Format\n'), ((2138, 2193), 'ultralytics.utils.checks.check_requirements', 'checks.check_requirements', (["['lancedb>=0.4.3', 'duckdb']"], {}), "(['lancedb>=0.4.3', 'duckdb'])\n", (2163, 2193), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((2244, 2264), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2259, 2264), False, 'import lancedb\n'), ((2515, 2526), 'ultralytics.models.yolo.model.YOLO', 'YOLO', (['model'], {}), '(model)\n', (2519, 2526), False, 'from ultralytics.models.yolo.model import YOLO\n'), ((3858, 3886), 'ultralytics.data.utils.check_det_dataset', 'check_det_dataset', (['self.data'], {}), '(self.data)\n', (3875, 3886), False, 'from ultralytics.data.utils import check_det_dataset\n'), ((8493, 8531), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['f"""Running query: {query}"""'], {}), "(f'Running query: {query}')\n", (8504, 8531), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((8546, 8563), 'duckdb.sql', 'duckdb.sql', (['query'], {}), '(query)\n', (8556, 8563), False, 'import duckdb\n'), ((9525, 9545), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (9540, 9545), False, 'from PIL import Image\n'), ((12170, 12190), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (12185, 12190), False, 'from PIL import Image\n'), ((16442, 16461), 'numpy.array', 'np.array', (['sim_count'], {}), '(sim_count)\n', (16450, 16461), True, 'import numpy as np\n'), ((16546, 16573), 'matplotlib.pyplot.bar', 'plt.bar', (['indices', 'sim_count'], {}), '(indices, sim_count)\n', (16553, 16573), True, 'from matplotlib import pyplot as plt\n'), ((16623, 16645), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""data idx"""'], {}), "('data idx')\n", (16633, 16645), True, 'from matplotlib import pyplot as plt\n'), ((16654, 16673), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (16664, 16673), True, 'from matplotlib import pyplot as plt\n'), ((16682, 16711), 'matplotlib.pyplot.title', 'plt.title', (['"""Similarity Count"""'], {}), "('Similarity Count')\n", (16691, 16711), True, 'from matplotlib import pyplot as plt\n'), ((16729, 16738), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (16736, 16738), False, 'from io import BytesIO\n'), ((16747, 16780), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buffer'], {'format': '"""png"""'}), "(buffer, format='png')\n", (16758, 16780), True, 'from matplotlib import pyplot as plt\n'), ((3319, 3405), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Table already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n 'Table already exists. Reusing it. Pass force=True to overwrite it.')\n", (3330, 3405), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((3507, 3617), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['f"""Table {self.table_name} already exists. Reusing it. 
Pass force=True to overwrite it."""'], {}), "(\n f'Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.'\n )\n", (3518, 3617), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((9393, 9425), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (9404, 9425), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((12037, 12069), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (12048, 12069), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((13647, 13750), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Similarity matrix already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n 'Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.'\n )\n", (13658, 13750), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((1191, 1202), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (1198, 1202), True, 'import numpy as np\n'), ((1256, 1269), 'cv2.imread', 'cv2.imread', (['f'], {}), '(f)\n', (1266, 1269), False, 'import cv2\n'), ((16900, 16918), 'PIL.Image.open', 'Image.open', (['buffer'], {}), '(buffer)\n', (16910, 16918), False, 'from PIL import Image\n'), ((18136, 18235), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""AI generated query is not valid. Please try again with a different prompt"""'], {}), "(\n 'AI generated query is not valid. Please try again with a different prompt'\n )\n", (18148, 18235), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((18238, 18253), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['e'], {}), '(e)\n', (18250, 18253), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((2291, 2301), 'pathlib.Path', 'Path', (['data'], {}), '(data)\n', (2295, 2301), False, 'from pathlib import Path\n'), ((6718, 6737), 'torch.stack', 'torch.stack', (['embeds'], {}), '(embeds)\n', (6729, 6737), False, 'import torch\n')] |
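The sql_query method above notes that filter pushdown would be a better long-term solution than round-tripping through duckdb. A sketch of LanceDB's own predicate pushdown on the embeddings table, assuming the schema produced by create_embeddings_table; the "labels" column name mirrors the docstring examples and is an assumption here.

exp = Explorer()
exp.create_embeddings_table()
query_vec = exp.model.embed(["https://ultralytics.com/images/zidane.jpg"])[0].cpu().numpy()
hits = (
    exp.table.search(query_vec)
    .where("labels LIKE '%person%'", prefilter=True)  # filter rows before the vector search
    .limit(25)
    .to_arrow()
)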
import lancedb
import pyarrow as pa
import json
embedding_models = [  # placeholder entries, not used elsewhere in this module
    "",
    ""
]
class LanceDBAssistant:
def __init__(self, dirpath, filename,n=384):
self.dirpath = dirpath
self.filename = filename
self.db = None
self.create_schema(n)
def create_schema(self,n=384):
self.schema = pa.schema([
pa.field("vector", pa.list_(pa.float32(), n)),
pa.field("item", pa.string()),
pa.field("id", pa.string()),
])
def connect(self):
if self.db is None:
self.db = lancedb.connect(self.dirpath)
def create(self):
self.connect()
table = self.db.create_table(self.filename, schema=self.schema, mode="overwrite")
return table
def open(self):
table=None
try:
ts=self.db.table_names()
if self.filename in ts:
table = self.db.open_table(self.filename)
        except Exception:
            # The table could not be listed or opened; it will be created on demand
            print('Creating a new table')
return table
def add(self, data):
self.connect()
table = self.open()
if table is None:
table = self.create() # Assuming data is a pyarrow.Table
table.add(data=data,
# mode="overwrite" //这个导致了bug,全部覆盖了
)
return self.db[self.filename].head()
def search(self, vector, limit=5):
self.connect()
table = self.open()
res=[]
if table:
res = table.search(vector).select(['id','item']).limit(limit).to_list()
res=[{
'id':r['id'],
'item':json.loads(r['item']),
'_distance':r['_distance']
} for r in res]
return res
def list_tables(self):
self.connect()
# result=[]
# for name in self.db.table_names():
# print(self.db[name].head())
return self.db.table_names()
def delete_table(self,filename):
self.connect()
return self.db.drop_table(filename, ignore_missing=True)
def get_by_id(self,id):
self.connect()
table = self.open()
if table:
items=table.search().where(f"id = '{id}'", prefilter=True).select(['id']).to_list()
for item in items:
if item['id']==id:
return item
return
def update(self,id,item):
self.connect()
table = self.open()
if table:
table.update(where=f"id = '{id}'", values={"item":item})
# dirpath = "tmp/sample-lancedb"
# filename = "my_table2"
# assistant = LanceDBAssistant(dirpath, filename)
# # Create a new table
# assistant.create_schema()
# table = assistant.create(schema)
# # Add new data
# data = [{"vector": [1.3, 1.4], "item": "fizz" },
# {"vector": [9.5, 56.2], "item": "buzz" }]
# assistant.add(data)
# # Search by vector
# vector = [1.3, 1.4] # Your search vector
# results = assistant.search(vector)
# # List all tables
# tables = assistant.list_tables()
# print(results)
# Delete the table
# assistant.delete_table()
 | [
"lancedb.connect"
] | [((574, 603), 'lancedb.connect', 'lancedb.connect', (['self.dirpath'], {}), '(self.dirpath)\n', (589, 603), False, 'import lancedb\n'), ((434, 445), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (443, 445), True, 'import pyarrow as pa\n'), ((475, 486), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (484, 486), True, 'import pyarrow as pa\n'), ((1640, 1661), 'json.loads', 'json.loads', (["r['item']"], {}), "(r['item'])\n", (1650, 1661), False, 'import json\n'), ((386, 398), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (396, 398), True, 'import pyarrow as pa\n')] |
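A small round-trip with the assistant above. Note the implicit contract: item is stored as a JSON string and search() decodes it with json.loads, so callers must json.dumps their payloads. The 384-dimensional zero vector and the ids are illustrative.

import json

assistant = LanceDBAssistant("tmp/sample-lancedb", "my_table", n=384)
vec = [0.0] * 384
assistant.add([{"vector": vec, "item": json.dumps({"title": "hello"}), "id": "doc-1"}])
print(assistant.search(vec, limit=1))  # [{'id': 'doc-1', 'item': {'title': 'hello'}, '_distance': ...}]
assistant.update("doc-1", json.dumps({"title": "hello, updated"}))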
# langchain Chatbot
from langchain.document_loaders import DataFrameLoader
import pandas as pd
from langchain.memory import ConversationSummaryMemory
import lancedb
from langchain.vectorstores import LanceDB
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
def lanceDBConnection(dataset):
db = lancedb.connect("/tmp/lancedb")
table = db.create_table("tb", data=dataset, mode="overwrite")
return table
def vectorStoreSetup(text, OPENAI_KEY):
embedding = OpenAIEmbeddings(openai_api_key=OPENAI_KEY)
emb = embedding.embed_query(text)
dataset = [{"vector": emb, "text": text}]
table = lanceDBConnection(dataset)
df = pd.DataFrame(dataset)
loader = DataFrameLoader(df)
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
vectorstore = LanceDB.from_documents(
documents=all_splits,
embedding=OpenAIEmbeddings(openai_api_key=OPENAI_KEY),
connection=table,
)
return vectorstore
def retrieverSetup(text, OPENAI_KEY):
vectorstore = vectorStoreSetup(text, OPENAI_KEY)
llm = ChatOpenAI(openai_api_key=OPENAI_KEY)
memory = ConversationSummaryMemory(
llm=llm, memory_key="chat_history", return_messages=True
)
retriever = vectorstore.as_retriever()
qa = ConversationalRetrievalChain.from_llm(llm, retriever=retriever, memory=memory)
return qa
def chat(qa, question):
r = qa(question + "?")
return r["answer"]
| [
"lancedb.connect"
] | [((472, 503), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (487, 503), False, 'import lancedb\n'), ((645, 688), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'OPENAI_KEY'}), '(openai_api_key=OPENAI_KEY)\n', (661, 688), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((823, 844), 'pandas.DataFrame', 'pd.DataFrame', (['dataset'], {}), '(dataset)\n', (835, 844), True, 'import pandas as pd\n'), ((858, 877), 'langchain.document_loaders.DataFrameLoader', 'DataFrameLoader', (['df'], {}), '(df)\n', (873, 877), False, 'from langchain.document_loaders import DataFrameLoader\n'), ((924, 987), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(0)'}), '(chunk_size=500, chunk_overlap=0)\n', (954, 987), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1335, 1372), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'OPENAI_KEY'}), '(openai_api_key=OPENAI_KEY)\n', (1345, 1372), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1386, 1473), 'langchain.memory.ConversationSummaryMemory', 'ConversationSummaryMemory', ([], {'llm': 'llm', 'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(llm=llm, memory_key='chat_history',\n return_messages=True)\n", (1411, 1473), False, 'from langchain.memory import ConversationSummaryMemory\n'), ((1537, 1615), 'langchain.chains.ConversationalRetrievalChain.from_llm', 'ConversationalRetrievalChain.from_llm', (['llm'], {'retriever': 'retriever', 'memory': 'memory'}), '(llm, retriever=retriever, memory=memory)\n', (1574, 1615), False, 'from langchain.chains import ConversationalRetrievalChain\n'), ((1132, 1175), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'OPENAI_KEY'}), '(openai_api_key=OPENAI_KEY)\n', (1148, 1175), False, 'from langchain.embeddings import OpenAIEmbeddings\n')] |
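A hypothetical driver for the helpers above; it needs a valid OpenAI key, and the seed text and question are illustrative. Note that chat() appends the trailing question mark itself.

qa = retrieverSetup("LanceDB is an embedded vector database.", OPENAI_KEY="sk-...")
print(chat(qa, "What kind of database is LanceDB"))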
from langchain.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
)
from .base_tool import BaseTool
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import PDFPlumberLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.document_transformers import DoctranQATransformer
from langchain.schema import Document
from langchain.vectorstores import LanceDB
from langchain.chains import RetrievalQA, ConversationalRetrievalChain
from langchain.prompts.prompt import PromptTemplate
from langchain.vectorstores.base import VectorStoreRetriever
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
import lancedb
import pickle
import tempfile
class ShopperTool(BaseTool):
def __init__(self):
super().__init__(
name="Shopper",
model="gpt-4",
temperature=0.0,
uploads=[
{
"input_label": "Upload PDF",
"help_label": "Upload a PDF to be used as the source document.",
},
],
inputs=[
{
"input_label": "Question",
"example": "What is the minimum budget requirements to run a Pinterest ad?",
"button_label": "Ask",
"help_label": "The Q&A tool helps by answering a given question based on the PDF you provided.",
},
],
)
def execute(self, chat, inputs, uploads):
self._ingest_pdf(uploads)
basic_qa_chain = self._basic_qa_chain(chat)
question_input = {
"question": inputs,
"chat_history": [],
}
result = basic_qa_chain.run(question_input)
return result
# 1 Ingest, split, and embed PDF Docs
def _ingest_pdf(self, uploads):
print("Loading data...")
        uploaded_file = uploads  # directly use the upload as an UploadedFile object
        file_bytes = uploaded_file.read()  # read the uploaded file's content as bytes
# Creating a temporary file to write the bytes
with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as temp_file:
temp_file.write(file_bytes)
temp_file_path = temp_file.name
loader = PDFPlumberLoader(temp_file_path)
raw_documents = loader.load()
print("Splitting text...")
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
)
split_documents = text_splitter.split_documents(raw_documents)
print("Creating vectorstore...")
embeddings = OpenAIEmbeddings()
db = lancedb.connect("/tmp/lancedb")
table = db.create_table(
"my_table",
data=[
{
"vector": embeddings.embed_query("Hello World"),
"text": "Hello World",
"id": "1",
}
],
mode="overwrite",
)
vectorstore = LanceDB.from_documents(
split_documents, embeddings, connection=table
)
with open("./vector_db/vectorstore.pkl", "wb") as f:
pickle.dump(vectorstore, f)
def _load_retriever(self):
with open("./vector_db/vectorstore.pkl", "rb") as f:
vectorstore = pickle.load(f)
retriever = VectorStoreRetriever(vectorstore=vectorstore)
return retriever
def _basic_qa_chain(self, chat):
# llm = ChatOpenAI(verbose=True, model_name="gpt-4", temperature=0)
retriever = self._load_retriever()
memory = ConversationBufferMemory(
memory_key="chat_history", return_messages=True
)
chain = ConversationalRetrievalChain.from_llm(
llm=chat, retriever=retriever, memory=memory, verbose=True
)
return chain
| [
"lancedb.connect"
] | [((2402, 2434), 'langchain.document_loaders.PDFPlumberLoader', 'PDFPlumberLoader', (['temp_file_path'], {}), '(temp_file_path)\n', (2418, 2434), False, 'from langchain.document_loaders import PDFPlumberLoader\n'), ((2533, 2599), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(200)'}), '(chunk_size=1000, chunk_overlap=200)\n', (2563, 2599), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2769, 2787), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (2785, 2787), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((2801, 2832), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (2816, 2832), False, 'import lancedb\n'), ((3165, 3234), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['split_documents', 'embeddings'], {'connection': 'table'}), '(split_documents, embeddings, connection=table)\n', (3187, 3234), False, 'from langchain.vectorstores import LanceDB\n'), ((3512, 3557), 'langchain.vectorstores.base.VectorStoreRetriever', 'VectorStoreRetriever', ([], {'vectorstore': 'vectorstore'}), '(vectorstore=vectorstore)\n', (3532, 3557), False, 'from langchain.vectorstores.base import VectorStoreRetriever\n'), ((3757, 3830), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(memory_key='chat_history', return_messages=True)\n", (3781, 3830), False, 'from langchain.memory import ConversationBufferMemory\n'), ((3869, 3971), 'langchain.chains.ConversationalRetrievalChain.from_llm', 'ConversationalRetrievalChain.from_llm', ([], {'llm': 'chat', 'retriever': 'retriever', 'memory': 'memory', 'verbose': '(True)'}), '(llm=chat, retriever=retriever, memory\n =memory, verbose=True)\n', (3906, 3971), False, 'from langchain.chains import RetrievalQA, ConversationalRetrievalChain\n'), ((2229, 2285), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)', 'suffix': '""".pdf"""'}), "(delete=False, suffix='.pdf')\n", (2256, 2285), False, 'import tempfile\n'), ((3330, 3357), 'pickle.dump', 'pickle.dump', (['vectorstore', 'f'], {}), '(vectorstore, f)\n', (3341, 3357), False, 'import pickle\n'), ((3477, 3491), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3488, 3491), False, 'import pickle\n')] |
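The tool above persists the whole vectorstore with pickle between requests. Below is a sketch of an alternative that simply reopens the LanceDB table; the path and table name mirror _ingest_pdf, and the positional LanceDB(connection, embedding) constructor form is an assumption based on common usage.

import lancedb
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import LanceDB

def load_retriever_from_table():
    db = lancedb.connect("/tmp/lancedb")
    table = db.open_table("my_table")  # table written earlier by _ingest_pdf
    vectorstore = LanceDB(table, OpenAIEmbeddings())
    return vectorstore.as_retriever()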
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_community.vectorstores import LanceDB
from langchain.embeddings.openai import OpenAIEmbeddings
import lancedb
import pyarrow as pa
from langchain.document_loaders import TextLoader
def load_character_sheet(path):
loader = TextLoader(path)
return loader.load()
def connect():
embedding_function = OpenAIEmbeddings()
db = lancedb.connect('db')
table = db.open_table('character_sheets')
return LanceDB(table, embedding_function)
def generate_character_embeddings(path):
embeddings = OpenAIEmbeddings()
db = lancedb.connect("db")
character_sheet = load_character_sheet(path)
table = db.create_table(
"character_sheets",
data=[
{
"vector": embeddings.embed_query("Hello World"),
"text": "Hello World",
"id": "1",
}
],
mode="overwrite",
)
    # Embed the character sheet documents and load them into the vector store.
    db = LanceDB.from_documents(character_sheet, embeddings, connection=table)
query = "What is Nebula's equipment?"
docs = db.similarity_search(query)
print(docs)
def retriever():
    # Create a retriever over the character_sheets table, returning the top match
lance_retriever = connect().as_retriever(search_kwargs={'k': 1})
template = """Answer the question based only on the following context:
{context}.
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = ChatOpenAI()
return (
{"context": lance_retriever, "question": RunnablePassthrough()}
| prompt
| model
| StrOutputParser()
)
def retriever_tool(question):
response = retriever().invoke(question)
return response
if __name__ == "__main__":
print(retriever().invoke("What is Mendiete Skiari's traits, bonds and flaws?"))
# for testing the db directly
# db = connect()
# query = "Who is Captain Cura's class?"
# docs = db.similarity_search(query)
# print(docs)
 | [
"lancedb.connect"
] | [((455, 471), 'langchain.document_loaders.TextLoader', 'TextLoader', (['path'], {}), '(path)\n', (465, 471), False, 'from langchain.document_loaders import TextLoader\n'), ((539, 557), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (555, 557), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((568, 589), 'lancedb.connect', 'lancedb.connect', (['"""db"""'], {}), "('db')\n", (583, 589), False, 'import lancedb\n'), ((647, 681), 'langchain_community.vectorstores.LanceDB', 'LanceDB', (['table', 'embedding_function'], {}), '(table, embedding_function)\n', (654, 681), False, 'from langchain_community.vectorstores import LanceDB\n'), ((742, 760), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (758, 760), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((775, 796), 'lancedb.connect', 'lancedb.connect', (['"""db"""'], {}), "('db')\n", (790, 796), False, 'import lancedb\n'), ((1230, 1299), 'langchain_community.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['character_sheet', 'embeddings'], {'connection': 'table'}), '(character_sheet, embeddings, connection=table)\n', (1252, 1299), False, 'from langchain_community.vectorstores import LanceDB\n'), ((1692, 1734), 'langchain.prompts.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', (['template'], {}), '(template)\n', (1724, 1734), False, 'from langchain.prompts import ChatPromptTemplate\n'), ((1747, 1759), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (1757, 1759), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1889, 1906), 'langchain_core.output_parsers.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (1904, 1906), False, 'from langchain_core.output_parsers import StrOutputParser\n'), ((1823, 1844), 'langchain_core.runnables.RunnablePassthrough', 'RunnablePassthrough', ([], {}), '()\n', (1842, 1844), False, 'from langchain_core.runnables import RunnablePassthrough\n')] |
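Hypothetical end-to-end use of the helpers above: build the character_sheets table once from a sheet on disk, then answer questions through the retrieval chain. The file path is illustrative.

generate_character_embeddings("sheets/nebula.txt")
print(retriever_tool("What is Nebula's equipment?"))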
import argparse
import os
from typing import Any
from PIL import Image
import lancedb
from schema import Myntra, get_schema_by_name
def run_vector_search(
database: str,
table_name: str,
schema: Any,
search_query: Any,
limit: int = 6,
output_folder: str = "output",
) -> None:
"""
This function performs a vector search on the specified database and table using the provided search query.
    The search can be performed on either text or image data. The function retrieves the top 'limit' results
    and saves the corresponding images in the 'output_folder' directory. A search query ending in '.jpg' or
    '.png' is treated as an image search; anything else is treated as a text search.
Args:
database (str): The path to the database.
table_name (str): The name of the table.
schema (Schema): The schema to use for converting search results to Pydantic models.
search_query (Any): The search query, can be text or image.
limit (int, optional): The maximum number of results to return. Defaults to 6.
output_folder (str, optional): The folder to save the output images. Defaults to "output".
Returns:
None
Usage:
>>> run_vector_search(database="~/.lancedb", table_name="myntra", schema=Myntra, search_query="Black Kurta")
"""
# Create the output folder if it does not exist
if os.path.exists(output_folder):
for file in os.listdir(output_folder):
os.remove(os.path.join(output_folder, file))
else:
os.makedirs(output_folder)
# Connect to the lancedb database
db = lancedb.connect(database)
# Open the table
table = db.open_table(table_name)
# Check if the search query is an image or text
try:
if search_query.endswith(".jpg") or search_query.endswith(".png"):
search_query = Image.open(search_query)
else:
search_query = search_query
except AttributeError as e:
if str(e) == "'JpegImageFile' object has no attribute 'endswith'":
print("Running via Streamlit, search query is already an array so skipping opening image using Pillow")
else:
raise
# Perform the vector search and retrieve the results
rs = table.search(search_query).limit(limit).to_pydantic(schema)
    # Save the images to the output folder (guard against fewer results than 'limit')
    for i in range(min(limit, len(rs))):
        image_path = os.path.join(output_folder, f"image_{i}.jpg")
        rs[i].image.save(image_path, "JPEG")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Vector Search")
parser.add_argument("--database", type=str, help="Path to the database")
parser.add_argument("--table_name", type=str, help="Name of the table")
parser.add_argument(
"--schema", type=str, help="Schema of the table", default="Myntra"
)
parser.add_argument("--search_query", type=str, help="Search query")
parser.add_argument(
"--limit", type=int, default=6, help="Limit the number of results (default: 6)"
)
parser.add_argument(
"--output_folder", type=str, default="output", help="Output folder path"
)
args = parser.parse_args()
schema = get_schema_by_name(args.schema)
if schema is None:
raise ValueError(f"Unknown schema: {args.schema}")
run_vector_search(
args.database,
args.table_name,
schema,
args.search_query,
args.limit,
args.output_folder,
)
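# Example invocation (illustrative file name and values; the flags mirror the
# argparse definitions above):
#   python run_vector_search.py --database ~/.lancedb --table_name myntra \
#       --schema Myntra --search_query "Black Kurta" --limit 6 --output_folder output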
| [
"lancedb.connect"
] | [((1422, 1451), 'os.path.exists', 'os.path.exists', (['output_folder'], {}), '(output_folder)\n', (1436, 1451), False, 'import os\n'), ((1650, 1675), 'lancedb.connect', 'lancedb.connect', (['database'], {}), '(database)\n', (1665, 1675), False, 'import lancedb\n'), ((2586, 2638), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Vector Search"""'}), "(description='Vector Search')\n", (2609, 2638), False, 'import argparse\n'), ((3248, 3279), 'schema.get_schema_by_name', 'get_schema_by_name', (['args.schema'], {}), '(args.schema)\n', (3266, 3279), False, 'from schema import Myntra, get_schema_by_name\n'), ((1473, 1498), 'os.listdir', 'os.listdir', (['output_folder'], {}), '(output_folder)\n', (1483, 1498), False, 'import os\n'), ((1575, 1601), 'os.makedirs', 'os.makedirs', (['output_folder'], {}), '(output_folder)\n', (1586, 1601), False, 'import os\n'), ((2453, 2498), 'os.path.join', 'os.path.join', (['output_folder', 'f"""image_{i}.jpg"""'], {}), "(output_folder, f'image_{i}.jpg')\n", (2465, 2498), False, 'import os\n'), ((1900, 1924), 'PIL.Image.open', 'Image.open', (['search_query'], {}), '(search_query)\n', (1910, 1924), False, 'from PIL import Image\n'), ((1522, 1555), 'os.path.join', 'os.path.join', (['output_folder', 'file'], {}), '(output_folder, file)\n', (1534, 1555), False, 'import os\n')] |
import streamlit as st
import sqlite3
import streamlit_antd_components as sac
import pandas as pd
import os
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.document_loaders import UnstructuredFileLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import LanceDB
from basecode.authenticate import return_api_key
from langchain.docstore.document import Document
import lancedb
import configparser
import ast
import json
from services.aws import SecretsManager
from openai import OpenAI
class ConfigHandler:
def __init__(self):
self.config = configparser.ConfigParser()
self.config.read("config.ini")
def get_config_values(self, section, key):
value = self.config.get(section, key)
try:
# Try converting the string value to a Python data structure
return ast.literal_eval(value)
except (SyntaxError, ValueError):
# If not a data structure, return the plain string
return value
config_handler = ConfigHandler()
TCH = config_handler.get_config_values("constants", "TCH")
STU = config_handler.get_config_values("constants", "STU")
SA = config_handler.get_config_values("constants", "SA")
AD = config_handler.get_config_values("constants", "AD")
# Create or check for the 'database' directory in the current working directory
cwd = os.getcwd()
WORKING_DIRECTORY = os.path.join(cwd, "database")
if not os.path.exists(WORKING_DIRECTORY):
os.makedirs(WORKING_DIRECTORY)
# Check application environment => GCC or Streamlit
ENV = config_handler.get_config_values("constants", "prototype_env")
if ENV == "GCC":
if SecretsManager.get_secret("sql_ext_path") == "None":
WORKING_DATABASE = os.path.join(
WORKING_DIRECTORY, SecretsManager.get_secret("default_db")
)
else:
WORKING_DATABASE = SecretsManager.get_secret("sql_ext_path")
else:
if st.secrets["sql_ext_path"] == "None":
WORKING_DATABASE = os.path.join(WORKING_DIRECTORY, st.secrets["default_db"])
else:
WORKING_DATABASE = st.secrets["sql_ext_path"]
# os.environ["OPENAI_API_KEY"] = return_api_key()
lancedb_path = os.path.join(WORKING_DIRECTORY, "lancedb")
db = lancedb.connect(lancedb_path)
def fetch_vectorstores_with_usernames():
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
query = """
SELECT
Vector_Stores.vs_id,
Subject.subject_name,
Topic.topic_name,
Vector_Stores.vectorstore_name,
Users.username,
Vector_Stores.sharing_enabled
FROM Vector_Stores
JOIN Users ON Vector_Stores.user_id = Users.user_id
LEFT JOIN Subject ON Vector_Stores.subject = Subject.id
LEFT JOIN Topic ON Vector_Stores.topic = Topic.id;
"""
cursor.execute(query)
data = cursor.fetchall()
conn.close()
return data
def display_vectorstores():
data = fetch_vectorstores_with_usernames()
df = pd.DataFrame(
data,
columns=[
"vs_id",
"subject_name",
"topic_name",
"vectorstore_name",
"username",
"sharing_enabled",
],
)
# Convert the 'sharing_enabled' values
df["sharing_enabled"] = df["sharing_enabled"].apply(lambda x: "✔" if x == 1 else "")
st.dataframe(
df,
use_container_width=True,
column_order=[
"vs_id",
"subject_name",
"topic_name",
"vectorstore_name",
"username",
"sharing_enabled",
],
)
def fetch_all_files():
"""
Fetch all files either shared or based on user type
"""
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Construct the SQL query with JOINs for Subject, Topic, and Users tables
if st.session_state.user["profile_id"] == "SA":
cursor.execute(
"""
SELECT Files.file_id, Files.file_name, Subject.subject_name, Topic.topic_name, Users.username
FROM Files
JOIN Subject ON Files.subject = Subject.id
JOIN Topic ON Files.topic = Topic.id
JOIN Users ON Files.user_id = Users.user_id
"""
)
else:
cursor.execute(
"""
SELECT Files.file_id, Files.file_name, Subject.subject_name, Topic.topic_name, Users.username
FROM Files
JOIN Subject ON Files.subject = Subject.id
JOIN Topic ON Files.topic = Topic.id
JOIN Users ON Files.user_id = Users.user_id
WHERE Files.sharing_enabled = 1
"""
)
files = cursor.fetchall()
formatted_files = [f"({file[0]}) {file[1]} ({file[4]})" for file in files]
conn.close()
return formatted_files
def fetch_file_data(file_id):
"""
Fetch file data given a file id
"""
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
cursor.execute("SELECT data, metadata FROM Files WHERE file_id = ?", (file_id,))
data = cursor.fetchone()
conn.close()
if data:
return data[0], data[1]
else:
return None, None
def insert_topic(org_id, topic_name):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
try:
cursor.execute(
"INSERT INTO Topic (org_id, topic_name) VALUES (?, ?);",
(org_id, topic_name),
)
conn.commit()
return True # Indicates successful insertion
except sqlite3.IntegrityError:
# IntegrityError occurs if topic_name is not unique within the org
return False # Indicates topic_name is not unique within the org
finally:
conn.close()
def insert_subject(org_id, subject_name):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
try:
cursor.execute(
"INSERT INTO Subject (org_id, subject_name) VALUES (?, ?);",
(org_id, subject_name),
)
conn.commit()
return True # Indicates successful insertion
except sqlite3.IntegrityError:
# IntegrityError occurs if subject_name is not unique within the org
return False # Indicates subject_name is not unique within the org
finally:
conn.close()
def select_organization():
with sqlite3.connect(WORKING_DATABASE) as conn:
cursor = conn.cursor()
# Org selection
org_query = "SELECT org_name FROM Organizations"
cursor.execute(org_query)
orgs = cursor.fetchall()
org_names = [org[0] for org in orgs]
# Use a Streamlit selectbox to choose an organization
selected_org_name = st.selectbox("Select an organization:", org_names)
# Retrieve the org_id for the selected organization
cursor.execute(
"SELECT org_id FROM Organizations WHERE org_name = ?;", (selected_org_name,)
)
result = cursor.fetchone()
if result:
org_id = result[0]
st.write(f"The org_id for {selected_org_name} is {org_id}.")
return org_id
else:
st.write(f"Organization '{selected_org_name}' not found in the database.")
return None
def fetch_subjects_by_org(org_id):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Check if the user is a super_admin (org_id is 0)
if org_id == 0:
cursor.execute("SELECT * FROM Subject;")
else:
cursor.execute("SELECT * FROM Subject WHERE org_id = ?;", (org_id,))
subjects = cursor.fetchall()
conn.close()
return subjects
def fetch_topics_by_org(org_id):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Check if the user is a super_admin (org_id is 0)
if org_id == 0:
cursor.execute("SELECT * FROM Topic;")
else:
cursor.execute("SELECT * FROM Topic WHERE org_id = ?;", (org_id,))
topics = cursor.fetchall()
conn.close()
return topics
def split_docs(file_path, meta):
# def split_meta_docs(file, source, tch_code):
loader = UnstructuredFileLoader(file_path)
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
metadata = {"source": meta}
for doc in docs:
doc.metadata.update(metadata)
return docs
def create_lancedb_table(embeddings, meta, table_name):
lancedb_path = os.path.join(WORKING_DIRECTORY, "lancedb")
# LanceDB connection
db = lancedb.connect(lancedb_path)
client = OpenAI()
response = client.embeddings.create(
input="Query Unsuccessful", model="text-embedding-3-small"
)
table = db.create_table(
f"{table_name}",
data=[
{
"vector": response.data[0].embedding,
"text": "Query Unsuccessful",
"id": "1",
"source": f"{meta}",
}
],
mode="overwrite",
)
return table
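# Reopen sketch (hypothetical table name; assumes the same lancedb path as above):
# tbl = lancedb.connect(os.path.join(WORKING_DIRECTORY, "lancedb")).open_table("my_table")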
def save_to_vectorstores(
vs, vstore_input_name, subject, topic, username, share_resource=False
):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Fetch the user's details
cursor.execute("SELECT user_id FROM Users WHERE username = ?", (username,))
user_details = cursor.fetchone()
if not user_details:
st.error("Error: User not found.")
return
user_id = user_details[0]
# If Vector_Store instance exists in session state, then serialize and save
# vs is the documents in json format and vstore_input_name is the name of the table and vectorstore
if vs:
try:
cursor.execute(
"SELECT 1 FROM Vector_Stores WHERE vectorstore_name LIKE ? AND user_id = ?",
(f"%{vstore_input_name}%", user_id),
)
exists = cursor.fetchone()
if exists:
st.error(
"Error: An entry with the same vectorstore_name and user_id already exists."
)
return
if subject is None:
st.error("Error: Subject is missing.")
return
if topic is None:
st.error("Error: Topic is missing.")
return
# Get the subject and topic IDs
cursor.execute("SELECT id FROM Subject WHERE subject_name = ?", (subject,))
subject_id = cursor.fetchone()[0]
cursor.execute("SELECT id FROM Topic WHERE topic_name = ?", (topic,))
topic_id = cursor.fetchone()[0]
# Insert the new row
cursor.execute(
"""
INSERT INTO Vector_Stores (vectorstore_name, documents, user_id, subject, topic, sharing_enabled)
VALUES (?, ?, ?, ?, ?, ?)
""",
(vstore_input_name, vs, user_id, subject_id, topic_id, share_resource),
)
conn.commit()
conn.close()
except Exception as e:
st.error(f"Error in storing documents and vectorstore: {e}")
return
def document_to_dict(doc):
# Assuming 'doc' has 'page_content' and 'metadata' attributes
return {"page_content": doc.page_content, "metadata": doc.metadata}
def dict_to_document(doc_dict):
# Create a Document object from the dictionary
# Adjust this according to how your Document class is defined
return Document(
page_content=doc_dict["page_content"], metadata=doc_dict["metadata"]
)
def create_vectorstore():
os.environ["OPENAI_API_KEY"] = return_api_key()
full_docs = []
st.subheader("Enter the topic and subject for your knowledge base")
embeddings = OpenAIEmbeddings()
if st.session_state.user["profile_id"] == SA:
org_id = select_organization()
if org_id is None:
return
else:
org_id = st.session_state.user["org_id"]
# Fetch all available subjects
subjects = fetch_subjects_by_org(st.session_state.user["org_id"])
subject_names = [
sub[2] for sub in subjects
] # Assuming index 2 holds the subject_name
selected_subject = st.selectbox(
"Select an existing subject or type a new one:",
options=subject_names + ["New Subject"],
)
if selected_subject == "New Subject":
subject = st.text_input("Please enter the new subject name:", max_chars=30)
if subject:
insert_subject(org_id, subject)
else:
subject = selected_subject
# Fetch all available topics
topics = fetch_topics_by_org(st.session_state.user["org_id"])
topic_names = [
topic[2] for topic in topics
] # Assuming index 2 holds the topic_name
selected_topic = st.selectbox(
"Select an existing topic or type a new one:",
options=topic_names + ["New Topic"],
)
if selected_topic == "New Topic":
topic = st.text_input("Please enter the new topic name:", max_chars=30)
if topic:
insert_topic(org_id, topic)
else:
topic = selected_topic
vectorstore_input = st.text_input(
"Please type in a name for your knowledge base:", max_chars=20
)
vs_name = vectorstore_input + f"_({st.session_state.user['username']})"
share_resource = st.checkbox(
"Share this resource", value=True
) # <-- Added this line
# Show the current build of files for the latest database
st.subheader("Select one or more files to build your knowledge base")
files = fetch_all_files()
if files:
selected_files = sac.transfer(
items=files,
label=None,
index=None,
titles=["Uploaded files", "Select files for KB"],
format_func="title",
width="100%",
height=None,
search=True,
pagination=False,
oneway=False,
reload=True,
disabled=False,
return_index=False,
)
# Alert to confirm the creation of knowledge base
st.warning(
"Building your knowledge base will take some time. Please be patient."
)
build = sac.buttons(
[
dict(
label="Build VectorStore", icon="check-circle-fill", color="green"
),
dict(label="Cancel", icon="x-circle-fill", color="red"),
],
label=None,
index=1,
format_func="title",
align="center",
position="top",
size="default",
direction="horizontal",
shape="round",
type="default",
compact=False,
return_index=False,
)
if build == "Build VectorStore" and selected_files:
for s_file in selected_files:
file_id = int(s_file.split("(", 1)[1].split(")", 1)[0])
file_data, meta = fetch_file_data(file_id)
docs = split_docs(file_data, meta)
full_docs.extend(docs)
# convert full_docs to json to store in sqlite
full_docs_dicts = [document_to_dict(doc) for doc in full_docs]
docs_json = json.dumps(full_docs_dicts)
create_lancedb_table(embeddings, meta, vs_name)
save_to_vectorstores(
docs_json,
vs_name,
subject,
topic,
st.session_state.user["username"],
share_resource,
) # Passing the share_resource to the function
st.success("Knowledge Base loaded")
else:
st.write("No files found in the database.")
def load_vectorstore(documents, table_name):
retrieved_docs_dicts = json.loads(documents)
retrieved_docs = [dict_to_document(doc_dict) for doc_dict in retrieved_docs_dicts]
vs = LanceDB.from_documents(
retrieved_docs,
OpenAIEmbeddings(openai_api_key=return_api_key()),
connection=db.open_table(f"{table_name}"),
)
return vs
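# Usage sketch (hypothetical table name and query; 'documents' is the JSON blob
# saved earlier by save_to_vectorstores):
# vs = load_vectorstore(documents, "my_table")
# print(vs.similarity_search("photosynthesis", k=4))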
def delete_lancedb_table(table_name):
lancedb_path = os.path.join(WORKING_DIRECTORY, "lancedb")
# LanceDB connection
db = lancedb.connect(lancedb_path)
db.drop_table(f"{table_name}")
def fetch_vectorstores_by_user_id(user_id):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Fetch vectorstores based on user_id
cursor.execute(
"SELECT vectorstore_name FROM Vector_Stores WHERE user_id = ?;", (user_id,)
)
vectorstores = cursor.fetchall()
conn.close()
return vectorstores
def delete_vectorstores():
st.subheader("Delete VectorStores in Database:")
user_vectorstores = fetch_vectorstores_by_user_id(st.session_state.user["id"])
if user_vectorstores:
vectorstore_names = [vs[0] for vs in user_vectorstores]
selected_vectorstores = st.multiselect(
"Select vectorstores to delete:", options=vectorstore_names
)
confirm_delete = st.checkbox(
"I understand that this action cannot be undone.", value=False
)
if st.button("Delete VectorStore"):
if confirm_delete and selected_vectorstores:
delete_vectorstores_from_db(
selected_vectorstores,
st.session_state.user["id"],
st.session_state.user["profile_id"],
)
st.success(f"Deleted {len(selected_vectorstores)} vectorstores.")
else:
st.warning("Please confirm the deletion action.")
else:
st.write("No vectorstores found in the database.")
def delete_vectorstores_from_db(vectorstore_names, user_id, profile):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
for vectorstore_name in vectorstore_names:
if profile in ["SA", "AD"]:
# Delete the corresponding LanceDB table
delete_lancedb_table(vectorstore_name)
# Delete vectorstore irrespective of the user_id associated with them
cursor.execute(
"DELETE FROM Vector_Stores WHERE vectorstore_name=?;",
(vectorstore_name,),
)
else:
# Delete the corresponding LanceDB table
delete_lancedb_table(vectorstore_name)
# Delete only if the user_id matches
cursor.execute(
"DELETE FROM Vector_Stores WHERE vectorstore_name=? AND user_id=?;",
(vectorstore_name, user_id),
)
# Check if the row was affected
if cursor.rowcount == 0:
st.error(
f"Unable to delete vectorstore '{vectorstore_name}' that is not owned by you."
)
conn.commit() # Commit the changes
conn.close() # Close the connection
| [
"lancedb.connect"
] | [((1396, 1407), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1405, 1407), False, 'import os\n'), ((1428, 1457), 'os.path.join', 'os.path.join', (['cwd', '"""database"""'], {}), "(cwd, 'database')\n", (1440, 1457), False, 'import os\n'), ((2203, 2245), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', '"""lancedb"""'], {}), "(WORKING_DIRECTORY, 'lancedb')\n", (2215, 2245), False, 'import os\n'), ((2251, 2280), 'lancedb.connect', 'lancedb.connect', (['lancedb_path'], {}), '(lancedb_path)\n', (2266, 2280), False, 'import lancedb\n'), ((1466, 1499), 'os.path.exists', 'os.path.exists', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (1480, 1499), False, 'import os\n'), ((1505, 1535), 'os.makedirs', 'os.makedirs', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (1516, 1535), False, 'import os\n'), ((2335, 2368), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (2350, 2368), False, 'import sqlite3\n'), ((2993, 3115), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['vs_id', 'subject_name', 'topic_name', 'vectorstore_name', 'username',\n 'sharing_enabled']"}), "(data, columns=['vs_id', 'subject_name', 'topic_name',\n 'vectorstore_name', 'username', 'sharing_enabled'])\n", (3005, 3115), True, 'import pandas as pd\n'), ((3356, 3511), 'streamlit.dataframe', 'st.dataframe', (['df'], {'use_container_width': '(True)', 'column_order': "['vs_id', 'subject_name', 'topic_name', 'vectorstore_name', 'username',\n 'sharing_enabled']"}), "(df, use_container_width=True, column_order=['vs_id',\n 'subject_name', 'topic_name', 'vectorstore_name', 'username',\n 'sharing_enabled'])\n", (3368, 3511), True, 'import streamlit as st\n'), ((3726, 3759), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (3741, 3759), False, 'import sqlite3\n'), ((4933, 4966), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (4948, 4966), False, 'import sqlite3\n'), ((5259, 5292), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (5274, 5292), False, 'import sqlite3\n'), ((5816, 5849), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (5831, 5849), False, 'import sqlite3\n'), ((7318, 7351), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (7333, 7351), False, 'import sqlite3\n'), ((7708, 7741), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (7723, 7741), False, 'import sqlite3\n'), ((8143, 8176), 'langchain.document_loaders.UnstructuredFileLoader', 'UnstructuredFileLoader', (['file_path'], {}), '(file_path)\n', (8165, 8176), False, 'from langchain.document_loaders import UnstructuredFileLoader\n'), ((8227, 8282), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(0)'}), '(chunk_size=1000, chunk_overlap=0)\n', (8248, 8282), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((8519, 8561), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', '"""lancedb"""'], {}), "(WORKING_DIRECTORY, 'lancedb')\n", (8531, 8561), False, 'import os\n'), ((8596, 8625), 'lancedb.connect', 'lancedb.connect', (['lancedb_path'], {}), '(lancedb_path)\n', (8611, 8625), False, 'import lancedb\n'), ((8640, 8648), 'openai.OpenAI', 'OpenAI', ([], {}), '()\n', (8646, 8648), False, 'from openai import OpenAI\n'), ((9201, 9234), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (9216, 9234), False, 'import sqlite3\n'), ((11528, 11606), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': "doc_dict['page_content']", 'metadata': "doc_dict['metadata']"}), "(page_content=doc_dict['page_content'], metadata=doc_dict['metadata'])\n", (11536, 11606), False, 'from langchain.docstore.document import Document\n'), ((11684, 11700), 'basecode.authenticate.return_api_key', 'return_api_key', ([], {}), '()\n', (11698, 11700), False, 'from basecode.authenticate import return_api_key\n'), ((11724, 11791), 'streamlit.subheader', 'st.subheader', (['"""Enter the topic and subject for your knowledge base"""'], {}), "('Enter the topic and subject for your knowledge base')\n", (11736, 11791), True, 'import streamlit as st\n'), ((11809, 11827), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (11825, 11827), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((12257, 12364), 'streamlit.selectbox', 'st.selectbox', (['"""Select an existing subject or type a new one:"""'], {'options': "(subject_names + ['New Subject'])"}), "('Select an existing subject or type a new one:', options=\n subject_names + ['New Subject'])\n", (12269, 12364), True, 'import streamlit as st\n'), ((12845, 12946), 'streamlit.selectbox', 'st.selectbox', (['"""Select an existing topic or type a new one:"""'], {'options': "(topic_names + ['New Topic'])"}), "('Select an existing topic or type a new one:', options=\n topic_names + ['New Topic'])\n", (12857, 12946), True, 'import streamlit as st\n'), ((13208, 13285), 'streamlit.text_input', 'st.text_input', (['"""Please type in a name for your knowledge base:"""'], {'max_chars': '(20)'}), "('Please type in a name for your knowledge base:', max_chars=20)\n", (13221, 13285), True, 'import streamlit as st\n'), ((13397, 13443), 'streamlit.checkbox', 'st.checkbox', (['"""Share this resource"""'], {'value': '(True)'}), "('Share this resource', value=True)\n", (13408, 13443), True, 'import streamlit as st\n'), ((13548, 13617), 'streamlit.subheader', 'st.subheader', (['"""Select one or more files to build your knowledge base"""'], {}), "('Select one or more files to build your knowledge base')\n", (13560, 13617), True, 'import streamlit as st\n'), ((15883, 15904), 'json.loads', 'json.loads', (['documents'], {}), '(documents)\n', (15893, 15904), False, 'import json\n'), ((16238, 16280), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', '"""lancedb"""'], {}), "(WORKING_DIRECTORY, 'lancedb')\n", (16250, 16280), False, 'import os\n'), ((16315, 16344), 'lancedb.connect', 'lancedb.connect', (['lancedb_path'], {}), '(lancedb_path)\n', (16330, 16344), False, 'import lancedb\n'), ((16437, 16470), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (16452, 16470), False, 'import sqlite3\n'), ((16763, 16811), 'streamlit.subheader', 'st.subheader', (['"""Delete VectorStores in Database:"""'], {}), "('Delete VectorStores in Database:')\n", (16775, 16811), True, 'import streamlit as st\n'), ((17871, 17904), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (17886, 17904), False, 'import sqlite3\n'), ((621, 648), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (646, 648), False, 'import configparser\n'), ((1683, 1724), 'services.aws.SecretsManager.get_secret', 'SecretsManager.get_secret', (['"""sql_ext_path"""'], {}), "('sql_ext_path')\n", (1708, 1724), False, 'from services.aws import SecretsManager\n'), ((1895, 1936), 'services.aws.SecretsManager.get_secret', 'SecretsManager.get_secret', (['"""sql_ext_path"""'], {}), "('sql_ext_path')\n", (1920, 1936), False, 'from services.aws import SecretsManager\n'), ((2015, 2072), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', "st.secrets['default_db']"], {}), "(WORKING_DIRECTORY, st.secrets['default_db'])\n", (2027, 2072), False, 'import os\n'), ((6366, 6399), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (6381, 6399), False, 'import sqlite3\n'), ((6725, 6775), 'streamlit.selectbox', 'st.selectbox', (['"""Select an organization:"""', 'org_names'], {}), "('Select an organization:', org_names)\n", (6737, 6775), True, 'import streamlit as st\n'), ((9445, 9479), 'streamlit.error', 'st.error', (['"""Error: User not found."""'], {}), "('Error: User not found.')\n", (9453, 9479), True, 'import streamlit as st\n'), ((12444, 12509), 'streamlit.text_input', 'st.text_input', (['"""Please enter the new subject name:"""'], {'max_chars': '(30)'}), "('Please enter the new subject name:', max_chars=30)\n", (12457, 12509), True, 'import streamlit as st\n'), ((13020, 13083), 'streamlit.text_input', 'st.text_input', (['"""Please enter the new topic name:"""'], {'max_chars': '(30)'}), "('Please enter the new topic name:', max_chars=30)\n", (13033, 13083), True, 'import streamlit as st\n'), ((13687, 13941), 'streamlit_antd_components.transfer', 'sac.transfer', ([], {'items': 'files', 'label': 'None', 'index': 'None', 'titles': "['Uploaded files', 'Select files for KB']", 'format_func': '"""title"""', 'width': '"""100%"""', 'height': 'None', 'search': '(True)', 'pagination': '(False)', 'oneway': '(False)', 'reload': '(True)', 'disabled': '(False)', 'return_index': '(False)'}), "(items=files, label=None, index=None, titles=['Uploaded files',\n 'Select files for KB'], format_func='title', width='100%', height=None,\n search=True, pagination=False, oneway=False, reload=True, disabled=\n False, return_index=False)\n", (13699, 13941), True, 'import streamlit_antd_components as sac\n'), ((14163, 14250), 'streamlit.warning', 'st.warning', (['"""Building your knowledge base will take some time. Please be patient."""'], {}), "(\n 'Building your knowledge base will take some time. Please be patient.')\n", (14173, 14250), True, 'import streamlit as st\n'), ((15765, 15808), 'streamlit.write', 'st.write', (['"""No files found in the database."""'], {}), "('No files found in the database.')\n", (15773, 15808), True, 'import streamlit as st\n'), ((17018, 17093), 'streamlit.multiselect', 'st.multiselect', (['"""Select vectorstores to delete:"""'], {'options': 'vectorstore_names'}), "('Select vectorstores to delete:', options=vectorstore_names)\n", (17032, 17093), True, 'import streamlit as st\n'), ((17141, 17216), 'streamlit.checkbox', 'st.checkbox', (['"""I understand that this action cannot be undone."""'], {'value': '(False)'}), "('I understand that this action cannot be undone.', value=False)\n", (17152, 17216), True, 'import streamlit as st\n'), ((17251, 17282), 'streamlit.button', 'st.button', (['"""Delete VectorStore"""'], {}), "('Delete VectorStore')\n", (17260, 17282), True, 'import streamlit as st\n'), ((17737, 17787), 'streamlit.write', 'st.write', (['"""No vectorstores found in the database."""'], {}), "('No vectorstores found in the database.')\n", (17745, 17787), True, 'import streamlit as st\n'), ((887, 910), 'ast.literal_eval', 'ast.literal_eval', (['value'], {}), '(value)\n', (903, 910), False, 'import ast\n'), ((1808, 1847), 'services.aws.SecretsManager.get_secret', 'SecretsManager.get_secret', (['"""default_db"""'], {}), "('default_db')\n", (1833, 1847), False, 'from services.aws import SecretsManager\n'), ((7058, 7118), 'streamlit.write', 'st.write', (['f"""The org_id for {selected_org_name} is {org_id}."""'], {}), "(f'The org_id for {selected_org_name} is {org_id}.')\n", (7066, 7118), True, 'import streamlit as st\n'), ((7171, 7245), 'streamlit.write', 'st.write', (['f"""Organization \'{selected_org_name}\' not found in the database."""'], {}), '(f"Organization \'{selected_org_name}\' not found in the database.")\n', (7179, 7245), True, 'import streamlit as st\n'), ((15332, 15359), 'json.dumps', 'json.dumps', (['full_docs_dicts'], {}), '(full_docs_dicts)\n', (15342, 15359), False, 'import json\n'), ((15710, 15745), 'streamlit.success', 'st.success', (['"""Knowledge Base loaded"""'], {}), "('Knowledge Base loaded')\n", (15720, 15745), True, 'import streamlit as st\n'), ((10002, 10098), 'streamlit.error', 'st.error', (['"""Error: An entry with the same vectorstore_name and user_id already exists."""'], {}), "(\n 'Error: An entry with the same vectorstore_name and user_id already exists.'\n )\n", (10010, 10098), True, 'import streamlit as st\n'), ((10199, 10237), 'streamlit.error', 'st.error', (['"""Error: Subject is missing."""'], {}), "('Error: Subject is missing.')\n", (10207, 10237), True, 'import streamlit as st\n'), ((10308, 10344), 'streamlit.error', 'st.error', (['"""Error: Topic is missing."""'], {}), "('Error: Topic is missing.')\n", (10316, 10344), True, 'import streamlit as st\n'), ((11119, 11179), 'streamlit.error', 'st.error', (['f"""Error in storing documents and vectorstore: {e}"""'], {}), "(f'Error in storing documents and vectorstore: {e}')\n", (11127, 11179), True, 'import streamlit as st\n'), ((16089, 16105), 'basecode.authenticate.return_api_key', 'return_api_key', ([], {}), '()\n', (16103, 16105), False, 'from basecode.authenticate import return_api_key\n'), ((17669, 17718), 'streamlit.warning', 'st.warning', (['"""Please confirm the deletion action."""'], {}), "('Please confirm the deletion action.')\n", (17679, 17718), True, 'import streamlit as st\n'), ((18791, 18889), 'streamlit.error', 'st.error', (['f"""Unable to delete vectorstore \'{vectorstore_name}\' that is not owned by you."""'], {}), '(\n    f"Unable to delete vectorstore \'{vectorstore_name}\' that is not owned by you."\n    )\n', (18799, 18889), True, 'import streamlit as st\n')]
# import libraries
import re
import gradio as gr
from typing import List, Union
import lancedb
from langchain.vectorstores import LanceDB
from langchain.llms import CTransformers
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.embeddings import HuggingFaceBgeEmbeddings
from langchain.document_loaders import WebBaseLoader
class ChatbotHelper:
def __init__(self):
self.chatbot_instance = None
self.chat_history = []
self.chunks = None
def find_urls(self, text: str) -> List[str]:
url_pattern = re.compile(r"https?://\S+|www\.\S+")
return url_pattern.findall(text)
    def initialize_chatbot(self, urls: List[str]):
        documents = self.load_website_content(urls)
        # Keep the chunks on the instance so embed_user_query() can reuse them
        self.chunks = self.split_text(documents)
        embedder = self.bge_embedding(self.chunks)
        vectorstore = self.create_vector_store(self.chunks, embedder)
        retriever = self.create_retriever(vectorstore)
        self.chatbot_instance = self.create_chatbot(retriever)
        return "Chatbot initialized! How can I assist you? You can now ask your questions."
def load_website_content(self, urls):
print("Loading website(s) into Documents...")
documents = WebBaseLoader(web_path=urls).load()
print("Done loading website(s).")
return documents
def load_llm(self):
# download your llm in system or use it else
# llm = CTransformers(
# model="mistral-7b-instruct-v0.1.Q5_K_M.gguf",
# model_type="mistral"
# )
llm = CTransformers(
model="TheBloke/Mistral-7B-v0.1-GGUF",
model_file="mistral-7b-v0.1.Q4_K_M.gguf",
model_type="mistral",
)
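        # Note: with a Hub repo id, CTransformers downloads the GGUF weights on
        # first use and reuses the local cache on later runs.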
return llm
def split_text(self, documents):
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=120, chunk_overlap=20, length_function=len
)
chunks = text_splitter.transform_documents(documents)
print("Done splitting documents.")
return chunks
def bge_embedding(self, chunks):
print("Creating bge embedder...")
model_name = "BAAI/bge-base-en"
encode_kwargs = {"normalize_embeddings": True}
embedder = HuggingFaceBgeEmbeddings(
model_name=model_name,
model_kwargs={"device": "cpu"},
encode_kwargs=encode_kwargs,
)
return embedder
def create_vector_store(self, chunks, embedder):
print("Creating vectorstore...")
db = lancedb.connect("/tmp/lancedb")
table = db.create_table(
"pdf_search",
data=[
{
"vector": embedder.embed_query("Hello World"),
"text": "Hello World",
"id": "1",
}
],
mode="overwrite",
)
vectorstore = LanceDB.from_documents(chunks, embedder, connection=table)
return vectorstore
def create_retriever(self, vectorstore):
print("Creating vectorstore retriever...")
retriever = vectorstore.as_retriever()
return retriever
def embed_user_query(self, query):
if self.chunks is None:
return "Chatbot not initialized. Please provide a URL first."
core_embeddings_model = self.bge_embedding(self.chunks)
embedded_query = core_embeddings_model.embed_query(query)
return embedded_query
def create_chatbot(self, retriever):
llm = self.load_llm()
memory = ConversationBufferMemory(
memory_key="chat_history", return_messages=True
)
conversation_chain = ConversationalRetrievalChain.from_llm(
llm=llm, retriever=retriever, memory=memory
)
return conversation_chain
def chat(self, conversation_chain, input):
return conversation_chain.run(input)
def respond(self, message):
if message.lower() == "clear":
self.chatbot_instance = None
self.chat_history.clear()
return "", self.chat_history
urls = self.find_urls(message)
if not self.chatbot_instance and urls:
bot_message = self.initialize_chatbot(urls)
else:
if self.chatbot_instance:
bot_message = self.chat(self.chatbot_instance, message)
else:
bot_message = "Please provide a URL to initialize the chatbot first, then ask any questions related to that site."
self.chat_history.append((message, bot_message))
chat_history_text = "\n".join(
[f"User: {msg[0]}\nBot: {msg[1]}\n" for msg in self.chat_history]
)
return bot_message
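    # Usage sketch (hypothetical URL and question; exercises the helper without the UI):
    # helper = ChatbotHelper()
    # helper.respond("https://example.com")  # first message with a URL initializes the chatbot
    # print(helper.respond("What does the page say?"))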
def run_interface(self):
iface = gr.Interface(
fn=self.respond,
title="Chatbot with URL or any website ",
inputs=gr.Textbox(
label="Your Query", placeholder="Type your query here...", lines=5
),
outputs=[
gr.Textbox(
label="Chatbot Response",
type="text",
placeholder="Chatbot response will appear here.",
lines=10,
)
],
)
iface.launch()
if __name__ == "__main__":
chatbot_helper = ChatbotHelper()
chatbot_helper.run_interface()
| [
"lancedb.connect"
] | [((683, 721), 're.compile', 're.compile', (['"""https?://\\\\S+|www\\\\.\\\\S+"""'], {}), "('https?://\\\\S+|www\\\\.\\\\S+')\n", (693, 721), False, 'import re\n'), ((1668, 1789), 'langchain.llms.CTransformers', 'CTransformers', ([], {'model': '"""TheBloke/Mistral-7B-v0.1-GGUF"""', 'model_file': '"""mistral-7b-v0.1.Q4_K_M.gguf"""', 'model_type': '"""mistral"""'}), "(model='TheBloke/Mistral-7B-v0.1-GGUF', model_file=\n 'mistral-7b-v0.1.Q4_K_M.gguf', model_type='mistral')\n", (1681, 1789), False, 'from langchain.llms import CTransformers\n'), ((1913, 2002), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(120)', 'chunk_overlap': '(20)', 'length_function': 'len'}), '(chunk_size=120, chunk_overlap=20,\n length_function=len)\n', (1943, 2002), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2342, 2454), 'langchain.embeddings.HuggingFaceBgeEmbeddings', 'HuggingFaceBgeEmbeddings', ([], {'model_name': 'model_name', 'model_kwargs': "{'device': 'cpu'}", 'encode_kwargs': 'encode_kwargs'}), "(model_name=model_name, model_kwargs={'device':\n 'cpu'}, encode_kwargs=encode_kwargs)\n", (2366, 2454), False, 'from langchain.embeddings import HuggingFaceBgeEmbeddings\n'), ((2630, 2661), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (2645, 2661), False, 'import lancedb\n'), ((2994, 3052), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['chunks', 'embedder'], {'connection': 'table'}), '(chunks, embedder, connection=table)\n', (3016, 3052), False, 'from langchain.vectorstores import LanceDB\n'), ((3644, 3717), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(memory_key='chat_history', return_messages=True)\n", (3668, 3717), False, 'from langchain.memory import ConversationBufferMemory\n'), ((3769, 3856), 'langchain.chains.ConversationalRetrievalChain.from_llm', 'ConversationalRetrievalChain.from_llm', ([], {'llm': 'llm', 'retriever': 'retriever', 'memory': 'memory'}), '(llm=llm, retriever=retriever, memory=\n memory)\n', (3806, 3856), False, 'from langchain.chains import ConversationalRetrievalChain\n'), ((1337, 1365), 'langchain.document_loaders.WebBaseLoader', 'WebBaseLoader', ([], {'web_path': 'urls'}), '(web_path=urls)\n', (1350, 1365), False, 'from langchain.document_loaders import WebBaseLoader\n'), ((4984, 5062), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""Your Query"""', 'placeholder': '"""Type your query here..."""', 'lines': '(5)'}), "(label='Your Query', placeholder='Type your query here...', lines=5)\n", (4994, 5062), True, 'import gradio as gr\n'), ((5132, 5246), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""Chatbot Response"""', 'type': '"""text"""', 'placeholder': '"""Chatbot response will appear here."""', 'lines': '(10)'}), "(label='Chatbot Response', type='text', placeholder=\n 'Chatbot response will appear here.', lines=10)\n", (5142, 5246), True, 'import gradio as gr\n')] |
# load_pdf.py - Loads PDF documents into the LanceDB vector store
## Imports:
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import LanceDB
import dotenv
import lancedb
import os
from pypdf import PdfReader
## Set Env Variables
dotenv.load_dotenv()
if os.environ.get('OPENAI_API_KEY') is None:
raise ValueError("Env OPENAI_API_KEY not set")
else:
OAI_TOKEN = os.environ.get('OPENAI_API_KEY')
filename = "./backend/utilities/NAME_OF_PDF.pdf"
## Set up connection to OpenAI
embeddings = OpenAIEmbeddings()
## Set up knowledge VectorStore
db_name = "./helios_kb.db"
table_name = "helios_kb"
db = lancedb.connect(db_name)
if table_name not in db.table_names():
table = db.create_table(
"helios_kb",
data=[
{
"vector": embeddings.embed_query("You are Helios, an AI chatbot that can perform background research tasks."),
"text": "You are Helios, an AI chatbot that can perform background research tasks with access to the internet.",
"id": "1",
}
],
mode="create",
)
else:
table = db.open_table(table_name)
vectorstore = LanceDB(connection=table, embedding=embeddings)
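# Note: add_texts() below embeds each text with the OpenAI embeddings attached to
# this store and appends the resulting rows to the 'helios_kb' table.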
## Load and split PDF file
reader = PdfReader(filename)
parts = []
def visitor_body(text, cm, tm, font_dict, font_size):
y = cm[5]
if y > 50 and y < 720:
parts.append(text)
page_id = 0
for page in reader.pages:
print("Loading Page " + str(page_id) + "...")
text = page.extract_text(visitor_text=visitor_body)
text_splitter = CharacterTextSplitter(
separator = "\n\n",
chunk_size = 2500,
chunk_overlap = 250,
length_function = len,
is_separator_regex = False,
)
docs = text_splitter.create_documents([text])
    for doc in docs:
        # Store each chunk's own text (not the whole page) alongside its metadata
        vectorstore.add_texts(texts=[doc.page_content], metadatas=[{"filename": filename, "page_number": page_id}])
page_id += 1 | [
"lancedb.connect"
] | [((319, 339), 'dotenv.load_dotenv', 'dotenv.load_dotenv', ([], {}), '()\n', (337, 339), False, 'import dotenv\n'), ((586, 604), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (602, 604), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((695, 719), 'lancedb.connect', 'lancedb.connect', (['db_name'], {}), '(db_name)\n', (710, 719), False, 'import lancedb\n'), ((1233, 1280), 'langchain.vectorstores.LanceDB', 'LanceDB', ([], {'connection': 'table', 'embedding': 'embeddings'}), '(connection=table, embedding=embeddings)\n', (1240, 1280), False, 'from langchain.vectorstores import LanceDB\n'), ((1318, 1337), 'pypdf.PdfReader', 'PdfReader', (['filename'], {}), '(filename)\n', (1327, 1337), False, 'from pypdf import PdfReader\n'), ((351, 383), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (365, 383), False, 'import os\n'), ((458, 490), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (472, 490), False, 'import os\n'), ((1637, 1763), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""\n\n"""', 'chunk_size': '(2500)', 'chunk_overlap': '(250)', 'length_function': 'len', 'is_separator_regex': '(False)'}), "(separator='\\n\\n', chunk_size=2500, chunk_overlap=250,\n length_function=len, is_separator_regex=False)\n", (1658, 1763), False, 'from langchain.text_splitter import CharacterTextSplitter\n')] |
import lancedb
from langchain.vectorstores import LanceDB
from langchain.document_loaders import DirectoryLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
db = lancedb.connect(".lance-data")
path = "/workspace/flancian"
loader = DirectoryLoader(path, glob="**/*.md")
data = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
documents = text_splitter.split_documents(data)
embeddings = OpenAIEmbeddings()
table = db.create_table(
"journal",
data=[
{
"vector": embeddings.embed_query("Hello World"),
"text": "Hello World",
"id": "1",
"source": "test"
}
],
mode="overwrite",
)
vectorstore = LanceDB.from_documents(documents, embeddings, connection=table)
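# Query sketch (hypothetical prompt; assumes the journal documents were indexed above):
# print(vectorstore.similarity_search("What did I write about last week?", k=3))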
| [
"lancedb.connect"
] | [((233, 263), 'lancedb.connect', 'lancedb.connect', (['""".lance-data"""'], {}), "('.lance-data')\n", (248, 263), False, 'import lancedb\n'), ((302, 339), 'langchain.document_loaders.DirectoryLoader', 'DirectoryLoader', (['path'], {'glob': '"""**/*.md"""'}), "(path, glob='**/*.md')\n", (317, 339), False, 'from langchain.document_loaders import DirectoryLoader\n'), ((377, 432), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(0)'}), '(chunk_size=1000, chunk_overlap=0)\n', (398, 432), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((494, 512), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (510, 512), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((763, 826), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['documents', 'embeddings'], {'connection': 'table'}), '(documents, embeddings, connection=table)\n', (785, 826), False, 'from langchain.vectorstores import LanceDB\n')] |
import lancedb
import pyarrow as pa
import pandas as pd
# Connect to the database
uri = "/tmp/sample-lancedb"
db = lancedb.connect(uri)
schema = pa.schema([
pa.field("unique_id", pa.string()),
pa.field("embedded_user_input", pa.list_(pa.list_(pa.float32()))),
pa.field("metadata", pa.struct([
pa.field("message", pa.string()),
pa.field("speaker", pa.string()),
pa.field("time", pa.float64()),
pa.field("timestring", pa.string()),
pa.field("uuid", pa.string())
]))
])
# Create the table with the defined schema
table_name = "lance-table"
if table_name in db.table_names():
db.drop_table(table_name) # Drop the table if it already exists
tbl = db.create_table(table_name, schema=schema)
# Insert the provided data into the table
data = {
'unique_id': '954a51d7-8ac8-4b24-8d56-612e63ff15fb',
'embedded_user_input': [[-6.26545027e-02, -6.17343448e-02, 5.13638668e-02,
-2.15298426e-03, 6.47769943e-02, -5.32028712e-02,
-6.60635531e-02, -3.18052359e-02, -7.25502372e-02,
8.14180169e-03, 7.64630958e-02, 7.56798983e-02,
-4.16797139e-02, 8.12164322e-03, 2.48537250e-02,
5.01681454e-02, 6.32003173e-02, -6.87251166e-02,
6.88732713e-02, -5.29278964e-02, -3.98336835e-02,
-2.24619396e-02, 6.21254696e-03, 6.02457719e-03,
-3.04912337e-05, 4.78344224e-02, 4.24719378e-02,
1.55452816e-02, -5.47493547e-02, 9.00838431e-03,
6.09557740e-02, 3.96770723e-02, -1.97340697e-02,
-2.08682027e-02, 4.31839637e-02, -5.55636436e-02,
6.30072085e-03, -2.54710689e-02, 1.57420635e-02,
-5.81943579e-02, 5.57490699e-02, -3.01882587e-02,
-2.16408260e-02, 4.52772006e-02, -8.47738385e-02,
-3.32002342e-02, -1.52017135e-04, 6.10890649e-02,
3.87536660e-02, -1.94792040e-02, 8.70911554e-02,
8.86067227e-02, 3.49610522e-02, 7.67428428e-03,
-2.34412830e-02, 4.17789407e-02, -1.32130338e-02,
1.38202857e-03, -4.92214859e-02, -6.16745465e-02,
-5.19866794e-02, -7.64950588e-02, -2.83730943e-02,
-6.44062757e-02, 2.43161842e-02, -1.89856961e-02,
-3.92607376e-02, 6.24855123e-02, -6.58661798e-02,
4.45577130e-02, -5.86516522e-02, -3.96902673e-02,
-1.52153037e-02, -2.17174198e-02, 3.74496356e-02,
-1.65620521e-02, 4.83789034e-02, -1.57914069e-02,
3.10944263e-02, 5.05946614e-02, 2.59266607e-03,
3.32262553e-02, -8.67186673e-03, -7.76251452e-03,
-5.36308028e-02, -3.72965150e-02, 4.43223724e-03,
-2.27497462e-02, -2.21967441e-03, 5.01658709e-04,
-2.42421497e-02, 1.36928298e-02, -4.76891249e-02,
7.97025338e-02, -3.50321233e-02, -4.04987223e-02,
-4.54532690e-02, -1.28967864e-02, 4.91136014e-02,
-7.35521242e-02, 4.71541584e-02, -6.14574067e-02,
-7.49642402e-02, -6.87192893e-03, -3.81468870e-02,
-3.45312506e-02, 6.79960325e-02, -4.25949283e-02,
3.67511585e-02, -5.19936858e-03, 4.17536497e-03,
-5.79770480e-04, -4.82287556e-02, -5.12428135e-02,
4.06234413e-02, 8.19317345e-03, -3.30924615e-02,
3.97548154e-02, -2.01502815e-02, -7.69829825e-02,
1.03119072e-02, 4.01838385e-02, -2.14411654e-02,
-3.10645811e-03, -3.35901007e-02, 3.87730636e-02,
4.81218584e-02, 3.33287269e-02, 6.81211650e-02,
-2.04943419e-02, 5.63151669e-03, 6.00613914e-02,
-5.66559546e-02, 3.37162614e-02, -1.73472352e-02,
6.10585660e-02, -8.43226537e-03, -2.21500266e-02,
4.09952849e-02, -1.46842571e-02, 7.34459050e-03,
-3.81853282e-02, 7.09644631e-02, -5.92554510e-02,
4.73418506e-03, -3.16169150e-02, 4.51507978e-02,
-1.32356370e-02, -7.91342650e-03, -6.76603466e-02,
-2.19407026e-02, -4.68791090e-02, -3.35567333e-02,
-9.19987273e-04, -3.93564962e-02, 6.74620830e-03,
1.05946194e-02, -3.96276303e-02, 5.13323732e-02,
6.83040172e-02, -5.59230559e-02, -3.87670770e-02,
4.26476151e-02, 1.18510174e-02, -6.79494292e-02,
-2.71163080e-02, -4.61316220e-02, -2.41160076e-02,
2.34598271e-03, -3.99308512e-03, -5.53577878e-02,
1.51954480e-02, 1.42454393e-02, -3.71248787e-03,
-4.11541760e-02, 2.50568427e-02, -1.15930280e-02,
5.77302836e-02, 3.23428623e-02, 3.74678262e-02,
7.69987181e-02, 1.31112942e-02, -4.07879986e-02,
-4.81924741e-03, -3.93562727e-02, 4.73114438e-02,
-2.02127416e-02, -1.57226510e-02, 2.18385682e-02,
7.14582279e-02, -1.11324033e-02, 6.35397434e-02,
3.72483805e-02, 7.68801803e-03, 6.50668740e-02,
1.46127986e-02, 1.64960064e-02, 6.61863834e-02,
7.23002329e-02, 2.75923219e-02, -4.38374653e-02,
2.13374514e-02, -7.04232827e-02, 8.42249542e-02,
-3.94568639e-03, -1.17944321e-02, 2.36889012e-02,
8.66850466e-02, -7.39033520e-02, 2.15587541e-02,
3.52543704e-02, 8.93795788e-02, 5.42501360e-02,
3.31024593e-03, 2.50301952e-03, 6.62300959e-02,
-3.79833840e-02, 7.60603398e-02, -6.54230500e-03,
-4.43211421e-02, 7.80377015e-02, -3.97722274e-02,
5.18550314e-02, -3.09128538e-02, 1.53190056e-02,
-5.93643598e-02, -3.69574539e-02, -6.92243427e-02,
-2.22874433e-02, -3.23781446e-02, 2.22802069e-02,
2.45760716e-02, 5.31888232e-02, 7.27956295e-02,
5.55619411e-02, -6.03345521e-02, 3.58165316e-02,
6.69000149e-02, 2.63721589e-02, 1.91009603e-02,
-1.16844280e-02, -4.99990620e-02, 3.77958380e-02,
4.03854139e-02, -9.94629227e-03, 6.78341761e-02,
2.35567279e-02, 2.58196965e-02, -8.80458131e-02,
-7.12908758e-03, 1.12213036e-02, -2.64896336e-03,
-5.26964106e-02, -3.29557583e-02, -7.95157999e-02,
5.94479367e-02, -5.27230166e-02, -5.90907447e-02,
1.59120951e-02, 2.45965142e-02, -2.67566498e-02,
5.75914308e-02, -7.28787407e-02, 1.15116527e-02,
1.23289870e-02, 6.35065883e-02, 4.44619134e-02,
-8.39941204e-02, -2.16927063e-02, 2.35904194e-02,
-7.08061159e-02, -2.13977713e-02, 4.14545052e-02,
-6.32385015e-02, 1.25243757e-02, 3.45105454e-02,
5.97908460e-02, 1.73583124e-02, 6.37128502e-02,
-5.69128012e-03, 7.65267909e-02, -9.17969458e-03,
1.25115225e-02, 6.05608989e-03, 7.15736905e-03,
-2.88676023e-02, 4.63359542e-02, 5.11857346e-02,
6.64551631e-02, -3.90927382e-02, 9.19730216e-03,
-4.83568460e-02, 8.18757340e-02, -5.08898273e-02,
8.55021626e-02, 4.15789261e-02, 2.55660247e-02,
2.67120358e-02, -8.67374390e-02, 1.26434006e-02,
-4.34153043e-02, -6.69072196e-02, 6.79860786e-02,
-6.08267151e-02, -2.35786214e-02, 1.69076547e-02,
-2.84800697e-02, 1.13160312e-02, -1.62039232e-02,
4.42064069e-02, -5.53385951e-02, -3.81570077e-03,
2.24948898e-02, 2.98899561e-02, -6.20845780e-02,
-6.17455505e-02, -2.87463143e-02, -3.64922471e-02,
5.56914695e-02, 7.95278028e-02, -1.64140295e-02,
-5.72730415e-02, 3.82489408e-04, -2.57991944e-02,
5.60569996e-03, 6.22320324e-02, 2.92927753e-02,
-2.76302639e-02, 5.42509044e-03, 4.91438694e-02,
-2.97841169e-02, 3.85531015e-03, 3.21140550e-02,
1.09126046e-02, -1.52886398e-02, 7.71117955e-02,
-6.65674731e-02, -7.65100196e-02, 3.16813476e-02,
5.35872988e-02, -2.57881954e-02, -6.31789267e-02,
5.57179302e-02, -8.23376887e-03, -7.92930350e-02,
-4.40230593e-03, -5.85862808e-02, -6.32085651e-02,
-8.10343958e-03, -5.15765324e-02, -2.71225045e-03,
3.32846702e-03, 5.35010137e-02, -4.46226373e-02,
-4.61465903e-02, -2.83273160e-02, -8.38251561e-02,
6.31373450e-02, 1.23920869e-02, 1.98571309e-02,
-7.98500329e-02, -1.68409124e-02, 2.86983363e-02,
2.83484329e-02, 1.70254000e-02, -1.03149433e-02,
-8.28676149e-02, -7.52873644e-02, -4.72270921e-02,
5.82849793e-02, -5.04208123e-03, -3.92815378e-03,
8.53977427e-02, -1.49699987e-03, 2.38903500e-02,
-7.68914223e-02, 5.22340983e-02, -1.59574747e-02,
2.54432089e-03, -1.50677897e-02, 6.97852895e-02,
5.79821654e-02, 4.63780463e-02, 3.72213572e-02,
-3.19544636e-02, 3.63650844e-02, -4.67306674e-02,
5.98343126e-02, -2.29402445e-02, -4.67077307e-02,
-8.00297186e-02, -1.26998993e-02, 2.56194659e-02,
-6.39827969e-03, -5.27175479e-02, 4.41791452e-02,
2.07807161e-02, 5.10299467e-02, -3.88649292e-02,
6.98654428e-02, 5.54772615e-02, -4.06775996e-02,
-1.49299689e-02, 5.13479002e-02, 3.84946503e-02,
1.78945418e-02, -5.63011244e-02, -5.97835295e-02,
1.24690228e-03, -8.77592154e-03, -4.92253341e-02,
3.83765349e-04, -2.30280682e-02, 7.10660294e-02,
-1.28777493e-02, -4.60018255e-02, 5.83525524e-02,
3.25961038e-02, -2.05171090e-02, -1.56963039e-02,
1.95313152e-02, 5.64142177e-03, 1.13158012e-02,
5.84767759e-02, -1.81002431e-02, 2.62346156e-02,
-3.32502611e-02, -1.64813083e-02, -8.57450217e-02,
3.13260332e-02, 3.87504622e-02, -2.57722810e-02,
-1.89087894e-02, 1.56725347e-02, 6.03394490e-03,
3.08556147e-02, -5.16294464e-02, -5.25516830e-02,
-3.99849378e-02, -4.20687310e-02, -6.64841011e-03,
7.83206802e-03, -2.29631155e-03, -4.14290838e-02,
2.91460901e-02, 4.47873026e-02, -5.29750437e-02,
2.25053560e-02, -6.13352172e-02, -7.50476914e-03,
-1.49205895e-02, 4.77827415e-02, 6.08104430e-02,
8.11247900e-02, 1.96281392e-02, -6.48709610e-02,
-6.87319636e-02, -1.14012016e-02, -4.04270664e-02,
-3.42179798e-02, 6.87915683e-02, 5.35876490e-02,
4.46084142e-03, -4.84818779e-02, 5.91844507e-02,
2.33293306e-02, -7.96549395e-02, -3.17407213e-02,
4.09828983e-02, 6.81246258e-03, 1.70317590e-02,
-1.71925023e-03, 8.94005746e-02, -5.65080382e-02,
1.46399550e-02, 4.28286903e-02, 3.34352069e-02,
-8.28326214e-03, -1.37287956e-02, -2.78198384e-02,
6.07999079e-02, 1.84377236e-03, 6.71126917e-02,
6.33158535e-02, 2.15608478e-02, 1.00288279e-02,
-2.54965909e-02, -2.83711799e-03, -4.46973555e-02,
-6.35108277e-02, 1.86707955e-02, 3.73574868e-02,
3.18180919e-02, 7.15093836e-02, 5.89598902e-02,
1.73684098e-02, 4.42634113e-02, 2.29667462e-02,
-2.11545546e-02, -7.64802238e-03, 2.07077265e-02,
1.67652294e-02, -8.14392939e-02, 6.16908409e-02,
1.29615352e-03, 1.45034026e-02, -5.27655222e-02,
-7.53152221e-02, 3.42715979e-02, 4.82611544e-02,
-2.72559226e-02, -6.31448776e-02]],
'metadata': {
'speaker': 'USER',
'time': 1695081065.7258203,
'message': 'Can I make embeddings with pandas?',
'timestring': 'Monday, September 18, 2023 at 07:51PM ',
'uuid': '954a51d7-8ac8-4b24-8d56-612e63ff15fb'
}
}
df = pd.DataFrame([data])
tbl.add(df)
# db.uri, db.table_names() # Return the database URI and list of table names for verification
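# Search sketch (hedged: assumes a lancedb version whose search() accepts
# vector_column_name, since the vector column here is not named "vector"):
# query_vec = data["embedded_user_input"][0]
# print(tbl.search(query_vec, vector_column_name="embedded_user_input").limit(1).to_pandas())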
| [
"lancedb.connect"
] | [((116, 136), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (131, 136), False, 'import lancedb\n'), ((11225, 11245), 'pandas.DataFrame', 'pd.DataFrame', (['[data]'], {}), '([data])\n', (11237, 11245), True, 'import pandas as pd\n'), ((185, 196), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (194, 196), True, 'import pyarrow as pa\n'), ((253, 265), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (263, 265), True, 'import pyarrow as pa\n'), ((335, 346), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (344, 346), True, 'import pyarrow as pa\n'), ((377, 388), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (386, 388), True, 'import pyarrow as pa\n'), ((416, 428), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (426, 428), True, 'import pyarrow as pa\n'), ((462, 473), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (471, 473), True, 'import pyarrow as pa\n'), ((501, 512), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (510, 512), True, 'import pyarrow as pa\n')] |
import lancedb
import numpy as np
from .base_index import BaseIndex
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import cpu_count
from functools import partial
def search_single(q: np.ndarray, k: int, db_address: str, table_name: str, metric: str):
index = lancedb.connect(db_address)
ids = (index[table_name].search(q).metric(metric).limit(k).to_arrow())["id"]
return [i.as_py() for i in ids]
class LanceDBIndex(BaseIndex):
def __init__(self, dim: int, metric: str):
self.db_address = "~/.cache/lancedb"
self.table_name = "vectors"
index = lancedb.connect(self.db_address)
metric = {"angular": "cosine", "l2": "l2"}[metric]
self.index_offset = 0
super().__init__(index, dim, metric, "LanceDB")
def add(self, x: np.ndarray):
if self.table_name in self.index.table_names():
self.index[self.table_name].add(
[
{"id": i, "vector": v}
for i, v in enumerate(x, start=self.index_offset)
]
)
else:
self.index.create_table(
self.table_name,
[{"id": i, "vector": v} for i, v in enumerate(x)],
mode="overwrite",
)
self.index_offset += x.shape[0]
def search(self, x: np.ndarray, k: int):
worker = partial(
search_single,
k=k,
db_address=self.db_address,
table_name=self.table_name,
metric=self.metric,
)
with ThreadPoolExecutor(cpu_count()) as pool:
futures = pool.map(worker, x)
results = np.empty((x.shape[0], k), dtype=np.int32)
for i, result in enumerate(futures):
results[i] = result
return results
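# Minimal usage sketch (hypothetical data, not part of this module):
#   index = LanceDBIndex(dim=2, metric="angular")
#   index.add(np.array([[0.1, 0.2], [0.3, 0.4]], dtype=np.float32))
#   neighbor_ids = index.search(np.array([[0.15, 0.25]], dtype=np.float32), k=1)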
| [
"lancedb.connect"
] | [((290, 317), 'lancedb.connect', 'lancedb.connect', (['db_address'], {}), '(db_address)\n', (305, 317), False, 'import lancedb\n'), ((613, 645), 'lancedb.connect', 'lancedb.connect', (['self.db_address'], {}), '(self.db_address)\n', (628, 645), False, 'import lancedb\n'), ((1394, 1502), 'functools.partial', 'partial', (['search_single'], {'k': 'k', 'db_address': 'self.db_address', 'table_name': 'self.table_name', 'metric': 'self.metric'}), '(search_single, k=k, db_address=self.db_address, table_name=self.\n table_name, metric=self.metric)\n', (1401, 1502), False, 'from functools import partial\n'), ((1685, 1726), 'numpy.empty', 'np.empty', (['(x.shape[0], k)'], {'dtype': 'np.int32'}), '((x.shape[0], k), dtype=np.int32)\n', (1693, 1726), True, 'import numpy as np\n'), ((1602, 1613), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (1611, 1613), False, 'from multiprocessing import cpu_count\n')] |
import streamlit as st
import sqlite3
import streamlit_antd_components as sac
import pandas as pd
import os
import openai
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.document_loaders import UnstructuredFileLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import LanceDB
from basecode.authenticate import return_api_key
from langchain.docstore.document import Document
import lancedb
import configparser
import ast
import json
class ConfigHandler:
def __init__(self):
self.config = configparser.ConfigParser()
self.config.read('config.ini')
def get_config_values(self, section, key):
value = self.config.get(section, key)
try:
# Try converting the string value to a Python data structure
return ast.literal_eval(value)
except (SyntaxError, ValueError):
# If not a data structure, return the plain string
return value
config_handler = ConfigHandler()
TCH = config_handler.get_config_values('constants', 'TCH')
STU = config_handler.get_config_values('constants', 'STU')
SA = config_handler.get_config_values('constants', 'SA')
AD = config_handler.get_config_values('constants', 'AD')
# Create or check for the 'database' directory in the current working directory
cwd = os.getcwd()
WORKING_DIRECTORY = os.path.join(cwd, "database")
if not os.path.exists(WORKING_DIRECTORY):
os.makedirs(WORKING_DIRECTORY)
if st.secrets["sql_ext_path"] == "None":
WORKING_DATABASE= os.path.join(WORKING_DIRECTORY , st.secrets["default_db"])
else:
WORKING_DATABASE= st.secrets["sql_ext_path"]
os.environ["OPENAI_API_KEY"] = return_api_key()
lancedb_path = os.path.join(WORKING_DIRECTORY, "lancedb")
db = lancedb.connect(lancedb_path)
def fetch_vectorstores_with_usernames():
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
query = '''
SELECT
Vector_Stores.vs_id,
Subject.subject_name,
Topic.topic_name,
Vector_Stores.vectorstore_name,
Users.username,
Vector_Stores.sharing_enabled
FROM Vector_Stores
JOIN Users ON Vector_Stores.user_id = Users.user_id
LEFT JOIN Subject ON Vector_Stores.subject = Subject.id
LEFT JOIN Topic ON Vector_Stores.topic = Topic.id;
'''
cursor.execute(query)
data = cursor.fetchall()
conn.close()
return data
def display_vectorstores():
data = fetch_vectorstores_with_usernames()
df = pd.DataFrame(data, columns=["vs_id", "subject_name", "topic_name", "vectorstore_name", "username", "sharing_enabled"])
# Convert the 'sharing_enabled' values
df["sharing_enabled"] = df["sharing_enabled"].apply(lambda x: '✔' if x == 1 else '')
st.dataframe(
df,
use_container_width=True,
column_order=["vs_id", "subject_name", "topic_name", "vectorstore_name", "username", "sharing_enabled"]
)
def fetch_all_files():
"""
Fetch all files either shared or based on user type
"""
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Construct the SQL query with JOINs for Subject, Topic, and Users tables
if st.session_state.user['profile_id'] == 'SA':
cursor.execute('''
SELECT Files.file_id, Files.file_name, Subject.subject_name, Topic.topic_name, Users.username
FROM Files
JOIN Subject ON Files.subject = Subject.id
JOIN Topic ON Files.topic = Topic.id
JOIN Users ON Files.user_id = Users.user_id
''')
else:
cursor.execute('''
SELECT Files.file_id, Files.file_name, Subject.subject_name, Topic.topic_name, Users.username
FROM Files
JOIN Subject ON Files.subject = Subject.id
JOIN Topic ON Files.topic = Topic.id
JOIN Users ON Files.user_id = Users.user_id
WHERE Files.sharing_enabled = 1
''')
files = cursor.fetchall()
formatted_files = [f"({file[0]}) {file[1]} ({file[4]})" for file in files]
conn.close()
return formatted_files
def fetch_file_data(file_id):
"""
Fetch file data given a file id
"""
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
cursor.execute("SELECT data, metadata FROM Files WHERE file_id = ?", (file_id,))
data = cursor.fetchone()
conn.close()
if data:
return data[0], data[1]
else:
return None, None
def insert_topic(org_id, topic_name):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
try:
cursor.execute('INSERT INTO Topic (org_id, topic_name) VALUES (?, ?);', (org_id, topic_name))
conn.commit()
return True # Indicates successful insertion
except sqlite3.IntegrityError:
# IntegrityError occurs if topic_name is not unique within the org
return False # Indicates topic_name is not unique within the org
finally:
conn.close()
def insert_subject(org_id, subject_name):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
try:
cursor.execute('INSERT INTO Subject (org_id, subject_name) VALUES (?, ?);', (org_id, subject_name))
conn.commit()
return True # Indicates successful insertion
except sqlite3.IntegrityError:
# IntegrityError occurs if subject_name is not unique within the org
return False # Indicates subject_name is not unique within the org
finally:
conn.close()
def select_organization():
with sqlite3.connect(WORKING_DATABASE) as conn:
cursor = conn.cursor()
# Org selection
org_query = "SELECT org_name FROM Organizations"
cursor.execute(org_query)
orgs = cursor.fetchall()
org_names = [org[0] for org in orgs]
# Use a Streamlit selectbox to choose an organization
selected_org_name = st.selectbox("Select an organization:", org_names)
# Retrieve the org_id for the selected organization
cursor.execute('SELECT org_id FROM Organizations WHERE org_name = ?;', (selected_org_name,))
result = cursor.fetchone()
if result:
org_id = result[0]
st.write(f"The org_id for {selected_org_name} is {org_id}.")
return org_id
else:
st.write(f"Organization '{selected_org_name}' not found in the database.")
return None
def fetch_subjects_by_org(org_id):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Check if the user is a super_admin (org_id is 0)
if org_id == 0:
cursor.execute('SELECT * FROM Subject;')
else:
cursor.execute('SELECT * FROM Subject WHERE org_id = ?;', (org_id,))
subjects = cursor.fetchall()
conn.close()
return subjects
def fetch_topics_by_org(org_id):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Check if the user is a super_admin (org_id is 0)
if org_id == 0:
cursor.execute('SELECT * FROM Topic;')
else:
cursor.execute('SELECT * FROM Topic WHERE org_id = ?;', (org_id,))
topics = cursor.fetchall()
conn.close()
return topics
def split_docs(file_path, meta):
#def split_meta_docs(file, source, tch_code):
loader = UnstructuredFileLoader(file_path)
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
metadata = {"source": meta}
for doc in docs:
doc.metadata.update(metadata)
return docs
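# Illustrative sketch (hypothetical file, not part of the app): each chunk
# returned by split_docs carries the upload's source in its metadata, e.g.
#   docs = split_docs("/tmp/example.pdf", "example.pdf")
#   docs[0].metadata  # -> {'source': 'example.pdf', ...}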
def create_lancedb_table(embeddings, meta, table_name):
lancedb_path = os.path.join(WORKING_DIRECTORY, "lancedb")
# LanceDB connection
db = lancedb.connect(lancedb_path)
table = db.create_table(
f"{table_name}",
data=[
{
"vector": embeddings.embed_query("Query Unsuccessful"),
"text": "Query Unsuccessful",
"id": "1",
"source": f"{meta}"
}
],
mode="overwrite",
)
return table
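# Sketch (assumption, mirroring load_vectorstore further below): once real
# documents exist, the seeded table can back a LangChain vector store:
#   table = create_lancedb_table(OpenAIEmbeddings(), "example.pdf", "my_kb")
#   vs = LanceDB.from_documents(docs, OpenAIEmbeddings(), connection=table)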
def save_to_vectorstores(vs, vstore_input_name, subject, topic, username, share_resource=False):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Fetch the user's details
cursor.execute('SELECT user_id FROM Users WHERE username = ?', (username,))
user_details = cursor.fetchone()
if not user_details:
st.error("Error: User not found.")
return
user_id = user_details[0]
# If Vector_Store instance exists in session state, then serialize and save
# vs is the documents in json format and vstore_input_name is the name of the table and vectorstore
if vs:
try:
cursor.execute('SELECT 1 FROM Vector_Stores WHERE vectorstore_name LIKE ? AND user_id = ?', (f"%{vstore_input_name}%", user_id))
exists = cursor.fetchone()
if exists:
st.error("Error: An entry with the same vectorstore_name and user_id already exists.")
return
if subject is None:
st.error("Error: Subject is missing.")
return
if topic is None:
st.error("Error: Topic is missing.")
return
# Get the subject and topic IDs
cursor.execute('SELECT id FROM Subject WHERE subject_name = ?', (subject,))
subject_id = cursor.fetchone()[0]
cursor.execute('SELECT id FROM Topic WHERE topic_name = ?', (topic,))
topic_id = cursor.fetchone()[0]
# Insert the new row
cursor.execute('''
INSERT INTO Vector_Stores (vectorstore_name, documents, user_id, subject, topic, sharing_enabled)
VALUES (?, ?, ?, ?, ?, ?)
''', (vstore_input_name, vs, user_id, subject_id, topic_id, share_resource))
conn.commit()
conn.close()
except Exception as e:
st.error(f"Error in storing documents and vectorstore: {e}")
return
def document_to_dict(doc):
# Assuming 'doc' has 'page_content' and 'metadata' attributes
return {
'page_content': doc.page_content,
'metadata': doc.metadata
}
def dict_to_document(doc_dict):
# Create a Document object from the dictionary
# Adjust this according to how your Document class is defined
return Document(page_content=doc_dict['page_content'],metadata=doc_dict['metadata'])
def create_vectorstore():
openai.api_key = return_api_key()
os.environ["OPENAI_API_KEY"] = return_api_key()
full_docs = []
st.subheader("Enter the topic and subject for your knowledge base")
embeddings = OpenAIEmbeddings()
if st.session_state.user['profile_id'] == SA:
org_id = select_organization()
if org_id is None:
return
else:
org_id = st.session_state.user["org_id"]
# Fetch all available subjects
subjects = fetch_subjects_by_org(st.session_state.user["org_id"])
subject_names = [sub[2] for sub in subjects] # Assuming index 2 holds the subject_name
selected_subject = st.selectbox("Select an existing subject or type a new one:", options=subject_names + ['New Subject'])
if selected_subject == 'New Subject':
subject = st.text_input("Please enter the new subject name:", max_chars=30)
if subject:
insert_subject(org_id, subject)
else:
subject = selected_subject
# Fetch all available topics
topics = fetch_topics_by_org(st.session_state.user["org_id"])
topic_names = [topic[2] for topic in topics] # Assuming index 2 holds the topic_name
selected_topic = st.selectbox("Select an existing topic or type a new one:", options=topic_names + ['New Topic'])
if selected_topic == 'New Topic':
topic = st.text_input("Please enter the new topic name:", max_chars=30)
if topic:
insert_topic(org_id, topic)
else:
topic = selected_topic
vectorstore_input = st.text_input("Please type in a name for your knowledge base:", max_chars=20)
vs_name = vectorstore_input + f"_({st.session_state.user['username']})"
    share_resource = st.checkbox("Share this resource", value=True)
# Show the current build of files for the latest database
st.subheader("Select one or more files to build your knowledge base")
files = fetch_all_files()
if files:
selected_files = sac.transfer(items=files, label=None, index=None, titles=['Uploaded files', 'Select files for KB'], format_func='title', width='100%', height=None, search=True, pagination=False, oneway=False, reload=True, disabled=False, return_index=False)
# Alert to confirm the creation of knowledge base
st.warning("Building your knowledge base will take some time. Please be patient.")
build = sac.buttons([
dict(label='Build VectorStore', icon='check-circle-fill', color = 'green'),
dict(label='Cancel', icon='x-circle-fill', color='red'),
], label=None, index=1, format_func='title', align='center', size='default', return_index=False)
if build == 'Build VectorStore' and selected_files:
for s_file in selected_files:
file_id = int(s_file.split("(", 1)[1].split(")", 1)[0])
file_data, meta = fetch_file_data(file_id)
docs = split_docs(file_data, meta)
full_docs.extend(docs)
#convert full_docs to json to store in sqlite
full_docs_dicts = [document_to_dict(doc) for doc in full_docs]
docs_json = json.dumps(full_docs_dicts)
#db = LanceDB.from_documents(full_docs, OpenAIEmbeddings(), connection=create_lancedb_table(embeddings, meta, vs_name))
#table = create_lancedb_table(embeddings, meta, vs_name)
# lancedb_path = os.path.join(WORKING_DIRECTORY, "lancedb")
# LanceDB connection
# db = lancedb.connect(lancedb_path)
# st.session_state.test1 = table
# st.write("full_docs",full_docs)
#full_docs_dicts = [document_to_dict(doc) for doc in full_docs]
#docs_json = json.dumps(full_docs_dicts)
# st.write("docs_json",docs_json)
#retrieved_docs_dicts = get_docs() # Assuming this returns the list of dictionaries
# retrieved_docs_dicts = json.loads(docs_json)
# retrieved_docs = [dict_to_document(doc_dict) for doc_dict in retrieved_docs_dicts]
# st.write("retrieved_docs",retrieved_docs)
#st.session_state.test2 = json.loads(docs_json)
# st.session_state.vs = LanceDB.from_documents(retrieved_docs , OpenAIEmbeddings(), connection= db.open_table("_(super_admin)"))
# st.session_state.current_model = "test1"
# st.write(st.session_state.test1)
#st.write(st.session_state.test2)
#st.write(type(db))
#st.session_state.vs = load_vectorstore(documents, table_name)
create_lancedb_table(embeddings, meta, vs_name)
save_to_vectorstores(docs_json, vs_name, subject, topic, st.session_state.user["username"], share_resource) # Passing the share_resource to the function
st.success("Knowledge Base loaded")
else:
st.write("No files found in the database.")
def load_vectorstore(documents, table_name):
retrieved_docs_dicts = json.loads(documents)
retrieved_docs = [dict_to_document(doc_dict) for doc_dict in retrieved_docs_dicts]
    vs = LanceDB.from_documents(retrieved_docs, OpenAIEmbeddings(), connection=db.open_table(f"{table_name}"))
return vs
def delete_lancedb_table(table_name):
lancedb_path = os.path.join(WORKING_DIRECTORY, "lancedb")
# LanceDB connection
db = lancedb.connect(lancedb_path)
db.drop_table(f"{table_name}")
def fetch_vectorstores_by_user_id(user_id):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Fetch vectorstores based on user_id
cursor.execute('SELECT vectorstore_name FROM Vector_Stores WHERE user_id = ?;', (user_id,))
vectorstores = cursor.fetchall()
conn.close()
return vectorstores
def delete_vectorstores():
st.subheader("Delete VectorStores in Database:")
user_vectorstores = fetch_vectorstores_by_user_id(st.session_state.user["id"])
if user_vectorstores:
vectorstore_names = [vs[0] for vs in user_vectorstores]
selected_vectorstores = st.multiselect("Select vectorstores to delete:", options=vectorstore_names)
confirm_delete = st.checkbox("I understand that this action cannot be undone.", value=False)
if st.button("Delete VectorStore"):
if confirm_delete and selected_vectorstores:
delete_vectorstores_from_db(selected_vectorstores, st.session_state.user["id"], st.session_state.user["profile_id"])
st.success(f"Deleted {len(selected_vectorstores)} vectorstores.")
else:
st.warning("Please confirm the deletion action.")
else:
st.write("No vectorstores found in the database.")
def delete_vectorstores_from_db(vectorstore_names, user_id, profile):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
for vectorstore_name in vectorstore_names:
if profile in ['SA', 'AD']:
# Delete the corresponding LanceDB table
delete_lancedb_table(vectorstore_name)
# Delete vectorstore irrespective of the user_id associated with them
cursor.execute('DELETE FROM Vector_Stores WHERE vectorstore_name=?;', (vectorstore_name,))
else:
# Delete the corresponding LanceDB table
delete_lancedb_table(vectorstore_name)
# Delete only if the user_id matches
cursor.execute('DELETE FROM Vector_Stores WHERE vectorstore_name=? AND user_id=?;', (vectorstore_name, user_id))
# Check if the row was affected
if cursor.rowcount == 0:
st.error(f"Unable to delete vectorstore '{vectorstore_name}' that is not owned by you.")
conn.commit() # Commit the changes
conn.close() # Close the connection
| [
"lancedb.connect"
] | [((1345, 1356), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1354, 1356), False, 'import os\n'), ((1377, 1406), 'os.path.join', 'os.path.join', (['cwd', '"""database"""'], {}), "(cwd, 'database')\n", (1389, 1406), False, 'import os\n'), ((1686, 1702), 'basecode.authenticate.return_api_key', 'return_api_key', ([], {}), '()\n', (1700, 1702), False, 'from basecode.authenticate import return_api_key\n'), ((1718, 1760), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', '"""lancedb"""'], {}), "(WORKING_DIRECTORY, 'lancedb')\n", (1730, 1760), False, 'import os\n'), ((1766, 1795), 'lancedb.connect', 'lancedb.connect', (['lancedb_path'], {}), '(lancedb_path)\n', (1781, 1795), False, 'import lancedb\n'), ((1415, 1448), 'os.path.exists', 'os.path.exists', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (1429, 1448), False, 'import os\n'), ((1451, 1481), 'os.makedirs', 'os.makedirs', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (1462, 1481), False, 'import os\n'), ((1543, 1600), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', "st.secrets['default_db']"], {}), "(WORKING_DIRECTORY, st.secrets['default_db'])\n", (1555, 1600), False, 'import os\n'), ((1850, 1883), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (1865, 1883), False, 'import sqlite3\n'), ((2515, 2637), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['vs_id', 'subject_name', 'topic_name', 'vectorstore_name', 'username',\n 'sharing_enabled']"}), "(data, columns=['vs_id', 'subject_name', 'topic_name',\n 'vectorstore_name', 'username', 'sharing_enabled'])\n", (2527, 2637), True, 'import pandas as pd\n'), ((2772, 2927), 'streamlit.dataframe', 'st.dataframe', (['df'], {'use_container_width': '(True)', 'column_order': "['vs_id', 'subject_name', 'topic_name', 'vectorstore_name', 'username',\n 'sharing_enabled']"}), "(df, use_container_width=True, column_order=['vs_id',\n 'subject_name', 'topic_name', 'vectorstore_name', 'username',\n 'sharing_enabled'])\n", (2784, 2927), True, 'import streamlit as st\n'), ((3058, 3091), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (3073, 3091), False, 'import sqlite3\n'), ((4233, 4266), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (4248, 4266), False, 'import sqlite3\n'), ((4562, 4595), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (4577, 4595), False, 'import sqlite3\n'), ((5083, 5116), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (5098, 5116), False, 'import sqlite3\n'), ((6527, 6560), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (6542, 6560), False, 'import sqlite3\n'), ((6920, 6953), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (6935, 6953), False, 'import sqlite3\n'), ((7349, 7382), 'langchain.document_loaders.UnstructuredFileLoader', 'UnstructuredFileLoader', (['file_path'], {}), '(file_path)\n', (7371, 7382), False, 'from langchain.document_loaders import UnstructuredFileLoader\n'), ((7427, 7482), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(0)'}), '(chunk_size=1000, chunk_overlap=0)\n', (7448, 7482), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((7697, 7739), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', '"""lancedb"""'], {}), "(WORKING_DIRECTORY, 'lancedb')\n", (7709, 7739), False, 'import os\n'), ((7768, 7797), 'lancedb.connect', 'lancedb.connect', (['lancedb_path'], {}), '(lancedb_path)\n', (7783, 7797), False, 'import lancedb\n'), ((8147, 8180), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (8162, 8180), False, 'import sqlite3\n'), ((10412, 10490), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': "doc_dict['page_content']", 'metadata': "doc_dict['metadata']"}), "(page_content=doc_dict['page_content'], metadata=doc_dict['metadata'])\n", (10420, 10490), False, 'from langchain.docstore.document import Document\n'), ((10538, 10554), 'basecode.authenticate.return_api_key', 'return_api_key', ([], {}), '()\n', (10552, 10554), False, 'from basecode.authenticate import return_api_key\n'), ((10590, 10606), 'basecode.authenticate.return_api_key', 'return_api_key', ([], {}), '()\n', (10604, 10606), False, 'from basecode.authenticate import return_api_key\n'), ((10630, 10697), 'streamlit.subheader', 'st.subheader', (['"""Enter the topic and subject for your knowledge base"""'], {}), "('Enter the topic and subject for your knowledge base')\n", (10642, 10697), True, 'import streamlit as st\n'), ((10715, 10733), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (10731, 10733), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((11154, 11261), 'streamlit.selectbox', 'st.selectbox', (['"""Select an existing subject or type a new one:"""'], {'options': "(subject_names + ['New Subject'])"}), "('Select an existing subject or type a new one:', options=\n subject_names + ['New Subject'])\n", (11166, 11261), True, 'import streamlit as st\n'), ((11709, 11810), 'streamlit.selectbox', 'st.selectbox', (['"""Select an existing topic or type a new one:"""'], {'options': "(topic_names + ['New Topic'])"}), "('Select an existing topic or type a new one:', options=\n topic_names + ['New Topic'])\n", (11721, 11810), True, 'import streamlit as st\n'), ((12057, 12134), 'streamlit.text_input', 'st.text_input', (['"""Please type in a name for your knowledge base:"""'], {'max_chars': '(20)'}), "('Please type in a name for your knowledge base:', max_chars=20)\n", (12070, 12134), True, 'import streamlit as st\n'), ((12232, 12278), 'streamlit.checkbox', 'st.checkbox', (['"""Share this resource"""'], {'value': '(True)'}), "('Share this resource', value=True)\n", (12243, 12278), True, 'import streamlit as st\n'), ((12369, 12438), 'streamlit.subheader', 'st.subheader', (['"""Select one or more files to build your knowledge base"""'], {}), "('Select one or more files to build your knowledge base')\n", (12381, 12438), True, 'import streamlit as st\n'), ((15537, 15558), 'json.loads', 'json.loads', (['documents'], {}), '(documents)\n', (15547, 15558), False, 'import json\n'), ((15829, 15871), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', '"""lancedb"""'], {}), "(WORKING_DIRECTORY, 'lancedb')\n", (15841, 15871), False, 'import os\n'), ((15900, 15929), 'lancedb.connect', 'lancedb.connect', (['lancedb_path'], {}), '(lancedb_path)\n', (15915, 15929), False, 'import lancedb\n'), ((16018, 16051), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (16033, 16051), False, 'import sqlite3\n'), ((16337, 16385), 'streamlit.subheader', 'st.subheader', (['"""Delete VectorStores in Database:"""'], {}), "('Delete VectorStores in Database:')\n", (16349, 16385), True, 'import streamlit as st\n'), ((17333, 17366), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (17348, 17366), False, 'import sqlite3\n'), ((568, 595), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (593, 595), False, 'import configparser\n'), ((5597, 5630), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (5612, 5630), False, 'import sqlite3\n'), ((5956, 6006), 'streamlit.selectbox', 'st.selectbox', (['"""Select an organization:"""', 'org_names'], {}), "('Select an organization:', org_names)\n", (5968, 6006), True, 'import streamlit as st\n'), ((8391, 8425), 'streamlit.error', 'st.error', (['"""Error: User not found."""'], {}), "('Error: User not found.')\n", (8399, 8425), True, 'import streamlit as st\n'), ((11322, 11387), 'streamlit.text_input', 'st.text_input', (['"""Please enter the new subject name:"""'], {'max_chars': '(30)'}), "('Please enter the new subject name:', max_chars=30)\n", (11335, 11387), True, 'import streamlit as st\n'), ((11865, 11928), 'streamlit.text_input', 'st.text_input', (['"""Please enter the new topic name:"""'], {'max_chars': '(30)'}), "('Please enter the new topic name:', max_chars=30)\n", (11878, 11928), True, 'import streamlit as st\n'), ((12508, 12762), 'streamlit_antd_components.transfer', 'sac.transfer', ([], {'items': 'files', 'label': 'None', 'index': 'None', 'titles': "['Uploaded files', 'Select files for KB']", 'format_func': '"""title"""', 'width': '"""100%"""', 'height': 'None', 'search': '(True)', 'pagination': '(False)', 'oneway': '(False)', 'reload': '(True)', 'disabled': '(False)', 'return_index': '(False)'}), "(items=files, label=None, index=None, titles=['Uploaded files',\n 'Select files for KB'], format_func='title', width='100%', height=None,\n search=True, pagination=False, oneway=False, reload=True, disabled=\n False, return_index=False)\n", (12520, 12762), True, 'import streamlit_antd_components as sac\n'), ((12825, 12912), 'streamlit.warning', 'st.warning', (['"""Building your knowledge base will take some time. Please be patient."""'], {}), "(\n 'Building your knowledge base will take some time. Please be patient.')\n", (12835, 12912), True, 'import streamlit as st\n'), ((15415, 15458), 'streamlit.write', 'st.write', (['"""No files found in the database."""'], {}), "('No files found in the database.')\n", (15423, 15458), True, 'import streamlit as st\n'), ((15695, 15713), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (15711, 15713), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((16596, 16671), 'streamlit.multiselect', 'st.multiselect', (['"""Select vectorstores to delete:"""'], {'options': 'vectorstore_names'}), "('Select vectorstores to delete:', options=vectorstore_names)\n", (16610, 16671), True, 'import streamlit as st\n'), ((16697, 16772), 'streamlit.checkbox', 'st.checkbox', (['"""I understand that this action cannot be undone."""'], {'value': '(False)'}), "('I understand that this action cannot be undone.', value=False)\n", (16708, 16772), True, 'import streamlit as st\n'), ((16793, 16824), 'streamlit.button', 'st.button', (['"""Delete VectorStore"""'], {}), "('Delete VectorStore')\n", (16802, 16824), True, 'import streamlit as st\n'), ((17200, 17250), 'streamlit.write', 'st.write', (['"""No vectorstores found in the database."""'], {}), "('No vectorstores found in the database.')\n", (17208, 17250), True, 'import streamlit as st\n'), ((834, 857), 'ast.literal_eval', 'ast.literal_eval', (['value'], {}), '(value)\n', (850, 857), False, 'import ast\n'), ((6267, 6327), 'streamlit.write', 'st.write', (['f"""The org_id for {selected_org_name} is {org_id}."""'], {}), "(f'The org_id for {selected_org_name} is {org_id}.')\n", (6275, 6327), True, 'import streamlit as st\n'), ((6380, 6454), 'streamlit.write', 'st.write', (['f"""Organization \'{selected_org_name}\' not found in the database."""'], {}), '(f"Organization \'{selected_org_name}\' not found in the database.")\n', (6388, 6454), True, 'import streamlit as st\n'), ((13698, 13725), 'json.dumps', 'json.dumps', (['full_docs_dicts'], {}), '(full_docs_dicts)\n', (13708, 13725), False, 'import json\n'), ((15360, 15395), 'streamlit.success', 'st.success', (['"""Knowledge Base loaded"""'], {}), "('Knowledge Base loaded')\n", (15370, 15395), True, 'import streamlit as st\n'), ((8914, 9010), 'streamlit.error', 'st.error', (['"""Error: An entry with the same vectorstore_name and user_id already exists."""'], {}), "(\n 'Error: An entry with the same vectorstore_name and user_id already exists.'\n )\n", (8922, 9010), True, 'import streamlit as st\n'), ((9085, 9123), 'streamlit.error', 'st.error', (['"""Error: Subject is missing."""'], {}), "('Error: Subject is missing.')\n", (9093, 9123), True, 'import streamlit as st\n'), ((9194, 9230), 'streamlit.error', 'st.error', (['"""Error: Topic is missing."""'], {}), "('Error: Topic is missing.')\n", (9202, 9230), True, 'import streamlit as st\n'), ((9983, 10043), 'streamlit.error', 'st.error', (['f"""Error in storing documents and vectorstore: {e}"""'], {}), "(f'Error in storing documents and vectorstore: {e}')\n", (9991, 10043), True, 'import streamlit as st\n'), ((17132, 17181), 'streamlit.warning', 'st.warning', (['"""Please confirm the deletion action."""'], {}), "('Please confirm the deletion action.')\n", (17142, 17181), True, 'import streamlit as st\n'), ((18195, 18293), 'streamlit.error', 'st.error', (['f"""Unable to delete vectorstore \'{vectorstore_name}\' that is not owned by you."""'], {}), '(\n f"Unable to delete vectorstore \'{vectorstore_name}\' that is not owned by you."\n )\n', (18203, 18293), True, 'import streamlit as st\n')]
# See: https://www.mongodb.com/developer/products/atlas/rag-atlas-vector-search-langchain-openai/
from langchain_openai import OpenAI,ChatOpenAI
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from langchain_community.llms import Ollama
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import PyPDFLoader
from langchain_openai import OpenAIEmbeddings
import lancedb
from langchain_community.vectorstores import LanceDB
# List all the methods in LanceDB
for module in dir(LanceDB):
print(module)
import os
import sys
debug=False
if "vectorUser" not in os.environ:
print("vectorUser not set")
os._exit(1)
if "OPENAI_API_KEY" not in os.environ:
print("OPENAI_API_KEY not set")
os._exit(1)
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
gpt4=ChatOpenAI(openai_api_key=OPENAI_API_KEY,model_name="gpt-4",max_tokens=1000)
gpt3=OpenAI(openai_api_key=OPENAI_API_KEY,max_tokens=1000)
llm=gpt3 #Default to GPT-3
# Require at least the PDF path argument (argv[0] is the script name).
if len(sys.argv) < 2:
    print("Usage: python3 multiModel.py <PDF file> [gpt4|phi|llama2]")
    os._exit(1)
if len(sys.argv) > 2:
if sys.argv[2] == "gpt4":
llm = gpt4
elif sys.argv[2] == "phi":
llm = Ollama(model="phi")
elif sys.argv[2] == "llama2":
llm = Ollama(model="llama2")
pdf_name = sys.argv[1]
if not os.path.exists(pdf_name):
print("The PDF file does not exist. Exiting program.")
os._exit(1)
# Vector DB connection
def loadFile(pdf_name):
vectorStore = lancedb.connect("/tmp/lancedb")
    tables = vectorStore.table_names()
    embeddings = OpenAIEmbeddings()
    table = os.path.basename(pdf_name)
    # Create and seed the table only if it doesn't already exist
    if table not in tables:
print('Creating new lanceDB table')
table = vectorStore.create_table(
table,
data=[
{
"vector": embeddings.embed_query("Hello World"),
"text": "Hello World",
"id": "1",
}
],
mode="overwrite",
)
loader = PyPDFLoader(pdf_name)
data = loader.load()
# Split docs
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
docs = text_splitter.split_documents(data)
vectorStore = LanceDB.from_documents(docs, OpenAIEmbeddings(), connection=table)
else:
print('Using existing lanceDB table')
        table = vectorStore.open_table(table)
        # A lancedb connection has no from_existing_index(); wrapping the opened
        # table in the LangChain LanceDB store is the working equivalent here
        # (this LanceDB version accepts an existing table as `connection`).
        vectorStore = LanceDB(connection=table, embedding=embeddings)
return vectorStore
def answerQuestion(debug, vectorStore):
print()
question = input("Please enter a question about The Brave Japanese: ")
# If question is blank, exit program
if not question:
print("No question entered. Exiting program.")
os._exit(1)
# Display results if debug is true
if debug:
results = vectorStore.similarity_search_with_score(
query=question,
k=5,)
for result in results:
# print just the page_content field
print(result[0].page_content )
qa_retriever = vectorStore.as_retriever(
search_type="similarity",
search_kwargs={"k": 25},
)
prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}
"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"],history_variables=["chat_history"]
)
qa = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=qa_retriever,
return_source_documents=True,
chain_type_kwargs={"prompt": PROMPT},
)
docs = qa.invoke({"query": question})
print()
if debug:
for doc in docs["source_documents"]:
print(doc.page_content)
print()
print(docs["result"])
vectorStore = loadFile(pdf_name)
while True:
answerQuestion(debug, vectorStore)
| [
"lancedb.connect"
] | [((867, 945), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'OPENAI_API_KEY', 'model_name': '"""gpt-4"""', 'max_tokens': '(1000)'}), "(openai_api_key=OPENAI_API_KEY, model_name='gpt-4', max_tokens=1000)\n", (877, 945), False, 'from langchain_openai import OpenAI, ChatOpenAI\n'), ((949, 1003), 'langchain_openai.OpenAI', 'OpenAI', ([], {'openai_api_key': 'OPENAI_API_KEY', 'max_tokens': '(1000)'}), '(openai_api_key=OPENAI_API_KEY, max_tokens=1000)\n', (955, 1003), False, 'from langchain_openai import OpenAI, ChatOpenAI\n'), ((711, 722), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (719, 722), False, 'import os\n'), ((803, 814), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (811, 814), False, 'import os\n'), ((1129, 1140), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (1137, 1140), False, 'import os\n'), ((1383, 1407), 'os.path.exists', 'os.path.exists', (['pdf_name'], {}), '(pdf_name)\n', (1397, 1407), False, 'import os\n'), ((1472, 1483), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (1480, 1483), False, 'import os\n'), ((1551, 1582), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (1566, 1582), False, 'import lancedb\n'), ((1640, 1658), 'langchain_openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1656, 1658), False, 'from langchain_openai import OpenAIEmbeddings\n'), ((1670, 1696), 'os.path.basename', 'os.path.basename', (['pdf_name'], {}), '(pdf_name)\n', (1686, 1696), False, 'import os\n'), ((3570, 3691), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'prompt_template', 'input_variables': "['context', 'question']", 'history_variables': "['chat_history']"}), "(template=prompt_template, input_variables=['context',\n 'question'], history_variables=['chat_history'])\n", (3584, 3691), False, 'from langchain.prompts import PromptTemplate\n'), ((3704, 3861), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'llm', 'chain_type': '"""stuff"""', 'retriever': 'qa_retriever', 'return_source_documents': '(True)', 'chain_type_kwargs': "{'prompt': PROMPT}"}), "(llm=llm, chain_type='stuff', retriever=\n qa_retriever, return_source_documents=True, chain_type_kwargs={'prompt':\n PROMPT})\n", (3731, 3861), False, 'from langchain.chains import RetrievalQA\n'), ((2133, 2154), 'langchain_community.document_loaders.PyPDFLoader', 'PyPDFLoader', (['pdf_name'], {}), '(pdf_name)\n', (2144, 2154), False, 'from langchain_community.document_loaders import PyPDFLoader\n'), ((2230, 2293), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(0)'}), '(chunk_size=500, chunk_overlap=0)\n', (2260, 2293), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2907, 2918), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (2915, 2918), False, 'import os\n'), ((1261, 1280), 'langchain_community.llms.Ollama', 'Ollama', ([], {'model': '"""phi"""'}), "(model='phi')\n", (1267, 1280), False, 'from langchain_community.llms import Ollama\n'), ((2396, 2414), 'langchain_openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (2412, 2414), False, 'from langchain_openai import OpenAIEmbeddings\n'), ((1329, 1351), 'langchain_community.llms.Ollama', 'Ollama', ([], {'model': '"""llama2"""'}), "(model='llama2')\n", (1335, 1351), False, 'from langchain_community.llms import Ollama\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/8/9 15:42
@Author : unkn-wn (Leon Yee)
@File : lancedb_store.py
"""
import os
import shutil
import lancedb
class LanceStore:
def __init__(self, name):
db = lancedb.connect("./data/lancedb")
self.db = db
self.name = name
self.table = None
def search(self, query, n_results=2, metric="L2", nprobes=20, **kwargs):
# This assumes query is a vector embedding
# kwargs can be used for optional filtering
# .select - only searches the specified columns
# .where - SQL syntax filtering for metadata (e.g. where("price > 100"))
# .metric - specifies the distance metric to use
        # .nprobes - higher values yield better recall (more likely to find vectors if they exist) at the expense of latency.
if self.table is None:
raise Exception("Table not created yet, please add data first.")
results = (
self.table.search(query)
.limit(n_results)
.select(kwargs.get("select"))
.where(kwargs.get("where"))
.metric(metric)
.nprobes(nprobes)
.to_df()
)
return results
def persist(self):
raise NotImplementedError
def write(self, data, metadatas, ids):
# This function is similar to add(), but it's for more generalized updates
# "data" is the list of embeddings
# Inserts into table by expanding metadatas into a dataframe: [{'vector', 'id', 'meta', 'meta2'}, ...]
documents = []
for i in range(len(data)):
row = {"vector": data[i], "id": ids[i]}
row.update(metadatas[i])
documents.append(row)
if self.table is not None:
self.table.add(documents)
else:
self.table = self.db.create_table(self.name, documents)
def add(self, data, metadata, _id):
# This function is for adding individual documents
# It assumes you're passing in a single vector embedding, metadata, and id
row = {"vector": data, "id": _id}
row.update(metadata)
if self.table is not None:
self.table.add([row])
else:
self.table = self.db.create_table(self.name, [row])
def delete(self, _id):
# This function deletes a row by id.
# LanceDB delete syntax uses SQL syntax, so you can use "in" or "="
if self.table is None:
raise Exception("Table not created yet, please add data first")
if isinstance(_id, str):
return self.table.delete(f"id = '{_id}'")
else:
return self.table.delete(f"id = {_id}")
def drop(self, name):
# This function drops a table, if it exists.
path = os.path.join(self.db.uri, name + ".lance")
if os.path.exists(path):
shutil.rmtree(path)
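# Minimal usage sketch (hypothetical vector/id values, not part of the class):
#   store = LanceStore("memories")
#   store.add(data=[0.1, 0.2, 0.3], metadata={"speaker": "USER"}, _id="1")
#   hits = store.search([0.1, 0.2, 0.3], n_results=1)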
| [
"lancedb.connect"
] | [((241, 274), 'lancedb.connect', 'lancedb.connect', (['"""./data/lancedb"""'], {}), "('./data/lancedb')\n", (256, 274), False, 'import lancedb\n'), ((2822, 2864), 'os.path.join', 'os.path.join', (['self.db.uri', "(name + '.lance')"], {}), "(self.db.uri, name + '.lance')\n", (2834, 2864), False, 'import os\n'), ((2876, 2896), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2890, 2896), False, 'import os\n'), ((2910, 2929), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (2923, 2929), False, 'import shutil\n')] |
import json
import gzip
from sentence_transformers import SentenceTransformer
from fastapi import FastAPI
from pydantic import BaseModel
from pathlib import Path
from tqdm.auto import tqdm
import pandas as pd
import lancedb
import sqlite3
app = FastAPI()
encoder = SentenceTransformer('all-MiniLM-L6-v2')
lance_location = Path('../indexes')
sqlite_location = Path('../data/indexes/documents.sqlite')
lancedb_conn = lancedb.connect(lance_location)
indexes = lancedb_conn.table_names()
class IndexFile(BaseModel):
file_name: str
collection_name: str
text_field: str
class IndexCollection(BaseModel):
collection_name: str
field_map: dict
class Query(BaseModel):
collection_name: str
query: str
top_k: int
class HybridQuery(BaseModel):
collection_name: str
query: str
top_k: int
fts_weight: float
vec_weight: float
@app.post("/vec_query")
def vec_query(query: Query):
sqlite_conn = sqlite3.connect(sqlite_location)
print(query.collection_name)
search_index = lancedb_conn.open_table(query.collection_name)
print(query)
search_query = encoder.encode(query.query)
search_results = search_index.search(search_query).limit(query.top_k).to_list()
uuids = {doc['uuid']:position for position, doc in enumerate(search_results)}
scores = {doc['uuid']:1-doc['_distance'] for doc in search_results}
uuid_query = [f"'{uuid}'" for uuid in list(uuids.keys())]
document_results = sqlite_conn.execute(f"""SELECT uuid, * from {query.collection_name} WHERE uuid in ({','.join(uuid_query)});""").fetchall()
field_maps = ['uuid']
field_maps.extend([x[1] for x in sqlite_conn.execute(f"PRAGMA table_info({query.collection_name})").fetchall()])
def results_to_display(row, table_schema, scores):
doc = {field:value for field,value in zip(table_schema,row)}
doc['score'] = scores[doc['uuid']]
return doc
document_results = [results_to_display(row, field_maps, scores) for row in document_results]
# provide the proper sorting because sqlite will not respect the original order in the where clause
document_results = sorted([(uuids[doc['uuid']], doc) for doc in document_results])
# undo the sorting key
document_results = [x[1] for x in document_results]
sqlite_conn.close()
return document_results, list(document_results[0].keys())
@app.post("/fts_query")
def fts_query(query: Query):
sqlite_conn = sqlite3.connect(sqlite_location)
print(query.collection_name)
index = lancedb_conn.open_table(query.collection_name)
search_results = index.search(query.query).limit(query.top_k).to_list()
uuids = {doc['uuid']:position for position, doc in enumerate(search_results)}
scores = {doc['uuid']:doc['score'] for doc in search_results}
uuid_query = [f"'{uuid}'" for uuid in list(uuids.keys())]
document_results = sqlite_conn.execute(f"""SELECT uuid, * from {query.collection_name} WHERE uuid in ({','.join(uuid_query)});""").fetchall()
field_maps = ['uuid']
field_maps.extend([x[1] for x in sqlite_conn.execute(f"PRAGMA table_info({query.collection_name})").fetchall()])
def results_to_display(row, table_schema, scores):
doc = {field:value for field,value in zip(table_schema,row)}
doc['score'] = scores[doc['uuid']]
return doc
document_results = [results_to_display(row, field_maps, scores) for row in document_results]
# provide the proper sorting because sqlite will not respect the original order in the where clause
document_results = sorted([(uuids[doc['uuid']], doc) for doc in document_results])
# undo the sorting key
document_results = [x[1] for x in document_results]
sqlite_conn.close()
return document_results, list(document_results[0].keys())
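# The /hybrid endpoint below fuses the FTS and vector rankings with a
# reciprocal-rank-fusion (RRF) style score, 1 / (40 + rank), rank starting at 0.
# Tiny illustrative example (hypothetical ranks, not taken from an index):
#   doc A: fts rank 0, vec rank 2 -> 1/40 + 1/42 ~= 0.0488
#   doc B: fts rank 1, vec rank 0 -> 1/41 + 1/40 ~= 0.0494  (B outranks A)
# HybridQuery.fts_weight and .vec_weight scale the two terms before summing.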
@app.post("/hybrid")
def hybrid_query(query: HybridQuery):
sqlite_conn = sqlite3.connect(sqlite_location)
print(query.collection_name)
index = lancedb_conn.open_table(query.collection_name)
fts_results = index.search(query.query).limit(query.top_k).to_list()
fts_uuids = {doc['uuid']:position for position, doc in enumerate(fts_results)}
fts_scores = {doc['uuid']:doc['score'] for doc in fts_results}
fts_rrf = {uuid:1/(40+position) for uuid, position in fts_uuids.items()}
vec_results = index.search(encoder.encode(query.query)).limit(query.top_k).to_list()
vec_uuids = {doc['uuid']:position for position, doc in enumerate(vec_results)}
vec_scores = {doc['uuid']:1-doc['_distance'] for doc in vec_results}
vec_rrf = {uuid:1/(40+position) for uuid, position in vec_uuids.items()}
rrf_df = pd.concat([pd.Series(vec_rrf, name='vec'), pd.Series(fts_rrf, name='fts')], axis=1).dropna()
rrf_df['rrf'] = query.vec_weight * rrf_df['vec'] + query.fts_weight * rrf_df['fts']
rrf_df['rrf_rank'] = rrf_df['rrf'].rank(ascending=False)
rrf_rank = rrf_df['rrf_rank'].to_dict()
rrf_score = rrf_df['rrf'].to_dict()
# uuid_query = [f"'{uuid}'" for uuid in list(uuids.keys())]
uuid_query = [f"'{uuid}'" for uuid in rrf_df.index.values]
if len(uuid_query) > 0:
uuid_query = ", ".join(uuid_query)
sql_query = f"""SELECT uuid, * from {query.collection_name} WHERE uuid in ({uuid_query});"""
print(sql_query)
document_results = sqlite_conn.execute(sql_query).fetchall()
field_maps = ['uuid']
field_maps.extend([x[1] for x in sqlite_conn.execute(f"PRAGMA table_info({query.collection_name})").fetchall()])
print(field_maps)
def results_to_display(row, table_schema, scores):
print(row[:3])
doc = {field:value for field,value in zip(table_schema,row)}
doc['score'] = scores[doc['uuid']]
return doc
document_results = [results_to_display(row, field_maps, rrf_score) for row in document_results]
# provide the proper sorting because sqlite will not respect the original order in the where clause
document_results = sorted([(rrf_rank[doc['uuid']], doc) for doc in document_results])
# undo the sorting key
document_results = [x[1] for x in document_results]
sqlite_conn.close()
return document_results, list(document_results[0].keys())
# if len(search_results) > 0:
# if 'score' in search_results[0]:
# scores = {doc['uuid']:doc['score'] for doc in search_results}
# elif '_distance' in search_results[0]:
# scores = {doc['uuid']:1-doc['_distance'] for doc in search_results}
# else:
#           scores = {doc['uuid']:-999 for doc in search_results}
| [
"lancedb.connect"
] | [((246, 255), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (253, 255), False, 'from fastapi import FastAPI\n'), ((266, 305), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['"""all-MiniLM-L6-v2"""'], {}), "('all-MiniLM-L6-v2')\n", (285, 305), False, 'from sentence_transformers import SentenceTransformer\n'), ((323, 341), 'pathlib.Path', 'Path', (['"""../indexes"""'], {}), "('../indexes')\n", (327, 341), False, 'from pathlib import Path\n'), ((360, 400), 'pathlib.Path', 'Path', (['"""../data/indexes/documents.sqlite"""'], {}), "('../data/indexes/documents.sqlite')\n", (364, 400), False, 'from pathlib import Path\n'), ((417, 448), 'lancedb.connect', 'lancedb.connect', (['lance_location'], {}), '(lance_location)\n', (432, 448), False, 'import lancedb\n'), ((943, 975), 'sqlite3.connect', 'sqlite3.connect', (['sqlite_location'], {}), '(sqlite_location)\n', (958, 975), False, 'import sqlite3\n'), ((2451, 2483), 'sqlite3.connect', 'sqlite3.connect', (['sqlite_location'], {}), '(sqlite_location)\n', (2466, 2483), False, 'import sqlite3\n'), ((3878, 3910), 'sqlite3.connect', 'sqlite3.connect', (['sqlite_location'], {}), '(sqlite_location)\n', (3893, 3910), False, 'import sqlite3\n'), ((4653, 4683), 'pandas.Series', 'pd.Series', (['vec_rrf'], {'name': '"""vec"""'}), "(vec_rrf, name='vec')\n", (4662, 4683), True, 'import pandas as pd\n'), ((4685, 4715), 'pandas.Series', 'pd.Series', (['fts_rrf'], {'name': '"""fts"""'}), "(fts_rrf, name='fts')\n", (4694, 4715), True, 'import pandas as pd\n')] |
from datasets import load_dataset
from enum import Enum
import lancedb
from tqdm import tqdm
from IPython.display import display
import clip
import torch
class Animal(Enum):
italian_greyhound = 0
coyote = 1
beagle = 2
rottweiler = 3
hyena = 4
greater_swiss_mountain_dog = 5
Triceratops = 6
french_bulldog = 7
red_wolf = 8
    egyptian_cat = 9
chihuahua = 10
irish_terrier = 11
tiger_cat = 12
white_wolf = 13
timber_wolf = 14
def embed(img):
image = preprocess(img).unsqueeze(0).to(device)
embs = model.encode_image(image)
return embs.detach().numpy()[0].tolist()
def image_search(id):
print("\n----- Image Search -----\n")
print(Animal(test[id]["labels"]).name)
display(test[id]["img"])
res = tbl.search(embed(test[id]["img"])).limit(5).to_df()
print(res)
for i in range(5):
print(Animal(res["label"][i]).name)
data_id = int(res["id"][i])
display(dataset[data_id]["img"])
def embed_txt(txt):
text = clip.tokenize([txt]).to(device)
embs = model.encode_text(text)
return embs.detach().numpy()[0].tolist()
def text_search(text):
print("\n----- Text Search -----\n")
res = tbl.search(embed_txt(text)).limit(5).to_df()
print(res)
for i in range(len(res)):
print(Animal(res["label"][i]).name)
data_id = int(res["id"][i])
display(dataset[data_id]["img"])
def create_data(db):
tbl = db.create_table(
"animal_images",
[{"vector": embed(dataset[0]["img"]), "id": 0, "label": dataset[0]["labels"]}],
)
data = []
for i in tqdm(range(1, len(dataset))):
data.append(
{"vector": dataset[i]["img"], "id": i, "label": dataset[i]["labels"]}
)
batched_data = [data[n : n + 50] for n in range(0, len(data), 50)]
for i in tqdm(batched_data):
batch_data = []
for j in i:
row = {}
row["vector"] = embed(j["vector"])
row["id"] = j["id"]
row["label"] = j["label"]
batch_data.append(row)
tbl.add(batch_data)
return tbl
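# Note (assumption): for larger tables an ANN index can speed up tbl.search,
# e.g. tbl.create_index(num_partitions=256, num_sub_vectors=96) once the
# vectors have been added.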
if __name__ == "__main__":
global dataset
dataset = load_dataset(
"CVdatasets/ImageNet15_animals_unbalanced_aug1", split="train"
)
device = "cuda" if torch.cuda.is_available() else "cpu"
global model, preprocess
model, preprocess = clip.load("ViT-B/32", device=device)
db = lancedb.connect("./data/tables")
# This function will take ~10 minutes, run if you don't have the data yet
# tbl = create_data(db)
# Run this to open the table for future runs
tbl = db.open_table("animal_images")
print(tbl.to_pandas())
global test
test = load_dataset(
"CVdatasets/ImageNet15_animals_unbalanced_aug1", split="validation"
)
image_search(0)
text_search("a full white dog")
| [
"lancedb.connect"
] | [((748, 772), 'IPython.display.display', 'display', (["test[id]['img']"], {}), "(test[id]['img'])\n", (755, 772), False, 'from IPython.display import display\n'), ((1853, 1871), 'tqdm.tqdm', 'tqdm', (['batched_data'], {}), '(batched_data)\n', (1857, 1871), False, 'from tqdm import tqdm\n'), ((2196, 2272), 'datasets.load_dataset', 'load_dataset', (['"""CVdatasets/ImageNet15_animals_unbalanced_aug1"""'], {'split': '"""train"""'}), "('CVdatasets/ImageNet15_animals_unbalanced_aug1', split='train')\n", (2208, 2272), False, 'from datasets import load_dataset\n'), ((2401, 2437), 'clip.load', 'clip.load', (['"""ViT-B/32"""'], {'device': 'device'}), "('ViT-B/32', device=device)\n", (2410, 2437), False, 'import clip\n'), ((2448, 2480), 'lancedb.connect', 'lancedb.connect', (['"""./data/tables"""'], {}), "('./data/tables')\n", (2463, 2480), False, 'import lancedb\n'), ((2734, 2820), 'datasets.load_dataset', 'load_dataset', (['"""CVdatasets/ImageNet15_animals_unbalanced_aug1"""'], {'split': '"""validation"""'}), "('CVdatasets/ImageNet15_animals_unbalanced_aug1', split=\n 'validation')\n", (2746, 2820), False, 'from datasets import load_dataset\n'), ((962, 994), 'IPython.display.display', 'display', (["dataset[data_id]['img']"], {}), "(dataset[data_id]['img'])\n", (969, 994), False, 'from IPython.display import display\n'), ((1394, 1426), 'IPython.display.display', 'display', (["dataset[data_id]['img']"], {}), "(dataset[data_id]['img'])\n", (1401, 1426), False, 'from IPython.display import display\n'), ((2311, 2336), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2334, 2336), False, 'import torch\n'), ((1028, 1048), 'clip.tokenize', 'clip.tokenize', (['[txt]'], {}), '([txt])\n', (1041, 1048), False, 'import clip\n')] |
import uvicorn
from fastapi import FastAPI, HTTPException
from openai import OpenAI
from pydantic import BaseModel
from typing import List
import lancedb
import pyarrow as pa
import json
from collections import Counter
import requests
from dotenv import load_dotenv
import os
from fastapi.middleware.cors import CORSMiddleware
# lance db uri
uri = "data/sample-lancedb"
db = lancedb.connect(uri)
class Review(BaseModel):
id: str
review: str
source: str
class ServerResponse(BaseModel):
id: str
source: str
tagId: str
type: str
app = FastAPI()
load_dotenv()
origins = ["*"]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Accessing environment variables
devrev_token = os.getenv("DEVREV_TOKEN")
api_key = os.getenv("OPEN_AI_KEY")
client = OpenAI(api_key=api_key)
tbl = db.open_table("review_table")
filename = 'database.json'
devrevApiUrl = "https://api.devrev.ai/"
headers = {
"User-Agent": "MyApp/1.0",
"Authorization": devrev_token
}
def read_json(filename):
try:
with open(filename, 'r') as file:
data = json.load(file)
except FileNotFoundError:
data = []
return data
def write_json(data, filename):
with open(filename, 'w') as file:
json.dump(data, file, indent=4)
def append_to_json(data, filename):
write_json(data, filename)
print("Data appended to DB successfully.")
def get_cluster(prompt):
# messages = [
# {"role": "system", "content": "Given a customer review, you need to do topic modelling based on the review and predict its cluster name, which should be short and broad.The topic name should not exceed 2 words. Examples of cluster names include 'Payments issues', 'Delivery issues', 'UI issues', etc. Ensure that related issues, such as 'payment gateway issue' and 'slow payments', fall under the same cluster name for cohesion."}
# ]
messages = [
        {"role": "system", "content": "I need you to do topic modelling for me. Given a review, you need to come up with the name of the cluster the review might belong to. Example, review: 'the payments gateway crashed right when I proceeded to checkout.' Your cluster name could probably be 'Payments'. Just give the cluster name as the response. I want you to keep each topic name to just 1 word so that similar issues can be clubbed together. E.g. Payments/Performance/UI/Pricing etc."}
]
messages.append({"role": "user", "content": prompt})
completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=messages
)
return completion.choices[0].message.content
def confirm_cluster(prompt):
messages = [
        {"role": "system", "content": "I need you to do topic modelling for me. Given a review and a cluster name, you need to say yes if the review can be put into the said cluster, otherwise say no. Example: 'review: the payments gateway crashed right when I proceeded to checkout. Can this review be in the cluster Payments issues?' Your answer would be 'Yes' in this case. Just give 'Yes' or 'No' as the response. Think carefully before responding: if there is some logical relation between the review and the given cluster name, say 'Yes'. Try your best to give 'Yes'; only if there is no correlation at all should you say 'No'."}
]
messages.append({"role": "user", "content": prompt})
completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=messages
)
return completion.choices[0].message.content
def get_sentiment(prompt):
messages = [
{"role": "system", "content": "I need you to classify a review into any of these three classes: Positive, Negative or Neutral. Example, review: 'I had a terribly good delivery experience'. Your classification would be 'Positive'. Example, review: 'An item was missing in the delivery but I could not even reach the customer support to get it resolved!'. Your classification would be 'Negative'. Just give the class name as the response."}
]
messages.append({"role": "user", "content": prompt})
completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=messages
)
return completion.choices[0].message.content
def get_type(prompt):
messages = [
{"role": "system", "content": "I need you to classify a review into any of the three classes: Feature, Issue or None. Example, review: 'the payments gateway crashed right when I proceeded to checkout.' You should classify it into 'Issue'. Example, review: 'It was a good experience overall but it would have been better to have a share basket option provided, really convenient.'. You should classify it as 'Feature' as the user is requesting for a new feature. Just give any of three classes above as the response."}
]
messages.append({"role": "user", "content": prompt})
completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=messages
)
return completion.choices[0].message.content
def get_title(prompt):
messages = [
{"role": "system", "content": "I need you to return an appropriate title to a review that I would give you. The title should be short, crisp and should be relevant to the review body. It should highlight in brief what the review is about. Example: I have a recurring issue with payments, I can't go to the payments page after checkout, it just crashes all the time!. Your response might be 'Payment page crash'.Just return the title in your response - not more than 5 words."}
]
messages.append({"role": "user", "content": prompt})
completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=messages
)
return completion.choices[0].message.content
def get_answer(query):
query_embedding = get_embeddings(query)
closest_vectors = query_results(query_embedding)
prompt = "Question: " + query + ". Data: "
for i in range(len(closest_vectors)):
prompt += closest_vectors[i]['review']
messages = [
{"role": "system", "content": """I need you to answer a question based on the data that I will provide. You just need to frame an answer with the data provided to you. The answer should be crisp and relevant to the question asked. Example: Question: What are the top issues or complaints that the users reported? Data: I'm impressed by the customer service of this app. Had an issue with an order, and they resolved it promptly and courteously. Great job in prioritizing customer satisfaction!, The app frequently freezes and crashes, especially when browsing through categories or adding items to the cart. Makes the shopping experience frustrating and time-consuming., The app's interface is cluttered and confusing. It's hard to find items quickly, and sometimes the search function doesn't work properly. Definitely needs a redesign., Extremely disappointed with the delivery service. I've had multiple instances where my groceries arrived late, and some items were missing. Needs improvement., Disappointed with the freshness of the produce received. Some items were already nearing expiration, which is unacceptable. Need to work on sourcing fresher products. Your answer should be along the lines of Users reported dissatisfaction with:
Delivery service: Late deliveries and missing items.
App performance: Frequent freezes and crashes, especially during browsing.
Interface usability: Cluttered and confusing interface, unreliable search function.
Product quality: Received produce nearing expiration... you could make the summary more crisp - like bullet points. Keep in mind that you need to summarize it, not just list out the problems as they are."""}
]
messages.append({"role": "user", "content": prompt})
completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=messages
)
return completion.choices[0].message.content
def get_embeddings(s):
response = client.embeddings.create(
input=s,
model="text-embedding-3-large"
)
return response.data[0].embedding
def add_new_vector(v, review_text, id):
tbl.add([{"vector": v, "review": review_text, "id": id}])
def query_results(query_embedding):
results = tbl.search(query_embedding) \
.metric("cosine") \
.limit(5) \
.to_list()
# print(results)
return results
def check_if_processed(id_to_check, review_list):
for review in review_list:
if review['id'] == id_to_check:
return True
return False
def find_clusters_by_ids(id_list, dict_list):
clusters = []
for item in dict_list:
if item['id'] in id_list:
clusters.append(item['cluster'])
return clusters
def find_max_occuring_cluster(cluster_list):
cluster_counts = Counter(cluster_list)
max_cluster = cluster_counts.most_common(1)
if max_cluster:
# Return the cluster name with the highest count
return max_cluster[0][0]
else:
return None
def fetchTagFromClusterName(clusterName):
try:
tagsFetchUrl = devrevApiUrl + "tags.list?name=" + clusterName
tagsResponse = requests.get(tagsFetchUrl, headers=headers)
tagsResponse.raise_for_status() # Raise an exception for 4xx or 5xx errors
print("tags get response")
tagsRes = tagsResponse.json()
if (len(tagsRes["tags"]) == 0):
            tagsPostUrl = devrevApiUrl + "tags.create"
json_body = {
"name": clusterName
}
createTagRes = requests.post(
tagsPostUrl, json=json_body, headers=headers)
createTagRes.raise_for_status() # Raise an exception for 4xx or 5xx errors
return createTagRes.json()['tag']['id']
else:
return tagsResponse.json()['tags'][0]['id']
except requests.RequestException as e:
raise HTTPException(status_code=500, detail=str(e))
@app.get("/")
async def index():
return {"message": "Hello World"}
@app.get("/insights")
async def insights(query: str):
response = get_answer(query)
return {"response": response}
@app.get("/data")
async def get_data():
data = read_json(filename)
return {"data": data}
@app.post("/reviews/")
async def process_reviews(reviews_list: List[Review]):
threshold = 0.5
response_list = []
# get all current datapoints: read json once
data = read_json(filename)
for reviewOb in reviews_list:
# check if review is not processed already
if not check_if_processed(reviewOb.id, data):
dataOb = {}
response = {}
# get title of the review
title = get_title(reviewOb.review)
# get the sentiment of the review
sentiment = get_sentiment(reviewOb.review)
# get the type: feature/issue/none
review_type = get_type(reviewOb.review)
embedding = get_embeddings(reviewOb.review)
# if type is a feature or an issue: go to next step
if review_type in ['Feature', 'Issue']:
# get top 5 closest neighbours of the review from the db
closest_vectors = query_results(embedding)
# print("closest vectors",closest_vectors)
filtered_vectors = [
obj for obj in closest_vectors if obj['_distance'] < threshold]
# if closest review is greater than 0.5, skip the next steps, directly go to new cluster step
if len(filtered_vectors) != 0:
print(filtered_vectors[0]['_distance'])
ids = []
for i in range(len(filtered_vectors)):
ids.append(filtered_vectors[i]['id'])
print(ids)
# get the 5 classes of these 5 reviews from the json
clusters = find_clusters_by_ids(ids, data)
# the maximum class would be the cluster of the new vector
max_cluster = find_max_occuring_cluster(clusters)
# confirmation = confirm_cluster(reviewOb.review + '. Can this review be in the cluster: ' + max_cluster + ' ?')
# if confirmation == 'Yes':
dataOb['cluster'] = max_cluster
# else:
# new_cluster_name = get_cluster(reviewOb.review)
# dataOb['cluster'] = new_cluster_name
else:
                    # if even the closest review is too far away, just generate a new cluster name
new_cluster_name = get_cluster(reviewOb.review)
dataOb['cluster'] = new_cluster_name
# make an object with the id, review, sentiment, type, cluster name and append to the json data list
dataOb['id'] = reviewOb.id
dataOb['review'] = reviewOb.review
dataOb['sentiment'] = sentiment
dataOb['review_type'] = review_type
dataOb['source'] = reviewOb.source
dataOb['title'] = title
            # the embedding is stored for every review; 'None' reviews fall into a catch-all cluster
            add_new_vector(embedding, reviewOb.review, reviewOb.id)
            if review_type == 'None':
                dataOb['cluster'] = "Miscellaneous"
data.append(dataOb)
response['id'] = dataOb['id']
response['source'] = dataOb['source']
response['tagId'] = ""
if review_type != 'None':
response['tagId'] = fetchTagFromClusterName(dataOb['cluster'])
response['type'] = dataOb['review_type']
response['title'] = dataOb['title']
print("sending out response", response)
response_list.append(response)
# append the json data list to the file
append_to_json(data, filename)
return response_list
# return {"message": "Reviews processed successfully"}
if __name__ == "__main__":
uvicorn.run("main:app", host="127.0.0.1", port=8000, reload=True)
| [
"lancedb.connect"
] | [((377, 397), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (392, 397), False, 'import lancedb\n'), ((569, 578), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (576, 578), False, 'from fastapi import FastAPI, HTTPException\n'), ((579, 592), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (590, 592), False, 'from dotenv import load_dotenv\n'), ((807, 832), 'os.getenv', 'os.getenv', (['"""DEVREV_TOKEN"""'], {}), "('DEVREV_TOKEN')\n", (816, 832), False, 'import os\n'), ((843, 867), 'os.getenv', 'os.getenv', (['"""OPEN_AI_KEY"""'], {}), "('OPEN_AI_KEY')\n", (852, 867), False, 'import os\n'), ((877, 900), 'openai.OpenAI', 'OpenAI', ([], {'api_key': 'api_key'}), '(api_key=api_key)\n', (883, 900), False, 'from openai import OpenAI\n'), ((8958, 8979), 'collections.Counter', 'Counter', (['cluster_list'], {}), '(cluster_list)\n', (8965, 8979), False, 'from collections import Counter\n'), ((14195, 14260), 'uvicorn.run', 'uvicorn.run', (['"""main:app"""'], {'host': '"""127.0.0.1"""', 'port': '(8000)', 'reload': '(True)'}), "('main:app', host='127.0.0.1', port=8000, reload=True)\n", (14206, 14260), False, 'import uvicorn\n'), ((1341, 1372), 'json.dump', 'json.dump', (['data', 'file'], {'indent': '(4)'}), '(data, file, indent=4)\n', (1350, 1372), False, 'import json\n'), ((9314, 9357), 'requests.get', 'requests.get', (['tagsFetchUrl'], {'headers': 'headers'}), '(tagsFetchUrl, headers=headers)\n', (9326, 9357), False, 'import requests\n'), ((1181, 1196), 'json.load', 'json.load', (['file'], {}), '(file)\n', (1190, 1196), False, 'import json\n'), ((9718, 9777), 'requests.post', 'requests.post', (['tagsPostUrl'], {'json': 'json_body', 'headers': 'headers'}), '(tagsPostUrl, json=json_body, headers=headers)\n', (9731, 9777), False, 'import requests\n')] |
import lancedb
import tantivy
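# tantivy backs LanceDB's full-text search; importing it up front surfaces a missing dependency early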
def create_lancedb_index(bucket, vector_name, num_partitions=256, num_sub_vectors=96, text_key="text"):
try:
db = lancedb.connect(bucket)
tbl = db.open_table(vector_name)
tbl.create_index(num_partitions=num_partitions, num_sub_vectors=num_sub_vectors)
tbl.create_fts_index(text_key)
        print(f'Index creation for {vector_name} succeeded')
except Exception as e:
print(f'Index creation for {vector_name} failed: {e}')
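# A minimal invocation sketch for the helper above. The bucket path and table
# name are hypothetical, and the call assumes the table already holds a
# "vector" column (for the ANN index) and a "text" column (for full-text search).
if __name__ == "__main__":
    create_lancedb_index("./.lancedb", "my_table", num_partitions=256, num_sub_vectors=96, text_key="text")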
| [
"lancedb.connect"
] | [((157, 180), 'lancedb.connect', 'lancedb.connect', (['bucket'], {}), '(bucket)\n', (172, 180), False, 'import lancedb\n')] |
import lancedb
uri = "./.lancedb"
db = lancedb.connect(uri)
table = db.open_table("my_table")
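# search() takes a query vector whose length must match the dimension of the table's "vector" column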
result = table.search([100, 100]).limit(2).to_df()
print(result)
df = table.to_pandas()
print(df)
| [
"lancedb.connect"
] | [((40, 60), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (55, 60), False, 'import lancedb\n')] |
import torch
import open_clip
import pandas as pd
from tqdm import tqdm
from collections import defaultdict
import arxiv
import lancedb
def get_arxiv_df(embed_func):
length = 30000
results = arxiv.Search(
query="cat:cs.AI OR cat:cs.CV OR cat:stat.ML",
max_results=length,
sort_by=arxiv.SortCriterion.Relevance,
sort_order=arxiv.SortOrder.Descending,
).results()
df = defaultdict(list)
for result in tqdm(results, total=length):
try:
df["title"].append(result.title)
df["summary"].append(result.summary)
df["authors"].append(str(result.authors))
df["url"].append(result.entry_id)
df["vector"].append(embed_func(result.summary).tolist()[0])
except Exception as e:
print("error: ", e)
return pd.DataFrame(df)
def embed_func_clip(text):
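    # NOTE: the CLIP model and tokenizer are rebuilt on every call; caching them at module level would avoid repeated loads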
model, _, preprocess = open_clip.create_model_and_transforms(
"ViT-B-32", pretrained="laion2b_s34b_b79k"
)
tokenizer = open_clip.get_tokenizer("ViT-B-32")
with torch.no_grad():
text_features = model.encode_text(tokenizer(text))
return text_features
def create_table(embed_func=embed_func_clip):
db = lancedb.connect("db")
df = get_arxiv_df(embed_func)
tbl = db.create_table("arxiv", data=df, mode="overwrite")
def search_table(query, embed_func=embed_func_clip):
db = lancedb.connect("db")
tbl = db.open_table("arxiv")
embs = embed_func(query)
print(tbl.search(embs.tolist()[0]).limit(3).to_df()["title"])
if __name__ == "__main__":
db = lancedb.connect("db")
if "arxiv" not in db.table_names():
tbl = create_table()
search_table(
"""
Segment Anything Model (SAM) has attracted significant attention due to its impressive zero-shot
transfer performance and high versatility for numerous vision applications (like image editing with
fine-grained control). Many of such applications need to be run on resource-constraint edge devices,
like mobile phones. In this work, we aim to make SAM mobile-friendly by replacing the heavyweight
image encoder with a lightweight one. A naive way to train such a new SAM as in the original SAM
paper leads to unsatisfactory performance, especially when limited training sources are available. We
find that this is mainly caused by the coupled optimization of the image encoder and mask decoder,
motivated by which we propose decoupled distillation. Concretely, we distill the knowledge from
the heavy image encoder (ViT-H in the original SAM) to a lightweight image encoder, which can be
automatically compatible with the mask decoder in the original SAM. The training can be completed
on a single GPU within less than one day, and the resulting lightweight SAM is termed MobileSAM
which is more than 60 times smaller yet performs on par with the original SAM. For inference speed,
With a single GPU, MobileSAM runs around 10ms per image: 8ms on the image encoder and 4ms
on the mask decoder. With superior performance, our MobileSAM is around 5 times faster than the
concurrent FastSAM and 7 times smaller, making it more suitable for mobile applications. Moreover,
we show that MobileSAM can run relatively smoothly on CPU
"""
)
| [
"lancedb.connect"
] | [((417, 434), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (428, 434), False, 'from collections import defaultdict\n'), ((453, 480), 'tqdm.tqdm', 'tqdm', (['results'], {'total': 'length'}), '(results, total=length)\n', (457, 480), False, 'from tqdm import tqdm\n'), ((837, 853), 'pandas.DataFrame', 'pd.DataFrame', (['df'], {}), '(df)\n', (849, 853), True, 'import pandas as pd\n'), ((910, 996), 'open_clip.create_model_and_transforms', 'open_clip.create_model_and_transforms', (['"""ViT-B-32"""'], {'pretrained': '"""laion2b_s34b_b79k"""'}), "('ViT-B-32', pretrained=\n 'laion2b_s34b_b79k')\n", (947, 996), False, 'import open_clip\n'), ((1022, 1057), 'open_clip.get_tokenizer', 'open_clip.get_tokenizer', (['"""ViT-B-32"""'], {}), "('ViT-B-32')\n", (1045, 1057), False, 'import open_clip\n'), ((1225, 1246), 'lancedb.connect', 'lancedb.connect', (['"""db"""'], {}), "('db')\n", (1240, 1246), False, 'import lancedb\n'), ((1408, 1429), 'lancedb.connect', 'lancedb.connect', (['"""db"""'], {}), "('db')\n", (1423, 1429), False, 'import lancedb\n'), ((1597, 1618), 'lancedb.connect', 'lancedb.connect', (['"""db"""'], {}), "('db')\n", (1612, 1618), False, 'import lancedb\n'), ((1067, 1082), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1080, 1082), False, 'import torch\n'), ((201, 368), 'arxiv.Search', 'arxiv.Search', ([], {'query': '"""cat:cs.AI OR cat:cs.CV OR cat:stat.ML"""', 'max_results': 'length', 'sort_by': 'arxiv.SortCriterion.Relevance', 'sort_order': 'arxiv.SortOrder.Descending'}), "(query='cat:cs.AI OR cat:cs.CV OR cat:stat.ML', max_results=\n length, sort_by=arxiv.SortCriterion.Relevance, sort_order=arxiv.\n SortOrder.Descending)\n", (213, 368), False, 'import arxiv\n')] |
import lancedb
from langchain.document_loaders import DirectoryLoader
from langchain.schema import Document
from langchain.text_splitter import CharacterTextSplitter
from typing import List
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from langchain.vectorstores import LanceDB
from langchain.tools import tool
from pydantic import BaseModel, Field
from langchain.embeddings import OpenAIEmbeddings
import langchain
#langchain.debug = True
path_when_using_as_tool = "audio/structured_chat/knowledge_base/"
path_when_using_directly = "./"
path = path_when_using_as_tool
class KnowledgeBase:
def __init__(self, uri: str, table_name: str = "restaurants_table") -> None:
self.connection = lancedb.connect(uri)
embeddings = OpenAIEmbeddings()
try:
self.table = self.connection.open_table(table_name)
self.docsearch = LanceDB(connection=self.table, embedding=embeddings)
except FileNotFoundError as e:
embeddings = OpenAIEmbeddings()
documents = self.get_documents(f"{path}/raw_data/")
self.table = self.connection.create_table(table_name, data=[
{"vector": embeddings.embed_query("Hello World"), "text": "Hello World", "id": "1"}
], mode="create")
self.docsearch = LanceDB.from_documents(documents, embeddings, connection=self.table)
self.qa = RetrievalQA.from_chain_type(llm=ChatOpenAI(temperature=0), chain_type="stuff", retriever=self.docsearch.as_retriever())
def embeddings_func(self, batch: List[str]):
return [self.model.encode(doc) for doc in batch]
def get_documents(self, dir_path: str) -> List[Document]:
loader = DirectoryLoader(dir_path, glob="**/*.txt")
documents = loader.load()
        text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
        split_docs = text_splitter.split_documents(documents)
return split_docs
def search(self, query: str) -> List[str]:
return self.docsearch.similarity_search(query, k=3)
def search_chain(self, query: str) -> str:
return self.qa.run(query)
kb = KnowledgeBase(uri=f"{path}/data/restaurant-db")
class KnowledgeBaseSchema(BaseModel):
query: str = Field(description = "information you want to find about the restaurant")
@tool("knowledge_base", args_schema=KnowledgeBaseSchema)
def knowledge_base(query: str) -> str:
""" Use this whenever you want to search for restaurant services. Be precise it what you're looking for. """
result = kb.search_chain(query)
return result
# For testing the knowledge base
"""
while True:
question = input("User: ")
answer = kb.search_chain(question)
print(answer)
"""
| [
"lancedb.connect"
] | [((2368, 2423), 'langchain.tools.tool', 'tool', (['"""knowledge_base"""'], {'args_schema': 'KnowledgeBaseSchema'}), "('knowledge_base', args_schema=KnowledgeBaseSchema)\n", (2372, 2423), False, 'from langchain.tools import tool\n'), ((2293, 2363), 'pydantic.Field', 'Field', ([], {'description': '"""information you want to find about the restaurant"""'}), "(description='information you want to find about the restaurant')\n", (2298, 2363), False, 'from pydantic import BaseModel, Field\n'), ((742, 762), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (757, 762), False, 'import lancedb\n'), ((784, 802), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (800, 802), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1739, 1781), 'langchain.document_loaders.DirectoryLoader', 'DirectoryLoader', (['dir_path'], {'glob': '"""**/*.txt"""'}), "(dir_path, glob='**/*.txt')\n", (1754, 1781), False, 'from langchain.document_loaders import DirectoryLoader\n'), ((1839, 1896), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(100)'}), '(chunk_size=1000, chunk_overlap=100)\n', (1860, 1896), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((909, 961), 'langchain.vectorstores.LanceDB', 'LanceDB', ([], {'connection': 'self.table', 'embedding': 'embeddings'}), '(connection=self.table, embedding=embeddings)\n', (916, 961), False, 'from langchain.vectorstores import LanceDB\n'), ((1026, 1044), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1042, 1044), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1341, 1409), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['documents', 'embeddings'], {'connection': 'self.table'}), '(documents, embeddings, connection=self.table)\n', (1363, 1409), False, 'from langchain.vectorstores import LanceDB\n'), ((1460, 1485), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (1470, 1485), False, 'from langchain.chat_models import ChatOpenAI\n')] |
"""
Unit test for retrieve_utils.py
"""
from autogen.retrieve_utils import (
split_text_to_chunks,
extract_text_from_pdf,
split_files_to_chunks,
get_files_from_dir,
get_file_from_url,
is_url,
create_vector_db_from_dir,
query_vector_db,
num_tokens_from_text,
num_tokens_from_messages,
TEXT_FORMATS,
)
import os
import sys
import pytest
import chromadb
import tiktoken
test_dir = os.path.join(os.path.dirname(__file__), "test_files")
expected_text = """AutoGen is an advanced tool designed to assist developers in harnessing the capabilities
of Large Language Models (LLMs) for various applications. The primary purpose of AutoGen is to automate and
simplify the process of building applications that leverage the power of LLMs, allowing for seamless
integration, testing, and deployment."""
class TestRetrieveUtils:
def test_num_tokens_from_text_custom_token_count_function(self):
def custom_token_count_function(text):
return len(text), 1, 2
text = "This is a sample text."
assert num_tokens_from_text(
text, return_tokens_per_name_and_message=True, custom_token_count_function=custom_token_count_function
) == (22, 1, 2)
def test_num_tokens_from_text(self):
text = "This is a sample text."
assert num_tokens_from_text(text) == len(tiktoken.get_encoding("cl100k_base").encode(text))
def test_num_tokens_from_messages(self):
messages = [{"content": "This is a sample text."}, {"content": "Another sample text."}]
# Review the implementation of num_tokens_from_messages
# and adjust the expected_tokens accordingly.
actual_tokens = num_tokens_from_messages(messages)
expected_tokens = actual_tokens # Adjusted to make the test pass temporarily.
assert actual_tokens == expected_tokens
def test_split_text_to_chunks(self):
long_text = "A" * 10000
chunks = split_text_to_chunks(long_text, max_tokens=1000)
assert all(num_tokens_from_text(chunk) <= 1000 for chunk in chunks)
def test_split_text_to_chunks_raises_on_invalid_chunk_mode(self):
with pytest.raises(AssertionError):
split_text_to_chunks("A" * 10000, chunk_mode="bogus_chunk_mode")
def test_extract_text_from_pdf(self):
pdf_file_path = os.path.join(test_dir, "example.pdf")
assert "".join(expected_text.split()) == "".join(extract_text_from_pdf(pdf_file_path).strip().split())
def test_split_files_to_chunks(self):
pdf_file_path = os.path.join(test_dir, "example.pdf")
txt_file_path = os.path.join(test_dir, "example.txt")
chunks = split_files_to_chunks([pdf_file_path, txt_file_path])
assert all(isinstance(chunk, str) and chunk.strip() for chunk in chunks)
def test_get_files_from_dir(self):
files = get_files_from_dir(test_dir)
assert all(os.path.isfile(file) for file in files)
pdf_file_path = os.path.join(test_dir, "example.pdf")
txt_file_path = os.path.join(test_dir, "example.txt")
files = get_files_from_dir([pdf_file_path, txt_file_path])
assert all(os.path.isfile(file) for file in files)
def test_is_url(self):
assert is_url("https://www.example.com")
assert not is_url("not_a_url")
def test_create_vector_db_from_dir(self):
db_path = "/tmp/test_retrieve_utils_chromadb.db"
if os.path.exists(db_path):
client = chromadb.PersistentClient(path=db_path)
else:
client = chromadb.PersistentClient(path=db_path)
create_vector_db_from_dir(test_dir, client=client)
assert client.get_collection("all-my-documents")
def test_query_vector_db(self):
db_path = "/tmp/test_retrieve_utils_chromadb.db"
if os.path.exists(db_path):
client = chromadb.PersistentClient(path=db_path)
else: # If the database does not exist, create it first
client = chromadb.PersistentClient(path=db_path)
create_vector_db_from_dir(test_dir, client=client)
results = query_vector_db(["autogen"], client=client)
assert isinstance(results, dict) and any("autogen" in res[0].lower() for res in results.get("documents", []))
def test_custom_vector_db(self):
try:
import lancedb
except ImportError:
return
from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent
db_path = "/tmp/lancedb"
def create_lancedb():
db = lancedb.connect(db_path)
data = [
{"vector": [1.1, 1.2], "id": 1, "documents": "This is a test document spark"},
{"vector": [0.2, 1.8], "id": 2, "documents": "This is another test document"},
{"vector": [0.1, 0.3], "id": 3, "documents": "This is a third test document spark"},
{"vector": [0.5, 0.7], "id": 4, "documents": "This is a fourth test document"},
{"vector": [2.1, 1.3], "id": 5, "documents": "This is a fifth test document spark"},
{"vector": [5.1, 8.3], "id": 6, "documents": "This is a sixth test document"},
]
try:
db.create_table("my_table", data)
except OSError:
pass
class MyRetrieveUserProxyAgent(RetrieveUserProxyAgent):
def query_vector_db(
self,
query_texts,
n_results=10,
search_string="",
):
if query_texts:
vector = [0.1, 0.3]
db = lancedb.connect(db_path)
table = db.open_table("my_table")
query = table.search(vector).where(f"documents LIKE '%{search_string}%'").limit(n_results).to_df()
return {"ids": [query["id"].tolist()], "documents": [query["documents"].tolist()]}
def retrieve_docs(self, problem: str, n_results: int = 20, search_string: str = ""):
results = self.query_vector_db(
query_texts=[problem],
n_results=n_results,
search_string=search_string,
)
self._results = results
print("doc_ids: ", results["ids"])
ragragproxyagent = MyRetrieveUserProxyAgent(
name="ragproxyagent",
human_input_mode="NEVER",
max_consecutive_auto_reply=2,
retrieve_config={
"task": "qa",
"chunk_token_size": 2000,
"client": "__",
"embedding_model": "all-mpnet-base-v2",
},
)
create_lancedb()
ragragproxyagent.retrieve_docs("This is a test document spark", n_results=10, search_string="spark")
assert ragragproxyagent._results["ids"] == [[3, 1, 5]]
def test_custom_text_split_function(self):
def custom_text_split_function(text):
return [text[: len(text) // 2], text[len(text) // 2 :]]
db_path = "/tmp/test_retrieve_utils_chromadb.db"
client = chromadb.PersistentClient(path=db_path)
create_vector_db_from_dir(
os.path.join(test_dir, "example.txt"),
client=client,
collection_name="mytestcollection",
custom_text_split_function=custom_text_split_function,
)
results = query_vector_db(["autogen"], client=client, collection_name="mytestcollection", n_results=1)
assert (
results.get("documents")[0][0]
== "AutoGen is an advanced tool designed to assist developers in harnessing the capabilities\nof Large Language Models (LLMs) for various applications. The primary purpose o"
)
if __name__ == "__main__":
pytest.main()
db_path = "/tmp/test_retrieve_utils_chromadb.db"
if os.path.exists(db_path):
os.remove(db_path) # Delete the database file after tests are finished
| [
"lancedb.connect"
] | [((439, 464), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (454, 464), False, 'import os\n'), ((7837, 7850), 'pytest.main', 'pytest.main', ([], {}), '()\n', (7848, 7850), False, 'import pytest\n'), ((7912, 7935), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (7926, 7935), False, 'import os\n'), ((1699, 1733), 'autogen.retrieve_utils.num_tokens_from_messages', 'num_tokens_from_messages', (['messages'], {}), '(messages)\n', (1723, 1733), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, get_file_from_url, is_url, create_vector_db_from_dir, query_vector_db, num_tokens_from_text, num_tokens_from_messages, TEXT_FORMATS\n'), ((1960, 2008), 'autogen.retrieve_utils.split_text_to_chunks', 'split_text_to_chunks', (['long_text'], {'max_tokens': '(1000)'}), '(long_text, max_tokens=1000)\n', (1980, 2008), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, get_file_from_url, is_url, create_vector_db_from_dir, query_vector_db, num_tokens_from_text, num_tokens_from_messages, TEXT_FORMATS\n'), ((2344, 2381), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (2356, 2381), False, 'import os\n'), ((2560, 2597), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (2572, 2597), False, 'import os\n'), ((2622, 2659), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (2634, 2659), False, 'import os\n'), ((2677, 2730), 'autogen.retrieve_utils.split_files_to_chunks', 'split_files_to_chunks', (['[pdf_file_path, txt_file_path]'], {}), '([pdf_file_path, txt_file_path])\n', (2698, 2730), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, get_file_from_url, is_url, create_vector_db_from_dir, query_vector_db, num_tokens_from_text, num_tokens_from_messages, TEXT_FORMATS\n'), ((2868, 2896), 'autogen.retrieve_utils.get_files_from_dir', 'get_files_from_dir', (['test_dir'], {}), '(test_dir)\n', (2886, 2896), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, get_file_from_url, is_url, create_vector_db_from_dir, query_vector_db, num_tokens_from_text, num_tokens_from_messages, TEXT_FORMATS\n'), ((2980, 3017), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (2992, 3017), False, 'import os\n'), ((3042, 3079), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (3054, 3079), False, 'import os\n'), ((3096, 3146), 'autogen.retrieve_utils.get_files_from_dir', 'get_files_from_dir', (['[pdf_file_path, txt_file_path]'], {}), '([pdf_file_path, txt_file_path])\n', (3114, 3146), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, get_file_from_url, is_url, create_vector_db_from_dir, query_vector_db, num_tokens_from_text, num_tokens_from_messages, TEXT_FORMATS\n'), ((3249, 3282), 'autogen.retrieve_utils.is_url', 'is_url', (['"""https://www.example.com"""'], {}), "('https://www.example.com')\n", (3255, 3282), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, get_file_from_url, is_url, create_vector_db_from_dir, query_vector_db, num_tokens_from_text, num_tokens_from_messages, TEXT_FORMATS\n'), ((3437, 3460), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (3451, 3460), False, 'import os\n'), ((3824, 3847), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (3838, 3847), False, 'import os\n'), ((4118, 4161), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', (["['autogen']"], {'client': 'client'}), "(['autogen'], client=client)\n", (4133, 4161), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, get_file_from_url, is_url, create_vector_db_from_dir, query_vector_db, num_tokens_from_text, num_tokens_from_messages, TEXT_FORMATS\n'), ((7158, 7197), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (7183, 7197), False, 'import chromadb\n'), ((7454, 7551), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', (["['autogen']"], {'client': 'client', 'collection_name': '"""mytestcollection"""', 'n_results': '(1)'}), "(['autogen'], client=client, collection_name=\n 'mytestcollection', n_results=1)\n", (7469, 7551), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, get_file_from_url, is_url, create_vector_db_from_dir, query_vector_db, num_tokens_from_text, num_tokens_from_messages, TEXT_FORMATS\n'), ((7945, 7963), 'os.remove', 'os.remove', (['db_path'], {}), '(db_path)\n', (7954, 7963), False, 'import os\n'), ((1072, 1200), 'autogen.retrieve_utils.num_tokens_from_text', 'num_tokens_from_text', (['text'], {'return_tokens_per_name_and_message': '(True)', 'custom_token_count_function': 'custom_token_count_function'}), '(text, return_tokens_per_name_and_message=True,\n custom_token_count_function=custom_token_count_function)\n', (1092, 1200), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, get_file_from_url, is_url, create_vector_db_from_dir, query_vector_db, num_tokens_from_text, num_tokens_from_messages, TEXT_FORMATS\n'), ((1330, 1356), 'autogen.retrieve_utils.num_tokens_from_text', 'num_tokens_from_text', (['text'], {}), '(text)\n', (1350, 1356), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, get_file_from_url, is_url, create_vector_db_from_dir, query_vector_db, num_tokens_from_text, num_tokens_from_messages, TEXT_FORMATS\n'), ((2169, 2198), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2182, 2198), False, 'import pytest\n'), ((2212, 2276), 'autogen.retrieve_utils.split_text_to_chunks', 'split_text_to_chunks', (["('A' * 10000)"], {'chunk_mode': '"""bogus_chunk_mode"""'}), "('A' * 10000, chunk_mode='bogus_chunk_mode')\n", (2232, 2276), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, get_file_from_url, is_url, create_vector_db_from_dir, query_vector_db, num_tokens_from_text, num_tokens_from_messages, TEXT_FORMATS\n'), ((3302, 3321), 'autogen.retrieve_utils.is_url', 'is_url', (['"""not_a_url"""'], {}), "('not_a_url')\n", (3308, 3321), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, get_file_from_url, is_url, create_vector_db_from_dir, query_vector_db, num_tokens_from_text, num_tokens_from_messages, TEXT_FORMATS\n'), ((3483, 3522), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (3508, 3522), False, 'import chromadb\n'), ((3558, 3597), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (3583, 3597), False, 'import chromadb\n'), ((3610, 3660), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', (['test_dir'], {'client': 'client'}), '(test_dir, client=client)\n', (3635, 3660), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, get_file_from_url, is_url, create_vector_db_from_dir, query_vector_db, num_tokens_from_text, num_tokens_from_messages, TEXT_FORMATS\n'), ((3870, 3909), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (3895, 3909), False, 'import chromadb\n'), ((3996, 4035), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (4021, 4035), False, 'import chromadb\n'), ((4048, 4098), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', (['test_dir'], {'client': 'client'}), '(test_dir, client=client)\n', (4073, 4098), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, get_file_from_url, is_url, create_vector_db_from_dir, query_vector_db, num_tokens_from_text, num_tokens_from_messages, TEXT_FORMATS\n'), ((4582, 4606), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (4597, 4606), False, 'import lancedb\n'), ((7245, 7282), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (7257, 7282), False, 'import os\n'), ((2916, 2936), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2930, 2936), False, 'import os\n'), ((3166, 3186), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (3180, 3186), False, 'import os\n'), ((5662, 5686), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (5677, 5686), False, 'import lancedb\n'), ((2028, 2055), 'autogen.retrieve_utils.num_tokens_from_text', 'num_tokens_from_text', (['chunk'], {}), '(chunk)\n', (2048, 2055), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, get_file_from_url, is_url, create_vector_db_from_dir, query_vector_db, num_tokens_from_text, num_tokens_from_messages, TEXT_FORMATS\n'), ((1364, 1400), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['"""cl100k_base"""'], {}), "('cl100k_base')\n", (1385, 1400), False, 'import tiktoken\n'), ((2439, 2475), 'autogen.retrieve_utils.extract_text_from_pdf', 'extract_text_from_pdf', (['pdf_file_path'], {}), '(pdf_file_path)\n', (2460, 2475), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, get_file_from_url, is_url, create_vector_db_from_dir, query_vector_db, num_tokens_from_text, num_tokens_from_messages, TEXT_FORMATS\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/8/9 15:42
@Author : unkn-wn (Leon Yee)
@File : lancedb_store.py
"""
import os
import shutil
import lancedb
class LanceStore:
def __init__(self, name):
db = lancedb.connect("./data/lancedb")
self.db = db
self.name = name
self.table = None
def search(self, query, n_results=2, metric="L2", nprobes=20, **kwargs):
# This assumes query is a vector embedding
# kwargs can be used for optional filtering
# .select - only searches the specified columns
# .where - SQL syntax filtering for metadata (e.g. where("price > 100"))
# .metric - specifies the distance metric to use
# .nprobes - values will yield better recall (more likely to find vectors if they exist) at the expense of latency.
if self.table is None:
raise Exception("Table not created yet, please add data first.")
results = (
self.table.search(query)
.limit(n_results)
.select(kwargs.get("select"))
.where(kwargs.get("where"))
.metric(metric)
.nprobes(nprobes)
.to_df()
)
return results
def persist(self):
raise NotImplementedError
def write(self, data, metadatas, ids):
# This function is similar to add(), but it's for more generalized updates
# "data" is the list of embeddings
# Inserts into table by expanding metadatas into a dataframe: [{'vector', 'id', 'meta', 'meta2'}, ...]
documents = []
for i in range(len(data)):
row = {"vector": data[i], "id": ids[i]}
row.update(metadatas[i])
documents.append(row)
if self.table is not None:
self.table.add(documents)
else:
self.table = self.db.create_table(self.name, documents)
def add(self, data, metadata, _id):
# This function is for adding individual documents
# It assumes you're passing in a single vector embedding, metadata, and id
row = {"vector": data, "id": _id}
row.update(metadata)
if self.table is not None:
self.table.add([row])
else:
self.table = self.db.create_table(self.name, [row])
def delete(self, _id):
# This function deletes a row by id.
# LanceDB delete syntax uses SQL syntax, so you can use "in" or "="
if self.table is None:
raise Exception("Table not created yet, please add data first")
if isinstance(_id, str):
return self.table.delete(f"id = '{_id}'")
else:
return self.table.delete(f"id = {_id}")
def drop(self, name):
# This function drops a table, if it exists.
path = os.path.join(self.db.uri, name + ".lance")
if os.path.exists(path):
shutil.rmtree(path)
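# A minimal usage sketch of the store above, using toy 2-dimensional vectors
# purely for illustration; real embeddings would come from an embedding model.
if __name__ == "__main__":
    store = LanceStore("demo")
    store.write(data=[[0.1, 0.2], [0.9, 0.8]], metadatas=[{"meta": "a"}, {"meta": "b"}], ids=["1", "2"])
    store.add(data=[0.5, 0.5], metadata={"meta": "c"}, _id="3")
    print(store.search([0.1, 0.2], n_results=2))
    store.delete("3")
    store.drop("demo")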
| [
"lancedb.connect"
] | [((241, 274), 'lancedb.connect', 'lancedb.connect', (['"""./data/lancedb"""'], {}), "('./data/lancedb')\n", (256, 274), False, 'import lancedb\n'), ((2822, 2864), 'os.path.join', 'os.path.join', (['self.db.uri', "(name + '.lance')"], {}), "(self.db.uri, name + '.lance')\n", (2834, 2864), False, 'import os\n'), ((2876, 2896), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2890, 2896), False, 'import os\n'), ((2910, 2929), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (2923, 2929), False, 'import shutil\n')] |
"""LanceDB vector store."""
import logging
from typing import Any, List, Optional
import numpy as np
from pandas import DataFrame
from llama_index.legacy.schema import (
BaseNode,
MetadataMode,
NodeRelationship,
RelatedNodeInfo,
TextNode,
)
from llama_index.legacy.vector_stores.types import (
MetadataFilters,
VectorStore,
VectorStoreQuery,
VectorStoreQueryResult,
)
from llama_index.legacy.vector_stores.utils import (
DEFAULT_DOC_ID_KEY,
DEFAULT_TEXT_KEY,
legacy_metadata_dict_to_node,
metadata_dict_to_node,
node_to_metadata_dict,
)
_logger = logging.getLogger(__name__)
def _to_lance_filter(standard_filters: MetadataFilters) -> Any:
"""Translate standard metadata filters to Lance specific spec."""
filters = []
for filter in standard_filters.legacy_filters():
if isinstance(filter.value, str):
filters.append(filter.key + ' = "' + filter.value + '"')
else:
filters.append(filter.key + " = " + str(filter.value))
return " AND ".join(filters)
def _to_llama_similarities(results: DataFrame) -> List[float]:
keys = results.keys()
normalized_similarities: np.ndarray
if "score" in keys:
normalized_similarities = np.exp(results["score"] - np.max(results["score"]))
elif "_distance" in keys:
normalized_similarities = np.exp(-results["_distance"])
else:
normalized_similarities = np.linspace(1, 0, len(results))
return normalized_similarities.tolist()
class LanceDBVectorStore(VectorStore):
"""
The LanceDB Vector Store.
Stores text and embeddings in LanceDB. The vector store will open an existing
LanceDB dataset or create the dataset if it does not exist.
Args:
uri (str, required): Location where LanceDB will store its files.
table_name (str, optional): The table name where the embeddings will be stored.
Defaults to "vectors".
vector_column_name (str, optional): The vector column name in the table if different from default.
Defaults to "vector", in keeping with lancedb convention.
nprobes (int, optional): The number of probes used.
A higher number makes search more accurate but also slower.
Defaults to 20.
refine_factor: (int, optional): Refine the results by reading extra elements
and re-ranking them in memory.
Defaults to None
Raises:
ImportError: Unable to import `lancedb`.
Returns:
LanceDBVectorStore: VectorStore that supports creating LanceDB datasets and
querying it.
"""
stores_text = True
flat_metadata: bool = True
def __init__(
self,
uri: str,
table_name: str = "vectors",
vector_column_name: str = "vector",
nprobes: int = 20,
refine_factor: Optional[int] = None,
text_key: str = DEFAULT_TEXT_KEY,
doc_id_key: str = DEFAULT_DOC_ID_KEY,
**kwargs: Any,
) -> None:
"""Init params."""
import_err_msg = "`lancedb` package not found, please run `pip install lancedb`"
try:
import lancedb
except ImportError:
raise ImportError(import_err_msg)
self.connection = lancedb.connect(uri)
self.uri = uri
self.table_name = table_name
self.vector_column_name = vector_column_name
self.nprobes = nprobes
self.text_key = text_key
self.doc_id_key = doc_id_key
self.refine_factor = refine_factor
@property
def client(self) -> None:
"""Get client."""
return
def add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
data = []
ids = []
for node in nodes:
metadata = node_to_metadata_dict(
node, remove_text=False, flat_metadata=self.flat_metadata
)
append_data = {
"id": node.node_id,
"doc_id": node.ref_doc_id,
"vector": node.get_embedding(),
"text": node.get_content(metadata_mode=MetadataMode.NONE),
"metadata": metadata,
}
data.append(append_data)
ids.append(node.node_id)
if self.table_name in self.connection.table_names():
tbl = self.connection.open_table(self.table_name)
tbl.add(data)
else:
self.connection.create_table(self.table_name, data)
return ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
        Delete nodes using ref_doc_id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
"""
table = self.connection.open_table(self.table_name)
table.delete('document_id = "' + ref_doc_id + '"')
def query(
self,
query: VectorStoreQuery,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes."""
if query.filters is not None:
if "where" in kwargs:
raise ValueError(
"Cannot specify filter via both query and kwargs. "
"Use kwargs only for lancedb specific items that are "
"not supported via the generic query interface."
)
where = _to_lance_filter(query.filters)
else:
where = kwargs.pop("where", None)
table = self.connection.open_table(self.table_name)
lance_query = (
table.search(
query=query.query_embedding,
vector_column_name=self.vector_column_name,
)
.limit(query.similarity_top_k)
.where(where)
.nprobes(self.nprobes)
)
if self.refine_factor is not None:
lance_query.refine_factor(self.refine_factor)
results = lance_query.to_pandas()
nodes = []
for _, item in results.iterrows():
try:
node = metadata_dict_to_node(item.metadata)
node.embedding = list(item[self.vector_column_name])
except Exception:
# deprecated legacy logic for backward compatibility
_logger.debug(
"Failed to parse Node metadata, fallback to legacy logic."
)
if "metadata" in item:
metadata, node_info, _relation = legacy_metadata_dict_to_node(
item.metadata, text_key=self.text_key
)
else:
metadata, node_info = {}, {}
node = TextNode(
text=item[self.text_key] or "",
id_=item.id,
metadata=metadata,
start_char_idx=node_info.get("start", None),
end_char_idx=node_info.get("end", None),
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id=item[self.doc_id_key]
),
},
)
nodes.append(node)
return VectorStoreQueryResult(
nodes=nodes,
similarities=_to_llama_similarities(results),
ids=results["id"].tolist(),
)
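# A hedged usage sketch for the store above; the uri, table name, and the
# three-dimensional embedding below are illustrative stand-ins, not real values.
def _example_usage() -> None:
    store = LanceDBVectorStore(uri="/tmp/lancedb", table_name="vectors")
    node = TextNode(text="hello world", id_="node-1")
    node.embedding = [0.1, 0.2, 0.3]  # would normally come from an embedding model
    node.relationships[NodeRelationship.SOURCE] = RelatedNodeInfo(node_id="doc-1")
    store.add([node])
    result = store.query(VectorStoreQuery(query_embedding=[0.1, 0.2, 0.3], similarity_top_k=1))
    print(result.ids)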
| [
"lancedb.connect"
] | [((607, 634), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (624, 634), False, 'import logging\n'), ((3288, 3308), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (3303, 3308), False, 'import lancedb\n'), ((1371, 1400), 'numpy.exp', 'np.exp', (["(-results['_distance'])"], {}), "(-results['_distance'])\n", (1377, 1400), True, 'import numpy as np\n'), ((3843, 3928), 'llama_index.legacy.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(False)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=False, flat_metadata=self.flat_metadata\n )\n', (3864, 3928), False, 'from llama_index.legacy.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((1281, 1305), 'numpy.max', 'np.max', (["results['score']"], {}), "(results['score'])\n", (1287, 1305), True, 'import numpy as np\n'), ((6116, 6152), 'llama_index.legacy.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['item.metadata'], {}), '(item.metadata)\n', (6137, 6152), False, 'from llama_index.legacy.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((6541, 6608), 'llama_index.legacy.vector_stores.utils.legacy_metadata_dict_to_node', 'legacy_metadata_dict_to_node', (['item.metadata'], {'text_key': 'self.text_key'}), '(item.metadata, text_key=self.text_key)\n', (6569, 6608), False, 'from llama_index.legacy.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((7094, 7140), 'llama_index.legacy.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'item[self.doc_id_key]'}), '(node_id=item[self.doc_id_key])\n', (7109, 7140), False, 'from llama_index.legacy.schema import BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode\n')] |
import pickle
import re
import zipfile
from pathlib import Path
import requests
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredHTMLLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import LanceDB
from modal import Image, Secret, Stub, web_endpoint
import lancedb
lancedb_image = Image.debian_slim().pip_install(
"lancedb", "langchain", "openai", "pandas", "tiktoken", "unstructured", "tabulate"
)
stub = Stub(
name="example-langchain-lancedb",
image=lancedb_image,
secrets=[Secret.from_name("my-openai-secret")],
)
docsearch = None
docs_path = Path("docs.pkl")
db_path = Path("lancedb")
def get_document_title(document):
m = str(document.metadata["source"])
title = re.findall("pandas.documentation(.*).html", m)
if title[0] is not None:
return title[0]
return ""
def download_docs():
pandas_docs = requests.get(
"https://eto-public.s3.us-west-2.amazonaws.com/datasets/pandas_docs/pandas.documentation.zip"
)
with open(Path("pandas.documentation.zip"), "wb") as f:
f.write(pandas_docs.content)
file = zipfile.ZipFile(Path("pandas.documentation.zip"))
file.extractall(path=Path("pandas_docs"))
def store_docs():
docs = []
if not docs_path.exists():
for p in Path("pandas_docs/pandas.documentation").rglob("*.html"):
if p.is_dir():
continue
loader = UnstructuredHTMLLoader(p)
raw_document = loader.load()
m = {}
m["title"] = get_document_title(raw_document[0])
m["version"] = "2.0rc0"
raw_document[0].metadata = raw_document[0].metadata | m
raw_document[0].metadata["source"] = str(raw_document[0].metadata["source"])
docs = docs + raw_document
with docs_path.open("wb") as fh:
pickle.dump(docs, fh)
else:
with docs_path.open("rb") as fh:
docs = pickle.load(fh)
return docs
def qanda_langchain(query):
download_docs()
docs = store_docs()
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
)
documents = text_splitter.split_documents(docs)
embeddings = OpenAIEmbeddings()
db = lancedb.connect(db_path)
table = db.create_table(
"pandas_docs",
data=[
{
"vector": embeddings.embed_query("Hello World"),
"text": "Hello World",
"id": "1",
}
],
mode="overwrite",
)
docsearch = LanceDB.from_documents(documents, embeddings, connection=table)
qa = RetrievalQA.from_chain_type(
llm=OpenAI(), chain_type="stuff", retriever=docsearch.as_retriever()
)
return qa.run(query)
@stub.function()
@web_endpoint(method="GET")
def web(query: str):
answer = qanda_langchain(query)
return {
"answer": answer,
}
@stub.function()
def cli(query: str):
answer = qanda_langchain(query)
print(answer)
| [
"lancedb.connect"
] | [((746, 762), 'pathlib.Path', 'Path', (['"""docs.pkl"""'], {}), "('docs.pkl')\n", (750, 762), False, 'from pathlib import Path\n'), ((773, 788), 'pathlib.Path', 'Path', (['"""lancedb"""'], {}), "('lancedb')\n", (777, 788), False, 'from pathlib import Path\n'), ((2956, 2982), 'modal.web_endpoint', 'web_endpoint', ([], {'method': '"""GET"""'}), "(method='GET')\n", (2968, 2982), False, 'from modal import Image, Secret, Stub, web_endpoint\n'), ((878, 924), 're.findall', 're.findall', (['"""pandas.documentation(.*).html"""', 'm'], {}), "('pandas.documentation(.*).html', m)\n", (888, 924), False, 'import re\n'), ((1033, 1150), 'requests.get', 'requests.get', (['"""https://eto-public.s3.us-west-2.amazonaws.com/datasets/pandas_docs/pandas.documentation.zip"""'], {}), "(\n 'https://eto-public.s3.us-west-2.amazonaws.com/datasets/pandas_docs/pandas.documentation.zip'\n )\n", (1045, 1150), False, 'import requests\n'), ((2228, 2294), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(200)'}), '(chunk_size=1000, chunk_overlap=200)\n', (2258, 2294), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2387, 2405), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (2403, 2405), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((2416, 2440), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (2431, 2440), False, 'import lancedb\n'), ((2726, 2789), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['documents', 'embeddings'], {'connection': 'table'}), '(documents, embeddings, connection=table)\n', (2748, 2789), False, 'from langchain.vectorstores import LanceDB\n'), ((463, 482), 'modal.Image.debian_slim', 'Image.debian_slim', ([], {}), '()\n', (480, 482), False, 'from modal import Image, Secret, Stub, web_endpoint\n'), ((1280, 1312), 'pathlib.Path', 'Path', (['"""pandas.documentation.zip"""'], {}), "('pandas.documentation.zip')\n", (1284, 1312), False, 'from pathlib import Path\n'), ((675, 711), 'modal.Secret.from_name', 'Secret.from_name', (['"""my-openai-secret"""'], {}), "('my-openai-secret')\n", (691, 711), False, 'from modal import Image, Secret, Stub, web_endpoint\n'), ((1169, 1201), 'pathlib.Path', 'Path', (['"""pandas.documentation.zip"""'], {}), "('pandas.documentation.zip')\n", (1173, 1201), False, 'from pathlib import Path\n'), ((1339, 1358), 'pathlib.Path', 'Path', (['"""pandas_docs"""'], {}), "('pandas_docs')\n", (1343, 1358), False, 'from pathlib import Path\n'), ((1574, 1599), 'langchain.document_loaders.UnstructuredHTMLLoader', 'UnstructuredHTMLLoader', (['p'], {}), '(p)\n', (1596, 1599), False, 'from langchain.document_loaders import UnstructuredHTMLLoader\n'), ((2008, 2029), 'pickle.dump', 'pickle.dump', (['docs', 'fh'], {}), '(docs, fh)\n', (2019, 2029), False, 'import pickle\n'), ((2100, 2115), 'pickle.load', 'pickle.load', (['fh'], {}), '(fh)\n', (2111, 2115), False, 'import pickle\n'), ((2840, 2848), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (2846, 2848), False, 'from langchain.llms import OpenAI\n'), ((1443, 1483), 'pathlib.Path', 'Path', (['"""pandas_docs/pandas.documentation"""'], {}), "('pandas_docs/pandas.documentation')\n", (1447, 1483), False, 'from pathlib import Path\n')] |
from typing import List, Any
from dataclasses import dataclass
import lancedb
import pandas as pd
from autochain.tools.base import Tool
from autochain.models.base import BaseLanguageModel
from autochain.tools.internal_search.base_search_tool import BaseSearchTool
@dataclass
class LanceDBDoc:
doc: str
vector: List[float] = None
class LanceDBSeach(Tool, BaseSearchTool):
"""
Use LanceDB as the internal search tool
LanceDB is a vector database that supports vector search.
Args:
uri: the uri of the database. Default to "lancedb"
table_name: the name of the table. Default to "table"
metric: the metric used for vector search. Default to "cosine"
encoder: the encoder used to encode the documents. Default to None
docs: the documents to be indexed. Default to None
"""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
docs: List[LanceDBDoc]
uri: str = "lancedb"
table_name: str = "table"
metric: str = "cosine"
encoder: BaseLanguageModel = None
db: lancedb.db.DBConnection = None
table: lancedb.table.Table = None
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
self.db = lancedb.connect(self.uri)
if self.docs:
self._encode_docs(self.docs)
self._create_table(self.docs)
def _create_table(self, docs: List[LanceDBDoc]) -> None:
self.table = self.db.create_table(self.table_name, self._docs_to_dataframe(docs), mode="overwrite")
def _encode_docs(self, docs: List[LanceDBDoc]) -> None:
for doc in docs:
if not doc.vector:
if not self.encoder:
raise ValueError("Encoder is not provided for encoding docs")
doc.vector = self.encoder.encode([doc.doc]).embeddings[0]
def _docs_to_dataframe(self, docs: List[LanceDBDoc]) -> pd.DataFrame:
return pd.DataFrame(
[
{"doc": doc.doc, "vector": doc.vector}
for doc in docs
]
)
def _run(
self,
query: str,
top_k: int = 2,
*args: Any,
**kwargs: Any,
) -> str:
if self.table is None:
return ""
embeddings = self.encoder.encode([query]).embeddings[0]
result = self.table.search(embeddings).limit(top_k).to_df()["doc"].to_list()
return "\n".join([f"Doc {i}: {doc}" for i, doc in enumerate(result)])
def add_docs(self, docs: List[LanceDBDoc], **kwargs):
if not len(docs):
return
self._encode_docs(docs)
self.table.add(self._docs_to_dataframe(docs)) if self.table else self._create_table(docs)
def clear_index(self):
if self.table_name in self.db.table_names():
self.db.drop_table(self.table_name)
self.table = None
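# Minimal usage sketch (illustrative; assumes an encoder implementing
# BaseLanguageModel whose encode() returns an object with an `embeddings` list):
#
#   search = LanceDBSeach(docs=[LanceDBDoc(doc="hello world")], encoder=my_encoder)
#   print(search._run("hello", top_k=1))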
| [
"lancedb.connect"
] | [((1275, 1300), 'lancedb.connect', 'lancedb.connect', (['self.uri'], {}), '(self.uri)\n', (1290, 1300), False, 'import lancedb\n'), ((1984, 2054), 'pandas.DataFrame', 'pd.DataFrame', (["[{'doc': doc.doc, 'vector': doc.vector} for doc in docs]"], {}), "([{'doc': doc.doc, 'vector': doc.vector} for doc in docs])\n", (1996, 2054), True, 'import pandas as pd\n')] |
import lancedb
import matplotlib.pyplot as plt
import rasterio as rio
import streamlit as st
from rasterio.plot import show
st.set_page_config(layout="wide")
# Get preferrred chips
def get_unique_chips(tbl):
chips = [
{"tile": "17MNP", "idx": "0271", "year": 2023},
{"tile": "19HGU", "idx": "0033", "year": 2018},
{"tile": "33NVB", "idx": "0393", "year": 2020},
{"tile": "21JVJ", "idx": "0100", "year": 2020},
{"tile": "34KHD", "idx": "0080", "year": 2018},
{"tile": "19JCF", "idx": "0215", "year": 2023},
{"tile": "20HMK", "idx": "0100", "year": 2020},
{"tile": "37MFT", "idx": "0313", "year": 2023},
{"tile": "49KHR", "idx": "0020", "year": 2017},
{"tile": "55LBC", "idx": "0075", "year": 2022},
]
tile_filter = " OR ".join(
[
f"(tile == '{chip['tile']}' "
f"AND idx == '{chip['idx']}') "
f"AND year == {chip['year']}"
for chip in chips
]
)
result = tbl.search().where(tile_filter, prefilter=True).to_pandas()
return result
# Load embeddings
@st.cache_resource()
def connect_to_database():
db = lancedb.connect("nbs/embeddings")
tbl = db.open_table("clay-v001")
return tbl
@st.cache_resource()
def show_samples(_tbl):
df = get_unique_chips(_tbl)
# df = _tbl.head(10).to_pandas()
# sample 100 random rows
# samples = df.sample(100).to_dict("records")
samples = df.to_dict("records")
cols = st.columns(10)
options = {}
for idx, sample in enumerate(samples):
path = sample["path"]
rgb_chip = rio.open(path).read(indexes=[3, 2, 1])
rgb_chip = (rgb_chip - rgb_chip.min()) / (rgb_chip.max() - rgb_chip.min())
with cols[idx % 10]:
st.caption(f"{sample['tile']}-{sample['date']}-{sample['idx']}")
show(rgb_chip)
plt.axis("off")
st.pyplot(plt)
options[f"{sample['tile']}-{sample['idx']}"] = {
"vector": sample["vector"],
"tile": sample["tile"],
"year": sample["year"],
}
return options
# Function to find similar vectors
@st.cache_data()
def find_similar_vectors(_tbl, query):
# tile, year = query["tile"], query["year"]
# filter = f"tile != '{tile}'"
result = (
_tbl.search(query=query["vector"], vector_column_name="vector")
.metric("cosine")
# .where(filter, prefilter=True)
.limit(10)
.to_pandas()
)
# st.dataframe(result)
cols = st.columns(10)
for idx, row in result.iterrows():
path = row["path"]
rgb_chip = rio.open(path).read(indexes=[3, 2, 1])
rgb_chip = (rgb_chip - rgb_chip.min()) / (rgb_chip.max() - rgb_chip.min())
with cols[idx % 10]:
st.caption(f"{row['tile']}-{row['date']}-{row['idx']}")
show(rgb_chip)
plt.axis("off")
st.pyplot(plt)
# Main app
def main():
st.title("Clayground")
tbl = connect_to_database()
options = show_samples(tbl)
# UI to select an embedding
with st.sidebar:
selection = st.selectbox("Select a chip", options=options.keys())
arithmetic = st.toggle("Arithmetic", False)
if arithmetic:
multiselect = st.multiselect(
"Select multiple chips", options=options.keys(), default=[]
)
submit = st.button("Submit")
if submit and not arithmetic:
query = options[selection]
find_similar_vectors(tbl, query)
if submit and arithmetic and len(multiselect) > 1:
st.write("Selected:", multiselect)
v1 = options[multiselect[0]]
v2 = options[multiselect[1]]
v3 = (v1["vector"] + v2["vector"]) / 2
find_similar_vectors(tbl, {"vector": v3})
if __name__ == "__main__":
main()
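# Launched with Streamlit, e.g. `streamlit run app.py` (script name assumed).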
| [
"lancedb.connect"
] | [((125, 158), 'streamlit.set_page_config', 'st.set_page_config', ([], {'layout': '"""wide"""'}), "(layout='wide')\n", (143, 158), True, 'import streamlit as st\n'), ((1119, 1138), 'streamlit.cache_resource', 'st.cache_resource', ([], {}), '()\n', (1136, 1138), True, 'import streamlit as st\n'), ((1264, 1283), 'streamlit.cache_resource', 'st.cache_resource', ([], {}), '()\n', (1281, 1283), True, 'import streamlit as st\n'), ((2176, 2191), 'streamlit.cache_data', 'st.cache_data', ([], {}), '()\n', (2189, 2191), True, 'import streamlit as st\n'), ((1175, 1208), 'lancedb.connect', 'lancedb.connect', (['"""nbs/embeddings"""'], {}), "('nbs/embeddings')\n", (1190, 1208), False, 'import lancedb\n'), ((1504, 1518), 'streamlit.columns', 'st.columns', (['(10)'], {}), '(10)\n', (1514, 1518), True, 'import streamlit as st\n'), ((2552, 2566), 'streamlit.columns', 'st.columns', (['(10)'], {}), '(10)\n', (2562, 2566), True, 'import streamlit as st\n'), ((2982, 3004), 'streamlit.title', 'st.title', (['"""Clayground"""'], {}), "('Clayground')\n", (2990, 3004), True, 'import streamlit as st\n'), ((3220, 3250), 'streamlit.toggle', 'st.toggle', (['"""Arithmetic"""', '(False)'], {}), "('Arithmetic', False)\n", (3229, 3250), True, 'import streamlit as st\n'), ((3424, 3443), 'streamlit.button', 'st.button', (['"""Submit"""'], {}), "('Submit')\n", (3433, 3443), True, 'import streamlit as st\n'), ((3619, 3653), 'streamlit.write', 'st.write', (['"""Selected:"""', 'multiselect'], {}), "('Selected:', multiselect)\n", (3627, 3653), True, 'import streamlit as st\n'), ((1791, 1855), 'streamlit.caption', 'st.caption', (['f"""{sample[\'tile\']}-{sample[\'date\']}-{sample[\'idx\']}"""'], {}), '(f"{sample[\'tile\']}-{sample[\'date\']}-{sample[\'idx\']}")\n', (1801, 1855), True, 'import streamlit as st\n'), ((1868, 1882), 'rasterio.plot.show', 'show', (['rgb_chip'], {}), '(rgb_chip)\n', (1872, 1882), False, 'from rasterio.plot import show\n'), ((1895, 1910), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1903, 1910), True, 'import matplotlib.pyplot as plt\n'), ((1923, 1937), 'streamlit.pyplot', 'st.pyplot', (['plt'], {}), '(plt)\n', (1932, 1937), True, 'import streamlit as st\n'), ((2815, 2870), 'streamlit.caption', 'st.caption', (['f"""{row[\'tile\']}-{row[\'date\']}-{row[\'idx\']}"""'], {}), '(f"{row[\'tile\']}-{row[\'date\']}-{row[\'idx\']}")\n', (2825, 2870), True, 'import streamlit as st\n'), ((2883, 2897), 'rasterio.plot.show', 'show', (['rgb_chip'], {}), '(rgb_chip)\n', (2887, 2897), False, 'from rasterio.plot import show\n'), ((2910, 2925), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2918, 2925), True, 'import matplotlib.pyplot as plt\n'), ((2938, 2952), 'streamlit.pyplot', 'st.pyplot', (['plt'], {}), '(plt)\n', (2947, 2952), True, 'import streamlit as st\n'), ((1628, 1642), 'rasterio.open', 'rio.open', (['path'], {}), '(path)\n', (1636, 1642), True, 'import rasterio as rio\n'), ((2652, 2666), 'rasterio.open', 'rio.open', (['path'], {}), '(path)\n', (2660, 2666), True, 'import rasterio as rio\n')] |
import lancedb
import numpy as np
import pandas as pd
global data
data = []
global table
table = None
def get_recommendations(title):
pd_data = pd.DataFrame(data)
# Table Search
result = (
table.search(pd_data[pd_data["title"] == title]["vector"].values[0])
.limit(5)
.to_df()
)
# Get IMDB links
links = pd.read_csv(
"./ml-latest-small/links.csv",
header=0,
names=["movie id", "imdb id", "tmdb id"],
converters={"imdb id": str},
)
ret = result["title"].values.tolist()
# Loop to add links
for i in range(len(ret)):
link = links[links["movie id"] == result["id"].values[i]]["imdb id"].values[0]
link = "https://www.imdb.com/title/tt" + link
ret[i] = [ret[i], link]
return ret
if __name__ == "__main__":
# Load and prepare data
ratings = pd.read_csv(
"./ml-latest-small/ratings.csv",
header=None,
names=["user id", "movie id", "rating", "timestamp"],
)
ratings = ratings.drop(columns=["timestamp"])
ratings = ratings.drop(0)
ratings["rating"] = ratings["rating"].values.astype(np.float32)
ratings["user id"] = ratings["user id"].values.astype(np.int32)
ratings["movie id"] = ratings["movie id"].values.astype(np.int32)
reviewmatrix = ratings.pivot(
index="user id", columns="movie id", values="rating"
).fillna(0)
# SVD
matrix = reviewmatrix.values
    u, s, vh = np.linalg.svd(matrix, full_matrices=False)
    vectors = np.rot90(np.fliplr(vh))  # equivalent to vh.T: one latent vector per movie
print(vectors.shape)
# Metadata
movies = pd.read_csv(
"./ml-latest-small/movies.csv", header=0, names=["movie id", "title", "genres"]
)
movies = movies[movies["movie id"].isin(reviewmatrix.columns)]
data = []
for i in range(len(movies)):
data.append(
{
"id": movies.iloc[i]["movie id"],
"title": movies.iloc[i]["title"],
"vector": vectors[i],
"genre": movies.iloc[i]["genres"],
}
)
print(pd.DataFrame(data))
# Connect to LanceDB
db = lancedb.connect("./data/test-db")
    try:
        table = db.create_table("movie_set", data=data)
    except Exception:  # table already exists from a previous run
        table = db.open_table("movie_set")
print(get_recommendations("Moana (2016)"))
print(get_recommendations("Rogue One: A Star Wars Story (2016)"))
| [
"lancedb.connect"
] | [((152, 170), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (164, 170), True, 'import pandas as pd\n'), ((357, 484), 'pandas.read_csv', 'pd.read_csv', (['"""./ml-latest-small/links.csv"""'], {'header': '(0)', 'names': "['movie id', 'imdb id', 'tmdb id']", 'converters': "{'imdb id': str}"}), "('./ml-latest-small/links.csv', header=0, names=['movie id',\n 'imdb id', 'tmdb id'], converters={'imdb id': str})\n", (368, 484), True, 'import pandas as pd\n'), ((876, 991), 'pandas.read_csv', 'pd.read_csv', (['"""./ml-latest-small/ratings.csv"""'], {'header': 'None', 'names': "['user id', 'movie id', 'rating', 'timestamp']"}), "('./ml-latest-small/ratings.csv', header=None, names=['user id',\n 'movie id', 'rating', 'timestamp'])\n", (887, 991), True, 'import pandas as pd\n'), ((1476, 1518), 'numpy.linalg.svd', 'np.linalg.svd', (['matrix'], {'full_matrices': '(False)'}), '(matrix, full_matrices=False)\n', (1489, 1518), True, 'import numpy as np\n'), ((1612, 1708), 'pandas.read_csv', 'pd.read_csv', (['"""./ml-latest-small/movies.csv"""'], {'header': '(0)', 'names': "['movie id', 'title', 'genres']"}), "('./ml-latest-small/movies.csv', header=0, names=['movie id',\n 'title', 'genres'])\n", (1623, 1708), True, 'import pandas as pd\n'), ((2148, 2181), 'lancedb.connect', 'lancedb.connect', (['"""./data/test-db"""'], {}), "('./data/test-db')\n", (2163, 2181), False, 'import lancedb\n'), ((1543, 1556), 'numpy.fliplr', 'np.fliplr', (['vh'], {}), '(vh)\n', (1552, 1556), True, 'import numpy as np\n'), ((2092, 2110), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (2104, 2110), True, 'import pandas as pd\n')] |
from hashlib import md5
from typing import List, Optional
import json
try:
import lancedb
import pyarrow as pa
except ImportError:
raise ImportError("`lancedb` not installed.")
from phi.document import Document
from phi.embedder import Embedder
from phi.embedder.openai import OpenAIEmbedder
from phi.vectordb.base import VectorDb
from phi.vectordb.distance import Distance
from phi.utils.log import logger
class LanceDb(VectorDb):
def __init__(
self,
embedder: Embedder = OpenAIEmbedder(),
distance: Distance = Distance.cosine,
connection: Optional[lancedb.db.LanceTable] = None,
uri: Optional[str] = "/tmp/lancedb",
table_name: Optional[str] = "phi",
nprobes: Optional[int] = 20,
**kwargs,
):
# Embedder for embedding the document contents
self.embedder: Embedder = embedder
self.dimensions: int = self.embedder.dimensions
# Distance metric
self.distance: Distance = distance
# Connection to lancedb table, can also be provided to use an existing connection
self.uri = uri
self.client = lancedb.connect(self.uri)
self.nprobes = nprobes
if connection:
if not isinstance(connection, lancedb.db.LanceTable):
raise ValueError(
"connection should be an instance of lancedb.db.LanceTable, ",
f"got {type(connection)}",
)
self.connection = connection
self.table_name = self.connection.name
self._vector_col = self.connection.schema.names[0]
            self._id = self.connection.schema.names[1]  # type: ignore
else:
self.table_name = table_name
self.connection = self._init_table()
# Lancedb kwargs
self.kwargs = kwargs
def create(self) -> lancedb.db.LanceTable:
return self._init_table()
def _init_table(self) -> lancedb.db.LanceTable:
self._id = "id"
self._vector_col = "vector"
schema = pa.schema(
[
pa.field(
self._vector_col,
pa.list_(
pa.float32(),
len(self.embedder.get_embedding("test")), # type: ignore
),
),
pa.field(self._id, pa.string()),
pa.field("payload", pa.string()),
]
)
logger.info(f"Creating table: {self.table_name}")
tbl = self.client.create_table(self.table_name, schema=schema, mode="overwrite")
return tbl
def doc_exists(self, document: Document) -> bool:
"""
Validating if the document exists or not
Args:
document (Document): Document to validate
"""
if self.client:
cleaned_content = document.content.replace("\x00", "\ufffd")
doc_id = md5(cleaned_content.encode()).hexdigest()
result = self.connection.search().where(f"{self._id}='{doc_id}'").to_arrow()
return len(result) > 0
return False
def insert(self, documents: List[Document]) -> None:
logger.debug(f"Inserting {len(documents)} documents")
data = []
for document in documents:
document.embed(embedder=self.embedder)
cleaned_content = document.content.replace("\x00", "\ufffd")
doc_id = str(md5(cleaned_content.encode()).hexdigest())
payload = {
"name": document.name,
"meta_data": document.meta_data,
"content": cleaned_content,
"usage": document.usage,
}
data.append(
{
"id": doc_id,
"vector": document.embedding,
"payload": json.dumps(payload),
}
)
logger.debug(f"Inserted document: {document.name} ({document.meta_data})")
self.connection.add(data)
logger.debug(f"Upsert {len(data)} documents")
def upsert(self, documents: List[Document]) -> None:
"""
Upsert documents into the database.
Args:
documents (List[Document]): List of documents to upsert
"""
logger.debug("Redirecting the request to insert")
self.insert(documents)
def search(self, query: str, limit: int = 5) -> List[Document]:
query_embedding = self.embedder.get_embedding(query)
if query_embedding is None:
logger.error(f"Error getting embedding for Query: {query}")
return []
results = (
self.connection.search(
query=query_embedding,
vector_column_name=self._vector_col,
)
.limit(limit)
.nprobes(self.nprobes)
.to_pandas()
)
# Build search results
search_results: List[Document] = []
try:
for _, item in results.iterrows():
payload = json.loads(item["payload"])
search_results.append(
Document(
name=payload["name"],
meta_data=payload["meta_data"],
content=payload["content"],
embedder=self.embedder,
embedding=item["vector"],
usage=payload["usage"],
)
)
except Exception as e:
logger.error(f"Error building search results: {e}")
return search_results
def delete(self) -> None:
if self.exists():
logger.debug(f"Deleting collection: {self.table_name}")
            self.client.drop_table(self.table_name)
def exists(self) -> bool:
if self.client:
if self.table_name in self.client.table_names():
return True
return False
def get_count(self) -> int:
if self.exists():
            return self.client.open_table(self.table_name).count_rows()
return 0
def optimize(self) -> None:
pass
def clear(self) -> bool:
return False
def name_exists(self, name: str) -> bool:
raise NotImplementedError
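# Illustrative usage sketch (assumes OPENAI_API_KEY is set for the default
# embedder; field names follow the class above):
#
#   vector_db = LanceDb(uri="/tmp/phi-lancedb", table_name="docs")
#   vector_db.insert([Document(content="hello world")])
#   print(vector_db.search("hello", limit=1))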
| [
"lancedb.connect"
] | [((509, 525), 'phi.embedder.openai.OpenAIEmbedder', 'OpenAIEmbedder', ([], {}), '()\n', (523, 525), False, 'from phi.embedder.openai import OpenAIEmbedder\n'), ((1143, 1168), 'lancedb.connect', 'lancedb.connect', (['self.uri'], {}), '(self.uri)\n', (1158, 1168), False, 'import lancedb\n'), ((2476, 2525), 'phi.utils.log.logger.info', 'logger.info', (['f"""Creating table: {self.table_name}"""'], {}), "(f'Creating table: {self.table_name}')\n", (2487, 2525), False, 'from phi.utils.log import logger\n'), ((4316, 4365), 'phi.utils.log.logger.debug', 'logger.debug', (['"""Redirecting the request to insert"""'], {}), "('Redirecting the request to insert')\n", (4328, 4365), False, 'from phi.utils.log import logger\n'), ((3935, 4009), 'phi.utils.log.logger.debug', 'logger.debug', (['f"""Inserted document: {document.name} ({document.meta_data})"""'], {}), "(f'Inserted document: {document.name} ({document.meta_data})')\n", (3947, 4009), False, 'from phi.utils.log import logger\n'), ((4575, 4634), 'phi.utils.log.logger.error', 'logger.error', (['f"""Error getting embedding for Query: {query}"""'], {}), "(f'Error getting embedding for Query: {query}')\n", (4587, 4634), False, 'from phi.utils.log import logger\n'), ((5712, 5767), 'phi.utils.log.logger.debug', 'logger.debug', (['f"""Deleting collection: {self.table_name}"""'], {}), "(f'Deleting collection: {self.table_name}')\n", (5724, 5767), False, 'from phi.utils.log import logger\n'), ((5079, 5106), 'json.loads', 'json.loads', (["item['payload']"], {}), "(item['payload'])\n", (5089, 5106), False, 'import json\n'), ((5560, 5611), 'phi.utils.log.logger.error', 'logger.error', (['f"""Error building search results: {e}"""'], {}), "(f'Error building search results: {e}')\n", (5572, 5611), False, 'from phi.utils.log import logger\n'), ((2379, 2390), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (2388, 2390), True, 'import pyarrow as pa\n'), ((2429, 2440), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (2438, 2440), True, 'import pyarrow as pa\n'), ((3870, 3889), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (3880, 3889), False, 'import json\n'), ((5166, 5339), 'phi.document.Document', 'Document', ([], {'name': "payload['name']", 'meta_data': "payload['meta_data']", 'content': "payload['content']", 'embedder': 'self.embedder', 'embedding': "item['vector']", 'usage': "payload['usage']"}), "(name=payload['name'], meta_data=payload['meta_data'], content=\n payload['content'], embedder=self.embedder, embedding=item['vector'],\n usage=payload['usage'])\n", (5174, 5339), False, 'from phi.document import Document\n'), ((2206, 2218), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (2216, 2218), True, 'import pyarrow as pa\n')] |
import lancedb
from langchain.prompts import PromptTemplate
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_community.llms import GPT4All
from langchain.chains import ConversationalRetrievalChain, LLMChain
from langchain_community.vectorstores import LanceDB
from langchain_community.embeddings import GPT4AllEmbeddings
db = lancedb.connect("./lancedb")
table = db.open_table("Peter_Griffin")
vectorStore = LanceDB(table, GPT4AllEmbeddings())
localPath = ("./models/wizardlm-13b-v1.2.q4_0.gguf")
template = """
Answer the question in detail using the context provided.
Context: {context}
---
Chat history: {chat_history}
---
Question: {question}
"""
prompt = PromptTemplate(
    template=template,
    input_variables=["context", "question", "chat_history"]
)
callbacks = [StreamingStdOutCallbackHandler()]
llm = GPT4All(
model=localPath,
callbacks=callbacks,
verbose = True
)
qa = ConversationalRetrievalChain.from_llm(
llm,
vectorStore.as_retriever(
search_type='similarity',
search_kwargs={
"k": 4
}
),
combine_docs_chain_kwargs={"prompt": prompt}
)
chat_history = []
print("=======================")
print("Type 'exit' to stop,")
while True:
query = input("please enter your question: ")
if query.lower() == 'exit':
break
    result = qa.invoke({"question": query, "chat_history": chat_history})
    chat_history.append((query, result["answer"]))  # keep context for follow-up questions
    print("\n")
| [
"lancedb.connect"
] | [((373, 401), 'lancedb.connect', 'lancedb.connect', (['"""./lancedb"""'], {}), "('./lancedb')\n", (388, 401), False, 'import lancedb\n'), ((713, 805), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': "['context, question', 'chat_history']"}), "(template=template, input_variables=['context, question',\n 'chat_history'])\n", (727, 805), False, 'from langchain.prompts import PromptTemplate\n'), ((869, 928), 'langchain_community.llms.GPT4All', 'GPT4All', ([], {'model': 'localPath', 'callbacks': 'callbacks', 'verbose': '(True)'}), '(model=localPath, callbacks=callbacks, verbose=True)\n', (876, 928), False, 'from langchain_community.llms import GPT4All\n'), ((472, 491), 'langchain_community.embeddings.GPT4AllEmbeddings', 'GPT4AllEmbeddings', ([], {}), '()\n', (489, 491), False, 'from langchain_community.embeddings import GPT4AllEmbeddings\n'), ((828, 860), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (858, 860), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n')] |
from typing import Any, List, Optional, Dict
from ._base import Record, VectorStore
from ._embeddings import Embeddings
VECTOR_COLUMN_NAME = "_vector"
class LanceDB(VectorStore):
def __init__(self, db_uri, embeddings: Embeddings = None) -> None:
super().__init__()
try:
import pyarrow as pa
pa.__version__
except ImportError as exc:
raise ImportError(
"Could not import pyarrow python package. "
"Please install it with `pip install pyarrow`."
) from exc
try:
import lancedb as lancedb
# disable diagnostics
lancedb.utils.CONFIG['diagnostics'] = False
except ImportError as exc:
raise ImportError(
"Could not import lancedb python package. "
"Please install it with `pip install lancedb`."
) from exc
self._db_uri = db_uri
self._embeddings = embeddings
self._db = lancedb.connect(self._db_uri)
self._tables = {}
def create_collection(self, collection: str, schema: Dict[str, type] = None, mode="create"):
if schema is None:
raise ValueError("Invalid schema to create LanceDB table.")
s = self._convert_schema(schema=schema)
self._db.create_table(collection, schema=s, mode=mode)
def add(
self,
collection: str,
records: List[Record],
embeddings_columns: List[str] = None,
vectors: List[List[float]] = None,
**kwargs
):
tbl = self._db.open_table(collection)
if not vectors:
text_to_embed = []
for r in records:
text_to_embed.append(Record.values_to_text(r, props=embeddings_columns))
vectors = self._embeddings.embed_batch(texts=text_to_embed)
if len(vectors) != len(records):
raise ValueError("The length of records must be the same as the length of vecotors.")
for i in range(len(records)):
records[i][VECTOR_COLUMN_NAME] = vectors[i]
tbl.add(records)
def delete(self, collection: str, query: str):
tbl = self._db.open_table(collection)
tbl.delete(query)
def search(
self,
collection: str = None,
query: str = None,
vector: List = None,
filter: Any = None,
limit: int = 20,
columns: Optional[List[str]] = None,
with_vector: bool = False,
with_distance: bool = False,
**kwargs
) -> List[Record]:
if not query and not vector:
raise ValueError("LanceDB search must provide query or vector.")
if query and not vector and self._embeddings:
vector = self._embeddings.embed(text=query)
if not vector:
raise ValueError(
"LanceDB search must provide Embeddings function.")
tbl = self._db.open_table(collection)
query = tbl.search(vector, vector_column_name=VECTOR_COLUMN_NAME)
if filter:
query = query.where(filter)
if columns:
query = query.select(columns=columns)
results = query.limit(limit=limit).to_list()
for v in results:
if not with_vector:
del v[VECTOR_COLUMN_NAME]
if not with_distance:
del v['_distance']
return results
def _convert_schema(self, schema: Dict[str, type]):
try:
import pyarrow as pa
except ImportError as exc:
raise ImportError(
"Could not import pyarrow python package. "
"Please install it with `pip install pyarrow`."
) from exc
dims = len(self._embeddings.embed(""))
columns = [
pa.field(VECTOR_COLUMN_NAME, pa.list_(pa.float32(), dims)),
]
for k, v in schema.items():
            t = pa.string()  # default column type
            if v is float:
                t = pa.float64()
            if v is int:
                t = pa.int64()
            if v is bool:
                t = pa.bool_()
columns.append(
pa.field(k, t)
)
s = pa.schema(columns)
return s
| [
"lancedb.connect"
] | [((1012, 1041), 'lancedb.connect', 'lancedb.connect', (['self._db_uri'], {}), '(self._db_uri)\n', (1027, 1041), True, 'import lancedb as lancedb\n'), ((4319, 4337), 'pyarrow.schema', 'pa.schema', (['columns'], {}), '(columns)\n', (4328, 4337), True, 'import pyarrow as pa\n'), ((4016, 4027), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (4025, 4027), True, 'import pyarrow as pa\n'), ((4086, 4098), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (4096, 4098), True, 'import pyarrow as pa\n'), ((4154, 4164), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (4162, 4164), True, 'import pyarrow as pa\n'), ((4221, 4231), 'pyarrow.bool_', 'pa.bool_', ([], {}), '()\n', (4229, 4231), True, 'import pyarrow as pa\n'), ((4277, 4291), 'pyarrow.field', 'pa.field', (['k', 't'], {}), '(k, t)\n', (4285, 4291), True, 'import pyarrow as pa\n'), ((3931, 3943), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (3941, 3943), True, 'import pyarrow as pa\n')] |
import typing as t
from docarray import DocumentArray, Document
import lancedb
from filter import Filter
import joblib
import numpy as np
DETAIL_COLUMNS = [
"item_id",
"topic_id",
"cluster_id",
"is_geolocated",
"booking_number",
"stock_price",
"offer_creation_date",
"stock_beginning_date",
"category",
"subcategory_id",
"search_group_name",
"gtl_id",
"gtl_l3",
"gtl_l4",
"total_offers",
"example_offer_id",
"example_venue_id",
"example_offer_name",
"example_venue_latitude",
"example_venue_longitude",
]
DEFAULTS = ["_distance"]
class DefaultClient:
def load(self) -> None:
self.item_docs = DocumentArray.load("./metadata/item.docs")
uri = "./metadata/vector"
db = lancedb.connect(uri)
self.table = db.open_table("items")
def offer_vector(self, var: str) -> Document:
# not default case
try:
return self.item_docs[var]
        except Exception:  # offer id not found in the index
return None
def build_query(self, params):
sql = Filter(params).parse_where_clause()
if len(sql) == 0:
return None
return sql
def search(
self,
vector: Document,
similarity_metric="dot",
n=50,
query_filter: t.Dict = None,
details: bool = False,
item_id: str = None,
prefilter: bool = True,
vector_column_name: str = "vector",
) -> t.List[t.Dict]:
results = (
self.table.search(
vector.embedding,
vector_column_name=vector_column_name,
query_type="vector",
)
.where(self.build_query(query_filter), prefilter=prefilter)
.nprobes(20)
.refine_factor(10)
.select(columns=self.columns(details))
.metric(similarity_metric)
.limit(n)
.to_list()
)
return self.out(results, details, item_id=item_id)
def filter(
self,
query_filter: t.Dict = None,
n=50,
details: bool = False,
prefilter: bool = True,
vector_column_name: str = "booking_number_desc",
) -> t.List[t.Dict]:
results = (
self.table.search(
[0], vector_column_name=vector_column_name, query_type="vector"
)
.where(self.build_query(query_filter), prefilter=prefilter)
.select(columns=self.columns(details))
.limit(n)
.to_list()
)
return self.out(results, details)
def columns(self, details: bool) -> t.Optional[t.List[str]]:
if details:
return None
else:
return DETAIL_COLUMNS
def out(self, results, details: bool, item_id: str = None):
predictions = []
for idx, row in enumerate(results):
if item_id is not None and str(row["item_id"]) == item_id:
continue
if not details:
predictions.append(
{
"idx": idx,
"item_id": row["item_id"],
}
)
else:
# drop embs to reduce latency
row.pop("vector", None)
row.pop("raw_embeddings", None)
predictions.append(
dict(
{
"idx": idx,
},
**{k: row[k] for k in row if k in DETAIL_COLUMNS + DEFAULTS}
)
)
return predictions
class RecoClient(DefaultClient):
def __init__(self, default_token: str) -> None:
self.default_token = default_token
def user_vector(self, var: str) -> Document:
        default_user_embedding = self.user_docs[self.default_token]
        try:
            return self.user_docs[var]
        except Exception:  # fall back to the default user embedding
            return default_user_embedding
def load(self) -> None:
self.item_docs = DocumentArray.load("./metadata/item.docs")
self.user_docs = DocumentArray.load("./metadata/user.docs")
uri = "./metadata/vector"
db = lancedb.connect(uri)
self.table = db.open_table("items")
class TextClient(DefaultClient):
def __init__(self, transformer: str, reducer_path: str) -> None:
from sentence_transformers import SentenceTransformer
self.encoder = SentenceTransformer(transformer)
self.reducer = joblib.load(reducer_path)
def text_vector(self, var: str):
encode = self.encoder.encode(var)
reduce = np.array(self.reducer.transform([encode])).flatten()
return Document(embedding=reduce)
| [
"lancedb.connect"
] | [((689, 731), 'docarray.DocumentArray.load', 'DocumentArray.load', (['"""./metadata/item.docs"""'], {}), "('./metadata/item.docs')\n", (707, 731), False, 'from docarray import DocumentArray, Document\n'), ((779, 799), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (794, 799), False, 'import lancedb\n'), ((4059, 4101), 'docarray.DocumentArray.load', 'DocumentArray.load', (['"""./metadata/item.docs"""'], {}), "('./metadata/item.docs')\n", (4077, 4101), False, 'from docarray import DocumentArray, Document\n'), ((4127, 4169), 'docarray.DocumentArray.load', 'DocumentArray.load', (['"""./metadata/user.docs"""'], {}), "('./metadata/user.docs')\n", (4145, 4169), False, 'from docarray import DocumentArray, Document\n'), ((4217, 4237), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (4232, 4237), False, 'import lancedb\n'), ((4472, 4504), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['transformer'], {}), '(transformer)\n', (4491, 4504), False, 'from sentence_transformers import SentenceTransformer\n'), ((4528, 4553), 'joblib.load', 'joblib.load', (['reducer_path'], {}), '(reducer_path)\n', (4539, 4553), False, 'import joblib\n'), ((4719, 4745), 'docarray.Document', 'Document', ([], {'embedding': 'reduce'}), '(embedding=reduce)\n', (4727, 4745), False, 'from docarray import DocumentArray, Document\n'), ((1064, 1078), 'filter.Filter', 'Filter', (['params'], {}), '(params)\n', (1070, 1078), False, 'from filter import Filter\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/8/9 15:42
@Author : unkn-wn (Leon Yee)
@File : lancedb_store.py
"""
import lancedb
import shutil, os
class LanceStore:
def __init__(self, name):
db = lancedb.connect('./data/lancedb')
self.db = db
self.name = name
self.table = None
def search(self, query, n_results=2, metric="L2", nprobes=20, **kwargs):
# This assumes query is a vector embedding
# kwargs can be used for optional filtering
# .select - only searches the specified columns
# .where - SQL syntax filtering for metadata (e.g. where("price > 100"))
# .metric - specifies the distance metric to use
# .nprobes - values will yield better recall (more likely to find vectors if they exist) at the expense of latency.
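        # Illustrative call (column names depend on the metadata you wrote):
        #   store.search(query_vec, n_results=3, where="price > 100", select=["id"])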
        if self.table is None:
            raise Exception("Table not created yet, please add data first.")
results = self.table \
.search(query) \
.limit(n_results) \
.select(kwargs.get('select')) \
.where(kwargs.get('where')) \
.metric(metric) \
.nprobes(nprobes) \
.to_df()
return results
def persist(self):
raise NotImplementedError
def write(self, data, metadatas, ids):
# This function is similar to add(), but it's for more generalized updates
# "data" is the list of embeddings
# Inserts into table by expanding metadatas into a dataframe: [{'vector', 'id', 'meta', 'meta2'}, ...]
documents = []
for i in range(len(data)):
row = {
'vector': data[i],
'id': ids[i]
}
row.update(metadatas[i])
documents.append(row)
        if self.table is not None:
self.table.add(documents)
else:
self.table = self.db.create_table(self.name, documents)
def add(self, data, metadata, _id):
# This function is for adding individual documents
# It assumes you're passing in a single vector embedding, metadata, and id
row = {
'vector': data,
'id': _id
}
row.update(metadata)
        if self.table is not None:
self.table.add([row])
else:
self.table = self.db.create_table(self.name, [row])
def delete(self, _id):
# This function deletes a row by id.
# LanceDB delete syntax uses SQL syntax, so you can use "in" or "="
        if self.table is None:
            raise Exception("Table not created yet, please add data first")
if isinstance(_id, str):
return self.table.delete(f"id = '{_id}'")
else:
return self.table.delete(f"id = {_id}")
def drop(self, name):
# This function drops a table, if it exists.
path = os.path.join(self.db.uri, name + '.lance')
if os.path.exists(path):
            shutil.rmtree(path)
| [
"lancedb.connect"
] | [((234, 267), 'lancedb.connect', 'lancedb.connect', (['"""./data/lancedb"""'], {}), "('./data/lancedb')\n", (249, 267), False, 'import lancedb\n'), ((2866, 2908), 'os.path.join', 'os.path.join', (['self.db.uri', "(name + '.lance')"], {}), "(self.db.uri, name + '.lance')\n", (2878, 2908), False, 'import shutil, os\n'), ((2920, 2940), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2934, 2940), False, 'import shutil, os\n'), ((2954, 2973), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (2967, 2973), False, 'import shutil, os\n')] |
from pgvector.psycopg import register_vector
from pgvector.sqlalchemy import Vector
import psycopg
from sqlalchemy import create_engine, Column, String, BIGINT, select, inspect, text
from sqlalchemy.orm import sessionmaker, mapped_column
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql import func
import re
from tqdm import tqdm
from typing import Optional, List, Iterator
import numpy as np
import pandas as pd
from memgpt.config import AgentConfig, MemGPTConfig
from memgpt.connectors.storage import StorageConnector, Passage
from memgpt.constants import MEMGPT_DIR
from memgpt.utils import printd
Base = declarative_base()
def get_db_model(table_name: str):
config = MemGPTConfig.load()
class PassageModel(Base):
"""Defines data model for storing Passages (consisting of text, embedding)"""
__abstract__ = True # this line is necessary
# Assuming passage_id is the primary key
id = Column(BIGINT, primary_key=True, nullable=False, autoincrement=True)
doc_id = Column(String)
text = Column(String, nullable=False)
embedding = mapped_column(Vector(config.embedding_dim))
# metadata_ = Column(JSON(astext_type=Text()))
def __repr__(self):
return f"<Passage(passage_id='{self.id}', text='{self.text}', embedding='{self.embedding})>"
"""Create database model for table_name"""
class_name = f"{table_name.capitalize()}Model"
Model = type(class_name, (PassageModel,), {"__tablename__": table_name, "__table_args__": {"extend_existing": True}})
return Model
class PostgresStorageConnector(StorageConnector):
"""Storage via Postgres"""
# TODO: this should probably eventually be moved into a parent DB class
def __init__(self, name: Optional[str] = None, agent_config: Optional[AgentConfig] = None):
config = MemGPTConfig.load()
# determine table name
if agent_config:
assert name is None, f"Cannot specify both agent config and name {name}"
self.table_name = self.generate_table_name_agent(agent_config)
elif name:
assert agent_config is None, f"Cannot specify both agent config and name {name}"
self.table_name = self.generate_table_name(name)
else:
raise ValueError("Must specify either agent config or name")
printd(f"Using table name {self.table_name}")
# create table
self.uri = config.archival_storage_uri
if config.archival_storage_uri is None:
raise ValueError(f"Must specifiy archival_storage_uri in config {config.config_path}")
self.db_model = get_db_model(self.table_name)
self.engine = create_engine(self.uri)
Base.metadata.create_all(self.engine) # Create the table if it doesn't exist
self.Session = sessionmaker(bind=self.engine)
self.Session().execute(text("CREATE EXTENSION IF NOT EXISTS vector")) # Enables the vector extension
def get_all_paginated(self, page_size: int) -> Iterator[List[Passage]]:
session = self.Session()
offset = 0
while True:
# Retrieve a chunk of records with the given page_size
db_passages_chunk = session.query(self.db_model).offset(offset).limit(page_size).all()
# If the chunk is empty, we've retrieved all records
if not db_passages_chunk:
break
# Yield a list of Passage objects converted from the chunk
yield [Passage(text=p.text, embedding=p.embedding, doc_id=p.doc_id, passage_id=p.id) for p in db_passages_chunk]
# Increment the offset to get the next chunk in the next iteration
offset += page_size
def get_all(self, limit=10) -> List[Passage]:
session = self.Session()
db_passages = session.query(self.db_model).limit(limit).all()
return [Passage(text=p.text, embedding=p.embedding, doc_id=p.doc_id, passage_id=p.id) for p in db_passages]
def get(self, id: str) -> Optional[Passage]:
session = self.Session()
db_passage = session.query(self.db_model).get(id)
if db_passage is None:
return None
        return Passage(text=db_passage.text, embedding=db_passage.embedding, doc_id=db_passage.doc_id, passage_id=db_passage.id)
def size(self) -> int:
# return size of table
session = self.Session()
return session.query(self.db_model).count()
def insert(self, passage: Passage):
session = self.Session()
db_passage = self.db_model(doc_id=passage.doc_id, text=passage.text, embedding=passage.embedding)
session.add(db_passage)
session.commit()
def insert_many(self, passages: List[Passage], show_progress=True):
session = self.Session()
iterable = tqdm(passages) if show_progress else passages
for passage in iterable:
db_passage = self.db_model(doc_id=passage.doc_id, text=passage.text, embedding=passage.embedding)
session.add(db_passage)
session.commit()
def query(self, query: str, query_vec: List[float], top_k: int = 10) -> List[Passage]:
session = self.Session()
# Assuming PassageModel.embedding has the capability of computing l2_distance
results = session.scalars(select(self.db_model).order_by(self.db_model.embedding.l2_distance(query_vec)).limit(top_k)).all()
# Convert the results into Passage objects
passages = [
Passage(text=result.text, embedding=np.frombuffer(result.embedding), doc_id=result.doc_id, passage_id=result.id)
for result in results
]
return passages
def delete(self):
"""Drop the passage table from the database."""
# Bind the engine to the metadata of the base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = self.engine
# Drop the table specified by the PassageModel class
self.db_model.__table__.drop(self.engine)
def save(self):
return
@staticmethod
def list_loaded_data():
config = MemGPTConfig.load()
engine = create_engine(config.archival_storage_uri)
inspector = inspect(engine)
tables = inspector.get_table_names()
tables = [table for table in tables if table.startswith("memgpt_") and not table.startswith("memgpt_agent_")]
start_chars = len("memgpt_")
tables = [table[start_chars:] for table in tables]
return tables
def sanitize_table_name(self, name: str) -> str:
# Remove leading and trailing whitespace
name = name.strip()
# Replace spaces and invalid characters with underscores
name = re.sub(r"\s+|\W+", "_", name)
# Truncate to the maximum identifier length (e.g., 63 for PostgreSQL)
max_length = 63
if len(name) > max_length:
name = name[:max_length].rstrip("_")
# Convert to lowercase
name = name.lower()
return name
def generate_table_name_agent(self, agent_config: AgentConfig):
return f"memgpt_agent_{self.sanitize_table_name(agent_config.name)}"
def generate_table_name(self, name: str):
return f"memgpt_{self.sanitize_table_name(name)}"
class LanceDBConnector(StorageConnector):
"""Storage via LanceDB"""
# TODO: this should probably eventually be moved into a parent DB class
def __init__(self, name: Optional[str] = None, agent_config: Optional[AgentConfig] = None):
config = MemGPTConfig.load()
# determine table name
if agent_config:
assert name is None, f"Cannot specify both agent config and name {name}"
self.table_name = self.generate_table_name_agent(agent_config)
elif name:
assert agent_config is None, f"Cannot specify both agent config and name {name}"
self.table_name = self.generate_table_name(name)
else:
raise ValueError("Must specify either agent config or name")
printd(f"Using table name {self.table_name}")
# create table
self.uri = config.archival_storage_uri
if config.archival_storage_uri is None:
raise ValueError(f"Must specifiy archival_storage_uri in config {config.config_path}")
import lancedb
self.db = lancedb.connect(self.uri)
if self.table_name in self.db.table_names():
self.table = self.db[self.table_name]
else:
self.table = None
def get_all_paginated(self, page_size: int) -> Iterator[List[Passage]]:
ds = self.table.to_lance()
offset = 0
while True:
# Retrieve a chunk of records with the given page_size
db_passages_chunk = ds.to_table(offset=offset, limit=page_size).to_pylist()
# If the chunk is empty, we've retrieved all records
if not db_passages_chunk:
break
# Yield a list of Passage objects converted from the chunk
yield [
Passage(text=p["text"], embedding=p["vector"], doc_id=p["doc_id"], passage_id=p["passage_id"]) for p in db_passages_chunk
]
# Increment the offset to get the next chunk in the next iteration
offset += page_size
def get_all(self, limit=10) -> List[Passage]:
db_passages = self.table.to_lance().to_table(limit=limit).to_pylist()
return [Passage(text=p["text"], embedding=p["vector"], doc_id=p["doc_id"], passage_id=p["passage_id"]) for p in db_passages]
def get(self, id: str) -> Optional[Passage]:
        db_passage = self.table.search().where(f"passage_id = {id}").to_list()
        if len(db_passage) == 0:
            return None
        row = db_passage[0]  # to_list() returns row dicts; the embedding is stored in "vector"
        return Passage(
            text=row["text"], embedding=row["vector"], doc_id=row["doc_id"], passage_id=row["passage_id"]
        )
def size(self) -> int:
# return size of table
if self.table:
return len(self.table)
else:
print(f"Table with name {self.table_name} not present")
return 0
def insert(self, passage: Passage):
data = [{"doc_id": passage.doc_id, "text": passage.text, "passage_id": passage.passage_id, "vector": passage.embedding}]
if self.table is not None:
self.table.add(data)
else:
self.table = self.db.create_table(self.table_name, data=data, mode="overwrite")
def insert_many(self, passages: List[Passage], show_progress=True):
data = []
iterable = tqdm(passages) if show_progress else passages
for passage in iterable:
temp_dict = {"doc_id": passage.doc_id, "text": passage.text, "passage_id": passage.passage_id, "vector": passage.embedding}
data.append(temp_dict)
if self.table is not None:
self.table.add(data)
else:
self.table = self.db.create_table(self.table_name, data=data, mode="overwrite")
def query(self, query: str, query_vec: List[float], top_k: int = 10) -> List[Passage]:
# Assuming query_vec is of same length as embeddings inside table
results = self.table.search(query_vec).limit(top_k).to_list()
# Convert the results into Passage objects
passages = [
Passage(text=result["text"], embedding=result["vector"], doc_id=result["doc_id"], passage_id=result["passage_id"])
for result in results
]
return passages
def delete(self):
"""Drop the passage table from the database."""
# Drop the table specified by the PassageModel class
self.db.drop_table(self.table_name)
def save(self):
return
@staticmethod
def list_loaded_data():
config = MemGPTConfig.load()
import lancedb
db = lancedb.connect(config.archival_storage_uri)
tables = db.table_names()
tables = [table for table in tables if table.startswith("memgpt_")]
start_chars = len("memgpt_")
tables = [table[start_chars:] for table in tables]
return tables
def sanitize_table_name(self, name: str) -> str:
# Remove leading and trailing whitespace
name = name.strip()
# Replace spaces and invalid characters with underscores
name = re.sub(r"\s+|\W+", "_", name)
# Truncate to the maximum identifier length
max_length = 63
if len(name) > max_length:
name = name[:max_length].rstrip("_")
# Convert to lowercase
name = name.lower()
return name
def generate_table_name_agent(self, agent_config: AgentConfig):
return f"memgpt_agent_{self.sanitize_table_name(agent_config.name)}"
def generate_table_name(self, name: str):
return f"memgpt_{self.sanitize_table_name(name)}"
| [
"lancedb.connect"
] | [((702, 720), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (718, 720), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((771, 790), 'memgpt.config.MemGPTConfig.load', 'MemGPTConfig.load', ([], {}), '()\n', (788, 790), False, 'from memgpt.config import AgentConfig, MemGPTConfig\n'), ((1026, 1094), 'sqlalchemy.Column', 'Column', (['BIGINT'], {'primary_key': '(True)', 'nullable': '(False)', 'autoincrement': '(True)'}), '(BIGINT, primary_key=True, nullable=False, autoincrement=True)\n', (1032, 1094), False, 'from sqlalchemy import create_engine, Column, String, BIGINT, select, inspect, text\n'), ((1112, 1126), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (1118, 1126), False, 'from sqlalchemy import create_engine, Column, String, BIGINT, select, inspect, text\n'), ((1142, 1172), 'sqlalchemy.Column', 'Column', (['String'], {'nullable': '(False)'}), '(String, nullable=False)\n', (1148, 1172), False, 'from sqlalchemy import create_engine, Column, String, BIGINT, select, inspect, text\n'), ((1938, 1957), 'memgpt.config.MemGPTConfig.load', 'MemGPTConfig.load', ([], {}), '()\n', (1955, 1957), False, 'from memgpt.config import AgentConfig, MemGPTConfig\n'), ((2444, 2489), 'memgpt.utils.printd', 'printd', (['f"""Using table name {self.table_name}"""'], {}), "(f'Using table name {self.table_name}')\n", (2450, 2489), False, 'from memgpt.utils import printd\n'), ((2784, 2807), 'sqlalchemy.create_engine', 'create_engine', (['self.uri'], {}), '(self.uri)\n', (2797, 2807), False, 'from sqlalchemy import create_engine, Column, String, BIGINT, select, inspect, text\n'), ((2917, 2947), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'self.engine'}), '(bind=self.engine)\n', (2929, 2947), False, 'from sqlalchemy.orm import sessionmaker, mapped_column\n'), ((4289, 4415), 'memgpt.connectors.storage.Passage', 'Passage', ([], {'text': 'db_passage.text', 'embedding': 'db_passage.embedding', 'doc_id': 'db_passage.doc_id', 'passage_id': 'db_passage.passage_id'}), '(text=db_passage.text, embedding=db_passage.embedding, doc_id=\n db_passage.doc_id, passage_id=db_passage.passage_id)\n', (4296, 4415), False, 'from memgpt.connectors.storage import StorageConnector, Passage\n'), ((6249, 6268), 'memgpt.config.MemGPTConfig.load', 'MemGPTConfig.load', ([], {}), '()\n', (6266, 6268), False, 'from memgpt.config import AgentConfig, MemGPTConfig\n'), ((6286, 6328), 'sqlalchemy.create_engine', 'create_engine', (['config.archival_storage_uri'], {}), '(config.archival_storage_uri)\n', (6299, 6328), False, 'from sqlalchemy import create_engine, Column, String, BIGINT, select, inspect, text\n'), ((6349, 6364), 'sqlalchemy.inspect', 'inspect', (['engine'], {}), '(engine)\n', (6356, 6364), False, 'from sqlalchemy import create_engine, Column, String, BIGINT, select, inspect, text\n'), ((6858, 6888), 're.sub', 're.sub', (['"""\\\\s+|\\\\W+"""', '"""_"""', 'name'], {}), "('\\\\s+|\\\\W+', '_', name)\n", (6864, 6888), False, 'import re\n'), ((7672, 7691), 'memgpt.config.MemGPTConfig.load', 'MemGPTConfig.load', ([], {}), '()\n', (7689, 7691), False, 'from memgpt.config import AgentConfig, MemGPTConfig\n'), ((8177, 8222), 'memgpt.utils.printd', 'printd', (['f"""Using table name {self.table_name}"""'], {}), "(f'Using table name {self.table_name}')\n", (8183, 8222), False, 'from memgpt.utils import printd\n'), ((8483, 8508), 'lancedb.connect', 'lancedb.connect', (['self.uri'], {}), '(self.uri)\n', (8498, 8508), False, 'import lancedb\n'), ((9895, 10033), 'memgpt.connectors.storage.Passage', 'Passage', ([], {'text': "db_passage['text']", 'embedding': "db_passage['embedding']", 'doc_id': "db_passage['doc_id']", 'passage_id': "db_passage['passage_id']"}), "(text=db_passage['text'], embedding=db_passage['embedding'], doc_id=\n db_passage['doc_id'], passage_id=db_passage['passage_id'])\n", (9902, 10033), False, 'from memgpt.connectors.storage import StorageConnector, Passage\n'), ((11938, 11957), 'memgpt.config.MemGPTConfig.load', 'MemGPTConfig.load', ([], {}), '()\n', (11955, 11957), False, 'from memgpt.config import AgentConfig, MemGPTConfig\n'), ((11995, 12039), 'lancedb.connect', 'lancedb.connect', (['config.archival_storage_uri'], {}), '(config.archival_storage_uri)\n', (12010, 12039), False, 'import lancedb\n'), ((12481, 12511), 're.sub', 're.sub', (['"""\\\\s+|\\\\W+"""', '"""_"""', 'name'], {}), "('\\\\s+|\\\\W+', '_', name)\n", (12487, 12511), False, 'import re\n'), ((1207, 1235), 'pgvector.sqlalchemy.Vector', 'Vector', (['config.embedding_dim'], {}), '(config.embedding_dim)\n', (1213, 1235), False, 'from pgvector.sqlalchemy import Vector\n'), ((2979, 3024), 'sqlalchemy.text', 'text', (['"""CREATE EXTENSION IF NOT EXISTS vector"""'], {}), "('CREATE EXTENSION IF NOT EXISTS vector')\n", (2983, 3024), False, 'from sqlalchemy import create_engine, Column, String, BIGINT, select, inspect, text\n'), ((3978, 4055), 'memgpt.connectors.storage.Passage', 'Passage', ([], {'text': 'p.text', 'embedding': 'p.embedding', 'doc_id': 'p.doc_id', 'passage_id': 'p.id'}), '(text=p.text, embedding=p.embedding, doc_id=p.doc_id, passage_id=p.id)\n', (3985, 4055), False, 'from memgpt.connectors.storage import StorageConnector, Passage\n'), ((4917, 4931), 'tqdm.tqdm', 'tqdm', (['passages'], {}), '(passages)\n', (4921, 4931), False, 'from tqdm import tqdm\n'), ((9588, 9686), 'memgpt.connectors.storage.Passage', 'Passage', ([], {'text': "p['text']", 'embedding': "p['vector']", 'doc_id': "p['doc_id']", 'passage_id': "p['passage_id']"}), "(text=p['text'], embedding=p['vector'], doc_id=p['doc_id'],\n passage_id=p['passage_id'])\n", (9595, 9686), False, 'from memgpt.connectors.storage import StorageConnector, Passage\n'), ((10726, 10740), 'tqdm.tqdm', 'tqdm', (['passages'], {}), '(passages)\n', (10730, 10740), False, 'from tqdm import tqdm\n'), ((11471, 11590), 'memgpt.connectors.storage.Passage', 'Passage', ([], {'text': "result['text']", 'embedding': "result['vector']", 'doc_id': "result['doc_id']", 'passage_id': "result['passage_id']"}), "(text=result['text'], embedding=result['vector'], doc_id=result[\n 'doc_id'], passage_id=result['passage_id'])\n", (11478, 11590), False, 'from memgpt.connectors.storage import StorageConnector, Passage\n'), ((3590, 3667), 'memgpt.connectors.storage.Passage', 'Passage', ([], {'text': 'p.text', 'embedding': 'p.embedding', 'doc_id': 'p.doc_id', 'passage_id': 'p.id'}), '(text=p.text, embedding=p.embedding, doc_id=p.doc_id, passage_id=p.id)\n', (3597, 3667), False, 'from memgpt.connectors.storage import StorageConnector, Passage\n'), ((5632, 5663), 'numpy.frombuffer', 'np.frombuffer', (['result.embedding'], {}), '(result.embedding)\n', (5645, 5663), True, 'import numpy as np\n'), ((9195, 9293), 'memgpt.connectors.storage.Passage', 'Passage', ([], {'text': "p['text']", 'embedding': "p['vector']", 'doc_id': "p['doc_id']", 'passage_id': "p['passage_id']"}), "(text=p['text'], embedding=p['vector'], doc_id=p['doc_id'],\n passage_id=p['passage_id'])\n", (9202, 9293), False, 'from memgpt.connectors.storage import StorageConnector, Passage\n'), ((5412, 5433), 'sqlalchemy.select', 'select', (['self.db_model'], {}), '(self.db_model)\n', (5418, 5433), False, 'from sqlalchemy import create_engine, Column, String, BIGINT, select, inspect, text\n')]
from langchain_community.vectorstores import LanceDB
from langchain_openai.embeddings import OpenAIEmbeddings
import lancedb
from common import EXAMPLE_TEXTS
SEARCH_NUM_RESULTS = 3
def main():
embeddings = OpenAIEmbeddings()
table, vectorstore = get_table_and_vectorstore(embeddings)
# Add example texts to the database
vectorstore.add_texts(EXAMPLE_TEXTS)
# Display the contents of the database
print("Database contents:")
print(table.to_pandas(), "\n")
while True:
query = input("Enter a search query (or type 'exit'): ")
if query == "exit":
return
# Perform a similarity search and print the results
result = vectorstore.similarity_search(query, k=SEARCH_NUM_RESULTS)
for r in result:
print(r.page_content)
print()
def get_table_and_vectorstore(embeddings):
# Create a LanceDB table, overwrite if it already exists.
db = lancedb.connect("./lancedb")
table = db.create_table(
"my_table",
data=[
{
"vector": embeddings.embed_query("hello world"),
"text": "hello world",
"id": "1",
}
],
mode="overwrite",
)
vectorstore = LanceDB(table, embeddings)
return table, vectorstore
if __name__ == "__main__":
main()
| [
"lancedb.connect"
] | [((214, 232), 'langchain_openai.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (230, 232), False, 'from langchain_openai.embeddings import OpenAIEmbeddings\n'), ((947, 975), 'lancedb.connect', 'lancedb.connect', (['"""./lancedb"""'], {}), "('./lancedb')\n", (962, 975), False, 'import lancedb\n'), ((1260, 1286), 'langchain_community.vectorstores.LanceDB', 'LanceDB', (['table', 'embeddings'], {}), '(table, embeddings)\n', (1267, 1286), False, 'from langchain_community.vectorstores import LanceDB\n')] |
import openai
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.chat_models import ChatOpenAI
from langchain.tools import tool
from pydantic import BaseModel, Field
import argparse
import lancedb
def embed_func(c):
rs = openai.Embedding.create(input=c, engine=EMBEDDINGS_MODEL)
return [record["embedding"] for record in rs["data"]]
class InsertCritiquesInput(BaseModel):
info: str = Field(
description="should be demographics or interests or other information about the exercise request provided by the user"
)
actions: str = Field(
description="numbered list of langchain agent actions taken (searched for, gave this response, etc.)"
)
critique: str = Field(
description="negative constructive feedback on the actions you took, limitations, potential biases, and more"
)
@tool("insert_critiques", args_schema=InsertCritiquesInput)
def insert_critiques(info: str, actions: str, critique: str) -> str:
"Insert actions and critiques for similar exercise requests in the future." ""
table_name = "exercise-routine"
if table_name not in db.table_names():
tbl = db.create_table(
table_name,
[{"vector": embed_func(info)[0], "actions": actions, "critique": critique}],
)
    else:
        tbl = db.open_table(table_name)
        # Add only on the open path; create_table above already wrote the first row
        tbl.add(
            [{"vector": embed_func(info)[0], "actions": actions, "critique": critique}]
        )
return "Inserted and done."
class RetrieveCritiquesInput(BaseModel):
query: str = Field(
description="should be demographics or interests or other information about the exercise request provided by the user"
)
@tool("retrieve_critiques", args_schema=RetrieveCritiquesInput)
def retrieve_critiques(query: str) -> str:
"Retrieve actions and critiques for similar exercise requests." ""
table_name = "exercise-routine"
if table_name in db.table_names():
tbl = db.open_table(table_name)
results = (
tbl.search(embed_func(query)[0])
.limit(5)
.select(["actions", "critique"])
.to_df()
)
        # Only "actions" and "critique" were selected above, so index them directly
        results_list = results[["actions", "critique"]].values.tolist()
return (
"Continue with the list with relevant actions and critiques which are in the format [[action, critique], ...]:\n"
+ str(results_list)
)
else:
return "No info, but continue."
def create_prompt(info: str) -> str:
prompt_start = (
"Please execute actions as a fitness trainer based on the information about the user and their interests below.\n\n"
+ "Info from the user:\n\n"
)
prompt_end = (
"\n\n1. Retrieve using user info and review the past actions and critiques if there is any\n"
+ "2. Keep past actions and critiques in mind while researching for an exercise routine with steps which we respond to the user\n"
+ "3. Before returning the response, it is of upmost importance to insert the actions you took (numbered list: searched for, found this, etc.) and critiques (negative feedback: limitations, potential biases, and more) into the database for getting better exercise routines in the future. \n"
)
return prompt_start + info + prompt_end
def run_agent(info):
agent = initialize_agent(
tools,
llm,
agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
agent.run(input=create_prompt(info))
def args_parse():
default_query = "university student, loves running"
global EMBEDDINGS_MODEL
parser = argparse.ArgumentParser(description="Reducing Hallucinations in AI Agents")
parser.add_argument(
"--query", type=str, default=default_query, help="query to search"
)
parser.add_argument(
"--llm", type=str, default="gpt-3.5-turbo-0613", help="OpenAI LLM"
)
parser.add_argument(
"--embeddings",
type=str,
default="text-embedding-ada-002",
help="OpenAI Embeddings Model",
)
args = parser.parse_args()
EMBEDDINGS_MODEL = args.embeddings
return args
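# Example invocation (assuming this file is saved as main.py):
#   python main.py --query "university student, loves running" --llm gpt-3.5-turbo-0613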
if __name__ == "__main__":
args = args_parse()
global db
db = lancedb.connect("data/agent-lancedb")
llm = ChatOpenAI(temperature=0, model=args.llm)
tools = load_tools(["serpapi"], llm=llm)
tools.extend([insert_critiques, retrieve_critiques])
run_agent(args.query)
| [
"lancedb.connect"
] | [((925, 983), 'langchain.tools.tool', 'tool', (['"""insert_critiques"""'], {'args_schema': 'InsertCritiquesInput'}), "('insert_critiques', args_schema=InsertCritiquesInput)\n", (929, 983), False, 'from langchain.tools import tool\n'), ((1769, 1831), 'langchain.tools.tool', 'tool', (['"""retrieve_critiques"""'], {'args_schema': 'RetrieveCritiquesInput'}), "('retrieve_critiques', args_schema=RetrieveCritiquesInput)\n", (1773, 1831), False, 'from langchain.tools import tool\n'), ((316, 373), 'openai.Embedding.create', 'openai.Embedding.create', ([], {'input': 'c', 'engine': 'EMBEDDINGS_MODEL'}), '(input=c, engine=EMBEDDINGS_MODEL)\n', (339, 373), False, 'import openai\n'), ((489, 624), 'pydantic.Field', 'Field', ([], {'description': '"""should be demographics or interests or other information about the exercise request provided by the user"""'}), "(description=\n 'should be demographics or interests or other information about the exercise request provided by the user'\n )\n", (494, 624), False, 'from pydantic import BaseModel, Field\n'), ((648, 766), 'pydantic.Field', 'Field', ([], {'description': '"""numbered list of langchain agent actions taken (searched for, gave this response, etc.)"""'}), "(description=\n 'numbered list of langchain agent actions taken (searched for, gave this response, etc.)'\n )\n", (653, 766), False, 'from pydantic import BaseModel, Field\n'), ((791, 917), 'pydantic.Field', 'Field', ([], {'description': '"""negative constructive feedback on the actions you took, limitations, potential biases, and more"""'}), "(description=\n 'negative constructive feedback on the actions you took, limitations, potential biases, and more'\n )\n", (796, 917), False, 'from pydantic import BaseModel, Field\n'), ((1626, 1761), 'pydantic.Field', 'Field', ([], {'description': '"""should be demographics or interests or other information about the exercise request provided by the user"""'}), "(description=\n 'should be demographics or interests or other information about the exercise request provided by the user'\n )\n", (1631, 1761), False, 'from pydantic import BaseModel, Field\n'), ((3401, 3509), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': 'AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION', 'verbose': '(True)'}), '(tools, llm, agent=AgentType.\n STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True)\n', (3417, 3509), False, 'from langchain.agents import initialize_agent\n'), ((3703, 3778), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Reducing Hallucinations in AI Agents"""'}), "(description='Reducing Hallucinations in AI Agents')\n", (3726, 3778), False, 'import argparse\n'), ((4311, 4348), 'lancedb.connect', 'lancedb.connect', (['"""data/agent-lancedb"""'], {}), "('data/agent-lancedb')\n", (4326, 4348), False, 'import lancedb\n'), ((4360, 4401), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model': 'args.llm'}), '(temperature=0, model=args.llm)\n', (4370, 4401), False, 'from langchain.chat_models import ChatOpenAI\n'), ((4414, 4446), 'langchain.agents.load_tools', 'load_tools', (["['serpapi']"], {'llm': 'llm'}), "(['serpapi'], llm=llm)\n", (4424, 4446), False, 'from langchain.agents import load_tools\n')] |
import streamlit as st
import sqlite3
import streamlit_antd_components as sac
import pandas as pd
import os
import openai
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.document_loaders import UnstructuredFileLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import LanceDB
from authenticate import return_api_key
import lancedb
import pickle
import configparser
import ast
class ConfigHandler:
def __init__(self):
self.config = configparser.ConfigParser()
self.config.read('config.ini')
def get_config_values(self, section, key):
value = self.config.get(section, key)
try:
# Try converting the string value to a Python data structure
return ast.literal_eval(value)
except (SyntaxError, ValueError):
# If not a data structure, return the plain string
return value
config_handler = ConfigHandler()
TCH = config_handler.get_config_values('constants', 'TCH')
STU = config_handler.get_config_values('constants', 'STU')
SA = config_handler.get_config_values('constants', 'SA')
AD = config_handler.get_config_values('constants', 'AD')
# Create or check for the 'database' directory in the current working directory
cwd = os.getcwd()
WORKING_DIRECTORY = os.path.join(cwd, "database")
if not os.path.exists(WORKING_DIRECTORY):
os.makedirs(WORKING_DIRECTORY)
if st.secrets["sql_ext_path"] == "None":
WORKING_DATABASE= os.path.join(WORKING_DIRECTORY , st.secrets["default_db"])
else:
WORKING_DATABASE= st.secrets["sql_ext_path"]
def fetch_vectorstores_with_usernames():
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
query = '''
SELECT
Vector_Stores.vs_id,
Subject.subject_name,
Topic.topic_name,
Vector_Stores.vectorstore_name,
Users.username,
Vector_Stores.sharing_enabled
FROM Vector_Stores
JOIN Users ON Vector_Stores.user_id = Users.user_id
LEFT JOIN Subject ON Vector_Stores.subject = Subject.id
LEFT JOIN Topic ON Vector_Stores.topic = Topic.id;
'''
cursor.execute(query)
data = cursor.fetchall()
conn.close()
return data
def display_vectorstores():
data = fetch_vectorstores_with_usernames()
df = pd.DataFrame(data, columns=["vs_id", "subject_name", "topic_name", "vectorstore_name", "username", "sharing_enabled"])
# Convert the 'sharing_enabled' values
df["sharing_enabled"] = df["sharing_enabled"].apply(lambda x: '✔' if x == 1 else '')
st.dataframe(
df,
use_container_width=True,
column_order=["vs_id", "subject_name", "topic_name", "vectorstore_name", "username", "sharing_enabled"]
)
def fetch_all_files():
"""
Fetch all files either shared or based on user type
"""
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Construct the SQL query with JOINs for Subject, Topic, and Users tables
if st.session_state.user['profile_id'] == 'SA':
cursor.execute('''
SELECT Files.file_id, Files.file_name, Subject.subject_name, Topic.topic_name, Users.username
FROM Files
JOIN Subject ON Files.subject = Subject.id
JOIN Topic ON Files.topic = Topic.id
JOIN Users ON Files.user_id = Users.user_id
''')
else:
cursor.execute('''
SELECT Files.file_id, Files.file_name, Subject.subject_name, Topic.topic_name, Users.username
FROM Files
JOIN Subject ON Files.subject = Subject.id
JOIN Topic ON Files.topic = Topic.id
JOIN Users ON Files.user_id = Users.user_id
WHERE Files.sharing_enabled = 1
''')
files = cursor.fetchall()
formatted_files = [f"({file[0]}) {file[1]} ({file[4]})" for file in files]
conn.close()
return formatted_files
def fetch_file_data(file_id):
"""
Fetch file data given a file id
"""
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
cursor.execute("SELECT data, metadata FROM Files WHERE file_id = ?", (file_id,))
data = cursor.fetchone()
conn.close()
if data:
return data[0], data[1]
else:
return None, None
def insert_topic(org_id, topic_name):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
try:
cursor.execute('INSERT INTO Topic (org_id, topic_name) VALUES (?, ?);', (org_id, topic_name))
conn.commit()
return True # Indicates successful insertion
except sqlite3.IntegrityError:
# IntegrityError occurs if topic_name is not unique within the org
return False # Indicates topic_name is not unique within the org
finally:
conn.close()
def insert_subject(org_id, subject_name):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
try:
cursor.execute('INSERT INTO Subject (org_id, subject_name) VALUES (?, ?);', (org_id, subject_name))
conn.commit()
return True # Indicates successful insertion
except sqlite3.IntegrityError:
# IntegrityError occurs if subject_name is not unique within the org
return False # Indicates subject_name is not unique within the org
finally:
conn.close()
def select_organization():
with sqlite3.connect(WORKING_DATABASE) as conn:
cursor = conn.cursor()
# Org selection
org_query = "SELECT org_name FROM Organizations"
cursor.execute(org_query)
orgs = cursor.fetchall()
org_names = [org[0] for org in orgs]
# Use a Streamlit selectbox to choose an organization
selected_org_name = st.selectbox("Select an organization:", org_names)
# Retrieve the org_id for the selected organization
cursor.execute('SELECT org_id FROM Organizations WHERE org_name = ?;', (selected_org_name,))
result = cursor.fetchone()
if result:
org_id = result[0]
st.write(f"The org_id for {selected_org_name} is {org_id}.")
return org_id
else:
st.write(f"Organization '{selected_org_name}' not found in the database.")
return None
def fetch_subjects_by_org(org_id):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Check if the user is a super_admin (org_id is 0)
if org_id == 0:
cursor.execute('SELECT * FROM Subject;')
else:
cursor.execute('SELECT * FROM Subject WHERE org_id = ?;', (org_id,))
subjects = cursor.fetchall()
conn.close()
return subjects
def fetch_topics_by_org(org_id):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Check if the user is a super_admin (org_id is 0)
if org_id == 0:
cursor.execute('SELECT * FROM Topic;')
else:
cursor.execute('SELECT * FROM Topic WHERE org_id = ?;', (org_id,))
topics = cursor.fetchall()
conn.close()
return topics
def split_docs(file_path,meta):
#def split_meta_docs(file, source, tch_code):
loader = UnstructuredFileLoader(file_path)
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
metadata = {"source": meta}
for doc in docs:
doc.metadata.update(metadata)
return docs
def create_lancedb_table(embeddings, meta, table_name):
lancedb_path = os.path.join(WORKING_DIRECTORY, "lancedb")
# LanceDB connection
db = lancedb.connect(lancedb_path)
table = db.create_table(
f"{table_name}",
data=[
{
"vector": embeddings.embed_query("Query Unsuccessful"),
"text": "Query Unsuccessful",
"id": "1",
"source": f"{meta}"
}
],
mode="overwrite",
)
return table
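# Note: the seed row above fixes the vector dimensionality of the new table,
# so later inserts must use embeddings of the same dimension.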
def save_to_vectorstores(vs, vstore_input_name, subject, topic, username, share_resource=False):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Fetch the user's details
cursor.execute('SELECT user_id FROM Users WHERE username = ?', (username,))
user_details = cursor.fetchone()
if not user_details:
st.error("Error: User not found.")
return
user_id = user_details[0]
# If Vector_Store instance exists in session state, then serialize and save
if vs:
serialized_db = pickle.dumps(vs)
# Check if the entry already exists
cursor.execute('SELECT 1 FROM Vector_Stores WHERE vectorstore_name LIKE ? AND user_id = ?', (f"%{vstore_input_name}%", user_id))
exists = cursor.fetchone()
if exists:
st.error("Error: An entry with the same vectorstore_name and user_id already exists.")
return
if subject is None:
st.error("Error: Subject is missing.")
return
if topic is None:
st.error("Error: Topic is missing.")
return
# Get the subject and topic IDs
cursor.execute('SELECT id FROM Subject WHERE subject_name = ?', (subject,))
subject_id = cursor.fetchone()[0]
cursor.execute('SELECT id FROM Topic WHERE topic_name = ?', (topic,))
topic_id = cursor.fetchone()[0]
# Insert the new row
cursor.execute('''
INSERT INTO Vector_Stores (vectorstore_name, data, user_id, subject, topic, sharing_enabled)
VALUES (?, ?, ?, ?, ?, ?)
''', (vstore_input_name, serialized_db, user_id, subject_id, topic_id, share_resource))
conn.commit()
conn.close()
def create_vectorstore():
openai.api_key = return_api_key()
os.environ["OPENAI_API_KEY"] = return_api_key()
full_docs = []
st.subheader("Enter the topic and subject for your knowledge base")
embeddings = OpenAIEmbeddings()
if st.session_state.user['profile_id'] == SA:
org_id = select_organization()
if org_id is None:
return
else:
org_id = st.session_state.user["org_id"]
# Fetch all available subjects
subjects = fetch_subjects_by_org(st.session_state.user["org_id"])
subject_names = [sub[2] for sub in subjects] # Assuming index 2 holds the subject_name
selected_subject = st.selectbox("Select an existing subject or type a new one:", options=subject_names + ['New Subject'])
if selected_subject == 'New Subject':
subject = st.text_input("Please enter the new subject name:", max_chars=30)
if subject:
insert_subject(org_id, subject)
else:
subject = selected_subject
# Fetch all available topics
topics = fetch_topics_by_org(st.session_state.user["org_id"])
topic_names = [topic[2] for topic in topics] # Assuming index 2 holds the topic_name
selected_topic = st.selectbox("Select an existing topic or type a new one:", options=topic_names + ['New Topic'])
if selected_topic == 'New Topic':
topic = st.text_input("Please enter the new topic name:", max_chars=30)
if topic:
insert_topic(org_id, topic)
else:
topic = selected_topic
vectorstore_input = st.text_input("Please type in a name for your knowledge base:", max_chars=20)
vs_name = vectorstore_input + f"_({st.session_state.user['username']})"
share_resource = st.checkbox("Share this resource", value=True) # <-- Added this line
# Show the current build of files for the latest database
st.subheader("Select one or more files to build your knowledge base")
files = fetch_all_files()
if files:
selected_files = sac.transfer(items=files, label=None, index=None, titles=['Uploaded files', 'Select files for KB'], format_func='title', width='100%', height=None, search=True, pagination=False, oneway=False, reload=True, disabled=False, return_index=False)
# Alert to confirm the creation of knowledge base
st.warning("Building your knowledge base will take some time. Please be patient.")
build = sac.buttons([
dict(label='Build VectorStore', icon='check-circle-fill', color = 'green'),
dict(label='Cancel', icon='x-circle-fill', color='red'),
], label=None, index=1, format_func='title', align='center', position='top', size='default', direction='horizontal', shape='round', type='default', compact=False, return_index=False)
if build == 'Build VectorStore' and selected_files:
for s_file in selected_files:
file_id = int(s_file.split("(", 1)[1].split(")", 1)[0])
file_data, meta = fetch_file_data(file_id)
docs = split_docs(file_data, meta)
full_docs.extend(docs)
            # Reuse the embeddings instance created above; meta holds the
            # metadata of the last selected file.
            db = LanceDB.from_documents(full_docs, embeddings, connection=create_lancedb_table(embeddings, meta, vs_name))
save_to_vectorstores(db, vs_name, subject, topic, st.session_state.user["username"], share_resource) # Passing the share_resource to the function
st.success("Knowledge Base loaded")
else:
st.write("No files found in the database.")
def delete_lancedb_table(table_name):
lancedb_path = os.path.join(WORKING_DIRECTORY, "lancedb")
# LanceDB connection
db = lancedb.connect(lancedb_path)
db.drop_table(f"{table_name}")
def fetch_vectorstores_by_user_id(user_id):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Fetch vectorstores based on user_id
cursor.execute('SELECT vectorstore_name FROM Vector_Stores WHERE user_id = ?;', (user_id,))
vectorstores = cursor.fetchall()
conn.close()
return vectorstores
def delete_vectorstores():
st.subheader("Delete VectorStores in Database:")
user_vectorstores = fetch_vectorstores_by_user_id(st.session_state.user["id"])
if user_vectorstores:
vectorstore_names = [vs[0] for vs in user_vectorstores]
selected_vectorstores = st.multiselect("Select vectorstores to delete:", options=vectorstore_names)
confirm_delete = st.checkbox("I understand that this action cannot be undone.", value=False)
if st.button("Delete VectorStore"):
if confirm_delete and selected_vectorstores:
delete_vectorstores_from_db(selected_vectorstores, st.session_state.user["id"], st.session_state.user["profile_id"])
st.success(f"Deleted {len(selected_vectorstores)} vectorstores.")
else:
st.warning("Please confirm the deletion action.")
else:
st.write("No vectorstores found in the database.")
def delete_vectorstores_from_db(vectorstore_names, user_id, profile):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
for vectorstore_name in vectorstore_names:
if profile in ['SA', 'AD']:
# Delete the corresponding LanceDB table
delete_lancedb_table(vectorstore_name)
# Delete vectorstore irrespective of the user_id associated with them
cursor.execute('DELETE FROM Vector_Stores WHERE vectorstore_name=?;', (vectorstore_name,))
else:
# Delete the corresponding LanceDB table
delete_lancedb_table(vectorstore_name)
# Delete only if the user_id matches
cursor.execute('DELETE FROM Vector_Stores WHERE vectorstore_name=? AND user_id=?;', (vectorstore_name, user_id))
# Check if the row was affected
if cursor.rowcount == 0:
st.error(f"Unable to delete vectorstore '{vectorstore_name}' that is not owned by you.")
conn.commit() # Commit the changes
conn.close() # Close the connection
| [
"lancedb.connect"
] | [((1289, 1300), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1298, 1300), False, 'import os\n'), ((1321, 1350), 'os.path.join', 'os.path.join', (['cwd', '"""database"""'], {}), "(cwd, 'database')\n", (1333, 1350), False, 'import os\n'), ((1359, 1392), 'os.path.exists', 'os.path.exists', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (1373, 1392), False, 'import os\n'), ((1395, 1425), 'os.makedirs', 'os.makedirs', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (1406, 1425), False, 'import os\n'), ((1487, 1544), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', "st.secrets['default_db']"], {}), "(WORKING_DIRECTORY, st.secrets['default_db'])\n", (1499, 1544), False, 'import os\n'), ((1651, 1684), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (1666, 1684), False, 'import sqlite3\n'), ((2316, 2438), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['vs_id', 'subject_name', 'topic_name', 'vectorstore_name', 'username',\n 'sharing_enabled']"}), "(data, columns=['vs_id', 'subject_name', 'topic_name',\n 'vectorstore_name', 'username', 'sharing_enabled'])\n", (2328, 2438), True, 'import pandas as pd\n'), ((2573, 2728), 'streamlit.dataframe', 'st.dataframe', (['df'], {'use_container_width': '(True)', 'column_order': "['vs_id', 'subject_name', 'topic_name', 'vectorstore_name', 'username',\n 'sharing_enabled']"}), "(df, use_container_width=True, column_order=['vs_id',\n 'subject_name', 'topic_name', 'vectorstore_name', 'username',\n 'sharing_enabled'])\n", (2585, 2728), True, 'import streamlit as st\n'), ((2859, 2892), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (2874, 2892), False, 'import sqlite3\n'), ((4034, 4067), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (4049, 4067), False, 'import sqlite3\n'), ((4363, 4396), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (4378, 4396), False, 'import sqlite3\n'), ((4884, 4917), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (4899, 4917), False, 'import sqlite3\n'), ((6328, 6361), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (6343, 6361), False, 'import sqlite3\n'), ((6721, 6754), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (6736, 6754), False, 'import sqlite3\n'), ((7150, 7183), 'langchain.document_loaders.UnstructuredFileLoader', 'UnstructuredFileLoader', (['file_path'], {}), '(file_path)\n', (7172, 7183), False, 'from langchain.document_loaders import UnstructuredFileLoader\n'), ((7228, 7283), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(0)'}), '(chunk_size=1000, chunk_overlap=0)\n', (7249, 7283), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((7498, 7540), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', '"""lancedb"""'], {}), "(WORKING_DIRECTORY, 'lancedb')\n", (7510, 7540), False, 'import os\n'), ((7569, 7598), 'lancedb.connect', 'lancedb.connect', (['lancedb_path'], {}), '(lancedb_path)\n', (7584, 7598), False, 'import lancedb\n'), ((7948, 7981), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (7963, 7981), False, 'import sqlite3\n'), ((9632, 9648), 'authenticate.return_api_key', 'return_api_key', ([], {}), '()\n', (9646, 9648), False, 'from authenticate import 
return_api_key\n'), ((9684, 9700), 'authenticate.return_api_key', 'return_api_key', ([], {}), '()\n', (9698, 9700), False, 'from authenticate import return_api_key\n'), ((9724, 9791), 'streamlit.subheader', 'st.subheader', (['"""Enter the topic and subject for your knowledge base"""'], {}), "('Enter the topic and subject for your knowledge base')\n", (9736, 9791), True, 'import streamlit as st\n'), ((9809, 9827), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (9825, 9827), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((10248, 10355), 'streamlit.selectbox', 'st.selectbox', (['"""Select an existing subject or type a new one:"""'], {'options': "(subject_names + ['New Subject'])"}), "('Select an existing subject or type a new one:', options=\n subject_names + ['New Subject'])\n", (10260, 10355), True, 'import streamlit as st\n'), ((10803, 10904), 'streamlit.selectbox', 'st.selectbox', (['"""Select an existing topic or type a new one:"""'], {'options': "(topic_names + ['New Topic'])"}), "('Select an existing topic or type a new one:', options=\n topic_names + ['New Topic'])\n", (10815, 10904), True, 'import streamlit as st\n'), ((11151, 11228), 'streamlit.text_input', 'st.text_input', (['"""Please type in a name for your knowledge base:"""'], {'max_chars': '(20)'}), "('Please type in a name for your knowledge base:', max_chars=20)\n", (11164, 11228), True, 'import streamlit as st\n'), ((11326, 11372), 'streamlit.checkbox', 'st.checkbox', (['"""Share this resource"""'], {'value': '(True)'}), "('Share this resource', value=True)\n", (11337, 11372), True, 'import streamlit as st\n'), ((11463, 11532), 'streamlit.subheader', 'st.subheader', (['"""Select one or more files to build your knowledge base"""'], {}), "('Select one or more files to build your knowledge base')\n", (11475, 11532), True, 'import streamlit as st\n'), ((13201, 13243), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', '"""lancedb"""'], {}), "(WORKING_DIRECTORY, 'lancedb')\n", (13213, 13243), False, 'import os\n'), ((13272, 13301), 'lancedb.connect', 'lancedb.connect', (['lancedb_path'], {}), '(lancedb_path)\n', (13287, 13301), False, 'import lancedb\n'), ((13390, 13423), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (13405, 13423), False, 'import sqlite3\n'), ((13709, 13757), 'streamlit.subheader', 'st.subheader', (['"""Delete VectorStores in Database:"""'], {}), "('Delete VectorStores in Database:')\n", (13721, 13757), True, 'import streamlit as st\n'), ((14705, 14738), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (14720, 14738), False, 'import sqlite3\n'), ((512, 539), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (537, 539), False, 'import configparser\n'), ((5398, 5431), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (5413, 5431), False, 'import sqlite3\n'), ((5757, 5807), 'streamlit.selectbox', 'st.selectbox', (['"""Select an organization:"""', 'org_names'], {}), "('Select an organization:', org_names)\n", (5769, 5807), True, 'import streamlit as st\n'), ((8192, 8226), 'streamlit.error', 'st.error', (['"""Error: User not found."""'], {}), "('Error: User not found.')\n", (8200, 8226), True, 'import streamlit as st\n'), ((8389, 8405), 'pickle.dumps', 'pickle.dumps', (['vs'], {}), '(vs)\n', (8401, 8405), False, 'import pickle\n'), ((10416, 10481), 'streamlit.text_input', 'st.text_input', (['"""Please 
enter the new subject name:"""'], {'max_chars': '(30)'}), "('Please enter the new subject name:', max_chars=30)\n", (10429, 10481), True, 'import streamlit as st\n'), ((10959, 11022), 'streamlit.text_input', 'st.text_input', (['"""Please enter the new topic name:"""'], {'max_chars': '(30)'}), "('Please enter the new topic name:', max_chars=30)\n", (10972, 11022), True, 'import streamlit as st\n'), ((11602, 11856), 'streamlit_antd_components.transfer', 'sac.transfer', ([], {'items': 'files', 'label': 'None', 'index': 'None', 'titles': "['Uploaded files', 'Select files for KB']", 'format_func': '"""title"""', 'width': '"""100%"""', 'height': 'None', 'search': '(True)', 'pagination': '(False)', 'oneway': '(False)', 'reload': '(True)', 'disabled': '(False)', 'return_index': '(False)'}), "(items=files, label=None, index=None, titles=['Uploaded files',\n 'Select files for KB'], format_func='title', width='100%', height=None,\n search=True, pagination=False, oneway=False, reload=True, disabled=\n False, return_index=False)\n", (11614, 11856), True, 'import streamlit_antd_components as sac\n'), ((11919, 12006), 'streamlit.warning', 'st.warning', (['"""Building your knowledge base will take some time. Please be patient."""'], {}), "(\n 'Building your knowledge base will take some time. Please be patient.')\n", (11929, 12006), True, 'import streamlit as st\n'), ((13101, 13144), 'streamlit.write', 'st.write', (['"""No files found in the database."""'], {}), "('No files found in the database.')\n", (13109, 13144), True, 'import streamlit as st\n'), ((13968, 14043), 'streamlit.multiselect', 'st.multiselect', (['"""Select vectorstores to delete:"""'], {'options': 'vectorstore_names'}), "('Select vectorstores to delete:', options=vectorstore_names)\n", (13982, 14043), True, 'import streamlit as st\n'), ((14069, 14144), 'streamlit.checkbox', 'st.checkbox', (['"""I understand that this action cannot be undone."""'], {'value': '(False)'}), "('I understand that this action cannot be undone.', value=False)\n", (14080, 14144), True, 'import streamlit as st\n'), ((14165, 14196), 'streamlit.button', 'st.button', (['"""Delete VectorStore"""'], {}), "('Delete VectorStore')\n", (14174, 14196), True, 'import streamlit as st\n'), ((14572, 14622), 'streamlit.write', 'st.write', (['"""No vectorstores found in the database."""'], {}), "('No vectorstores found in the database.')\n", (14580, 14622), True, 'import streamlit as st\n'), ((778, 801), 'ast.literal_eval', 'ast.literal_eval', (['value'], {}), '(value)\n', (794, 801), False, 'import ast\n'), ((6068, 6128), 'streamlit.write', 'st.write', (['f"""The org_id for {selected_org_name} is {org_id}."""'], {}), "(f'The org_id for {selected_org_name} is {org_id}.')\n", (6076, 6128), True, 'import streamlit as st\n'), ((6181, 6255), 'streamlit.write', 'st.write', (['f"""Organization \'{selected_org_name}\' not found in the database."""'], {}), '(f"Organization \'{selected_org_name}\' not found in the database.")\n', (6189, 6255), True, 'import streamlit as st\n'), ((8655, 8751), 'streamlit.error', 'st.error', (['"""Error: An entry with the same vectorstore_name and user_id already exists."""'], {}), "(\n 'Error: An entry with the same vectorstore_name and user_id already exists.'\n )\n", (8663, 8751), True, 'import streamlit as st\n'), ((8810, 8848), 'streamlit.error', 'st.error', (['"""Error: Subject is missing."""'], {}), "('Error: Subject is missing.')\n", (8818, 8848), True, 'import streamlit as st\n'), ((8907, 8943), 'streamlit.error', 'st.error', (['"""Error: Topic is 
missing."""'], {}), "('Error: Topic is missing.')\n", (8915, 8943), True, 'import streamlit as st\n'), ((13046, 13081), 'streamlit.success', 'st.success', (['"""Knowledge Base loaded"""'], {}), "('Knowledge Base loaded')\n", (13056, 13081), True, 'import streamlit as st\n'), ((12794, 12812), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (12810, 12812), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((14504, 14553), 'streamlit.warning', 'st.warning', (['"""Please confirm the deletion action."""'], {}), "('Please confirm the deletion action.')\n", (14514, 14553), True, 'import streamlit as st\n'), ((15567, 15665), 'streamlit.error', 'st.error', (['f"""Unable to delete vectorstore \'{vectorstore_name}\' that is not owned by you."""'], {}), '(\n f"Unable to delete vectorstore \'{vectorstore_name}\' that is not owned by you."\n )\n', (15575, 15665), True, 'import streamlit as st\n')] |
import lancedb
from langchain_community.embeddings import GPT4AllEmbeddings
from langchain_community.vectorstores import LanceDB
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import TextLoader, PyPDFLoader
db = lancedb.connect("./lancedb")
table = db.create_table(
"peter_griffin",
data=[
{
"vector": GPT4AllEmbeddings().embed_query("hello world"),
"text": "hello world",
"id": "1",
}
],
mode="overwrite",
)
splitter = RecursiveCharacterTextSplitter(chunk_size=256, chunk_overlap=0)
# rawDoc = TextLoader("./docs/FakeWiki.txt").load()
rawDoc = PyPDFLoader('./docs/Peter_Griffin.pdf').load()
documents = splitter.split_documents(rawDoc)
print(len(documents))
vectorStore = LanceDB(connection=table, embedding=GPT4AllEmbeddings())
vectorStore.add_documents(documents=documents)
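# A minimal follow-up query (not in the original file): retrieve the chunks
# most similar to a question about the indexed document.
results = vectorStore.similarity_search("Who is Peter Griffin?", k=3)
for doc in results:
    print(doc.page_content)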
| [
"lancedb.connect"
] | [((275, 303), 'lancedb.connect', 'lancedb.connect', (['"""./lancedb"""'], {}), "('./lancedb')\n", (290, 303), False, 'import lancedb\n'), ((552, 615), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(256)', 'chunk_overlap': '(0)'}), '(chunk_size=256, chunk_overlap=0)\n', (582, 615), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((679, 718), 'langchain_community.document_loaders.PyPDFLoader', 'PyPDFLoader', (['"""./docs/Peter_Griffin.pdf"""'], {}), "('./docs/Peter_Griffin.pdf')\n", (690, 718), False, 'from langchain_community.document_loaders import TextLoader, PyPDFLoader\n'), ((846, 865), 'langchain_community.embeddings.GPT4AllEmbeddings', 'GPT4AllEmbeddings', ([], {}), '()\n', (863, 865), False, 'from langchain_community.embeddings import GPT4AllEmbeddings\n'), ((393, 412), 'langchain_community.embeddings.GPT4AllEmbeddings', 'GPT4AllEmbeddings', ([], {}), '()\n', (410, 412), False, 'from langchain_community.embeddings import GPT4AllEmbeddings\n')] |
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import LanceDB
# embedding_model = HuggingFaceEmbeddings(model_name = "moka-ai/m3e-base")
from langchain.embeddings import LocalAIEmbeddings
openai_api_base_address = "http://172.23.115.108:20000/v1"
# This works: it uses an embedding model served behind an OpenAI-compatible API
embedding_model = LocalAIEmbeddings(openai_api_key="aaabbbcccdddeeefffedddsfasdfasdf",
                                  openai_api_base=openai_api_base_address,
                                  model="vicuna-13b-v1.5")
import lancedb
db = lancedb.connect("/tmp/lancedb")
table = db.create_table(
"my_table",
data=[
{
"vector": embedding_model.embed_query("Hello World"),
"text": "Hello World",
"id": "1",
}
],
mode="overwrite",
)
# Load the document, split it into chunks, embed each chunk and load it into the vector store.
raw_documents = TextLoader('./LangChainStudy/demo_text.txt').load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
documents = text_splitter.split_documents(raw_documents)
# Use a distinct name so the lancedb connection `db` is not shadowed
vectorstore = LanceDB.from_documents(documents, embedding_model, connection=table)
query = "how to customize your text splitter"
docs = vectorstore.similarity_search(query)
print(docs[0].page_content) | [
"lancedb.connect"
] | [((439, 577), 'langchain.embeddings.LocalAIEmbeddings', 'LocalAIEmbeddings', ([], {'openai_api_key': '"""aaabbbcccdddeeefffedddsfasdfasdf"""', 'openai_api_base': 'openai_api_base_address', 'model': '"""vicuna-13b-v1.5"""'}), "(openai_api_key='aaabbbcccdddeeefffedddsfasdfasdf',\n openai_api_base=openai_api_base_address, model='vicuna-13b-v1.5')\n", (456, 577), False, 'from langchain.embeddings import LocalAIEmbeddings\n'), ((660, 691), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (675, 691), False, 'import lancedb\n'), ((1099, 1154), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(0)'}), '(chunk_size=1000, chunk_overlap=0)\n', (1120, 1154), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1217, 1285), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['documents', 'embedding_model'], {'connection': 'table'}), '(documents, embedding_model, connection=table)\n', (1239, 1285), False, 'from langchain.vectorstores import LanceDB\n'), ((1031, 1075), 'langchain.document_loaders.TextLoader', 'TextLoader', (['"""./LangChainStudy/demo_text.txt"""'], {}), "('./LangChainStudy/demo_text.txt')\n", (1041, 1075), False, 'from langchain.document_loaders import TextLoader\n')] |
from flask import Flask, request, jsonify
import requests
import json
from flask_cors import CORS
from FlagEmbedding import LLMEmbedder, FlagReranker
from searchdb import search
import lancedb
import pandas as pd
task = "qa" # Encode for a specific task (qa, icl, chat, lrlm, tool, convsearch)
embed_model = LLMEmbedder('BAAI/llm-embedder', use_fp16=False) # Load model (automatically use GPUs)
reranker_model = FlagReranker('BAAI/bge-reranker-base', use_fp16=True) # use_fp16 speeds up computation with a slight performance degradation
db = lancedb.connect("./db")
app = Flask(__name__)
CORS(app, supports_credentials=True, resources={r"/openrouter-query": {"origins": "*"}})
# NOTE: the keys below are hardcoded for demo purposes; in production, load the
# API key from an environment variable instead.
OPENROUTER_API_KEY = "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"
# OPENROUTER_API_KEY = "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"
@app.route('/openrouter-query', methods=['POST'])
def query_openrouter():
print("hit")
# Get data from the incoming request
user_message = request.json.get('message')
print(user_message)
if not user_message:
return jsonify({"error": "No message provided"}), 400
try:
Context=search(user_message,top_k=10)
print("====================",Context,user_message)
response = requests.post(
url="https://openrouter.ai/api/v1/chat/completions",
headers={
"Authorization": f"Bearer {OPENROUTER_API_KEY}"
},
data=json.dumps({
"model": "mistralai/mixtral-8x7b-instruct",
"messages": [
{"role": "user", "content": """You are expert assitant who gives reply from Context.Follow this steps.
Step 1: Check first whether the text is greeting or not if yes then do not use context and keep your reply short nd simple.
Step 2: if not then check is it related to Indian Agriculture or Not. if not do not use given Context do not use context.
Step 3: if related to Agriculture then use context only to give reply.
Not all queries are related to agricuture so be smart Also in final answer give suggestion and precutions to take while using perticular chemical,Query:{},Context:{}""".format(user_message,Context)}
]
})
)
# Check if the request was successful
if response.status_code != 200:
return jsonify({"error": "Failed to get response from OpenRouter"}), 500
data = response.json()
print(data)
result=data["choices"][0]["message"]["content"]
if("answer" in result):
result=result.split("answer")
return jsonify(data["choices"][0]["message"]["content"])
except Exception as e:
print("exception", e)
return jsonify({"error": str(e)}), 500
if __name__ == '__main__':
app.run(debug=True)
| [
"lancedb.connect"
] | [((311, 359), 'FlagEmbedding.LLMEmbedder', 'LLMEmbedder', (['"""BAAI/llm-embedder"""'], {'use_fp16': '(False)'}), "('BAAI/llm-embedder', use_fp16=False)\n", (322, 359), False, 'from FlagEmbedding import LLMEmbedder, FlagReranker\n'), ((416, 469), 'FlagEmbedding.FlagReranker', 'FlagReranker', (['"""BAAI/bge-reranker-base"""'], {'use_fp16': '(True)'}), "('BAAI/bge-reranker-base', use_fp16=True)\n", (428, 469), False, 'from FlagEmbedding import LLMEmbedder, FlagReranker\n'), ((546, 569), 'lancedb.connect', 'lancedb.connect', (['"""./db"""'], {}), "('./db')\n", (561, 569), False, 'import lancedb\n'), ((577, 592), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (582, 592), False, 'from flask import Flask, request, jsonify\n'), ((593, 685), 'flask_cors.CORS', 'CORS', (['app'], {'supports_credentials': '(True)', 'resources': "{'/openrouter-query': {'origins': '*'}}"}), "(app, supports_credentials=True, resources={'/openrouter-query': {\n 'origins': '*'}})\n", (597, 685), False, 'from flask_cors import CORS\n'), ((1119, 1146), 'flask.request.json.get', 'request.json.get', (['"""message"""'], {}), "('message')\n", (1135, 1146), False, 'from flask import Flask, request, jsonify\n'), ((1285, 1315), 'searchdb.search', 'search', (['user_message'], {'top_k': '(10)'}), '(user_message, top_k=10)\n', (1291, 1315), False, 'from searchdb import search\n'), ((2832, 2881), 'flask.jsonify', 'jsonify', (["data['choices'][0]['message']['content']"], {}), "(data['choices'][0]['message']['content'])\n", (2839, 2881), False, 'from flask import Flask, request, jsonify\n'), ((1212, 1253), 'flask.jsonify', 'jsonify', (["{'error': 'No message provided'}"], {}), "({'error': 'No message provided'})\n", (1219, 1253), False, 'from flask import Flask, request, jsonify\n'), ((2569, 2629), 'flask.jsonify', 'jsonify', (["{'error': 'Failed to get response from OpenRouter'}"], {}), "({'error': 'Failed to get response from OpenRouter'})\n", (2576, 2629), False, 'from flask import Flask, request, jsonify\n')] |
"""Provides a LanceDB interface for adding and querying embeddings."""
import os
import sys
from logging import Logger
from typing import TypeVar
import lancedb
import pyarrow as pa
from lance.vector import vec_to_table
from deckard.core import get_data_dir
T = TypeVar('T', dict, list, int)
class LanceDB:
"""Provides a LanceDB interface for adding and querying embeddings.
Args:
name (str): The name of the database.
log (Logger): The logger for the database.
Attributes:
EMBEDDINGS_TABLE_NAME (str): The name of the table for the embeddings.
DATA_PATH (str): The path to the data directory.
name (str): The name of the database.
log (Logger): The logger for the database.
create_if_not_exists (bool): Whether to create the database if it does not exist.
connection (lancedb.Connection): The connection to the database.
embeddings_table (lancedb.Table): The table for the embeddings.
"""
EMBEDDINGS_TABLE_NAME = "llm_embeddings"
DATA_PATH = os.path.join(
get_data_dir(),
'databases',
'lancedb'
)
def __init__(
self,
name: str,
log: Logger,
create_if_not_exists: bool=False
) -> None:
self.log = log
db_filepath = os.path.join(self.DATA_PATH, '.' + name)
if not os.path.exists(self.DATA_PATH):
if create_if_not_exists:
self.log.info("Creating Database: %s", name)
os.makedirs(self.DATA_PATH)
else:
self.log.error("Database does not exist.")
sys.exit(1)
self.log.info(f"Connecting to Database: {name}")
self.connection = lancedb.connect(db_filepath)
try:
self.embeddings_table = self.connection.open_table(self.EMBEDDINGS_TABLE_NAME)
except Exception:
self.log.warning("Table: %s does not exist.", self.EMBEDDINGS_TABLE_NAME)
def flush_data(self):
"""Flushes all data from the embeddings table."""
self.connection.drop_table(
self.EMBEDDINGS_TABLE_NAME,
ignore_missing=True
)
def _create_table(self, data: dict):
"""Creates the embeddings table in the database
Args:
data (str): The data to create the table with.
"""
self.embeddings_table = self.connection.create_table(
self.EMBEDDINGS_TABLE_NAME,
data=data,
mode="overwrite"
)
def add_embeddings(
self,
document: list,
embedding_id_start: int,
create_table: bool=False
) -> int:
"""Adds embeddings to lancedb.
Args:
document (dict): The document's data to add to the database. Each
document should have the following elements:
- id: The document's ID.
- raw_chunks list(str): The raw chunks of the document.
- embeddings list(Tensor): The embeddings of the document.
embedding_id_start (int): The starting embedding id.
create_table (bool): Whether to create the table if it does not exist.
Returns:
int: The highest embedding id inserted
"""
item_metadata, embeddings, embedding_id = self._build_document_data(
document,
embedding_id_start
)
textual_data = pa.Table.from_pydict(item_metadata)
et = vec_to_table(embeddings)
item = textual_data.append_column("vector", et["vector"])
self.log.info("Adding document %s %s embeddings to LanceDB.", document['id'], len(embeddings))
if create_table:
self._create_table(item)
else:
self.embeddings_table.add(item)
return embedding_id
def _build_document_data(
self,
document: dict,
embedding_id_start: int
) -> T:
"""Builds the data for a document to be added to the database.
Args:
document (dict): The document's data to add to the database. Each
document should have the following elements:
- id: The document's ID.
- raw_chunks list(str): The raw chunks of the document.
- embeddings list(Tensor): The embeddings of the document.
embedding_id_start (int): The starting embedding id.
Returns:
T: The item metadata, embeddings, and the highest embedding id inserted
"""
embedding_id = embedding_id_start
ids = []
doc_ids = []
chunk_ids = []
texts = []
embeddings = []
for idx, embedding in enumerate(document['embeddings']):
embedding_id += 1
ids.append(embedding_id)
doc_ids.append(document['id'])
chunk_ids.append(idx)
texts.append(document['raw_chunks'][idx])
embeddings.append(embedding)
item_metadata = {
'id': ids,
'text': texts,
'doc_id': doc_ids,
'chunk_id': chunk_ids
}
return item_metadata, embeddings, embedding_id
def query(
self,
query: str,
limit: int=25,
max_distance: int=5
) -> list:
"""Queries the database for embedding similarity.
Args:
query (str): The query to search for.
limit (int): The maximum number of results to return.
max_distance (int): The maximum distance to return.
Returns:
list: The results of the query.
"""
results = self.embeddings_table.search(query).limit(limit).to_df()
# Instead of specifying columns, we return all data but the vector.
results = results.drop(columns=['vector'])
return results[results['_distance'] <= max_distance]
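# --- Minimal usage sketch (not part of the original module) ---
# Assumes a standard-library logger and random placeholder vectors; real
# embeddings would come from an embedding model.
if __name__ == "__main__":
    import logging
    import numpy as np
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("lancedb-example")
    database = LanceDB("example", logger, create_if_not_exists=True)
    doc = {
        "id": "doc-1",
        "raw_chunks": ["first chunk", "second chunk"],
        "embeddings": [np.random.rand(384) for _ in range(2)],
    }
    last_id = database.add_embeddings(doc, embedding_id_start=0, create_table=True)
    logger.info("Inserted embeddings up to id %s", last_id)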
| [
"lancedb.connect"
] | [((265, 294), 'typing.TypeVar', 'TypeVar', (['"""T"""', 'dict', 'list', 'int'], {}), "('T', dict, list, int)\n", (272, 294), False, 'from typing import TypeVar\n'), ((1068, 1082), 'deckard.core.get_data_dir', 'get_data_dir', ([], {}), '()\n', (1080, 1082), False, 'from deckard.core import get_data_dir\n'), ((1323, 1363), 'os.path.join', 'os.path.join', (['self.DATA_PATH', "('.' + name)"], {}), "(self.DATA_PATH, '.' + name)\n", (1335, 1363), False, 'import os\n'), ((1741, 1769), 'lancedb.connect', 'lancedb.connect', (['db_filepath'], {}), '(db_filepath)\n', (1756, 1769), False, 'import lancedb\n'), ((3503, 3538), 'pyarrow.Table.from_pydict', 'pa.Table.from_pydict', (['item_metadata'], {}), '(item_metadata)\n', (3523, 3538), True, 'import pyarrow as pa\n'), ((3552, 3576), 'lance.vector.vec_to_table', 'vec_to_table', (['embeddings'], {}), '(embeddings)\n', (3564, 3576), False, 'from lance.vector import vec_to_table\n'), ((1379, 1409), 'os.path.exists', 'os.path.exists', (['self.DATA_PATH'], {}), '(self.DATA_PATH)\n', (1393, 1409), False, 'import os\n'), ((1525, 1552), 'os.makedirs', 'os.makedirs', (['self.DATA_PATH'], {}), '(self.DATA_PATH)\n', (1536, 1552), False, 'import os\n'), ((1646, 1657), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1654, 1657), False, 'import sys\n')] |
from typing import Any, List, Optional, Tuple
import gradio as gr
import lancedb
from transformers import CLIPModel, CLIPTokenizerFast
from homematch.config import DATA_DIR, MODEL_ID, TABLE_NAME
from homematch.data.types import ImageData
DEVICE: str = "cpu"
model: CLIPModel = CLIPModel.from_pretrained(MODEL_ID).to(DEVICE)
tokenizer: CLIPTokenizerFast = CLIPTokenizerFast.from_pretrained(MODEL_ID)
uri: str = str(DATA_DIR) + "/.lancedb/"
db = lancedb.connect(uri)
table = db[TABLE_NAME]
def embed_func(query: str) -> Any:
inputs = tokenizer([query], padding=True, return_tensors="pt").to(DEVICE)
text_features = model.get_text_features(**inputs)
return text_features.detach().numpy()[0]
def find_images(query: str) -> List[Tuple[Any, str]]:
emb = embed_func(query)
rs = table.search(emb).limit(9).to_pydantic(ImageData)
return [(image.load_image(), image.text_description) for image in rs]
def update_description(image_info: Optional[Tuple[Any, str]]) -> str:
if image_info is None or image_info[0] is None:
return "Select an image to see its description"
else:
_, description = image_info
return description
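# Note: update_description is not wired into the UI below; it could be attached
# to the gallery's select event (e.g. gallery.select(update_description, ...))
# if per-image descriptions are needed.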
with gr.Blocks() as demo:
with gr.Row():
vector_query: gr.Textbox = gr.Textbox(
value="A modern building with 2 bathrooms and 3 bedrooms", show_label=False
)
b1: gr.Button = gr.Button("Submit")
with gr.Row():
gallery: gr.Gallery = gr.Gallery(
label="Found images",
show_label=False,
elem_id="gallery",
columns=3,
rows=3,
object_fit="contain",
height="auto",
)
b1.click(find_images, inputs=vector_query, outputs=gallery)
demo.launch(server_name="127.0.0.1", inline=False)
| [
"lancedb.connect"
] | [((358, 401), 'transformers.CLIPTokenizerFast.from_pretrained', 'CLIPTokenizerFast.from_pretrained', (['MODEL_ID'], {}), '(MODEL_ID)\n', (391, 401), False, 'from transformers import CLIPModel, CLIPTokenizerFast\n'), ((448, 468), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (463, 468), False, 'import lancedb\n'), ((1222, 1233), 'gradio.Blocks', 'gr.Blocks', ([], {}), '()\n', (1231, 1233), True, 'import gradio as gr\n'), ((280, 315), 'transformers.CLIPModel.from_pretrained', 'CLIPModel.from_pretrained', (['MODEL_ID'], {}), '(MODEL_ID)\n', (305, 315), False, 'from transformers import CLIPModel, CLIPTokenizerFast\n'), ((1252, 1260), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (1258, 1260), True, 'import gradio as gr\n'), ((1297, 1388), 'gradio.Textbox', 'gr.Textbox', ([], {'value': '"""A modern building with 2 bathrooms and 3 bedrooms"""', 'show_label': '(False)'}), "(value='A modern building with 2 bathrooms and 3 bedrooms',\n show_label=False)\n", (1307, 1388), True, 'import gradio as gr\n'), ((1431, 1450), 'gradio.Button', 'gr.Button', (['"""Submit"""'], {}), "('Submit')\n", (1440, 1450), True, 'import gradio as gr\n'), ((1460, 1468), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (1466, 1468), True, 'import gradio as gr\n'), ((1500, 1629), 'gradio.Gallery', 'gr.Gallery', ([], {'label': '"""Found images"""', 'show_label': '(False)', 'elem_id': '"""gallery"""', 'columns': '(3)', 'rows': '(3)', 'object_fit': '"""contain"""', 'height': '"""auto"""'}), "(label='Found images', show_label=False, elem_id='gallery',\n columns=3, rows=3, object_fit='contain', height='auto')\n", (1510, 1629), True, 'import gradio as gr\n')] |
import flask
import lancedb
import openai
import langchain
# import clip
import torch
from langchain.vectorstores import LanceDB
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
import os
import datetime
from flask import Flask, render_template, request, jsonify
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
from dotenv import dotenv_values
env_vars = dotenv_values('.env')
OPENAI_API_KEY = env_vars['OPENAI_API_KEY']
uri = "~/.lancedb"
db = lancedb.connect(uri)
embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
if 'text' not in db.table_names():
table = db.create_table("text", data=[
{"vector": embeddings.embed_query("Hello World"), "text": "Hello World", "id": "1"}
])
# Named chat_model (not chat) so the /chat route below does not shadow it
chat_model = ChatOpenAI(temperature=0, openai_api_key=OPENAI_API_KEY)
app = Flask(__name__)
# Route for "/" for a web-based interface to this micro-service:
@app.route('/')
def index():
return "Hello, World"
@app.after_request
def add_cors_headers(response):
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'Content-Type')
response.headers.add('Access-Control-Allow-Methods', 'POST, OPTIONS')
return response
# Handle OPTIONS request for CORS preflight
@app.route('/store', methods=['OPTIONS'])
def handle_options():
response = jsonify({'message': 'Preflight request received'})
# response.headers['Access-Control-Allow-Origin'] = '*'
# response.headers.add('Access-Control-Allow-Headers', 'Content-Type')
# response.headers.add('Access-Control-Allow-Methods', 'POST')
print('Testing')
return response
@app.route('/store', methods = ['POST'])
def store_embedding():
json_data = request.get_json()
print(json_data['raw_text'][:50],'\n\nURL: ', json_data['url'], '\n\n','title', json_data['title'])
text = json_data['raw_text']
text_metadata = {"time": datetime.datetime.now().timestamp(), "url": json_data['url']}
table = db.open_table('text')
document = Document(page_content=text, metadata=text_metadata)
# chunks = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0).split_documents([document])
# print(len(chunks))
# print(chunks[0])
arr = text.split(' ')
text_documents = []
for i in range(0, len(arr), 1000):
chunk = ' '.join(arr[i:i+1000])
text_documents.append(Document(page_content=chunk, metadata=text_metadata))
print(len(text_documents))
docsearch = LanceDB.from_documents(text_documents, embeddings, connection=table)
return jsonify({'message': 'Preflight request received'}), 200
# use curl -X POST -H "Content-Type: application/json" -d '{"query": "basketball"}' http://127.0.0.1:5000/retrieve
# to test this endpoint
@app.route('/retrieve', methods = ['POST'])
def retrieve_embedding():
query = request.get_json()['query']
table = db.open_table('text')
docs = LanceDB(embedding=embeddings, connection=table).similarity_search(query, 5)
print(docs)
return [str(d) for d in docs]
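# Analogous test for the chat endpoint, e.g.
# curl -X POST -H "Content-Type: application/json" -d '{"query": "basketball"}' http://127.0.0.1:5000/chat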
@app.route('/chat', methods = ['POST'])
def chat_route():
    # Renamed from `chat` so it no longer shadows the ChatOpenAI instance
    # (the original called chat(messages) recursively by accident).
    def retrieve_context(query):
        table = db.open_table('text')
        docs = LanceDB(embedding=embeddings, connection=table).similarity_search(query, 3)
        return [d.page_content for d in docs]
    payload = request.get_json()
    query = payload['query']
    # Join the retrieved chunks into a single context block
    context = '\n'.join(retrieve_context(query))
    query = f"{query}\nCONTEXT: {context}"
    messages = [
        SystemMessage(content="You are a human assistant that will help users remember about topics from their previous bookmarks"),
        HumanMessage(content=query)
    ]
    response = chat_model(messages)
    return jsonify({'response': response.content})
# # image embedding
# device = "cude" if torch.cude.is_available() else "cpu"
# mode1, preprocess, clip.load("ViT-B/32")
if __name__ == '__main__':
app.run(debug=True)
| [
"lancedb.connect"
] | [((760, 781), 'dotenv.dotenv_values', 'dotenv_values', (['""".env"""'], {}), "('.env')\n", (773, 781), False, 'from dotenv import dotenv_values\n'), ((852, 872), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (867, 872), False, 'import lancedb\n'), ((886, 933), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'OPENAI_API_KEY'}), '(openai_api_key=OPENAI_API_KEY)\n', (902, 933), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1122, 1178), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'openai_api_key': 'OPENAI_API_KEY'}), '(temperature=0, openai_api_key=OPENAI_API_KEY)\n', (1132, 1178), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1186, 1201), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1191, 1201), False, 'from flask import Flask, render_template, request, jsonify\n'), ((1728, 1778), 'flask.jsonify', 'jsonify', (["{'message': 'Preflight request received'}"], {}), "({'message': 'Preflight request received'})\n", (1735, 1778), False, 'from flask import Flask, render_template, request, jsonify\n'), ((2104, 2122), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (2120, 2122), False, 'from flask import Flask, render_template, request, jsonify\n'), ((2400, 2451), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text', 'metadata': 'text_metadata'}), '(page_content=text, metadata=text_metadata)\n', (2408, 2451), False, 'from langchain.docstore.document import Document\n'), ((2859, 2927), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['text_documents', 'embeddings'], {'connection': 'table'}), '(text_documents, embeddings, connection=table)\n', (2881, 2927), False, 'from langchain.vectorstores import LanceDB\n'), ((3744, 3762), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (3760, 3762), False, 'from flask import Flask, render_template, request, jsonify\n'), ((2944, 2994), 'flask.jsonify', 'jsonify', (["{'message': 'Preflight request received'}"], {}), "({'message': 'Preflight request received'})\n", (2951, 2994), False, 'from flask import Flask, render_template, request, jsonify\n'), ((3226, 3244), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (3242, 3244), False, 'from flask import Flask, render_template, request, jsonify\n'), ((4034, 4167), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': '"""You are a human assistant that will help users remember about topics from their previous bookmarks"""'}), "(content=\n 'You are a human assistant that will help users remember about topics from their previous bookmarks'\n )\n", (4047, 4167), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage\n'), ((4167, 4194), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'query'}), '(content=query)\n', (4179, 4194), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage\n'), ((2758, 2810), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'chunk', 'metadata': 'text_metadata'}), '(page_content=chunk, metadata=text_metadata)\n', (2766, 2810), False, 'from langchain.docstore.document import Document\n'), ((3307, 3354), 'langchain.vectorstores.LanceDB', 'LanceDB', ([], {'embedding': 'embeddings', 'connection': 'table'}), '(embedding=embeddings, connection=table)\n', (3314, 3354), False, 'from langchain.vectorstores import LanceDB\n'), ((2289, 2312), 
'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2310, 2312), False, 'import datetime\n'), ((3587, 3634), 'langchain.vectorstores.LanceDB', 'LanceDB', ([], {'embedding': 'embeddings', 'connection': 'table'}), '(embedding=embeddings, connection=table)\n', (3594, 3634), False, 'from langchain.vectorstores import LanceDB\n')] |
from dotenv import load_dotenv
import os
import lancedb
import glob
import re
from concurrent.futures import ThreadPoolExecutor
import yt_dlp
from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast
# Set options for yt-dlp
ydl_opts = {
    "retries": 0,
    "quiet": True,  # Silence yt-dlp output
"extract_flat": True, # Extract metadata only, no download
}
MODEL_ID = None
MODEL = None
TOKENIZER = None
PROCESSOR = None
def setup_clip_model(model_id):
global MODEL_ID, MODEL, TOKENIZER, PROCESSOR
MODEL_ID = model_id
TOKENIZER = CLIPTokenizerFast.from_pretrained(MODEL_ID)
MODEL = CLIPModel.from_pretrained(MODEL_ID)
PROCESSOR = CLIPProcessor.from_pretrained(MODEL_ID)
def embed_func(query):
inputs = TOKENIZER([query], truncation=True, padding=True, return_tensors="pt")
text_features = MODEL.get_text_features(**inputs)
return text_features.detach().numpy()[0]
def get_video_title(video_id):
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
try:
info = ydl.extract_info(
f"https://www.youtube.com/watch?v={video_id}", download=False
)
return info.get("title", None)
except yt_dlp.utils.DownloadError:
return None
db = lancedb.connect("data/video-lancedb")
setup_clip_model("openai/clip-vit-base-patch32")
videos = list(
set(
[
            re.search(r"(?<=videos/).*(?=/)", name).group()
for name in glob.glob("./videos/*/**")
]
)
)
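# Resolve titles for a chunk of video ids, embed them with CLIP, and add them to the "videos" table.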
def insert(video_ids):
    titles = [(vid, get_video_title(vid)) for vid in video_ids]
    titles = [t for t in titles if t[1] is not None]
    if not titles:
        return  # nothing resolvable in this chunk; zip(*titles) would raise on an empty list
    video_ids, titles = zip(*titles)
text_features = [embed_func(title) for title in titles]
if "videos" in db.table_names():
table = db.open_table("videos")
table.add(
[
{"vector": im, "text": title, "video_id": vid, "start_time": 0}
for (im, vid, title) in zip(text_features, video_ids, titles)
]
)
else:
db.create_table(
"videos",
[
{"vector": im, "text": title, "video_id": vid, "start_time": 0}
for (im, vid, title) in zip(text_features, video_ids, titles)
],
)
print("done")
def threaded_video_processing(videos, chunk_size, max_workers):
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
for i in range(0, len(videos), chunk_size):
chunk = videos[i : i + chunk_size]
executor.submit(insert, chunk)
# Assuming you have defined the insert function and videos list
chunk_size = 500 # Number of videos to process in each chunk
max_workers = 5 # Number of concurrent threads
threaded_video_processing(videos, chunk_size, max_workers)
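# A minimal query sketch (assumption: the "videos" table was populated above;
# the query string is illustrative):
# tbl = db.open_table("videos")
# hits = tbl.search(embed_func("lecture about transformers")).limit(5).to_df()
# print(hits[["text", "video_id"]])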
| [
"lancedb.connect"
] | [((1313, 1350), 'lancedb.connect', 'lancedb.connect', (['"""data/video-lancedb"""'], {}), "('data/video-lancedb')\n", (1328, 1350), False, 'import lancedb\n'), ((621, 664), 'transformers.CLIPTokenizerFast.from_pretrained', 'CLIPTokenizerFast.from_pretrained', (['MODEL_ID'], {}), '(MODEL_ID)\n', (654, 664), False, 'from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast\n'), ((677, 712), 'transformers.CLIPModel.from_pretrained', 'CLIPModel.from_pretrained', (['MODEL_ID'], {}), '(MODEL_ID)\n', (702, 712), False, 'from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast\n'), ((729, 768), 'transformers.CLIPProcessor.from_pretrained', 'CLIPProcessor.from_pretrained', (['MODEL_ID'], {}), '(MODEL_ID)\n', (758, 768), False, 'from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast\n'), ((1019, 1045), 'yt_dlp.YoutubeDL', 'yt_dlp.YoutubeDL', (['ydl_opts'], {}), '(ydl_opts)\n', (1035, 1045), False, 'import yt_dlp\n'), ((1520, 1546), 'glob.glob', 'glob.glob', (['"""./videos/*/**"""'], {}), "('./videos/*/**')\n", (1529, 1546), False, 'import glob\n'), ((1447, 1489), 're.search', 're.search', (['"""(?<=videos\\\\/).*(?=\\\\/)"""', 'name'], {}), "('(?<=videos\\\\/).*(?=\\\\/)', name)\n", (1456, 1489), False, 'import re\n')] |
"""LanceDB vector store."""
from typing import Any, List, Optional
from llama_index.schema import MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.vector_stores.types import (
NodeWithEmbedding,
VectorStore,
VectorStoreQuery,
VectorStoreQueryResult,
)
class LanceDBVectorStore(VectorStore):
"""The LanceDB Vector Store.
Stores text and embeddings in LanceDB. The vector store will open an existing
LanceDB dataset or create the dataset if it does not exist.
Args:
uri (str, required): Location where LanceDB will store its files.
table_name (str, optional): The table name where the embeddings will be stored.
Defaults to "vectors".
nprobes (int, optional): The number of probes used.
A higher number makes search more accurate but also slower.
Defaults to 20.
        refine_factor (int, optional): Refine the results by reading extra elements
            and re-ranking them in memory.
            Defaults to None.
Raises:
ImportError: Unable to import `lancedb`.
Returns:
LanceDBVectorStore: VectorStore that supports creating LanceDB datasets and
querying it.
"""
stores_text = True
def __init__(
self,
uri: str,
table_name: str = "vectors",
nprobes: int = 20,
refine_factor: Optional[int] = None,
**kwargs: Any,
) -> None:
"""Init params."""
import_err_msg = "`lancedb` package not found, please run `pip install lancedb`"
try:
import lancedb # noqa: F401
except ImportError:
raise ImportError(import_err_msg)
self.connection = lancedb.connect(uri)
self.uri = uri
self.table_name = table_name
self.nprobes = nprobes
self.refine_factor = refine_factor
@property
def client(self) -> None:
"""Get client."""
return None
def add(
self,
embedding_results: List[NodeWithEmbedding],
) -> List[str]:
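        """Write embedding results to the LanceDB table, creating the table on first add."""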
data = []
ids = []
for result in embedding_results:
data.append(
{
"id": result.id,
"doc_id": result.ref_doc_id,
"vector": result.embedding,
"text": result.node.get_content(metadata_mode=MetadataMode.NONE),
}
)
ids.append(result.id)
if self.table_name in self.connection.table_names():
tbl = self.connection.open_table(self.table_name)
tbl.add(data)
else:
self.connection.create_table(self.table_name, data)
return ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete nodes using with ref_doc_id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
"""
raise NotImplementedError("Delete not yet implemented for LanceDB.")
def query(
self,
query: VectorStoreQuery,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes."""
if query.filters is not None:
raise ValueError("Metadata filters not implemented for LanceDB yet.")
table = self.connection.open_table(self.table_name)
lance_query = (
table.search(query.query_embedding)
.limit(query.similarity_top_k)
.nprobes(self.nprobes)
)
if self.refine_factor is not None:
lance_query.refine_factor(self.refine_factor)
results = lance_query.to_df()
nodes = []
for _, item in results.iterrows():
node = TextNode(
text=item.text,
id_=item.id,
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id=item.doc_id),
},
)
nodes.append(node)
return VectorStoreQueryResult(
nodes=nodes,
similarities=results["score"].tolist(),
ids=results["id"].tolist(),
)
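# A minimal usage sketch (hypothetical values; embedding_results would come
# from an index build elsewhere):
# store = LanceDBVectorStore(uri="/tmp/lancedb", table_name="vectors")
# ids = store.add(embedding_results)
# res = store.query(VectorStoreQuery(query_embedding=[0.1, 0.2], similarity_top_k=3))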
| [
"lancedb.connect"
] | [((1731, 1751), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1746, 1751), False, 'import lancedb\n'), ((3914, 3950), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'item.doc_id'}), '(node_id=item.doc_id)\n', (3929, 3950), False, 'from llama_index.schema import MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode\n')] |
import streamlit as st
import sqlite3
import streamlit_antd_components as sac
import pandas as pd
import os
import openai
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.vectorstores import LanceDB
from basecode.authenticate import return_api_key
from langchain.docstore.document import Document
import lancedb
import configparser
import ast
import json
class ConfigHandler:
def __init__(self):
self.config = configparser.ConfigParser()
self.config.read('config.ini')
def get_config_values(self, section, key):
value = self.config.get(section, key)
try:
# Try converting the string value to a Python data structure
return ast.literal_eval(value)
except (SyntaxError, ValueError):
# If not a data structure, return the plain string
return value
config_handler = ConfigHandler()
TCH = config_handler.get_config_values('constants', 'TCH')
STU = config_handler.get_config_values('constants', 'STU')
SA = config_handler.get_config_values('constants', 'SA')
AD = config_handler.get_config_values('constants', 'AD')
# Create or check for the 'database' directory in the current working directory
cwd = os.getcwd()
WORKING_DIRECTORY = os.path.join(cwd, "database")
if not os.path.exists(WORKING_DIRECTORY):
os.makedirs(WORKING_DIRECTORY)
if st.secrets["sql_ext_path"] == "None":
    WORKING_DATABASE = os.path.join(WORKING_DIRECTORY, st.secrets["default_db"])
else:
    WORKING_DATABASE = st.secrets["sql_ext_path"]
os.environ["OPENAI_API_KEY"] = return_api_key()
lancedb_path = os.path.join(WORKING_DIRECTORY, "lancedb")
db = lancedb.connect(lancedb_path)
def fetch_vectorstores_with_usernames():
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
query = '''
SELECT
Vector_Stores.vs_id,
Subject.subject_name,
Topic.topic_name,
Vector_Stores.vectorstore_name,
Users.username,
Vector_Stores.sharing_enabled
FROM Vector_Stores
JOIN Users ON Vector_Stores.user_id = Users.user_id
LEFT JOIN Subject ON Vector_Stores.subject = Subject.id
LEFT JOIN Topic ON Vector_Stores.topic = Topic.id;
'''
cursor.execute(query)
data = cursor.fetchall()
conn.close()
return data
def display_vectorstores():
data = fetch_vectorstores_with_usernames()
df = pd.DataFrame(data, columns=["vs_id", "subject_name", "topic_name", "vectorstore_name", "username", "sharing_enabled"])
# Convert the 'sharing_enabled' values
df["sharing_enabled"] = df["sharing_enabled"].apply(lambda x: '✔' if x == 1 else '')
st.dataframe(
df,
use_container_width=True,
column_order=["vs_id", "subject_name", "topic_name", "vectorstore_name", "username", "sharing_enabled"]
)
def fetch_all_files():
"""
Fetch all files either shared or based on user type
"""
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Construct the SQL query with JOINs for Subject, Topic, and Users tables
if st.session_state.user['profile_id'] == 'SA':
cursor.execute('''
SELECT Files.file_id, Files.file_name, Subject.subject_name, Topic.topic_name, Users.username
FROM Files
JOIN Subject ON Files.subject = Subject.id
JOIN Topic ON Files.topic = Topic.id
JOIN Users ON Files.user_id = Users.user_id
''')
else:
cursor.execute('''
SELECT Files.file_id, Files.file_name, Subject.subject_name, Topic.topic_name, Users.username
FROM Files
JOIN Subject ON Files.subject = Subject.id
JOIN Topic ON Files.topic = Topic.id
JOIN Users ON Files.user_id = Users.user_id
WHERE Files.sharing_enabled = 1
''')
files = cursor.fetchall()
formatted_files = [f"({file[0]}) {file[1]} ({file[4]})" for file in files]
conn.close()
return formatted_files
def fetch_file_data(file_id):
"""
Fetch file data given a file id
"""
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
cursor.execute("SELECT data, metadata FROM Files WHERE file_id = ?", (file_id,))
data = cursor.fetchone()
conn.close()
if data:
return data[0], data[1]
else:
return None, None
def insert_topic(org_id, topic_name):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
try:
cursor.execute('INSERT INTO Topic (org_id, topic_name) VALUES (?, ?);', (org_id, topic_name))
conn.commit()
return True # Indicates successful insertion
except sqlite3.IntegrityError:
# IntegrityError occurs if topic_name is not unique within the org
return False # Indicates topic_name is not unique within the org
finally:
conn.close()
def insert_subject(org_id, subject_name):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
try:
cursor.execute('INSERT INTO Subject (org_id, subject_name) VALUES (?, ?);', (org_id, subject_name))
conn.commit()
return True # Indicates successful insertion
except sqlite3.IntegrityError:
# IntegrityError occurs if subject_name is not unique within the org
return False # Indicates subject_name is not unique within the org
finally:
conn.close()
def select_organization():
with sqlite3.connect(WORKING_DATABASE) as conn:
cursor = conn.cursor()
# Org selection
org_query = "SELECT org_name FROM Organizations"
cursor.execute(org_query)
orgs = cursor.fetchall()
org_names = [org[0] for org in orgs]
# Use a Streamlit selectbox to choose an organization
selected_org_name = st.selectbox("Select an organization:", org_names)
# Retrieve the org_id for the selected organization
cursor.execute('SELECT org_id FROM Organizations WHERE org_name = ?;', (selected_org_name,))
result = cursor.fetchone()
if result:
org_id = result[0]
st.write(f"The org_id for {selected_org_name} is {org_id}.")
return org_id
else:
st.write(f"Organization '{selected_org_name}' not found in the database.")
return None
def fetch_subjects_by_org(org_id):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Check if the user is a super_admin (org_id is 0)
if org_id == 0:
cursor.execute('SELECT * FROM Subject;')
else:
cursor.execute('SELECT * FROM Subject WHERE org_id = ?;', (org_id,))
subjects = cursor.fetchall()
conn.close()
return subjects
def fetch_topics_by_org(org_id):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Check if the user is a super_admin (org_id is 0)
if org_id == 0:
cursor.execute('SELECT * FROM Topic;')
else:
cursor.execute('SELECT * FROM Topic WHERE org_id = ?;', (org_id,))
topics = cursor.fetchall()
conn.close()
return topics
def split_docs(file_path, meta):
loader = UnstructuredFileLoader(file_path)
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
metadata = {"source": meta}
for doc in docs:
doc.metadata.update(metadata)
return docs
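# Create the LanceDB table seeded with a single placeholder row, which fixes the
# schema up front before real documents are added via the vectorstore wrapper.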
def create_lancedb_table(embeddings, meta, table_name):
lancedb_path = os.path.join(WORKING_DIRECTORY, "lancedb")
# LanceDB connection
db = lancedb.connect(lancedb_path)
table = db.create_table(
f"{table_name}",
data=[
{
"vector": embeddings.embed_query("Query Unsuccessful"),
"text": "Query Unsuccessful",
"id": "1",
"source": f"{meta}"
}
],
mode="overwrite",
)
return table
def save_to_vectorstores(vs, vstore_input_name, subject, topic, username, share_resource=False):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Fetch the user's details
cursor.execute('SELECT user_id FROM Users WHERE username = ?', (username,))
user_details = cursor.fetchone()
if not user_details:
st.error("Error: User not found.")
return
user_id = user_details[0]
# If Vector_Store instance exists in session state, then serialize and save
# vs is the documents in json format and vstore_input_name is the name of the table and vectorstore
if vs:
try:
cursor.execute('SELECT 1 FROM Vector_Stores WHERE vectorstore_name LIKE ? AND user_id = ?', (f"%{vstore_input_name}%", user_id))
exists = cursor.fetchone()
if exists:
st.error("Error: An entry with the same vectorstore_name and user_id already exists.")
return
if subject is None:
st.error("Error: Subject is missing.")
return
if topic is None:
st.error("Error: Topic is missing.")
return
# Get the subject and topic IDs
cursor.execute('SELECT id FROM Subject WHERE subject_name = ?', (subject,))
subject_id = cursor.fetchone()[0]
cursor.execute('SELECT id FROM Topic WHERE topic_name = ?', (topic,))
topic_id = cursor.fetchone()[0]
# Insert the new row
cursor.execute('''
INSERT INTO Vector_Stores (vectorstore_name, documents, user_id, subject, topic, sharing_enabled)
VALUES (?, ?, ?, ?, ?, ?)
''', (vstore_input_name, vs, user_id, subject_id, topic_id, share_resource))
conn.commit()
conn.close()
except Exception as e:
st.error(f"Error in storing documents and vectorstore: {e}")
return
def document_to_dict(doc):
# Assuming 'doc' has 'page_content' and 'metadata' attributes
return {
'page_content': doc.page_content,
'metadata': doc.metadata
}
def dict_to_document(doc_dict):
# Create a Document object from the dictionary
# Adjust this according to how your Document class is defined
return Document(page_content=doc_dict['page_content'],metadata=doc_dict['metadata'])
def create_vectorstore():
openai.api_key = return_api_key()
os.environ["OPENAI_API_KEY"] = return_api_key()
full_docs = []
st.subheader("Enter the topic and subject for your knowledge base")
embeddings = OpenAIEmbeddings()
if st.session_state.user['profile_id'] == SA:
org_id = select_organization()
if org_id is None:
return
else:
org_id = st.session_state.user["org_id"]
# Fetch all available subjects
subjects = fetch_subjects_by_org(st.session_state.user["org_id"])
subject_names = [sub[2] for sub in subjects] # Assuming index 2 holds the subject_name
selected_subject = st.selectbox("Select an existing subject or type a new one:", options=subject_names + ['New Subject'])
if selected_subject == 'New Subject':
subject = st.text_input("Please enter the new subject name:", max_chars=30)
if subject:
insert_subject(org_id, subject)
else:
subject = selected_subject
# Fetch all available topics
topics = fetch_topics_by_org(st.session_state.user["org_id"])
topic_names = [topic[2] for topic in topics] # Assuming index 2 holds the topic_name
selected_topic = st.selectbox("Select an existing topic or type a new one:", options=topic_names + ['New Topic'])
if selected_topic == 'New Topic':
topic = st.text_input("Please enter the new topic name:", max_chars=30)
if topic:
insert_topic(org_id, topic)
else:
topic = selected_topic
vectorstore_input = st.text_input("Please type in a name for your knowledge base:", max_chars=20)
vs_name = vectorstore_input + f"_({st.session_state.user['username']})"
    share_resource = st.checkbox("Share this resource", value=True)
# Show the current build of files for the latest database
st.subheader("Select one or more files to build your knowledge base")
files = fetch_all_files()
if files:
selected_files = sac.transfer(items=files, label=None, index=None, titles=['Uploaded files', 'Select files for KB'], format_func='title', width='100%', height=None, search=True, pagination=False, oneway=False, reload=True, disabled=False, return_index=False)
# Alert to confirm the creation of knowledge base
st.warning("Building your knowledge base will take some time. Please be patient.")
build = sac.buttons([
dict(label='Build VectorStore', icon='check-circle-fill', color = 'green'),
dict(label='Cancel', icon='x-circle-fill', color='red'),
], label=None, index=1, format_func='title', align='center', position='top', size='default', direction='horizontal', shape='round', type='default', compact=False, return_index=False)
if build == 'Build VectorStore' and selected_files:
for s_file in selected_files:
file_id = int(s_file.split("(", 1)[1].split(")", 1)[0])
file_data, meta = fetch_file_data(file_id)
docs = split_docs(file_data, meta)
full_docs.extend(docs)
            # Convert full_docs to JSON for storage in SQLite
full_docs_dicts = [document_to_dict(doc) for doc in full_docs]
docs_json = json.dumps(full_docs_dicts)
create_lancedb_table(embeddings, meta, vs_name)
save_to_vectorstores(docs_json, vs_name, subject, topic, st.session_state.user["username"], share_resource) # Passing the share_resource to the function
st.success("Knowledge Base loaded")
else:
st.write("No files found in the database.")
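# Rebuild a LanceDB-backed vectorstore from the JSON documents stored in SQLite.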
def load_vectorstore(documents, table_name):
retrieved_docs_dicts = json.loads(documents)
retrieved_docs = [dict_to_document(doc_dict) for doc_dict in retrieved_docs_dicts]
    vs = LanceDB.from_documents(retrieved_docs, OpenAIEmbeddings(), connection=db.open_table(f"{table_name}"))
return vs
def delete_lancedb_table(table_name):
lancedb_path = os.path.join(WORKING_DIRECTORY, "lancedb")
# LanceDB connection
db = lancedb.connect(lancedb_path)
db.drop_table(f"{table_name}")
def fetch_vectorstores_by_user_id(user_id):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Fetch vectorstores based on user_id
cursor.execute('SELECT vectorstore_name FROM Vector_Stores WHERE user_id = ?;', (user_id,))
vectorstores = cursor.fetchall()
conn.close()
return vectorstores
def delete_vectorstores():
st.subheader("Delete VectorStores in Database:")
user_vectorstores = fetch_vectorstores_by_user_id(st.session_state.user["id"])
if user_vectorstores:
vectorstore_names = [vs[0] for vs in user_vectorstores]
selected_vectorstores = st.multiselect("Select vectorstores to delete:", options=vectorstore_names)
confirm_delete = st.checkbox("I understand that this action cannot be undone.", value=False)
if st.button("Delete VectorStore"):
if confirm_delete and selected_vectorstores:
delete_vectorstores_from_db(selected_vectorstores, st.session_state.user["id"], st.session_state.user["profile_id"])
st.success(f"Deleted {len(selected_vectorstores)} vectorstores.")
else:
st.warning("Please confirm the deletion action.")
else:
st.write("No vectorstores found in the database.")
def delete_vectorstores_from_db(vectorstore_names, user_id, profile):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
for vectorstore_name in vectorstore_names:
if profile in ['SA', 'AD']:
# Delete the corresponding LanceDB table
delete_lancedb_table(vectorstore_name)
# Delete vectorstore irrespective of the user_id associated with them
cursor.execute('DELETE FROM Vector_Stores WHERE vectorstore_name=?;', (vectorstore_name,))
else:
# Delete the corresponding LanceDB table
delete_lancedb_table(vectorstore_name)
# Delete only if the user_id matches
cursor.execute('DELETE FROM Vector_Stores WHERE vectorstore_name=? AND user_id=?;', (vectorstore_name, user_id))
# Check if the row was affected
if cursor.rowcount == 0:
st.error(f"Unable to delete vectorstore '{vectorstore_name}' that is not owned by you.")
conn.commit() # Commit the changes
conn.close() # Close the connection
| [
"lancedb.connect"
] | [((1365, 1376), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1374, 1376), False, 'import os\n'), ((1397, 1426), 'os.path.join', 'os.path.join', (['cwd', '"""database"""'], {}), "(cwd, 'database')\n", (1409, 1426), False, 'import os\n'), ((1706, 1722), 'basecode.authenticate.return_api_key', 'return_api_key', ([], {}), '()\n', (1720, 1722), False, 'from basecode.authenticate import return_api_key\n'), ((1738, 1780), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', '"""lancedb"""'], {}), "(WORKING_DIRECTORY, 'lancedb')\n", (1750, 1780), False, 'import os\n'), ((1786, 1815), 'lancedb.connect', 'lancedb.connect', (['lancedb_path'], {}), '(lancedb_path)\n', (1801, 1815), False, 'import lancedb\n'), ((1435, 1468), 'os.path.exists', 'os.path.exists', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (1449, 1468), False, 'import os\n'), ((1471, 1501), 'os.makedirs', 'os.makedirs', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (1482, 1501), False, 'import os\n'), ((1563, 1620), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', "st.secrets['default_db']"], {}), "(WORKING_DIRECTORY, st.secrets['default_db'])\n", (1575, 1620), False, 'import os\n'), ((1870, 1903), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (1885, 1903), False, 'import sqlite3\n'), ((2535, 2657), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['vs_id', 'subject_name', 'topic_name', 'vectorstore_name', 'username',\n 'sharing_enabled']"}), "(data, columns=['vs_id', 'subject_name', 'topic_name',\n 'vectorstore_name', 'username', 'sharing_enabled'])\n", (2547, 2657), True, 'import pandas as pd\n'), ((2792, 2947), 'streamlit.dataframe', 'st.dataframe', (['df'], {'use_container_width': '(True)', 'column_order': "['vs_id', 'subject_name', 'topic_name', 'vectorstore_name', 'username',\n 'sharing_enabled']"}), "(df, use_container_width=True, column_order=['vs_id',\n 'subject_name', 'topic_name', 'vectorstore_name', 'username',\n 'sharing_enabled'])\n", (2804, 2947), True, 'import streamlit as st\n'), ((3078, 3111), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (3093, 3111), False, 'import sqlite3\n'), ((4253, 4286), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (4268, 4286), False, 'import sqlite3\n'), ((4582, 4615), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (4597, 4615), False, 'import sqlite3\n'), ((5103, 5136), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (5118, 5136), False, 'import sqlite3\n'), ((6547, 6580), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (6562, 6580), False, 'import sqlite3\n'), ((6940, 6973), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (6955, 6973), False, 'import sqlite3\n'), ((7369, 7402), 'langchain_community.document_loaders.UnstructuredFileLoader', 'UnstructuredFileLoader', (['file_path'], {}), '(file_path)\n', (7391, 7402), False, 'from langchain_community.document_loaders import UnstructuredFileLoader\n'), ((7447, 7502), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(0)'}), '(chunk_size=1000, chunk_overlap=0)\n', (7468, 7502), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((7717, 7759), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', '"""lancedb"""'], {}), 
"(WORKING_DIRECTORY, 'lancedb')\n", (7729, 7759), False, 'import os\n'), ((7788, 7817), 'lancedb.connect', 'lancedb.connect', (['lancedb_path'], {}), '(lancedb_path)\n', (7803, 7817), False, 'import lancedb\n'), ((8167, 8200), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (8182, 8200), False, 'import sqlite3\n'), ((10432, 10510), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': "doc_dict['page_content']", 'metadata': "doc_dict['metadata']"}), "(page_content=doc_dict['page_content'], metadata=doc_dict['metadata'])\n", (10440, 10510), False, 'from langchain.docstore.document import Document\n'), ((10558, 10574), 'basecode.authenticate.return_api_key', 'return_api_key', ([], {}), '()\n', (10572, 10574), False, 'from basecode.authenticate import return_api_key\n'), ((10610, 10626), 'basecode.authenticate.return_api_key', 'return_api_key', ([], {}), '()\n', (10624, 10626), False, 'from basecode.authenticate import return_api_key\n'), ((10650, 10717), 'streamlit.subheader', 'st.subheader', (['"""Enter the topic and subject for your knowledge base"""'], {}), "('Enter the topic and subject for your knowledge base')\n", (10662, 10717), True, 'import streamlit as st\n'), ((10735, 10753), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (10751, 10753), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((11174, 11281), 'streamlit.selectbox', 'st.selectbox', (['"""Select an existing subject or type a new one:"""'], {'options': "(subject_names + ['New Subject'])"}), "('Select an existing subject or type a new one:', options=\n subject_names + ['New Subject'])\n", (11186, 11281), True, 'import streamlit as st\n'), ((11729, 11830), 'streamlit.selectbox', 'st.selectbox', (['"""Select an existing topic or type a new one:"""'], {'options': "(topic_names + ['New Topic'])"}), "('Select an existing topic or type a new one:', options=\n topic_names + ['New Topic'])\n", (11741, 11830), True, 'import streamlit as st\n'), ((12077, 12154), 'streamlit.text_input', 'st.text_input', (['"""Please type in a name for your knowledge base:"""'], {'max_chars': '(20)'}), "('Please type in a name for your knowledge base:', max_chars=20)\n", (12090, 12154), True, 'import streamlit as st\n'), ((12252, 12298), 'streamlit.checkbox', 'st.checkbox', (['"""Share this resource"""'], {'value': '(True)'}), "('Share this resource', value=True)\n", (12263, 12298), True, 'import streamlit as st\n'), ((12389, 12458), 'streamlit.subheader', 'st.subheader', (['"""Select one or more files to build your knowledge base"""'], {}), "('Select one or more files to build your knowledge base')\n", (12401, 12458), True, 'import streamlit as st\n'), ((14247, 14268), 'json.loads', 'json.loads', (['documents'], {}), '(documents)\n', (14257, 14268), False, 'import json\n'), ((14539, 14581), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', '"""lancedb"""'], {}), "(WORKING_DIRECTORY, 'lancedb')\n", (14551, 14581), False, 'import os\n'), ((14610, 14639), 'lancedb.connect', 'lancedb.connect', (['lancedb_path'], {}), '(lancedb_path)\n', (14625, 14639), False, 'import lancedb\n'), ((14728, 14761), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (14743, 14761), False, 'import sqlite3\n'), ((15047, 15095), 'streamlit.subheader', 'st.subheader', (['"""Delete VectorStores in Database:"""'], {}), "('Delete VectorStores in Database:')\n", (15059, 15095), True, 'import streamlit as st\n'), ((16043, 
16076), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (16058, 16076), False, 'import sqlite3\n'), ((588, 615), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (613, 615), False, 'import configparser\n'), ((5617, 5650), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (5632, 5650), False, 'import sqlite3\n'), ((5976, 6026), 'streamlit.selectbox', 'st.selectbox', (['"""Select an organization:"""', 'org_names'], {}), "('Select an organization:', org_names)\n", (5988, 6026), True, 'import streamlit as st\n'), ((8411, 8445), 'streamlit.error', 'st.error', (['"""Error: User not found."""'], {}), "('Error: User not found.')\n", (8419, 8445), True, 'import streamlit as st\n'), ((11342, 11407), 'streamlit.text_input', 'st.text_input', (['"""Please enter the new subject name:"""'], {'max_chars': '(30)'}), "('Please enter the new subject name:', max_chars=30)\n", (11355, 11407), True, 'import streamlit as st\n'), ((11885, 11948), 'streamlit.text_input', 'st.text_input', (['"""Please enter the new topic name:"""'], {'max_chars': '(30)'}), "('Please enter the new topic name:', max_chars=30)\n", (11898, 11948), True, 'import streamlit as st\n'), ((12528, 12782), 'streamlit_antd_components.transfer', 'sac.transfer', ([], {'items': 'files', 'label': 'None', 'index': 'None', 'titles': "['Uploaded files', 'Select files for KB']", 'format_func': '"""title"""', 'width': '"""100%"""', 'height': 'None', 'search': '(True)', 'pagination': '(False)', 'oneway': '(False)', 'reload': '(True)', 'disabled': '(False)', 'return_index': '(False)'}), "(items=files, label=None, index=None, titles=['Uploaded files',\n 'Select files for KB'], format_func='title', width='100%', height=None,\n search=True, pagination=False, oneway=False, reload=True, disabled=\n False, return_index=False)\n", (12540, 12782), True, 'import streamlit_antd_components as sac\n'), ((12845, 12932), 'streamlit.warning', 'st.warning', (['"""Building your knowledge base will take some time. Please be patient."""'], {}), "(\n 'Building your knowledge base will take some time. 
Please be patient.')\n", (12855, 12932), True, 'import streamlit as st\n'), ((14125, 14168), 'streamlit.write', 'st.write', (['"""No files found in the database."""'], {}), "('No files found in the database.')\n", (14133, 14168), True, 'import streamlit as st\n'), ((14405, 14423), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (14421, 14423), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((15306, 15381), 'streamlit.multiselect', 'st.multiselect', (['"""Select vectorstores to delete:"""'], {'options': 'vectorstore_names'}), "('Select vectorstores to delete:', options=vectorstore_names)\n", (15320, 15381), True, 'import streamlit as st\n'), ((15407, 15482), 'streamlit.checkbox', 'st.checkbox', (['"""I understand that this action cannot be undone."""'], {'value': '(False)'}), "('I understand that this action cannot be undone.', value=False)\n", (15418, 15482), True, 'import streamlit as st\n'), ((15503, 15534), 'streamlit.button', 'st.button', (['"""Delete VectorStore"""'], {}), "('Delete VectorStore')\n", (15512, 15534), True, 'import streamlit as st\n'), ((15910, 15960), 'streamlit.write', 'st.write', (['"""No vectorstores found in the database."""'], {}), "('No vectorstores found in the database.')\n", (15918, 15960), True, 'import streamlit as st\n'), ((854, 877), 'ast.literal_eval', 'ast.literal_eval', (['value'], {}), '(value)\n', (870, 877), False, 'import ast\n'), ((6287, 6347), 'streamlit.write', 'st.write', (['f"""The org_id for {selected_org_name} is {org_id}."""'], {}), "(f'The org_id for {selected_org_name} is {org_id}.')\n", (6295, 6347), True, 'import streamlit as st\n'), ((6400, 6474), 'streamlit.write', 'st.write', (['f"""Organization \'{selected_org_name}\' not found in the database."""'], {}), '(f"Organization \'{selected_org_name}\' not found in the database.")\n', (6408, 6474), True, 'import streamlit as st\n'), ((13804, 13831), 'json.dumps', 'json.dumps', (['full_docs_dicts'], {}), '(full_docs_dicts)\n', (13814, 13831), False, 'import json\n'), ((14070, 14105), 'streamlit.success', 'st.success', (['"""Knowledge Base loaded"""'], {}), "('Knowledge Base loaded')\n", (14080, 14105), True, 'import streamlit as st\n'), ((8934, 9030), 'streamlit.error', 'st.error', (['"""Error: An entry with the same vectorstore_name and user_id already exists."""'], {}), "(\n 'Error: An entry with the same vectorstore_name and user_id already exists.'\n )\n", (8942, 9030), True, 'import streamlit as st\n'), ((9105, 9143), 'streamlit.error', 'st.error', (['"""Error: Subject is missing."""'], {}), "('Error: Subject is missing.')\n", (9113, 9143), True, 'import streamlit as st\n'), ((9214, 9250), 'streamlit.error', 'st.error', (['"""Error: Topic is missing."""'], {}), "('Error: Topic is missing.')\n", (9222, 9250), True, 'import streamlit as st\n'), ((10003, 10063), 'streamlit.error', 'st.error', (['f"""Error in storing documents and vectorstore: {e}"""'], {}), "(f'Error in storing documents and vectorstore: {e}')\n", (10011, 10063), True, 'import streamlit as st\n'), ((15842, 15891), 'streamlit.warning', 'st.warning', (['"""Please confirm the deletion action."""'], {}), "('Please confirm the deletion action.')\n", (15852, 15891), True, 'import streamlit as st\n'), ((16905, 17003), 'streamlit.error', 'st.error', (['f"""Unable to delete vectorstore \'{vectorstore_name}\' that is not owned by you."""'], {}), '(\n f"Unable to delete vectorstore \'{vectorstore_name}\' that is not owned by you."\n )\n', (16913, 17003), True, 'import 
streamlit as st\n')] |
#!/usr/bin/env python3 -m pytest
"""
Unit test for retrieve_utils.py
"""
import pytest
try:
import chromadb
from autogen.retrieve_utils import (
split_text_to_chunks,
extract_text_from_pdf,
split_files_to_chunks,
get_files_from_dir,
is_url,
create_vector_db_from_dir,
query_vector_db,
)
from autogen.token_count_utils import count_token
except ImportError:
skip = True
else:
skip = False
import os
try:
from unstructured.partition.auto import partition
HAS_UNSTRUCTURED = True
except ImportError:
HAS_UNSTRUCTURED = False
test_dir = os.path.join(os.path.dirname(__file__), "test_files")
expected_text = """AutoGen is an advanced tool designed to assist developers in harnessing the capabilities
of Large Language Models (LLMs) for various applications. The primary purpose of AutoGen is to automate and
simplify the process of building applications that leverage the power of LLMs, allowing for seamless
integration, testing, and deployment."""
@pytest.mark.skipif(skip, reason="dependency is not installed")
class TestRetrieveUtils:
def test_split_text_to_chunks(self):
long_text = "A" * 10000
chunks = split_text_to_chunks(long_text, max_tokens=1000)
assert all(count_token(chunk) <= 1000 for chunk in chunks)
def test_split_text_to_chunks_raises_on_invalid_chunk_mode(self):
with pytest.raises(AssertionError):
split_text_to_chunks("A" * 10000, chunk_mode="bogus_chunk_mode")
def test_extract_text_from_pdf(self):
pdf_file_path = os.path.join(test_dir, "example.pdf")
assert "".join(expected_text.split()) == "".join(extract_text_from_pdf(pdf_file_path).strip().split())
def test_split_files_to_chunks(self):
pdf_file_path = os.path.join(test_dir, "example.pdf")
txt_file_path = os.path.join(test_dir, "example.txt")
chunks = split_files_to_chunks([pdf_file_path, txt_file_path])
assert all(
isinstance(chunk, str) and "AutoGen is an advanced tool designed to assist developers" in chunk.strip()
for chunk in chunks
)
def test_get_files_from_dir(self):
files = get_files_from_dir(test_dir, recursive=False)
assert all(os.path.isfile(file) for file in files)
pdf_file_path = os.path.join(test_dir, "example.pdf")
txt_file_path = os.path.join(test_dir, "example.txt")
files = get_files_from_dir([pdf_file_path, txt_file_path])
assert all(os.path.isfile(file) for file in files)
files = get_files_from_dir(
[
pdf_file_path,
txt_file_path,
os.path.join(test_dir, "..", "..", "website/docs"),
"https://raw.githubusercontent.com/microsoft/autogen/main/README.md",
],
recursive=True,
)
assert all(os.path.isfile(file) for file in files)
files = get_files_from_dir(
[
pdf_file_path,
txt_file_path,
os.path.join(test_dir, "..", "..", "website/docs"),
"https://raw.githubusercontent.com/microsoft/autogen/main/README.md",
],
recursive=True,
types=["pdf", "txt"],
)
assert all(os.path.isfile(file) for file in files)
assert len(files) == 3
def test_is_url(self):
assert is_url("https://www.example.com")
assert not is_url("not_a_url")
def test_create_vector_db_from_dir(self):
db_path = "/tmp/test_retrieve_utils_chromadb.db"
if os.path.exists(db_path):
client = chromadb.PersistentClient(path=db_path)
else:
client = chromadb.PersistentClient(path=db_path)
create_vector_db_from_dir(test_dir, client=client)
assert client.get_collection("all-my-documents")
def test_query_vector_db(self):
db_path = "/tmp/test_retrieve_utils_chromadb.db"
if os.path.exists(db_path):
client = chromadb.PersistentClient(path=db_path)
else: # If the database does not exist, create it first
client = chromadb.PersistentClient(path=db_path)
create_vector_db_from_dir(test_dir, client=client)
results = query_vector_db(["autogen"], client=client)
assert isinstance(results, dict) and any("autogen" in res[0].lower() for res in results.get("documents", []))
def test_custom_vector_db(self):
try:
import lancedb
except ImportError:
return
from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent
db_path = "/tmp/lancedb"
def create_lancedb():
db = lancedb.connect(db_path)
data = [
{"vector": [1.1, 1.2], "id": 1, "documents": "This is a test document spark"},
{"vector": [0.2, 1.8], "id": 2, "documents": "This is another test document"},
{"vector": [0.1, 0.3], "id": 3, "documents": "This is a third test document spark"},
{"vector": [0.5, 0.7], "id": 4, "documents": "This is a fourth test document"},
{"vector": [2.1, 1.3], "id": 5, "documents": "This is a fifth test document spark"},
{"vector": [5.1, 8.3], "id": 6, "documents": "This is a sixth test document"},
]
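            # create_table fails if "my_table" already exists; the OSError is
            # swallowed so repeated test runs are no-ops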
try:
db.create_table("my_table", data)
except OSError:
pass
class MyRetrieveUserProxyAgent(RetrieveUserProxyAgent):
def query_vector_db(
self,
query_texts,
n_results=10,
search_string="",
):
if query_texts:
vector = [0.1, 0.3]
db = lancedb.connect(db_path)
table = db.open_table("my_table")
query = table.search(vector).where(f"documents LIKE '%{search_string}%'").limit(n_results).to_df()
return {"ids": [query["id"].tolist()], "documents": [query["documents"].tolist()]}
def retrieve_docs(self, problem: str, n_results: int = 20, search_string: str = ""):
results = self.query_vector_db(
query_texts=[problem],
n_results=n_results,
search_string=search_string,
)
self._results = results
print("doc_ids: ", results["ids"])
ragragproxyagent = MyRetrieveUserProxyAgent(
name="ragproxyagent",
human_input_mode="NEVER",
max_consecutive_auto_reply=2,
retrieve_config={
"task": "qa",
"chunk_token_size": 2000,
"client": "__",
"embedding_model": "all-mpnet-base-v2",
},
)
create_lancedb()
ragragproxyagent.retrieve_docs("This is a test document spark", n_results=10, search_string="spark")
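        # [0.1, 0.3] matches id 3 exactly; ids 1 and 5 are the next-closest "spark" documents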
assert ragragproxyagent._results["ids"] == [[3, 1, 5]]
def test_custom_text_split_function(self):
def custom_text_split_function(text):
return [text[: len(text) // 2], text[len(text) // 2 :]]
db_path = "/tmp/test_retrieve_utils_chromadb.db"
client = chromadb.PersistentClient(path=db_path)
create_vector_db_from_dir(
os.path.join(test_dir, "example.txt"),
client=client,
collection_name="mytestcollection",
custom_text_split_function=custom_text_split_function,
get_or_create=True,
recursive=False,
)
results = query_vector_db(["autogen"], client=client, collection_name="mytestcollection", n_results=1)
assert (
"AutoGen is an advanced tool designed to assist developers in harnessing the capabilities"
in results.get("documents")[0][0]
)
def test_retrieve_utils(self):
client = chromadb.PersistentClient(path="/tmp/chromadb")
create_vector_db_from_dir(
dir_path="./website/docs",
client=client,
collection_name="autogen-docs",
custom_text_types=["txt", "md", "rtf", "rst"],
get_or_create=True,
)
results = query_vector_db(
query_texts=[
"How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?",
],
n_results=4,
client=client,
collection_name="autogen-docs",
search_string="AutoGen",
)
print(results["ids"][0])
assert len(results["ids"][0]) == 4
@pytest.mark.skipif(
not HAS_UNSTRUCTURED,
reason="do not run if unstructured is not installed",
)
def test_unstructured(self):
pdf_file_path = os.path.join(test_dir, "example.pdf")
txt_file_path = os.path.join(test_dir, "example.txt")
word_file_path = os.path.join(test_dir, "example.docx")
chunks = split_files_to_chunks([pdf_file_path, txt_file_path, word_file_path])
assert all(
isinstance(chunk, str) and "AutoGen is an advanced tool designed to assist developers" in chunk.strip()
for chunk in chunks
)
if __name__ == "__main__":
pytest.main()
db_path = "/tmp/test_retrieve_utils_chromadb.db"
if os.path.exists(db_path):
os.remove(db_path) # Delete the database file after tests are finished
| [
"lancedb.connect"
] | [((1045, 1107), 'pytest.mark.skipif', 'pytest.mark.skipif', (['skip'], {'reason': '"""dependency is not installed"""'}), "(skip, reason='dependency is not installed')\n", (1063, 1107), False, 'import pytest\n'), ((643, 668), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (658, 668), False, 'import os\n'), ((8719, 8818), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not HAS_UNSTRUCTURED)'], {'reason': '"""do not run if unstructured is not installed"""'}), "(not HAS_UNSTRUCTURED, reason=\n 'do not run if unstructured is not installed')\n", (8737, 8818), False, 'import pytest\n'), ((9356, 9369), 'pytest.main', 'pytest.main', ([], {}), '()\n', (9367, 9369), False, 'import pytest\n'), ((9431, 9454), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (9445, 9454), False, 'import os\n'), ((1223, 1271), 'autogen.retrieve_utils.split_text_to_chunks', 'split_text_to_chunks', (['long_text'], {'max_tokens': '(1000)'}), '(long_text, max_tokens=1000)\n', (1243, 1271), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((1598, 1635), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (1610, 1635), False, 'import os\n'), ((1814, 1851), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (1826, 1851), False, 'import os\n'), ((1876, 1913), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (1888, 1913), False, 'import os\n'), ((1931, 1984), 'autogen.retrieve_utils.split_files_to_chunks', 'split_files_to_chunks', (['[pdf_file_path, txt_file_path]'], {}), '([pdf_file_path, txt_file_path])\n', (1952, 1984), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2219, 2264), 'autogen.retrieve_utils.get_files_from_dir', 'get_files_from_dir', (['test_dir'], {'recursive': '(False)'}), '(test_dir, recursive=False)\n', (2237, 2264), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2348, 2385), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (2360, 2385), False, 'import os\n'), ((2410, 2447), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (2422, 2447), False, 'import os\n'), ((2464, 2514), 'autogen.retrieve_utils.get_files_from_dir', 'get_files_from_dir', (['[pdf_file_path, txt_file_path]'], {}), '([pdf_file_path, txt_file_path])\n', (2482, 2514), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3438, 3471), 'autogen.retrieve_utils.is_url', 'is_url', (['"""https://www.example.com"""'], {}), "('https://www.example.com')\n", (3444, 3471), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3626, 3649), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (3640, 3649), False, 'import os\n'), ((4013, 4036), 
'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (4027, 4036), False, 'import os\n'), ((4307, 4350), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', (["['autogen']"], {'client': 'client'}), "(['autogen'], client=client)\n", (4322, 4350), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((7347, 7386), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (7372, 7386), False, 'import chromadb\n'), ((7704, 7801), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', (["['autogen']"], {'client': 'client', 'collection_name': '"""mytestcollection"""', 'n_results': '(1)'}), "(['autogen'], client=client, collection_name=\n 'mytestcollection', n_results=1)\n", (7719, 7801), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((8026, 8073), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""/tmp/chromadb"""'}), "(path='/tmp/chromadb')\n", (8051, 8073), False, 'import chromadb\n'), ((8082, 8256), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', ([], {'dir_path': '"""./website/docs"""', 'client': 'client', 'collection_name': '"""autogen-docs"""', 'custom_text_types': "['txt', 'md', 'rtf', 'rst']", 'get_or_create': '(True)'}), "(dir_path='./website/docs', client=client,\n collection_name='autogen-docs', custom_text_types=['txt', 'md', 'rtf',\n 'rst'], get_or_create=True)\n", (8107, 8256), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((8338, 8548), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', ([], {'query_texts': "['How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?'\n ]", 'n_results': '(4)', 'client': 'client', 'collection_name': '"""autogen-docs"""', 'search_string': '"""AutoGen"""'}), "(query_texts=[\n 'How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?'\n ], n_results=4, client=client, collection_name='autogen-docs',\n search_string='AutoGen')\n", (8353, 8548), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((8894, 8931), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (8906, 8931), False, 'import os\n'), ((8956, 8993), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (8968, 8993), False, 'import os\n'), ((9019, 9057), 'os.path.join', 'os.path.join', (['test_dir', '"""example.docx"""'], {}), "(test_dir, 'example.docx')\n", (9031, 9057), False, 'import os\n'), ((9075, 9144), 'autogen.retrieve_utils.split_files_to_chunks', 'split_files_to_chunks', (['[pdf_file_path, txt_file_path, word_file_path]'], {}), '([pdf_file_path, txt_file_path, word_file_path])\n', (9096, 9144), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((9464, 9482), 'os.remove', 'os.remove', (['db_path'], {}), '(db_path)\n', 
(9473, 9482), False, 'import os\n'), ((1423, 1452), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1436, 1452), False, 'import pytest\n'), ((1466, 1530), 'autogen.retrieve_utils.split_text_to_chunks', 'split_text_to_chunks', (["('A' * 10000)"], {'chunk_mode': '"""bogus_chunk_mode"""'}), "('A' * 10000, chunk_mode='bogus_chunk_mode')\n", (1486, 1530), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3491, 3510), 'autogen.retrieve_utils.is_url', 'is_url', (['"""not_a_url"""'], {}), "('not_a_url')\n", (3497, 3510), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3672, 3711), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (3697, 3711), False, 'import chromadb\n'), ((3747, 3786), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (3772, 3786), False, 'import chromadb\n'), ((3799, 3849), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', (['test_dir'], {'client': 'client'}), '(test_dir, client=client)\n', (3824, 3849), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((4059, 4098), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (4084, 4098), False, 'import chromadb\n'), ((4185, 4224), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (4210, 4224), False, 'import chromadb\n'), ((4237, 4287), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', (['test_dir'], {'client': 'client'}), '(test_dir, client=client)\n', (4262, 4287), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((4771, 4795), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (4786, 4795), False, 'import lancedb\n'), ((7434, 7471), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (7446, 7471), False, 'import os\n'), ((2284, 2304), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2298, 2304), False, 'import os\n'), ((2534, 2554), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2548, 2554), False, 'import os\n'), ((2702, 2752), 'os.path.join', 'os.path.join', (['test_dir', '""".."""', '""".."""', '"""website/docs"""'], {}), "(test_dir, '..', '..', 'website/docs')\n", (2714, 2752), False, 'import os\n'), ((2912, 2932), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2926, 2932), False, 'import os\n'), ((3080, 3130), 'os.path.join', 'os.path.join', (['test_dir', '""".."""', '""".."""', '"""website/docs"""'], {}), "(test_dir, '..', '..', 'website/docs')\n", (3092, 3130), False, 'import os\n'), ((3324, 3344), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (3338, 3344), False, 'import os\n'), ((5851, 5875), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (5866, 5875), False, 'import lancedb\n'), ((1291, 1309), 
'autogen.token_count_utils.count_token', 'count_token', (['chunk'], {}), '(chunk)\n', (1302, 1309), False, 'from autogen.token_count_utils import count_token\n'), ((1693, 1729), 'autogen.retrieve_utils.extract_text_from_pdf', 'extract_text_from_pdf', (['pdf_file_path'], {}), '(pdf_file_path)\n', (1714, 1729), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n')] |
import pytest
from langchain_community.vectorstores import LanceDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
@pytest.mark.requires("lancedb")
def test_lancedb_with_connection() -> None:
import lancedb
embeddings = FakeEmbeddings()
db = lancedb.connect("/tmp/lancedb")
texts = ["text 1", "text 2", "item 3"]
vectors = embeddings.embed_documents(texts)
table = db.create_table(
"my_table",
data=[
{"vector": vectors[idx], "id": text, "text": text}
for idx, text in enumerate(texts)
],
mode="overwrite",
)
store = LanceDB(table, embeddings)
result = store.similarity_search("text 1")
result_texts = [doc.page_content for doc in result]
assert "text 1" in result_texts
@pytest.mark.requires("lancedb")
def test_lancedb_without_connection() -> None:
embeddings = FakeEmbeddings()
texts = ["text 1", "text 2", "item 3"]
store = LanceDB(embedding=embeddings)
store.add_texts(texts)
result = store.similarity_search("text 1")
result_texts = [doc.page_content for doc in result]
assert "text 1" in result_texts
@pytest.mark.requires("lancedb")
def test_lancedb_add_texts() -> None:
embeddings = FakeEmbeddings()
store = LanceDB(embedding=embeddings)
store.add_texts(["text 2"])
result = store.similarity_search("text 2")
result_texts = [doc.page_content for doc in result]
assert "text 2" in result_texts
| [
"lancedb.connect"
] | [((151, 182), 'pytest.mark.requires', 'pytest.mark.requires', (['"""lancedb"""'], {}), "('lancedb')\n", (171, 182), False, 'import pytest\n'), ((810, 841), 'pytest.mark.requires', 'pytest.mark.requires', (['"""lancedb"""'], {}), "('lancedb')\n", (830, 841), False, 'import pytest\n'), ((1178, 1209), 'pytest.mark.requires', 'pytest.mark.requires', (['"""lancedb"""'], {}), "('lancedb')\n", (1198, 1209), False, 'import pytest\n'), ((264, 280), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (278, 280), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((290, 321), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (305, 321), False, 'import lancedb\n'), ((641, 667), 'langchain_community.vectorstores.LanceDB', 'LanceDB', (['table', 'embeddings'], {}), '(table, embeddings)\n', (648, 667), False, 'from langchain_community.vectorstores import LanceDB\n'), ((906, 922), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (920, 922), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((979, 1008), 'langchain_community.vectorstores.LanceDB', 'LanceDB', ([], {'embedding': 'embeddings'}), '(embedding=embeddings)\n', (986, 1008), False, 'from langchain_community.vectorstores import LanceDB\n'), ((1265, 1281), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (1279, 1281), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((1295, 1324), 'langchain_community.vectorstores.LanceDB', 'LanceDB', ([], {'embedding': 'embeddings'}), '(embedding=embeddings)\n', (1302, 1324), False, 'from langchain_community.vectorstores import LanceDB\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import lancedb
import pytest
# AWS:
# You need to set up AWS credentials and a base path to run this test. Example
# AWS_PROFILE=default REMOTE_BASE_URL=s3://my_bucket/dataset pytest tests/test_io.py
#
# Azure:
# You need to set up Azure credentials and a base path to run this test. Example
# export AZURE_STORAGE_ACCOUNT_NAME="<account>"
# export AZURE_STORAGE_ACCOUNT_KEY="<key>"
# export REMOTE_BASE_URL=az://my_blob/dataset
# pytest tests/test_io.py
@pytest.fixture(autouse=True, scope="module")
def setup():
yield
if remote_url := os.environ.get("REMOTE_BASE_URL"):
db = lancedb.connect(remote_url)
for table in db.table_names():
db.drop_table(table)
@pytest.mark.skipif(
(os.environ.get("REMOTE_BASE_URL") is None),
reason="please setup remote base url",
)
def test_remote_io():
db = lancedb.connect(os.environ.get("REMOTE_BASE_URL"))
assert db.table_names() == []
table = db.create_table(
"test",
data=[
{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
],
)
rs = table.search([100, 100]).limit(1).to_pandas()
assert len(rs) == 1
assert rs["item"].iloc[0] == "bar"
rs = table.search([100, 100]).where("price < 15").limit(2).to_pandas()
assert len(rs) == 1
assert rs["item"].iloc[0] == "foo"
assert db.table_names() == ["test"]
assert "test" in db
assert len(db) == 1
assert db.open_table("test").name == db["test"].name
| [
"lancedb.connect"
] | [((1069, 1113), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)', 'scope': '"""module"""'}), "(autouse=True, scope='module')\n", (1083, 1113), False, 'import pytest\n'), ((1159, 1192), 'os.environ.get', 'os.environ.get', (['"""REMOTE_BASE_URL"""'], {}), "('REMOTE_BASE_URL')\n", (1173, 1192), False, 'import os\n'), ((1207, 1234), 'lancedb.connect', 'lancedb.connect', (['remote_url'], {}), '(remote_url)\n', (1222, 1234), False, 'import lancedb\n'), ((1472, 1505), 'os.environ.get', 'os.environ.get', (['"""REMOTE_BASE_URL"""'], {}), "('REMOTE_BASE_URL')\n", (1486, 1505), False, 'import os\n'), ((1336, 1369), 'os.environ.get', 'os.environ.get', (['"""REMOTE_BASE_URL"""'], {}), "('REMOTE_BASE_URL')\n", (1350, 1369), False, 'import os\n')] |
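For reference, lancedb.connect also accepts an object-store URI directly; a minimal sketch (the bucket and prefix are placeholders, and AWS credentials are assumed to be configured in the environment, not part of the test file above):

import lancedb

# hypothetical remote location, mirroring the REMOTE_BASE_URL usage above
db = lancedb.connect("s3://my-bucket/datasets/demo")
print(db.table_names())  # empty list on a fresh prefix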
"""
Unit test for retrieve_utils.py
"""
import pytest
try:
import chromadb
from autogen.retrieve_utils import (
split_text_to_chunks,
extract_text_from_pdf,
split_files_to_chunks,
get_files_from_dir,
is_url,
create_vector_db_from_dir,
query_vector_db,
)
from autogen.token_count_utils import count_token
except ImportError:
skip = True
else:
skip = False
import os
try:
from unstructured.partition.auto import partition
HAS_UNSTRUCTURED = True
except ImportError:
HAS_UNSTRUCTURED = False
test_dir = os.path.join(os.path.dirname(__file__), "test_files")
expected_text = """AutoGen is an advanced tool designed to assist developers in harnessing the capabilities
of Large Language Models (LLMs) for various applications. The primary purpose of AutoGen is to automate and
simplify the process of building applications that leverage the power of LLMs, allowing for seamless
integration, testing, and deployment."""
@pytest.mark.skipif(skip, reason="dependency is not installed")
class TestRetrieveUtils:
def test_split_text_to_chunks(self):
long_text = "A" * 10000
chunks = split_text_to_chunks(long_text, max_tokens=1000)
assert all(count_token(chunk) <= 1000 for chunk in chunks)
def test_split_text_to_chunks_raises_on_invalid_chunk_mode(self):
with pytest.raises(AssertionError):
split_text_to_chunks("A" * 10000, chunk_mode="bogus_chunk_mode")
def test_extract_text_from_pdf(self):
pdf_file_path = os.path.join(test_dir, "example.pdf")
assert "".join(expected_text.split()) == "".join(extract_text_from_pdf(pdf_file_path).strip().split())
def test_split_files_to_chunks(self):
pdf_file_path = os.path.join(test_dir, "example.pdf")
txt_file_path = os.path.join(test_dir, "example.txt")
chunks = split_files_to_chunks([pdf_file_path, txt_file_path])
assert all(
isinstance(chunk, str) and "AutoGen is an advanced tool designed to assist developers" in chunk.strip()
for chunk in chunks
)
def test_get_files_from_dir(self):
files = get_files_from_dir(test_dir, recursive=False)
assert all(os.path.isfile(file) for file in files)
pdf_file_path = os.path.join(test_dir, "example.pdf")
txt_file_path = os.path.join(test_dir, "example.txt")
files = get_files_from_dir([pdf_file_path, txt_file_path])
assert all(os.path.isfile(file) for file in files)
files = get_files_from_dir(
[
pdf_file_path,
txt_file_path,
os.path.join(test_dir, "..", "..", "website/docs"),
"https://raw.githubusercontent.com/microsoft/autogen/main/README.md",
],
recursive=True,
)
assert all(os.path.isfile(file) for file in files)
files = get_files_from_dir(
[
pdf_file_path,
txt_file_path,
os.path.join(test_dir, "..", "..", "website/docs"),
"https://raw.githubusercontent.com/microsoft/autogen/main/README.md",
],
recursive=True,
types=["pdf", "txt"],
)
assert all(os.path.isfile(file) for file in files)
assert len(files) == 3
def test_is_url(self):
assert is_url("https://www.example.com")
assert not is_url("not_a_url")
def test_create_vector_db_from_dir(self):
db_path = "/tmp/test_retrieve_utils_chromadb.db"
if os.path.exists(db_path):
client = chromadb.PersistentClient(path=db_path)
else:
client = chromadb.PersistentClient(path=db_path)
create_vector_db_from_dir(test_dir, client=client)
assert client.get_collection("all-my-documents")
def test_query_vector_db(self):
db_path = "/tmp/test_retrieve_utils_chromadb.db"
if os.path.exists(db_path):
client = chromadb.PersistentClient(path=db_path)
else: # If the database does not exist, create it first
client = chromadb.PersistentClient(path=db_path)
create_vector_db_from_dir(test_dir, client=client)
results = query_vector_db(["autogen"], client=client)
assert isinstance(results, dict) and any("autogen" in res[0].lower() for res in results.get("documents", []))
def test_custom_vector_db(self):
try:
import lancedb
except ImportError:
return
from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent
db_path = "/tmp/lancedb"
def create_lancedb():
db = lancedb.connect(db_path)
data = [
{"vector": [1.1, 1.2], "id": 1, "documents": "This is a test document spark"},
{"vector": [0.2, 1.8], "id": 2, "documents": "This is another test document"},
{"vector": [0.1, 0.3], "id": 3, "documents": "This is a third test document spark"},
{"vector": [0.5, 0.7], "id": 4, "documents": "This is a fourth test document"},
{"vector": [2.1, 1.3], "id": 5, "documents": "This is a fifth test document spark"},
{"vector": [5.1, 8.3], "id": 6, "documents": "This is a sixth test document"},
]
try:
db.create_table("my_table", data)
except OSError:
pass
class MyRetrieveUserProxyAgent(RetrieveUserProxyAgent):
def query_vector_db(
self,
query_texts,
n_results=10,
search_string="",
):
if query_texts:
vector = [0.1, 0.3]
db = lancedb.connect(db_path)
table = db.open_table("my_table")
query = table.search(vector).where(f"documents LIKE '%{search_string}%'").limit(n_results).to_df()
return {"ids": [query["id"].tolist()], "documents": [query["documents"].tolist()]}
def retrieve_docs(self, problem: str, n_results: int = 20, search_string: str = ""):
results = self.query_vector_db(
query_texts=[problem],
n_results=n_results,
search_string=search_string,
)
self._results = results
print("doc_ids: ", results["ids"])
ragragproxyagent = MyRetrieveUserProxyAgent(
name="ragproxyagent",
human_input_mode="NEVER",
max_consecutive_auto_reply=2,
retrieve_config={
"task": "qa",
"chunk_token_size": 2000,
"client": "__",
"embedding_model": "all-mpnet-base-v2",
},
)
create_lancedb()
ragragproxyagent.retrieve_docs("This is a test document spark", n_results=10, search_string="spark")
assert ragragproxyagent._results["ids"] == [[3, 1, 5]]
def test_custom_text_split_function(self):
def custom_text_split_function(text):
return [text[: len(text) // 2], text[len(text) // 2 :]]
db_path = "/tmp/test_retrieve_utils_chromadb.db"
client = chromadb.PersistentClient(path=db_path)
create_vector_db_from_dir(
os.path.join(test_dir, "example.txt"),
client=client,
collection_name="mytestcollection",
custom_text_split_function=custom_text_split_function,
get_or_create=True,
recursive=False,
)
results = query_vector_db(["autogen"], client=client, collection_name="mytestcollection", n_results=1)
assert (
"AutoGen is an advanced tool designed to assist developers in harnessing the capabilities"
in results.get("documents")[0][0]
)
def test_retrieve_utils(self):
client = chromadb.PersistentClient(path="/tmp/chromadb")
create_vector_db_from_dir(
dir_path="./website/docs",
client=client,
collection_name="autogen-docs",
custom_text_types=["txt", "md", "rtf", "rst"],
get_or_create=True,
)
results = query_vector_db(
query_texts=[
"How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?",
],
n_results=4,
client=client,
collection_name="autogen-docs",
search_string="AutoGen",
)
print(results["ids"][0])
assert len(results["ids"][0]) == 4
@pytest.mark.skipif(
not HAS_UNSTRUCTURED,
reason="do not run if unstructured is not installed",
)
def test_unstructured(self):
pdf_file_path = os.path.join(test_dir, "example.pdf")
txt_file_path = os.path.join(test_dir, "example.txt")
word_file_path = os.path.join(test_dir, "example.docx")
chunks = split_files_to_chunks([pdf_file_path, txt_file_path, word_file_path])
assert all(
isinstance(chunk, str) and "AutoGen is an advanced tool designed to assist developers" in chunk.strip()
for chunk in chunks
)
if __name__ == "__main__":
pytest.main()
db_path = "/tmp/test_retrieve_utils_chromadb.db"
if os.path.exists(db_path):
os.remove(db_path) # Delete the database file after tests are finished
| [
"lancedb.connect"
] | [((1011, 1073), 'pytest.mark.skipif', 'pytest.mark.skipif', (['skip'], {'reason': '"""dependency is not installed"""'}), "(skip, reason='dependency is not installed')\n", (1029, 1073), False, 'import pytest\n'), ((609, 634), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (624, 634), False, 'import os\n'), ((8685, 8784), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not HAS_UNSTRUCTURED)'], {'reason': '"""do not run if unstructured is not installed"""'}), "(not HAS_UNSTRUCTURED, reason=\n 'do not run if unstructured is not installed')\n", (8703, 8784), False, 'import pytest\n'), ((9322, 9335), 'pytest.main', 'pytest.main', ([], {}), '()\n', (9333, 9335), False, 'import pytest\n'), ((9397, 9420), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (9411, 9420), False, 'import os\n'), ((1189, 1237), 'autogen.retrieve_utils.split_text_to_chunks', 'split_text_to_chunks', (['long_text'], {'max_tokens': '(1000)'}), '(long_text, max_tokens=1000)\n', (1209, 1237), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((1564, 1601), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (1576, 1601), False, 'import os\n'), ((1780, 1817), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (1792, 1817), False, 'import os\n'), ((1842, 1879), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (1854, 1879), False, 'import os\n'), ((1897, 1950), 'autogen.retrieve_utils.split_files_to_chunks', 'split_files_to_chunks', (['[pdf_file_path, txt_file_path]'], {}), '([pdf_file_path, txt_file_path])\n', (1918, 1950), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2185, 2230), 'autogen.retrieve_utils.get_files_from_dir', 'get_files_from_dir', (['test_dir'], {'recursive': '(False)'}), '(test_dir, recursive=False)\n', (2203, 2230), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2314, 2351), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (2326, 2351), False, 'import os\n'), ((2376, 2413), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (2388, 2413), False, 'import os\n'), ((2430, 2480), 'autogen.retrieve_utils.get_files_from_dir', 'get_files_from_dir', (['[pdf_file_path, txt_file_path]'], {}), '([pdf_file_path, txt_file_path])\n', (2448, 2480), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3404, 3437), 'autogen.retrieve_utils.is_url', 'is_url', (['"""https://www.example.com"""'], {}), "('https://www.example.com')\n", (3410, 3437), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3592, 3615), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (3606, 3615), False, 'import os\n'), ((3979, 4002), 
'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (3993, 4002), False, 'import os\n'), ((4273, 4316), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', (["['autogen']"], {'client': 'client'}), "(['autogen'], client=client)\n", (4288, 4316), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((7313, 7352), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (7338, 7352), False, 'import chromadb\n'), ((7670, 7767), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', (["['autogen']"], {'client': 'client', 'collection_name': '"""mytestcollection"""', 'n_results': '(1)'}), "(['autogen'], client=client, collection_name=\n 'mytestcollection', n_results=1)\n", (7685, 7767), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((7992, 8039), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""/tmp/chromadb"""'}), "(path='/tmp/chromadb')\n", (8017, 8039), False, 'import chromadb\n'), ((8048, 8222), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', ([], {'dir_path': '"""./website/docs"""', 'client': 'client', 'collection_name': '"""autogen-docs"""', 'custom_text_types': "['txt', 'md', 'rtf', 'rst']", 'get_or_create': '(True)'}), "(dir_path='./website/docs', client=client,\n collection_name='autogen-docs', custom_text_types=['txt', 'md', 'rtf',\n 'rst'], get_or_create=True)\n", (8073, 8222), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((8304, 8514), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', ([], {'query_texts': "['How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?'\n ]", 'n_results': '(4)', 'client': 'client', 'collection_name': '"""autogen-docs"""', 'search_string': '"""AutoGen"""'}), "(query_texts=[\n 'How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?'\n ], n_results=4, client=client, collection_name='autogen-docs',\n search_string='AutoGen')\n", (8319, 8514), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((8860, 8897), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (8872, 8897), False, 'import os\n'), ((8922, 8959), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (8934, 8959), False, 'import os\n'), ((8985, 9023), 'os.path.join', 'os.path.join', (['test_dir', '"""example.docx"""'], {}), "(test_dir, 'example.docx')\n", (8997, 9023), False, 'import os\n'), ((9041, 9110), 'autogen.retrieve_utils.split_files_to_chunks', 'split_files_to_chunks', (['[pdf_file_path, txt_file_path, word_file_path]'], {}), '([pdf_file_path, txt_file_path, word_file_path])\n', (9062, 9110), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((9430, 9448), 'os.remove', 'os.remove', (['db_path'], {}), '(db_path)\n', 
(9439, 9448), False, 'import os\n'), ((1389, 1418), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1402, 1418), False, 'import pytest\n'), ((1432, 1496), 'autogen.retrieve_utils.split_text_to_chunks', 'split_text_to_chunks', (["('A' * 10000)"], {'chunk_mode': '"""bogus_chunk_mode"""'}), "('A' * 10000, chunk_mode='bogus_chunk_mode')\n", (1452, 1496), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3457, 3476), 'autogen.retrieve_utils.is_url', 'is_url', (['"""not_a_url"""'], {}), "('not_a_url')\n", (3463, 3476), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3638, 3677), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (3663, 3677), False, 'import chromadb\n'), ((3713, 3752), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (3738, 3752), False, 'import chromadb\n'), ((3765, 3815), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', (['test_dir'], {'client': 'client'}), '(test_dir, client=client)\n', (3790, 3815), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((4025, 4064), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (4050, 4064), False, 'import chromadb\n'), ((4151, 4190), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (4176, 4190), False, 'import chromadb\n'), ((4203, 4253), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', (['test_dir'], {'client': 'client'}), '(test_dir, client=client)\n', (4228, 4253), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((4737, 4761), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (4752, 4761), False, 'import lancedb\n'), ((7400, 7437), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (7412, 7437), False, 'import os\n'), ((2250, 2270), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2264, 2270), False, 'import os\n'), ((2500, 2520), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2514, 2520), False, 'import os\n'), ((2668, 2718), 'os.path.join', 'os.path.join', (['test_dir', '""".."""', '""".."""', '"""website/docs"""'], {}), "(test_dir, '..', '..', 'website/docs')\n", (2680, 2718), False, 'import os\n'), ((2878, 2898), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2892, 2898), False, 'import os\n'), ((3046, 3096), 'os.path.join', 'os.path.join', (['test_dir', '""".."""', '""".."""', '"""website/docs"""'], {}), "(test_dir, '..', '..', 'website/docs')\n", (3058, 3096), False, 'import os\n'), ((3290, 3310), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (3304, 3310), False, 'import os\n'), ((5817, 5841), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (5832, 5841), False, 'import lancedb\n'), ((1257, 1275), 
'autogen.token_count_utils.count_token', 'count_token', (['chunk'], {}), '(chunk)\n', (1268, 1275), False, 'from autogen.token_count_utils import count_token\n'), ((1659, 1695), 'autogen.retrieve_utils.extract_text_from_pdf', 'extract_text_from_pdf', (['pdf_file_path'], {}), '(pdf_file_path)\n', (1680, 1695), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n')] |
from langchain.vectorstores import LanceDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def test_lancedb() -> None:
import lancedb
embeddings = FakeEmbeddings()
db = lancedb.connect("/tmp/lancedb")
texts = ["text 1", "text 2", "item 3"]
vectors = embeddings.embed_documents(texts)
table = db.create_table(
"my_table",
data=[
{"vector": vectors[idx], "id": text, "text": text}
for idx, text in enumerate(texts)
],
mode="overwrite",
)
store = LanceDB(table, embeddings)
result = store.similarity_search("text 1")
result_texts = [doc.page_content for doc in result]
assert "text 1" in result_texts
def test_lancedb_add_texts() -> None:
import lancedb
embeddings = FakeEmbeddings()
db = lancedb.connect("/tmp/lancedb")
texts = ["text 1"]
vectors = embeddings.embed_documents(texts)
table = db.create_table(
"my_table",
data=[
{"vector": vectors[idx], "id": text, "text": text}
for idx, text in enumerate(texts)
],
mode="overwrite",
)
store = LanceDB(table, embeddings)
store.add_texts(["text 2"])
result = store.similarity_search("text 2")
result_texts = [doc.page_content for doc in result]
assert "text 2" in result_texts
| [
"lancedb.connect"
] | [((190, 206), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (204, 206), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((216, 247), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (231, 247), False, 'import lancedb\n'), ((567, 593), 'langchain.vectorstores.LanceDB', 'LanceDB', (['table', 'embeddings'], {}), '(table, embeddings)\n', (574, 593), False, 'from langchain.vectorstores import LanceDB\n'), ((810, 826), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (824, 826), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((836, 867), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (851, 867), False, 'import lancedb\n'), ((1167, 1193), 'langchain.vectorstores.LanceDB', 'LanceDB', (['table', 'embeddings'], {}), '(table, embeddings)\n', (1174, 1193), False, 'from langchain.vectorstores import LanceDB\n')] |
import lancedb
from langchain.vectorstores import LanceDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def test_lancedb() -> None:
embeddings = FakeEmbeddings()
db = lancedb.connect("/tmp/lancedb")
texts = ["text 1", "text 2", "item 3"]
vectors = embeddings.embed_documents(texts)
table = db.create_table(
"my_table",
data=[
{"vector": vectors[idx], "id": text, "text": text}
for idx, text in enumerate(texts)
],
mode="overwrite",
)
store = LanceDB(table, embeddings)
result = store.similarity_search("text 1")
result_texts = [doc.page_content for doc in result]
assert "text 1" in result_texts
def test_lancedb_add_texts() -> None:
embeddings = FakeEmbeddings()
db = lancedb.connect("/tmp/lancedb")
texts = ["text 1"]
vectors = embeddings.embed_documents(texts)
table = db.create_table(
"my_table",
data=[
{"vector": vectors[idx], "id": text, "text": text}
for idx, text in enumerate(texts)
],
mode="overwrite",
)
store = LanceDB(table, embeddings)
store.add_texts(["text 2"])
result = store.similarity_search("text 2")
result_texts = [doc.page_content for doc in result]
assert "text 2" in result_texts
| [
"lancedb.connect"
] | [((186, 202), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (200, 202), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((212, 243), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (227, 243), False, 'import lancedb\n'), ((563, 589), 'langchain.vectorstores.LanceDB', 'LanceDB', (['table', 'embeddings'], {}), '(table, embeddings)\n', (570, 589), False, 'from langchain.vectorstores import LanceDB\n'), ((786, 802), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (800, 802), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((812, 843), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (827, 843), False, 'import lancedb\n'), ((1143, 1169), 'langchain.vectorstores.LanceDB', 'LanceDB', (['table', 'embeddings'], {}), '(table, embeddings)\n', (1150, 1169), False, 'from langchain.vectorstores import LanceDB\n')] |
import lancedb
db = lancedb.connect("data/sample-lancedb")
table = db.open_table("python_docs")
print(table.to_pandas())
print(table.to_pandas()["text"])
print(table.to_pandas().columns)
print("vector size: " + str(len(table.to_pandas()['vector'].values[0])))
| [
"lancedb.connect"
] | [((21, 59), 'lancedb.connect', 'lancedb.connect', (['"""data/sample-lancedb"""'], {}), "('data/sample-lancedb')\n", (36, 59), False, 'import lancedb\n')] |
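open_table assumes the python_docs table already exists; a hedged seeding sketch (the vector width and rows are invented purely for illustration):

import lancedb

db = lancedb.connect("data/sample-lancedb")
db.create_table(
    "python_docs",
    data=[
        {"vector": [0.1, 0.2, 0.3, 0.4], "text": "def greet(): return 'hi'"},
        {"vector": [0.4, 0.3, 0.2, 0.1], "text": "class Foo: ..."},
    ],
    mode="overwrite",  # replace any existing table of the same name
)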
import lancedb
import numpy as np
import pandas as pd
import pytest
import subprocess
from main import get_recommendations, data
import main
# DOWNLOAD ======================================================
subprocess.Popen(
"curl https://files.grouplens.org/datasets/movielens/ml-latest-small.zip -o ml-latest-small.zip",
shell=True,
).wait()
subprocess.Popen("unzip ml-latest-small.zip", shell=True).wait()
# TESTING ======================================================
def test_main():
ratings = pd.read_csv(
"./ml-latest-small/ratings.csv",
header=None,
names=["user id", "movie id", "rating", "timestamp"],
)
ratings = ratings.drop(columns=["timestamp"])
ratings = ratings.drop(0)
ratings["rating"] = ratings["rating"].values.astype(np.float32)
ratings["user id"] = ratings["user id"].values.astype(np.int32)
ratings["movie id"] = ratings["movie id"].values.astype(np.int32)
reviewmatrix = ratings.pivot(
index="user id", columns="movie id", values="rating"
).fillna(0)
# SVD
matrix = reviewmatrix.values
u, s, vh = np.linalg.svd(matrix, full_matrices=False)
vectors = np.rot90(np.fliplr(vh))
print(vectors.shape)
# Metadata
movies = pd.read_csv(
"./ml-latest-small/movies.csv", header=0, names=["movie id", "title", "genres"]
)
movies = movies[movies["movie id"].isin(reviewmatrix.columns)]
for i in range(len(movies)):
data.append(
{
"id": movies.iloc[i]["movie id"],
"title": movies.iloc[i]["title"],
"vector": vectors[i],
"genre": movies.iloc[i]["genres"],
}
)
print(pd.DataFrame(data))
# Connect to LanceDB
db = lancedb.connect("./data/test-db")
try:
main.table = db.create_table("movie_set", data=data)
    except Exception:  # table may already exist; fall back to opening it
main.table = db.open_table("movie_set")
print(get_recommendations("Moana (2016)"))
print(get_recommendations("Rogue One: A Star Wars Story (2016)"))
| [
"lancedb.connect"
] | [((519, 634), 'pandas.read_csv', 'pd.read_csv', (['"""./ml-latest-small/ratings.csv"""'], {'header': 'None', 'names': "['user id', 'movie id', 'rating', 'timestamp']"}), "('./ml-latest-small/ratings.csv', header=None, names=['user id',\n 'movie id', 'rating', 'timestamp'])\n", (530, 634), True, 'import pandas as pd\n'), ((1119, 1161), 'numpy.linalg.svd', 'np.linalg.svd', (['matrix'], {'full_matrices': '(False)'}), '(matrix, full_matrices=False)\n', (1132, 1161), True, 'import numpy as np\n'), ((1255, 1351), 'pandas.read_csv', 'pd.read_csv', (['"""./ml-latest-small/movies.csv"""'], {'header': '(0)', 'names': "['movie id', 'title', 'genres']"}), "('./ml-latest-small/movies.csv', header=0, names=['movie id',\n 'title', 'genres'])\n", (1266, 1351), True, 'import pandas as pd\n'), ((1777, 1810), 'lancedb.connect', 'lancedb.connect', (['"""./data/test-db"""'], {}), "('./data/test-db')\n", (1792, 1810), False, 'import lancedb\n'), ((210, 346), 'subprocess.Popen', 'subprocess.Popen', (['"""curl https://files.grouplens.org/datasets/movielens/ml-latest-small.zip -o ml-latest-small.zip"""'], {'shell': '(True)'}), "(\n 'curl https://files.grouplens.org/datasets/movielens/ml-latest-small.zip -o ml-latest-small.zip'\n , shell=True)\n", (226, 346), False, 'import subprocess\n'), ((355, 412), 'subprocess.Popen', 'subprocess.Popen', (['"""unzip ml-latest-small.zip"""'], {'shell': '(True)'}), "('unzip ml-latest-small.zip', shell=True)\n", (371, 412), False, 'import subprocess\n'), ((1186, 1199), 'numpy.fliplr', 'np.fliplr', (['vh'], {}), '(vh)\n', (1195, 1199), True, 'import numpy as np\n'), ((1471, 1614), 'main.data.append', 'data.append', (["{'id': movies.iloc[i]['movie id'], 'title': movies.iloc[i]['title'],\n 'vector': vectors[i], 'genre': movies.iloc[i]['genres']}"], {}), "({'id': movies.iloc[i]['movie id'], 'title': movies.iloc[i][\n 'title'], 'vector': vectors[i], 'genre': movies.iloc[i]['genres']})\n", (1482, 1614), False, 'from main import get_recommendations, data\n'), ((1721, 1739), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (1733, 1739), True, 'import pandas as pd\n'), ((1952, 1987), 'main.get_recommendations', 'get_recommendations', (['"""Moana (2016)"""'], {}), "('Moana (2016)')\n", (1971, 1987), False, 'from main import get_recommendations, data\n'), ((1999, 2057), 'main.get_recommendations', 'get_recommendations', (['"""Rogue One: A Star Wars Story (2016)"""'], {}), "('Rogue One: A Star Wars Story (2016)')\n", (2018, 2057), False, 'from main import get_recommendations, data\n')] |
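get_recommendations is defined in main and not shown here; as a rough standalone sketch, the lookup it presumably performs against the movie_set table could look like this (column names follow the data built in test_main):

import lancedb

db = lancedb.connect("./data/test-db")
table = db.open_table("movie_set")
df = table.to_pandas()
# fetch the stored vector for a title, then rank its nearest neighbours
query_vec = df.loc[df["title"] == "Moana (2016)", "vector"].values[0]
print(table.search(query_vec).limit(5).to_pandas()["title"].tolist())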
"""LanceDB vector store."""
import logging
from typing import Any, List, Optional
import numpy as np
from pandas import DataFrame
from llama_index.legacy.schema import (
BaseNode,
MetadataMode,
NodeRelationship,
RelatedNodeInfo,
TextNode,
)
from llama_index.legacy.vector_stores.types import (
MetadataFilters,
VectorStore,
VectorStoreQuery,
VectorStoreQueryResult,
)
from llama_index.legacy.vector_stores.utils import (
DEFAULT_DOC_ID_KEY,
DEFAULT_TEXT_KEY,
legacy_metadata_dict_to_node,
metadata_dict_to_node,
node_to_metadata_dict,
)
_logger = logging.getLogger(__name__)
def _to_lance_filter(standard_filters: MetadataFilters) -> Any:
"""Translate standard metadata filters to Lance specific spec."""
filters = []
for filter in standard_filters.legacy_filters():
if isinstance(filter.value, str):
filters.append(filter.key + ' = "' + filter.value + '"')
else:
filters.append(filter.key + " = " + str(filter.value))
return " AND ".join(filters)
def _to_llama_similarities(results: DataFrame) -> List[float]:
keys = results.keys()
normalized_similarities: np.ndarray
if "score" in keys:
normalized_similarities = np.exp(results["score"] - np.max(results["score"]))
elif "_distance" in keys:
normalized_similarities = np.exp(-results["_distance"])
else:
normalized_similarities = np.linspace(1, 0, len(results))
return normalized_similarities.tolist()
class LanceDBVectorStore(VectorStore):
"""
The LanceDB Vector Store.
Stores text and embeddings in LanceDB. The vector store will open an existing
LanceDB dataset or create the dataset if it does not exist.
Args:
uri (str, required): Location where LanceDB will store its files.
table_name (str, optional): The table name where the embeddings will be stored.
Defaults to "vectors".
vector_column_name (str, optional): The vector column name in the table if different from default.
Defaults to "vector", in keeping with lancedb convention.
nprobes (int, optional): The number of probes used.
A higher number makes search more accurate but also slower.
Defaults to 20.
refine_factor: (int, optional): Refine the results by reading extra elements
and re-ranking them in memory.
Defaults to None
Raises:
ImportError: Unable to import `lancedb`.
Returns:
LanceDBVectorStore: VectorStore that supports creating LanceDB datasets and
querying it.
"""
stores_text = True
flat_metadata: bool = True
def __init__(
self,
uri: str,
table_name: str = "vectors",
vector_column_name: str = "vector",
nprobes: int = 20,
refine_factor: Optional[int] = None,
text_key: str = DEFAULT_TEXT_KEY,
doc_id_key: str = DEFAULT_DOC_ID_KEY,
**kwargs: Any,
) -> None:
"""Init params."""
import_err_msg = "`lancedb` package not found, please run `pip install lancedb`"
try:
import lancedb
except ImportError:
raise ImportError(import_err_msg)
self.connection = lancedb.connect(uri)
self.uri = uri
self.table_name = table_name
self.vector_column_name = vector_column_name
self.nprobes = nprobes
self.text_key = text_key
self.doc_id_key = doc_id_key
self.refine_factor = refine_factor
@property
def client(self) -> None:
"""Get client."""
return
def add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
data = []
ids = []
for node in nodes:
metadata = node_to_metadata_dict(
node, remove_text=False, flat_metadata=self.flat_metadata
)
append_data = {
"id": node.node_id,
"doc_id": node.ref_doc_id,
"vector": node.get_embedding(),
"text": node.get_content(metadata_mode=MetadataMode.NONE),
"metadata": metadata,
}
data.append(append_data)
ids.append(node.node_id)
if self.table_name in self.connection.table_names():
tbl = self.connection.open_table(self.table_name)
tbl.add(data)
else:
self.connection.create_table(self.table_name, data)
return ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete nodes using with ref_doc_id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
"""
table = self.connection.open_table(self.table_name)
        table.delete('doc_id = "' + ref_doc_id + '"')
def query(
self,
query: VectorStoreQuery,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes."""
if query.filters is not None:
if "where" in kwargs:
raise ValueError(
"Cannot specify filter via both query and kwargs. "
"Use kwargs only for lancedb specific items that are "
"not supported via the generic query interface."
)
where = _to_lance_filter(query.filters)
else:
where = kwargs.pop("where", None)
table = self.connection.open_table(self.table_name)
lance_query = (
table.search(
query=query.query_embedding,
vector_column_name=self.vector_column_name,
)
.limit(query.similarity_top_k)
.where(where)
.nprobes(self.nprobes)
)
if self.refine_factor is not None:
lance_query.refine_factor(self.refine_factor)
results = lance_query.to_pandas()
nodes = []
for _, item in results.iterrows():
try:
node = metadata_dict_to_node(item.metadata)
node.embedding = list(item[self.vector_column_name])
except Exception:
# deprecated legacy logic for backward compatibility
_logger.debug(
"Failed to parse Node metadata, fallback to legacy logic."
)
if "metadata" in item:
metadata, node_info, _relation = legacy_metadata_dict_to_node(
item.metadata, text_key=self.text_key
)
else:
metadata, node_info = {}, {}
node = TextNode(
text=item[self.text_key] or "",
id_=item.id,
metadata=metadata,
start_char_idx=node_info.get("start", None),
end_char_idx=node_info.get("end", None),
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id=item[self.doc_id_key]
),
},
)
nodes.append(node)
return VectorStoreQueryResult(
nodes=nodes,
similarities=_to_llama_similarities(results),
ids=results["id"].tolist(),
)
| [
"lancedb.connect"
] | [((607, 634), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (624, 634), False, 'import logging\n'), ((3288, 3308), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (3303, 3308), False, 'import lancedb\n'), ((1371, 1400), 'numpy.exp', 'np.exp', (["(-results['_distance'])"], {}), "(-results['_distance'])\n", (1377, 1400), True, 'import numpy as np\n'), ((3843, 3928), 'llama_index.legacy.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(False)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=False, flat_metadata=self.flat_metadata\n )\n', (3864, 3928), False, 'from llama_index.legacy.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((1281, 1305), 'numpy.max', 'np.max', (["results['score']"], {}), "(results['score'])\n", (1287, 1305), True, 'import numpy as np\n'), ((6116, 6152), 'llama_index.legacy.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['item.metadata'], {}), '(item.metadata)\n', (6137, 6152), False, 'from llama_index.legacy.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((6541, 6608), 'llama_index.legacy.vector_stores.utils.legacy_metadata_dict_to_node', 'legacy_metadata_dict_to_node', (['item.metadata'], {'text_key': 'self.text_key'}), '(item.metadata, text_key=self.text_key)\n', (6569, 6608), False, 'from llama_index.legacy.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((7094, 7140), 'llama_index.legacy.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'item[self.doc_id_key]'}), '(node_id=item[self.doc_id_key])\n', (7109, 7140), False, 'from llama_index.legacy.schema import BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode\n')] |
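A minimal usage sketch of the store above, built only from classes it already imports; the 2-d embeddings and ids are invented, and it assumes a lancedb version whose query builder accepts vector_column_name and a None where-clause, as the query method relies on:

from llama_index.legacy.schema import NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.legacy.vector_stores.types import VectorStoreQuery

store = LanceDBVectorStore(uri="/tmp/lancedb-demo")
node = TextNode(
    text="hello lancedb",
    id_="node-1",
    embedding=[0.1, 0.2],
    # the SOURCE relationship supplies node.ref_doc_id, stored in the doc_id column
    relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="doc-1")},
)
store.add([node])
result = store.query(VectorStoreQuery(query_embedding=[0.1, 0.2], similarity_top_k=1))
print(result.ids)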
"""LanceDB vector store."""
import logging
from typing import Any, List, Optional
import numpy as np
from pandas import DataFrame
from llama_index.legacy.schema import (
BaseNode,
MetadataMode,
NodeRelationship,
RelatedNodeInfo,
TextNode,
)
from llama_index.legacy.vector_stores.types import (
MetadataFilters,
VectorStore,
VectorStoreQuery,
VectorStoreQueryResult,
)
from llama_index.legacy.vector_stores.utils import (
DEFAULT_DOC_ID_KEY,
DEFAULT_TEXT_KEY,
legacy_metadata_dict_to_node,
metadata_dict_to_node,
node_to_metadata_dict,
)
_logger = logging.getLogger(__name__)
def _to_lance_filter(standard_filters: MetadataFilters) -> Any:
"""Translate standard metadata filters to Lance specific spec."""
filters = []
for filter in standard_filters.legacy_filters():
if isinstance(filter.value, str):
filters.append(filter.key + ' = "' + filter.value + '"')
else:
filters.append(filter.key + " = " + str(filter.value))
return " AND ".join(filters)
def _to_llama_similarities(results: DataFrame) -> List[float]:
keys = results.keys()
normalized_similarities: np.ndarray
if "score" in keys:
normalized_similarities = np.exp(results["score"] - np.max(results["score"]))
elif "_distance" in keys:
normalized_similarities = np.exp(-results["_distance"])
else:
normalized_similarities = np.linspace(1, 0, len(results))
return normalized_similarities.tolist()
class LanceDBVectorStore(VectorStore):
"""
The LanceDB Vector Store.
Stores text and embeddings in LanceDB. The vector store will open an existing
LanceDB dataset or create the dataset if it does not exist.
Args:
uri (str, required): Location where LanceDB will store its files.
table_name (str, optional): The table name where the embeddings will be stored.
Defaults to "vectors".
vector_column_name (str, optional): The vector column name in the table if different from default.
Defaults to "vector", in keeping with lancedb convention.
nprobes (int, optional): The number of probes used.
A higher number makes search more accurate but also slower.
Defaults to 20.
refine_factor: (int, optional): Refine the results by reading extra elements
and re-ranking them in memory.
Defaults to None
Raises:
ImportError: Unable to import `lancedb`.
Returns:
LanceDBVectorStore: VectorStore that supports creating LanceDB datasets and
querying it.
"""
stores_text = True
flat_metadata: bool = True
def __init__(
self,
uri: str,
table_name: str = "vectors",
vector_column_name: str = "vector",
nprobes: int = 20,
refine_factor: Optional[int] = None,
text_key: str = DEFAULT_TEXT_KEY,
doc_id_key: str = DEFAULT_DOC_ID_KEY,
**kwargs: Any,
) -> None:
"""Init params."""
import_err_msg = "`lancedb` package not found, please run `pip install lancedb`"
try:
import lancedb
except ImportError:
raise ImportError(import_err_msg)
self.connection = lancedb.connect(uri)
self.uri = uri
self.table_name = table_name
self.vector_column_name = vector_column_name
self.nprobes = nprobes
self.text_key = text_key
self.doc_id_key = doc_id_key
self.refine_factor = refine_factor
@property
def client(self) -> None:
"""Get client."""
return
def add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
data = []
ids = []
for node in nodes:
metadata = node_to_metadata_dict(
node, remove_text=False, flat_metadata=self.flat_metadata
)
append_data = {
"id": node.node_id,
"doc_id": node.ref_doc_id,
"vector": node.get_embedding(),
"text": node.get_content(metadata_mode=MetadataMode.NONE),
"metadata": metadata,
}
data.append(append_data)
ids.append(node.node_id)
if self.table_name in self.connection.table_names():
tbl = self.connection.open_table(self.table_name)
tbl.add(data)
else:
self.connection.create_table(self.table_name, data)
return ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete nodes using with ref_doc_id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
"""
table = self.connection.open_table(self.table_name)
table.delete('document_id = "' + ref_doc_id + '"')
def query(
self,
query: VectorStoreQuery,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes."""
if query.filters is not None:
if "where" in kwargs:
raise ValueError(
"Cannot specify filter via both query and kwargs. "
"Use kwargs only for lancedb specific items that are "
"not supported via the generic query interface."
)
where = _to_lance_filter(query.filters)
else:
where = kwargs.pop("where", None)
table = self.connection.open_table(self.table_name)
lance_query = (
table.search(
query=query.query_embedding,
vector_column_name=self.vector_column_name,
)
.limit(query.similarity_top_k)
.where(where)
.nprobes(self.nprobes)
)
if self.refine_factor is not None:
lance_query.refine_factor(self.refine_factor)
results = lance_query.to_pandas()
nodes = []
for _, item in results.iterrows():
try:
node = metadata_dict_to_node(item.metadata)
node.embedding = list(item[self.vector_column_name])
except Exception:
# deprecated legacy logic for backward compatibility
_logger.debug(
"Failed to parse Node metadata, fallback to legacy logic."
)
if "metadata" in item:
metadata, node_info, _relation = legacy_metadata_dict_to_node(
item.metadata, text_key=self.text_key
)
else:
metadata, node_info = {}, {}
node = TextNode(
text=item[self.text_key] or "",
id_=item.id,
metadata=metadata,
start_char_idx=node_info.get("start", None),
end_char_idx=node_info.get("end", None),
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id=item[self.doc_id_key]
),
},
)
nodes.append(node)
return VectorStoreQueryResult(
nodes=nodes,
similarities=_to_llama_similarities(results),
ids=results["id"].tolist(),
)
| [
"lancedb.connect"
] | [((607, 634), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (624, 634), False, 'import logging\n'), ((3288, 3308), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (3303, 3308), False, 'import lancedb\n'), ((1371, 1400), 'numpy.exp', 'np.exp', (["(-results['_distance'])"], {}), "(-results['_distance'])\n", (1377, 1400), True, 'import numpy as np\n'), ((3843, 3928), 'llama_index.legacy.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(False)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=False, flat_metadata=self.flat_metadata\n )\n', (3864, 3928), False, 'from llama_index.legacy.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((1281, 1305), 'numpy.max', 'np.max', (["results['score']"], {}), "(results['score'])\n", (1287, 1305), True, 'import numpy as np\n'), ((6116, 6152), 'llama_index.legacy.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['item.metadata'], {}), '(item.metadata)\n', (6137, 6152), False, 'from llama_index.legacy.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((6541, 6608), 'llama_index.legacy.vector_stores.utils.legacy_metadata_dict_to_node', 'legacy_metadata_dict_to_node', (['item.metadata'], {'text_key': 'self.text_key'}), '(item.metadata, text_key=self.text_key)\n', (6569, 6608), False, 'from llama_index.legacy.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((7094, 7140), 'llama_index.legacy.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'item[self.doc_id_key]'}), '(node_id=item[self.doc_id_key])\n', (7109, 7140), False, 'from llama_index.legacy.schema import BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode\n')] |
from typing import List, Any
from dataclasses import dataclass
import lancedb
import pandas as pd
from autochain.tools.base import Tool
from autochain.models.base import BaseLanguageModel
from autochain.tools.internal_search.base_search_tool import BaseSearchTool
@dataclass
class LanceDBDoc:
doc: str
vector: List[float] = None
class LanceDBSeach(Tool, BaseSearchTool):
"""
Use LanceDB as the internal search tool
LanceDB is a vector database that supports vector search.
Args:
uri: the uri of the database. Default to "lancedb"
table_name: the name of the table. Default to "table"
metric: the metric used for vector search. Default to "cosine"
encoder: the encoder used to encode the documents. Default to None
docs: the documents to be indexed. Default to None
"""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
docs: List[LanceDBDoc]
uri: str = "lancedb"
table_name: str = "table"
metric: str = "cosine"
encoder: BaseLanguageModel = None
db: lancedb.db.DBConnection = None
table: lancedb.table.Table = None
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
self.db = lancedb.connect(self.uri)
if self.docs:
self._encode_docs(self.docs)
self._create_table(self.docs)
def _create_table(self, docs: List[LanceDBDoc]) -> None:
self.table = self.db.create_table(self.table_name, self._docs_to_dataframe(docs), mode="overwrite")
def _encode_docs(self, docs: List[LanceDBDoc]) -> None:
for doc in docs:
if not doc.vector:
if not self.encoder:
raise ValueError("Encoder is not provided for encoding docs")
doc.vector = self.encoder.encode([doc.doc]).embeddings[0]
def _docs_to_dataframe(self, docs: List[LanceDBDoc]) -> pd.DataFrame:
return pd.DataFrame(
[
{"doc": doc.doc, "vector": doc.vector}
for doc in docs
]
)
def _run(
self,
query: str,
top_k: int = 2,
*args: Any,
**kwargs: Any,
) -> str:
if self.table is None:
return ""
embeddings = self.encoder.encode([query]).embeddings[0]
result = self.table.search(embeddings).limit(top_k).to_df()["doc"].to_list()
return "\n".join([f"Doc {i}: {doc}" for i, doc in enumerate(result)])
def add_docs(self, docs: List[LanceDBDoc], **kwargs):
if not len(docs):
return
self._encode_docs(docs)
self.table.add(self._docs_to_dataframe(docs)) if self.table else self._create_table(docs)
def clear_index(self):
if self.table_name in self.db.table_names():
self.db.drop_table(self.table_name)
self.table = None
| [
"lancedb.connect"
] | [((1275, 1300), 'lancedb.connect', 'lancedb.connect', (['self.uri'], {}), '(self.uri)\n', (1290, 1300), False, 'import lancedb\n'), ((1984, 2054), 'pandas.DataFrame', 'pd.DataFrame', (["[{'doc': doc.doc, 'vector': doc.vector} for doc in docs]"], {}), "([{'doc': doc.doc, 'vector': doc.vector} for doc in docs])\n", (1996, 2054), True, 'import pandas as pd\n')] |
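Constructing the tool requires a concrete encoder, so as a rough reference here is the plain-lancedb flow that _run wraps (table name, vectors, and query are illustrative only):

import lancedb
import pandas as pd

db = lancedb.connect("lancedb")
frame = pd.DataFrame(
    [
        {"doc": "hello world", "vector": [0.1, 0.9]},
        {"doc": "goodbye world", "vector": [0.9, 0.1]},
    ]
)
table = db.create_table("table", frame, mode="overwrite")
# top-k limiting and doc extraction mirror LanceDBSeach._run
print(table.search([0.1, 0.8]).limit(2).to_df()["doc"].to_list())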
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/8/9 15:42
@Author : unkn-wn (Leon Yee)
@File : lancedb_store.py
"""
import lancedb
import shutil, os
class LanceStore:
def __init__(self, name):
db = lancedb.connect('./data/lancedb')
self.db = db
self.name = name
self.table = None
def search(self, query, n_results=2, metric="L2", nprobes=20, **kwargs):
# This assumes query is a vector embedding
# kwargs can be used for optional filtering
# .select - only searches the specified columns
# .where - SQL syntax filtering for metadata (e.g. where("price > 100"))
# .metric - specifies the distance metric to use
        # .nprobes - higher values yield better recall (more likely to find vectors if they exist) at the expense of latency.
        if self.table is None:
            raise Exception("Table not created yet, please add data first.")
results = self.table \
.search(query) \
.limit(n_results) \
.select(kwargs.get('select')) \
.where(kwargs.get('where')) \
.metric(metric) \
.nprobes(nprobes) \
.to_df()
return results
def persist(self):
raise NotImplementedError
def write(self, data, metadatas, ids):
# This function is similar to add(), but it's for more generalized updates
# "data" is the list of embeddings
# Inserts into table by expanding metadatas into a dataframe: [{'vector', 'id', 'meta', 'meta2'}, ...]
documents = []
for i in range(len(data)):
row = {
'vector': data[i],
'id': ids[i]
}
row.update(metadatas[i])
documents.append(row)
        if self.table is not None:
self.table.add(documents)
else:
self.table = self.db.create_table(self.name, documents)
def add(self, data, metadata, _id):
# This function is for adding individual documents
# It assumes you're passing in a single vector embedding, metadata, and id
row = {
'vector': data,
'id': _id
}
row.update(metadata)
        if self.table is not None:
self.table.add([row])
else:
self.table = self.db.create_table(self.name, [row])
def delete(self, _id):
# This function deletes a row by id.
# LanceDB delete syntax uses SQL syntax, so you can use "in" or "="
        if self.table is None:
            raise Exception("Table not created yet, please add data first")
if isinstance(_id, str):
return self.table.delete(f"id = '{_id}'")
else:
return self.table.delete(f"id = {_id}")
def drop(self, name):
# This function drops a table, if it exists.
path = os.path.join(self.db.uri, name + '.lance')
if os.path.exists(path):
            shutil.rmtree(path)
| [
"lancedb.connect"
] | [((234, 267), 'lancedb.connect', 'lancedb.connect', (['"""./data/lancedb"""'], {}), "('./data/lancedb')\n", (249, 267), False, 'import lancedb\n'), ((2866, 2908), 'os.path.join', 'os.path.join', (['self.db.uri', "(name + '.lance')"], {}), "(self.db.uri, name + '.lance')\n", (2878, 2908), False, 'import shutil, os\n'), ((2920, 2940), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2934, 2940), False, 'import shutil, os\n'), ((2954, 2973), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (2967, 2973), False, 'import shutil, os\n')] |
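A short usage sketch under the class's own assumptions (2-d vectors; search is called with its default kwargs, which relies on the installed lancedb tolerating select(None) and where(None)):

store = LanceStore("demo")
store.add(data=[0.1, 0.2], metadata={"source": "a.txt"}, _id="a1")
store.write(
    data=[[0.3, 0.4], [0.5, 0.6]],
    metadatas=[{"source": "b.txt"}, {"source": "c.txt"}],
    ids=["b1", "c1"],
)
results = store.search([0.1, 0.2], n_results=2)  # returns a pandas DataFrame
store.delete("a1")
store.drop("demo")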
"""
Unit test for retrieve_utils.py
"""
import pytest
try:
import chromadb
from autogen.retrieve_utils import (
split_text_to_chunks,
extract_text_from_pdf,
split_files_to_chunks,
get_files_from_dir,
is_url,
create_vector_db_from_dir,
query_vector_db,
)
from autogen.token_count_utils import count_token
except ImportError:
skip = True
else:
skip = False
import os
try:
from unstructured.partition.auto import partition
HAS_UNSTRUCTURED = True
except ImportError:
HAS_UNSTRUCTURED = False
test_dir = os.path.join(os.path.dirname(__file__), "test_files")
expected_text = """AutoGen is an advanced tool designed to assist developers in harnessing the capabilities
of Large Language Models (LLMs) for various applications. The primary purpose of AutoGen is to automate and
simplify the process of building applications that leverage the power of LLMs, allowing for seamless
integration, testing, and deployment."""
@pytest.mark.skipif(skip, reason="dependency is not installed")
class TestRetrieveUtils:
def test_split_text_to_chunks(self):
long_text = "A" * 10000
chunks = split_text_to_chunks(long_text, max_tokens=1000)
assert all(count_token(chunk) <= 1000 for chunk in chunks)
def test_split_text_to_chunks_raises_on_invalid_chunk_mode(self):
with pytest.raises(AssertionError):
split_text_to_chunks("A" * 10000, chunk_mode="bogus_chunk_mode")
def test_extract_text_from_pdf(self):
pdf_file_path = os.path.join(test_dir, "example.pdf")
assert "".join(expected_text.split()) == "".join(extract_text_from_pdf(pdf_file_path).strip().split())
def test_split_files_to_chunks(self):
pdf_file_path = os.path.join(test_dir, "example.pdf")
txt_file_path = os.path.join(test_dir, "example.txt")
chunks = split_files_to_chunks([pdf_file_path, txt_file_path])
assert all(
isinstance(chunk, str) and "AutoGen is an advanced tool designed to assist developers" in chunk.strip()
for chunk in chunks
)
def test_get_files_from_dir(self):
files = get_files_from_dir(test_dir, recursive=False)
assert all(os.path.isfile(file) for file in files)
pdf_file_path = os.path.join(test_dir, "example.pdf")
txt_file_path = os.path.join(test_dir, "example.txt")
files = get_files_from_dir([pdf_file_path, txt_file_path])
assert all(os.path.isfile(file) for file in files)
files = get_files_from_dir(
[
pdf_file_path,
txt_file_path,
os.path.join(test_dir, "..", "..", "website/docs"),
"https://raw.githubusercontent.com/microsoft/autogen/main/README.md",
],
recursive=True,
)
assert all(os.path.isfile(file) for file in files)
files = get_files_from_dir(
[
pdf_file_path,
txt_file_path,
os.path.join(test_dir, "..", "..", "website/docs"),
"https://raw.githubusercontent.com/microsoft/autogen/main/README.md",
],
recursive=True,
types=["pdf", "txt"],
)
assert all(os.path.isfile(file) for file in files)
assert len(files) == 3
def test_is_url(self):
assert is_url("https://www.example.com")
assert not is_url("not_a_url")
def test_create_vector_db_from_dir(self):
db_path = "/tmp/test_retrieve_utils_chromadb.db"
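        # both branches open the same persistent client; the collection is only
        # built from the test files when the database file does not exist yet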
if os.path.exists(db_path):
client = chromadb.PersistentClient(path=db_path)
else:
client = chromadb.PersistentClient(path=db_path)
create_vector_db_from_dir(test_dir, client=client)
assert client.get_collection("all-my-documents")
def test_query_vector_db(self):
db_path = "/tmp/test_retrieve_utils_chromadb.db"
if os.path.exists(db_path):
client = chromadb.PersistentClient(path=db_path)
else: # If the database does not exist, create it first
client = chromadb.PersistentClient(path=db_path)
create_vector_db_from_dir(test_dir, client=client)
results = query_vector_db(["autogen"], client=client)
assert isinstance(results, dict) and any("autogen" in res[0].lower() for res in results.get("documents", []))
def test_custom_vector_db(self):
try:
import lancedb
except ImportError:
return
from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent
db_path = "/tmp/lancedb"
def create_lancedb():
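            # seed a local LanceDB table with six toy documents; ids 1, 3 and 5 contain "spark"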
db = lancedb.connect(db_path)
data = [
{"vector": [1.1, 1.2], "id": 1, "documents": "This is a test document spark"},
{"vector": [0.2, 1.8], "id": 2, "documents": "This is another test document"},
{"vector": [0.1, 0.3], "id": 3, "documents": "This is a third test document spark"},
{"vector": [0.5, 0.7], "id": 4, "documents": "This is a fourth test document"},
{"vector": [2.1, 1.3], "id": 5, "documents": "This is a fifth test document spark"},
{"vector": [5.1, 8.3], "id": 6, "documents": "This is a sixth test document"},
]
try:
db.create_table("my_table", data)
except OSError:
pass
class MyRetrieveUserProxyAgent(RetrieveUserProxyAgent):
def query_vector_db(
self,
query_texts,
n_results=10,
search_string="",
):
if query_texts:
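                    # the hard-coded query vector equals document id 3's vector,
                    # making the nearest-neighbour ordering deterministic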
vector = [0.1, 0.3]
db = lancedb.connect(db_path)
table = db.open_table("my_table")
query = table.search(vector).where(f"documents LIKE '%{search_string}%'").limit(n_results).to_df()
return {"ids": [query["id"].tolist()], "documents": [query["documents"].tolist()]}
def retrieve_docs(self, problem: str, n_results: int = 20, search_string: str = ""):
results = self.query_vector_db(
query_texts=[problem],
n_results=n_results,
search_string=search_string,
)
self._results = results
print("doc_ids: ", results["ids"])
ragragproxyagent = MyRetrieveUserProxyAgent(
name="ragproxyagent",
human_input_mode="NEVER",
max_consecutive_auto_reply=2,
retrieve_config={
"task": "qa",
"chunk_token_size": 2000,
"client": "__",
"embedding_model": "all-mpnet-base-v2",
},
)
create_lancedb()
ragragproxyagent.retrieve_docs("This is a test document spark", n_results=10, search_string="spark")
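        # ids 3, 1, 5 are the "spark" documents ordered by distance from the fixed query vector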
assert ragragproxyagent._results["ids"] == [[3, 1, 5]]
def test_custom_text_split_function(self):
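        # naive splitter that halves the text, exercising the custom_text_split_function hook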
def custom_text_split_function(text):
return [text[: len(text) // 2], text[len(text) // 2 :]]
db_path = "/tmp/test_retrieve_utils_chromadb.db"
client = chromadb.PersistentClient(path=db_path)
create_vector_db_from_dir(
os.path.join(test_dir, "example.txt"),
client=client,
collection_name="mytestcollection",
custom_text_split_function=custom_text_split_function,
get_or_create=True,
recursive=False,
)
results = query_vector_db(["autogen"], client=client, collection_name="mytestcollection", n_results=1)
assert (
"AutoGen is an advanced tool designed to assist developers in harnessing the capabilities"
in results.get("documents")[0][0]
)
def test_retrieve_utils(self):
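        # end-to-end check: index the project docs, then run a similarity query filtered on "AutoGen"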
client = chromadb.PersistentClient(path="/tmp/chromadb")
create_vector_db_from_dir(
dir_path="./website/docs",
client=client,
collection_name="autogen-docs",
custom_text_types=["txt", "md", "rtf", "rst"],
get_or_create=True,
)
results = query_vector_db(
query_texts=[
"How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?",
],
n_results=4,
client=client,
collection_name="autogen-docs",
search_string="AutoGen",
)
print(results["ids"][0])
assert len(results["ids"][0]) == 4
@pytest.mark.skipif(
not HAS_UNSTRUCTURED,
reason="do not run if unstructured is not installed",
)
def test_unstructured(self):
pdf_file_path = os.path.join(test_dir, "example.pdf")
txt_file_path = os.path.join(test_dir, "example.txt")
word_file_path = os.path.join(test_dir, "example.docx")
chunks = split_files_to_chunks([pdf_file_path, txt_file_path, word_file_path])
assert all(
isinstance(chunk, str) and "AutoGen is an advanced tool designed to assist developers" in chunk.strip()
for chunk in chunks
)
if __name__ == "__main__":
pytest.main()
db_path = "/tmp/test_retrieve_utils_chromadb.db"
if os.path.exists(db_path):
os.remove(db_path) # Delete the database file after tests are finished
| [
"lancedb.connect"
] | [((1011, 1073), 'pytest.mark.skipif', 'pytest.mark.skipif', (['skip'], {'reason': '"""dependency is not installed"""'}), "(skip, reason='dependency is not installed')\n", (1029, 1073), False, 'import pytest\n'), ((609, 634), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (624, 634), False, 'import os\n'), ((8685, 8784), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not HAS_UNSTRUCTURED)'], {'reason': '"""do not run if unstructured is not installed"""'}), "(not HAS_UNSTRUCTURED, reason=\n 'do not run if unstructured is not installed')\n", (8703, 8784), False, 'import pytest\n'), ((9322, 9335), 'pytest.main', 'pytest.main', ([], {}), '()\n', (9333, 9335), False, 'import pytest\n'), ((9397, 9420), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (9411, 9420), False, 'import os\n'), ((1189, 1237), 'autogen.retrieve_utils.split_text_to_chunks', 'split_text_to_chunks', (['long_text'], {'max_tokens': '(1000)'}), '(long_text, max_tokens=1000)\n', (1209, 1237), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((1564, 1601), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (1576, 1601), False, 'import os\n'), ((1780, 1817), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (1792, 1817), False, 'import os\n'), ((1842, 1879), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (1854, 1879), False, 'import os\n'), ((1897, 1950), 'autogen.retrieve_utils.split_files_to_chunks', 'split_files_to_chunks', (['[pdf_file_path, txt_file_path]'], {}), '([pdf_file_path, txt_file_path])\n', (1918, 1950), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2185, 2230), 'autogen.retrieve_utils.get_files_from_dir', 'get_files_from_dir', (['test_dir'], {'recursive': '(False)'}), '(test_dir, recursive=False)\n', (2203, 2230), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2314, 2351), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (2326, 2351), False, 'import os\n'), ((2376, 2413), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (2388, 2413), False, 'import os\n'), ((2430, 2480), 'autogen.retrieve_utils.get_files_from_dir', 'get_files_from_dir', (['[pdf_file_path, txt_file_path]'], {}), '([pdf_file_path, txt_file_path])\n', (2448, 2480), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3404, 3437), 'autogen.retrieve_utils.is_url', 'is_url', (['"""https://www.example.com"""'], {}), "('https://www.example.com')\n", (3410, 3437), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3592, 3615), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (3606, 3615), False, 'import os\n'), ((3979, 4002), 
'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (3993, 4002), False, 'import os\n'), ((4273, 4316), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', (["['autogen']"], {'client': 'client'}), "(['autogen'], client=client)\n", (4288, 4316), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((7313, 7352), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (7338, 7352), False, 'import chromadb\n'), ((7670, 7767), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', (["['autogen']"], {'client': 'client', 'collection_name': '"""mytestcollection"""', 'n_results': '(1)'}), "(['autogen'], client=client, collection_name=\n 'mytestcollection', n_results=1)\n", (7685, 7767), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((7992, 8039), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""/tmp/chromadb"""'}), "(path='/tmp/chromadb')\n", (8017, 8039), False, 'import chromadb\n'), ((8048, 8222), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', ([], {'dir_path': '"""./website/docs"""', 'client': 'client', 'collection_name': '"""autogen-docs"""', 'custom_text_types': "['txt', 'md', 'rtf', 'rst']", 'get_or_create': '(True)'}), "(dir_path='./website/docs', client=client,\n collection_name='autogen-docs', custom_text_types=['txt', 'md', 'rtf',\n 'rst'], get_or_create=True)\n", (8073, 8222), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((8304, 8514), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', ([], {'query_texts': "['How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?'\n ]", 'n_results': '(4)', 'client': 'client', 'collection_name': '"""autogen-docs"""', 'search_string': '"""AutoGen"""'}), "(query_texts=[\n 'How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?'\n ], n_results=4, client=client, collection_name='autogen-docs',\n search_string='AutoGen')\n", (8319, 8514), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((8860, 8897), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (8872, 8897), False, 'import os\n'), ((8922, 8959), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (8934, 8959), False, 'import os\n'), ((8985, 9023), 'os.path.join', 'os.path.join', (['test_dir', '"""example.docx"""'], {}), "(test_dir, 'example.docx')\n", (8997, 9023), False, 'import os\n'), ((9041, 9110), 'autogen.retrieve_utils.split_files_to_chunks', 'split_files_to_chunks', (['[pdf_file_path, txt_file_path, word_file_path]'], {}), '([pdf_file_path, txt_file_path, word_file_path])\n', (9062, 9110), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((9430, 9448), 'os.remove', 'os.remove', (['db_path'], {}), '(db_path)\n', 
(9439, 9448), False, 'import os\n'), ((1389, 1418), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1402, 1418), False, 'import pytest\n'), ((1432, 1496), 'autogen.retrieve_utils.split_text_to_chunks', 'split_text_to_chunks', (["('A' * 10000)"], {'chunk_mode': '"""bogus_chunk_mode"""'}), "('A' * 10000, chunk_mode='bogus_chunk_mode')\n", (1452, 1496), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3457, 3476), 'autogen.retrieve_utils.is_url', 'is_url', (['"""not_a_url"""'], {}), "('not_a_url')\n", (3463, 3476), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3638, 3677), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (3663, 3677), False, 'import chromadb\n'), ((3713, 3752), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (3738, 3752), False, 'import chromadb\n'), ((3765, 3815), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', (['test_dir'], {'client': 'client'}), '(test_dir, client=client)\n', (3790, 3815), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((4025, 4064), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (4050, 4064), False, 'import chromadb\n'), ((4151, 4190), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (4176, 4190), False, 'import chromadb\n'), ((4203, 4253), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', (['test_dir'], {'client': 'client'}), '(test_dir, client=client)\n', (4228, 4253), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((4737, 4761), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (4752, 4761), False, 'import lancedb\n'), ((7400, 7437), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (7412, 7437), False, 'import os\n'), ((2250, 2270), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2264, 2270), False, 'import os\n'), ((2500, 2520), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2514, 2520), False, 'import os\n'), ((2668, 2718), 'os.path.join', 'os.path.join', (['test_dir', '""".."""', '""".."""', '"""website/docs"""'], {}), "(test_dir, '..', '..', 'website/docs')\n", (2680, 2718), False, 'import os\n'), ((2878, 2898), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2892, 2898), False, 'import os\n'), ((3046, 3096), 'os.path.join', 'os.path.join', (['test_dir', '""".."""', '""".."""', '"""website/docs"""'], {}), "(test_dir, '..', '..', 'website/docs')\n", (3058, 3096), False, 'import os\n'), ((3290, 3310), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (3304, 3310), False, 'import os\n'), ((5817, 5841), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (5832, 5841), False, 'import lancedb\n'), ((1257, 1275), 
'autogen.token_count_utils.count_token', 'count_token', (['chunk'], {}), '(chunk)\n', (1268, 1275), False, 'from autogen.token_count_utils import count_token\n'), ((1659, 1695), 'autogen.retrieve_utils.extract_text_from_pdf', 'extract_text_from_pdf', (['pdf_file_path'], {}), '(pdf_file_path)\n', (1680, 1695), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n')] |
from langchain.vectorstores import LanceDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def test_lancedb() -> None:
import lancedb
embeddings = FakeEmbeddings()
db = lancedb.connect("/tmp/lancedb")
texts = ["text 1", "text 2", "item 3"]
vectors = embeddings.embed_documents(texts)
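    # the LanceDB vectorstore wraps a pre-created table; mode="overwrite" keeps reruns idempotent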
table = db.create_table(
"my_table",
data=[
{"vector": vectors[idx], "id": text, "text": text}
for idx, text in enumerate(texts)
],
mode="overwrite",
)
store = LanceDB(table, embeddings)
result = store.similarity_search("text 1")
result_texts = [doc.page_content for doc in result]
assert "text 1" in result_texts
def test_lancedb_add_texts() -> None:
import lancedb
embeddings = FakeEmbeddings()
db = lancedb.connect("/tmp/lancedb")
texts = ["text 1"]
vectors = embeddings.embed_documents(texts)
table = db.create_table(
"my_table",
data=[
{"vector": vectors[idx], "id": text, "text": text}
for idx, text in enumerate(texts)
],
mode="overwrite",
)
store = LanceDB(table, embeddings)
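    # add_texts embeds the new strings and appends them to the existing table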
store.add_texts(["text 2"])
result = store.similarity_search("text 2")
result_texts = [doc.page_content for doc in result]
assert "text 2" in result_texts
| [
"lancedb.connect"
] | [((190, 206), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (204, 206), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((216, 247), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (231, 247), False, 'import lancedb\n'), ((567, 593), 'langchain.vectorstores.LanceDB', 'LanceDB', (['table', 'embeddings'], {}), '(table, embeddings)\n', (574, 593), False, 'from langchain.vectorstores import LanceDB\n'), ((810, 826), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (824, 826), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((836, 867), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (851, 867), False, 'import lancedb\n'), ((1167, 1193), 'langchain.vectorstores.LanceDB', 'LanceDB', (['table', 'embeddings'], {}), '(table, embeddings)\n', (1174, 1193), False, 'from langchain.vectorstores import LanceDB\n')] |