code
stringlengths 193
97.3k
| apis
sequencelengths 1
8
| extract_api
stringlengths 113
214k
|
---|---|---|
# Ultralytics YOLO 🚀, AGPL-3.0 license
import getpass
from typing import List
import cv2
import numpy as np
import pandas as pd
from ultralytics.data.augment import LetterBox
from ultralytics.utils import LOGGER as logger
from ultralytics.utils import SETTINGS
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.ops import xyxy2xywh
from ultralytics.utils.plotting import plot_images
def get_table_schema(vector_size):
    """Return a LanceModel schema class for the explorer's main image table.

    Args:
        vector_size (int): Dimensionality of the embedding vector stored per row.

    Returns:
        (type): A ``LanceModel`` subclass describing one image row: file path,
            per-object labels/classes/boxes/masks/keypoints, and its embedding.
    """
    # Imported lazily so lancedb is only required when this feature is used.
    from lancedb.pydantic import LanceModel, Vector

    class Schema(LanceModel):
        im_file: str
        labels: List[str]
        cls: List[int]
        bboxes: List[List[float]]
        masks: List[List[List[int]]]
        keypoints: List[List[List[float]]]
        vector: Vector(vector_size)

    return Schema
def get_sim_index_schema():
    """Return a LanceModel schema class for the similarity-index table.

    Returns:
        (type): A ``LanceModel`` subclass with one row per image: its index,
            file path, similar-image count and the list of similar image files.
    """
    # Imported lazily so lancedb is only required when this feature is used.
    from lancedb.pydantic import LanceModel

    class Schema(LanceModel):
        idx: int
        im_file: str
        count: int
        sim_im_files: List[str]

    return Schema
def sanitize_batch(batch, dataset_info):
    """Normalize a raw dataset batch into plain-Python lists, sorted by class id.

    Args:
        batch (dict): Batch with tensor-valued "cls" and "bboxes" entries and
            optional "masks"/"keypoints" entries.
        dataset_info (dict): Dataset metadata; ``names`` maps class id -> label.

    Returns:
        (dict): The same dict, mutated in place, with list-valued entries.
    """
    class_ids = batch["cls"].flatten().int().tolist()
    batch["cls"] = class_ids
    # Pair each box with its class, then order the pairs by class id.
    paired = sorted(zip(batch["bboxes"].tolist(), class_ids), key=lambda pair: pair[1])
    batch["bboxes"] = [box for box, _ in paired]
    batch["cls"] = [cls_id for _, cls_id in paired]
    batch["labels"] = [dataset_info["names"][cls_id] for cls_id in batch["cls"]]
    # Fall back to nested-empty sentinels when the optional keys are absent.
    batch["masks"] = batch["masks"].tolist() if "masks" in batch else [[[]]]
    batch["keypoints"] = batch["keypoints"].tolist() if "keypoints" in batch else [[[]]]
    return batch
def plot_query_result(similar_set, plot_labels=True):
    """
    Plot images from the similar set.

    Args:
        similar_set (list): Pyarrow or pandas object containing the similar data points
        plot_labels (bool): Whether to plot labels or not

    Returns:
        The image mosaic produced by ``plot_images``.
    """
    similar_set = (
        similar_set.to_dict(orient="list") if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict()
    )
    empty_masks = [[[]]]
    empty_boxes = [[]]
    images = similar_set.get("im_file", [])
    # BUG FIX: the original used `is not empty_boxes`, an identity test against a
    # fresh list literal, which is always True — the "no annotations" sentinels
    # written by sanitize_batch were never detected. It also did
    # `similar_set.get("masks")[0]`, which raises TypeError when the key is absent.
    bboxes = similar_set.get("bboxes") or []
    if bboxes == empty_boxes:  # sentinel for "no boxes in this result set"
        bboxes = []
    masks = similar_set.get("masks") or []
    if masks and masks[0] == empty_masks:
        masks = []
    kpts = similar_set.get("keypoints") or []
    if kpts and kpts[0] == empty_masks:
        kpts = []
    cls = similar_set.get("cls", [])
    plot_size = 640
    imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], []
    for i, imf in enumerate(images):
        im = cv2.imread(imf)
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        h, w = im.shape[:2]
        # Scale factor applied to annotations so they match the letterboxed image.
        r = min(plot_size / h, plot_size / w)
        imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1))
        if plot_labels:
            if len(bboxes) > i and len(bboxes[i]) > 0:
                box = np.array(bboxes[i], dtype=np.float32)
                box[:, [0, 2]] *= r  # scale x coordinates
                box[:, [1, 3]] *= r  # scale y coordinates
                plot_boxes.append(box)
            if len(masks) > i and len(masks[i]) > 0:
                mask = np.array(masks[i], dtype=np.uint8)[0]
                plot_masks.append(LetterBox(plot_size, center=False)(image=mask))
            if len(kpts) > i and kpts[i] is not None:
                kpt = np.array(kpts[i], dtype=np.float32)
                kpt[:, :, :2] *= r
                plot_kpts.append(kpt)
        # BUG FIX: guard against bboxes being shorter than images (the original
        # indexed bboxes[i] unconditionally and could raise IndexError).
        n_boxes = len(bboxes[i]) if len(bboxes) > i else 0
        batch_idx.append(np.ones(n_boxes) * i)
    # NOTE(review): np.stack raises on an empty `imgs` — assumes at least one
    # image in the result set; confirm upstream guarantees this.
    imgs = np.stack(imgs, axis=0)
    masks = np.stack(plot_masks, axis=0) if plot_masks else np.zeros(0, dtype=np.uint8)
    kpts = np.concatenate(plot_kpts, axis=0) if plot_kpts else np.zeros((0, 51), dtype=np.float32)
    boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if plot_boxes else np.zeros(0, dtype=np.float32)
    batch_idx = np.concatenate(batch_idx, axis=0)
    cls = np.concatenate([np.array(c, dtype=np.int32) for c in cls], axis=0)
    return plot_images(
        imgs, batch_idx, cls, bboxes=boxes, masks=masks, kpts=kpts, max_subplots=len(images), save=False, threaded=False
    )
def prompt_sql_query(query):
    """Translate a natural-language request into a single SQL query via OpenAI chat.

    Uses the key stored in Ultralytics SETTINGS (prompting interactively if it is
    missing) and a fixed system prompt describing the explorer table schema.

    Args:
        query (str): Natural-language description of the rows to retrieve.

    Returns:
        (str): The SQL query text produced by the model.
    """
    check_requirements("openai>=1.6.1")
    # Imported lazily so openai is only required when this feature is used.
    from openai import OpenAI

    if not SETTINGS["openai_api_key"]:
        logger.warning("OpenAI API key not found in settings. Please enter your API key below.")
        openai_api_key = getpass.getpass("OpenAI API key: ")
        SETTINGS.update({"openai_api_key": openai_api_key})  # persist for next time
    openai = OpenAI(api_key=SETTINGS["openai_api_key"])
    messages = [
        {
            "role": "system",
            "content": """
            You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query based on
            the following schema and a user request. You only need to output the format with fixed selection
            statement that selects everything from "'table'", like `SELECT * from 'table'`
            Schema:
            im_file: string not null
            labels: list<item: string> not null
            child 0, item: string
            cls: list<item: int64> not null
            child 0, item: int64
            bboxes: list<item: list<item: double>> not null
            child 0, item: list<item: double>
            child 0, item: double
            masks: list<item: list<item: list<item: int64>>> not null
            child 0, item: list<item: list<item: int64>>
            child 0, item: list<item: int64>
            child 0, item: int64
            keypoints: list<item: list<item: list<item: double>>> not null
            child 0, item: list<item: list<item: double>>
            child 0, item: list<item: double>
            child 0, item: double
            vector: fixed_size_list<item: float>[256] not null
            child 0, item: float
            Some details about the schema:
            - the "labels" column contains the string values like 'person' and 'dog' for the respective objects
            in each image
            - the "cls" column contains the integer values on these classes that map them the labels
            Example of a correct query:
            request - Get all data points that contain 2 or more people and at least one dog
            correct query-
            SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1;
            """,
        },
        {"role": "user", "content": f"{query}"},
    ]
    response = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
    return response.choices[0].message.content
| [
"lancedb.pydantic.Vector"
] | [((3694, 3716), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (3702, 3716), True, 'import numpy as np\n'), ((4027, 4060), 'numpy.concatenate', 'np.concatenate', (['batch_idx'], {'axis': '(0)'}), '(batch_idx, axis=0)\n', (4041, 4060), True, 'import numpy as np\n'), ((4394, 4429), 'ultralytics.utils.checks.check_requirements', 'check_requirements', (['"""openai>=1.6.1"""'], {}), "('openai>=1.6.1')\n", (4412, 4429), False, 'from ultralytics.utils.checks import check_requirements\n'), ((4731, 4773), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "SETTINGS['openai_api_key']"}), "(api_key=SETTINGS['openai_api_key'])\n", (4737, 4773), False, 'from openai import OpenAI\n'), ((798, 817), 'lancedb.pydantic.Vector', 'Vector', (['vector_size'], {}), '(vector_size)\n', (804, 817), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2746, 2761), 'cv2.imread', 'cv2.imread', (['imf'], {}), '(imf)\n', (2756, 2761), False, 'import cv2\n'), ((2775, 2810), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (2787, 2810), False, 'import cv2\n'), ((3729, 3757), 'numpy.stack', 'np.stack', (['plot_masks'], {'axis': '(0)'}), '(plot_masks, axis=0)\n', (3737, 3757), True, 'import numpy as np\n'), ((3777, 3804), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.uint8'}), '(0, dtype=np.uint8)\n', (3785, 3804), True, 'import numpy as np\n'), ((3816, 3849), 'numpy.concatenate', 'np.concatenate', (['plot_kpts'], {'axis': '(0)'}), '(plot_kpts, axis=0)\n', (3830, 3849), True, 'import numpy as np\n'), ((3868, 3903), 'numpy.zeros', 'np.zeros', (['(0, 51)'], {'dtype': 'np.float32'}), '((0, 51), dtype=np.float32)\n', (3876, 3903), True, 'import numpy as np\n'), ((3981, 4010), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (3989, 4010), True, 'import numpy as np\n'), ((4508, 4601), 'ultralytics.utils.LOGGER.warning', 'logger.warning', (['"""OpenAI API key not found in 
settings. Please enter your API key below."""'], {}), "(\n 'OpenAI API key not found in settings. Please enter your API key below.')\n", (4522, 4601), True, 'from ultralytics.utils import LOGGER as logger\n'), ((4622, 4657), 'getpass.getpass', 'getpass.getpass', (['"""OpenAI API key: """'], {}), "('OpenAI API key: ')\n", (4637, 4657), False, 'import getpass\n'), ((4666, 4717), 'ultralytics.utils.SETTINGS.update', 'SETTINGS.update', (["{'openai_api_key': openai_api_key}"], {}), "({'openai_api_key': openai_api_key})\n", (4681, 4717), False, 'from ultralytics.utils import SETTINGS\n'), ((3926, 3960), 'numpy.concatenate', 'np.concatenate', (['plot_boxes'], {'axis': '(0)'}), '(plot_boxes, axis=0)\n', (3940, 3960), True, 'import numpy as np\n'), ((4087, 4114), 'numpy.array', 'np.array', (['c'], {'dtype': 'np.int32'}), '(c, dtype=np.int32)\n', (4095, 4114), True, 'import numpy as np\n'), ((3071, 3108), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3079, 3108), True, 'import numpy as np\n'), ((3492, 3527), 'numpy.array', 'np.array', (['kpts[i]'], {'dtype': 'np.float32'}), '(kpts[i], dtype=np.float32)\n', (3500, 3527), True, 'import numpy as np\n'), ((3296, 3330), 'numpy.array', 'np.array', (['masks[i]'], {'dtype': 'np.uint8'}), '(masks[i], dtype=np.uint8)\n', (3304, 3330), True, 'import numpy as np\n'), ((2905, 2939), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (2914, 2939), False, 'from ultralytics.data.augment import LetterBox\n'), ((3368, 3402), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (3377, 3402), False, 'from ultralytics.data.augment import LetterBox\n'), ((3638, 3675), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3646, 3675), True, 'import numpy as np\n')] |
# type: ignore
import uuid
from abc import abstractmethod  # used by LanceDBConnector's abstract method stubs
from datetime import datetime
from typing import Dict, Iterator, List, Optional

import lancedb
from lancedb.pydantic import LanceModel, Vector
from tqdm import tqdm

from memgpt.agent_store.storage import StorageConnector, TableType
from memgpt.config import AgentConfig, MemGPTConfig
from memgpt.constants import MEMGPT_DIR
from memgpt.data_types import Message, Passage, Record, Source
from memgpt.utils import printd
""" Initial implementation - not complete """
def get_db_model(table_name: str, table_type: TableType):
    """Create a LanceDB data-model class for the given table type.

    Args:
        table_name: Name of the table (currently unused; kept for API parity).
        table_type: Which memory table the model describes.

    Returns:
        A ``LanceModel`` subclass with a ``to_record`` converter.

    Raises:
        ValueError: If no model is implemented for *table_type*.
    """
    config = MemGPTConfig.load()
    if table_type == TableType.ARCHIVAL_MEMORY or table_type == TableType.PASSAGES:
        # create schema for archival memory
        class PassageModel(LanceModel):
            """Defines data model for storing Passages (consisting of text, embedding)"""

            id: uuid.UUID
            user_id: str
            text: str
            doc_id: str
            agent_id: str
            data_source: str
            embedding: Vector(config.default_embedding_config.embedding_dim)
            metadata_: Dict

            def __repr__(self):
                return f"<Passage(passage_id='{self.id}', text='{self.text}', embedding='{self.embedding})>"

            def to_record(self):
                """Convert this row into a memgpt Passage object."""
                return Passage(
                    text=self.text,
                    embedding=self.embedding,
                    doc_id=self.doc_id,
                    user_id=self.user_id,
                    id=self.id,
                    data_source=self.data_source,
                    agent_id=self.agent_id,
                    metadata=self.metadata_,
                )

        return PassageModel
    elif table_type == TableType.RECALL_MEMORY:

        class MessageModel(LanceModel):
            """Defines data model for storing Message objects"""

            __abstract__ = True  # this line is necessary
            # Assuming message_id is the primary key
            id: uuid.UUID
            user_id: str
            agent_id: str
            # openai info
            role: str
            name: str
            text: str
            model: str
            user: str
            # function info
            function_name: str
            function_args: str
            function_response: str
            # BUG FIX: these two previously used `=` (plain class attributes) rather
            # than `:` annotations, so pydantic/LanceModel never registered them as
            # fields — made consistent with PassageModel above.
            embedding: Vector(config.default_embedding_config.embedding_dim)
            # NOTE(review): the original comment claimed a current-time default,
            # but none was ever set — confirm whether a default is needed.
            created_at: datetime

            def __repr__(self):
                return f"<Message(message_id='{self.id}', text='{self.text}', embedding='{self.embedding})>"

            def to_record(self):
                """Convert this row into a memgpt Message object."""
                return Message(
                    user_id=self.user_id,
                    agent_id=self.agent_id,
                    role=self.role,
                    name=self.name,
                    text=self.text,
                    model=self.model,
                    function_name=self.function_name,
                    function_args=self.function_args,
                    function_response=self.function_response,
                    embedding=self.embedding,
                    created_at=self.created_at,
                    id=self.id,
                )

        return MessageModel
    else:
        raise ValueError(f"Table type {table_type} not implemented")
class LanceDBConnector(StorageConnector):
    """Storage via LanceDB"""

    # TODO: this should probably eventually be moved into a parent DB class
    def __init__(self, name: Optional[str] = None, agent_config: Optional[AgentConfig] = None):
        # TODO
        pass

    def generate_where_filter(self, filters: Dict) -> str:
        """Build a SQL-style filter expression by ANDing `key=value` pairs.

        Args:
            filters: Mapping of column name to required value.

        Returns:
            A string such as ``"a=1 AND b=2"``.
        """
        where_filters = [f"{key}={value}" for key, value in filters.items()]
        # BUG FIX: the original called `where_filters.join(" AND ")` — lists have
        # no `.join`; `str.join` must be called on the separator string.
        return " AND ".join(where_filters)

    # NOTE(review): the mutable `{}` defaults below are kept for interface
    # compatibility, but should eventually become `None` sentinels.
    @abstractmethod
    def get_all_paginated(self, filters: Optional[Dict] = {}, page_size: Optional[int] = 1000) -> Iterator[List[Record]]:
        # TODO
        pass

    @abstractmethod
    def get_all(self, filters: Optional[Dict] = {}, limit=10) -> List[Record]:
        # TODO
        pass

    @abstractmethod
    def get(self, id: uuid.UUID) -> Optional[Record]:
        # TODO
        pass

    @abstractmethod
    def size(self, filters: Optional[Dict] = {}) -> int:
        # TODO
        pass

    @abstractmethod
    def insert(self, record: Record):
        # TODO
        pass

    @abstractmethod
    def insert_many(self, records: List[Record], show_progress=False):
        # TODO
        pass

    @abstractmethod
    def query(self, query: str, query_vec: List[float], top_k: int = 10, filters: Optional[Dict] = {}) -> List[Record]:
        # TODO
        pass

    @abstractmethod
    def query_date(self, start_date, end_date):
        # TODO
        pass

    @abstractmethod
    def query_text(self, query):
        # TODO
        pass

    @abstractmethod
    def delete_table(self):
        # TODO
        pass

    @abstractmethod
    def delete(self, filters: Optional[Dict] = {}):
        # TODO
        pass

    @abstractmethod
    def save(self):
        # TODO
        pass
| [
"lancedb.pydantic.Vector"
] | [((639, 658), 'memgpt.config.MemGPTConfig.load', 'MemGPTConfig.load', ([], {}), '()\n', (656, 658), False, 'from memgpt.config import AgentConfig, MemGPTConfig\n'), ((1094, 1147), 'lancedb.pydantic.Vector', 'Vector', (['config.default_embedding_config.embedding_dim'], {}), '(config.default_embedding_config.embedding_dim)\n', (1100, 1147), False, 'from lancedb.pydantic import Vector, LanceModel\n'), ((1375, 1565), 'memgpt.data_types.Passage', 'Passage', ([], {'text': 'self.text', 'embedding': 'self.embedding', 'doc_id': 'self.doc_id', 'user_id': 'self.user_id', 'id': 'self.id', 'data_source': 'self.data_source', 'agent_id': 'self.agent_id', 'metadata': 'self.metadata_'}), '(text=self.text, embedding=self.embedding, doc_id=self.doc_id,\n user_id=self.user_id, id=self.id, data_source=self.data_source,\n agent_id=self.agent_id, metadata=self.metadata_)\n', (1382, 1565), False, 'from memgpt.data_types import Record, Message, Passage, Source\n'), ((2399, 2452), 'lancedb.pydantic.Vector', 'Vector', (['config.default_embedding_config.embedding_dim'], {}), '(config.default_embedding_config.embedding_dim)\n', (2405, 2452), False, 'from lancedb.pydantic import Vector, LanceModel\n'), ((2763, 3078), 'memgpt.data_types.Message', 'Message', ([], {'user_id': 'self.user_id', 'agent_id': 'self.agent_id', 'role': 'self.role', 'name': 'self.name', 'text': 'self.text', 'model': 'self.model', 'function_name': 'self.function_name', 'function_args': 'self.function_args', 'function_response': 'self.function_response', 'embedding': 'self.embedding', 'created_at': 'self.created_at', 'id': 'self.id'}), '(user_id=self.user_id, agent_id=self.agent_id, role=self.role, name=\n self.name, text=self.text, model=self.model, function_name=self.\n function_name, function_args=self.function_args, function_response=self\n .function_response, embedding=self.embedding, created_at=self.\n created_at, id=self.id)\n', (2770, 3078), False, 'from memgpt.data_types import Record, Message, Passage, 
Source\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from lancedb.utils import CONFIG
# Root command group; subcommands register themselves via @cli.command below.
@click.group()
@click.version_option(help="LanceDB command line interface entry point")
def cli():
    "LanceDB command line interface"  # click shows this as the group help text
# Help text for the `diagnostics` subcommand (shown by `--help`).
diagnostics_help = """
Enable or disable LanceDB diagnostics. When enabled, LanceDB will send anonymous events
to help us improve LanceDB. These diagnostics are used only for error reporting and no
data is collected. You can find more about diagnosis on our docs:
https://lancedb.github.io/lancedb/cli_config/
"""
@cli.command(help=diagnostics_help)
@click.option("--enabled/--disabled", default=True)
def diagnostics(enabled):
    """Persist the diagnostics on/off switch into the LanceDB config."""
    # `enabled` arrives as a bool from the click flag; the original's
    # `True if enabled else False` ternary was redundant.
    CONFIG.update({"diagnostics": bool(enabled)})
    click.echo(f"LanceDB diagnostics is {'enabled' if enabled else 'disabled'}")
@cli.command(help="Show current LanceDB configuration")
def config():
    """Print each LanceDB configuration entry as a `name (value)` line."""
    # TODO: pretty print as table with colors and formatting
    click.echo("Current LanceDB configuration:")
    cfg = CONFIG.copy()
    cfg.pop("uuid")  # Don't show uuid as it is not configurable
    for key, value in cfg.items():
        # f-string replaces the legacy str.format call; output is identical.
        click.echo(f"{key} ({value})")
| [
"lancedb.utils.CONFIG.copy",
"lancedb.utils.CONFIG.update"
] | [((641, 654), 'click.group', 'click.group', ([], {}), '()\n', (652, 654), False, 'import click\n'), ((656, 727), 'click.version_option', 'click.version_option', ([], {'help': '"""LanceDB command line interface entry point"""'}), "(help='LanceDB command line interface entry point')\n", (676, 727), False, 'import click\n'), ((1131, 1181), 'click.option', 'click.option', (['"""--enabled/--disabled"""'], {'default': '(True)'}), "('--enabled/--disabled', default=True)\n", (1143, 1181), False, 'import click\n'), ((1212, 1270), 'lancedb.utils.CONFIG.update', 'CONFIG.update', (["{'diagnostics': True if enabled else False}"], {}), "({'diagnostics': True if enabled else False})\n", (1225, 1270), False, 'from lancedb.utils import CONFIG\n'), ((1275, 1360), 'click.echo', 'click.echo', (["('LanceDB diagnostics is %s' % ('enabled' if enabled else 'disabled'))"], {}), "('LanceDB diagnostics is %s' % ('enabled' if enabled else 'disabled')\n )\n", (1285, 1360), False, 'import click\n'), ((1493, 1537), 'click.echo', 'click.echo', (['"""Current LanceDB configuration:"""'], {}), "('Current LanceDB configuration:')\n", (1503, 1537), False, 'import click\n'), ((1548, 1561), 'lancedb.utils.CONFIG.copy', 'CONFIG.copy', ([], {}), '()\n', (1559, 1561), False, 'from lancedb.utils import CONFIG\n')] |
# Copyright (c) Hegel AI, Inc.
# All rights reserved.
#
# This source code's license can be found in the
# LICENSE file in the root directory of this source tree.
import itertools
import warnings
import pandas as pd
from typing import Callable, Optional
try:
import lancedb
from lancedb.embeddings import with_embeddings
except ImportError:
lancedb = None
import logging
from time import perf_counter
from .experiment import Experiment
from ._utils import _get_dynamic_columns
VALID_TASKS = [""]
def query_builder(
    table: "lancedb.Table",
    embed_fn: Callable,
    text: str,
    metric: str = "cosine",
    limit: int = 3,
    filter: str = None,
    nprobes: int = None,
    refine_factor: int = None,
):
    """Embed *text* and run a vector search against *table*.

    The first vector produced by ``embed_fn(text)`` is used as the query.
    ``nprobes`` and ``refine_factor`` are accepted for signature compatibility
    but ignored (a warning is emitted if either is supplied).

    Returns:
        A pandas DataFrame with the top ``limit`` matches.
    """
    if nprobes is not None or refine_factor is not None:
        warnings.warn(
            "`nprobes` and `refine_factor` are not used by the default `query_builder`. "
            "Feel free to open an issue to request adding support for them."
        )
    query_vector = embed_fn(text)[0]
    search_query = table.search(query_vector).metric(metric)
    if filter:
        search_query = search_query.where(filter)
    return search_query.limit(limit).to_df()
class LanceDBExperiment(Experiment):
    r"""
    Perform an experiment with ``LanceDB`` to test different embedding functions or retrieval arguments.
    You can query from an existing table, or create a new one (and insert documents into it) during
    the experiment.

    Args:
        uri (str): LanceDB uri to interact with your database. Default is "lancedb"
        table_name (str): the table that you will get or create. Default is "table"
        use_existing_table (bool): determines whether to create a new collection or use
            an existing one
        embedding_fns (list[Callable]): embedding functions to test in the experiment
            by default only uses the default one in LanceDB
        query_args (dict[str, list]): parameters used to query the table
            Each value is expected to be a list to create all possible combinations
        data (Optional[list[dict]]): documents or embeddings that will be added to
            the newly created table
        text_col_name (str): name of the text column in the table. Default is "text"
        clean_up (bool): determines whether to drop the table after the experiment ends
    """

    def __init__(
        self,
        embedding_fns: dict[str, Callable],
        query_args: dict[str, list],
        uri: str = "lancedb",
        table_name: str = "table",
        use_existing_table: bool = False,
        data: Optional[list[dict]] = None,
        text_col_name: str = "text",
        clean_up: bool = False,
    ):
        if lancedb is None:
            # lancedb is an optional dependency; fail loudly with install hints.
            raise ModuleNotFoundError(
                "Package `lancedb` is required to be installed to use this experiment."
                "Please use `pip install lancedb` to install the package"
            )
        self.table_name = table_name
        self.use_existing_table = use_existing_table
        self.embedding_fns = embedding_fns
        # Exactly one data source is allowed: an existing table OR fresh documents.
        if use_existing_table and data:
            raise RuntimeError("You can either use an existing collection or create a new one during the experiment.")
        if not use_existing_table and data is None:
            raise RuntimeError("If you choose to create a new collection, you must also add to it.")
        self.data = data if data is not None else []
        self.argument_combos: list[dict] = []  # populated by prepare()
        self.text_col_name = text_col_name
        self.db = lancedb.connect(uri)
        self.completion_fn = self.lancedb_completion_fn
        self.query_args = query_args
        self.clean_up = clean_up
        super().__init__()

    def prepare(self):
        """Expand ``query_args`` into the cross-product of all argument combinations."""
        for combo in itertools.product(*self.query_args.values()):
            self.argument_combos.append(dict(zip(self.query_args.keys(), combo)))

    def run(self, runs: int = 1):
        """Execute every argument combination against every embedding function.

        Args:
            runs (int): how many times each combination is executed (for latency stats).
        """
        input_args = []  # This will be used to construct DataFrame table
        results = []
        latencies = []
        if not self.argument_combos:
            logging.info("Preparing first...")
            self.prepare()
        for emb_fn_name, emb_fn in self.embedding_fns.items():
            if self.use_existing_table:  # Use existing table
                table = self.db.open_table(self.table_name)
                if not table:
                    raise RuntimeError(f"Table {self.table_name} does not exist.")
            else:  # Create table and insert data
                data = with_embeddings(emb_fn, self.data, self.text_col_name)
                table = self.db.create_table(self.table_name, data, mode="overwrite")
            # Query from table
            for query_arg_dict in self.argument_combos:
                query_args = query_arg_dict.copy()
                for _ in range(runs):
                    start = perf_counter()
                    results.append(self.lancedb_completion_fn(table=table, embedding_fn=emb_fn, **query_args))
                    latencies.append(perf_counter() - start)
                query_args["emb_fn"] = emb_fn_name  # Saving for visualization
                input_args.append(query_args)
        # Clean up
        if self.clean_up:
            self.db.drop_table(self.table_name)
        self._construct_result_dfs(input_args, results, latencies)

    def lancedb_completion_fn(self, table, embedding_fn, **kwargs):
        """Thin adapter so ``run`` can treat querying like a completion function."""
        return query_builder(table, embedding_fn, **kwargs)

    def _construct_result_dfs(
        self,
        input_args: list[dict[str, object]],
        results: list[dict[str, object]],
        latencies: list[float],
    ):
        r"""
        Construct a few DataFrames that contain all relevant data (i.e. input arguments, results, evaluation metrics).
        This version only extract the most relevant objects returned by LanceDB.

        Args:
            input_args (list[dict[str, object]]): list of dictionaries, where each of them is a set of
                input argument that was passed into the model
            results (list[dict[str, object]]): list of responses from the model
            latencies (list[float]): list of latency measurements
        """
        # `input_arg_df` contains all all input args
        input_arg_df = pd.DataFrame(input_args)
        # `dynamic_input_arg_df` contains input args that has more than one unique values
        dynamic_input_arg_df = _get_dynamic_columns(input_arg_df)
        # `response_df` contains the extracted response (often being the text response)
        response_dict = dict()
        response_dict["top doc ids"] = [self._extract_top_doc_ids(result) for result in results]
        response_dict["distances"] = [self._extract_lancedb_dists(result) for result in results]
        response_dict["documents"] = [self._extract_lancedb_docs(result) for result in results]
        response_df = pd.DataFrame(response_dict)
        # `result_df` contains everything returned by the completion function
        result_df = response_df  # pd.concat([self.response_df, pd.DataFrame(results)], axis=1)
        # `score_df` contains computed metrics (e.g. latency, evaluation metrics)
        self.score_df = pd.DataFrame({"latency": latencies})
        # `partial_df` contains some input arguments, extracted responses, and score
        self.partial_df = pd.concat([dynamic_input_arg_df, response_df, self.score_df], axis=1)
        # `full_df` contains all input arguments, responses, and score
        self.full_df = pd.concat([input_arg_df, result_df, self.score_df], axis=1)

    @staticmethod
    def _extract_top_doc_ids(output: pd.DataFrame) -> list[tuple[str, float]]:
        r"""Helper function to get distances between documents from LanceDB."""
        return output.to_dict(orient="list")["ids"]

    @staticmethod
    def _extract_lancedb_dists(output: pd.DataFrame) -> list[tuple[str, float]]:
        r"""Helper function to get distances between documents from LanceDB."""
        return output.to_dict(orient="list")["_distance"]

    @staticmethod
    def _extract_lancedb_docs(output: pd.DataFrame) -> list[tuple[str, float]]:
        r"""Helper function to get distances between documents from LanceDB."""
        return output.to_dict(orient="list")["text"]
| [
"lancedb.connect",
"lancedb.embeddings.with_embeddings"
] | [((797, 961), 'warnings.warn', 'warnings.warn', (['"""`nprobes` and `refine_factor` are not used by the default `query_builder`. Feel free to open an issue to request adding support for them."""'], {}), "(\n '`nprobes` and `refine_factor` are not used by the default `query_builder`. Feel free to open an issue to request adding support for them.'\n )\n", (810, 961), False, 'import warnings\n'), ((3496, 3516), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (3511, 3516), False, 'import lancedb\n'), ((6251, 6275), 'pandas.DataFrame', 'pd.DataFrame', (['input_args'], {}), '(input_args)\n', (6263, 6275), True, 'import pandas as pd\n'), ((6864, 6891), 'pandas.DataFrame', 'pd.DataFrame', (['response_dict'], {}), '(response_dict)\n', (6876, 6891), True, 'import pandas as pd\n'), ((7173, 7209), 'pandas.DataFrame', 'pd.DataFrame', (["{'latency': latencies}"], {}), "({'latency': latencies})\n", (7185, 7209), True, 'import pandas as pd\n'), ((7322, 7391), 'pandas.concat', 'pd.concat', (['[dynamic_input_arg_df, response_df, self.score_df]'], {'axis': '(1)'}), '([dynamic_input_arg_df, response_df, self.score_df], axis=1)\n', (7331, 7391), True, 'import pandas as pd\n'), ((7486, 7545), 'pandas.concat', 'pd.concat', (['[input_arg_df, result_df, self.score_df]'], {'axis': '(1)'}), '([input_arg_df, result_df, self.score_df], axis=1)\n', (7495, 7545), True, 'import pandas as pd\n'), ((4045, 4079), 'logging.info', 'logging.info', (['"""Preparing first..."""'], {}), "('Preparing first...')\n", (4057, 4079), False, 'import logging\n'), ((4479, 4533), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['emb_fn', 'self.data', 'self.text_col_name'], {}), '(emb_fn, self.data, self.text_col_name)\n', (4494, 4533), False, 'from lancedb.embeddings import with_embeddings\n'), ((4825, 4839), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (4837, 4839), False, 'from time import perf_counter\n'), ((4988, 5002), 'time.perf_counter', 'perf_counter', ([], {}), 
'()\n', (5000, 5002), False, 'from time import perf_counter\n')] |
import os
from pathlib import Path
from tqdm import tqdm
from lancedb import connect
from pydantic import BaseModel
from lancedb.pydantic import LanceModel, Vector
from lancedb.embeddings import get_registry
from typing import Iterable
# Filesystem locations for the vector database and the raw source documents,
# both resolved relative to the current working directory.
DB_PATH = Path(os.getcwd(), "db")
DATA_PATH = Path(os.getcwd(), "data")
# Name of the LanceDB table that stores the embedded text chunks.
DB_TABLE = "paul_graham"
class Document(BaseModel):
    """A single source file loaded into memory for chunking."""
    id: int  # sequential id assigned at load time
    text: str  # full file contents
    filename: str  # original file name, without directory
openai = get_registry().get("openai").create(name="text-embedding-3-large", dim=256)
class TextChunk(LanceModel):
    """LanceDB row schema: one fixed-size window of a document's text."""
    id: int  # globally unique chunk id
    doc_id: int  # id of the owning Document
    chunk_num: int  # ordinal of this chunk within the document
    start_pos: int  # character offset where the chunk starts
    end_pos: int  # character offset where the window ends (may exceed text length)
    text: str = openai.SourceField()  # the text that gets embedded
    # For some reason if we call openai.ndim(), it returns 1536 instead of 256 like we want
    vector: Vector(openai.ndims()) = openai.VectorField(default=None)
def chunk_text(
    documents: Iterable[Document], window_size: int = 1024, overlap: int = 0
):
    """Split each document's text into fixed-size windows.

    Args:
        documents: Documents to split; each provides ``id`` and ``text``.
        window_size: Characters per chunk.
        overlap: Characters shared by consecutive chunks; must stay below
            ``window_size`` so the stride remains positive.

    Yields:
        dict: One record per chunk — a globally unique ``id``, the owning
        ``doc_id``, the chunk ordinal, its character span, and its text.
    """
    # TODO: Fix up this and use a Lance Model instead - have reached out to the team to ask for some help
    chunk_id = 0
    stride = window_size - overlap
    for document in documents:
        body = document.text
        for ordinal, start in enumerate(range(0, len(body), stride)):
            stop = start + window_size
            yield {
                "id": chunk_id,
                "doc_id": document.id,
                "chunk_num": ordinal,
                "start_pos": start,
                "end_pos": stop,
                "text": body[start:stop],
            }
            chunk_id += 1
def read_file_content(path: Path, file_suffix: str) -> Iterable[Document]:
    """Yield a Document for every direct child of *path* whose suffix matches.

    Args:
        path: Directory to scan (non-recursive).
        file_suffix: Required extension, e.g. ``".md"``.
    """
    for index, entry in enumerate(path.iterdir()):
        if entry.suffix == file_suffix:
            yield Document(id=index, text=entry.read_text(), filename=entry.name)
def batch_chunks(chunks, batch_size=10):
    """Lazily group items from *chunks* into lists of at most *batch_size*.

    The final batch may be shorter; an empty input yields nothing.
    """
    pending = []
    for element in chunks:
        pending.append(element)
        if len(pending) >= batch_size:
            yield pending
            pending = []
    if pending:  # flush the trailing partial batch
        yield pending
def main():
    """Embed every markdown file under DATA_PATH and store the chunks in LanceDB."""
    assert "OPENAI_API_KEY" in os.environ, "OPENAI_API_KEY is not set"
    database = connect(DB_PATH)
    # mode="overwrite" recreates the table on every run.
    table = database.create_table(DB_TABLE, schema=TextChunk, mode="overwrite")
    chunk_stream = chunk_text(read_file_content(DATA_PATH, file_suffix=".md"))
    # Insert in batches of 20 so each table.add call embeds a small group at once.
    for batch in tqdm(batch_chunks(chunk_stream, 20)):
        table.add(batch)
# Run ingestion only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| [
"lancedb.connect",
"lancedb.embeddings.get_registry"
] | [((253, 264), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (262, 264), False, 'import os\n'), ((289, 300), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (298, 300), False, 'import os\n'), ((2068, 2084), 'lancedb.connect', 'connect', (['DB_PATH'], {}), '(DB_PATH)\n', (2075, 2084), False, 'from lancedb import connect\n'), ((2329, 2349), 'tqdm.tqdm', 'tqdm', (['batched_chunks'], {}), '(batched_chunks)\n', (2333, 2349), False, 'from tqdm import tqdm\n'), ((419, 433), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (431, 433), False, 'from lancedb.embeddings import get_registry\n')] |
import lancedb
import lancedb.embeddings.imagebind
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector
import gradio as gr
from downloader import dowload_and_save_audio, dowload_and_save_image, base_path
model = get_registry().get("imagebind").create()
class TextModel(LanceModel):
    """Row schema tying a caption, image file and audio clip to one shared embedding."""
    text: str  # caption describing the item
    image_uri: str = model.SourceField()  # the image is the source the embedding is computed from
    audio_path: str  # path of the matching audio clip
    vector: Vector(model.ndims()) = model.VectorField()
# Demo captions; each pairs with the image and audio clip at the same index below.
text_list = ["A bird", "A dragon", "A car", "A guitar", "A witch", "Thunder"]
image_paths = dowload_and_save_image()
audio_paths = dowload_and_save_audio()
# Load data
inputs = [
    {"text": a, "audio_path": b, "image_uri": c}
    for a, b, c in zip(text_list, audio_paths, image_paths)
]
db = lancedb.connect("/tmp/lancedb")
table = db.create_table("img_bind", schema=TextModel)
table.add(inputs)  # embeds each row's image_uri via ImageBind on insert
def process_image(inp_img) -> tuple[str, str]:
    """Find the table row whose embedding is nearest to the input image.

    Args:
        inp_img: Path of the query image file.

    Returns:
        tuple[str, str]: The matched row's caption text and audio file path.
    """
    # FIX: return annotation was `-> str` although a 2-tuple is returned.
    hit = (
        table.search(inp_img, vector_column_name="vector")
        .limit(1)
        .to_pydantic(TextModel)[0]
    )
    return hit.text, hit.audio_path
def process_text(inp_text) -> tuple[str, str]:
    """Find the table row whose embedding is nearest to the input text prompt.

    Args:
        inp_text: Free-form text prompt.

    Returns:
        tuple[str, str]: The matched row's image path and audio file path.
    """
    # FIX: return annotation was `-> str` although a 2-tuple is returned.
    hit = (
        table.search(inp_text, vector_column_name="vector")
        .limit(1)
        .to_pydantic(TextModel)[0]
    )
    return hit.image_uri, hit.audio_path
def process_audio(inp_audio) -> tuple[str, str]:
    """Find the stored row whose embedding best matches an audio clip.

    Args:
        inp_audio: filesystem path of the query audio clip.

    Returns:
        (image_uri, text) of the single nearest row.
    """
    # Fixed return annotation: this returns an (image_uri, text) pair, not str.
    best = (
        table.search(inp_audio, vector_column_name="vector")
        .limit(1)
        .to_pydantic(TextModel)[0]
    )
    return best.image_uri, best.text
# Tab 1: image in -> matching text + audio out.
im_to_at = gr.Interface(
    process_image,
    gr.Image(type="filepath", value=image_paths[0]),
    [gr.Text(label="Output Text"), gr.Audio(label="Output Audio")],
    examples=image_paths,
    allow_flagging="never",
)
# Tab 2: text in -> matching image + audio out.
txt_to_ia = gr.Interface(
    process_text,
    gr.Textbox(label="Enter a prompt:"),
    [gr.Image(label="Output Image"), gr.Audio(label="Output Audio")],
    allow_flagging="never",
    examples=text_list,
)
# Tab 3: audio in -> matching image + text out.
a_to_it = gr.Interface(
    process_audio,
    gr.Audio(type="filepath", value=audio_paths[0]),
    [gr.Image(label="Output Image"), gr.Text(label="Output Text")],
    examples=audio_paths,
    allow_flagging="never",
)
demo = gr.TabbedInterface(
    [im_to_at, txt_to_ia, a_to_it],
    ["Image to Text/Audio", "Text to Image/Audio", "Audio to Image/Text"],
)
# Launch a publicly shared demo; let gradio serve the downloaded test inputs.
if __name__ == "__main__":
    demo.launch(share=True, allowed_paths=[f"{base_path}/test_inputs/"])
| [
"lancedb.connect",
"lancedb.embeddings.get_registry"
] | [((550, 574), 'downloader.dowload_and_save_image', 'dowload_and_save_image', ([], {}), '()\n', (572, 574), False, 'from downloader import dowload_and_save_audio, dowload_and_save_image, base_path\n'), ((589, 613), 'downloader.dowload_and_save_audio', 'dowload_and_save_audio', ([], {}), '()\n', (611, 613), False, 'from downloader import dowload_and_save_audio, dowload_and_save_image, base_path\n'), ((755, 786), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (770, 786), False, 'import lancedb\n'), ((2166, 2291), 'gradio.TabbedInterface', 'gr.TabbedInterface', (['[im_to_at, txt_to_ia, a_to_it]', "['Image to Text/Audio', 'Text to Image/Audio', 'Audio to Image/Text']"], {}), "([im_to_at, txt_to_ia, a_to_it], ['Image to Text/Audio',\n 'Text to Image/Audio', 'Audio to Image/Text'])\n", (2184, 2291), True, 'import gradio as gr\n'), ((1557, 1604), 'gradio.Image', 'gr.Image', ([], {'type': '"""filepath"""', 'value': 'image_paths[0]'}), "(type='filepath', value=image_paths[0])\n", (1565, 1604), True, 'import gradio as gr\n'), ((1778, 1813), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""Enter a prompt:"""'}), "(label='Enter a prompt:')\n", (1788, 1813), True, 'import gradio as gr\n'), ((1986, 2033), 'gradio.Audio', 'gr.Audio', ([], {'type': '"""filepath"""', 'value': 'audio_paths[0]'}), "(type='filepath', value=audio_paths[0])\n", (1994, 2033), True, 'import gradio as gr\n'), ((1611, 1639), 'gradio.Text', 'gr.Text', ([], {'label': '"""Output Text"""'}), "(label='Output Text')\n", (1618, 1639), True, 'import gradio as gr\n'), ((1641, 1671), 'gradio.Audio', 'gr.Audio', ([], {'label': '"""Output Audio"""'}), "(label='Output Audio')\n", (1649, 1671), True, 'import gradio as gr\n'), ((1820, 1850), 'gradio.Image', 'gr.Image', ([], {'label': '"""Output Image"""'}), "(label='Output Image')\n", (1828, 1850), True, 'import gradio as gr\n'), ((1852, 1882), 'gradio.Audio', 'gr.Audio', ([], {'label': '"""Output Audio"""'}), 
"(label='Output Audio')\n", (1860, 1882), True, 'import gradio as gr\n'), ((2040, 2070), 'gradio.Image', 'gr.Image', ([], {'label': '"""Output Image"""'}), "(label='Output Image')\n", (2048, 2070), True, 'import gradio as gr\n'), ((2072, 2100), 'gradio.Text', 'gr.Text', ([], {'label': '"""Output Text"""'}), "(label='Output Text')\n", (2079, 2100), True, 'import gradio as gr\n'), ((253, 267), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (265, 267), False, 'from lancedb.embeddings import get_registry\n')] |
"""LanceDB vector store with cloud storage support."""
import os
from typing import Any, Optional
from dotenv import load_dotenv
from llama_index.schema import NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.vector_stores import LanceDBVectorStore as LanceDBVectorStoreBase
from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities
from llama_index.vector_stores.types import VectorStoreQuery, VectorStoreQueryResult
from pandas import DataFrame
# Load environment variables (e.g. LANCEDB_API_KEY / LANCEDB_REGION) from .env.
load_dotenv()
class LanceDBVectorStore(LanceDBVectorStoreBase):
    """Advanced LanceDB Vector Store supporting cloud storage and prefiltering."""

    from lancedb.query import LanceQueryBuilder
    from lancedb.table import Table

    def __init__(
        self,
        uri: str,
        table_name: str = "vectors",
        nprobes: int = 20,
        refine_factor: Optional[int] = None,
        api_key: Optional[str] = None,
        region: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Init params.

        Args:
            uri: LanceDB location — a local path or a cloud URI.
            table_name: Name of the table to query. Defaults to "vectors".
            nprobes: Number of IVF partitions probed per query.
            refine_factor: Optional re-ranking factor for ANN results.
            api_key: LanceDB Cloud key; falls back to LANCEDB_API_KEY env var.
            region: LanceDB Cloud region; falls back to LANCEDB_REGION env var.
        """
        self._setup_connection(uri, api_key, region)
        self.uri = uri
        self.table_name = table_name
        self.nprobes = nprobes
        self.refine_factor = refine_factor
        self.api_key = api_key
        self.region = region

    def _setup_connection(self, uri: str, api_key: Optional[str] = None, region: Optional[str] = None):
        """Establishes a robust connection to LanceDB (cloud when credentials exist)."""
        api_key = api_key or os.getenv('LANCEDB_API_KEY')
        region = region or os.getenv('LANCEDB_REGION')

        import_err_msg = "`lancedb` package not found, please run `pip install lancedb`"
        try:
            import lancedb
        except ImportError:
            raise ImportError(import_err_msg)

        # Cloud connection only when both credentials are present; local otherwise.
        if api_key and region:
            self.connection = lancedb.connect(uri, api_key=api_key, region=region)
        else:
            self.connection = lancedb.connect(uri)

    def query(
        self,
        query: VectorStoreQuery,
        **kwargs: Any,
    ) -> VectorStoreQueryResult:
        """Enhanced query method to support prefiltering in LanceDB queries."""
        table = self.connection.open_table(self.table_name)
        lance_query = self._prepare_lance_query(query, table, **kwargs)

        results = lance_query.to_df()
        return self._construct_query_result(results)

    def _prepare_lance_query(self, query: VectorStoreQuery, table: Table, **kwargs) -> LanceQueryBuilder:
        """Prepares the LanceDB query considering prefiltering and additional parameters."""
        if query.filters is not None:
            if "where" in kwargs:
                raise ValueError(
                    "Cannot specify filter via both query and kwargs. "
                    "Use kwargs only for lancedb specific items that are "
                    "not supported via the generic query interface.")
            where = _to_lance_filter(query.filters)
        else:
            where = kwargs.pop("where", None)
        prefilter = kwargs.pop("prefilter", False)

        # Fix: use the table handed in by the caller. The original re-opened
        # self.table_name here, which left the `table` parameter dead and cost
        # an extra open on every query.
        lance_query = (
            table.search(query.query_embedding).limit(query.similarity_top_k).where(
                where, prefilter=prefilter).nprobes(self.nprobes))

        if self.refine_factor is not None:
            lance_query.refine_factor(self.refine_factor)

        return lance_query

    def _construct_query_result(self, results: DataFrame) -> VectorStoreQueryResult:
        """Constructs a VectorStoreQueryResult from a LanceDB query result."""
        nodes = []

        for _, row in results.iterrows():
            node = TextNode(
                text=row.get('text', ''),  # ensure text is a string
                id_=row['id'],
                relationships={
                    NodeRelationship.SOURCE: RelatedNodeInfo(node_id=row['doc_id']),
                })
            nodes.append(node)

        return VectorStoreQueryResult(
            nodes=nodes,
            similarities=_to_llama_similarities(results),
            ids=results["id"].tolist(),
        )
| [
"lancedb.connect"
] | [((490, 503), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (501, 503), False, 'from dotenv import load_dotenv\n'), ((1464, 1492), 'os.getenv', 'os.getenv', (['"""LANCEDB_API_KEY"""'], {}), "('LANCEDB_API_KEY')\n", (1473, 1492), False, 'import os\n'), ((1520, 1547), 'os.getenv', 'os.getenv', (['"""LANCEDB_REGION"""'], {}), "('LANCEDB_REGION')\n", (1529, 1547), False, 'import os\n'), ((1814, 1866), 'lancedb.connect', 'lancedb.connect', (['uri'], {'api_key': 'api_key', 'region': 'region'}), '(uri, api_key=api_key, region=region)\n', (1829, 1866), False, 'import lancedb\n'), ((1911, 1931), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1926, 1931), False, 'import lancedb\n'), ((2898, 2929), 'llama_index.vector_stores.lancedb._to_lance_filter', '_to_lance_filter', (['query.filters'], {}), '(query.filters)\n', (2914, 2929), False, 'from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities\n'), ((4021, 4052), 'llama_index.vector_stores.lancedb._to_llama_similarities', '_to_llama_similarities', (['results'], {}), '(results)\n', (4043, 4052), False, 'from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities\n'), ((3841, 3879), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': "row['doc_id']"}), "(node_id=row['doc_id'])\n", (3856, 3879), False, 'from llama_index.schema import NodeRelationship, RelatedNodeInfo, TextNode\n')] |
from pathlib import Path
from typing import Any, Callable
from lancedb import DBConnection as LanceDBConnection
from lancedb import connect as lancedb_connect
from lancedb.table import Table as LanceDBTable
from openai import Client as OpenAIClient
from pydantic import Field, PrivateAttr
from crewai_tools.tools.rag.rag_tool import Adapter
def _default_embedding_function():
    """Build an embedding callable backed by OpenAI's text-embedding-ada-002."""
    client = OpenAIClient()

    def _embedding_function(input):
        # One API call per batch; unpack the embedding vectors from the response.
        response = client.embeddings.create(input=input, model="text-embedding-ada-002")
        return [item.embedding for item in response.data]

    return _embedding_function
class LanceDBAdapter(Adapter):
    """RAG adapter that answers questions from a LanceDB vector table."""

    uri: str | Path
    table_name: str
    embedding_function: Callable = Field(default_factory=_default_embedding_function)
    top_k: int = 3
    vector_column_name: str = "vector"
    text_column_name: str = "text"

    _db: LanceDBConnection = PrivateAttr()
    _table: LanceDBTable = PrivateAttr()

    def model_post_init(self, __context: Any) -> None:
        # Open the connection and table once the pydantic fields are populated.
        self._db = lancedb_connect(self.uri)
        self._table = self._db.open_table(self.table_name)

        return super().model_post_init(__context)

    def query(self, question: str) -> str:
        """Return the top-k matching text chunks, joined by newlines."""
        question_vector = self.embedding_function([question])[0]
        search = self._table.search(
            question_vector, vector_column_name=self.vector_column_name
        )
        rows = search.limit(self.top_k).select([self.text_column_name]).to_list()

        return "\n".join(row[self.text_column_name] for row in rows)
| [
"lancedb.connect"
] | [((393, 407), 'openai.Client', 'OpenAIClient', ([], {}), '()\n', (405, 407), True, 'from openai import Client as OpenAIClient\n'), ((724, 774), 'pydantic.Field', 'Field', ([], {'default_factory': '_default_embedding_function'}), '(default_factory=_default_embedding_function)\n', (729, 774), False, 'from pydantic import Field, PrivateAttr\n'), ((898, 911), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (909, 911), False, 'from pydantic import Field, PrivateAttr\n'), ((939, 952), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (950, 952), False, 'from pydantic import Field, PrivateAttr\n'), ((1028, 1053), 'lancedb.connect', 'lancedb_connect', (['self.uri'], {}), '(self.uri)\n', (1043, 1053), True, 'from lancedb import connect as lancedb_connect\n')] |
import logging
from typing import Any, Dict, Generator, List, Optional, Sequence, Tuple, Type
import lancedb
import pandas as pd
from dotenv import load_dotenv
from lancedb.pydantic import LanceModel, Vector
from lancedb.query import LanceVectorQueryBuilder
from pydantic import BaseModel, ValidationError, create_model
from langroid.embedding_models.base import (
EmbeddingModel,
EmbeddingModelsConfig,
)
from langroid.embedding_models.models import OpenAIEmbeddingsConfig
from langroid.mytypes import Document, EmbeddingFunction
from langroid.utils.configuration import settings
from langroid.utils.pydantic_utils import (
dataframe_to_document_model,
dataframe_to_documents,
extend_document_class,
extra_metadata,
flatten_pydantic_instance,
flatten_pydantic_model,
nested_dict_from_flat,
)
from langroid.vector_store.base import VectorStore, VectorStoreConfig
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
class LanceDBConfig(VectorStoreConfig):
    """Configuration for a LanceDB-backed vector store."""

    cloud: bool = False  # LanceDB Cloud not supported yet; forced False at runtime
    collection_name: str | None = "temp"
    storage_path: str = ".lancedb/data"
    embedding: EmbeddingModelsConfig = OpenAIEmbeddingsConfig()
    distance: str = "cosine"
    # document_class is used to store in lancedb with right schema,
    # and also to retrieve the right type of Documents when searching.
    document_class: Type[Document] = Document
    flatten: bool = False  # flatten Document class into LanceSchema ?
class LanceDB(VectorStore):
    """Vector store implemented on top of LanceDB local storage."""

    def __init__(self, config: LanceDBConfig = LanceDBConfig()):
        """Connect to (or create) local LanceDB storage and, when a collection
        name is configured, create/open that collection."""
        super().__init__(config)
        self.config: LanceDBConfig = config
        emb_model = EmbeddingModel.create(config.embedding)
        self.embedding_fn: EmbeddingFunction = emb_model.embedding_fn()
        self.embedding_dim = emb_model.embedding_dims
        self.host = config.host
        self.port = config.port
        self.is_from_dataframe = False  # were docs ingested from a dataframe?
        self.df_metadata_columns: List[str] = []  # metadata columns from dataframe
        self._setup_schemas(config.document_class)
        load_dotenv()
        if self.config.cloud:
            logger.warning(
                "LanceDB Cloud is not available yet. Switching to local storage."
            )
            config.cloud = False
        else:
            try:
                self.client = lancedb.connect(
                    uri=config.storage_path,
                )
            except Exception as e:
                # Fall back to a sibling ".new" directory if the path is unusable.
                new_storage_path = config.storage_path + ".new"
                logger.warning(
                    f"""
                    Error connecting to local LanceDB at {config.storage_path}:
                    {e}
                    Switching to {new_storage_path}
                    """
                )
                self.client = lancedb.connect(
                    uri=new_storage_path,
                )
        # Note: Only create collection if a non-null collection name is provided.
        # This is useful to delay creation of vecdb until we have a suitable
        # collection name (e.g. we could get it from the url or folder path).
        if config.collection_name is not None:
            self.create_collection(
                config.collection_name, replace=config.replace_collection
            )

    def _setup_schemas(self, doc_cls: Type[Document] | None) -> None:
        """Build the LanceDB schemas (unflattened and, if configured, flattened)
        for the given document class."""
        doc_cls = doc_cls or self.config.document_class
        self.unflattened_schema = self._create_lance_schema(doc_cls)
        self.schema = (
            self._create_flat_lance_schema(doc_cls)
            if self.config.flatten
            else self.unflattened_schema
        )

    def clear_empty_collections(self) -> int:
        """Drop all collections with zero rows; return how many were dropped."""
        coll_names = self.list_collections()
        n_deletes = 0
        for name in coll_names:
            nr = self.client.open_table(name).head(1).shape[0]
            if nr == 0:
                n_deletes += 1
                self.client.drop_table(name)
        return n_deletes

    def clear_all_collections(self, really: bool = False, prefix: str = "") -> int:
        """Clear all collections with the given prefix."""
        if not really:
            logger.warning("Not deleting all collections, set really=True to confirm")
            return 0
        coll_names = [
            c for c in self.list_collections(empty=True) if c.startswith(prefix)
        ]
        if len(coll_names) == 0:
            logger.warning(f"No collections found with prefix {prefix}")
            return 0
        n_empty_deletes = 0
        n_non_empty_deletes = 0
        for name in coll_names:
            nr = self.client.open_table(name).head(1).shape[0]
            # bool arithmetic: exactly one of these counters is incremented
            n_empty_deletes += nr == 0
            n_non_empty_deletes += nr > 0
            self.client.drop_table(name)
        logger.warning(
            f"""
            Deleted {n_empty_deletes} empty collections and
            {n_non_empty_deletes} non-empty collections.
            """
        )
        return n_empty_deletes + n_non_empty_deletes

    def list_collections(self, empty: bool = False) -> List[str]:
        """
        Returns:
            List of collection names that have at least one vector.
        Args:
            empty (bool, optional): Whether to include empty collections.
        """
        colls = self.client.table_names(limit=None)
        if len(colls) == 0:
            return []
        if empty:  # include empty tbls
            return colls  # type: ignore
        counts = [self.client.open_table(coll).head(1).shape[0] for coll in colls]
        return [coll for coll, count in zip(colls, counts) if count > 0]

    def _create_lance_schema(self, doc_cls: Type[Document]) -> Type[BaseModel]:
        """
        Create a subclass of LanceModel with fields:
        - id (str)
        - Vector field that has dims equal to
            the embedding dimension of the embedding model, and a data field of type
            DocClass.
        - other fields from doc_cls
        Args:
            doc_cls (Type[Document]): A Pydantic model which should be a subclass of
                Document, to be used as the type for the data field.
        Returns:
            Type[BaseModel]: A new Pydantic model subclassing from LanceModel.
        Raises:
            ValueError: If `n` is not a non-negative integer or if `DocClass` is not a
                subclass of Document.
        """
        if not issubclass(doc_cls, Document):
            raise ValueError("DocClass must be a subclass of Document")
        n = self.embedding_dim
        # Prepare fields for the new model
        fields = {"id": (str, ...), "vector": (Vector(n), ...)}
        sorted_fields = dict(
            sorted(doc_cls.__fields__.items(), key=lambda item: item[0])
        )
        # Add both statically and dynamically defined fields from doc_cls
        for field_name, field in sorted_fields.items():
            fields[field_name] = (field.outer_type_, field.default)
        # Create the new model with dynamic fields
        NewModel = create_model(
            "NewModel", __base__=LanceModel, **fields
        )  # type: ignore
        return NewModel  # type: ignore

    def _create_flat_lance_schema(self, doc_cls: Type[Document]) -> Type[BaseModel]:
        """
        Flat version of the lance_schema, as nested Pydantic schemas are not yet
        supported by LanceDB.
        """
        lance_model = self._create_lance_schema(doc_cls)
        FlatModel = flatten_pydantic_model(lance_model, base_model=LanceModel)
        return FlatModel

    def create_collection(self, collection_name: str, replace: bool = False) -> None:
        """
        Create a collection with the given name, optionally replacing an existing
        collection if `replace` is True.
        Args:
            collection_name (str): Name of the collection to create.
            replace (bool): Whether to replace an existing collection
                with the same name. Defaults to False.
        """
        self.config.collection_name = collection_name
        collections = self.list_collections()
        if collection_name in collections:
            coll = self.client.open_table(collection_name)
            if coll.head().shape[0] > 0:
                logger.warning(f"Non-empty Collection {collection_name} already exists")
                if not replace:
                    logger.warning("Not replacing collection")
                    return
                else:
                    logger.warning("Recreating fresh collection")
        self.client.create_table(collection_name, schema=self.schema, mode="overwrite")
        if settings.debug:
            level = logger.getEffectiveLevel()
            logger.setLevel(logging.INFO)
            # NOTE(review): level is raised to INFO and immediately restored with
            # no log emitted in between — confirm whether a log call is missing.
            logger.setLevel(level)

    def _maybe_set_doc_class_schema(self, doc: Document) -> None:
        """
        Set the config.document_class and self.schema based on doc if needed
        Args:
            doc: an instance of Document, to be added to a collection
        """
        extra_metadata_fields = extra_metadata(doc, self.config.document_class)
        if len(extra_metadata_fields) > 0:
            logger.warning(
                f"""
                Added documents contain extra metadata fields:
                {extra_metadata_fields}
                which were not present in the original config.document_class.
                Trying to change document_class and corresponding schemas.
                Overriding LanceDBConfig.document_class with an auto-generated
                Pydantic class that includes these extra fields.
                If this fails, or you see odd results, it is recommended that you
                define a subclass of Document, with metadata of class derived from
                DocMetaData, with extra fields defined via
                `Field(..., description="...")` declarations,
                and set this document class as the value of the
                LanceDBConfig.document_class attribute.
                """
            )
            doc_cls = extend_document_class(doc)
            self.config.document_class = doc_cls
            self._setup_schemas(doc_cls)

    def add_documents(self, documents: Sequence[Document]) -> None:
        """Embed and insert `documents` into the current collection, in batches."""
        super().maybe_add_ids(documents)
        colls = self.list_collections(empty=True)
        if len(documents) == 0:
            return
        embedding_vecs = self.embedding_fn([doc.content for doc in documents])
        coll_name = self.config.collection_name
        if coll_name is None:
            raise ValueError("No collection name set, cannot ingest docs")
        self._maybe_set_doc_class_schema(documents[0])
        if (
            coll_name not in colls
            or self.client.open_table(coll_name).head(1).shape[0] == 0
        ):
            # collection either doesn't exist or is empty, so replace it,
            self.create_collection(coll_name, replace=True)
        ids = [str(d.id()) for d in documents]
        # don't insert all at once, batch in chunks of b,
        # else we get an API error
        b = self.config.batch_size

        def make_batches() -> Generator[List[BaseModel], None, None]:
            # Yield schema instances in batches of b (flattened if configured).
            for i in range(0, len(ids), b):
                batch = [
                    self.unflattened_schema(
                        id=ids[i + j],
                        vector=embedding_vecs[i + j],
                        **doc.dict(),
                    )
                    for j, doc in enumerate(documents[i : i + b])
                ]
                if self.config.flatten:
                    batch = [
                        flatten_pydantic_instance(instance)  # type: ignore
                        for instance in batch
                    ]
                yield batch

        tbl = self.client.open_table(self.config.collection_name)
        try:
            tbl.add(make_batches())
        except Exception as e:
            logger.error(
                f"""
                Error adding documents to LanceDB: {e}
                POSSIBLE REMEDY: Delete the LancdDB storage directory
                {self.config.storage_path} and try again.
                """
            )

    def add_dataframe(
        self,
        df: pd.DataFrame,
        content: str = "content",
        metadata: List[str] = [],
    ) -> None:
        """
        Add a dataframe to the collection.
        Args:
            df (pd.DataFrame): A dataframe
            content (str): The name of the column in the dataframe that contains the
                text content to be embedded using the embedding model.
            metadata (List[str]): A list of column names in the dataframe that contain
                metadata to be stored in the database. Defaults to [].
        """
        self.is_from_dataframe = True
        actual_metadata = metadata.copy()
        self.df_metadata_columns = actual_metadata  # could be updated below
        # get content column
        content_values = df[content].values.tolist()
        embedding_vecs = self.embedding_fn(content_values)
        # add vector column
        df["vector"] = embedding_vecs
        if content != "content":
            # rename content column to "content", leave existing column intact
            df = df.rename(columns={content: "content"}, inplace=False)
        if "id" not in df.columns:
            docs = dataframe_to_documents(df, content="content", metadata=metadata)
            ids = [str(d.id()) for d in docs]
            df["id"] = ids
        if "id" not in actual_metadata:
            actual_metadata += ["id"]
        colls = self.list_collections(empty=True)
        coll_name = self.config.collection_name
        if (
            coll_name not in colls
            or self.client.open_table(coll_name).head(1).shape[0] == 0
        ):
            # collection either doesn't exist or is empty, so replace it
            # and set new schema from df
            self.client.create_table(
                self.config.collection_name,
                data=df,
                mode="overwrite",
            )
            doc_cls = dataframe_to_document_model(
                df,
                content=content,
                metadata=actual_metadata,
                exclude=["vector"],
            )
            self.config.document_class = doc_cls  # type: ignore
            self._setup_schemas(doc_cls)  # type: ignore
        else:
            # collection exists and is not empty, so append to it
            tbl = self.client.open_table(self.config.collection_name)
            tbl.add(df)

    def delete_collection(self, collection_name: str) -> None:
        """Drop the named collection (table) from LanceDB."""
        self.client.drop_table(collection_name)

    def _lance_result_to_docs(self, result: LanceVectorQueryBuilder) -> List[Document]:
        """Convert a LanceDB query-builder result into Document instances,
        using the dataframe path when docs were ingested via add_dataframe."""
        if self.is_from_dataframe:
            df = result.to_pandas()
            return dataframe_to_documents(
                df,
                content="content",
                metadata=self.df_metadata_columns,
                doc_cls=self.config.document_class,
            )
        else:
            records = result.to_arrow().to_pylist()
            return self._records_to_docs(records)

    def _records_to_docs(self, records: List[Dict[str, Any]]) -> List[Document]:
        """Instantiate schema objects from raw records, then re-shape each into
        the configured document class."""
        if self.config.flatten:
            docs = [
                self.unflattened_schema(**nested_dict_from_flat(rec)) for rec in records
            ]
        else:
            try:
                docs = [self.schema(**rec) for rec in records]
            except ValidationError as e:
                raise ValueError(
                    f"""
                Error validating LanceDB result: {e}
                HINT: This could happen when you're re-using an
                existing LanceDB store with a different schema.
                Try deleting your local lancedb storage at `{self.config.storage_path}`
                re-ingesting your documents and/or replacing the collections.
                """
                )
        doc_cls = self.config.document_class
        doc_cls_field_names = doc_cls.__fields__.keys()
        return [
            doc_cls(
                **{
                    field_name: getattr(doc, field_name)
                    for field_name in doc_cls_field_names
                }
            )
            for doc in docs
        ]

    def get_all_documents(self, where: str = "") -> List[Document]:
        """Fetch every document in the collection, optionally filtered by `where`."""
        if self.config.collection_name is None:
            raise ValueError("No collection name set, cannot retrieve docs")
        tbl = self.client.open_table(self.config.collection_name)
        pre_result = tbl.search(None).where(where or None).limit(None)
        return self._lance_result_to_docs(pre_result)

    def get_documents_by_ids(self, ids: List[str]) -> List[Document]:
        """Fetch the documents whose `id` field matches each of `ids`, in order;
        ids with no match are silently skipped."""
        if self.config.collection_name is None:
            raise ValueError("No collection name set, cannot retrieve docs")
        _ids = [str(id) for id in ids]
        tbl = self.client.open_table(self.config.collection_name)
        docs = []
        for _id in _ids:
            results = self._lance_result_to_docs(tbl.search().where(f"id == '{_id}'"))
            if len(results) > 0:
                docs.append(results[0])
        return docs

    def similar_texts_with_scores(
        self,
        text: str,
        k: int = 1,
        where: Optional[str] = None,
    ) -> List[Tuple[Document, float]]:
        """Return up to `k` (document, similarity) pairs nearest to `text`."""
        embedding = self.embedding_fn([text])[0]
        tbl = self.client.open_table(self.config.collection_name)
        result = (
            tbl.search(embedding).metric(self.config.distance).where(where).limit(k)
        )
        docs = self._lance_result_to_docs(result)
        # note _distance is 1 - cosine
        if self.is_from_dataframe:
            scores = [
                1 - rec["_distance"] for rec in result.to_pandas().to_dict("records")
            ]
        else:
            scores = [1 - rec["_distance"] for rec in result.to_arrow().to_pylist()]
        if len(docs) == 0:
            logger.warning(f"No matches found for {text}")
            return []
        if settings.debug:
            logger.info(f"Found {len(docs)} matches, max score: {max(scores)}")
        doc_score_pairs = list(zip(docs, scores))
        self.show_if_debug(doc_score_pairs)
        return doc_score_pairs
| [
"lancedb.connect",
"lancedb.pydantic.Vector"
] | [((911, 938), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (928, 938), False, 'import logging\n'), ((1125, 1149), 'langroid.embedding_models.models.OpenAIEmbeddingsConfig', 'OpenAIEmbeddingsConfig', ([], {}), '()\n', (1147, 1149), False, 'from langroid.embedding_models.models import OpenAIEmbeddingsConfig\n'), ((1627, 1666), 'langroid.embedding_models.base.EmbeddingModel.create', 'EmbeddingModel.create', (['config.embedding'], {}), '(config.embedding)\n', (1648, 1666), False, 'from langroid.embedding_models.base import EmbeddingModel, EmbeddingModelsConfig\n'), ((2080, 2093), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (2091, 2093), False, 'from dotenv import load_dotenv\n'), ((7037, 7092), 'pydantic.create_model', 'create_model', (['"""NewModel"""'], {'__base__': 'LanceModel'}), "('NewModel', __base__=LanceModel, **fields)\n", (7049, 7092), False, 'from pydantic import BaseModel, ValidationError, create_model\n'), ((7469, 7527), 'langroid.utils.pydantic_utils.flatten_pydantic_model', 'flatten_pydantic_model', (['lance_model'], {'base_model': 'LanceModel'}), '(lance_model, base_model=LanceModel)\n', (7491, 7527), False, 'from langroid.utils.pydantic_utils import dataframe_to_document_model, dataframe_to_documents, extend_document_class, extra_metadata, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((9064, 9111), 'langroid.utils.pydantic_utils.extra_metadata', 'extra_metadata', (['doc', 'self.config.document_class'], {}), '(doc, self.config.document_class)\n', (9078, 9111), False, 'from langroid.utils.pydantic_utils import dataframe_to_document_model, dataframe_to_documents, extend_document_class, extra_metadata, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((10124, 10150), 'langroid.utils.pydantic_utils.extend_document_class', 'extend_document_class', (['doc'], {}), '(doc)\n', (10145, 10150), False, 'from langroid.utils.pydantic_utils import 
dataframe_to_document_model, dataframe_to_documents, extend_document_class, extra_metadata, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((13444, 13508), 'langroid.utils.pydantic_utils.dataframe_to_documents', 'dataframe_to_documents', (['df'], {'content': '"""content"""', 'metadata': 'metadata'}), "(df, content='content', metadata=metadata)\n", (13466, 13508), False, 'from langroid.utils.pydantic_utils import dataframe_to_document_model, dataframe_to_documents, extend_document_class, extra_metadata, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((14182, 14280), 'langroid.utils.pydantic_utils.dataframe_to_document_model', 'dataframe_to_document_model', (['df'], {'content': 'content', 'metadata': 'actual_metadata', 'exclude': "['vector']"}), "(df, content=content, metadata=actual_metadata,\n exclude=['vector'])\n", (14209, 14280), False, 'from langroid.utils.pydantic_utils import dataframe_to_document_model, dataframe_to_documents, extend_document_class, extra_metadata, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((14943, 15064), 'langroid.utils.pydantic_utils.dataframe_to_documents', 'dataframe_to_documents', (['df'], {'content': '"""content"""', 'metadata': 'self.df_metadata_columns', 'doc_cls': 'self.config.document_class'}), "(df, content='content', metadata=self.\n df_metadata_columns, doc_cls=self.config.document_class)\n", (14965, 15064), False, 'from langroid.utils.pydantic_utils import dataframe_to_document_model, dataframe_to_documents, extend_document_class, extra_metadata, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((2342, 2382), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'config.storage_path'}), '(uri=config.storage_path)\n', (2357, 2382), False, 'import lancedb\n'), ((6637, 6646), 'lancedb.pydantic.Vector', 'Vector', (['n'], {}), '(n)\n', (6643, 6646), False, 'from lancedb.pydantic import LanceModel, Vector\n'), 
((2806, 2843), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'new_storage_path'}), '(uri=new_storage_path)\n', (2821, 2843), False, 'import lancedb\n'), ((11696, 11731), 'langroid.utils.pydantic_utils.flatten_pydantic_instance', 'flatten_pydantic_instance', (['instance'], {}), '(instance)\n', (11721, 11731), False, 'from langroid.utils.pydantic_utils import dataframe_to_document_model, dataframe_to_documents, extend_document_class, extra_metadata, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((15432, 15458), 'langroid.utils.pydantic_utils.nested_dict_from_flat', 'nested_dict_from_flat', (['rec'], {}), '(rec)\n', (15453, 15458), False, 'from langroid.utils.pydantic_utils import dataframe_to_document_model, dataframe_to_documents, extend_document_class, extra_metadata, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n')] |
import json
import lancedb
from lancedb.pydantic import Vector, LanceModel
from datetime import datetime
# import pyarrow as pa
TABLE_NAME = "documents"
uri = "data/sample-lancedb"
db = lancedb.connect(uri)  # local LanceDB directory

# Row layout (mirrors the Document model below):
# vector: 768-d embedding (PaLM embedding size)
# file_name: name of file
# file_path: path of file
# id: row identifier
# updated_at / created_at: timestamps used for upsert bookkeeping
class Document(LanceModel):
    """LanceDB schema for one indexed document row."""

    id: str
    file_name: str
    file_path: str
    created_at: datetime
    updated_at: datetime
    vector: Vector(768)  # Palm Embeddings size
try:
    table = db.create_table(TABLE_NAME, schema=Document)
except OSError:
    # Table already exists on disk; open it instead of creating.
    print("table exists")
    table = db.open_table(TABLE_NAME)
except Exception as inst:
    # Print out the type of exceptions.
    print(type(inst))
    print(inst.args)
    print(inst)

if True:
    now = datetime.now()
    # Idempotent upsert. Alternatively we can delete first, then insert.
    table.add(
        [
            Document(
                id="1",
                file_name="test_name",
                file_path="test_path",
                created_at=now,
                updated_at=now,
                vector=[i for i in range(768)],
            )
        ]
    )
    # Drop any older rows with the same id, keeping only this upsert.
    table.delete(f'id="1" AND created_at != timestamp "{now}"')

if False:
    # Alternative (disabled) demo path: in-place update of the existing row.
    table.update(
        where='id="1"',
        values=Document(
            id="1",
            file_name="test_name",
            file_path="test_path",
            created_at=datetime.now(),
            updated_at=datetime.now(),
            vector=[i for i in range(768)],
        ),
    )

# Nearest-neighbor query: top 2 rows closest to the probe vector.
vector = [i for i in range(768)]
result = table.search(vector).limit(2).to_list()
for item in result:
    print(item)
    # print(json.dumps(item, indent=2))

print(db[TABLE_NAME].head())
| [
"lancedb.connect",
"lancedb.pydantic.Vector"
] | [((189, 209), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (204, 209), False, 'import lancedb\n'), ((461, 472), 'lancedb.pydantic.Vector', 'Vector', (['(768)'], {}), '(768)\n', (467, 472), False, 'from lancedb.pydantic import Vector, LanceModel\n'), ((786, 800), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (798, 800), False, 'from datetime import datetime\n'), ((1421, 1435), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1433, 1435), False, 'from datetime import datetime\n'), ((1460, 1474), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1472, 1474), False, 'from datetime import datetime\n')] |
import json
from sentence_transformers import SentenceTransformer
from pydantic.main import ModelMetaclass
from pathlib import Path
import pandas as pd
import sqlite3
from uuid import uuid4
import lancedb
# Sentence-transformer model used to embed document text.
encoder = SentenceTransformer('all-MiniLM-L6-v2')
data_folder = Path('data/collections')
config_file = Path('data/config/indexes.yaml')
index_folder = Path('indexes')
# NOTE(review): same path as index_folder — confirm this duplication is intended.
lance_folder = Path('indexes')
lance_folder.mkdir(parents=True, exist_ok=True)
sqlite_folder = Path('data/indexes/')
class LanceDBDocument():
def __init__(self, document:dict, title:str, text:str, fields, tags=None, date=None, file_path=None):
self.document = self.fill_missing_fields(document, text, title, tags, date)
# self.text = document[text]
# self.tags = document[tags] if tags is not None else list()
# self.date = document[date] if date is not None else None
self.file_path = file_path
self.metadata = {k:document[k] for k in fields if k not in [title, text, tags, date]}
self.uuid = str(uuid4()) if 'uuid' not in document else document['uuid']
self.save_uuids = list()
self.sqlite_fields = list()
self.lance_exclude = list()
def fill_missing_fields(self, document, text, title, tags, date):
if title not in document:
self.title = ''
else:
self.title = document[title]
if text not in document:
self.text = ''
else:
self.text = document[text]
if date not in document:
self.date = ''
else:
self.date = document[date]
if tags not in document:
self.tags = list()
else:
self.tags = document[tags]
def create_json_document(self, text, uuids=None):
"""Creates a custom dictionary object that can be used for both sqlite and lancedb
The full document is always stored in sqlite where fixed fields are:
title
text
date
filepath
document_uuid - used for retrieval from lancedb results
Json field contains the whole document for retrieval and display
Lancedb only gets searching text, vectorization of that, and filter fields
"""
_document = {'title':self.title,
'text':text,
'tags':self.tags,
'date':self.date,
'file_path':str(self.file_path),
'uuid':self.uuid,
'metadata': self.metadata}
self._enforce_tags_schema()
for field in ['title','date','file_path']:
self.enforce_string_schema(field, _document)
return _document
def enforce_string_schema(self, field, test_document):
if not isinstance(test_document[field], str):
self.lance_exclude.append(field)
def _enforce_tags_schema(self):
# This enforces a simple List[str] format for the tags to match what lancedb can use for filtering
# If they are of type List[Dict] as a nested field, they are stored in sqlite for retrieval
if isinstance(self.tags, list):
tags_are_list = True
for _tag in self.tags:
if not isinstance(_tag, str):
tags_are_list = False
break
if not tags_are_list:
self.lance_exclude.append('tags')
def return_document(self):
document = self.create_json_document(self.text)
return document
class SqlLiteIngest():
def __init__(self, documents, source_file, db_location, index_name, overwrite):
self.documents = documents
self.source_file = source_file
self.db_location = db_location
self.index_name = index_name
self.overwrite = overwrite
def initialize(self):
self.connection = sqlite3.connect(self.db_location)
if self.overwrite:
self.connection.execute(f"""DROP TABLE IF EXISTS {self.index_name};""")
table_exists = self.connection.execute(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{self.index_name}';").fetchall()
if len(table_exists) == 0:
self.connection.execute(f"""
CREATE TABLE {self.index_name}(
id INTEGER PRIMARY KEY NOT NULL,
uuid STRING NOT NULL,
text STRING NOT NULL,
title STRING,
date STRING,
source_file STRING,
metadata JSONB);""")
def insert(self, document):
self.connection.execute(f"""INSERT INTO
{self.index_name} (uuid, text, title, date, source_file, metadata)
VALUES ('{document.uuid.replace("'","''")}', '{document.text.replace("'","''")}',
'{document.title.replace("'","''")}', '{document.date.replace("'","''")}',
'{self.index_name.replace("'","''")}', '{json.dumps(document.metadata).replace("'","''")}');""")
def bulk_insert(self):
for document in self.documents:
self.insert(document)
self.connection.commit()
self.connection.close()
from lancedb.pydantic import LanceModel, Vector, List
class LanceDBSchema384(LanceModel):
uuid: str
text: str
title: str
tags: List[str]
vector: Vector(384)
class LanceDBSchema512(LanceModel):
uuid: str
text: str
title: str
tags: List[str]
vector: Vector(512)
class LanceDBIngest():
def __init__(self, documents, lance_location, index_name, overwrite, encoder, schema):
self.documents = documents
self.lance_location = lance_location
self.index_name = index_name
self.overwrite = overwrite
self.encoder = encoder
self.schema = schema
def initialize(self):
self.db = lancedb.connect(self.lance_location)
existing_tables = self.db.table_names()
self.documents = [self.prep_documents(document) for document in self.documents]
if self.overwrite:
self.table = self.db.create_table(self.index_name, data=self.documents, mode='overwrite', schema=self.schema.to_arrow_schema())
else:
if self.index_name in existing_tables:
self.table = self.db.open_table(self.index_name)
self.table.add(self.documents)
else:
self.table = self.db.create_table(self.index_name, data=self.documents, schema=self.schema.to_arrow_schema())
def prep_documents(self, document):
lance_document = dict()
lance_document['text'] = document.text
lance_document['vector'] = self.encoder.encode(document.text)
lance_document['uuid'] = document.uuid
lance_document['title'] = document.title
lance_document['tags'] = document.tags
return lance_document
def insert(self, document):
document['vector'] = self.encoder.encode(document.text)
self.table.add(document)
def bulk_insert(self, create_vectors=False):
if create_vectors:
self.table.create_index(vector_column_name='vector', metric='cosine')
self.table.create_fts_index(field_names=['title','text'], replace=True)
return self.table
class IndexDocuments():
def __init__(self,field_mapping, source_file, index_name, overwrite):
self.field_mapping = field_mapping
self.source_file = source_file
self.index_name = index_name
self.overwrite = overwrite
def open_json(self):
with open(self.source_file, 'r') as f:
self.data = json.load(f)
print(self.data)
def open_csv(self):
self.data = pd.read_csv(self.source_file)
def create_document(self, document):
document = LanceDBDocument(document,
text=self.field_mapping['text'],
title=self.field_mapping['title'],
tags=self.field_mapping['tags'],
date=self.field_mapping['date'],
fields=list(document.keys()),
file_path=self.source_file
)
return document
def create_documents(self):
self.documents = [self.create_document(document) for document in self.data]
def ingest(self, overwrite=False):
# lance_path = Path(f'../indexes/lance')
lance_folder.mkdir(parents=True, exist_ok=True)
lance_ingest = LanceDBIngest(documents=self.documents,
lance_location=lance_folder,
# field_mapping=self.field_mapping,
index_name=self.index_name,
overwrite=self.overwrite,
encoder=encoder,
schema=LanceDBSchema384)
lance_ingest.initialize()
if len(self.documents) <= 256:
_table = lance_ingest.bulk_insert(create_vectors=False)
else:
_table = lance_ingest.bulk_insert(create_vectors=True)
sql_path = sqlite_folder.joinpath('documents.sqlite')
sqlite_ingest = SqlLiteIngest(documents=self.documents,
source_file=self.source_file,
db_location=sql_path,
index_name=self.index_name,
overwrite=self.overwrite)
sqlite_ingest.initialize()
sqlite_ingest.bulk_insert()
| [
"lancedb.connect",
"lancedb.pydantic.Vector"
] | [((216, 255), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['"""all-MiniLM-L6-v2"""'], {}), "('all-MiniLM-L6-v2')\n", (235, 255), False, 'from sentence_transformers import SentenceTransformer\n'), ((271, 295), 'pathlib.Path', 'Path', (['"""data/collections"""'], {}), "('data/collections')\n", (275, 295), False, 'from pathlib import Path\n'), ((310, 342), 'pathlib.Path', 'Path', (['"""data/config/indexes.yaml"""'], {}), "('data/config/indexes.yaml')\n", (314, 342), False, 'from pathlib import Path\n'), ((358, 373), 'pathlib.Path', 'Path', (['"""indexes"""'], {}), "('indexes')\n", (362, 373), False, 'from pathlib import Path\n'), ((390, 405), 'pathlib.Path', 'Path', (['"""indexes"""'], {}), "('indexes')\n", (394, 405), False, 'from pathlib import Path\n'), ((471, 492), 'pathlib.Path', 'Path', (['"""data/indexes/"""'], {}), "('data/indexes/')\n", (475, 492), False, 'from pathlib import Path\n'), ((5306, 5317), 'lancedb.pydantic.Vector', 'Vector', (['(384)'], {}), '(384)\n', (5312, 5317), False, 'from lancedb.pydantic import LanceModel, Vector, List\n'), ((5430, 5441), 'lancedb.pydantic.Vector', 'Vector', (['(512)'], {}), '(512)\n', (5436, 5441), False, 'from lancedb.pydantic import LanceModel, Vector, List\n'), ((3896, 3929), 'sqlite3.connect', 'sqlite3.connect', (['self.db_location'], {}), '(self.db_location)\n', (3911, 3929), False, 'import sqlite3\n'), ((5814, 5850), 'lancedb.connect', 'lancedb.connect', (['self.lance_location'], {}), '(self.lance_location)\n', (5829, 5850), False, 'import lancedb\n'), ((7670, 7699), 'pandas.read_csv', 'pd.read_csv', (['self.source_file'], {}), '(self.source_file)\n', (7681, 7699), True, 'import pandas as pd\n'), ((7583, 7595), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7592, 7595), False, 'import json\n'), ((1035, 1042), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1040, 1042), False, 'from uuid import uuid4\n'), ((4948, 4977), 'json.dumps', 'json.dumps', (['document.metadata'], {}), 
'(document.metadata)\n', (4958, 4977), False, 'import json\n')] |
import os
import urllib.request
import html2text
import predictionguard as pg
from langchain import PromptTemplate, FewShotPromptTemplate
from langchain.text_splitter import CharacterTextSplitter
from sentence_transformers import SentenceTransformer
import numpy as np
import lancedb
from lancedb.embeddings import with_embeddings
import pandas as pd
os.environ['PREDICTIONGUARD_TOKEN'] = "q1VuOjnffJ3NO2oFN8Q9m8vghYc84ld13jaqdF7E"
# Let's get the html off of a website.
fp = urllib.request.urlopen("file:////home/shaunak_joshi/gt/insuranceagent.html")
mybytes = fp.read()
html = mybytes.decode("utf8")
fp.close()
# And convert it to text.
h = html2text.HTML2Text()
h.ignore_links = True
text = h.handle(html)
# Clean things up just a bit.
text = text.split("Introduction")[1]
#print(text)
#text = text.split("Location, Location, Location")[0]
#print(text)
#print(type(text))
# Chunk the text into smaller pieces for injection into LLM prompts.
text_splitter = CharacterTextSplitter(chunk_size=700, chunk_overlap=50)
docs = text_splitter.split_text(text)
# Let's checkout some of the chunks!
#for i in range(0, 10):
# print("Chunk", str(i+1))
# print("----------------------------")
# print(docs[i])
# print("")
# Let's take care of some of the formatting so it doesn't conflict with our
# typical prompt template structure
docs = [x.replace('#', '-') for x in docs]
# Now we need to embed these documents and put them into a "vector store" or
# "vector db" that we will use for semantic search and retrieval.
# Embeddings setup
name="all-MiniLM-L12-v2"
model = SentenceTransformer(name)
def embed_batch(batch):
return [model.encode(sentence) for sentence in batch]
def embed(sentence):
return model.encode(sentence)
# LanceDB setup
os.mkdir(".lancedb")
uri = ".lancedb"
db = lancedb.connect(uri)
# Create a dataframe with the chunk ids and chunks
metadata = []
for i in range(len(docs)):
metadata.append([i,docs[i]])
doc_df = pd.DataFrame(metadata, columns=["chunk", "text"])
# Embed the documents
data = with_embeddings(embed_batch, doc_df)
# Create the DB table and add the records.
db.create_table("linux", data=data)
table = db.open_table("linux")
table.add(data=data)
# Let's try to match a query to one of our documents.
#message = "What plays a crucial role in deciding insurance policies?"
#results = table.search(embed(message)).limit(5).to_pandas()
#print(results.head())
# Now let's augment our Q&A prompt with this external knowledge on-the-fly!!!
template = """### Instruction:
Read the below input context and respond with a short answer to the given question. Use only the information in the bel>
### Input:
Context: {context}
Question: {question}
### Response:
"""
qa_prompt = PromptTemplate(
input_variables=["context", "question"],
template=template,
)
def rag_answer(message):
# Search the for relevant context
results = table.search(embed(message)).limit(5).to_pandas()
results.sort_values(by=['_distance'], inplace=True, ascending=True)
doc_use = results['text'].values[0]
# Augment the prompt with the context
prompt = qa_prompt.format(context=doc_use, question=message)
# Get a response
result = pg.Completion.create(
model="Nous-Hermes-Llama2-13B",
prompt=prompt
)
return result['choices'][0]['text']
response = rag_answer("A house has been destroyed by a tornado and also has been set on fire. The water doesn't work but the gas lines are fine. The area the house is in is notorious for crime. It is built in an earthquake prone zone. There are cracks in the walls and it is quite old. Based on this information, generate three insights about the type of insurance policy the house will require and any other thing you find important. Keep the insights under 20 words each.")
print('')
print("RESPONSE:", response)
| [
"lancedb.connect",
"lancedb.embeddings.with_embeddings"
] | [((670, 691), 'html2text.HTML2Text', 'html2text.HTML2Text', ([], {}), '()\n', (689, 691), False, 'import html2text\n'), ((1001, 1056), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(700)', 'chunk_overlap': '(50)'}), '(chunk_size=700, chunk_overlap=50)\n', (1022, 1056), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1627, 1652), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['name'], {}), '(name)\n', (1646, 1652), False, 'from sentence_transformers import SentenceTransformer\n'), ((1818, 1838), 'os.mkdir', 'os.mkdir', (['""".lancedb"""'], {}), "('.lancedb')\n", (1826, 1838), False, 'import os\n'), ((1863, 1883), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1878, 1883), False, 'import lancedb\n'), ((2025, 2074), 'pandas.DataFrame', 'pd.DataFrame', (['metadata'], {'columns': "['chunk', 'text']"}), "(metadata, columns=['chunk', 'text'])\n", (2037, 2074), True, 'import pandas as pd\n'), ((2108, 2144), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_batch', 'doc_df'], {}), '(embed_batch, doc_df)\n', (2123, 2144), False, 'from lancedb.embeddings import with_embeddings\n'), ((2827, 2901), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['context', 'question']", 'template': 'template'}), "(input_variables=['context', 'question'], template=template)\n", (2841, 2901), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((3294, 3361), 'predictionguard.Completion.create', 'pg.Completion.create', ([], {'model': '"""Nous-Hermes-Llama2-13B"""', 'prompt': 'prompt'}), "(model='Nous-Hermes-Llama2-13B', prompt=prompt)\n", (3314, 3361), True, 'import predictionguard as pg\n')] |
from lancedb.pydantic import LanceModel, Vector
from lancedb.embeddings import EmbeddingFunctionRegistry
registry = EmbeddingFunctionRegistry.get_instance()
func = registry.get("openai").create()
class Questions(LanceModel):
question: str = func.SourceField()
vector: Vector(func.ndims()) = func.VectorField()
| [
"lancedb.embeddings.EmbeddingFunctionRegistry.get_instance"
] | [((117, 157), 'lancedb.embeddings.EmbeddingFunctionRegistry.get_instance', 'EmbeddingFunctionRegistry.get_instance', ([], {}), '()\n', (155, 157), False, 'from lancedb.embeddings import EmbeddingFunctionRegistry\n')] |
"""
---------Parameters to be changed between different devices----------
1. Models and directory
2. Input and output directory
3. Device selection (CPU/GPU) - LINE 46 AND 300
Reference:
1. similarity_search_by_vector(): https://python.langchain.com/docs/modules/data_connection/vectorstores/
2. LanceDB Code documentation Q&A bot example with LangChain: https://lancedb.github.io/lancedb/notebooks/code_qa_bot/
3. LanceDB embedding functions: https://lancedb.github.io/lancedb/embeddings/embedding_functions/
4. LanceDB available models: https://lancedb.github.io/lancedb/embeddings/default_embedding_functions/#sentence-transformers
5. https://colab.research.google.com/github/lancedb/vectordb-recipes/blob/main/examples/Code-Documentation-QA-Bot/main.ipynb
"""
import os
import logging
import time
from collections import Counter
from collections import defaultdict
import csv
import json
import torch
import nltk
from nltk.tokenize import sent_tokenize
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
import string
from heapq import nlargest
import torch.nn as nn
from pathlib import Path
from langchain_community.llms import GPT4All
from langchain_community.vectorstores import Chroma
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.huggingface import HuggingFaceInstructEmbeddings
from transformers import set_seed
from langchain.prompts import PromptTemplate
from langchain.chains import RetrievalQA
import lancedb
from lancedb.pydantic import LanceModel, Vector
from lancedb.embeddings import get_registry
from lancedb.embeddings import EmbeddingFunctionRegistry
# from langchain_community.vectorstores import LanceDB
print("LanceDB config.")
registry = EmbeddingFunctionRegistry.get_instance()
func = registry.get("sentence-transformers").create(device="cpu")
def process_story_and_store_chunks(data, db_dir, embedding_model, chunk_size, overlap_percentage, embedding_function_name):
storyId = data['storyId']
story_text = data['text']
# Ensure the database directory exists
if not os.path.exists(db_dir):
os.makedirs(db_dir, exist_ok=True)
# Define a unique directory name for the story with the specified chunk size and overlap
unique_dir_name = f"story_{storyId}_{embedding_function_name}_chunk_{chunk_size}_overlap_{int(chunk_size * overlap_percentage)}"
story_db_dir = f"{db_dir}/{unique_dir_name}"
if not os.path.exists(story_db_dir):
os.makedirs(story_db_dir, exist_ok=True)
# Connect to Lancedb using the directory for this specific story, chunk size, and overlap
db = lancedb.connect(story_db_dir)
# Initialize the text splitter with the specified chunk size and overlap
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=int(chunk_size * overlap_percentage))
chunk_splits = text_splitter.split_text(story_text)
# Generate embeddings for each chunk
# chunk_embs = Chroma.from_texts(texts=chunk_splits, embedding=embedding_model)
chunk_embs = embedding_model.embed_documents(chunk_splits)
# Prepare the data for storage
chunks_embs_data = [
{
"storyId": storyId,
"chunk_size": chunk_size,
"overlap_size": overlap_percentage,
"embedding_function": embedding_function_name,
"vector": c_emb,
"chunk_text": chunk_splits[i]
}
for i, c_emb in enumerate(chunk_embs)
]
# Create a new table for this story and configuration, and store the data
db.create_table(
"chunks",
data=chunks_embs_data,
mode="overwrite" # Each story and configuration combination gets its own table
)
return chunk_splits # Return the last used database connection for further operations if necessary
# Function to normalize and stem text
def normalize_and_stem(text):
stemmer = PorterStemmer()
tokens = nltk.word_tokenize(text.lower()) # Normalize to lowercase and tokenize
stemmed_tokens = [stemmer.stem(token) for token in tokens if token not in string.punctuation and token not in stopwords.words('english')] # Stemming and removing punctuation
return ' '.join(stemmed_tokens)
# Modified function to calculate the token-wise F1 score and return precision and recall
def token_eval(predicted, actual):
predicted_tokens = predicted.split()
actual_tokens = actual.split()
common_tokens = Counter(predicted_tokens) & Counter(actual_tokens)
num_same = sum(common_tokens.values())
if num_same == 0 and len(predicted_tokens) == 0 and len(actual_tokens) == 0:
# Case where both predicted and actual answers are empty
return 1.0, 1.0, 1.0, 1 # Perfect score
elif num_same == 0:
return 0, 0, 0, 0 # Return zero precision, recall, F1 score, and exact match
precision = 1.0 * num_same / len(predicted_tokens)
recall = 1.0 * num_same / len(actual_tokens)
f1 = (2 * precision * recall) / (precision + recall) if (precision + recall) > 0 else 0
em = int(predicted.strip() == actual.strip()) # Exact match score
return f1, precision, recall, em
# # Modified function to calculate Exact Match (EM) score
# def calculate_em(predicted, actual):
# return int(predicted == actual)
# # Modified function to calculate the token-wise F1 score and return precision and recall
# def calculate_token_f1(predicted, actual):
# predicted_tokens = predicted.split()
# actual_tokens = actual.split()
# common_tokens = Counter(predicted_tokens) & Counter(actual_tokens)
# num_same = sum(common_tokens.values())
# if num_same == 0:
# return 0, 0, 0 # Return zero precision, recall, and F1 score
# precision = 1.0 * num_same / len(predicted_tokens)
# recall = 1.0 * num_same / len(actual_tokens)
# f1 = (2 * precision * recall) / (precision + recall)
# return f1, precision, recall
def newsqa_loop(data, llm, output_csv_path, output_log_path, max_stories, chunk_sizes, overlap_percentages,
instruct_embedding_model_name, instruct_embedding_model_kwargs,
instruct_embedding_encode_kwargs, QA_CHAIN_PROMPT, db_dir, embedding_function_name):
with open(output_csv_path, 'w', newline='') as file:
writer = csv.writer(file)
writer.writerow(['Chunk_size', 'Chunk_Overlap', 'Time', 'Story Number', 'Question Number', 'EM', 'Precision', 'Recall', 'F1', 'Error'])
if embedding_function_name == 'hf_emb':
# Embedding for story sentences
story_embs = HuggingFaceInstructEmbeddings(
model_name=instruct_embedding_model_name,
model_kwargs=instruct_embedding_model_kwargs,
encode_kwargs=instruct_embedding_encode_kwargs,
embed_instruction="Use the following pieces of context to answer the question at the end:"
)
# Embedding for questions
query_embs = HuggingFaceInstructEmbeddings(
model_name=instruct_embedding_model_name,
model_kwargs=instruct_embedding_model_kwargs,
encode_kwargs=instruct_embedding_encode_kwargs,
query_instruction="How does this information relate to the question?"
)
else:
print("Unsupported embedding functions!")
start_time = time.time()
for chunk_size in chunk_sizes:
print(f"\n{time.time()-start_time} Processing chunk size {chunk_size}:")
last_time = time.time()
for overlap_percentage in overlap_percentages:
actual_overlap = int(chunk_size * overlap_percentage)
print(f"\n{time.time()-start_time}\t{time.time()-last_time}\tOverlap [{overlap_percentage}] {actual_overlap}")
# text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=actual_overlap)
for i, story in enumerate(data['data']):
if i >= max_stories:
break
now_time = time.time()
print(f"\n{now_time - start_time}\t{now_time - last_time}\t\tstory {i + 1}: ", end='')
last_time = now_time
# Process the story and store its data in a separate table/database
story_db_dir = f"{db_dir}/{story['storyId']}" # Adjust as necessary for your directory structure
chunk_splits = process_story_and_store_chunks(
data=story,
db_dir=story_db_dir,
embedding_model=story_embs,
chunk_size=chunk_size,
overlap_percentage=overlap_percentage,
embedding_function_name=embedding_function_name
)
chunk_vectorstore = Chroma.from_texts(texts=chunk_splits, embedding=story_embs)
# Initialize the QA chain with the vectorstore as the retriever
qa_chain = RetrievalQA.from_chain_type(
llm,
retriever=chunk_vectorstore.as_retriever(),
chain_type="stuff",
verbose=False,
chain_type_kwargs={
"prompt": QA_CHAIN_PROMPT,
"verbose": False},
return_source_documents=False
)
for j, question_data in enumerate(story['questions']):
if question_data['isAnswerAbsent']:
continue # Skip this question because an answer is absent
question = question_data['q']
question_emb = query_embs.embed_documents([question])[0]
# Retrieve similar sentences
docs = chunk_vectorstore.similarity_search_by_vector(question_emb)
context_for_qa = ""
for doc in docs:
context_for_qa += doc.page_content + '.. '
# Check if there is a consensus answer and extract it
consensus = question_data['consensus']
if 's' in consensus and 'e' in consensus:
actual_answer = story['text'][consensus['s']:consensus['e']]
else:
continue # No consensus answer, skip to the next question
# Get the prediction from the model
result = qa_chain({"context": context_for_qa, "query": question})
# print(context_for_qa)
# Extract and process the predicted answer
predicted_answer = result['result'] if isinstance(result['result'], str) else ""
# Normalize and stem the predicted and actual answers
normalized_predicted_answer = normalize_and_stem(predicted_answer)
normalized_actual_answer = normalize_and_stem(actual_answer)
# Calculate the F1 score, precision, and recall using normalized and stemmed answers
# f1_score_value, precision, recall, em_score = token_eval(normalized_predicted_answer, normalized_actual_answer)
# print("Calling token_eval with:", normalized_predicted_answer, normalized_actual_answer)
result = token_eval(normalized_predicted_answer, normalized_actual_answer)
# print("token_eval returned:", result)
f1_score_value, precision, recall, em_score = result
# Write the scores to the file
error = 1 if 'error' in normalized_predicted_answer else 0
if error==0:
writer.writerow([chunk_size, overlap_percentage, time.time() - start_time, i, j, em_score, precision, recall, f1_score_value, error])
with open(output_log_path, 'a') as details_file:
details_file.write(f"Chunk Size: {chunk_size}\n")
details_file.write(f"Overlap: {overlap_percentage}\n")
details_file.write(f"Story: {i}\n")
details_file.write(f"Question: {j}\n")
details_file.write(f"Correct Answer: {actual_answer}\n")
details_file.write(f"Normalized Actual Answer: {normalized_actual_answer}\n")
details_file.write(f"Predicted Answer: {predicted_answer}\n")
details_file.write(f"Normalized Predicted Answer: {normalized_predicted_answer}\n")
details_file.write(f"Time: {time.time() - start_time}\n")
details_file.write(f"EM Score: {em_score}\n")
details_file.write(f"Precision: {precision}\n")
details_file.write(f"Recall: {recall}\n")
details_file.write(f"F1: {f1_score_value}\n")
details_file.write("----------------------------------------\n")
# Cleanup
del qa_chain
#del chunk_embs
# End of the story loop
# del text_splitter
############## Running Parameters ##############
max_stories = 50
random_seed = 123
db_dir = 'C:/NewsQA/lancedb'
embedding_function_name = 'hf_emb'
chunk_sizes = [100, 200, 400]
# chunk_sizes = [50,25]
# overlap_percentages = [0, 0.1, 0.2] # Expressed as percentages (0.1 = 10%)
overlap_percentages = [0, 0.1]
# model_location = "C:/Users/24075/AppData/Local/nomic.ai/GPT4All/ggml-model-gpt4all-falcon-q4_0.bin"
model_location = "C:/NewsQA/GPT4ALL/mistral-7b-instruct-v0.1.Q4_0.gguf"
# model_location = "/Users/wk77/Library/CloudStorage/OneDrive-DrexelUniversity/Documents/data/gpt4all/models/gpt4all-falcon-q4_0.gguf"
# model_location = "/Users/wk77/Documents/data/gpt4all-falcon-newbpe-q4_0.gguf"
# model_location = "/Users/wk77/Documents/data/mistral-7b-instruct-v0.1.Q4_0.gguf"
input_file_path='C:/NewsQA/combined-newsqa-data-story2.json'
# input_file_path = "/Users/wk77/Documents/data/newsqa-data-v1/newsqa-data-v1.csv"
# input_file_path = "/Users/wk77/Documents/data/newsqa-data-v1/combined-newsqa-data-v1.json"
# input_file_path = "/Users/wk77/Documents/git/DeepDelight/Thread2/data/combined-newsqa-data-story1.json"
output_csv_path = '../results/combined_chunks2.csv'
# output_file_path = "/Users/wk77/Documents/data/newsqa-data-v1/story1_scores_test.csv"
# output_file_path = "/Users/wk77/Documents/data/newsqa-data-v1/combined_scores_test.csv"
output_log_path = '../results/combined_chunks2.log'
# # Initialize PairwiseDistance
# pdist = nn.PairwiseDistance(p=2.0, eps=1e-06)
##################################################
# logging.basicConfig(level=logging.INFO)
# logging.basicConfig(level=logging.WARNING) # This will show only warnings and errors
logging.basicConfig(level=logging.ERROR)
print("Loading data.")
data = json.loads(Path(input_file_path).read_text())
print("Setting template.")
template_original = """
Based on the following information only:
{context}
{question} Please provide the answer in as few words as possible and please do NOT repeat any word in the question, i.e. "{question}".
Answer:
"""
QA_CHAIN_PROMPT_ORIGINAL = PromptTemplate.from_template(template_original)
print("Random seeding.")
set_seed(random_seed)
# Results storage
f1_results = defaultdict(list)
em_results = defaultdict(list)
text_results = []
# Initialize the language model and the QA chain
print("Loading LLM.")
llm = GPT4All(model=model_location, max_tokens=2048, seed=random_seed)
print("Preparing Parameters.")
# HuggingFace Instruct Embeddings parameters
instruct_embedding_model_name = "sentence-transformers/multi-qa-MiniLM-L6-cos-v1"
instruct_embedding_model_kwargs = {'device': 'cpu'}
# instruct_embedding_model_kwargs = {'device': 'mps'}
instruct_embedding_encode_kwargs = {'normalize_embeddings': True}
# The following code would iterate over the stories and questions to calculate the scores
start_time = time.time()
print(f"{start_time} Started.")
# Main Function Execution
print("Processing.")
newsqa_loop(data, llm, output_csv_path, output_log_path, max_stories, chunk_sizes, overlap_percentages,
instruct_embedding_model_name, instruct_embedding_model_kwargs, instruct_embedding_encode_kwargs,
QA_CHAIN_PROMPT_ORIGINAL, db_dir, embedding_function_name)
| [
"lancedb.connect",
"lancedb.embeddings.EmbeddingFunctionRegistry.get_instance"
] | [((1778, 1818), 'lancedb.embeddings.EmbeddingFunctionRegistry.get_instance', 'EmbeddingFunctionRegistry.get_instance', ([], {}), '()\n', (1816, 1818), False, 'from lancedb.embeddings import EmbeddingFunctionRegistry\n'), ((15779, 15819), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.ERROR'}), '(level=logging.ERROR)\n', (15798, 15819), False, 'import logging\n'), ((16333, 16380), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['template_original'], {}), '(template_original)\n', (16361, 16380), False, 'from langchain.prompts import PromptTemplate\n'), ((16410, 16431), 'transformers.set_seed', 'set_seed', (['random_seed'], {}), '(random_seed)\n', (16418, 16431), False, 'from transformers import set_seed\n'), ((16467, 16484), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (16478, 16484), False, 'from collections import defaultdict\n'), ((16499, 16516), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (16510, 16516), False, 'from collections import defaultdict\n'), ((16618, 16682), 'langchain_community.llms.GPT4All', 'GPT4All', ([], {'model': 'model_location', 'max_tokens': '(2048)', 'seed': 'random_seed'}), '(model=model_location, max_tokens=2048, seed=random_seed)\n', (16625, 16682), False, 'from langchain_community.llms import GPT4All\n'), ((17128, 17139), 'time.time', 'time.time', ([], {}), '()\n', (17137, 17139), False, 'import time\n'), ((2689, 2718), 'lancedb.connect', 'lancedb.connect', (['story_db_dir'], {}), '(story_db_dir)\n', (2704, 2718), False, 'import lancedb\n'), ((4033, 4048), 'nltk.stem.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (4046, 4048), False, 'from nltk.stem import PorterStemmer\n'), ((2138, 2160), 'os.path.exists', 'os.path.exists', (['db_dir'], {}), '(db_dir)\n', (2152, 2160), False, 'import os\n'), ((2171, 2205), 'os.makedirs', 'os.makedirs', (['db_dir'], {'exist_ok': '(True)'}), '(db_dir, exist_ok=True)\n', (2182, 2205), False, 
'import os\n'), ((2502, 2530), 'os.path.exists', 'os.path.exists', (['story_db_dir'], {}), '(story_db_dir)\n', (2516, 2530), False, 'import os\n'), ((2541, 2581), 'os.makedirs', 'os.makedirs', (['story_db_dir'], {'exist_ok': '(True)'}), '(story_db_dir, exist_ok=True)\n', (2552, 2581), False, 'import os\n'), ((4579, 4604), 'collections.Counter', 'Counter', (['predicted_tokens'], {}), '(predicted_tokens)\n', (4586, 4604), False, 'from collections import Counter\n'), ((4607, 4629), 'collections.Counter', 'Counter', (['actual_tokens'], {}), '(actual_tokens)\n', (4614, 4629), False, 'from collections import Counter\n'), ((6468, 6484), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (6478, 6484), False, 'import csv\n'), ((7582, 7593), 'time.time', 'time.time', ([], {}), '()\n', (7591, 7593), False, 'import time\n'), ((6752, 7023), 'langchain.embeddings.huggingface.HuggingFaceInstructEmbeddings', 'HuggingFaceInstructEmbeddings', ([], {'model_name': 'instruct_embedding_model_name', 'model_kwargs': 'instruct_embedding_model_kwargs', 'encode_kwargs': 'instruct_embedding_encode_kwargs', 'embed_instruction': '"""Use the following pieces of context to answer the question at the end:"""'}), "(model_name=instruct_embedding_model_name,\n model_kwargs=instruct_embedding_model_kwargs, encode_kwargs=\n instruct_embedding_encode_kwargs, embed_instruction=\n 'Use the following pieces of context to answer the question at the end:')\n", (6781, 7023), False, 'from langchain.embeddings.huggingface import HuggingFaceInstructEmbeddings\n'), ((7160, 7410), 'langchain.embeddings.huggingface.HuggingFaceInstructEmbeddings', 'HuggingFaceInstructEmbeddings', ([], {'model_name': 'instruct_embedding_model_name', 'model_kwargs': 'instruct_embedding_model_kwargs', 'encode_kwargs': 'instruct_embedding_encode_kwargs', 'query_instruction': '"""How does this information relate to the question?"""'}), "(model_name=instruct_embedding_model_name,\n model_kwargs=instruct_embedding_model_kwargs, 
encode_kwargs=\n instruct_embedding_encode_kwargs, query_instruction=\n 'How does this information relate to the question?')\n", (7189, 7410), False, 'from langchain.embeddings.huggingface import HuggingFaceInstructEmbeddings\n'), ((7747, 7758), 'time.time', 'time.time', ([], {}), '()\n', (7756, 7758), False, 'import time\n'), ((15865, 15886), 'pathlib.Path', 'Path', (['input_file_path'], {}), '(input_file_path)\n', (15869, 15886), False, 'from pathlib import Path\n'), ((4250, 4276), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (4265, 4276), False, 'from nltk.corpus import stopwords\n'), ((8318, 8329), 'time.time', 'time.time', ([], {}), '()\n', (8327, 8329), False, 'import time\n'), ((9190, 9249), 'langchain_community.vectorstores.Chroma.from_texts', 'Chroma.from_texts', ([], {'texts': 'chunk_splits', 'embedding': 'story_embs'}), '(texts=chunk_splits, embedding=story_embs)\n', (9207, 9249), False, 'from langchain_community.vectorstores import Chroma\n'), ((7660, 7671), 'time.time', 'time.time', ([], {}), '()\n', (7669, 7671), False, 'import time\n'), ((7932, 7943), 'time.time', 'time.time', ([], {}), '()\n', (7941, 7943), False, 'import time\n'), ((7958, 7969), 'time.time', 'time.time', ([], {}), '()\n', (7967, 7969), False, 'import time\n'), ((12533, 12544), 'time.time', 'time.time', ([], {}), '()\n', (12542, 12544), False, 'import time\n'), ((13468, 13479), 'time.time', 'time.time', ([], {}), '()\n', (13477, 13479), False, 'import time\n')] |
import logging
import os
import time
from functools import wraps
from pathlib import Path
from random import random, seed
import lancedb
import pyarrow as pa
import pyarrow.parquet as pq
import typer
from lancedb.db import LanceTable
# Logging verbosity is configurable via the LOG_LEVEL env var (default "info").
log_level = os.environ.get("LOG_LEVEL", "info")
logging.basicConfig(
    level=getattr(logging, log_level.upper()),
    format="%(asctime)s %(levelname)s | %(processName)s %(name)s | %(message)s",
)
logger = logging.getLogger(__name__)
# Typer CLI application; subcommands are registered below via @app.command().
app = typer.Typer()
# Dimensionality of the randomly generated benchmark vectors.
V_SIZE = 256
DB_PATH = "benchmark"
DB_TABLE = "vectors"
# Cast env overrides to int: os.environ values are strings, but these are used
# as typed typer defaults (n: int = ...) and arithmetic sizes downstream.
DB_TABLE_SIZE = int(os.environ.get("DB_TABLE_SIZE", 100000))
Q_PATH = "query"
Q_SIZE = int(os.environ.get("Q_SIZE", 100))
# Parquet artifact names for the query vectors and the two result sets.
Q_V = "v.parquet"
Q_KNN = "knn.parquet"
Q_ANN = "ann.parquet"
def timeit(func):
    """Decorator: log how long each call to *func* takes, at INFO level."""

    @wraps(func)
    def timed(*args, **kwargs):
        started = time.perf_counter()
        out = func(*args, **kwargs)
        elapsed = time.perf_counter() - started
        logger.info(f"{func.__name__} {args} done in {elapsed:.2f} secs")
        return out

    return timed
def get_db():
    """Connect to LanceDB: locally under $DATA when AZURE=0, else on Azure blob storage."""
    if int(os.environ["AZURE"]) != 0:
        return lancedb.connect(
            f"az://{os.environ['AZURE_STORAGE_CONTAINER']}/{DB_PATH}"
        )
    data_dir = Path(os.environ["DATA"])
    data_dir.mkdir(parents=True, exist_ok=True)
    return lancedb.connect(data_dir / DB_PATH)
def open_table(table: str):
    """Open the named LanceDB table on the configured database."""
    db = get_db()
    return LanceTable(db, table)
def get_q(what="v"):
tables = {
"v": Q_V,
"knn": Q_KNN,
"ann": Q_ANN,
}
f = Path(os.environ["DATA"]) / Q_PATH
f.mkdir(parents=True, exist_ok=True)
return f / tables[what]
def gen_data(n: int, start=1):
    """Yield n rows of {"id", "vector"} with fresh random V_SIZE-dim vectors."""
    seed()
    for ident in range(start, start + n):
        vec = [random() for _ in range(V_SIZE)]
        yield {"id": ident, "vector": vec}
@app.command()
def db_init(n: int = DB_TABLE_SIZE):
    """Create the benchmark table populated with n random vectors."""
    rows = list(gen_data(n))
    get_db().create_table(DB_TABLE, data=rows)
@app.command()
def db_info():
    """Log the first rows of the benchmark table (debug level)."""
    tbl = open_table(DB_TABLE)
    logger.debug(tbl.head(10))
@app.command()
def db_add(n: int, start: int):
    """Append n more random vectors, with ids beginning at `start`."""
    rows = list(gen_data(n, start=start))
    open_table(DB_TABLE).add(rows)
@app.command()
def q_init(n: int = Q_SIZE):
    """Generate n random query vectors and persist them as parquet."""
    queries = pa.Table.from_pylist(list(gen_data(n)))
    pq.write_table(queries, get_q())
@app.command()
def q_info():
    """Log the stored query vectors (debug level)."""
    logger.debug(pq.read_table(get_q()))
@timeit
def q_process(what: str):
    """Run every stored query against the table and persist the top-10 neighbour ids."""
    table = open_table(DB_TABLE)
    results = []
    for v in pq.read_table(get_q()).to_pylist():
        hits = (
            table.search(v["vector"])
            .limit(10)
            .select(["id"])
            .to_arrow()["id"]
            .to_pylist()
        )
        results.append({"id": v["id"], "neighbours": hits})
    pq.write_table(pa.Table.from_pylist(results), get_q(what))
@app.command()
@timeit
def create_index():
    """Build an ANN index on the benchmark table."""
    table = open_table(DB_TABLE)
    table.create_index(num_sub_vectors=8)  # TODO :avoid hard coded params
@app.command()
def q_knn():
    """Exact (brute-force) nearest-neighbour query run."""
    q_process("knn")
@app.command()
def q_ann():
    """Approximate nearest-neighbour run: build the index first, then query."""
    create_index()
    q_process("ann")
if __name__ == "__main__":
app()
| [
"lancedb.connect"
] | [((248, 283), 'os.environ.get', 'os.environ.get', (['"""LOG_LEVEL"""', '"""info"""'], {}), "('LOG_LEVEL', 'info')\n", (262, 283), False, 'import os\n'), ((446, 473), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (463, 473), False, 'import logging\n'), ((480, 493), 'typer.Typer', 'typer.Typer', ([], {}), '()\n', (491, 493), False, 'import typer\n'), ((569, 608), 'os.environ.get', 'os.environ.get', (['"""DB_TABLE_SIZE"""', '(100000)'], {}), "('DB_TABLE_SIZE', 100000)\n", (583, 608), False, 'import os\n'), ((636, 665), 'os.environ.get', 'os.environ.get', (['"""Q_SIZE"""', '(100)'], {}), "('Q_SIZE', 100)\n", (650, 665), False, 'import os\n'), ((753, 764), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (758, 764), False, 'from functools import wraps\n'), ((1693, 1699), 'random.seed', 'seed', ([], {}), '()\n', (1697, 1699), False, 'from random import random, seed\n'), ((814, 833), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (831, 833), False, 'import time\n'), ((892, 911), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (909, 911), False, 'import time\n'), ((1134, 1158), 'pathlib.Path', 'Path', (["os.environ['DATA']"], {}), "(os.environ['DATA'])\n", (1138, 1158), False, 'from pathlib import Path\n'), ((1219, 1247), 'lancedb.connect', 'lancedb.connect', (['(f / DB_PATH)'], {}), '(f / DB_PATH)\n', (1234, 1247), False, 'import lancedb\n'), ((1273, 1347), 'lancedb.connect', 'lancedb.connect', (['f"""az://{os.environ[\'AZURE_STORAGE_CONTAINER\']}/{DB_PATH}"""'], {}), '(f"az://{os.environ[\'AZURE_STORAGE_CONTAINER\']}/{DB_PATH}")\n', (1288, 1347), False, 'import lancedb\n'), ((1553, 1577), 'pathlib.Path', 'Path', (["os.environ['DATA']"], {}), "(os.environ['DATA'])\n", (1557, 1577), False, 'from pathlib import Path\n'), ((1778, 1786), 'random.random', 'random', ([], {}), '()\n', (1784, 1786), False, 'from random import random, seed\n')] |
import argparse
import os
import shutil
from functools import lru_cache
from pathlib import Path
from typing import Any, Iterator
import srsly
from codetiming import Timer
from config import Settings
from dotenv import load_dotenv
from rich import progress
from schemas.wine import LanceModelWine, Wine
from sentence_transformers import SentenceTransformer
import lancedb
from lancedb.pydantic import pydantic_to_schema
from lancedb.table import Table
# Load environment variables (model checkpoint, etc.) from a local .env file.
load_dotenv()
# Custom types
JsonBlob = dict[str, Any]  # one line-delimited JSON record
# NOTE(review): this shadows the builtin FileNotFoundError — any
# `except FileNotFoundError` in this module catches only this class, not the
# builtin raised by file operations. Consider renaming (e.g. DataFileNotFoundError).
class FileNotFoundError(Exception):
    pass
@lru_cache()
def get_settings():
    """Return the application Settings, cached so the .env file is read only once."""
    return Settings()
def chunk_iterable(item_list: list[JsonBlob], chunksize: int) -> Iterator[list[JsonBlob]]:
    """Yield successive `chunksize`-sized slices of `item_list` (last may be shorter)."""
    start = 0
    while start < len(item_list):
        yield item_list[start : start + chunksize]
        start += chunksize
def get_json_data(data_dir: Path, filename: str) -> list[JsonBlob]:
    """Get all line-delimited json files (.jsonl) from a directory with a given prefix"""
    file_path = data_dir / filename
    if not file_path.is_file():
        # File may not have been uncompressed yet so try to do that first
        # NOTE(review): this reads `file_path` itself, which we just determined
        # does not exist — presumably srsly resolves/uncompresses a sibling
        # .gz on the fly; confirm, otherwise this branch always raises.
        data = srsly.read_gzip_jsonl(file_path)
        # This time if it isn't there it really doesn't exist
        if not file_path.is_file():
            # Raises the module-local FileNotFoundError (shadows the builtin).
            raise FileNotFoundError(f"No valid .jsonl file found in `{data_dir}`")
    else:
        data = srsly.read_gzip_jsonl(file_path)
    return data
def validate(
    data: list[JsonBlob],
    exclude_none: bool = False,
) -> list[JsonBlob]:
    """Parse each raw record through the Wine model and return plain dicts."""
    validated: list[JsonBlob] = []
    for item in data:
        validated.append(Wine(**item).model_dump(exclude_none=exclude_none))
    return validated
def embed_func(batch: list[str], model) -> list[list[float]]:
    """Encode each sentence (lower-cased) with the given sentence-transformer model."""
    encoded = []
    for sentence in batch:
        encoded.append(model.encode(sentence.lower()))
    return encoded
def vectorize_text(data: list[JsonBlob]) -> list[LanceModelWine] | None:
    """Embed each record's `to_vectorize` text and attach it under the "vector" key.

    Returns None (after logging the failed id range) if the merge step fails.
    """
    # Load a sentence transformer model for semantic similarity from a specified checkpoint
    model_id = get_settings().embedding_model_checkpoint
    assert model_id, "Invalid embedding model checkpoint specified in .env file"
    model = SentenceTransformer(model_id)

    ids = [item["id"] for item in data]
    texts = [text.get("to_vectorize") for text in data]
    vectors = embed_func(texts, model)
    try:
        return [{**d, "vector": vector} for d, vector in zip(data, vectors)]
    except Exception as e:
        print(f"{e}: Failed to add ID range {min(ids)}-{max(ids)}")
        return None
def embed_batches(tbl: Table, validated_data: list[JsonBlob]) -> Table:
    """Ingest vector embeddings in batches for ANN index"""
    # NOTE(review): CHUNKSIZE is a module-level global assigned in the
    # __main__ block — this function fails with NameError if imported and
    # called from elsewhere. Consider making it a parameter.
    chunked_data = chunk_iterable(validated_data, CHUNKSIZE)
    print(f"Adding vectors to table for ANN index...")
    # Add rich progress bar
    with progress.Progress(
        "[progress.description]{task.description}",
        progress.BarColumn(),
        "[progress.percentage]{task.percentage:>3.0f}%",
        progress.TimeElapsedColumn(),
    ) as prog:
        overall_progress_task = prog.add_task(
            "Starting vectorization...", total=len(validated_data) // CHUNKSIZE
        )
        # Each chunk is embedded (sentence-transformer) and appended to the table.
        for chunk in chunked_data:
            batch = vectorize_text(chunk)
            prog.update(overall_progress_task, advance=1)
            tbl.add(batch, mode="append")
def main(tbl: Table, data: list[JsonBlob]) -> None:
    """Generate sentence embeddings and create ANN and FTS indexes.

    Steps: validate the raw records, embed + insert them in batches, then
    build the vector (ANN) and full-text (FTS) indexes, timing each phase.
    """
    with Timer(
        name="Data validation in pydantic",
        text="Validated data using Pydantic in {:.4f} sec",
    ):
        validated_data = validate(data, exclude_none=False)
    with Timer(
        name="Insert vectors in batches",
        text="Created sentence embeddings in {:.4f} sec",
    ):
        embed_batches(tbl, validated_data)
        print(f"Finished inserting {len(tbl)} vectors into LanceDB table")
    with Timer(name="Create ANN index", text="Created ANN index in {:.4f} sec"):
        print("Creating ANN index...")
        # Creating IVF-PQ index for now, as we eagerly await DiskANN
        # Choose num partitions as a power of 2 that's closest to len(dataset) // 5000
        # In this case, we have 130k datapoints, so the nearest power of 2 is 130000//5000 ~ 32)
        tbl.create_index(metric="cosine", num_partitions=4, num_sub_vectors=32)
    with Timer(name="Create FTS index", text="Created FTS index in {:.4f} sec"):
        # Create a full-text search index via Tantivy (which implements Lucene + BM25 in Rust)
        tbl.create_fts_index(["to_vectorize"])
if __name__ == "__main__":
# fmt: off
parser = argparse.ArgumentParser("Bulk index database from the wine reviews JSONL data")
parser.add_argument("--limit", "-l", type=int, default=0, help="Limit the size of the dataset to load for testing purposes")
parser.add_argument("--chunksize", type=int, default=1000, help="Size of each chunk to break the dataset into before processing")
parser.add_argument("--filename", type=str, default="winemag-data-130k-v2.jsonl.gz", help="Name of the JSONL zip file to use")
args = vars(parser.parse_args())
# fmt: on
LIMIT = args["limit"]
DATA_DIR = Path(__file__).parents[1] / "data"
FILENAME = args["filename"]
CHUNKSIZE = args["chunksize"]
data = list(get_json_data(DATA_DIR, FILENAME))
assert data, "No data found in the specified file"
data = data[:LIMIT] if LIMIT > 0 else data
DB_NAME = "./winemag"
TABLE = "wines"
if os.path.exists(DB_NAME):
shutil.rmtree(DB_NAME)
db = lancedb.connect(DB_NAME)
try:
tbl = db.create_table(TABLE, schema=pydantic_to_schema(LanceModelWine), mode="create")
except OSError:
tbl = db.open_table(TABLE)
main(tbl, data)
print("Finished execution!")
| [
"lancedb.connect",
"lancedb.pydantic.pydantic_to_schema"
] | [((455, 468), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (466, 468), False, 'from dotenv import load_dotenv\n'), ((560, 571), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (569, 571), False, 'from functools import lru_cache\n'), ((668, 678), 'config.Settings', 'Settings', ([], {}), '()\n', (676, 678), False, 'from config import Settings\n'), ((2230, 2259), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['model_id'], {}), '(model_id)\n', (2249, 2259), False, 'from sentence_transformers import SentenceTransformer\n'), ((4737, 4816), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Bulk index database from the wine reviews JSONL data"""'], {}), "('Bulk index database from the wine reviews JSONL data')\n", (4760, 4816), False, 'import argparse\n'), ((5613, 5636), 'os.path.exists', 'os.path.exists', (['DB_NAME'], {}), '(DB_NAME)\n', (5627, 5636), False, 'import os\n'), ((5679, 5703), 'lancedb.connect', 'lancedb.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (5694, 5703), False, 'import lancedb\n'), ((1283, 1315), 'srsly.read_gzip_jsonl', 'srsly.read_gzip_jsonl', (['file_path'], {}), '(file_path)\n', (1304, 1315), False, 'import srsly\n'), ((1522, 1554), 'srsly.read_gzip_jsonl', 'srsly.read_gzip_jsonl', (['file_path'], {}), '(file_path)\n', (1543, 1554), False, 'import srsly\n'), ((3582, 3680), 'codetiming.Timer', 'Timer', ([], {'name': '"""Data validation in pydantic"""', 'text': '"""Validated data using Pydantic in {:.4f} sec"""'}), "(name='Data validation in pydantic', text=\n 'Validated data using Pydantic in {:.4f} sec')\n", (3587, 3680), False, 'from codetiming import Timer\n'), ((3770, 3864), 'codetiming.Timer', 'Timer', ([], {'name': '"""Insert vectors in batches"""', 'text': '"""Created sentence embeddings in {:.4f} sec"""'}), "(name='Insert vectors in batches', text=\n 'Created sentence embeddings in {:.4f} sec')\n", (3775, 3864), False, 'from codetiming import Timer\n'), ((4012, 4082), 'codetiming.Timer', 
'Timer', ([], {'name': '"""Create ANN index"""', 'text': '"""Created ANN index in {:.4f} sec"""'}), "(name='Create ANN index', text='Created ANN index in {:.4f} sec')\n", (4017, 4082), False, 'from codetiming import Timer\n'), ((4466, 4536), 'codetiming.Timer', 'Timer', ([], {'name': '"""Create FTS index"""', 'text': '"""Created FTS index in {:.4f} sec"""'}), "(name='Create FTS index', text='Created FTS index in {:.4f} sec')\n", (4471, 4536), False, 'from codetiming import Timer\n'), ((5646, 5668), 'shutil.rmtree', 'shutil.rmtree', (['DB_NAME'], {}), '(DB_NAME)\n', (5659, 5668), False, 'import shutil\n'), ((3003, 3023), 'rich.progress.BarColumn', 'progress.BarColumn', ([], {}), '()\n', (3021, 3023), False, 'from rich import progress\n'), ((3090, 3118), 'rich.progress.TimeElapsedColumn', 'progress.TimeElapsedColumn', ([], {}), '()\n', (3116, 3118), False, 'from rich import progress\n'), ((1688, 1700), 'schemas.wine.Wine', 'Wine', ([], {}), '(**item)\n', (1692, 1700), False, 'from schemas.wine import LanceModelWine, Wine\n'), ((5304, 5318), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (5308, 5318), False, 'from pathlib import Path\n'), ((5757, 5791), 'lancedb.pydantic.pydantic_to_schema', 'pydantic_to_schema', (['LanceModelWine'], {}), '(LanceModelWine)\n', (5775, 5791), False, 'from lancedb.pydantic import pydantic_to_schema\n')] |
from datasets import load_dataset

# Pull the pre-transcribed YouTube dataset from the HF hub.
data = load_dataset('jamescalam/youtube-transcriptions', split='train')
from lancedb.context import contextualize
# Build overlapping context windows per video: 20 transcript lines per
# window, advancing 4 lines at a time.
df = (contextualize(data.to_pandas())
      .groupby("title").text_col("text")
      .window(20).stride(4)
      .to_df())
df.head(1)
import openai
import os
# Configuring the environment variable OPENAI_API_KEY
if "OPENAI_API_KEY" not in os.environ:
    # OR set the key here as a variable
    # NOTE(review): this assigns an empty key, so the Model.list() assert
    # below will fail unless OPENAI_API_KEY is set — confirm intent.
    openai.api_key = ""
# Sanity check that the API key works (makes a network call).
assert len(openai.Model.list()["data"]) > 0
def embed_func(c):
    """Return ada-002 embedding vectors for the given input batch."""
    response = openai.Embedding.create(input=c, engine="text-embedding-ada-002")
    return [record["embedding"] for record in response["data"]]
import lancedb
from lancedb.embeddings import with_embeddings
# data = with_embeddings(embed_func, df, show_progress=True)
# data.to_pandas().head(1)
# Connect to a local LanceDB database.
db = lancedb.connect("/tmp/lancedb")
# tbl = db.create_table("youtube-chatbot", data)
# Open the previously-created table of embedded transcript windows.
tbl = db.open_table("youtube-chatbot")
# Print the number of rows in the table.
print(len(tbl))
tbl.to_pandas().head(1)
def create_prompt(query, context):
    """Build a completion prompt from `query` plus as much retrieved context
    as fits under the character limit.

    Args:
        query: the user question.
        context: search results; must support len() and expose a `.text`
            sequence of context strings (e.g. a pandas DataFrame column).

    Returns:
        The assembled prompt string. With an empty context, the prompt
        contains only the question.
    """
    limit = 3750  # max characters of joined context to include
    prompt_start = (
        "Answer the question based on the context below.\n\n"+
        "Context:\n"
    )
    prompt_end = (
        f"\n\nQuestion: {query}\nAnswer:"
    )
    # Keep the largest prefix of context.text whose joined size stays under
    # the limit. (The original loop left `prompt` unbound for 0- or 1-row
    # contexts, raising UnboundLocalError.)
    texts = list(context.text)
    chosen = texts
    for i in range(1, len(texts) + 1):
        if len("\n\n---\n\n".join(texts[:i])) >= limit:
            chosen = texts[: i - 1]
            break
    prompt = (
        prompt_start +
        "\n\n---\n\n".join(chosen) +
        prompt_end
    )
    print("prompt:", prompt)
    return prompt
def complete(prompt):
    """Send `prompt` to text-davinci-003 and return the stripped completion text."""
    params = dict(
        engine="text-davinci-003",
        prompt=prompt,
        temperature=0,
        max_tokens=400,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
        stop=None,
    )
    response = openai.Completion.create(**params)
    return response["choices"][0]["text"].strip()
query = ("How do I use the Pandas library to create embeddings?")
# Embed the question
emb = embed_func(query)[0]
# Use LanceDB to get top 3 most relevant context
context = tbl.search(emb).limit(3).to_df()
# Get the answer from completion API
prompt = create_prompt(query, context)
print( "context:", context )
print ( complete( prompt )) | [
"lancedb.connect"
] | [((42, 106), 'datasets.load_dataset', 'load_dataset', (['"""jamescalam/youtube-transcriptions"""'], {'split': '"""train"""'}), "('jamescalam/youtube-transcriptions', split='train')\n", (54, 106), False, 'from datasets import load_dataset\n'), ((831, 862), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (846, 862), False, 'import lancedb\n'), ((549, 614), 'openai.Embedding.create', 'openai.Embedding.create', ([], {'input': 'c', 'engine': '"""text-embedding-ada-002"""'}), "(input=c, engine='text-embedding-ada-002')\n", (572, 614), False, 'import openai\n'), ((1876, 2042), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': '"""text-davinci-003"""', 'prompt': 'prompt', 'temperature': '(0)', 'max_tokens': '(400)', 'top_p': '(1)', 'frequency_penalty': '(0)', 'presence_penalty': '(0)', 'stop': 'None'}), "(engine='text-davinci-003', prompt=prompt,\n temperature=0, max_tokens=400, top_p=1, frequency_penalty=0,\n presence_penalty=0, stop=None)\n", (1900, 2042), False, 'import openai\n'), ((483, 502), 'openai.Model.list', 'openai.Model.list', ([], {}), '()\n', (500, 502), False, 'import openai\n')] |
import hashlib
import io
import logging
from typing import List
import numpy as np
from lancedb.pydantic import LanceModel, vector
from PIL import Image
from pydantic import BaseModel, Field, computed_field
from homematch.config import IMAGES_DIR
logger = logging.getLogger(__name__)
class PropertyListingBase(BaseModel):
    """Raw property-listing record as scraped from a listings site."""

    page_source: str
    resource_title: str
    resource_country: str
    operation_type: str
    active: bool
    url: str
    title: str
    normalized_title: str
    zone: str
    current_price: float | None = None
    ad_text: str
    basic_info: List[str]
    last_update: str
    main_image_url: str
    scraped_ts: str

    @computed_field  # type: ignore
    @property
    def identificator(self) -> str:
        """Stable 16-hex-char identifier derived from the listing URL."""
        digest = hashlib.sha256(self.url.encode()).hexdigest()
        return digest[:16]

    @computed_field  # type: ignore
    @property
    def text_description(self) -> str:
        """English-language textual summary of the listing."""
        # Translate the Spanish shorthand found in basic_info; order matters
        # ("baños" must be replaced before "baño").
        translations = (
            ("habs", "bedrooms"),
            ("baños", "bathrooms"),
            ("baño", "bathroom"),
            ("m²", "square meters"),
            ("planta", "floor"),
            ("Bajo", "0 floor"),
        )
        features = ",".join(self.basic_info)
        for spanish, english in translations:
            features = features.replace(spanish, english)
        return (
            f"Zone: {self.zone}."
            f"\nPrice: {self.current_price} euros."
            f"\nFeatures: {features}"
        )
class PropertyListing(PropertyListingBase):
    """Listing plus access to its image stored on disk."""

    images_dir: str = Field(str(IMAGES_DIR), description="Directory to store images")

    @property
    def image_path(self) -> str:
        """Path of the listing's JPEG file, keyed by its identificator."""
        return str(self.images_dir) + f"/{self.identificator}.jpg"

    def load_image(self) -> Image.Image:
        """Open the listing's image from disk; log and re-raise if missing."""
        try:
            return Image.open(self.image_path)
        except FileNotFoundError:
            logger.error(f"Image file not found: {self.image_path}")
            raise

    @classmethod
    def pil_to_bytes(cls, img: Image.Image) -> bytes:
        """Serialize a PIL image to PNG-encoded bytes."""
        buffer = io.BytesIO()
        img.save(buffer, format="PNG")
        return buffer.getvalue()

    @classmethod
    def pil_to_numpy(cls, img: Image.Image) -> np.ndarray:
        """Convert a PIL image to a numpy array."""
        return np.array(img)
class PropertyData(PropertyListing):
    """Listing bundled with its in-memory PIL image."""

    class Config:
        # Required so pydantic accepts the non-pydantic PIL.Image.Image field type.
        arbitrary_types_allowed = True

    # In-memory image associated with this listing.
    image: Image.Image
class ImageData(PropertyListing, LanceModel):
    """LanceDB row schema: listing metadata plus embedding vector and raw image bytes."""

    # 768-dim embedding (source encoder not visible here — confirm upstream).
    vector: vector(768)  # type: ignore
    image_bytes: bytes
| [
"lancedb.pydantic.vector"
] | [((259, 286), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (276, 286), False, 'import logging\n'), ((2511, 2522), 'lancedb.pydantic.vector', 'vector', (['(768)'], {}), '(768)\n', (2517, 2522), False, 'from lancedb.pydantic import LanceModel, vector\n'), ((2146, 2158), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2156, 2158), False, 'import io\n'), ((2317, 2330), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2325, 2330), True, 'import numpy as np\n'), ((1911, 1938), 'PIL.Image.open', 'Image.open', (['self.image_path'], {}), '(self.image_path)\n', (1921, 1938), False, 'from PIL import Image\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import os
from typing import Any, Callable, Dict, List, Optional, Union
from urllib.parse import urljoin
import attrs
import pyarrow as pa
import requests
from pydantic import BaseModel
from requests.adapters import HTTPAdapter
from urllib3 import Retry
from lancedb.common import Credential
from lancedb.remote import VectorQuery, VectorQueryResult
from lancedb.remote.connection_timeout import LanceDBClientHTTPAdapterFactory
from lancedb.remote.errors import LanceDBClientError
ARROW_STREAM_CONTENT_TYPE = "application/vnd.apache.arrow.stream"
def _check_not_closed(f):
@functools.wraps(f)
def wrapped(self, *args, **kwargs):
if self.closed:
raise ValueError("Connection is closed")
return f(self, *args, **kwargs)
return wrapped
def _read_ipc(resp: requests.Response) -> pa.Table:
    """Deserialize an Arrow IPC file payload from an HTTP response body."""
    buffer = pa.BufferReader(resp.content)
    with pa.ipc.open_file(buffer) as reader:
        return reader.read_all()
@attrs.define(slots=False)
class RestfulLanceDBClient:
    """HTTP client for the LanceDB Cloud REST API.

    Requests go to ``https://{db_name}.{region}.api.lancedb.com`` (or
    ``host_override`` when set), authenticated via the ``x-api-key`` header.
    """

    db_name: str
    region: str
    api_key: Credential
    # Explicit endpoint URL; when set it replaces the db_name/region-derived URL.
    host_override: Optional[str] = attrs.field(default=None)
    # Set to True by close(); guarded methods then raise ValueError.
    closed: bool = attrs.field(default=False, init=False)
    connection_timeout: float = attrs.field(default=120.0, kw_only=True)
    read_timeout: float = attrs.field(default=300.0, kw_only=True)
    @functools.cached_property
    def session(self) -> requests.Session:
        """Lazily-created requests session with retry and timeout adapters mounted."""
        sess = requests.Session()
        retry_adapter_instance = retry_adapter(retry_adapter_options())
        sess.mount(urljoin(self.url, "/v1/table/"), retry_adapter_instance)
        adapter_class = LanceDBClientHTTPAdapterFactory()
        sess.mount("https://", adapter_class())
        return sess
    @property
    def url(self) -> str:
        """Base URL of the database endpoint."""
        return (
            self.host_override
            or f"https://{self.db_name}.{self.region}.api.lancedb.com"
        )
    def close(self):
        """Close the underlying session and mark the client as closed."""
        self.session.close()
        self.closed = True
    @functools.cached_property
    def headers(self) -> Dict[str, str]:
        """Default headers sent with every request (auth plus routing hints)."""
        headers = {
            "x-api-key": self.api_key,
        }
        if self.region == "local":  # Local test mode
            headers["Host"] = f"{self.db_name}.{self.region}.api.lancedb.com"
        if self.host_override:
            headers["x-lancedb-database"] = self.db_name
        return headers
    @staticmethod
    def _check_status(resp: requests.Response):
        """Raise LanceDBClientError for any non-200 response."""
        if resp.status_code == 404:
            raise LanceDBClientError(f"Not found: {resp.text}")
        elif 400 <= resp.status_code < 500:
            raise LanceDBClientError(
                f"Bad Request: {resp.status_code}, error: {resp.text}"
            )
        elif 500 <= resp.status_code < 600:
            raise LanceDBClientError(
                f"Internal Server Error: {resp.status_code}, error: {resp.text}"
            )
        elif resp.status_code != 200:
            raise LanceDBClientError(
                f"Unknown Error: {resp.status_code}, error: {resp.text}"
            )
    @_check_not_closed
    def get(self, uri: str, params: Union[Dict[str, Any], BaseModel] = None):
        """Send a GET request and returns the deserialized response payload."""
        if isinstance(params, BaseModel):
            params: Dict[str, Any] = params.dict(exclude_none=True)
        with self.session.get(
            urljoin(self.url, uri),
            params=params,
            headers=self.headers,
            timeout=(self.connection_timeout, self.read_timeout),
        ) as resp:
            self._check_status(resp)
            return resp.json()
    @_check_not_closed
    def post(
        self,
        uri: str,
        data: Optional[Union[Dict[str, Any], BaseModel, bytes]] = None,
        params: Optional[Dict[str, Any]] = None,
        content_type: Optional[str] = None,
        deserialize: Callable = lambda resp: resp.json(),
        request_id: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Send a POST request and returns the deserialized response payload.

        Parameters
        ----------
        uri : str
            The uri to send the POST request to.
        data: Union[Dict[str, Any], BaseModel]
            Payload; BaseModel is serialized to a dict, bytes are sent raw.
        request_id: Optional[str]
            Optional client side request id to be sent in the request headers.

        """
        if isinstance(data, BaseModel):
            data: Dict[str, Any] = data.dict(exclude_none=True)
        if isinstance(data, bytes):
            req_kwargs = {"data": data}
        else:
            req_kwargs = {"json": data}
        headers = self.headers.copy()
        if content_type is not None:
            headers["content-type"] = content_type
        if request_id is not None:
            headers["x-request-id"] = request_id
        with self.session.post(
            urljoin(self.url, uri),
            headers=headers,
            params=params,
            timeout=(self.connection_timeout, self.read_timeout),
            **req_kwargs,
        ) as resp:
            self._check_status(resp)
            return deserialize(resp)
    @_check_not_closed
    def list_tables(self, limit: int, page_token: Optional[str] = None) -> List[str]:
        """List all tables in the database."""
        if page_token is None:
            page_token = ""
        json = self.get("/v1/table/", {"limit": limit, "page_token": page_token})
        return json["tables"]
    @_check_not_closed
    def query(self, table_name: str, query: VectorQuery) -> VectorQueryResult:
        """Query a table."""
        tbl = self.post(f"/v1/table/{table_name}/query/", query, deserialize=_read_ipc)
        return VectorQueryResult(tbl)
    def mount_retry_adapter_for_table(self, table_name: str) -> None:
        """
        Adds an http adapter to session that will retry retryable requests to the table.
        """
        retry_options = retry_adapter_options(methods=["GET", "POST"])
        retry_adapter_instance = retry_adapter(retry_options)
        session = self.session
        session.mount(
            urljoin(self.url, f"/v1/table/{table_name}/query/"), retry_adapter_instance
        )
        session.mount(
            urljoin(self.url, f"/v1/table/{table_name}/describe/"),
            retry_adapter_instance,
        )
        session.mount(
            urljoin(self.url, f"/v1/table/{table_name}/index/list/"),
            retry_adapter_instance,
        )
def retry_adapter_options(methods: Optional[List[str]] = None) -> Dict[str, Any]:
    """Build retry settings for urllib3.Retry from LANCE_CLIENT_* env vars.

    Parameters
    ----------
    methods: Optional[List[str]]
        HTTP methods eligible for retry; defaults to ["GET"].
        (Previously a mutable default argument — replaced with None to
        avoid cross-call sharing of the default list.)

    Returns
    -------
    Dict with retry counts, backoff settings, retryable status codes and
    allowed methods.
    """
    if methods is None:
        methods = ["GET"]
    return {
        "retries": int(os.environ.get("LANCE_CLIENT_MAX_RETRIES", "3")),
        "connect_retries": int(os.environ.get("LANCE_CLIENT_CONNECT_RETRIES", "3")),
        "read_retries": int(os.environ.get("LANCE_CLIENT_READ_RETRIES", "3")),
        "backoff_factor": float(
            os.environ.get("LANCE_CLIENT_RETRY_BACKOFF_FACTOR", "0.25")
        ),
        "backoff_jitter": float(
            os.environ.get("LANCE_CLIENT_RETRY_BACKOFF_JITTER", "0.25")
        ),
        "statuses": [
            int(i.strip())
            for i in os.environ.get(
                "LANCE_CLIENT_RETRY_STATUSES", "429, 500, 502, 503"
            ).split(",")
        ],
        "methods": methods,
    }
def retry_adapter(options: Dict[str, Any]) -> HTTPAdapter:
    """Build an HTTPAdapter whose urllib3 Retry policy follows `options`."""
    total_retries = options["retries"]
    connect_retries = options["connect_retries"]
    read_retries = options["read_retries"]
    backoff_factor = options["backoff_factor"]
    backoff_jitter = options["backoff_jitter"]
    statuses = options["statuses"]
    methods = frozenset(options["methods"])
    logging.debug(
        f"Setting up retry adapter with {total_retries} retries,"  # noqa G003
        + f"connect retries {connect_retries}, read retries {read_retries},"
        + f"backoff factor {backoff_factor}, statuses {statuses}, "
        + f"methods {methods}"
    )
    retry_policy = Retry(
        total=total_retries,
        connect=connect_retries,
        read=read_retries,
        backoff_factor=backoff_factor,
        backoff_jitter=backoff_jitter,
        status_forcelist=statuses,
        allowed_methods=methods,
    )
    return HTTPAdapter(max_retries=retry_policy)
| [
"lancedb.remote.VectorQueryResult",
"lancedb.remote.connection_timeout.LanceDBClientHTTPAdapterFactory",
"lancedb.remote.errors.LanceDBClientError"
] | [((1587, 1612), 'attrs.define', 'attrs.define', ([], {'slots': '(False)'}), '(slots=False)\n', (1599, 1612), False, 'import attrs\n'), ((1207, 1225), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (1222, 1225), False, 'import functools\n'), ((1733, 1758), 'attrs.field', 'attrs.field', ([], {'default': 'None'}), '(default=None)\n', (1744, 1758), False, 'import attrs\n'), ((1779, 1817), 'attrs.field', 'attrs.field', ([], {'default': '(False)', 'init': '(False)'}), '(default=False, init=False)\n', (1790, 1817), False, 'import attrs\n'), ((1851, 1891), 'attrs.field', 'attrs.field', ([], {'default': '(120.0)', 'kw_only': '(True)'}), '(default=120.0, kw_only=True)\n', (1862, 1891), False, 'import attrs\n'), ((1918, 1958), 'attrs.field', 'attrs.field', ([], {'default': '(300.0)', 'kw_only': '(True)'}), '(default=300.0, kw_only=True)\n', (1929, 1958), False, 'import attrs\n'), ((8166, 8402), 'logging.debug', 'logging.debug', (["(f'Setting up retry adapter with {total_retries} retries,' +\n f'connect retries {connect_retries}, read retries {read_retries},' +\n f'backoff factor {backoff_factor}, statuses {statuses}, ' +\n f'methods {methods}')"], {}), "(f'Setting up retry adapter with {total_retries} retries,' +\n f'connect retries {connect_retries}, read retries {read_retries},' +\n f'backoff factor {backoff_factor}, statuses {statuses}, ' +\n f'methods {methods}')\n", (8179, 8402), False, 'import logging\n'), ((2049, 2067), 'requests.Session', 'requests.Session', ([], {}), '()\n', (2065, 2067), False, 'import requests\n'), ((2242, 2275), 'lancedb.remote.connection_timeout.LanceDBClientHTTPAdapterFactory', 'LanceDBClientHTTPAdapterFactory', ([], {}), '()\n', (2273, 2275), False, 'from lancedb.remote.connection_timeout import LanceDBClientHTTPAdapterFactory\n'), ((6258, 6280), 'lancedb.remote.VectorQueryResult', 'VectorQueryResult', (['tbl'], {}), '(tbl)\n', (6275, 6280), False, 'from lancedb.remote import VectorQuery, VectorQueryResult\n'), ((1512, 1538), 
'pyarrow.BufferReader', 'pa.BufferReader', (['resp_body'], {}), '(resp_body)\n', (1527, 1538), True, 'import pyarrow as pa\n'), ((2160, 2191), 'urllib.parse.urljoin', 'urljoin', (['self.url', '"""/v1/table/"""'], {}), "(self.url, '/v1/table/')\n", (2167, 2191), False, 'from urllib.parse import urljoin\n'), ((3098, 3143), 'lancedb.remote.errors.LanceDBClientError', 'LanceDBClientError', (['f"""Not found: {resp.text}"""'], {}), "(f'Not found: {resp.text}')\n", (3116, 3143), False, 'from lancedb.remote.errors import LanceDBClientError\n'), ((6665, 6716), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'f"""/v1/table/{table_name}/query/"""'], {}), "(self.url, f'/v1/table/{table_name}/query/')\n", (6672, 6716), False, 'from urllib.parse import urljoin\n'), ((6786, 6840), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'f"""/v1/table/{table_name}/describe/"""'], {}), "(self.url, f'/v1/table/{table_name}/describe/')\n", (6793, 6840), False, 'from urllib.parse import urljoin\n'), ((6923, 6979), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'f"""/v1/table/{table_name}/index/list/"""'], {}), "(self.url, f'/v1/table/{table_name}/index/list/')\n", (6930, 6979), False, 'from urllib.parse import urljoin\n'), ((7127, 7174), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_MAX_RETRIES"""', '"""3"""'], {}), "('LANCE_CLIENT_MAX_RETRIES', '3')\n", (7141, 7174), False, 'import os\n'), ((7208, 7259), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_CONNECT_RETRIES"""', '"""3"""'], {}), "('LANCE_CLIENT_CONNECT_RETRIES', '3')\n", (7222, 7259), False, 'import os\n'), ((7290, 7338), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_READ_RETRIES"""', '"""3"""'], {}), "('LANCE_CLIENT_READ_RETRIES', '3')\n", (7304, 7338), False, 'import os\n'), ((7386, 7445), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_RETRY_BACKOFF_FACTOR"""', '"""0.25"""'], {}), "('LANCE_CLIENT_RETRY_BACKOFF_FACTOR', '0.25')\n", (7400, 7445), False, 'import os\n'), ((7502, 7561), 
'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_RETRY_BACKOFF_JITTER"""', '"""0.25"""'], {}), "('LANCE_CLIENT_RETRY_BACKOFF_JITTER', '0.25')\n", (7516, 7561), False, 'import os\n'), ((8487, 8679), 'urllib3.Retry', 'Retry', ([], {'total': 'total_retries', 'connect': 'connect_retries', 'read': 'read_retries', 'backoff_factor': 'backoff_factor', 'backoff_jitter': 'backoff_jitter', 'status_forcelist': 'statuses', 'allowed_methods': 'methods'}), '(total=total_retries, connect=connect_retries, read=read_retries,\n backoff_factor=backoff_factor, backoff_jitter=backoff_jitter,\n status_forcelist=statuses, allowed_methods=methods)\n', (8492, 8679), False, 'from urllib3 import Retry\n'), ((3206, 3280), 'lancedb.remote.errors.LanceDBClientError', 'LanceDBClientError', (['f"""Bad Request: {resp.status_code}, error: {resp.text}"""'], {}), "(f'Bad Request: {resp.status_code}, error: {resp.text}')\n", (3224, 3280), False, 'from lancedb.remote.errors import LanceDBClientError\n'), ((3986, 4008), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'uri'], {}), '(self.url, uri)\n', (3993, 4008), False, 'from urllib.parse import urljoin\n'), ((5430, 5452), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'uri'], {}), '(self.url, uri)\n', (5437, 5452), False, 'from urllib.parse import urljoin\n'), ((3373, 3462), 'lancedb.remote.errors.LanceDBClientError', 'LanceDBClientError', (['f"""Internal Server Error: {resp.status_code}, error: {resp.text}"""'], {}), "(\n f'Internal Server Error: {resp.status_code}, error: {resp.text}')\n", (3391, 3462), False, 'from lancedb.remote.errors import LanceDBClientError\n'), ((3544, 3620), 'lancedb.remote.errors.LanceDBClientError', 'LanceDBClientError', (['f"""Unknown Error: {resp.status_code}, error: {resp.text}"""'], {}), "(f'Unknown Error: {resp.status_code}, error: {resp.text}')\n", (3562, 3620), False, 'from lancedb.remote.errors import LanceDBClientError\n'), ((7643, 7710), 'os.environ.get', 'os.environ.get', 
(['"""LANCE_CLIENT_RETRY_STATUSES"""', '"""429, 500, 502, 503"""'], {}), "('LANCE_CLIENT_RETRY_STATUSES', '429, 500, 502, 503')\n", (7657, 7710), False, 'import os\n')] |
from langchain.text_splitter import (
    RecursiveCharacterTextSplitter,
    Language,
    LatexTextSplitter,
)
from langchain.document_loaders import TextLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import LanceDB
import argparse
import os
import arxiv
import lancedb
from lancedb.pydantic import Vector, LanceModel
# BUG FIX: was `from Typing import List` -- the module name is lowercase
# `typing`, so the original raised ModuleNotFoundError at import time.
from typing import List
from datetime import datetime
# SECURITY FIX: a live-looking OpenAI API key was hard-coded here and
# committed to source control. Treat that key as compromised and revoke it.
# The key must now come from the environment (or the --openai_api_key CLI
# flag handled in the __main__ block below).
os.environ.setdefault("OPENAI_API_KEY", "")
embeddings = OpenAIEmbeddings()
# Output dimensionality of the OpenAI embedding model; referenced by the
# `Content` schema below. (The original `global embedding_out_length`
# statement was a no-op: `global` has no effect at module scope.)
embedding_out_length = 1536
class Content(LanceModel):
    """LanceDB row schema for one ingested arXiv document chunk."""
    # Unique identifier of this chunk within the table.
    id: str
    # arXiv identifier of the source paper.
    arxiv_id: str
    # Embedding of `text`; dimensionality must match the embedding model output.
    vector: Vector(embedding_out_length)
    # The raw chunk text that was embedded.
    text: str
    # When the paper was uploaded to arXiv.
    uploaded_date: datetime
    # Paper title.
    title: str
    # Paper authors.
    authors: List[str]
    # Paper abstract.
    abstract: str
    # arXiv category labels.
    categories: List[str]
    # Link back to the paper.
    url: str
def PyPDF_to_Vector(table: LanceDB, embeddings: OpenAIEmbeddings, src_dir: str, n_threads: int = 1):
    """Ingest PDF sources from ``src_dir`` into ``table``.

    NOTE(review): placeholder -- not implemented yet. The ``__main__`` block
    below performs LaTeX-based ingestion inline instead; the `n_threads`
    parameter is reserved for future parallel ingestion.
    """
    pass
if __name__ == "__main__":
    argparser = argparse.ArgumentParser(
        description="Create Vector DB and perform ingestion from source files"
    )
    argparser.add_argument('-s', '--src_dir', type=str, required=True,
                           help="Source directory where arxiv sources are stored")
    argparser.add_argument('-db', '--db_name', type=str, required=True,
                           help="Name of the LanceDB database to be created")
    argparser.add_argument('-t', '--table_name', type=str, required=False,
                           help="Name of the LanceDB table to be created", default="EIC_archive")
    argparser.add_argument('-openai_key', '--openai_api_key', type=str, required=True,
                           help="OpenAI API key")
    argparser.add_argument('-c', '--chunking', type=str, required=False,
                           help="Type of Chunking PDF or LATEX", default="PDF")
    argparser.add_argument('-n', '--nthreads', type=int, default=-1)
    args = argparser.parse_args()

    SRC_DIR = args.src_dir
    DB_NAME = args.db_name
    TABLE_NAME = args.table_name
    OPENAI_API_KEY = args.openai_api_key
    NTHREADS = args.nthreads  # NOTE: chunking/NTHREADS are not used yet

    # BUG FIX: the --openai_api_key flag was parsed but never used.
    os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY

    # BUG FIX: the original connected twice -- the second call was
    # `lancedb.connect()` with no URI (a TypeError) -- and created the table
    # twice with conflicting schemas (once from `Content`, once hard-coded as
    # "EIC_archive", ignoring --table_name). Connect and create exactly once,
    # seeding the schema from one representative row.
    db = lancedb.connect(DB_NAME)
    table = db.create_table(
        TABLE_NAME,
        data=[
            {
                "vector": embeddings.embed_query("EIC LLM"),
                "text": "EIC LLM",
                "id": "1",
                "arxiv_id": "N/A",
                "title": "N/A",
                "category": "N/A",
                "published": "N/A",
            }
        ],
        mode="overwrite",
    )
    vectorstore = LanceDB(connection=table, embedding=embeddings)

    count = 0
    client = arxiv.Client()  # one client reused for every metadata lookup
    # BUG FIX: the --src_dir flag was parsed but the loop hard-coded "PDFs".
    for source in os.listdir(SRC_DIR):
        if not os.path.isdir(os.path.join(SRC_DIR, source)):
            continue
        print(f"Adding the source document {source} to the Vector DB")
        search = arxiv.Search(id_list=[source])
        paper = next(client.results(search))
        meta_data = {
            "arxiv_id": paper.entry_id,
            "title": paper.title,
            # NOTE(review): the original indexed an undefined `categories`
            # mapping (a NameError); fall back to the raw primary category.
            "category": paper.primary_category,
            "published": paper.published,
        }
        for fname in os.listdir(os.path.join(SRC_DIR, source)):
            if not fname.endswith(".tex"):
                continue
            latex_file = os.path.join(SRC_DIR, source, fname)
            print(source, latex_file)
            documents = TextLoader(latex_file, encoding='latin-1').load()
            latex_splitter = LatexTextSplitter(chunk_size=120, chunk_overlap=10)
            documents = latex_splitter.split_documents(documents)
            # Attach the paper metadata to every chunk before ingestion.
            for doc in documents:
                doc.metadata.update(meta_data)
            vectorstore.add_documents(documents=documents)
            count += len(documents)
"lancedb.connect",
"lancedb.pydantic.Vector"
] | [((342, 360), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (358, 360), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((2116, 2133), 'lancedb.connect', 'lancedb.connect', ([], {}), '()\n', (2131, 2133), False, 'import lancedb\n'), ((2820, 2867), 'langchain.vectorstores.LanceDB', 'LanceDB', ([], {'connection': 'table', 'embedding': 'embeddings'}), '(connection=table, embedding=embeddings)\n', (2827, 2867), False, 'from langchain.vectorstores import LanceDB\n'), ((2916, 2937), 'os.listdir', 'os.listdir', (['sourcedir'], {}), '(sourcedir)\n', (2926, 2937), False, 'import argparse, os, arxiv\n'), ((648, 676), 'lancedb.pydantic.Vector', 'Vector', (['embedding_out_length'], {}), '(embedding_out_length)\n', (654, 676), False, 'from lancedb.pydantic import Vector, LanceModel\n'), ((978, 1078), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create Vector DB and perform ingestion from source files"""'}), "(description=\n 'Create Vector DB and perform ingestion from source files')\n", (1001, 1078), False, 'import argparse, os, arxiv\n'), ((2006, 2030), 'lancedb.connect', 'lancedb.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (2021, 2030), False, 'import lancedb\n'), ((3110, 3124), 'arxiv.Client', 'arxiv.Client', ([], {}), '()\n', (3122, 3124), False, 'import arxiv\n'), ((3138, 3168), 'arxiv.Search', 'arxiv.Search', ([], {'id_list': '[source]'}), '(id_list=[source])\n', (3150, 3168), False, 'import arxiv\n'), ((3458, 3489), 'os.path.join', 'os.path.join', (['sourcedir', 'source'], {}), '(sourcedir, source)\n', (3470, 3489), False, 'import argparse, os, arxiv\n'), ((2964, 2992), 'os.path.join', 'os.path.join', (['"""PDFs"""', 'source'], {}), "('PDFs', source)\n", (2976, 2992), False, 'import argparse, os, arxiv\n'), ((3551, 3588), 'os.path.join', 'os.path.join', (['sourcedir', 'source', 'file'], {}), '(sourcedir, source, file)\n', (3563, 3588), False, 'import argparse, os, arxiv\n'), ((3733, 
3784), 'langchain.text_splitter.LatexTextSplitter', 'LatexTextSplitter', ([], {'chunk_size': '(120)', 'chunk_overlap': '(10)'}), '(chunk_size=120, chunk_overlap=10)\n', (3750, 3784), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter, Language, LatexTextSplitter\n'), ((3186, 3200), 'arxiv.Client', 'arxiv.Client', ([], {}), '()\n', (3198, 3200), False, 'import arxiv\n'), ((3652, 3694), 'langchain.document_loaders.TextLoader', 'TextLoader', (['latex_file'], {'encoding': '"""latin-1"""'}), "(latex_file, encoding='latin-1')\n", (3662, 3694), False, 'from langchain.document_loaders import TextLoader\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import importlib.metadata
import platform
import random
import sys
import time
from lancedb.utils import CONFIG
from lancedb.utils.general import TryExcept
from .general import (
PLATFORMS,
get_git_origin_url,
is_git_dir,
is_github_actions_ci,
is_online,
is_pip_package,
is_pytest_running,
threaded_request,
)
class _Events:
    """
    A class for collecting anonymous event analytics. Event analytics are enabled when
    ``diagnostics=True`` in config and disabled when ``diagnostics=False``.
    You can enable or disable diagnostics by running ``lancedb diagnostics --enabled``
    or ``lancedb diagnostics --disabled``.
    Attributes
    ----------
    url : str
        The URL to send anonymous events.
    rate_limit : float
        The rate limit in seconds for sending events.
    metadata : dict
        A dictionary containing metadata about the environment.
    enabled : bool
        A flag to enable or disable Events based on certain conditions.
    """
    _instance = None
    url = "https://app.posthog.com/capture/"
    headers = {"Content-Type": "application/json"}
    api_key = "phc_oENDjGgHtmIDrV6puUiFem2RB4JA8gGWulfdulmMdZP"
    # This api-key is write only and is safe to expose in the codebase.
    def __init__(self):
        """
        Initializes the Events object with default values for events, rate_limit,
        and metadata.
        """
        self.events = []  # events list
        self.throttled_event_names = ["search_table"]
        self.throttled_events = set()
        self.max_events = 5  # max events to store in memory
        self.rate_limit = 60.0 * 60.0  # rate limit (seconds)
        self.time = 0.0
        if is_git_dir():
            install = "git"
        elif is_pip_package():
            install = "pip"
        else:
            install = "other"
        self.metadata = {
            "cli": sys.argv[0],
            "install": install,
            "python": ".".join(platform.python_version_tuple()[:2]),
            "version": importlib.metadata.version("lancedb"),
            "platforms": PLATFORMS,
            "session_id": round(random.random() * 1e15),
            # TODO: In future we might be interested in this metric
            # 'engagement_time_msec': 1000
        }
        TESTS_RUNNING = is_pytest_running() or is_github_actions_ci()
        ONLINE = is_online()
        self.enabled = (
            CONFIG["diagnostics"]
            and not TESTS_RUNNING
            and ONLINE
            and (
                is_pip_package()
                or get_git_origin_url() == "https://github.com/lancedb/lancedb.git"
            )
        )
    def __call__(self, event_name, params=None):
        """
        Attempts to add a new event to the events list and send events if the rate
        limit is reached.
        Args
        ----
        event_name : str
            The name of the event to be logged.
        params : dict, optional
            A dictionary of additional parameters to be logged with the event.
        """
        # BUG FIX: the default used to be the mutable `params={}`, which was
        # then mutated via params.update(self.metadata) -- metadata accumulated
        # in the shared default dict across calls, and callers' dicts were
        # modified in place. Default to None and merge without mutating.
        if params is None:
            params = {}
        ### NOTE: We might need a way to tag a session with a label to check usage
        ### from a source. Setting label should be exposed to the user.
        if not self.enabled:
            return
        if (
            len(self.events) < self.max_events
        ):  # Events list limited to self.max_events (drop any events past this)
            event = {
                "event": event_name,
                # Merge metadata over the caller's params, leaving both intact.
                "properties": {**params, **self.metadata},
                "timestamp": datetime.datetime.now(
                    tz=datetime.timezone.utc
                ).isoformat(),
                "distinct_id": CONFIG["uuid"],
            }
            if event_name not in self.throttled_event_names:
                self.events.append(event)
            elif event_name not in self.throttled_events:
                # Throttled events are recorded at most once per flush window.
                self.throttled_events.add(event_name)
                self.events.append(event)
        # Check rate limit
        t = time.time()
        if (t - self.time) < self.rate_limit:
            return
        # Time is over rate limiter, send now
        data = {
            "api_key": self.api_key,
            "distinct_id": CONFIG["uuid"],  # posthog needs this to accepts the event
            "batch": self.events,
        }
        # POST equivalent to requests.post(self.url, json=data).
        # threaded request is used to avoid blocking, retries are disabled, and
        # verbose is disabled to avoid any possible disruption in the console.
        threaded_request(
            method="post",
            url=self.url,
            headers=self.headers,
            json=data,
            retry=0,
            verbose=False,
        )
        # Flush & Reset
        self.events = []
        self.throttled_events = set()
        self.time = t
@TryExcept(verbose=False)
def register_event(name: str, **kwargs):
    """Record an anonymous usage event, lazily creating the singleton.

    Wrapped in ``TryExcept`` so that analytics failures never propagate to
    the caller.
    """
    instance = _Events._instance
    if instance is None:
        instance = _Events._instance = _Events()
    instance(name, **kwargs)
| [
"lancedb.utils.general.TryExcept"
] | [((5469, 5493), 'lancedb.utils.general.TryExcept', 'TryExcept', ([], {'verbose': '(False)'}), '(verbose=False)\n', (5478, 5493), False, 'from lancedb.utils.general import TryExcept\n'), ((4631, 4642), 'time.time', 'time.time', ([], {}), '()\n', (4640, 4642), False, 'import time\n'), ((2582, 2613), 'platform.python_version_tuple', 'platform.python_version_tuple', ([], {}), '()\n', (2611, 2613), False, 'import platform\n'), ((2750, 2765), 'random.random', 'random.random', ([], {}), '()\n', (2763, 2765), False, 'import random\n'), ((4174, 4221), 'datetime.datetime.now', 'datetime.datetime.now', ([], {'tz': 'datetime.timezone.utc'}), '(tz=datetime.timezone.utc)\n', (4195, 4221), False, 'import datetime\n')] |
import time
import os
import pandas as pd
import streamlit as st
import lancedb
from lancedb.embeddings import with_embeddings
from langchain import PromptTemplate
import predictionguard as pg
import streamlit as st
import duckdb
import re
import numpy as np
from sentence_transformers import SentenceTransformer
#---------------------#
#   Lance DB Setup    #
#---------------------#
# Local LanceDB directory holding one embedded description per data table.
uri = "schema.lancedb"
db = lancedb.connect(uri)  # module-global connection shared by every request
def embed(query, embModel):
    """Encode *query* with the supplied embedding model and return the vector."""
    encoded = embModel.encode(query)
    return encoded
def batch_embed_func(batch):
    """Embed every sentence in *batch* with the encoder kept in Streamlit
    session state under ``'en_emb'``."""
    encoder = st.session_state['en_emb']
    embedded = []
    for sentence in batch:
        embedded.append(encoder.encode(sentence))
    return embedded
#---------------------#
#  Streamlit config   #
#---------------------#
if "login" not in st.session_state:
    st.session_state["login"] = False
# Hide the hamburger menu
hide_streamlit_style = """
            <style>
            #MainMenu {visibility: hidden;}
            footer {visibility: hidden;}
            </style>
            """
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
#--------------------------#
#    Define datasets       #
#--------------------------#
# Load the demo CSVs and register each as an in-memory DuckDB view so the
# generated SQL can be executed against them by name.
#JOBS
df1=pd.read_csv('datasets/jobs.csv')
#SOCIAL
df2=pd.read_csv('datasets/social.csv')
#movies
df3=pd.read_csv('datasets/movies.csv')
conn = duckdb.connect(database=':memory:')
conn.register('jobs', df1)
conn.register('social', df2)
conn.register('movies', df3)
#--------------------------#
#    Prompt Templates      #
#--------------------------#
### PROMPT TEMPLATES
# Template for turning a SQL result set back into a natural-language answer.
qa_template = """### System:
You are a data chatbot who answers the user question. To answer these questions we need to run SQL queries on our data and its output is given below in context. You just have to frame your answer using that context. Give a short and crisp response.Don't add any notes or any extra information after your response.
### User:
Question: {question}
context: {context}
### Assistant:
"""
qa_prompt = PromptTemplate(template=qa_template,input_variables=["question", "context"])
# Template for generating an executable SQL query from a question + schema.
sql_template = """<|begin_of_sentence|>You are a SQL expert and you only generate SQL queries which are executable. You provide no extra explanations.
You respond with a SQL query that answers the user question in the below instruction by querying a database with the schema provided in the below instruction.
Always start your query with SELECT statement and end with a semicolon.
### Instruction:
User question: \"{question}\"
Database schema:
{schema}
### Response:
"""
sql_prompt=PromptTemplate(template=sql_template, input_variables=["question","schema"])
#--------------------------#
#   Generate SQL Query     #
#--------------------------#
# Embeddings setup
name="all-MiniLM-L12-v2"
def load_model():
    """Load the sentence-transformers model named by the module-level ``name``."""
    return SentenceTransformer(name)
# Loaded once at import time and shared by every request.
model = load_model()
def generate_sql_query(question, schema):
    """Ask the code LLM to write a SQL query answering *question*.

    Parameters
    ----------
    question : str
        Natural-language user question.
    schema : str
        Schema of the table the query should run against.

    Returns
    -------
    str
        Raw model output (may contain prose around the SQL), or an empty
        string if the completion call failed.
    """
    prompt_filled = sql_prompt.format(question=question, schema=schema)
    try:
        result = pg.Completion.create(
            model="deepseek-coder-6.7b-instruct",
            prompt=prompt_filled,
            max_tokens=300,
            temperature=0.1,
        )
        return result["choices"][0]["text"]
    except Exception as e:
        # BUG FIX: this used to silently return None, which crashed the caller
        # inside re.search(None). Return "" (still falsy) and surface the error.
        print(f"Error generating SQL query: {e}")
        return ""
def extract_and_refine_sql_query(sql_query):
    """Pull the first ``SELECT ... ;`` statement out of raw model output.

    Any commentary the model appended after a colon is discarded, and the
    returned statement always ends with a semicolon. Returns ``""`` when no
    terminated SELECT statement is found.
    """
    found = re.search(r"(SELECT.*?);", sql_query, re.DOTALL)
    if not found:
        return ""
    statement = found.group(1)
    # Drop anything the model tacked on after a colon (explanations, notes).
    statement = statement.split(":", 1)[0]
    # Normalize: every returned query ends with a semicolon.
    if not statement.endswith(";"):
        statement += ";"
    return statement
def get_answer_from_sql(question):
    """Find the most relevant table for *question*, generate SQL, and run it.

    Returns
    -------
    tuple(list, str)
        On success: (rows fetched from DuckDB, the SQL query used).
    str
        On failure: a human-readable message.

    NOTE(review): the inconsistent return type (tuple vs str) is tolerated by
    the caller, which only interpolates the value into a prompt. Also, if no
    candidate row survives the word-count filter below, `schema` is never
    bound and the call raises NameError -- confirm whether that can happen
    with the current schema table.
    """
    # Search Relavent Tables
    table = db.open_table("schema")
    # Embed the question and retrieve the 2 closest table descriptions.
    results = table.search(embed(question, model)).limit(2).to_df()
    print(results)
    # Keep only sufficiently close matches (smaller distance = more similar).
    results = results[results['_distance'] < 1.5]
    print("Results:", results)
    if len(results) == 0:
        completion = "We did not find any relevant tables."
        return completion
    else:
        results.sort_values(by=['_distance'], inplace=True, ascending=True)
        doc_use = ""
        # Pick the first candidate with a long-enough description and show it
        # in the sidebar for transparency.
        for _, row in results.iterrows():
            if len(row['text'].split(' ')) < 10:
                continue
            else:
                schema=row['schema']
                table_name=row['text']
                st.sidebar.info(table_name)
                st.sidebar.code(schema)
                break
        sql_query = generate_sql_query(question, schema)
        sql_query = extract_and_refine_sql_query(sql_query)
        try:
            # print("Executing SQL Query:", sql_query)
            result = conn.execute(sql_query).fetchall()
            # print("Result:", result)
            return result, sql_query
        except Exception as e:
            print(f"Error executing SQL query: {e}")
            return "There was an error executing the SQL query."
#--------------------------#
# Get Answer #
#--------------------------#
def get_answer(question, context):
    """Answer *question* in natural language from the SQL result *context*.

    Parameters
    ----------
    question : str
        The user's question.
    context : object
        Whatever get_answer_from_sql returned (result rows + query, or an
        error message); it is interpolated into the QA prompt as-is.

    Returns
    -------
    str
        The model's answer, or a fallback message if the completion failed.
    """
    prompt_filled = qa_prompt.format(question=question, context=context)
    try:
        output = pg.Completion.create(
            model="Neural-Chat-7B",
            prompt=prompt_filled,
            max_tokens=200,
            temperature=0.1,
        )
        return output['choices'][0]['text']
    except Exception as e:
        # BUG FIX: the original swallowed the exception and returned a message
        # blaming "the SQL query", but any failure here is in answer
        # generation. Surface the cause and return an accurate message.
        print(f"Error generating answer: {e}")
        return "There was an error generating the answer."
#--------------------------#
# Streamlit app #
#--------------------------#
if "messages" not in st.session_state:
    st.session_state.messages = []
# Replay the conversation so far (Streamlit reruns this script on every input).
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
if prompt := st.chat_input("Ask a question"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
    # contruct prompt thread
    examples = []
    turn = "user"
    example = {}
    # Pair up user/assistant turns; after the loop `latest_message` holds the
    # most recent message content (the new user prompt).
    for m in st.session_state.messages:
        latest_message = m["content"]
        example[turn] = m["content"]
        if turn == "user":
            turn = "assistant"
        else:
            turn = "user"
            examples.append(example)
            example = {}
    # NOTE(review): this checks `example` (the in-progress pair, max 2 keys),
    # so the condition is never true -- `len(examples) > 2` looks intended;
    # `thread` is assigned but never used. Confirm before relying on this.
    if len(example) > 2:
        examples = examples[-2:]
    else:
        thread = ""
    # # Check for PII
    # with st.spinner("Checking for PII..."):
    #     pii_result = pg.PII.check(
    #         prompt=latest_message,
    #         replace=False,
    #         replace_method="fake"
    #     )
    # # Check for injection
    # with st.spinner("Checking for security vulnerabilities..."):
    #     injection_result = pg.Injection.check(
    #         prompt=latest_message,
    #         detect=True
    #     )
    # # Handle insecure states
    # elif "[" in pii_result['checks'][0]['pii_types_and_positions']:
    #     st.warning('Warning! PII detected. Please avoid using personal information.')
    #     full_response = "Warning! PII detected. Please avoid using personal information."
    # elif injection_result['checks'][0]['probability'] > 0.5:
    #     st.warning('Warning! Injection detected. Your input might result in a security breach.')
    #     full_response = "Warning! Injection detected. Your input might result in a security breach."
    # generate response
    with st.spinner("Generating an answer..."):
        context=get_answer_from_sql(latest_message)
        print("context",context)
        completion = get_answer(latest_message,context)
    # display response
    # Stream the answer word-by-word with a cursor glyph for a typing effect.
    for token in completion.split(" "):
        full_response += " " + token
        message_placeholder.markdown(full_response + "▌")
        time.sleep(0.075)
    message_placeholder.markdown(full_response)
    st.session_state.messages.append({"role": "assistant", "content": full_response})
"lancedb.connect"
] | [((413, 433), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (428, 433), False, 'import lancedb\n'), ((890, 947), 'streamlit.markdown', 'st.markdown', (['hide_streamlit_style'], {'unsafe_allow_html': '(True)'}), '(hide_streamlit_style, unsafe_allow_html=True)\n', (901, 947), True, 'import streamlit as st\n'), ((1043, 1075), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/jobs.csv"""'], {}), "('datasets/jobs.csv')\n", (1054, 1075), True, 'import pandas as pd\n'), ((1089, 1123), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/social.csv"""'], {}), "('datasets/social.csv')\n", (1100, 1123), True, 'import pandas as pd\n'), ((1137, 1171), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/movies.csv"""'], {}), "('datasets/movies.csv')\n", (1148, 1171), True, 'import pandas as pd\n'), ((1180, 1215), 'duckdb.connect', 'duckdb.connect', ([], {'database': '""":memory:"""'}), "(database=':memory:')\n", (1194, 1215), False, 'import duckdb\n'), ((1861, 1938), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'qa_template', 'input_variables': "['question', 'context']"}), "(template=qa_template, input_variables=['question', 'context'])\n", (1875, 1938), False, 'from langchain import PromptTemplate\n'), ((2426, 2503), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'sql_template', 'input_variables': "['question', 'schema']"}), "(template=sql_template, input_variables=['question', 'schema'])\n", (2440, 2503), False, 'from langchain import PromptTemplate\n'), ((2672, 2697), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['name'], {}), '(name)\n', (2691, 2697), False, 'from sentence_transformers import SentenceTransformer\n'), ((3239, 3286), 're.search', 're.search', (['"""(SELECT.*?);"""', 'sql_query', 're.DOTALL'], {}), "('(SELECT.*?);', sql_query, re.DOTALL)\n", (3248, 3286), False, 'import re\n'), ((5846, 5877), 'streamlit.chat_input', 'st.chat_input', (['"""Ask a question"""'], {}), "('Ask a question')\n", 
(5859, 5877), True, 'import streamlit as st\n'), ((5883, 5952), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (5915, 5952), True, 'import streamlit as st\n'), ((8226, 8311), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': full_response}"], {}), "({'role': 'assistant', 'content':\n full_response})\n", (8258, 8311), True, 'import streamlit as st\n'), ((2856, 2974), 'predictionguard.Completion.create', 'pg.Completion.create', ([], {'model': '"""deepseek-coder-6.7b-instruct"""', 'prompt': 'prompt_filled', 'max_tokens': '(300)', 'temperature': '(0.1)'}), "(model='deepseek-coder-6.7b-instruct', prompt=\n prompt_filled, max_tokens=300, temperature=0.1)\n", (2876, 2974), True, 'import predictionguard as pg\n'), ((5195, 5298), 'predictionguard.Completion.create', 'pg.Completion.create', ([], {'model': '"""Neural-Chat-7B"""', 'prompt': 'prompt_filled', 'max_tokens': '(200)', 'temperature': '(0.1)'}), "(model='Neural-Chat-7B', prompt=prompt_filled,\n max_tokens=200, temperature=0.1)\n", (5215, 5298), True, 'import predictionguard as pg\n'), ((5758, 5790), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (5773, 5790), True, 'import streamlit as st\n'), ((5800, 5831), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (5811, 5831), True, 'import streamlit as st\n'), ((5962, 5985), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (5977, 5985), True, 'import streamlit as st\n'), ((5995, 6014), 'streamlit.markdown', 'st.markdown', (['prompt'], {}), '(prompt)\n', (6006, 6014), True, 'import streamlit as st\n'), ((6025, 6053), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (6040, 6053), True, 'import streamlit as st\n'), ((6085, 6095), 
'streamlit.empty', 'st.empty', ([], {}), '()\n', (6093, 6095), True, 'import streamlit as st\n'), ((7748, 7785), 'streamlit.spinner', 'st.spinner', (['"""Generating an answer..."""'], {}), "('Generating an answer...')\n", (7758, 7785), True, 'import streamlit as st\n'), ((4413, 4440), 'streamlit.sidebar.info', 'st.sidebar.info', (['table_name'], {}), '(table_name)\n', (4428, 4440), True, 'import streamlit as st\n'), ((4457, 4480), 'streamlit.sidebar.code', 'st.sidebar.code', (['schema'], {}), '(schema)\n', (4472, 4480), True, 'import streamlit as st\n'), ((8147, 8164), 'time.sleep', 'time.sleep', (['(0.075)'], {}), '(0.075)\n', (8157, 8164), False, 'import time\n')] |
from FlagEmbedding import LLMEmbedder, FlagReranker
import os
import lancedb
import re
import pandas as pd
import random
from datasets import load_dataset
import torch
import gc
import lance
from lancedb.embeddings import with_embeddings
task = "qa" # Encode for a specific task (qa, icl, chat, lrlm, tool, convsearch)
embed_model = LLMEmbedder('BAAI/llm-embedder', use_fp16=False) # Load model (automatically use GPUs)
reranker_model = FlagReranker('BAAI/bge-reranker-base', use_fp16=True) # use_fp16 speeds up computation with a slight performance degradation
# NOTE(review): `reranker_model` is only referenced by the commented-out
# rerank() code at the bottom of this file.
"""# Load `Chunks` of data from [BeIR Dataset](https://huggingface.co/datasets/BeIR/scidocs)
Note: This is a dataset built specially for retrieval tasks to see how good your search is working
"""
data=pd.read_csv("Kcc_subset.csv")
# just random samples for faster embed demo
# Build one "query: ..., answer: ..." document string per row, then drop rows
# with missing text so the embedder never sees NaN.
data['documents'] = 'query:' + data['QueryText'] + ', answer:' + data['KccAns']
data = data.dropna()
def embed_documents(batch):
    """Embed a batch of document strings as retrieval "keys".

    LLMEmbedder encodes stored documents ("keys") differently from queries;
    this wrapper applies the module-level task setting.
    """
    vectors = embed_model.encode_keys(batch, task=task)
    return vectors
db = lancedb.connect("./db") # Connect Local DB
# Reuse the persisted embedding table when it exists; otherwise embed every
# document once (in batches of 512) and store the result.
if "doc_embed" in db.table_names():
    table = db.open_table("doc_embed") # Open Table
else:
    # Use the train text chunk data to save embed in the DB
    data1 = with_embeddings(embed_documents, data, column = 'documents',show_progress = True, batch_size = 512)
    table = db.create_table("doc_embed", data=data1) # create Table
"""# Search from a random Text"""
def search(query, top_k = 10):
    """Return the `top_k` stored documents most similar to *query*.

    The hits are joined into a single comma-separated string of their
    `documents` text, with rows lacking `QueryText` dropped.
    """
    # Queries are encoded differently from stored keys in LLMEmbedder.
    query_vector = embed_model.encode_queries(query, task=task)
    hits = table.search(query_vector).limit(top_k)
    docs = (
        hits.to_pandas()
        .dropna(subset = "QueryText")
        .reset_index(drop = True)["documents"]
        .to_list()
    )
    return ",".join(docs)
# query = "how to control flower drop in bottelgourd?"
# print("QUERY:-> ", query)
# # get top_k search results
# search_results = search(query, top_k = 10).to_pandas().dropna(subset = "Query").reset_index(drop = True)["documents"]
# print(",".join(search_results.to_list))
# def rerank(query, search_results):
# search_results["old_similarity_rank"] = search_results.index+1 # Old ranks
# torch.cuda.empty_cache()
# gc.collect()
# search_results["new_scores"] = reranker_model.compute_score([[query,chunk] for chunk in search_results["text"]]) # Re compute ranks
# return search_results.sort_values(by = "new_scores", ascending = False).reset_index(drop = True)
# print("QUERY:-> ", query) | [
"lancedb.connect",
"lancedb.embeddings.with_embeddings"
] | [((356, 404), 'FlagEmbedding.LLMEmbedder', 'LLMEmbedder', (['"""BAAI/llm-embedder"""'], {'use_fp16': '(False)'}), "('BAAI/llm-embedder', use_fp16=False)\n", (367, 404), False, 'from FlagEmbedding import LLMEmbedder, FlagReranker\n'), ((463, 516), 'FlagEmbedding.FlagReranker', 'FlagReranker', (['"""BAAI/bge-reranker-base"""'], {'use_fp16': '(True)'}), "('BAAI/bge-reranker-base', use_fp16=True)\n", (475, 516), False, 'from FlagEmbedding import LLMEmbedder, FlagReranker\n'), ((803, 832), 'pandas.read_csv', 'pd.read_csv', (['"""Kcc_subset.csv"""'], {}), "('Kcc_subset.csv')\n", (814, 832), True, 'import pandas as pd\n'), ((1162, 1185), 'lancedb.connect', 'lancedb.connect', (['"""./db"""'], {}), "('./db')\n", (1177, 1185), False, 'import lancedb\n'), ((1370, 1469), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_documents', 'data'], {'column': '"""documents"""', 'show_progress': '(True)', 'batch_size': '(512)'}), "(embed_documents, data, column='documents', show_progress=\n True, batch_size=512)\n", (1385, 1469), False, 'from lancedb.embeddings import with_embeddings\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import asyncio
import inspect
import os
from abc import abstractmethod
from pathlib import Path
from typing import TYPE_CHECKING, Iterable, List, Literal, Optional, Union
import pyarrow as pa
from overrides import EnforceOverrides, override
from pyarrow import fs
from lancedb.common import data_to_reader, validate_schema
from lancedb.embeddings.registry import EmbeddingFunctionRegistry
from lancedb.utils.events import register_event
from ._lancedb import connect as lancedb_connect
from .pydantic import LanceModel
from .table import AsyncTable, LanceTable, Table, _sanitize_data
from .util import fs_from_uri, get_uri_location, get_uri_scheme, join_uri
if TYPE_CHECKING:
from datetime import timedelta
from ._lancedb import Connection as LanceDbConnection
from .common import DATA, URI
from .embeddings import EmbeddingFunctionConfig
class DBConnection(EnforceOverrides):
    """An active LanceDB connection interface."""
    @abstractmethod
    def table_names(
        self, page_token: Optional[str] = None, limit: int = 10
    ) -> Iterable[str]:
        """List all tables in this database, in sorted order
        Parameters
        ----------
        page_token: str, optional
            The token to use for pagination. If not present, start from the beginning.
            Typically, this token is last table name from the previous page.
            Only supported by LanceDb Cloud.
        limit: int, default 10
            The size of the page to return.
            Only supported by LanceDb Cloud.
        Returns
        -------
        Iterable of str
        """
        pass
    @abstractmethod
    def create_table(
        self,
        name: str,
        data: Optional[DATA] = None,
        schema: Optional[Union[pa.Schema, LanceModel]] = None,
        mode: str = "create",
        exist_ok: bool = False,
        on_bad_vectors: str = "error",
        fill_value: float = 0.0,
        embedding_functions: Optional[List[EmbeddingFunctionConfig]] = None,
    ) -> Table:
        """Create a [Table][lancedb.table.Table] in the database.
        Parameters
        ----------
        name: str
            The name of the table.
        data: The data to initialize the table, *optional*
            User must provide at least one of `data` or `schema`.
            Acceptable types are:
            - dict or list-of-dict
            - pandas.DataFrame
            - pyarrow.Table or pyarrow.RecordBatch
        schema: The schema of the table, *optional*
            Acceptable types are:
            - pyarrow.Schema
            - [LanceModel][lancedb.pydantic.LanceModel]
        mode: str; default "create"
            The mode to use when creating the table.
            Can be either "create" or "overwrite".
            By default, if the table already exists, an exception is raised.
            If you want to overwrite the table, use mode="overwrite".
        exist_ok: bool, default False
            If a table by the same name already exists, then raise an exception
            if exist_ok=False. If exist_ok=True, then open the existing table;
            it will not add the provided data but will validate against any
            schema that's specified.
        on_bad_vectors: str, default "error"
            What to do if any of the vectors are not the same size or contains NaNs.
            One of "error", "drop", "fill".
        fill_value: float
            The value to use when filling vectors. Only used if on_bad_vectors="fill".
        Returns
        -------
        LanceTable
            A reference to the newly created table.
        !!! note
            The vector index won't be created by default.
            To create the index, call the `create_index` method on the table.
        Examples
        --------
        Can create with list of tuples or dictionaries:
        >>> import lancedb
        >>> db = lancedb.connect("./.lancedb")
        >>> data = [{"vector": [1.1, 1.2], "lat": 45.5, "long": -122.7},
        ...         {"vector": [0.2, 1.8], "lat": 40.1, "long":  -74.1}]
        >>> db.create_table("my_table", data)
        LanceTable(connection=..., name="my_table")
        >>> db["my_table"].head()
        pyarrow.Table
        vector: fixed_size_list<item: float>[2]
          child 0, item: float
        lat: double
        long: double
        ----
        vector: [[[1.1,1.2],[0.2,1.8]]]
        lat: [[45.5,40.1]]
        long: [[-122.7,-74.1]]
        You can also pass a pandas DataFrame:
        >>> import pandas as pd
        >>> data = pd.DataFrame({
        ...    "vector": [[1.1, 1.2], [0.2, 1.8]],
        ...    "lat": [45.5, 40.1],
        ...    "long": [-122.7, -74.1]
        ... })
        >>> db.create_table("table2", data)
        LanceTable(connection=..., name="table2")
        >>> db["table2"].head()
        pyarrow.Table
        vector: fixed_size_list<item: float>[2]
          child 0, item: float
        lat: double
        long: double
        ----
        vector: [[[1.1,1.2],[0.2,1.8]]]
        lat: [[45.5,40.1]]
        long: [[-122.7,-74.1]]
        Data is converted to Arrow before being written to disk. For maximum
        control over how data is saved, either provide the PyArrow schema to
        convert to or else provide a [PyArrow Table](pyarrow.Table) directly.
        >>> custom_schema = pa.schema([
        ...   pa.field("vector", pa.list_(pa.float32(), 2)),
        ...   pa.field("lat", pa.float32()),
        ...   pa.field("long", pa.float32())
        ... ])
        >>> db.create_table("table3", data, schema = custom_schema)
        LanceTable(connection=..., name="table3")
        >>> db["table3"].head()
        pyarrow.Table
        vector: fixed_size_list<item: float>[2]
          child 0, item: float
        lat: float
        long: float
        ----
        vector: [[[1.1,1.2],[0.2,1.8]]]
        lat: [[45.5,40.1]]
        long: [[-122.7,-74.1]]
        It is also possible to create an table from `[Iterable[pa.RecordBatch]]`:
        >>> import pyarrow as pa
        >>> def make_batches():
        ...     for i in range(5):
        ...         yield pa.RecordBatch.from_arrays(
        ...             [
        ...                 pa.array([[3.1, 4.1], [5.9, 26.5]],
        ...                     pa.list_(pa.float32(), 2)),
        ...                 pa.array(["foo", "bar"]),
        ...                 pa.array([10.0, 20.0]),
        ...             ],
        ...             ["vector", "item", "price"],
        ...         )
        >>> schema=pa.schema([
        ...     pa.field("vector", pa.list_(pa.float32(), 2)),
        ...     pa.field("item", pa.utf8()),
        ...     pa.field("price", pa.float32()),
        ... ])
        >>> db.create_table("table4", make_batches(), schema=schema)
        LanceTable(connection=..., name="table4")
        """
        raise NotImplementedError
    def __getitem__(self, name: str) -> LanceTable:
        """Dict-style sugar: ``db["t"]`` is equivalent to ``db.open_table("t")``."""
        return self.open_table(name)
    def open_table(self, name: str) -> Table:
        """Open a Lance Table in the database.
        Parameters
        ----------
        name: str
            The name of the table.
        Returns
        -------
        A LanceTable object representing the table.
        """
        raise NotImplementedError
    def drop_table(self, name: str):
        """Drop a table from the database.
        Parameters
        ----------
        name: str
            The name of the table.
        """
        raise NotImplementedError
    def drop_database(self):
        """
        Drop database
        This is the same thing as dropping all the tables
        """
        raise NotImplementedError
class LanceDBConnection(DBConnection):
    """
    A connection to a LanceDB database.
    Parameters
    ----------
    uri: str or Path
        The root uri of the database.
    read_consistency_interval: timedelta, default None
        The interval at which to check for updates to the table from other
        processes. If None, then consistency is not checked. For performance
        reasons, this is the default. For strong consistency, set this to
        zero seconds. Then every read will check for updates from other
        processes. As a compromise, you can set this to a non-zero timedelta
        for eventual consistency. If more than that interval has passed since
        the last check, then the table will be checked for updates. Note: this
        consistency only applies to read operations. Write operations are
        always consistent.
    Examples
    --------
    >>> import lancedb
    >>> db = lancedb.connect("./.lancedb")
    >>> db.create_table("my_table", data=[{"vector": [1.1, 1.2], "b": 2},
    ...                                   {"vector": [0.5, 1.3], "b": 4}])
    LanceTable(connection=..., name="my_table")
    >>> db.create_table("another_table", data=[{"vector": [0.4, 0.4], "b": 6}])
    LanceTable(connection=..., name="another_table")
    >>> sorted(db.table_names())
    ['another_table', 'my_table']
    >>> len(db)
    2
    >>> db["my_table"]
    LanceTable(connection=..., name="my_table")
    >>> "my_table" in db
    True
    >>> db.drop_table("my_table")
    >>> db.drop_table("another_table")
    """
    def __init__(
        self, uri: URI, *, read_consistency_interval: Optional[timedelta] = None
    ):
        # `scheme` is only resolved for non-Path uris; when `uri` is a Path the
        # `isinstance` check below short-circuits before `scheme` is read.
        if not isinstance(uri, Path):
            scheme = get_uri_scheme(uri)
        is_local = isinstance(uri, Path) or scheme == "file"
        if is_local:
            if isinstance(uri, str):
                uri = Path(uri)
            uri = uri.expanduser().absolute()
            # Ensure the database directory exists for local connections.
            Path(uri).mkdir(parents=True, exist_ok=True)
        self._uri = str(uri)
        self._entered = False
        self.read_consistency_interval = read_consistency_interval
    def __repr__(self) -> str:
        # e.g. LanceDBConnection(/path/to/db, read_consistency_interval=...)
        val = f"{self.__class__.__name__}({self._uri}"
        if self.read_consistency_interval is not None:
            val += f", read_consistency_interval={repr(self.read_consistency_interval)}"
        val += ")"
        return val
    @property
    def uri(self) -> str:
        # Root uri of the database, normalized to a string.
        return self._uri
    async def _async_get_table_names(self, start_after: Optional[str], limit: int):
        # Delegate the listing to the async connection implementation.
        conn = AsyncConnection(await lancedb_connect(self.uri))
        return await conn.table_names(start_after=start_after, limit=limit)
    @override
    def table_names(
        self, page_token: Optional[str] = None, limit: int = 10
    ) -> Iterable[str]:
        """Get the names of all tables in the database. The names are sorted.
        Returns
        -------
        Iterator of str.
            A list of table names.
        """
        try:
            # Raises RuntimeError when no event loop is running (sync caller).
            asyncio.get_running_loop()
            # User application is async. Soon we will just tell them to use the
            # async version. Until then fallback to the old sync implementation.
            try:
                filesystem = fs_from_uri(self.uri)[0]
            except pa.ArrowInvalid:
                raise NotImplementedError("Unsupported scheme: " + self.uri)
            try:
                loc = get_uri_location(self.uri)
                paths = filesystem.get_file_info(fs.FileSelector(loc))
            except FileNotFoundError:
                # It is ok if the file does not exist since it will be created
                paths = []
            # Table names are directory names with the ".lance" extension removed.
            tables = [
                os.path.splitext(file_info.base_name)[0]
                for file_info in paths
                if file_info.extension == "lance"
            ]
            tables.sort()
            return tables
        except RuntimeError:
            # User application is sync. It is safe to use the async implementation
            # under the hood.
            return asyncio.run(self._async_get_table_names(page_token, limit))
    def __len__(self) -> int:
        # Number of tables in the database.
        return len(self.table_names())
    def __contains__(self, name: str) -> bool:
        # Support `"table" in db`.
        return name in self.table_names()
    @override
    def create_table(
        self,
        name: str,
        data: Optional[DATA] = None,
        schema: Optional[Union[pa.Schema, LanceModel]] = None,
        mode: str = "create",
        exist_ok: bool = False,
        on_bad_vectors: str = "error",
        fill_value: float = 0.0,
        embedding_functions: Optional[List[EmbeddingFunctionConfig]] = None,
    ) -> LanceTable:
        """Create a table in the database.
        See
        ---
        DBConnection.create_table
        """
        if mode.lower() not in ["create", "overwrite"]:
            raise ValueError("mode must be either 'create' or 'overwrite'")
        tbl = LanceTable.create(
            self,
            name,
            data,
            schema,
            mode=mode,
            exist_ok=exist_ok,
            on_bad_vectors=on_bad_vectors,
            fill_value=fill_value,
            embedding_functions=embedding_functions,
        )
        return tbl
    @override
    def open_table(self, name: str) -> LanceTable:
        """Open a table in the database.
        Parameters
        ----------
        name: str
            The name of the table.
        Returns
        -------
        A LanceTable object representing the table.
        """
        return LanceTable.open(self, name)
    @override
    def drop_table(self, name: str, ignore_missing: bool = False):
        """Drop a table from the database.
        Parameters
        ----------
        name: str
            The name of the table.
        ignore_missing: bool, default False
            If True, ignore if the table does not exist.
        """
        try:
            filesystem, path = fs_from_uri(self.uri)
            table_path = join_uri(path, name + ".lance")
            filesystem.delete_dir(table_path)
        except FileNotFoundError:
            if not ignore_missing:
                raise
    @override
    def drop_database(self):
        """Drop the entire database by deleting its root directory
        (equivalent to dropping all tables)."""
        filesystem, path = fs_from_uri(self.uri)
        filesystem.delete_dir(path)
class AsyncConnection(object):
    """An active LanceDB connection
    To obtain a connection you can use the [connect] function.
    This could be a native connection (using lance) or a remote connection (e.g. for
    connecting to LanceDb Cloud)
    Local connections do not currently hold any open resources but they may do so in the
    future (for example, for shared cache or connections to catalog services) Remote
    connections represent an open connection to the remote server. The [close] method
    can be used to release any underlying resources eagerly. The connection can also
    be used as a context manager:
    Connections can be shared on multiple threads and are expected to be long lived.
    Connections can also be used as a context manager, however, in many cases a single
    connection can be used for the lifetime of the application and so this is often
    not needed. Closing a connection is optional. If it is not closed then it will
    be automatically closed when the connection object is deleted.
    Examples
    --------
    >>> import asyncio
    >>> import lancedb
    >>> async def my_connect():
    ...     with await lancedb.connect("/tmp/my_dataset") as conn:
    ...         # do something with the connection
    ...         pass
    ...     # conn is closed here
    """
    def __init__(self, connection: LanceDbConnection):
        self._inner = connection
    def __repr__(self):
        return self._inner.__repr__()
    def __enter__(self):
        # Fix: previously this method evaluated `self` without returning it, so
        # `with conn as c:` bound `c` to None (breaking the usage shown in the
        # class docstring). A context manager must return the managed object.
        return self
    def __exit__(self, *_):
        self.close()
    def is_open(self):
        """Return True if the connection is open."""
        return self._inner.is_open()
    def close(self):
        """Close the connection, releasing any underlying resources.
        It is safe to call this method multiple times.
        Any attempt to use the connection after it is closed will result in an error."""
        self._inner.close()
    async def table_names(
        self, *, start_after: Optional[str] = None, limit: Optional[int] = None
    ) -> Iterable[str]:
        """List all tables in this database, in sorted order
        Parameters
        ----------
        start_after: str, optional
            If present, only return names that come lexicographically after the supplied
            value.
            This can be combined with limit to implement pagination by setting this to
            the last table name from the previous page.
        limit: int, optional
            The maximum number of results to return.
        Returns
        -------
        Iterable of str
        """
        return await self._inner.table_names(start_after=start_after, limit=limit)
    async def create_table(
        self,
        name: str,
        data: Optional[DATA] = None,
        schema: Optional[Union[pa.Schema, LanceModel]] = None,
        mode: Optional[Literal["create", "overwrite"]] = None,
        exist_ok: Optional[bool] = None,
        on_bad_vectors: Optional[str] = None,
        fill_value: Optional[float] = None,
        embedding_functions: Optional[List[EmbeddingFunctionConfig]] = None,
    ) -> AsyncTable:
        """Create a [Table][lancedb.table.Table] in the database.
        Parameters
        ----------
        name: str
            The name of the table.
        data: The data to initialize the table, *optional*
            User must provide at least one of `data` or `schema`.
            Acceptable types are:
            - dict or list-of-dict
            - pandas.DataFrame
            - pyarrow.Table or pyarrow.RecordBatch
        schema: The schema of the table, *optional*
            Acceptable types are:
            - pyarrow.Schema
            - [LanceModel][lancedb.pydantic.LanceModel]
        mode: Literal["create", "overwrite"]; default "create"
            The mode to use when creating the table.
            Can be either "create" or "overwrite".
            By default, if the table already exists, an exception is raised.
            If you want to overwrite the table, use mode="overwrite".
        exist_ok: bool, default False
            If a table by the same name already exists, then raise an exception
            if exist_ok=False. If exist_ok=True, then open the existing table;
            it will not add the provided data but will validate against any
            schema that's specified.
        on_bad_vectors: str, default "error"
            What to do if any of the vectors are not the same size or contains NaNs.
            One of "error", "drop", "fill".
        fill_value: float
            The value to use when filling vectors. Only used if on_bad_vectors="fill".
        Returns
        -------
        LanceTable
            A reference to the newly created table.
        !!! note
            The vector index won't be created by default.
            To create the index, call the `create_index` method on the table.
        Examples
        --------
        Can create with list of tuples or dictionaries:
        >>> import lancedb
        >>> db = lancedb.connect("./.lancedb")
        >>> data = [{"vector": [1.1, 1.2], "lat": 45.5, "long": -122.7},
        ...         {"vector": [0.2, 1.8], "lat": 40.1, "long": -74.1}]
        >>> db.create_table("my_table", data)
        LanceTable(connection=..., name="my_table")
        >>> db["my_table"].head()
        pyarrow.Table
        vector: fixed_size_list<item: float>[2]
          child 0, item: float
        lat: double
        long: double
        ----
        vector: [[[1.1,1.2],[0.2,1.8]]]
        lat: [[45.5,40.1]]
        long: [[-122.7,-74.1]]
        You can also pass a pandas DataFrame:
        >>> import pandas as pd
        >>> data = pd.DataFrame({
        ...    "vector": [[1.1, 1.2], [0.2, 1.8]],
        ...    "lat": [45.5, 40.1],
        ...    "long": [-122.7, -74.1]
        ... })
        >>> db.create_table("table2", data)
        LanceTable(connection=..., name="table2")
        >>> db["table2"].head()
        pyarrow.Table
        vector: fixed_size_list<item: float>[2]
          child 0, item: float
        lat: double
        long: double
        ----
        vector: [[[1.1,1.2],[0.2,1.8]]]
        lat: [[45.5,40.1]]
        long: [[-122.7,-74.1]]
        Data is converted to Arrow before being written to disk. For maximum
        control over how data is saved, either provide the PyArrow schema to
        convert to or else provide a [PyArrow Table](pyarrow.Table) directly.
        >>> custom_schema = pa.schema([
        ...   pa.field("vector", pa.list_(pa.float32(), 2)),
        ...   pa.field("lat", pa.float32()),
        ...   pa.field("long", pa.float32())
        ... ])
        >>> db.create_table("table3", data, schema = custom_schema)
        LanceTable(connection=..., name="table3")
        >>> db["table3"].head()
        pyarrow.Table
        vector: fixed_size_list<item: float>[2]
          child 0, item: float
        lat: float
        long: float
        ----
        vector: [[[1.1,1.2],[0.2,1.8]]]
        lat: [[45.5,40.1]]
        long: [[-122.7,-74.1]]
        It is also possible to create an table from `[Iterable[pa.RecordBatch]]`:
        >>> import pyarrow as pa
        >>> def make_batches():
        ...     for i in range(5):
        ...         yield pa.RecordBatch.from_arrays(
        ...             [
        ...                 pa.array([[3.1, 4.1], [5.9, 26.5]],
        ...                     pa.list_(pa.float32(), 2)),
        ...                 pa.array(["foo", "bar"]),
        ...                 pa.array([10.0, 20.0]),
        ...             ],
        ...             ["vector", "item", "price"],
        ...         )
        >>> schema=pa.schema([
        ...     pa.field("vector", pa.list_(pa.float32(), 2)),
        ...     pa.field("item", pa.utf8()),
        ...     pa.field("price", pa.float32()),
        ... ])
        >>> db.create_table("table4", make_batches(), schema=schema)
        LanceTable(connection=..., name="table4")
        """
        if inspect.isclass(schema) and issubclass(schema, LanceModel):
            # convert LanceModel to pyarrow schema
            # note that it's possible this contains
            # embedding function metadata already
            schema = schema.to_arrow_schema()
        metadata = None
        if embedding_functions is not None:
            # If we passed in embedding functions explicitly
            # then we'll override any schema metadata that
            # may was implicitly specified by the LanceModel schema
            registry = EmbeddingFunctionRegistry.get_instance()
            metadata = registry.get_table_metadata(embedding_functions)
        # Defining defaults here and not in function prototype. In the future
        # these defaults will move into rust so better to keep them as None.
        if on_bad_vectors is None:
            on_bad_vectors = "error"
        if fill_value is None:
            fill_value = 0.0
        if data is not None:
            data = _sanitize_data(
                data,
                schema,
                metadata=metadata,
                on_bad_vectors=on_bad_vectors,
                fill_value=fill_value,
            )
        if schema is None:
            if data is None:
                raise ValueError("Either data or schema must be provided")
            elif hasattr(data, "schema"):
                # Infer the schema directly from the (sanitized) data.
                schema = data.schema
            elif isinstance(data, Iterable):
                if metadata:
                    raise TypeError(
                        (
                            "Persistent embedding functions not yet "
                            "supported for generator data input"
                        )
                    )
        if metadata:
            # Attach embedding-function metadata so it persists with the table.
            schema = schema.with_metadata(metadata)
        validate_schema(schema)
        if exist_ok is None:
            exist_ok = False
        if mode is None:
            mode = "create"
        if mode == "create" and exist_ok:
            mode = "exist_ok"
        if data is None:
            new_table = await self._inner.create_empty_table(name, mode, schema)
        else:
            data = data_to_reader(data, schema)
            new_table = await self._inner.create_table(
                name,
                mode,
                data,
            )
        register_event("create_table")
        return AsyncTable(new_table)
    async def open_table(self, name: str) -> Table:
        """Open a Lance Table in the database.
        Parameters
        ----------
        name: str
            The name of the table.
        Returns
        -------
        A LanceTable object representing the table.
        """
        table = await self._inner.open_table(name)
        register_event("open_table")
        return AsyncTable(table)
    async def drop_table(self, name: str):
        """Drop a table from the database.
        Parameters
        ----------
        name: str
            The name of the table.
        """
        raise NotImplementedError
    async def drop_database(self):
        """
        Drop database
        This is the same thing as dropping all the tables
        """
        raise NotImplementedError
| [
"lancedb.utils.events.register_event",
"lancedb.embeddings.registry.EmbeddingFunctionRegistry.get_instance",
"lancedb.common.data_to_reader",
"lancedb.common.validate_schema"
] | [((24504, 24527), 'lancedb.common.validate_schema', 'validate_schema', (['schema'], {}), '(schema)\n', (24519, 24527), False, 'from lancedb.common import data_to_reader, validate_schema\n'), ((25026, 25056), 'lancedb.utils.events.register_event', 'register_event', (['"""create_table"""'], {}), "('create_table')\n", (25040, 25056), False, 'from lancedb.utils.events import register_event\n'), ((25442, 25470), 'lancedb.utils.events.register_event', 'register_event', (['"""open_table"""'], {}), "('open_table')\n", (25456, 25470), False, 'from lancedb.utils.events import register_event\n'), ((11366, 11392), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (11390, 11392), False, 'import asyncio\n'), ((22703, 22726), 'inspect.isclass', 'inspect.isclass', (['schema'], {}), '(schema)\n', (22718, 22726), False, 'import inspect\n'), ((23242, 23282), 'lancedb.embeddings.registry.EmbeddingFunctionRegistry.get_instance', 'EmbeddingFunctionRegistry.get_instance', ([], {}), '()\n', (23280, 23282), False, 'from lancedb.embeddings.registry import EmbeddingFunctionRegistry\n'), ((24852, 24880), 'lancedb.common.data_to_reader', 'data_to_reader', (['data', 'schema'], {}), '(data, schema)\n', (24866, 24880), False, 'from lancedb.common import data_to_reader, validate_schema\n'), ((10234, 10243), 'pathlib.Path', 'Path', (['uri'], {}), '(uri)\n', (10238, 10243), False, 'from pathlib import Path\n'), ((10302, 10311), 'pathlib.Path', 'Path', (['uri'], {}), '(uri)\n', (10306, 10311), False, 'from pathlib import Path\n'), ((11856, 11876), 'pyarrow.fs.FileSelector', 'fs.FileSelector', (['loc'], {}), '(loc)\n', (11871, 11876), False, 'from pyarrow import fs\n'), ((12061, 12098), 'os.path.splitext', 'os.path.splitext', (['file_info.base_name'], {}), '(file_info.base_name)\n', (12077, 12098), False, 'import os\n')] |
import time
import re
import shutil
import os
import urllib
import html2text
import predictionguard as pg
from langchain import PromptTemplate, FewShotPromptTemplate
from langchain.text_splitter import CharacterTextSplitter
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.llms import PredictionGuard
import streamlit as st
from sentence_transformers import SentenceTransformer
import lancedb
from lancedb.embeddings import with_embeddings
import pandas as pd
#--------------------------#
# Prompt templates #
#--------------------------#
# Renders one past conversation turn when building the few-shot chat prompt.
demo_formatter_template = """\nUser: {user}
Assistant: {assistant}\n"""
demo_prompt = PromptTemplate(
    input_variables=["user", "assistant"],
    template=demo_formatter_template,
)
# Classifier prompt: decides whether the user's message is a request for code.
category_template = """### Instruction:
Read the below input and determine if it is a request to generate computer code? Respond "yes" or "no".
### Input:
{query}
### Response:
"""
category_prompt = PromptTemplate(
    input_variables=["query"],
    template=category_template
)
# Retrieval-augmented QA prompt: answer strictly from the supplied context.
qa_template = """### Instruction:
Read the context below and respond with an answer to the question. If the question cannot be answered based on the context alone or the context does not explicitly say the answer to the question, write "Sorry I had trouble answering this question, based on the information I found."
### Input:
Context: {context}
Question: {query}
### Response:
"""
qa_prompt = PromptTemplate(
    input_variables=["context", "query"],
    template=qa_template
)
# Free-form chat prompt; {context} carries the prior conversation turns.
chat_template = """### Instruction:
You are a friendly and clever AI assistant. Respond to the latest human message in the input conversation below.
### Input:
{context}
Human: {query}
AI:
### Response:
"""
chat_prompt = PromptTemplate(
    input_variables=["context", "query"],
    template=chat_template
)
# Code-generation prompt for the "yes" branch of the category classifier.
code_template = """### Instruction:
You are a code generation assistant. Respond with a code snippet and any explanation requested in the below input.
### Input:
{query}
### Response:
"""
code_prompt = PromptTemplate(
    input_variables=["query"],
    template=code_template
)
#-------------------------#
# Vector search #
#-------------------------#
# Embeddings setup
# Sentence-transformers model used for all chunk/query embeddings below.
name="all-MiniLM-L12-v2"
model = SentenceTransformer(name)
def embed_batch(batch):
    """Encode each sentence in *batch* into an embedding vector."""
    return list(map(model.encode, batch))
def embed(sentence):
    """Encode a single sentence into its embedding vector."""
    vector = model.encode(sentence)
    return vector
# LanceDB setup
# NOTE(review): this wipes any existing local ".lancedb" directory on every
# app start, so vector data never persists across runs — confirm intentional.
if os.path.exists(".lancedb"):
    shutil.rmtree(".lancedb")
os.mkdir(".lancedb")
uri = ".lancedb"
db = lancedb.connect(uri)
def vector_search_urls(urls, query, sessionid):
    """Embed the text of each url into a session-scoped LanceDB table, then
    return the single most relevant chunk for ``query`` (or "" when no chunk
    is close enough). The session table is dropped before returning."""
    for url in urls:
        # Let's get the html off of a website.
        fp = urllib.request.urlopen(url)
        mybytes = fp.read()
        html = mybytes.decode("utf8")
        fp.close()
        # And convert it to text.
        h = html2text.HTML2Text()
        h.ignore_links = True
        text = h.handle(html)
        # Chunk the text into smaller pieces for injection into LLM prompts.
        text_splitter = CharacterTextSplitter(chunk_size=700, chunk_overlap=50)
        docs = text_splitter.split_text(text)
        # '#' is stripped because it collides with the prompt section markers.
        docs = [x.replace('#', '-') for x in docs]
        # Create a dataframe with the chunk ids and chunks
        metadata = []
        for i in range(len(docs)):
            metadata.append([
                i,
                docs[i],
                url
            ])
        doc_df = pd.DataFrame(metadata, columns=["chunk", "text", "url"])
        # Embed the documents
        data = with_embeddings(embed_batch, doc_df)
        # Create the table if there isn't one.
        if sessionid not in db.table_names():
            db.create_table(sessionid, data=data)
        else:
            table = db.open_table(sessionid)
            table.add(data=data)
    # Perform the query
    table = db.open_table(sessionid)
    results = table.search(embed(query)).limit(1).to_df()
    # Keep only matches close enough to be useful.
    results = results[results['_distance'] < 1.0]
    if len(results) == 0:
        doc_use = ""
    else:
        doc_use = results['text'].values[0]
    # Clean up
    db.drop_table(sessionid)
    return doc_use
#-------------------------#
# Info Agent #
#-------------------------#
# Web-search agent used when a question can't be answered from supplied urls.
tools = load_tools(["serpapi"], llm=PredictionGuard(model="Nous-Hermes-Llama2-13B"))
agent = initialize_agent(
    tools,
    PredictionGuard(model="Nous-Hermes-Llama2-13B"),
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
    # Cap agent runtime so the chat UI never hangs indefinitely.
    max_execution_time=30)
#-------------------------#
# Helper functions #
#-------------------------#
def find_urls(text):
    """Return all http/https URLs found in *text*, in order of appearance."""
    pattern = r'(https?://[^\s]+)'
    return re.findall(pattern, text)
# QuestionID provides some help in determining if a sentence is a question.
class QuestionID:
    """
    QuestionID has the actual logic used to determine if sentence is a question
    """
    def padCharacter(self, character: str, sentence: str):
        """Ensure *character* is preceded by a space in *sentence* so it
        tokenizes as its own word (e.g. "really?" -> "really ?")."""
        if character in sentence:
            position = sentence.index(character)
            if position > 0 and position < len(sentence):
                # Check for existing white space before the special character.
                if (sentence[position - 1]) != " ":
                    sentence = sentence.replace(character, (" " + character))
        return sentence
    def predict(self, sentence: str):
        """Heuristically decide whether *sentence* is a question.

        Returns True when the first word is a common question starter or any
        word (including a padded '?') is a question element; False otherwise.
        Empty/whitespace-only input returns False (previously this raised
        IndexError on ``splitWords[0]``).
        """
        questionStarters = [
            "which", "wont", "cant", "isnt", "arent", "is", "do", "does",
            "will", "can"
        ]
        questionElements = [
            "who", "what", "when", "where", "why", "how", "sup", "?"
        ]
        sentence = sentence.lower()
        sentence = sentence.replace("\'", "")
        sentence = self.padCharacter('?', sentence)
        splitWords = sentence.split()
        # Guard: no words means it cannot be a question (fixes IndexError).
        if not splitWords:
            return False
        return splitWords[0] in questionStarters or any(
            word in splitWords for word in questionElements)
#---------------------#
# Streamlit config #
#---------------------#
#st.set_page_config(layout="wide")
# Hide the hamburger menu
# CSS injected via st.markdown to hide Streamlit's default menu and footer.
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
#--------------------------#
# Streamlit sidebar #
#--------------------------#
st.sidebar.title("Super Chat 🚀")
st.sidebar.markdown(
    "This app provides a chat interface driven by various generative AI models and "
    "augmented (via information retrieval and agentic processing)."
)
# Optional comma-separated reference urls; used by vector_search_urls below.
url_text = st.sidebar.text_area(
    "Enter one or more urls for reference information (separated by a comma):",
    "", height=100)
if len(url_text) > 0:
    urls = url_text.split(",")
else:
    urls = []
#--------------------------#
# Streamlit app #
#--------------------------#
# Main chat loop: replay history, classify the latest message, and route it to
# RAG / agent / code-generation / plain chat before streaming the reply.
if "messages" not in st.session_state:
    # First page load for this session: start with an empty chat history.
    st.session_state.messages = []
for message in st.session_state.messages:
    # Replay the stored conversation so it survives Streamlit reruns.
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
if prompt := st.chat_input("Hello?"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        # process the context
        # Fold the history into (user, assistant) example pairs for few-shot.
        examples = []
        turn = "user"
        example = {}
        for m in st.session_state.messages:
            latest_message = m["content"]
            example[turn] = m["content"]
            if turn == "user":
                turn = "assistant"
            else:
                turn = "user"
                examples.append(example)
                example = {}
        # Fix: previously this checked len(example) (the <=2-key scratch dict),
        # so the history was never trimmed; trim the collected example pairs.
        if len(examples) > 4:
            examples = examples[-4:]
        # Determine what kind of message this is.
        with st.spinner("Trying to figure out what you are wanting..."):
            result = pg.Completion.create(
                model="WizardCoder",
                prompt=category_prompt.format(query=latest_message),
                output={
                    "type": "categorical",
                    "categories": ["yes", "no"]
                }
            )
        # configure out chain
        code = result['choices'][0]['output']
        qIDModel = QuestionID()
        question = qIDModel.predict(latest_message)
        if code == "no" and question:
            # if there are urls, let's embed them as a primary data source.
            if len(urls) > 0:
                with st.spinner("Performing vector search..."):
                    info_context = vector_search_urls(urls, latest_message, "assistant")
            else:
                info_context = ""
            # Handle the informational request.
            if info_context != "":
                with st.spinner("Generating a RAG result..."):
                    result = pg.Completion.create(
                        model="Nous-Hermes-Llama2-13B",
                        prompt=qa_prompt.format(context=info_context, query=latest_message)
                    )
                    completion = result['choices'][0]['text'].split('#')[0].strip()
            # Otherwise try an agentic approach.
            else:
                with st.spinner("Trying to find an answer with an agent..."):
                    try:
                        completion = agent.run(latest_message)
                    except Exception:
                        completion = "Sorry, I didn't find an answer. Could you rephrase the question?"
                    if "Agent stopped" in completion:
                        completion = "Sorry, I didn't find an answer. Could you rephrase the question?"
        elif code == "yes":
            # Handle the code generation request.
            with st.spinner("Generating code..."):
                result = pg.Completion.create(
                    model="WizardCoder",
                    prompt=code_prompt.format(query=latest_message),
                    max_tokens=500
                )
                completion = result['choices'][0]['text']
        else:
            # contruct prompt
            few_shot_prompt = FewShotPromptTemplate(
                examples=examples,
                example_prompt=demo_prompt,
                example_separator="",
                prefix="The following is a conversation between an AI assistant and a human user. The assistant is helpful, creative, clever, and very friendly.\n",
                suffix="\nHuman: {human}\nAssistant: ",
                input_variables=["human"],
            )
            prompt = few_shot_prompt.format(human=latest_message)
            # generate response
            with st.spinner("Generating chat response..."):
                result = pg.Completion.create(
                    model="Nous-Hermes-Llama2-13B",
                    prompt=prompt,
                )
                completion = result['choices'][0]['text']
        # Print out the response.
        # Strip any hallucinated next turns / section markers from the reply.
        completion = completion.split("Human:")[0].strip()
        completion = completion.split("H:")[0].strip()
        completion = completion.split('#')[0].strip()
        # Stream the reply token-by-token with a trailing cursor glyph.
        for token in completion.split(" "):
            full_response += " " + token
            message_placeholder.markdown(full_response + "▌")
            time.sleep(0.075)
        message_placeholder.markdown(full_response)
        st.session_state.messages.append({"role": "assistant", "content": full_response})
"lancedb.connect",
"lancedb.embeddings.with_embeddings"
] | [((728, 820), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['user', 'assistant']", 'template': 'demo_formatter_template'}), "(input_variables=['user', 'assistant'], template=\n demo_formatter_template)\n", (742, 820), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((1030, 1099), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['query']", 'template': 'category_template'}), "(input_variables=['query'], template=category_template)\n", (1044, 1099), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((1510, 1584), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['context', 'query']", 'template': 'qa_template'}), "(input_variables=['context', 'query'], template=qa_template)\n", (1524, 1584), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((1820, 1896), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['context', 'query']", 'template': 'chat_template'}), "(input_variables=['context', 'query'], template=chat_template)\n", (1834, 1896), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((2113, 2178), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['query']", 'template': 'code_template'}), "(input_variables=['query'], template=code_template)\n", (2127, 2178), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((2328, 2353), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['name'], {}), '(name)\n', (2347, 2353), False, 'from sentence_transformers import SentenceTransformer\n'), ((2513, 2539), 'os.path.exists', 'os.path.exists', (['""".lancedb"""'], {}), "('.lancedb')\n", (2527, 2539), False, 'import os\n'), ((2571, 2591), 'os.mkdir', 'os.mkdir', (['""".lancedb"""'], {}), "('.lancedb')\n", (2579, 2591), False, 'import os\n'), ((2614, 2634), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2629, 
2634), False, 'import lancedb\n'), ((6281, 6338), 'streamlit.markdown', 'st.markdown', (['hide_streamlit_style'], {'unsafe_allow_html': '(True)'}), '(hide_streamlit_style, unsafe_allow_html=True)\n', (6292, 6338), True, 'import streamlit as st\n'), ((6429, 6461), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Super Chat 🚀"""'], {}), "('Super Chat 🚀')\n", (6445, 6461), True, 'import streamlit as st\n'), ((6462, 6634), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""This app provides a chat interface driven by various generative AI models and augmented (via information retrieval and agentic processing)."""'], {}), "(\n 'This app provides a chat interface driven by various generative AI models and augmented (via information retrieval and agentic processing).'\n )\n", (6481, 6634), True, 'import streamlit as st\n'), ((6649, 6770), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', (['"""Enter one or more urls for reference information (separated by a comma):"""', '""""""'], {'height': '(100)'}), "(\n 'Enter one or more urls for reference information (separated by a comma):',\n '', height=100)\n", (6669, 6770), True, 'import streamlit as st\n'), ((2545, 2570), 'shutil.rmtree', 'shutil.rmtree', (['""".lancedb"""'], {}), "('.lancedb')\n", (2558, 2570), False, 'import shutil\n'), ((4440, 4487), 'langchain.llms.PredictionGuard', 'PredictionGuard', ([], {'model': '"""Nous-Hermes-Llama2-13B"""'}), "(model='Nous-Hermes-Llama2-13B')\n", (4455, 4487), False, 'from langchain.llms import PredictionGuard\n'), ((4702, 4740), 're.findall', 're.findall', (['"""(https?://[^\\\\s]+)"""', 'text'], {}), "('(https?://[^\\\\s]+)', text)\n", (4712, 4740), False, 'import re\n'), ((7149, 7172), 'streamlit.chat_input', 'st.chat_input', (['"""Hello?"""'], {}), "('Hello?')\n", (7162, 7172), True, 'import streamlit as st\n'), ((7178, 7247), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 
'user', 'content': prompt})\n", (7210, 7247), True, 'import streamlit as st\n'), ((11513, 11598), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': full_response}"], {}), "({'role': 'assistant', 'content':\n full_response})\n", (11545, 11598), True, 'import streamlit as st\n'), ((2767, 2794), 'urllib.request.urlopen', 'urllib.request.urlopen', (['url'], {}), '(url)\n', (2789, 2794), False, 'import urllib\n'), ((2927, 2948), 'html2text.HTML2Text', 'html2text.HTML2Text', ([], {}), '()\n', (2946, 2948), False, 'import html2text\n'), ((3111, 3166), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(700)', 'chunk_overlap': '(50)'}), '(chunk_size=700, chunk_overlap=50)\n', (3132, 3166), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((3507, 3563), 'pandas.DataFrame', 'pd.DataFrame', (['metadata'], {'columns': "['chunk', 'text', 'url']"}), "(metadata, columns=['chunk', 'text', 'url'])\n", (3519, 3563), True, 'import pandas as pd\n'), ((3618, 3654), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_batch', 'doc_df'], {}), '(embed_batch, doc_df)\n', (3633, 3654), False, 'from lancedb.embeddings import with_embeddings\n'), ((4349, 4396), 'langchain.llms.PredictionGuard', 'PredictionGuard', ([], {'model': '"""Nous-Hermes-Llama2-13B"""'}), "(model='Nous-Hermes-Llama2-13B')\n", (4364, 4396), False, 'from langchain.llms import PredictionGuard\n'), ((7061, 7093), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (7076, 7093), True, 'import streamlit as st\n'), ((7103, 7134), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (7114, 7134), True, 'import streamlit as st\n'), ((7257, 7280), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (7272, 7280), True, 'import streamlit as st\n'), ((7290, 7309), 
'streamlit.markdown', 'st.markdown', (['prompt'], {}), '(prompt)\n', (7301, 7309), True, 'import streamlit as st\n'), ((7320, 7348), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (7335, 7348), True, 'import streamlit as st\n'), ((7380, 7390), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (7388, 7390), True, 'import streamlit as st\n'), ((7955, 8013), 'streamlit.spinner', 'st.spinner', (['"""Trying to figure out what you are wanting..."""'], {}), "('Trying to figure out what you are wanting...')\n", (7965, 8013), True, 'import streamlit as st\n'), ((11438, 11455), 'time.sleep', 'time.sleep', (['(0.075)'], {}), '(0.075)\n', (11448, 11455), False, 'import time\n'), ((10288, 10613), 'langchain.FewShotPromptTemplate', 'FewShotPromptTemplate', ([], {'examples': 'examples', 'example_prompt': 'demo_prompt', 'example_separator': '""""""', 'prefix': '"""The following is a conversation between an AI assistant and a human user. The assistant is helpful, creative, clever, and very friendly.\n"""', 'suffix': '"""\nHuman: {human}\nAssistant: """', 'input_variables': "['human']"}), '(examples=examples, example_prompt=demo_prompt,\n example_separator=\'\', prefix=\n """The following is a conversation between an AI assistant and a human user. 
The assistant is helpful, creative, clever, and very friendly.\n"""\n , suffix="""\nHuman: {human}\nAssistant: """, input_variables=[\'human\'])\n', (10309, 10613), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((8640, 8681), 'streamlit.spinner', 'st.spinner', (['"""Performing vector search..."""'], {}), "('Performing vector search...')\n", (8650, 8681), True, 'import streamlit as st\n'), ((8929, 8969), 'streamlit.spinner', 'st.spinner', (['"""Generating a RAG result..."""'], {}), "('Generating a RAG result...')\n", (8939, 8969), True, 'import streamlit as st\n'), ((9377, 9432), 'streamlit.spinner', 'st.spinner', (['"""Trying to find an answer with an agent..."""'], {}), "('Trying to find an answer with an agent...')\n", (9387, 9432), True, 'import streamlit as st\n'), ((9910, 9942), 'streamlit.spinner', 'st.spinner', (['"""Generating code..."""'], {}), "('Generating code...')\n", (9920, 9942), True, 'import streamlit as st\n'), ((10823, 10864), 'streamlit.spinner', 'st.spinner', (['"""Generating chat response..."""'], {}), "('Generating chat response...')\n", (10833, 10864), True, 'import streamlit as st\n'), ((10891, 10958), 'predictionguard.Completion.create', 'pg.Completion.create', ([], {'model': '"""Nous-Hermes-Llama2-13B"""', 'prompt': 'prompt'}), "(model='Nous-Hermes-Llama2-13B', prompt=prompt)\n", (10911, 10958), True, 'import predictionguard as pg\n')] |
import logging
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Union
logger = logging.getLogger(__name__)
from hamilton import contrib
with contrib.catch_import_errors(__name__, __file__, logger):
import pyarrow as pa
import lancedb
import numpy as np
import pandas as pd
from lancedb.pydantic import LanceModel
from hamilton.function_modifiers import tag
VectorType = Union[list, np.ndarray, pa.Array, pa.ChunkedArray]
DataType = Union[Dict, List[Dict], pd.DataFrame, pa.Table, Iterable[pa.RecordBatch]]
TableSchema = Union[pa.Schema, LanceModel]
def client(uri: Union[str, Path] = "./.lancedb") -> lancedb.DBConnection:
    """Open a connection to a local LanceDB database.

    :param uri: path to local LanceDB
    :return: connection to LanceDB instance.
    """
    connection = lancedb.connect(uri=uri)
    return connection
def _create_table(
    client: lancedb.DBConnection,
    table_name: str,
    schema: Optional[TableSchema] = None,
    overwrite_table: bool = False,
) -> lancedb.db.LanceTable:
    """Create a new table based on schema."""
    if overwrite_table:
        mode = "overwrite"
    else:
        mode = "create"
    return client.create_table(name=table_name, schema=schema, mode=mode)
@tag(side_effect="True")
def table_ref(
client: lancedb.DBConnection,
table_name: str,
schema: Optional[TableSchema] = None,
overwrite_table: bool = False,
) -> lancedb.db.LanceTable:
"""Create or reference a LanceDB table
:param vdb_client: LanceDB connection.
:param table_name: Name of the table.
:param schema: Pyarrow schema defining the table schema.
:param overwrite_table: If True, overwrite existing table
:return: Reference to existing or newly created table.
"""
try:
table = client.open_table(table_name)
except FileNotFoundError:
if schema is None:
raise ValueError("`schema` must be provided to create table.")
table = _create_table(
client=client,
table_name=table_name,
schema=schema,
overwrite_table=overwrite_table,
)
return table
@tag(side_effect="True")
def reset(client: lancedb.DBConnection) -> Dict[str, List[str]]:
"""Drop all existing tables.
:param vdb_client: LanceDB connection.
:return: dictionary containing all the dropped tables.
"""
tables_dropped = []
for table_name in client.table_names():
client.drop_table(table_name)
tables_dropped.append(table_name)
return dict(tables_dropped=tables_dropped)
@tag(side_effect="True")
def insert(table_ref: lancedb.db.LanceTable, data: DataType) -> Dict:
"""Push new data to the specified table.
:param table_ref: Reference to the LanceDB table.
:param data: Data to add to the table. Ref: https://lancedb.github.io/lancedb/guides/tables/#adding-to-a-table
:return: Reference to the table and number of rows added
"""
n_rows_before = table_ref.to_arrow().shape[0]
table_ref.add(data)
n_rows_after = table_ref.to_arrow().shape[0]
n_rows_added = n_rows_after - n_rows_before
return dict(table=table_ref, n_rows_added=n_rows_added)
@tag(side_effect="True")
def delete(table_ref: lancedb.db.LanceTable, delete_expression: str) -> Dict:
"""Delete existing data using an SQL expression.
:param table_ref: Reference to the LanceDB table.
:param data: Expression to select data. Ref: https://lancedb.github.io/lancedb/sql/
:return: Reference to the table and number of rows deleted
"""
n_rows_before = table_ref.to_arrow().shape[0]
table_ref.delete(delete_expression)
n_rows_after = table_ref.to_arrow().shape[0]
n_rows_deleted = n_rows_before - n_rows_after
return dict(table=table_ref, n_rows_deleted=n_rows_deleted)
def vector_search(
    table_ref: lancedb.db.LanceTable,
    vector_query: VectorType,
    columns: Optional[List[str]] = None,
    where: Optional[str] = None,
    prefilter_where: bool = False,
    limit: int = 10,
) -> pd.DataFrame:
    """Search the table for nearest neighbors of an embedding vector.

    :param table_ref: table to search
    :param vector_query: embedding of the query
    :param columns: columns to include in the results
    :param where: SQL where clause to pre- or post-filter results
    :param prefilter_where: If True filter rows before search else filter after search
    :param limit: number of rows to return
    :return: A dataframe of results
    """
    # Build the query step by step instead of one long chained expression.
    query = table_ref.search(
        query=vector_query,
        query_type="vector",
        vector_column_name="vector",
    )
    query = query.select(columns=columns)
    query = query.where(where, prefilter=prefilter_where)
    query = query.limit(limit=limit)
    return query.to_pandas()
def full_text_search(
    table_ref: lancedb.db.LanceTable,
    full_text_query: str,
    full_text_index: Union[str, List[str]],
    where: Optional[str] = None,
    limit: int = 10,
    rebuild_index: bool = True,
) -> pd.DataFrame:
    """Search the table's text columns with a full-text query.

    :param table_ref: table to search
    :param full_text_query: text query
    :param full_text_index: one or more text columns to search
    :param where: SQL where clause to pre- or post-filter results
    :param limit: number of rows to return
    :param rebuild_index: If True rebuild the index
    :return: A dataframe of results
    """
    # NOTE. Currently, the index needs to be recreated whenever data is added
    # ref: https://lancedb.github.io/lancedb/fts/#installation
    if rebuild_index:
        table_ref.create_fts_index(full_text_index)
    query = table_ref.search(query=full_text_query, query_type="fts")
    query = query.select(full_text_index)
    query = query.where(where)
    query = query.limit(limit)
    return query.to_pandas()
| [
"lancedb.connect"
] | [((107, 134), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (124, 134), False, 'import logging\n'), ((1219, 1242), 'hamilton.function_modifiers.tag', 'tag', ([], {'side_effect': '"""True"""'}), "(side_effect='True')\n", (1222, 1242), False, 'from hamilton.function_modifiers import tag\n'), ((2122, 2145), 'hamilton.function_modifiers.tag', 'tag', ([], {'side_effect': '"""True"""'}), "(side_effect='True')\n", (2125, 2145), False, 'from hamilton.function_modifiers import tag\n'), ((2554, 2577), 'hamilton.function_modifiers.tag', 'tag', ([], {'side_effect': '"""True"""'}), "(side_effect='True')\n", (2557, 2577), False, 'from hamilton.function_modifiers import tag\n'), ((3166, 3189), 'hamilton.function_modifiers.tag', 'tag', ([], {'side_effect': '"""True"""'}), "(side_effect='True')\n", (3169, 3189), False, 'from hamilton.function_modifiers import tag\n'), ((171, 226), 'hamilton.contrib.catch_import_errors', 'contrib.catch_import_errors', (['__name__', '__file__', 'logger'], {}), '(__name__, __file__, logger)\n', (198, 226), False, 'from hamilton import contrib\n'), ((816, 840), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'uri'}), '(uri=uri)\n', (831, 840), False, 'import lancedb\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import importlib.metadata
import platform
import random
import sys
import time
from lancedb.utils import CONFIG
from lancedb.utils.general import TryExcept
from .general import (
PLATFORMS,
get_git_origin_url,
is_git_dir,
is_github_actions_ci,
is_online,
is_pip_package,
is_pytest_running,
threaded_request,
)
class _Events:
    """
    A class for collecting anonymous event analytics. Event analytics are enabled when ``diagnostics=True`` in config and
    disabled when ``diagnostics=False``.
    You can enable or disable diagnostics by running ``lancedb diagnostics --enabled`` or ``lancedb diagnostics --disabled``.
    Attributes
    ----------
    url : str
        The URL to send anonymous events.
    rate_limit : float
        The rate limit in seconds for sending events.
    metadata : dict
        A dictionary containing metadata about the environment.
    enabled : bool
        A flag to enable or disable Events based on certain conditions.
    """

    _instance = None
    url = "https://app.posthog.com/capture/"
    headers = {"Content-Type": "application/json"}
    api_key = "phc_oENDjGgHtmIDrV6puUiFem2RB4JA8gGWulfdulmMdZP"
    # This api-key is write only and is safe to expose in the codebase.

    def __init__(self):
        """
        Initializes the Events object with default values for events, rate_limit, and metadata.
        """
        self.events = []  # events list
        self.throttled_event_names = ["search_table"]
        self.throttled_events = set()
        self.max_events = 5  # max events to store in memory
        self.rate_limit = 60.0 * 5  # rate limit (seconds)
        self.time = 0.0
        if is_git_dir():
            install = "git"
        elif is_pip_package():
            install = "pip"
        else:
            install = "other"
        self.metadata = {
            "cli": sys.argv[0],
            "install": install,
            "python": ".".join(platform.python_version_tuple()[:2]),
            "version": importlib.metadata.version("lancedb"),
            "platforms": PLATFORMS,
            "session_id": round(random.random() * 1e15),
            # 'engagement_time_msec': 1000  # TODO: In future we might be interested in this metric
        }
        TESTS_RUNNING = is_pytest_running() or is_github_actions_ci()
        ONLINE = is_online()
        self.enabled = (
            CONFIG["diagnostics"]
            and not TESTS_RUNNING
            and ONLINE
            and (
                is_pip_package()
                or get_git_origin_url() == "https://github.com/lancedb/lancedb.git"
            )
        )

    def __call__(self, event_name, params=None):
        """
        Attempts to add a new event to the events list and send events if the rate limit is reached.
        Args
        ----
        event_name : str
            The name of the event to be logged.
        params : dict, optional
            A dictionary of additional parameters to be logged with the event.
        """
        ### NOTE: We might need a way to tag a session with a label to check usage from a source. Setting label should be exposed to the user.
        if not self.enabled:
            return
        # Copy before updating: the previous `params={}` default was a shared
        # mutable object, so `params.update(self.metadata)` accumulated metadata
        # across calls and also mutated dicts passed in by callers.
        params = dict(params) if params else {}
        if (
            len(self.events) < self.max_events
        ):  # Events list limited to self.max_events (drop any events past this)
            params.update(self.metadata)
            event = {
                "event": event_name,
                "properties": params,
                "timestamp": datetime.datetime.now(
                    tz=datetime.timezone.utc
                ).isoformat(),
                "distinct_id": CONFIG["uuid"],
            }
            if event_name not in self.throttled_event_names:
                self.events.append(event)
            elif event_name not in self.throttled_events:
                # Throttled events are recorded at most once per flush window.
                self.throttled_events.add(event_name)
                self.events.append(event)
        # Check rate limit
        t = time.time()
        if (t - self.time) < self.rate_limit:
            return
        # Time is over rate limiter, send now
        data = {
            "api_key": self.api_key,
            "distinct_id": CONFIG["uuid"],  # posthog needs this to accepts the event
            "batch": self.events,
        }
        # POST equivalent to requests.post(self.url, json=data).
        # threaded request is used to avoid blocking, retries are disabled, and verbose is disabled
        # to avoid any possible disruption in the console.
        threaded_request(
            method="post",
            url=self.url,
            headers=self.headers,
            json=data,
            retry=0,
            verbose=False,
        )
        # Flush & Reset
        self.events = []
        self.throttled_events = set()
        self.time = t
@TryExcept(verbose=False)
def register_event(name: str, **kwargs):
    """Record a single analytics event on the lazily created _Events singleton."""
    instance = _Events._instance
    if instance is None:
        instance = _Events._instance = _Events()
    instance(name, **kwargs)
| [
"lancedb.utils.general.TryExcept"
] | [((5422, 5446), 'lancedb.utils.general.TryExcept', 'TryExcept', ([], {'verbose': '(False)'}), '(verbose=False)\n', (5431, 5446), False, 'from lancedb.utils.general import TryExcept\n'), ((4584, 4595), 'time.time', 'time.time', ([], {}), '()\n', (4593, 4595), False, 'import time\n'), ((2567, 2598), 'platform.python_version_tuple', 'platform.python_version_tuple', ([], {}), '()\n', (2596, 2598), False, 'import platform\n'), ((2735, 2750), 'random.random', 'random.random', ([], {}), '()\n', (2748, 2750), False, 'import random\n'), ((4127, 4174), 'datetime.datetime.now', 'datetime.datetime.now', ([], {'tz': 'datetime.timezone.utc'}), '(tz=datetime.timezone.utc)\n', (4148, 4174), False, 'import datetime\n')] |
import argparse
import os
import sys
from concurrent.futures import ProcessPoolExecutor, as_completed
from functools import lru_cache
from pathlib import Path
from typing import Any, Iterator
import lancedb
import pandas as pd
import srsly
from codetiming import Timer
from dotenv import load_dotenv
from lancedb.pydantic import pydantic_to_schema
from sentence_transformers import SentenceTransformer
from tqdm import tqdm
sys.path.insert(1, os.path.realpath(Path(__file__).resolve().parents[1]))
from api.config import Settings
from schemas.wine import LanceModelWine, Wine
# Load settings (e.g. embedding checkpoint, LanceDB dir) from a local .env file at import time.
load_dotenv()
# Custom types
JsonBlob = dict[str, Any]
class FileNotFoundError(Exception):
    """Raised when the expected JSONL data file is absent.

    NOTE(review): this deliberately-or-not shadows the builtin
    ``FileNotFoundError``; ``get_json_data`` raises this module-level name,
    so renaming it would change which exception callers catch.
    """

    pass
@lru_cache()
def get_settings():
    """Return the application ``Settings``, cached so .env is read only once."""
    return Settings()
def chunk_iterable(item_list: list[JsonBlob], chunksize: int) -> Iterator[list[JsonBlob]]:
    """Yield successive slices of *item_list*, each at most ``chunksize`` items long."""
    yield from (
        item_list[start : start + chunksize]
        for start in range(0, len(item_list), chunksize)
    )
def get_json_data(data_dir: Path, filename: str) -> list[JsonBlob]:
    """Get all line-delimited json files (.jsonl) from a directory with a given prefix"""
    file_path = data_dir / filename
    if not file_path.is_file():
        # File may not have been uncompressed yet so try to do that first
        # NOTE(review): this reads `file_path` even though is_file() returned
        # False — presumably srsly resolves the gzipped variant; confirm.
        data = srsly.read_gzip_jsonl(file_path)
        # This time if it isn't there it really doesn't exist
        if not file_path.is_file():
            # Raises the module-level FileNotFoundError defined above.
            raise FileNotFoundError(f"No valid .jsonl file found in `{data_dir}`")
    else:
        data = srsly.read_gzip_jsonl(file_path)
    return data
def validate(
    data: list[JsonBlob],
    exclude_none: bool = False,
) -> list[JsonBlob]:
    """Round-trip each record through the ``Wine`` model and return the dumped dicts."""
    validated_data = []
    for item in data:
        validated_data.append(Wine(**item).model_dump(exclude_none=exclude_none))
    return validated_data
def embed_func(batch: list[str], model) -> list[list[float]]:
    """Encode each sentence in *batch* (lower-cased first) with the given model."""
    vectors = []
    for sentence in batch:
        vectors.append(model.encode(sentence.lower()))
    return vectors
def vectorize_text(data: list[JsonBlob]) -> list[LanceModelWine] | None:
    """Attach an embedding vector to every record; return None if pairing fails."""
    # Load a sentence transformer model for semantic similarity from a specified checkpoint
    model_id = get_settings().embedding_model_checkpoint
    assert model_id, "Invalid embedding model checkpoint specified in .env file"
    model = SentenceTransformer(model_id)
    ids = [item["id"] for item in data]
    to_vectorize = [record.get("to_vectorize") for record in data]
    vectors = embed_func(to_vectorize, model)
    try:
        return [{**record, "vector": vector} for record, vector in zip(data, vectors)]
    except Exception as e:
        print(f"{e}: Failed to add ID range {min(ids)}-{max(ids)}")
        return None
def embed_batches(tbl: str, validated_data: list[JsonBlob]) -> pd.DataFrame:
    """Vectorize `validated_data` chunk by chunk and write the result to `tbl`.

    NOTE(review): despite the annotation, `tbl` is used as a LanceDB table
    object (it has `.add`) and the function returns None — confirm signature.
    """
    with ProcessPoolExecutor(max_workers=WORKERS) as executor:
        chunked_data = chunk_iterable(validated_data, CHUNKSIZE)
        embed_data = []
        for chunk in tqdm(chunked_data, total=len(validated_data) // CHUNKSIZE):
            # One future per chunk; the trailing [0] unwraps the single result.
            # NOTE(review): `embed_data` is reassigned on every iteration, so
            # only the final chunk survives to the DataFrame below — earlier
            # chunks appear to be dropped. Confirm whether the build/add step
            # was meant to live inside this loop.
            futures = [executor.submit(vectorize_text, chunk)]
            embed_data = [f.result() for f in as_completed(futures) if f.result()][0]
        df = pd.DataFrame.from_dict(embed_data)
    tbl.add(df, mode="overwrite")
def main(data: list[JsonBlob]) -> None:
    """Validate the raw records, embed them into LanceDB and build the ANN index."""
    db_path = f"../{get_settings().lancedb_dir}"
    table_name = "wines"
    connection = lancedb.connect(db_path)
    table = connection.create_table(
        table_name, schema=pydantic_to_schema(LanceModelWine), mode="overwrite"
    )
    print(f"Created table `{table_name}`, with length {len(table)}")
    with Timer(name="Bulk Index", text="Validated data using Pydantic in {:.4f} sec"):
        validated_data = validate(data, exclude_none=False)
    with Timer(name="Embed batches", text="Created sentence embeddings in {:.4f} sec"):
        embed_batches(table, validated_data)
    print(f"Finished inserting {len(table)} items into LanceDB table")
    with Timer(name="Create index", text="Created IVF-PQ index in {:.4f} sec"):
        # Creating index (choose num partitions as a power of 2 that's closest to len(dataset) // 5000)
        # In this case, we have 130k datapoints, so the nearest power of 2 is 130000//5000 ~ 32)
        table.create_index(metric="cosine", num_partitions=4, num_sub_vectors=32)
if __name__ == "__main__":
# fmt: off
parser = argparse.ArgumentParser("Bulk index database from the wine reviews JSONL data")
parser.add_argument("--limit", type=int, default=0, help="Limit the size of the dataset to load for testing purposes")
parser.add_argument("--chunksize", type=int, default=1000, help="Size of each chunk to break the dataset into before processing")
parser.add_argument("--filename", type=str, default="winemag-data-130k-v2.jsonl.gz", help="Name of the JSONL zip file to use")
parser.add_argument("--workers", type=int, default=4, help="Number of workers to use for vectorization")
args = vars(parser.parse_args())
# fmt: on
LIMIT = args["limit"]
DATA_DIR = Path(__file__).parents[3] / "data"
FILENAME = args["filename"]
CHUNKSIZE = args["chunksize"]
WORKERS = args["workers"]
data = list(get_json_data(DATA_DIR, FILENAME))
assert data, "No data found in the specified file"
data = data[:LIMIT] if LIMIT > 0 else data
main(data)
print("Finished execution!")
| [
"lancedb.connect",
"lancedb.pydantic.pydantic_to_schema"
] | [((580, 593), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (591, 593), False, 'from dotenv import load_dotenv\n'), ((685, 696), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (694, 696), False, 'from functools import lru_cache\n'), ((793, 803), 'api.config.Settings', 'Settings', ([], {}), '()\n', (801, 803), False, 'from api.config import Settings\n'), ((2355, 2384), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['model_id'], {}), '(model_id)\n', (2374, 2384), False, 'from sentence_transformers import SentenceTransformer\n'), ((3439, 3463), 'lancedb.connect', 'lancedb.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (3454, 3463), False, 'import lancedb\n'), ((4390, 4469), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Bulk index database from the wine reviews JSONL data"""'], {}), "('Bulk index database from the wine reviews JSONL data')\n", (4413, 4469), False, 'import argparse\n'), ((1408, 1440), 'srsly.read_gzip_jsonl', 'srsly.read_gzip_jsonl', (['file_path'], {}), '(file_path)\n', (1429, 1440), False, 'import srsly\n'), ((1647, 1679), 'srsly.read_gzip_jsonl', 'srsly.read_gzip_jsonl', (['file_path'], {}), '(file_path)\n', (1668, 1679), False, 'import srsly\n'), ((2852, 2892), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {'max_workers': 'WORKERS'}), '(max_workers=WORKERS)\n', (2871, 2892), False, 'from concurrent.futures import ProcessPoolExecutor, as_completed\n'), ((3631, 3707), 'codetiming.Timer', 'Timer', ([], {'name': '"""Bulk Index"""', 'text': '"""Validated data using Pydantic in {:.4f} sec"""'}), "(name='Bulk Index', text='Validated data using Pydantic in {:.4f} sec')\n", (3636, 3707), False, 'from codetiming import Timer\n'), ((3779, 3856), 'codetiming.Timer', 'Timer', ([], {'name': '"""Embed batches"""', 'text': '"""Created sentence embeddings in {:.4f} sec"""'}), "(name='Embed batches', text='Created sentence embeddings in {:.4f} sec')\n", (3784, 3856), False, 'from codetiming 
import Timer\n'), ((3981, 4050), 'codetiming.Timer', 'Timer', ([], {'name': '"""Create index"""', 'text': '"""Created IVF-PQ index in {:.4f} sec"""'}), "(name='Create index', text='Created IVF-PQ index in {:.4f} sec')\n", (3986, 4050), False, 'from codetiming import Timer\n'), ((3242, 3276), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['embed_data'], {}), '(embed_data)\n', (3264, 3276), True, 'import pandas as pd\n'), ((3505, 3539), 'lancedb.pydantic.pydantic_to_schema', 'pydantic_to_schema', (['LanceModelWine'], {}), '(LanceModelWine)\n', (3523, 3539), False, 'from lancedb.pydantic import pydantic_to_schema\n'), ((1813, 1825), 'schemas.wine.Wine', 'Wine', ([], {}), '(**item)\n', (1817, 1825), False, 'from schemas.wine import LanceModelWine, Wine\n'), ((5060, 5074), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (5064, 5074), False, 'from pathlib import Path\n'), ((462, 476), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (466, 476), False, 'from pathlib import Path\n'), ((3185, 3206), 'concurrent.futures.as_completed', 'as_completed', (['futures'], {}), '(futures)\n', (3197, 3206), False, 'from concurrent.futures import ProcessPoolExecutor, as_completed\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from typing import Any, Callable, Dict, Iterable, Optional, Union
import aiohttp
import attrs
import pyarrow as pa
from pydantic import BaseModel
from lancedb.common import Credential
from lancedb.remote import VectorQuery, VectorQueryResult
from lancedb.remote.errors import LanceDBClientError
ARROW_STREAM_CONTENT_TYPE = "application/vnd.apache.arrow.stream"
def _check_not_closed(f):
@functools.wraps(f)
def wrapped(self, *args, **kwargs):
if self.closed:
raise ValueError("Connection is closed")
return f(self, *args, **kwargs)
return wrapped
async def _read_ipc(resp: aiohttp.ClientResponse) -> pa.Table:
    """Read the whole response body and deserialize it as an Arrow IPC file."""
    resp_body = await resp.read()
    with pa.ipc.open_file(pa.BufferReader(resp_body)) as reader:
        return reader.read_all()
@attrs.define(slots=False)
class RestfulLanceDBClient:
    """Async HTTP client for one remote LanceDB database over its REST API."""

    db_name: str
    region: str
    api_key: Credential
    # Full base URL override; when unset the cloud URL is derived from db_name/region.
    host_override: Optional[str] = attrs.field(default=None)
    # Set by close(); guarded on every call via the _check_not_closed decorator.
    closed: bool = attrs.field(default=False, init=False)

    @functools.cached_property
    def session(self) -> aiohttp.ClientSession:
        """Lazily created aiohttp session bound to the database's base URL."""
        url = (
            self.host_override
            or f"https://{self.db_name}.{self.region}.api.lancedb.com"
        )
        return aiohttp.ClientSession(url)

    async def close(self):
        """Close the underlying session; subsequent calls raise ValueError."""
        await self.session.close()
        self.closed = True

    @functools.cached_property
    def headers(self) -> Dict[str, str]:
        """Base request headers (API key plus routing headers where needed)."""
        headers = {
            "x-api-key": self.api_key,
        }
        if self.region == "local":  # Local test mode
            headers["Host"] = f"{self.db_name}.{self.region}.api.lancedb.com"
        if self.host_override:
            headers["x-lancedb-database"] = self.db_name
        return headers

    @staticmethod
    async def _check_status(resp: aiohttp.ClientResponse):
        """Raise LanceDBClientError for any non-200 response status."""
        if resp.status == 404:
            raise LanceDBClientError(f"Not found: {await resp.text()}")
        elif 400 <= resp.status < 500:
            raise LanceDBClientError(
                f"Bad Request: {resp.status}, error: {await resp.text()}"
            )
        elif 500 <= resp.status < 600:
            raise LanceDBClientError(
                f"Internal Server Error: {resp.status}, error: {await resp.text()}"
            )
        elif resp.status != 200:
            raise LanceDBClientError(
                f"Unknown Error: {resp.status}, error: {await resp.text()}"
            )

    @_check_not_closed
    async def get(self, uri: str, params: Union[Dict[str, Any], BaseModel] = None):
        """Send a GET request and return the deserialized JSON response payload."""
        if isinstance(params, BaseModel):
            params: Dict[str, Any] = params.dict(exclude_none=True)
        async with self.session.get(
            uri,
            params=params,
            headers=self.headers,
            timeout=aiohttp.ClientTimeout(total=30),
        ) as resp:
            await self._check_status(resp)
            return await resp.json()

    @_check_not_closed
    async def post(
        self,
        uri: str,
        data: Optional[Union[Dict[str, Any], BaseModel, bytes]] = None,
        params: Optional[Dict[str, Any]] = None,
        content_type: Optional[str] = None,
        deserialize: Callable = lambda resp: resp.json(),
        request_id: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Send a POST request and return the deserialized response payload.
        Parameters
        ----------
        uri : str
            The uri to send the POST request to.
        data: Union[Dict[str, Any], BaseModel]
        request_id: Optional[str]
            Optional client side request id to be sent in the request headers.
        """
        if isinstance(data, BaseModel):
            data: Dict[str, Any] = data.dict(exclude_none=True)
        # Raw bytes are sent as the body verbatim; anything else is JSON-encoded.
        if isinstance(data, bytes):
            req_kwargs = {"data": data}
        else:
            req_kwargs = {"json": data}
        headers = self.headers.copy()
        if content_type is not None:
            headers["content-type"] = content_type
        if request_id is not None:
            headers["x-request-id"] = request_id
        async with self.session.post(
            uri,
            headers=headers,
            params=params,
            timeout=aiohttp.ClientTimeout(total=30),
            **req_kwargs,
        ) as resp:
            resp: aiohttp.ClientResponse = resp
            await self._check_status(resp)
            return await deserialize(resp)

    @_check_not_closed
    async def list_tables(
        self, limit: int, page_token: Optional[str] = None
    ) -> Iterable[str]:
        """List all tables in the database."""
        if page_token is None:
            page_token = ""
        json = await self.get("/v1/table/", {"limit": limit, "page_token": page_token})
        return json["tables"]

    @_check_not_closed
    async def query(self, table_name: str, query: VectorQuery) -> VectorQueryResult:
        """Run a vector query against a table; the response is an Arrow IPC stream."""
        tbl = await self.post(
            f"/v1/table/{table_name}/query/", query, deserialize=_read_ipc
        )
        return VectorQueryResult(tbl)
| [
"lancedb.remote.VectorQueryResult"
] | [((1402, 1427), 'attrs.define', 'attrs.define', ([], {'slots': '(False)'}), '(slots=False)\n', (1414, 1427), False, 'import attrs\n'), ((1006, 1024), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (1021, 1024), False, 'import functools\n'), ((1548, 1573), 'attrs.field', 'attrs.field', ([], {'default': 'None'}), '(default=None)\n', (1559, 1573), False, 'import attrs\n'), ((1594, 1632), 'attrs.field', 'attrs.field', ([], {'default': '(False)', 'init': '(False)'}), '(default=False, init=False)\n', (1605, 1632), False, 'import attrs\n'), ((1856, 1882), 'aiohttp.ClientSession', 'aiohttp.ClientSession', (['url'], {}), '(url)\n', (1877, 1882), False, 'import aiohttp\n'), ((5743, 5765), 'lancedb.remote.VectorQueryResult', 'VectorQueryResult', (['tbl'], {}), '(tbl)\n', (5760, 5765), False, 'from lancedb.remote import VectorQuery, VectorQueryResult\n'), ((1327, 1353), 'pyarrow.BufferReader', 'pa.BufferReader', (['resp_body'], {}), '(resp_body)\n', (1342, 1353), True, 'import pyarrow as pa\n'), ((3473, 3504), 'aiohttp.ClientTimeout', 'aiohttp.ClientTimeout', ([], {'total': '(30)'}), '(total=30)\n', (3494, 3504), False, 'import aiohttp\n'), ((4904, 4935), 'aiohttp.ClientTimeout', 'aiohttp.ClientTimeout', ([], {'total': '(30)'}), '(total=30)\n', (4925, 4935), False, 'import aiohttp\n')] |
import os
from typing import List, Dict, Optional, Any
from dataclasses import dataclass, field
from .preprocessor import call_pdf_preprocess, call_image_preprocess, call_audio_preprocess
from .utils.semantic_chuncker import SemanticChunker
from .utils.txt_preprocessor import NERExtractor
from langchain_openai.embeddings import OpenAIEmbeddings
from lancedb.embeddings import EmbeddingFunctionRegistry
from lancedb.pydantic import LanceModel, Vector
import lancedb
import uuid
from time import sleep
import colorama
# Module-level side effect: resolve the shared embedding-function registry and
# build the OpenAI embedding function used by the `Schema` vector field below.
registry = EmbeddingFunctionRegistry().get_instance()
openai = registry.get("openai").create()
class Schema(LanceModel):
    """LanceDB row model: an OpenAI embedding plus the source text and its id."""

    vector: Vector(openai.ndims()) = openai.VectorField()
    id: str
    text: str
@dataclass
class SrcType:
    """Maps each supported media category to the file extensions it accepts."""

    txt: List[str] = field(default_factory=lambda: "txt csv json".split())
    pdf: List[str] = field(default_factory=lambda: "pdf".split())
    word: List[str] = field(default_factory=lambda: "doc docx".split())
    image: List[str] = field(default_factory=lambda: "jpg jpeg png gif bmp tiff".split())
    sound: List[str] = field(default_factory=lambda: "mp3 wav aac flac ogg".split())
class SrcIngestor(SrcType, NERExtractor):
    """
    class that will read any document or file of various format and store its content in the corresponding memory bank
    """
    def __init__(self, memory_bank: str,
                 sess_name: str,
                 file_sources: Dict[str, list[str]],
                 schema=Schema,
                 openai_api=None) -> None:
        """Set up embeddings and the LanceDB table this session will write into.

        :param memory_bank: name of the LanceDB database directory under STM
        :param sess_name: name of the (overwritten) table for this session
        :param file_sources: mapping of source category -> list of file paths
        :param schema: LanceDB row model (defaults to the module-level Schema)
        :param openai_api: OpenAI API key used for embeddings and preprocessing
        """
        super().__init__()
        # NOTE(review): NERExtractor.__init__ is also in the MRO of super();
        # the explicit second call presumably re-runs its setup — confirm.
        NERExtractor.__init__(self)
        self.entities = None
        self.ai_credentials = openai_api
        self.base_schema = schema
        self.memory_bank = memory_bank
        self.file_sources = file_sources
        self._embeddings = OpenAIEmbeddings(openai_api_key=self.ai_credentials)
        current_dir = os.path.dirname(__file__)
        stm_dir = os.path.join(current_dir, '..', 'STM')
        # NOTE(review): the "\\" separator makes this path Windows-only.
        self.db = lancedb.connect(f"{stm_dir}\\{self.memory_bank}")
        self._table = self.db.create_table(f"{sess_name}", schema=self.base_schema, mode="overwrite")
    def file_broker(self, progress_update=None) -> None:
        """
        This function will take file_sources and pair it with the corresponding method to ingest the file.

        NOTE(review): `progress_update` is currently unused, and every file —
        including unsupported ones — is deleted after processing via os.remove.
        """
        for file_type, files in self.file_sources.items():
            for file in files:
                # Dispatch on the file extension against the SrcType category lists.
                extension = file.split('.')[-1]
                if extension in self.txt:
                    self.ingest_txt(file)
                elif extension in self.pdf:
                    # print in blue color
                    print(colorama.Fore.BLUE + f"Processing PDF {file}...")
                    sleep(2)
                    self.ingest_pdf(file, open_ai=self.ai_credentials)
                elif extension in self.word:
                    self.ingest_word(file)
                elif extension in self.image:
                    # print in blue color
                    print(colorama.Fore.BLUE + f"Processing Image {file}...")
                    sleep(2)
                    self.ingest_image(file, open_ai=self.ai_credentials)
                elif extension in self.sound:
                    # print in blue color
                    print(colorama.Fore.BLUE + f"Processing Audio file {file}...")
                    sleep(2)
                    self.ingest_sound(file)
                else:
                    print(f"Unsupported file type: {file}")
                # Source files are treated as consumed once ingested.
                os.remove(file)
    def add_text(self, texts: List[Any], metadata: Optional[List[dict]] = None) -> None:
        """Embed each text, append metadata/entities, and add the rows to the table."""
        docs = []
        ids = [str(uuid.uuid4()) for _ in texts]
        embeddings = self._embeddings.embed_documents(texts)
        for idx, text in enumerate(texts):
            embedding = embeddings[idx]
            meta = metadata[idx] if metadata else {}
            # make a string out of metadata
            str_meta = '[DOCUMENT INFO]:\n' + ' '.join([f"{k}: {v}\n" for k, v in meta.items()]) if meta else ''
            entities = self.entities if self.entities else ''
            text += '\n' + str_meta + '\n' + entities
            docs.append(
                {
                    "vector": embedding,
                    "id": ids[idx],
                    "text": text,
                    **meta,
                }
            )
        self._table.add(docs)
    def store_data(self, documents, metadata) -> None:
        """Thin wrapper around add_text for symmetry with the ingest_* methods."""
        self.add_text(documents, metadata)
    def entity_finder(self, text):
        """Extract named entities from *text* and cache them for add_text."""
        # Apply the bert-base-NER pipeline to the text
        self.entities = self.extract_entities(text)
    def ingest_word(self, payload) -> None:
        """
        This function will ingest a Word file and store its content in the corresponding memory bank.
        """
        # Not implemented yet.
        pass
    def ingest_txt(self, payload) -> None:
        """
        This function will ingest a txt file (text, csv, json) and store its content in the corresponding memory bank.
        """
        # Not implemented yet.
        pass
    def ingest_pdf(self, payload, open_ai) -> None:
        """
        This function will ingest a pdf file and store its content in the corresponding memory bank.
        """
        processed_pdf, size = call_pdf_preprocess(payload, open_ai)
        self.entity_finder(processed_pdf)
        txt_splitter, metadata = self.chunker(payload, processed_pdf, size, 'pdf')
        pdf_documents = txt_splitter.split_text(processed_pdf)
        self.store_data(pdf_documents, metadata)
    def ingest_image(self, payload, open_ai) -> None:
        """
        This function will ingest an image file and store its content in the corresponding memory bank.
        """
        processed_image, metadata = call_image_preprocess(payload, open_ai)
        txt_splitter, metadata = self.chunker(payload, processed_image, metadata, 'image')
        image_documents = txt_splitter.split_text(processed_image)
        self.store_data(image_documents, metadata)
    def ingest_sound(self, payload) -> None:
        """
        This function will ingest a sound file and store its content in the corresponding memory bank.
        """
        preprocessed_audio, metadata = call_audio_preprocess(payload)
        txt_splitter, metadata = self.chunker(payload, preprocessed_audio, metadata, 'sound')
        audio_documents = txt_splitter.split_text(preprocessed_audio)
        self.store_data(audio_documents, metadata)
    def chunker(self, payload, preprocessed_audio, metadata, action):
        """
        Build a semantic text splitter and per-chunk metadata for the given file.

        NOTE(review): despite the name, `preprocessed_audio` receives the
        preprocessed content for every media type (pdf/image/sound).
        """
        file_name = os.path.basename(payload).split('.')[0]
        text_splitter = SemanticChunker(self._embeddings)
        other_file_names = os.listdir(payload.split(file_name)[0])
        metadata_dic = [{'file_name': file_name, 'other_files': other_file_names,
                         'doc_info': {'file_type': action, 'metadata': metadata}}
                        for _ in range(len(preprocessed_audio))]
        return text_splitter, metadata_dic
| [
"lancedb.connect",
"lancedb.embeddings.EmbeddingFunctionRegistry"
] | [((801, 855), 'dataclasses.field', 'field', ([], {'default_factory': "(lambda : ['txt', 'csv', 'json'])"}), "(default_factory=lambda : ['txt', 'csv', 'json'])\n", (806, 855), False, 'from dataclasses import dataclass, field\n'), ((877, 916), 'dataclasses.field', 'field', ([], {'default_factory': "(lambda : ['pdf'])"}), "(default_factory=lambda : ['pdf'])\n", (882, 916), False, 'from dataclasses import dataclass, field\n'), ((939, 986), 'dataclasses.field', 'field', ([], {'default_factory': "(lambda : ['doc', 'docx'])"}), "(default_factory=lambda : ['doc', 'docx'])\n", (944, 986), False, 'from dataclasses import dataclass, field\n'), ((1010, 1086), 'dataclasses.field', 'field', ([], {'default_factory': "(lambda : ['jpg', 'jpeg', 'png', 'gif', 'bmp', 'tiff'])"}), "(default_factory=lambda : ['jpg', 'jpeg', 'png', 'gif', 'bmp', 'tiff'])\n", (1015, 1086), False, 'from dataclasses import dataclass, field\n'), ((1110, 1178), 'dataclasses.field', 'field', ([], {'default_factory': "(lambda : ['mp3', 'wav', 'aac', 'flac', 'ogg'])"}), "(default_factory=lambda : ['mp3', 'wav', 'aac', 'flac', 'ogg'])\n", (1115, 1178), False, 'from dataclasses import dataclass, field\n'), ((544, 571), 'lancedb.embeddings.EmbeddingFunctionRegistry', 'EmbeddingFunctionRegistry', ([], {}), '()\n', (569, 571), False, 'from lancedb.embeddings import EmbeddingFunctionRegistry\n'), ((1852, 1904), 'langchain_openai.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'self.ai_credentials'}), '(openai_api_key=self.ai_credentials)\n', (1868, 1904), False, 'from langchain_openai.embeddings import OpenAIEmbeddings\n'), ((1928, 1953), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1943, 1953), False, 'import os\n'), ((1973, 2011), 'os.path.join', 'os.path.join', (['current_dir', '""".."""', '"""STM"""'], {}), "(current_dir, '..', 'STM')\n", (1985, 2011), False, 'import os\n'), ((2031, 2080), 'lancedb.connect', 'lancedb.connect', 
(['f"""{stm_dir}\\\\{self.memory_bank}"""'], {}), "(f'{stm_dir}\\\\{self.memory_bank}')\n", (2046, 2080), False, 'import lancedb\n'), ((3587, 3602), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (3596, 3602), False, 'import os\n'), ((3734, 3746), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3744, 3746), False, 'import uuid\n'), ((6822, 6847), 'os.path.basename', 'os.path.basename', (['payload'], {}), '(payload)\n', (6838, 6847), False, 'import os\n'), ((2791, 2799), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (2796, 2799), False, 'from time import sleep\n'), ((3152, 3160), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (3157, 3160), False, 'from time import sleep\n'), ((3430, 3438), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (3435, 3438), False, 'from time import sleep\n')] |
import os
import time
import shutil
import pandas as pd
import lancedb
from lancedb.embeddings import with_embeddings
from langchain import PromptTemplate
import predictionguard as pg
import numpy as np
from sentence_transformers import SentenceTransformer
#---------------------#
# Lance DB Setup #
#---------------------#
# Load the three demo datasets; each becomes one SQL "table" description below.
df1=pd.read_csv('datasets/jobs.csv')
df1_table_name = "jobs"
df2=pd.read_csv('datasets/social.csv')
df2_table_name = "social"
df3=pd.read_csv('datasets/movies.csv')
df3_table_name = "movies"
# local path of the vector db
uri = "schema.lancedb"
db = lancedb.connect(uri)
# Sentence-transformers model used to embed the schema descriptions.
name="all-MiniLM-L12-v2"
# Load model
def load_model():
    """Load the sentence-transformers model named by the module-level `name`."""
    return SentenceTransformer(name)
def embed(query, embModel):
    """Encode `query` into an embedding vector using the supplied model."""
    embedding = embModel.encode(query)
    return embedding
#---------------------#
# SQL Schema Creation #
#---------------------#
def create_schema(df, table_name):
    """Build a CREATE TABLE statement (with example values as comments) from a DataFrame.

    Each column's SQL type is inferred from its first non-null value; a few
    example values are embedded as an inline `--` comment per column so an LLM
    can later describe the table. Columns that are entirely null previously
    crashed on `iloc[0]`; they now fall back to VARCHAR(255).

    Parameters:
    - df (pd.DataFrame): data to derive the schema from.
    - table_name (str): name used in the CREATE TABLE statement.

    Returns:
    - str: the CREATE TABLE statement.
    """
    columns_info = []
    for col in df.columns:
        non_null = df[col].dropna()
        if non_null.empty:
            # No data to infer a type from -- default to a generic string type.
            kind = "VARCHAR(255)"
            example_values = ""
        else:
            first_non_null = non_null.iloc[0]
            if isinstance(first_non_null, np.int64):
                kind = "INTEGER"
            elif isinstance(first_non_null, np.float64):
                kind = "DECIMAL(10,2)"
            else:
                kind = "VARCHAR(255)"  # strings and any other type
            # Sample a few example values for the inline comment.
            example_values = ', '.join(str(x) for x in non_null.unique()[0:4])
        columns_info.append(f"{col} {kind}, -- Example values are {example_values}")
    # Construct the CREATE TABLE statement.
    create_table_statement = "CREATE TABLE" + " " + table_name + " (\n  " + ",\n  ".join(columns_info) + "\n);"
    # Drop a trailing comma directly before ");" if one ends up there.
    create_table_statement = create_table_statement.replace(",\n);", "\n);")
    return create_table_statement
# Generate a CREATE TABLE schema string for each demo dataset.
df1_schema=create_schema(df1,df1_table_name)
df2_schema=create_schema(df2,df2_table_name)
df3_schema=create_schema(df3,df3_table_name)
#---------------------#
#  Prompt Templates   #
#---------------------#
# Prompt asking the LLM to describe a schema; filled in generate_description().
template="""
###System:
Generate a brief description of the below data. Be as detailed as possible.
###User:
{schema}
###Assistant:
"""
prompt=PromptTemplate(template=template,input_variables=["schema"])
#---------------------#
# Generate Description #
#---------------------#
def generate_description(schema):
    """Ask the LLM for a natural-language description of a SQL schema.

    Fills the module-level prompt template with `schema`, sends it to the
    Neural-Chat-7B completion endpoint, and returns the generated text.
    """
    filled = prompt.format(schema=schema)
    response = pg.Completion.create(
        model="Neural-Chat-7B",
        prompt=filled,
        temperature=0.1,
        max_tokens=300,
    )
    return response['choices'][0]['text']
# Describe each schema with the LLM once, up front.
df1_desc=generate_description(df1_schema)
df2_desc=generate_description(df2_schema)
df3_desc=generate_description(df3_schema)
# One row per table: description text (to be embedded), name, and raw schema.
df = pd.DataFrame({
    'text': [df1_desc, df2_desc, df3_desc],
    'table_name': [df1_table_name, df2_table_name, df3_table_name],
    'schema': [df1_schema, df2_schema, df3_schema],
})
print(df)
def load_data():
    """Rebuild the local LanceDB 'schema' table from the descriptions DataFrame.

    Embeds each table description with sentence-transformers and stores the
    rows (text + vector + table_name + schema) for later retrieval.
    """
    # Start from a clean directory on every run.
    if os.path.exists("schema.lancedb"):
        shutil.rmtree("schema.lancedb")
    os.mkdir("schema.lancedb")
    db = lancedb.connect(uri)
    batchModel = SentenceTransformer(name)
    def batch_embed_func(batch):
        # Embed every description string in the batch.
        return [batchModel.encode(sentence) for sentence in batch]
    vecData = with_embeddings(batch_embed_func, df)
    # NOTE(review): the directory is wiped above, so "schema" can never
    # pre-exist here and the else-branch looks unreachable -- confirm intended.
    if "schema" not in db.table_names():
        db.create_table("schema", data=vecData)
    else:
        table = db.open_table("schema")
        table.add(data=vecData)
    return
# Build the index when the script runs.
load_data()
print("Done")
"lancedb.connect",
"lancedb.embeddings.with_embeddings"
] | [((359, 391), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/jobs.csv"""'], {}), "('datasets/jobs.csv')\n", (370, 391), True, 'import pandas as pd\n'), ((429, 463), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/social.csv"""'], {}), "('datasets/social.csv')\n", (440, 463), True, 'import pandas as pd\n'), ((503, 537), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/movies.csv"""'], {}), "('datasets/movies.csv')\n", (514, 537), True, 'import pandas as pd\n'), ((623, 643), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (638, 643), False, 'import lancedb\n'), ((2866, 2927), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': "['schema']"}), "(template=template, input_variables=['schema'])\n", (2880, 2927), False, 'from langchain import PromptTemplate\n'), ((3432, 3607), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': [df1_desc, df2_desc, df3_desc], 'table_name': [df1_table_name,\n df2_table_name, df3_table_name], 'schema': [df1_schema, df2_schema,\n df3_schema]}"], {}), "({'text': [df1_desc, df2_desc, df3_desc], 'table_name': [\n df1_table_name, df2_table_name, df3_table_name], 'schema': [df1_schema,\n df2_schema, df3_schema]})\n", (3444, 3607), True, 'import pandas as pd\n'), ((732, 757), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['name'], {}), '(name)\n', (751, 757), False, 'from sentence_transformers import SentenceTransformer\n'), ((3095, 3198), 'predictionguard.Completion.create', 'pg.Completion.create', ([], {'model': '"""Neural-Chat-7B"""', 'prompt': 'prompt_filled', 'temperature': '(0.1)', 'max_tokens': '(300)'}), "(model='Neural-Chat-7B', prompt=prompt_filled,\n temperature=0.1, max_tokens=300)\n", (3115, 3198), True, 'import predictionguard as pg\n'), ((3651, 3683), 'os.path.exists', 'os.path.exists', (['"""schema.lancedb"""'], {}), "('schema.lancedb')\n", (3665, 3683), False, 'import os\n'), ((3729, 3755), 'os.mkdir', 'os.mkdir', (['"""schema.lancedb"""'], {}), 
"('schema.lancedb')\n", (3737, 3755), False, 'import os\n'), ((3765, 3785), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (3780, 3785), False, 'import lancedb\n'), ((3808, 3833), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['name'], {}), '(name)\n', (3827, 3833), False, 'from sentence_transformers import SentenceTransformer\n'), ((3953, 3990), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['batch_embed_func', 'df'], {}), '(batch_embed_func, df)\n', (3968, 3990), False, 'from lancedb.embeddings import with_embeddings\n'), ((3693, 3724), 'shutil.rmtree', 'shutil.rmtree', (['"""schema.lancedb"""'], {}), "('schema.lancedb')\n", (3706, 3724), False, 'import shutil\n')] |
import typer
import openai
from rag_app.models import TextChunk
from lancedb import connect
from typing import List
from pathlib import Path
from rich.console import Console
from rich.table import Table
from rich import box
import duckdb
app = typer.Typer()
@app.command(help="Query LanceDB for some results")
def db(
    db_path: str = typer.Option(help="Your LanceDB path"),
    table_name: str = typer.Option(help="Table to ingest data into"),
    query: str = typer.Option(help="Text to query against existing vector db chunks"),
    n: int = typer.Option(default=3, help="Maximum number of chunks to return"),
):
    """Embed `query` with OpenAI and print the `n` nearest chunks from LanceDB."""
    if not Path(db_path).exists():
        raise ValueError(f"Database path {db_path} does not exist.")

    table = connect(db_path).open_table(table_name)

    # Embed the query with the same model/dimensions used at ingestion time.
    embedding_response = openai.OpenAI().embeddings.create(
        input=query, model="text-embedding-3-large", dimensions=256
    )
    query_vector = embedding_response.data[0].embedding

    matches: List[TextChunk] = table.search(query_vector).limit(n).to_pydantic(TextChunk)

    # Count chunks per document so each row can show "chunk x of total".
    # duckdb resolves `sql_table` by scanning local variables -- keep the name.
    sql_table = table.to_lance()
    counts_df = duckdb.query(
        "SELECT doc_id, count(chunk_id) as count FROM sql_table GROUP BY doc_id"
    ).to_df()
    doc_id_to_count = dict(zip(counts_df["doc_id"].to_list(), counts_df["count"].to_list()))

    results_table = Table(title="Results", box=box.HEAVY, padding=(1, 2), show_lines=True)
    results_table.add_column("Post Title", style="green", max_width=30)
    results_table.add_column("Content", style="magenta", max_width=120)
    results_table.add_column("Chunk Number", style="yellow")
    results_table.add_column("Publish Date", style="blue")

    for match in matches:
        results_table.add_row(
            f"{match.post_title}({match.source})",
            match.text,
            f"{match.chunk_id}/{doc_id_to_count[match.doc_id]}",
            match.publish_date.strftime("%Y-%m"),
        )
    Console().print(results_table)
| [
"lancedb.connect"
] | [((245, 258), 'typer.Typer', 'typer.Typer', ([], {}), '()\n', (256, 258), False, 'import typer\n'), ((340, 378), 'typer.Option', 'typer.Option', ([], {'help': '"""Your LanceDB path"""'}), "(help='Your LanceDB path')\n", (352, 378), False, 'import typer\n'), ((402, 448), 'typer.Option', 'typer.Option', ([], {'help': '"""Table to ingest data into"""'}), "(help='Table to ingest data into')\n", (414, 448), False, 'import typer\n'), ((467, 535), 'typer.Option', 'typer.Option', ([], {'help': '"""Text to query against existing vector db chunks"""'}), "(help='Text to query against existing vector db chunks')\n", (479, 535), False, 'import typer\n'), ((550, 616), 'typer.Option', 'typer.Option', ([], {'default': '(3)', 'help': '"""Maximum number of chunks to return"""'}), "(default=3, help='Maximum number of chunks to return')\n", (562, 616), False, 'import typer\n'), ((734, 750), 'lancedb.connect', 'connect', (['db_path'], {}), '(db_path)\n', (741, 750), False, 'from lancedb import connect\n'), ((806, 821), 'openai.OpenAI', 'openai.OpenAI', ([], {}), '()\n', (819, 821), False, 'import openai\n'), ((1437, 1507), 'rich.table.Table', 'Table', ([], {'title': '"""Results"""', 'box': 'box.HEAVY', 'padding': '(1, 2)', 'show_lines': '(True)'}), "(title='Results', box=box.HEAVY, padding=(1, 2), show_lines=True)\n", (1442, 1507), False, 'from rich.table import Table\n'), ((1157, 1248), 'duckdb.query', 'duckdb.query', (['"""SELECT doc_id, count(chunk_id) as count FROM sql_table GROUP BY doc_id"""'], {}), "(\n 'SELECT doc_id, count(chunk_id) as count FROM sql_table GROUP BY doc_id')\n", (1169, 1248), False, 'import duckdb\n'), ((2042, 2051), 'rich.console.Console', 'Console', ([], {}), '()\n', (2049, 2051), False, 'from rich.console import Console\n'), ((632, 645), 'pathlib.Path', 'Path', (['db_path'], {}), '(db_path)\n', (636, 645), False, 'from pathlib import Path\n')] |
import typer
from lancedb import connect
from rag_app.models import TextChunk, Document
from pathlib import Path
from typing import Iterable
from tqdm import tqdm
from rich import print
import frontmatter
import hashlib
from datetime import datetime
from unstructured.partition.text import partition_text
app = typer.Typer()
def read_files(path: Path, file_suffix: str) -> Iterable[Document]:
    """Yield a Document for every `file_suffix` file directly inside `path`.

    Each file is parsed with python-frontmatter; the document id is the MD5
    hex digest of the body so re-ingesting identical content produces the
    same id. (The previous `enumerate` index was unused and has been removed.)
    """
    for file in path.iterdir():
        if file.suffix != file_suffix:
            continue
        post = frontmatter.load(file)
        yield Document(
            id=hashlib.md5(post.content.encode("utf-8")).hexdigest(),
            content=post.content,
            filename=file.name,
            metadata=post.metadata,
        )
def batch_chunks(chunks, batch_size=20):
    """Group an iterable of chunks into lists of at most `batch_size` items.

    Full batches are yielded as they fill; any partial batch is yielded last.
    An empty input yields nothing.
    """
    pending = []
    for item in chunks:
        pending.append(item)
        if len(pending) < batch_size:
            continue
        yield pending
        pending = []
    if pending:
        yield pending
def chunk_text(
    documents: Iterable[Document], window_size: int = 1024, overlap: int = 0
):
    """Split each document into chunk dicts ready for LanceDB ingestion.

    NOTE(review): `window_size` and `overlap` are accepted but never used --
    chunking is delegated entirely to unstructured's partition_text defaults.
    Confirm whether they should be forwarded or removed.

    Yields one dict per chunk carrying the doc id, 1-based chunk number, the
    chunk text, and the document metadata (title, parsed date, source URL).
    """
    for doc in documents:
        for chunk_num, chunk in enumerate(partition_text(text=doc.content)):
            yield {
                "doc_id": doc.id,
                "chunk_id": chunk_num + 1,  # 1-based for display ("chunk x/N")
                "text": chunk.text,
                "post_title": doc.metadata["title"],
                # Metadata date is expected as a "YYYY-MM" string.
                "publish_date": datetime.strptime(doc.metadata["date"], "%Y-%m"),
                "source": doc.metadata["url"],
            }
@app.command(help="Ingest data into a given lancedb")
def from_folder(
    db_path: str = typer.Option(help="Your LanceDB path"),
    table_name: str = typer.Option(help="Table to ingest data into"),
    folder_path: str = typer.Option(help="Folder to read data from"),
    file_suffix: str = typer.Option(default=".md", help="File suffix to filter by"),
):
    """Read files from a folder, chunk them, and ingest the chunks into LanceDB."""
    database = connect(db_path)
    if table_name not in database.table_names():
        database.create_table(table_name, schema=TextChunk, mode="overwrite")
    target = database.open_table(table_name)

    source_dir = Path(folder_path)
    if not source_dir.exists():
        raise ValueError(f"Ingestion folder of {folder_path} does not exist")

    # Lazy pipeline: files -> chunks -> batches; nothing is read until iterated.
    batches = batch_chunks(chunk_text(read_files(source_dir, file_suffix)))
    total_added = 0
    for batch in tqdm(batches):
        target.add(batch)
        total_added += len(batch)
    print(f"Added {total_added} chunks to {table_name}")
| [
"lancedb.connect"
] | [((312, 325), 'typer.Typer', 'typer.Typer', ([], {}), '()\n', (323, 325), False, 'import typer\n'), ((1598, 1636), 'typer.Option', 'typer.Option', ([], {'help': '"""Your LanceDB path"""'}), "(help='Your LanceDB path')\n", (1610, 1636), False, 'import typer\n'), ((1660, 1706), 'typer.Option', 'typer.Option', ([], {'help': '"""Table to ingest data into"""'}), "(help='Table to ingest data into')\n", (1672, 1706), False, 'import typer\n'), ((1731, 1776), 'typer.Option', 'typer.Option', ([], {'help': '"""Folder to read data from"""'}), "(help='Folder to read data from')\n", (1743, 1776), False, 'import typer\n'), ((1801, 1861), 'typer.Option', 'typer.Option', ([], {'default': '""".md"""', 'help': '"""File suffix to filter by"""'}), "(default='.md', help='File suffix to filter by')\n", (1813, 1861), False, 'import typer\n'), ((1875, 1891), 'lancedb.connect', 'connect', (['db_path'], {}), '(db_path)\n', (1882, 1891), False, 'from lancedb import connect\n'), ((2058, 2075), 'pathlib.Path', 'Path', (['folder_path'], {}), '(folder_path)\n', (2062, 2075), False, 'from pathlib import Path\n'), ((2333, 2353), 'tqdm.tqdm', 'tqdm', (['batched_chunks'], {}), '(batched_chunks)\n', (2337, 2353), False, 'from tqdm import tqdm\n'), ((2423, 2467), 'rich.print', 'print', (['f"""Added {ttl} chunks to {table_name}"""'], {}), "(f'Added {ttl} chunks to {table_name}')\n", (2428, 2467), False, 'from rich import print\n'), ((517, 539), 'frontmatter.load', 'frontmatter.load', (['file'], {}), '(file)\n', (533, 539), False, 'import frontmatter\n'), ((1142, 1174), 'unstructured.partition.text.partition_text', 'partition_text', ([], {'text': 'doc.content'}), '(text=doc.content)\n', (1156, 1174), False, 'from unstructured.partition.text import partition_text\n'), ((1395, 1443), 'datetime.datetime.strptime', 'datetime.strptime', (["doc.metadata['date']", '"""%Y-%m"""'], {}), "(doc.metadata['date'], '%Y-%m')\n", (1412, 1443), False, 'from datetime import datetime\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from lancedb.utils import CONFIG
@click.group()
@click.version_option(help="LanceDB command line interface entry point")
def cli():
    # The docstring below doubles as click's help text for the top-level group.
    "LanceDB command line interface"
diagnostics_help = """
Enable or disable LanceDB diagnostics. When enabled, LanceDB will send anonymous events to help us improve LanceDB.
These diagnostics are used only for error reporting and no data is collected. You can find more about diagnosis on
our docs: https://lancedb.github.io/lancedb/cli_config/
"""


@cli.command(help=diagnostics_help)
@click.option("--enabled/--disabled", default=True)
def diagnostics(enabled):
    """Persist the diagnostics flag and report the new state."""
    # `bool(enabled)` replaces the redundant `True if enabled else False`.
    CONFIG.update({"diagnostics": bool(enabled)})
    click.echo("LanceDB diagnostics is %s" % ("enabled" if enabled else "disabled"))
@cli.command(help="Show current LanceDB configuration")
def config():
    """Print every configurable LanceDB setting and its current value."""
    # TODO: pretty print as table with colors and formatting
    click.echo("Current LanceDB configuration:")
    settings = CONFIG.copy()
    settings.pop("uuid")  # uuid is informational, not configurable -- hide it
    for key, value in settings.items():
        click.echo("{} ({})".format(key, value))
| [
"lancedb.utils.CONFIG.copy",
"lancedb.utils.CONFIG.update"
] | [((641, 654), 'click.group', 'click.group', ([], {}), '()\n', (652, 654), False, 'import click\n'), ((656, 727), 'click.version_option', 'click.version_option', ([], {'help': '"""LanceDB command line interface entry point"""'}), "(help='LanceDB command line interface entry point')\n", (676, 727), False, 'import click\n'), ((1131, 1181), 'click.option', 'click.option', (['"""--enabled/--disabled"""'], {'default': '(True)'}), "('--enabled/--disabled', default=True)\n", (1143, 1181), False, 'import click\n'), ((1212, 1270), 'lancedb.utils.CONFIG.update', 'CONFIG.update', (["{'diagnostics': True if enabled else False}"], {}), "({'diagnostics': True if enabled else False})\n", (1225, 1270), False, 'from lancedb.utils import CONFIG\n'), ((1275, 1360), 'click.echo', 'click.echo', (["('LanceDB diagnostics is %s' % ('enabled' if enabled else 'disabled'))"], {}), "('LanceDB diagnostics is %s' % ('enabled' if enabled else 'disabled')\n )\n", (1285, 1360), False, 'import click\n'), ((1493, 1537), 'click.echo', 'click.echo', (['"""Current LanceDB configuration:"""'], {}), "('Current LanceDB configuration:')\n", (1503, 1537), False, 'import click\n'), ((1548, 1561), 'lancedb.utils.CONFIG.copy', 'CONFIG.copy', ([], {}), '()\n', (1559, 1561), False, 'from lancedb.utils import CONFIG\n')] |
import json
from sentence_transformers import SentenceTransformer
from pydantic.main import ModelMetaclass
from pathlib import Path
import pandas as pd
import sqlite3
from uuid import uuid4
import lancedb
# Shared sentence-embedding model used for all LanceDB ingestion below.
encoder = SentenceTransformer('all-MiniLM-L6-v2')
data_folder = Path('data/collections')
config_file = Path('data/config/indexes.yaml')
index_folder = Path('indexes')
# NOTE(review): lance_folder duplicates index_folder ('indexes') while sqlite
# lives under 'data/indexes/' -- confirm the intended directory layout.
lance_folder = Path('indexes')
lance_folder.mkdir(parents=True, exist_ok=True)
sqlite_folder = Path('data/indexes/')
# Smoke-test the sqlite driver and report its version at import time.
with sqlite3.connect(sqlite_folder.joinpath('documents.sqlite')) as conn:
    cursor = conn.cursor()
    cursor.execute('SELECT SQLITE_VERSION()')
    data = cursor.fetchone()
    print(f"Sqlite version: {data}")
class LanceDBDocument():
    """Normalized document record feeding both storage backends.

    The full record (title, text, tags, date, metadata, file path) is kept for
    sqlite; `lance_exclude` collects field names whose values are not simple
    enough (non-string scalars, nested tags) to store/filter in LanceDB.
    """
    def __init__(self, document:dict, title:str, text:str, fields, tags=None, date=None, file_path=None):
        # NOTE(review): fill_missing_fields assigns self.title/text/date/tags
        # but returns None, so self.document is always None -- confirm intended.
        self.document = self.fill_missing_fields(document, text, title, tags, date)
        self.file_path = file_path
        # Keys not mapped to a fixed field travel as free-form metadata.
        self.metadata = {k:document[k] for k in fields if k not in [title, text, tags, date]}
        # Reuse a provided uuid so re-ingestion remains idempotent.
        self.uuid = str(uuid4()) if 'uuid' not in document else document['uuid']
        self.save_uuids = list()
        self.sqlite_fields = list()
        # Field names to exclude from LanceDB (kept only in sqlite).
        self.lance_exclude = list()
    def fill_missing_fields(self, document, text, title, tags, date):
        """Populate self.title/text/date/tags from `document`, defaulting absent keys."""
        if title not in document:
            self.title = ''
        else:
            self.title = document[title]
        if text not in document:
            self.text = ''
        else:
            self.text = document[text]
        if date not in document:
            self.date = ''
        else:
            self.date = document[date]
        if tags not in document:
            self.tags = list()
        else:
            self.tags = document[tags]
    def create_json_document(self, text, uuids=None):
        """Creates a custom dictionary object that can be used for both sqlite and lancedb
        The full document is always stored in sqlite where fixed fields are:
            title
            text
            date
            filepath
            document_uuid - used for retrieval from lancedb results
            Json field contains the whole document for retrieval and display
        Lancedb only gets searching text, vectorization of that, and filter fields
        NOTE(review): the `uuids` parameter is currently unused.
        """
        _document = {'title':self.title,
                     'text':text,
                     'tags':self.tags,
                     'date':self.date,
                     'file_path':str(self.file_path),
                     'uuid':self.uuid,
                     'metadata': self.metadata}
        self._enforce_tags_schema()
        # Non-string title/date/file_path values get flagged for LanceDB exclusion.
        for field in ['title','date','file_path']:
            self.enforce_string_schema(field, _document)
        return _document
    def enforce_string_schema(self, field, test_document):
        """Mark `field` for LanceDB exclusion when its value is not a string."""
        if not isinstance(test_document[field], str):
            self.lance_exclude.append(field)
    def _enforce_tags_schema(self):
        # This enforces a simple List[str] format for the tags to match what lancedb can use for filtering
        # If they are of type List[Dict] as a nested field, they are stored in sqlite for retrieval
        if isinstance(self.tags, list):
            tags_are_list = True
            for _tag in self.tags:
                if not isinstance(_tag, str):
                    tags_are_list = False
                    break
            if not tags_are_list:
                self.lance_exclude.append('tags')
    def return_document(self):
        """Return the serializable record built from the current field values."""
        document = self.create_json_document(self.text)
        return document
class SqlLiteIngestNotes():
    """Persists full document records into a sqlite table named after the index.

    sqlite keeps the complete record (text, title, date, metadata JSON) so that
    LanceDB search results -- which carry only the uuid -- can be hydrated.
    """

    def __init__(self, documents, source_file, db_location, index_name, overwrite):
        self.documents = documents
        self.source_file = source_file
        self.db_location = db_location
        self.index_name = index_name
        self.overwrite = overwrite

    def initialize(self):
        """Open the connection and (re)create the table for this index."""
        self.connection = sqlite3.connect(self.db_location)
        if self.overwrite:
            self.connection.execute(f"""DROP TABLE IF EXISTS {self.index_name};""")
            self.connection.commit()

        table_exists = self.connection.execute(
            f"SELECT name FROM sqlite_master WHERE type='table' AND name='{self.index_name}';"
        ).fetchall()
        if len(table_exists) == 0:
            # index_name is interpolated as an identifier (placeholders cannot
            # name tables); it must come from trusted config, not user input.
            self.connection.execute(f"""
                CREATE TABLE {self.index_name}(
                    id INTEGER PRIMARY KEY NOT NULL,
                    uuid STRING NOT NULL UNIQUE,
                    text STRING NOT NULL,
                    title STRING,
                    date STRING,
                    source_file STRING,
                    metadata JSONB);""")
            self.connection.commit()

    def insert(self, document):
        """Insert one document; duplicate uuids are silently ignored.

        Values are bound with qmark placeholders, replacing the previous
        manual quote-doubling, which was fragile and injection-prone.
        """
        # NOTE(review): the source_file column historically stored the index
        # name (not self.source_file); preserved for compatibility.
        self.connection.execute(
            f"""INSERT OR IGNORE INTO {self.index_name}
                (uuid, text, title, date, source_file, metadata)
                VALUES (?, ?, ?, ?, ?, ?);""",
            (
                document.uuid,
                document.text,
                document.title,
                document.date,
                self.index_name,
                json.dumps(document.metadata),
            ),
        )

    def bulk_insert(self):
        """Insert all documents, commit once, and close the connection."""
        for document in self.documents:
            self.insert(document)
        self.connection.commit()
        self.connection.close()
from lancedb.pydantic import LanceModel, Vector, List
class LanceDBSchema384(LanceModel):
    """LanceDB row schema for 384-dim vectors (matches the module-level all-MiniLM-L6-v2 encoder)."""
    uuid: str
    text: str
    title: str
    tags: List[str]
    vector: Vector(384)
class LanceDBSchema512(LanceModel):
    """LanceDB row schema for 512-dim vectors.

    NOTE(review): not referenced in this module -- presumably for an
    alternative 512-dim encoder; confirm before removing.
    """
    uuid: str
    text: str
    title: str
    tags: List[str]
    vector: Vector(512)
class LanceDBIngestNotes():
    """Ingests prepared documents into a LanceDB table of the given schema.

    `encoder` converts each document's text into a vector at ingest time.
    """
    def __init__(self, documents, lance_location, index_name, overwrite, encoder, schema):
        self.documents = documents
        self.lance_location = lance_location
        self.index_name = index_name
        self.overwrite = overwrite
        self.encoder = encoder
        self.schema = schema
    def initialize(self):
        """Connect, vectorize all documents, and create/append the target table."""
        self.db = lancedb.connect(self.lance_location)
        existing_tables = self.db.table_names()
        # Vectorize up front; prep_documents flattens each record for LanceDB.
        self.documents = [self.prep_documents(document) for document in self.documents]
        if self.overwrite:
            self.table = self.db.create_table(self.index_name, data=self.documents, mode='overwrite', schema=self.schema.to_arrow_schema())
        else:
            if self.index_name in existing_tables:
                self.table = self.db.open_table(self.index_name)
                self.table.add(self.documents)
            else:
                self.table = self.db.create_table(self.index_name, data=self.documents, schema=self.schema.to_arrow_schema())
    def prep_documents(self, document):
        """Convert a LanceDBDocument into the flat dict (with vector) LanceDB stores."""
        lance_document = dict()
        lance_document['text'] = document.text
        lance_document['vector'] = self.encoder.encode(document.text)
        lance_document['uuid'] = document.uuid
        lance_document['title'] = document.title
        lance_document['tags'] = document.tags
        return lance_document
    def insert(self, document):
        # NOTE(review): mixes dict item access (document['vector']) with
        # attribute access (document.text) -- confirm the expected input type.
        document['vector'] = self.encoder.encode(document.text)
        self.table.add(document)
    def bulk_insert(self, create_vectors=False):
        """Optionally build the ANN index, refresh the FTS index, return the table.

        Documents themselves were already written during initialize(); the
        caller decides (by table size) whether the vector index is worthwhile.
        """
        if create_vectors:
            self.table.create_index(vector_column_name='vector', metric='cosine')
        self.table.create_fts_index(field_names=['title','text'], replace=True)
        return self.table
class IndexDocumentsNotes():
    """Pipeline: load a JSON/CSV export, wrap rows as LanceDBDocuments, and
    ingest them into both LanceDB (search) and sqlite (full records).
    """
    def __init__(self,field_mapping, source_file, index_name, overwrite):
        self.field_mapping = field_mapping
        self.source_file = source_file
        self.index_name = index_name
        self.overwrite = overwrite
    def open_json(self):
        """Load the source file as JSON into self.data."""
        with open(self.source_file, 'r') as f:
            self.data = json.load(f)
        # NOTE(review): debug print of the entire payload -- consider removing.
        print(self.data)
    def open_csv(self):
        """Load the source file as a pandas DataFrame into self.data."""
        self.data = pd.read_csv(self.source_file)
    def create_document(self, document):
        """Wrap one raw record using the configured field mapping."""
        document = LanceDBDocument(document,
                         text=self.field_mapping['text'],
                         title=self.field_mapping['title'],
                         tags=self.field_mapping['tags'],
                         date=self.field_mapping['date'],
                         fields=list(document.keys()),
                         file_path=self.source_file
                         )
        return document
    def create_documents(self):
        """Materialize LanceDBDocument wrappers for every loaded record."""
        self.documents = [self.create_document(document) for document in self.data]
    def ingest(self, overwrite=False):
        """Write the prepared documents to LanceDB and sqlite.

        NOTE(review): the `overwrite` parameter is ignored; self.overwrite
        (set in __init__) controls both backends -- confirm intended.
        """
        # lance_path = Path(f'../indexes/lance')
        lance_folder.mkdir(parents=True, exist_ok=True)
        lance_ingest = LanceDBIngestNotes(documents=self.documents,
                                      lance_location=lance_folder,
                                      # field_mapping=self.field_mapping,
                                      index_name=self.index_name,
                                      overwrite=self.overwrite,
                                      encoder=encoder,
                                      schema=LanceDBSchema384)
        lance_ingest.initialize()
        # Only build the ANN vector index once there are enough rows.
        if len(self.documents) <= 256:
            _table = lance_ingest.bulk_insert(create_vectors=False)
        else:
            _table = lance_ingest.bulk_insert(create_vectors=True)
        sql_path = sqlite_folder.joinpath('documents.sqlite')
        sqlite_ingest = SqlLiteIngestNotes(documents=self.documents,
                                      source_file=self.source_file,
                                      db_location=sql_path,
                                      index_name=self.index_name,
                                      overwrite=self.overwrite)
        sqlite_ingest.initialize()
        sqlite_ingest.bulk_insert()
| [
"lancedb.connect",
"lancedb.pydantic.Vector"
] | [((216, 255), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['"""all-MiniLM-L6-v2"""'], {}), "('all-MiniLM-L6-v2')\n", (235, 255), False, 'from sentence_transformers import SentenceTransformer\n'), ((271, 295), 'pathlib.Path', 'Path', (['"""data/collections"""'], {}), "('data/collections')\n", (275, 295), False, 'from pathlib import Path\n'), ((310, 342), 'pathlib.Path', 'Path', (['"""data/config/indexes.yaml"""'], {}), "('data/config/indexes.yaml')\n", (314, 342), False, 'from pathlib import Path\n'), ((358, 373), 'pathlib.Path', 'Path', (['"""indexes"""'], {}), "('indexes')\n", (362, 373), False, 'from pathlib import Path\n'), ((390, 405), 'pathlib.Path', 'Path', (['"""indexes"""'], {}), "('indexes')\n", (394, 405), False, 'from pathlib import Path\n'), ((471, 492), 'pathlib.Path', 'Path', (['"""data/indexes/"""'], {}), "('data/indexes/')\n", (475, 492), False, 'from pathlib import Path\n'), ((5615, 5626), 'lancedb.pydantic.Vector', 'Vector', (['(384)'], {}), '(384)\n', (5621, 5626), False, 'from lancedb.pydantic import LanceModel, Vector, List\n'), ((5739, 5750), 'lancedb.pydantic.Vector', 'Vector', (['(512)'], {}), '(512)\n', (5745, 5750), False, 'from lancedb.pydantic import LanceModel, Vector, List\n'), ((4114, 4147), 'sqlite3.connect', 'sqlite3.connect', (['self.db_location'], {}), '(self.db_location)\n', (4129, 4147), False, 'import sqlite3\n'), ((6128, 6164), 'lancedb.connect', 'lancedb.connect', (['self.lance_location'], {}), '(self.lance_location)\n', (6143, 6164), False, 'import lancedb\n'), ((7989, 8018), 'pandas.read_csv', 'pd.read_csv', (['self.source_file'], {}), '(self.source_file)\n', (8000, 8018), True, 'import pandas as pd\n'), ((7902, 7914), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7911, 7914), False, 'import json\n'), ((1248, 1255), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1253, 1255), False, 'from uuid import uuid4\n'), ((5257, 5286), 'json.dumps', 'json.dumps', (['document.metadata'], {}), 
'(document.metadata)\n', (5267, 5286), False, 'import json\n')] |
import argparse
import pandas as pd
from unstructured.partition.pdf import partition_pdf
import lancedb.embeddings.gte
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector
def split_text_into_chunks(text, chunk_size, overlap):
    """
    Split text into chunks with a specified size and overlap.

    Parameters:
    - text (str): The input text to be split into chunks.
    - chunk_size (int): The size of each chunk (must be positive).
    - overlap (int): Characters shared between consecutive chunks
      (must satisfy 0 <= overlap < chunk_size).

    Returns:
    - List of chunks (str). Empty input yields an empty list.

    Raises:
    - ValueError: if chunk_size <= 0, overlap < 0, or overlap >= chunk_size.
    """
    # overlap >= chunk_size would advance the window by <= 0 characters and
    # loop forever -- reject it up front (previously unchecked).
    if chunk_size <= 0 or overlap < 0 or overlap >= chunk_size:
        raise ValueError("Invalid chunk size or overlap value.")

    step = chunk_size - overlap
    chunks = []
    for start in range(0, len(text), step):
        chunks.append(text[start:start + chunk_size])
    return chunks
def pdf_to_lancedb(pdf_file: str, path: str = "/tmp/lancedb"):
    """
    Create a LanceDB table from a PDF file.

    The PDF text is extracted with unstructured, split into 1000-char chunks
    (200-char overlap), embedded via the registry's GTE model, and written to
    a table named "test" (mode="overwrite" replaces any previous run).

    Parameters:
    - pdf_file (str): The path to the input PDF file.
    - path (str): The path to store the vector DB.
                default: /tmp/lancedb
    Returns:
    - None
    """
    elements = partition_pdf(pdf_file)
    # Join the extracted element texts with blank lines before chunking.
    content = "\n\n".join([e.text for e in elements])
    chunks = split_text_into_chunks(text=content, chunk_size=1000, overlap=200)
    model = (
        get_registry().get("gte-text").create(mlx=True)
    )  # mlx=True for Apple silicon only.

    # Schema with auto-embedding: `text` is the source, `vector` is computed.
    class TextModel(LanceModel):
        text: str = model.SourceField()
        vector: Vector(model.ndims()) = model.VectorField()

    df = pd.DataFrame({"text": chunks})
    db = lancedb.connect(path)
    tbl = db.create_table("test", schema=TextModel, mode="overwrite")

    tbl.add(df)
    return None
if __name__ == "__main__":
    # CLI wrapper: ingest one PDF into a local LanceDB at --db_path.
    parser = argparse.ArgumentParser(description="Create a Vector DB from a PDF file")

    # Input
    parser.add_argument(
        "--pdf",
        help="The path to the input PDF file",
        default="flash_attention.pdf",
    )
    # Output
    parser.add_argument(
        "--db_path",
        type=str,
        default="/tmp/lancedb",
        help="The path to store the vector DB",
    )
    args = parser.parse_args()

    pdf_to_lancedb(args.pdf, args.db_path)
    print("ingestion done , move to query!")
print("ingestion done , move to query!")
| [
"lancedb.embeddings.get_registry"
] | [((1242, 1265), 'unstructured.partition.pdf.partition_pdf', 'partition_pdf', (['pdf_file'], {}), '(pdf_file)\n', (1255, 1265), False, 'from unstructured.partition.pdf import partition_pdf\n'), ((1658, 1688), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': chunks}"], {}), "({'text': chunks})\n", (1670, 1688), True, 'import pandas as pd\n'), ((1864, 1937), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create a Vector DB from a PDF file"""'}), "(description='Create a Vector DB from a PDF file')\n", (1887, 1937), False, 'import argparse\n'), ((1424, 1438), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (1436, 1438), False, 'from lancedb.embeddings import get_registry\n')] |
from datetime import datetime
from typing import List, Union
from pydantic import field_validator
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector
from pydantic import BaseModel
# Registry-backed OpenAI embedding function: text-embedding-3-large at 256 dims.
openai = get_registry().get("openai").create(name="text-embedding-3-large", dim=256)


class TextChunk(LanceModel):
    """One searchable chunk of a post stored in LanceDB.

    `text` is the embedding source field; `vector` is produced by the OpenAI
    embedding function (default=None lets LanceDB fill it in at ingest).
    """
    doc_id: str
    text: str = openai.SourceField()
    vector: Vector(openai.ndims()) = openai.VectorField(default=None)
    post_title: str
    publish_date: datetime
    chunk_id: int
    source: str
class Document(BaseModel):
    """A parsed source file plus its frontmatter metadata."""

    id: str
    content: str
    filename: str
    metadata: dict[str, Union[str, List[str]]]

    @field_validator('metadata')
    @classmethod
    def metadata_must_contain_a_valid_datestring(cls, v: dict[str, Union[str, List[str]]]):
        """Ensure metadata carries a parseable YYYY-MM date string."""
        # Look up the key safely first: previously a missing 'date' raised a
        # KeyError from inside the error message f-string while handling the
        # original KeyError, masking the intended ValueError.
        date_value = v.get("date")
        try:
            datetime.strptime(date_value, "%Y-%m")
        except Exception:
            raise ValueError(
                f"Date format must be YYYY-MM (Eg. 2024-10). Unable to parse provided date of {date_value} "
            )
        return v

    @field_validator('metadata')
    @classmethod
    def metadata_must_contain_required_keys(cls, v: dict[str, Union[str, List[str]]]):
        """Ensure url, date and title are all present in metadata."""
        required_keys = [
            "url", "date", "title"
        ]
        for k in required_keys:
            if k not in v:
                raise ValueError(f"Required Property {k} is not present in metadata")
        return v
| [
"lancedb.embeddings.get_registry"
] | [((671, 698), 'pydantic.field_validator', 'field_validator', (['"""metadata"""'], {}), "('metadata')\n", (686, 698), False, 'from pydantic import field_validator\n'), ((1082, 1109), 'pydantic.field_validator', 'field_validator', (['"""metadata"""'], {}), "('metadata')\n", (1097, 1109), False, 'from pydantic import field_validator\n'), ((833, 870), 'datetime.datetime.strptime', 'datetime.strptime', (["v['date']", '"""%Y-%m"""'], {}), "(v['date'], '%Y-%m')\n", (850, 870), False, 'from datetime import datetime\n'), ((231, 245), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (243, 245), False, 'from lancedb.embeddings import get_registry\n')] |
import os
import shutil
from pathlib import Path
import lancedb
from lancedb.pydantic import LanceModel, Vector, pydantic_to_schema
from langchain.document_loaders import TextLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import LanceDB
# LanceDB pydantic schema
class Content(LanceModel):
    """Row schema: a text chunk plus its 384-dim embedding vector."""
    text: str
    vector: Vector(384)
def get_files() -> list[str]:
    """Collect the .txt files under ../data, returned as path strings."""
    # Stringify the paths up front — lancedb/pydantic reject Path objects downstream.
    return [str(path) for path in Path("../data").glob("*.txt")]
def get_docs(txt_files: list[str]):
    """Load each text file into a list of langchain documents (one list per file)."""
    return [TextLoader(path).load() for path in txt_files]
def create_lance_table(table_name: str) -> lancedb.table.LanceTable:
    """Create (or reopen) the named LanceDB table using the Content schema."""
    try:
        # Create empty table if it does not exist
        # NOTE(review): mode="overwrite" clobbers an existing table, so the
        # OSError fallback below may be unreachable — confirm the intended mode.
        tbl = db.create_table(table_name, schema=pydantic_to_schema(Content), mode="overwrite")
    except OSError:
        # If table exists, open it
        # NOTE(review): verify lancedb's open_table accepts a `mode` kwarg.
        tbl = db.open_table(table_name, mode="append")
    return tbl
async def search_lancedb(query: str, retriever: LanceDB) -> list[Content]:
    """Asynchronously retrieve the top-5 chunks similar to `query` from LanceDB."""
    matches = await retriever.asimilarity_search(query, k=5)
    if matches:
        print(matches[0].page_content)
    else:
        print("Failed to find similar result")
    return matches
def main() -> LanceDB:
    """Chunk every data file, embed the chunks and ingest them into LanceDB.

    Returns:
        The LanceDB retriever wrapping the populated table.

    BUGFIX: the return annotation said `-> None` even though the function
    returns the retriever (and the __main__ caller relies on it).
    """
    txt_files = get_files()
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=50)
    embeddings = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-MiniLM-L6-v2", model_kwargs={"device": "cpu"}
    )
    tbl = create_lance_table("countries")
    docs = get_docs(txt_files)
    chunked_docs = []
    for doc in docs:
        chunked_docs.extend(text_splitter.split_documents(doc))
    # Ingest docs in append mode
    retriever = LanceDB.from_documents(chunked_docs, embeddings, connection=tbl)
    return retriever
if __name__ == "__main__":
    # Rebuild the vector store from scratch, then run one retrieval query.
    DB_NAME = "./db"
    TABLE = "countries"
    if os.path.exists(DB_NAME):
        # Clear DB if it exists so each run starts from an empty store
        shutil.rmtree(DB_NAME)
    db = lancedb.connect(DB_NAME)
    retriever = main()
    print("Finished loading documents to LanceDB")
    query = "Is Tonga a monarchy or a democracy"
    # NOTE(review): TABLE is defined here but main() hard-codes "countries" — keep in sync.
    docsearch = retriever.as_retriever(
        search_kwargs={"k": 3, "threshold": 0.8, "return_vector": False}
    )
    search_result = docsearch.get_relevant_documents(query)
if len(search_result) > 0:
print(f"Found {len(search_result)} relevant results")
print([r.page_content for r in search_result])
else:
print("Failed to find relevant result") | [
"lancedb.connect",
"lancedb.pydantic.Vector",
"lancedb.pydantic.pydantic_to_schema"
] | [((429, 440), 'lancedb.pydantic.Vector', 'Vector', (['(384)'], {}), '(384)\n', (435, 440), False, 'from lancedb.pydantic import LanceModel, Vector, pydantic_to_schema\n'), ((538, 553), 'pathlib.Path', 'Path', (['"""../data"""'], {}), "('../data')\n", (542, 553), False, 'from pathlib import Path\n'), ((1650, 1714), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(512)', 'chunk_overlap': '(50)'}), '(chunk_size=512, chunk_overlap=50)\n', (1680, 1714), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1732, 1842), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""sentence-transformers/all-MiniLM-L6-v2"""', 'model_kwargs': "{'device': 'cpu'}"}), "(model_name='sentence-transformers/all-MiniLM-L6-v2',\n model_kwargs={'device': 'cpu'})\n", (1753, 1842), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((2082, 2146), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['chunked_docs', 'embeddings'], {'connection': 'tbl'}), '(chunked_docs, embeddings, connection=tbl)\n', (2104, 2146), False, 'from langchain.vectorstores import LanceDB\n'), ((2249, 2272), 'os.path.exists', 'os.path.exists', (['DB_NAME'], {}), '(DB_NAME)\n', (2263, 2272), False, 'import os\n'), ((2347, 2371), 'lancedb.connect', 'lancedb.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (2362, 2371), False, 'import lancedb\n'), ((785, 798), 'langchain.document_loaders.TextLoader', 'TextLoader', (['f'], {}), '(f)\n', (795, 798), False, 'from langchain.document_loaders import TextLoader\n'), ((2314, 2336), 'shutil.rmtree', 'shutil.rmtree', (['DB_NAME'], {}), '(DB_NAME)\n', (2327, 2336), False, 'import shutil\n'), ((1063, 1090), 'lancedb.pydantic.pydantic_to_schema', 'pydantic_to_schema', (['Content'], {}), '(Content)\n', (1081, 1090), False, 'from lancedb.pydantic import LanceModel, Vector, pydantic_to_schema\n')] |
import zipfile
import numpy as np
import pandas as pd
import scipy.sparse
import torch
import implicit
from implicit import evaluation
import lancedb
import pydantic
from lancedb.pydantic import pydantic_to_schema, vector
import argparse
def products_bought_by_user_in_the_past(user_id: int, top: int = 10):
    """Return up to `top` of the user's most-ordered products, with names attached."""
    history = data[data.user_id == user_id].sort_values(
        by=["total_orders"], ascending=False
    )
    # Attach human-readable names via a product_id -> product_name lookup.
    name_lookup = product_entries.set_index("product_id")["product_name"]
    history["product_name"] = history["product_id"].map(name_lookup)
    history = history[["product_id", "product_name", "total_orders"]].reset_index(
        drop=True
    )
    # head() yields every row when there are fewer than `top`, matching the
    # original shape-check-then-slice logic.
    return history.head(top)
def args_parse():
    """Build and parse the CLI options for a recommender training run."""
    parser = argparse.ArgumentParser(description="Product Recommender")
    option_specs = [
        ("--factors", int, 128, "dimension of latent factor vectors"),
        ("--regularization", float, 0.05, "strength of penalty term"),
        ("--iterations", int, 50, "number of iterations to update"),
        ("--num-threads", int, 1, "amount of parallelization"),
        ("--num-partitions", int, 256, "number of partitions of the index"),
        (
            "--num-sub-vectors",
            int,
            16,
            "number of sub-vectors (M) that will be created during Product Quantization (PQ).",
        ),
    ]
    for flag, opt_type, default, help_text in option_specs:
        parser.add_argument(flag, type=opt_type, default=default, help=help_text)
    return parser.parse_args()
# Instacart archive plus the individual CSV zips it contains; all are
# extracted into the working directory before training.
files = [
    "instacart-market-basket-analysis.zip",
    "order_products__train.csv.zip",
    "order_products__prior.csv.zip",
    "products.csv.zip",
    "orders.csv.zip",
]
if __name__ == "__main__":
    # End-to-end pipeline: unpack the Instacart data, train an implicit-feedback
    # ALS model, store the item factors in LanceDB, and query it for two test users.
    args = args_parse()
    # Unzip the dataset archive and the per-table CSV zips into the CWD.
    for filename in files:
        with zipfile.ZipFile(filename, "r") as zip_ref:
            zip_ref.extractall("./")
    products = pd.read_csv("products.csv")
    orders = pd.read_csv("orders.csv")
    order_products = pd.concat(
        [
            pd.read_csv("order_products__train.csv"),
            pd.read_csv("order_products__prior.csv"),
        ]
    )
    customer_order_products = pd.merge(
        orders, order_products, how="inner", on="order_id"
    )
    # create confidence table: order counts per (user_id, product_id) pair
    data = (
        customer_order_products.groupby(["user_id", "product_id"])[["order_id"]]
        .count()
        .reset_index()
    )
    data.columns = ["user_id", "product_id", "total_orders"]
    data.product_id = data.product_id.astype("int64")
    # Synthetic users with strong, known preferences — queried again at the bottom.
    data_new = pd.DataFrame(
        [
            [data.user_id.max() + 1, 46149, 50],  # user 1 orders 50 Zero Calorie Cola
            [data.user_id.max() + 2, 27845, 49],  # user 2 orders 49 Organic Whole Milk
            [
                data.user_id.max() + 2,
                26604,
                32,
            ],  # user 2 orders 32 Organic Blackberries
        ],
        columns=["user_id", "product_id", "total_orders"],
    )
    data = pd.concat([data, data_new]).reset_index(drop=True)
    # extract unique user and product ids
    unique_users = list(np.sort(data.user_id.unique()))
    unique_products = list(np.sort(products.product_id.unique()))
    purchases = list(data.total_orders)
    # create zero-based index position <-> user/item ID mappings
    index_to_user = pd.Series(unique_users)
    # create reverse mappings from user/item ID to index positions
    user_to_index = pd.Series(data=index_to_user.index + 1, index=index_to_user.values)
    # create row and column for user and product ids
    users_rows = data.user_id.astype(int)
    products_cols = data.product_id.astype(int)
    # create CSR matrix of purchase counts (users x products)
    matrix = scipy.sparse.csr_matrix(
        (purchases, (users_rows, products_cols)),
        shape=(len(unique_users) + 1, len(unique_products) + 1),
    )
    matrix.data = np.nan_to_num(matrix.data, copy=False)
    # split data into train and test splits
    train, test = evaluation.train_test_split(matrix, train_percentage=0.9)
    # initialize the recommender model
    model = implicit.als.AlternatingLeastSquares(
        factors=args.factors,
        regularization=args.regularization,
        iterations=args.iterations,
        num_threads=args.num_threads,
    )
    # alpha scales raw counts into implicit-feedback "confidence" values.
    alpha = 15
    train = (train * alpha).astype("double")
    # train the model on CSR matrix
    model.fit(train, show_progress=True)
    test = (test * alpha).astype("double")
    evaluation.ranking_metrics_at_k(
        model, train, test, K=100, show_progress=True, num_threads=1
    )
    db = lancedb.connect("data/lancedb")
    # Row schema for the product table: id, name and the ALS item-factor vector.
    class ProductModel(pydantic.BaseModel):
        product_id: int
        product_name: str
        vector: vector(args.factors)
    schema = pydantic_to_schema(ProductModel)
    table_name = "product_recommender"
    tbl = db.create_table(table_name, schema=schema, mode="overwrite")
    # Transform items into factors
    items_factors = model.item_factors
    product_entries = products[["product_id", "product_name"]].drop_duplicates()
    product_entries["product_id"] = product_entries.product_id.astype("int64")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # On CUDA the factors need .to_numpy() before .tolist() — presumably device
    # arrays; confirm against the implicit library's GPU types.
    item_embeddings = (
        items_factors[1:].to_numpy().tolist()
        if device == "cuda"
        else items_factors[1:].tolist()
    )
    product_entries["vector"] = item_embeddings
    tbl.add(product_entries)
    tbl.create_index(
        num_partitions=args.num_partitions, num_sub_vectors=args.num_sub_vectors
    )
    test_user_ids = [206210, 206211]
    test_user_factors = model.user_factors[user_to_index[test_user_ids]]
    # Query by user factors
    test_user_embeddings = (
        test_user_factors.to_numpy().tolist()
        if device == "cuda"
        else test_user_factors.tolist()
    )
    # NOTE(review): `id` shadows the builtin, and the final print below uses the
    # value leaked from the last loop iteration — confirm that is intentional.
    for embedding, id in zip(test_user_embeddings, test_user_ids):
        results = tbl.search(embedding).limit(10).to_df()
        print(results.drop(columns=["vector"]).to_string(max_cols=None))
    print(products_bought_by_user_in_the_past(id, top=15).to_string(max_cols=None))
| [
"lancedb.connect",
"lancedb.pydantic.vector",
"lancedb.pydantic.pydantic_to_schema"
] | [((774, 832), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Product Recommender"""'}), "(description='Product Recommender')\n", (797, 832), False, 'import argparse\n'), ((2049, 2076), 'pandas.read_csv', 'pd.read_csv', (['"""products.csv"""'], {}), "('products.csv')\n", (2060, 2076), True, 'import pandas as pd\n'), ((2090, 2115), 'pandas.read_csv', 'pd.read_csv', (['"""orders.csv"""'], {}), "('orders.csv')\n", (2101, 2115), True, 'import pandas as pd\n'), ((2313, 2373), 'pandas.merge', 'pd.merge', (['orders', 'order_products'], {'how': '"""inner"""', 'on': '"""order_id"""'}), "(orders, order_products, how='inner', on='order_id')\n", (2321, 2373), True, 'import pandas as pd\n'), ((3471, 3494), 'pandas.Series', 'pd.Series', (['unique_users'], {}), '(unique_users)\n', (3480, 3494), True, 'import pandas as pd\n'), ((3583, 3650), 'pandas.Series', 'pd.Series', ([], {'data': '(index_to_user.index + 1)', 'index': 'index_to_user.values'}), '(data=index_to_user.index + 1, index=index_to_user.values)\n', (3592, 3650), True, 'import pandas as pd\n'), ((3997, 4035), 'numpy.nan_to_num', 'np.nan_to_num', (['matrix.data'], {'copy': '(False)'}), '(matrix.data, copy=False)\n', (4010, 4035), True, 'import numpy as np\n'), ((4099, 4156), 'implicit.evaluation.train_test_split', 'evaluation.train_test_split', (['matrix'], {'train_percentage': '(0.9)'}), '(matrix, train_percentage=0.9)\n', (4126, 4156), False, 'from implicit import evaluation\n'), ((4209, 4371), 'implicit.als.AlternatingLeastSquares', 'implicit.als.AlternatingLeastSquares', ([], {'factors': 'args.factors', 'regularization': 'args.regularization', 'iterations': 'args.iterations', 'num_threads': 'args.num_threads'}), '(factors=args.factors, regularization=\n args.regularization, iterations=args.iterations, num_threads=args.\n num_threads)\n', (4245, 4371), False, 'import implicit\n'), ((4588, 4686), 'implicit.evaluation.ranking_metrics_at_k', 'evaluation.ranking_metrics_at_k', (['model', 
'train', 'test'], {'K': '(100)', 'show_progress': '(True)', 'num_threads': '(1)'}), '(model, train, test, K=100, show_progress=\n True, num_threads=1)\n', (4619, 4686), False, 'from implicit import evaluation\n'), ((4706, 4737), 'lancedb.connect', 'lancedb.connect', (['"""data/lancedb"""'], {}), "('data/lancedb')\n", (4721, 4737), False, 'import lancedb\n'), ((4884, 4916), 'lancedb.pydantic.pydantic_to_schema', 'pydantic_to_schema', (['ProductModel'], {}), '(ProductModel)\n', (4902, 4916), False, 'from lancedb.pydantic import pydantic_to_schema, vector\n'), ((4849, 4869), 'lancedb.pydantic.vector', 'vector', (['args.factors'], {}), '(args.factors)\n', (4855, 4869), False, 'from lancedb.pydantic import pydantic_to_schema, vector\n'), ((5285, 5310), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5308, 5310), False, 'import torch\n'), ((1953, 1983), 'zipfile.ZipFile', 'zipfile.ZipFile', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (1968, 1983), False, 'import zipfile\n'), ((2170, 2210), 'pandas.read_csv', 'pd.read_csv', (['"""order_products__train.csv"""'], {}), "('order_products__train.csv')\n", (2181, 2210), True, 'import pandas as pd\n'), ((2224, 2264), 'pandas.read_csv', 'pd.read_csv', (['"""order_products__prior.csv"""'], {}), "('order_products__prior.csv')\n", (2235, 2264), True, 'import pandas as pd\n'), ((3129, 3156), 'pandas.concat', 'pd.concat', (['[data, data_new]'], {}), '([data, data_new])\n', (3138, 3156), True, 'import pandas as pd\n')] |
# stdlib
import uuid
from abc import abstractmethod  # required by the @abstractmethod stubs below
from datetime import datetime
from typing import Optional, List, Iterator, Dict

# third-party
import lancedb
from lancedb.pydantic import Vector, LanceModel
from tqdm import tqdm

# local
from memgpt.config import MemGPTConfig
from memgpt.config import AgentConfig, MemGPTConfig
from memgpt.agent_store.storage import StorageConnector, TableType
from memgpt.constants import MEMGPT_DIR
from memgpt.utils import printd
from memgpt.data_types import Record, Message, Passage, Source
from datetime import datetime
""" Initial implementation - not complete """
def get_db_model(table_name: str, table_type: TableType):
    """Build the LanceDB row-model class for the given table type.

    Returns a LanceModel subclass whose fields mirror the matching memgpt
    data type (Passage, Message or Source); each model converts itself back
    via `to_record()`.

    Raises:
        ValueError: if `table_type` has no model implemented here.
    """
    config = MemGPTConfig.load()
    if table_type == TableType.ARCHIVAL_MEMORY or table_type == TableType.PASSAGES:
        # create schema for archival memory
        class PassageModel(LanceModel):
            """Defines data model for storing Passages (consisting of text, embedding)"""

            id: uuid.UUID
            user_id: str
            text: str
            doc_id: str
            agent_id: str
            data_source: str
            embedding: Vector(config.embedding_dim)
            metadata_: Dict

            def __repr__(self):
                return f"<Passage(passage_id='{self.id}', text='{self.text}', embedding='{self.embedding})>"

            def to_record(self):
                return Passage(
                    text=self.text,
                    embedding=self.embedding,
                    doc_id=self.doc_id,
                    user_id=self.user_id,
                    id=self.id,
                    data_source=self.data_source,
                    agent_id=self.agent_id,
                    metadata=self.metadata_,
                )

        return PassageModel
    elif table_type == TableType.RECALL_MEMORY:

        class MessageModel(LanceModel):
            """Defines data model for storing Message objects"""

            __abstract__ = True  # this line is necessary

            # Assuming message_id is the primary key
            id: uuid.UUID
            user_id: str
            agent_id: str
            # openai info
            role: str
            text: str
            model: str
            user: str
            # function info
            function_name: str
            function_args: str
            function_response: str
            # BUGFIX: these two used assignments (`embedding = Vector(...)`,
            # `created_at = datetime`), which stored the *type objects* as plain
            # class attributes instead of declaring model fields; annotations
            # (as in PassageModel above) declare real fields.
            embedding: Vector(config.embedding_dim)
            # NOTE(review): no default is set here; the original comment said
            # "default value as the current time" — confirm whether a
            # default_factory of datetime.now was intended.
            created_at: datetime

            def __repr__(self):
                return f"<Message(message_id='{self.id}', text='{self.text}', embedding='{self.embedding})>"

            def to_record(self):
                # NOTE(review): `self.name` is not a declared field on this
                # model — confirm whether `self.user` was intended.
                return Message(
                    user_id=self.user_id,
                    agent_id=self.agent_id,
                    role=self.role,
                    name=self.name,
                    text=self.text,
                    model=self.model,
                    function_name=self.function_name,
                    function_args=self.function_args,
                    function_response=self.function_response,
                    embedding=self.embedding,
                    created_at=self.created_at,
                    id=self.id,
                )

        return MessageModel
    elif table_type == TableType.DATA_SOURCES:

        class SourceModel(LanceModel):
            """Defines data model for storing Sources (id, owner, name, creation time)"""

            # Assuming passage_id is the primary key
            id: uuid.UUID
            user_id: str
            name: str
            created_at: datetime

            def __repr__(self):
                return f"<Source(passage_id='{self.id}', name='{self.name}')>"

            def to_record(self):
                return Source(id=self.id, user_id=self.user_id, name=self.name, created_at=self.created_at)

        return SourceModel
    else:
        raise ValueError(f"Table type {table_type} not implemented")
class LanceDBConnector(StorageConnector):
    """Storage via LanceDB"""

    # TODO: this should probably eventually be moved into a parent DB class
    def __init__(self, name: Optional[str] = None, agent_config: Optional[AgentConfig] = None):
        # TODO
        pass

    def generate_where_filter(self, filters: Dict) -> str:
        """Render `filters` as a SQL-style conjunction, e.g. "a=1 AND b=2".

        BUGFIX: the original called `where_filters.join(" AND ")` — lists have
        no `.join()`; `str.join` must be called on the separator string.
        """
        return " AND ".join(f"{key}={value}" for key, value in filters.items())

    # The stubs below sketch the StorageConnector interface; none are
    # implemented yet (file header: "Initial implementation - not complete").
    @abstractmethod
    def get_all_paginated(self, filters: Optional[Dict] = {}, page_size: Optional[int] = 1000) -> Iterator[List[Record]]:
        # TODO
        pass

    @abstractmethod
    def get_all(self, filters: Optional[Dict] = {}, limit=10) -> List[Record]:
        # TODO
        pass

    @abstractmethod
    def get(self, id: str) -> Optional[Record]:
        # TODO
        pass

    @abstractmethod
    def size(self, filters: Optional[Dict] = {}) -> int:
        # TODO
        pass

    @abstractmethod
    def insert(self, record: Record):
        # TODO
        pass

    @abstractmethod
    def insert_many(self, records: List[Record], show_progress=False):
        # TODO
        pass

    @abstractmethod
    def query(self, query: str, query_vec: List[float], top_k: int = 10, filters: Optional[Dict] = {}) -> List[Record]:
        # TODO
        pass

    @abstractmethod
    def query_date(self, start_date, end_date):
        # TODO
        pass

    @abstractmethod
    def query_text(self, query):
        # TODO
        pass

    @abstractmethod
    def delete_table(self):
        # TODO
        pass

    @abstractmethod
    def delete(self, filters: Optional[Dict] = {}):
        # TODO
        pass

    @abstractmethod
    def save(self):
        # TODO
        pass
| [
"lancedb.pydantic.Vector"
] | [((623, 642), 'memgpt.config.MemGPTConfig.load', 'MemGPTConfig.load', ([], {}), '()\n', (640, 642), False, 'from memgpt.config import AgentConfig, MemGPTConfig\n'), ((1078, 1106), 'lancedb.pydantic.Vector', 'Vector', (['config.embedding_dim'], {}), '(config.embedding_dim)\n', (1084, 1106), False, 'from lancedb.pydantic import Vector, LanceModel\n'), ((1334, 1524), 'memgpt.data_types.Passage', 'Passage', ([], {'text': 'self.text', 'embedding': 'self.embedding', 'doc_id': 'self.doc_id', 'user_id': 'self.user_id', 'id': 'self.id', 'data_source': 'self.data_source', 'agent_id': 'self.agent_id', 'metadata': 'self.metadata_'}), '(text=self.text, embedding=self.embedding, doc_id=self.doc_id,\n user_id=self.user_id, id=self.id, data_source=self.data_source,\n agent_id=self.agent_id, metadata=self.metadata_)\n', (1341, 1524), False, 'from memgpt.data_types import Record, Message, Passage, Source\n'), ((2336, 2364), 'lancedb.pydantic.Vector', 'Vector', (['config.embedding_dim'], {}), '(config.embedding_dim)\n', (2342, 2364), False, 'from lancedb.pydantic import Vector, LanceModel\n'), ((2675, 2990), 'memgpt.data_types.Message', 'Message', ([], {'user_id': 'self.user_id', 'agent_id': 'self.agent_id', 'role': 'self.role', 'name': 'self.name', 'text': 'self.text', 'model': 'self.model', 'function_name': 'self.function_name', 'function_args': 'self.function_args', 'function_response': 'self.function_response', 'embedding': 'self.embedding', 'created_at': 'self.created_at', 'id': 'self.id'}), '(user_id=self.user_id, agent_id=self.agent_id, role=self.role, name=\n self.name, text=self.text, model=self.model, function_name=self.\n function_name, function_args=self.function_args, function_response=self\n .function_response, embedding=self.embedding, created_at=self.\n created_at, id=self.id)\n', (2682, 2990), False, 'from memgpt.data_types import Record, Message, Passage, Source\n'), ((3816, 3905), 'memgpt.data_types.Source', 'Source', ([], {'id': 'self.id', 'user_id': 
'self.user_id', 'name': 'self.name', 'created_at': 'self.created_at'}), '(id=self.id, user_id=self.user_id, name=self.name, created_at=self.\n created_at)\n', (3822, 3905), False, 'from memgpt.data_types import Record, Message, Passage, Source\n')] |
import os
import argparse
import lancedb
from lancedb.context import contextualize
from lancedb.embeddings import with_embeddings
from datasets import load_dataset
import openai
import pytest
# Embedding/completion engine name; populated by arg_parse() from --model.
OPENAI_MODEL = None
def embed_func(c):
    """Embed a batch of texts through the configured OpenAI engine."""
    response = openai.Embedding.create(input=c, engine=OPENAI_MODEL)
    return [item["embedding"] for item in response["data"]]
def create_prompt(query, context):
    """Build a completion prompt from `query` and retrieved context passages.

    `context` is expected to expose a `.text` column/iterable (e.g. a pandas
    DataFrame of search results). Passages are joined with "---" separators,
    truncated so the joined context stays under ~3750 characters.

    BUGFIX: with 0 or 1 context rows the original loop body never ran, so
    `prompt` was unbound and the return raised UnboundLocalError. A fallback
    prompt covering all passages is now assigned before the loop.
    """
    limit = 3750
    prompt_start = "Answer the question based on the context below.\n\n" + "Context:\n"
    prompt_end = f"\n\nQuestion: {query}\nAnswer:"
    texts = list(context.text)
    # Fallback covers the empty/single-passage cases the loop below skips.
    prompt = prompt_start + "\n\n---\n\n".join(texts) + prompt_end
    # append contexts until hitting limit
    for i in range(1, len(texts)):
        if len("\n\n---\n\n".join(texts[:i])) >= limit:
            prompt = (
                prompt_start + "\n\n---\n\n".join(texts[: i - 1]) + prompt_end
            )
            break
        elif i == len(texts) - 1:
            prompt = prompt_start + "\n\n---\n\n".join(texts) + prompt_end
    return prompt
def complete(prompt):
    """Run `prompt` through the configured completion engine and return the stripped text."""
    # query text-davinci-003
    response = openai.Completion.create(
        engine=OPENAI_MODEL,
        prompt=prompt,
        temperature=0,
        max_tokens=400,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
        stop=None,
    )
    first_choice = response["choices"][0]
    return first_choice["text"].strip()
def arg_parse():
    """Parse CLI flags, validate the OpenAI key and set the global model name."""
    global OPENAI_MODEL
    default_query = "Which training method should I use for sentence transformers when I only have pairs of related sentences?"
    parser = argparse.ArgumentParser(description="Youtube Search QA Bot")
    parser.add_argument(
        "--query", type=str, default=default_query, help="query to search"
    )
    parser.add_argument(
        "--context-length",
        type=int,
        default=3,
        help="Number of queries to use as context",
    )
    parser.add_argument("--window-size", type=int, default=20, help="window size")
    parser.add_argument("--stride", type=int, default=4, help="stride")
    parser.add_argument("--openai-key", type=str, help="OpenAI API Key")
    parser.add_argument(
        "--model", type=str, default="text-embedding-ada-002", help="OpenAI API Key"
    )
    args = parser.parse_args()
    # A key passed on the CLI wins; otherwise the environment must supply one.
    if args.openai_key:
        openai.api_key = args.openai_key
    elif "OPENAI_API_KEY" not in os.environ:
        raise ValueError(
            "OPENAI_API_KEY environment variable not set. Please set it or pass --openai_key"
        )
    OPENAI_MODEL = args.model
    return args
if __name__ == "__main__":
    # Build (or reuse) the transcript vector table, then answer one query.
    args = arg_parse()
    db = lancedb.connect("~/tmp/lancedb")
    table_name = "youtube-chatbot"
    if table_name not in db.table_names():
        # Table missing: build it from the dataset. Requires a working OpenAI key.
        assert len(openai.Model.list()["data"]) > 0
        data = load_dataset("jamescalam/youtube-transcriptions", split="train")
        # Merge transcript lines into overlapping windows, grouped per video title.
        df = (
            contextualize(data.to_pandas())
            .groupby("title")
            .text_col("text")
            .window(args.window_size)
            .stride(args.stride)
            .to_df()
        )
        data = with_embeddings(embed_func, df, show_progress=True)
        data.to_pandas().head(1)
        tbl = db.create_table(table_name, data)
        # NOTE(review): "LaneDB" is a typo (LanceDB) in the output string.
        print(f"Created LaneDB table of length: {len(tbl)}")
    else:
        tbl = db.open_table(table_name)
        # NOTE(review): this load_dataset result is discarded — confirm it is needed.
        load_dataset("jamescalam/youtube-transcriptions", split="train")
    # Embed the query, retrieve context, build the prompt and complete it.
    emb = embed_func(args.query)[0]
    context = tbl.search(emb).limit(args.context_length).to_df()
    prompt = create_prompt(args.query, context)
    complete(prompt)
    top_match = context.iloc[0]
    print(f"Top Match: {top_match['url']}&t={top_match['start']}")
| [
"lancedb.connect",
"lancedb.embeddings.with_embeddings"
] | [((243, 296), 'openai.Embedding.create', 'openai.Embedding.create', ([], {'input': 'c', 'engine': 'OPENAI_MODEL'}), '(input=c, engine=OPENAI_MODEL)\n', (266, 296), False, 'import openai\n'), ((1031, 1192), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': 'OPENAI_MODEL', 'prompt': 'prompt', 'temperature': '(0)', 'max_tokens': '(400)', 'top_p': '(1)', 'frequency_penalty': '(0)', 'presence_penalty': '(0)', 'stop': 'None'}), '(engine=OPENAI_MODEL, prompt=prompt, temperature=0,\n max_tokens=400, top_p=1, frequency_penalty=0, presence_penalty=0, stop=None\n )\n', (1055, 1192), False, 'import openai\n'), ((1485, 1545), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Youtube Search QA Bot"""'}), "(description='Youtube Search QA Bot')\n", (1508, 1545), False, 'import argparse\n'), ((2553, 2585), 'lancedb.connect', 'lancedb.connect', (['"""~/tmp/lancedb"""'], {}), "('~/tmp/lancedb')\n", (2568, 2585), False, 'import lancedb\n'), ((3281, 3345), 'datasets.load_dataset', 'load_dataset', (['"""jamescalam/youtube-transcriptions"""'], {'split': '"""train"""'}), "('jamescalam/youtube-transcriptions', split='train')\n", (3293, 3345), False, 'from datasets import load_dataset\n'), ((2731, 2795), 'datasets.load_dataset', 'load_dataset', (['"""jamescalam/youtube-transcriptions"""'], {'split': '"""train"""'}), "('jamescalam/youtube-transcriptions', split='train')\n", (2743, 2795), False, 'from datasets import load_dataset\n'), ((3032, 3083), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_func', 'df'], {'show_progress': '(True)'}), '(embed_func, df, show_progress=True)\n', (3047, 3083), False, 'from lancedb.embeddings import with_embeddings\n'), ((2683, 2702), 'openai.Model.list', 'openai.Model.list', ([], {}), '()\n', (2700, 2702), False, 'import openai\n')] |
import os, time
import pandas as pd
import numpy as np
from collections import Counter
from .utils import abbreviate_book_name_in_full_reference, get_train_test_split_from_verse_list, embed_batch
from .types import TranslationTriplet, ChatResponse, VerseMap, AIResponse
from pydantic import BaseModel, Field
from typing import Any, List, Optional, Callable
from random import shuffle
import requests
import guidance
import lancedb
from lancedb.embeddings import with_embeddings
from nltk.util import ngrams
from nltk import FreqDist
import logging
# Route module logs through uvicorn's logger so they show in server output.
logger = logging.getLogger('uvicorn')
# NOTE(review): `machine` is not referenced in this chunk — confirm where it is used.
machine = 'http://192.168.1.76:8081'
def get_dataframes(target_language_code=None, file_suffix=None):
    """Get source data dataframes (literalistic english Bible and macula Greek/Hebrew).

    When `target_language_code` is given, additionally loads that language's
    verse dataframe and returns a 3-tuple (bsb, macula, target); otherwise
    returns the 2-tuple (bsb, macula).

    BUGFIX: get_target_vref_df() was previously called twice with identical
    arguments — once into an unused `target_tsv` variable — doubling the
    (download + parse) work for no effect.
    """
    bsb_bible_df = pd.read_csv('data/bsb-utf8.txt', sep='\t', names=['vref', 'content'], header=0)
    bsb_bible_df['vref'] = bsb_bible_df['vref'].apply(abbreviate_book_name_in_full_reference)
    macula_df = pd.read_csv('data/combined_greek_hebrew_vref.csv')  # Note: csv wrangled in notebook: `create-combined-macula-df.ipynb`
    if target_language_code:
        target_df = get_target_vref_df(target_language_code, file_suffix=file_suffix)
        return bsb_bible_df, macula_df, target_df
    else:
        return bsb_bible_df, macula_df
def get_vref_list(book_abbreviation=None):
    """Return verse references from data/vref.txt.

    With a book abbreviation (e.g. 'GEN'), returns that book's verse refs;
    without one, returns the de-duplicated list of book codes. The vref file
    is downloaded on first use.
    """
    vref_url = 'https://raw.githubusercontent.com/BibleNLP/ebible/main/metadata/vref.txt'
    if not os.path.exists('data/vref.txt'):
        os.system(f'wget {vref_url} -O data/vref.txt')
    with open('data/vref.txt', 'r', encoding="utf8") as f:
        raw_lines = f.readlines()
    if book_abbreviation:
        return [line.strip() for line in raw_lines if line.startswith(book_abbreviation)]
    return list({line.strip().split(' ')[0] for line in raw_lines})
def get_target_vref_df(language_code, file_suffix=None, drop_empty_verses=False):
    """Load a target-language Bible from the eBible corpus as a vref/content dataframe.

    Downloads the corpus file (and the shared vref index) on first use and
    pairs each verse reference with the corresponding line of text.
    """
    if len(language_code) != 3:
        return 'Invalid language code. Please use 3-letter ISO 639-3 language code.'
    code = language_code.lower().strip()
    code = f'{code}-{code}{file_suffix if file_suffix else ""}'
    target_data_url = f'https://raw.githubusercontent.com/BibleNLP/ebible/main/corpus/{code}.txt'
    path = f'data/{code}.txt'
    if not os.path.exists(path):
        try:
            os.system(f'wget {target_data_url} -O {path}')
        except:
            return 'No data found for language code. Please check the eBible repo for available data.'
    with open(path, 'r', encoding="utf8") as f:
        target_text = [line.strip() for line in f.readlines()]
    vref_url = 'https://raw.githubusercontent.com/BibleNLP/ebible/main/metadata/vref.txt'
    if not os.path.exists('data/vref.txt'):
        os.system(f'wget {vref_url} -O data/vref.txt')
    with open('data/vref.txt', 'r', encoding="utf8") as f:
        target_vref = [line.strip() for line in f.readlines()]
    pairs = list(zip(target_vref, target_text))
    if drop_empty_verses:
        pairs = [pair for pair in pairs if pair[1] != '']
    return pd.DataFrame(pairs, columns=['vref', 'content'])
from pandas import DataFrame as DataFrameClass
def create_lancedb_table_from_df(df: DataFrameClass, table_name, content_column_name='content'):
    """Turn a pandas dataframe into a LanceDB table.

    Renames `content_column_name` to 'text' (lancedb's expected column),
    computes embeddings with embed_batch, and either creates the table or
    appends to an existing one. Returns the table handle.
    """
    start_time = time.time()
    logger.info('Creating LanceDB table...')
    import lancedb
    from lancedb.embeddings import with_embeddings
    logger.error(f'Creating LanceDB table: {table_name}, {df.head}')
    # rename 'content' field as 'text' as lancedb expects
    try:
        df = df.rename(columns={content_column_name: 'text'})
    except:
        assert 'text' in df.columns, 'Please rename the content column to "text" or specify the column name in the function call.'
    # Add target_language_code to the dataframe
    df['language_code'] = table_name
    # mkdir lancedb if it doesn't exist
    if not os.path.exists('./lancedb'):
        os.mkdir('./lancedb')
    # Connect to LanceDB
    db = lancedb.connect("./lancedb")
    table = get_table_from_database(table_name)
    if not table:
        # If it doesn't exist, create it
        # Empty-text rows are dropped before embedding.
        df_filtered = df[df['text'].str.strip() != '']
        # data = with_embeddings(embed_batch, df_filtered.sample(1000)) # FIXME: I can't process the entirety of the bsb bible for some reason. Something is corrupt or malformed in the data perhaps
        data = with_embeddings(embed_batch, df_filtered)
        # data = with_embeddings(embed_batch, df)
        table = db.create_table(
            table_name,
            data=data,
            mode="create",
        )
    else:
        # If it exists, append to it
        df_filtered = df[df['text'].str.strip() != '']
        # NOTE(review): .sample(10000) raises if fewer than 10000 rows remain,
        # and verify the table handle exposes `.append` (vs lancedb's `.add`).
        data = with_embeddings(embed_batch, df_filtered.sample(10000))
        data = data.fillna(0)  # Fill missing values with 0
        table.append(data)
    print('LanceDB table created. Time elapsed: ', time.time() - start_time, 'seconds.')
    return table
def load_database(target_language_code=None, file_suffix=None):
    """Populate LanceDB with the English/Greek/Hebrew tables and, optionally,
    a target-language table. Returns True when finished."""
    print('Loading dataframes...')
    if target_language_code:
        print(f'Loading target language data for {target_language_code} (suffix: {file_suffix})...')
        bsb_bible_df, macula_df, target_df = get_dataframes(target_language_code, file_suffix=file_suffix)
    else:
        print('No target language code specified. Loading English and Greek/Hebrew data only.')
        bsb_bible_df, macula_df = get_dataframes()
        target_df = None
    print('Creating tables...')
    create_lancedb_table_from_df(bsb_bible_df, 'bsb_bible')
    create_lancedb_table_from_df(macula_df, 'macula')
    if target_df is not None:
        print('Creating target language tables...')
        # Table name carries the optional file suffix so variants don't collide.
        target_table_name = f'{target_language_code}{file_suffix}' if file_suffix else target_language_code
        create_lancedb_table_from_df(target_df, target_table_name)
    print('Database populated.')
    return True
def get_table_from_database(table_name):
    """
    Returns a table by name.
    Use '/api/db_info' endpoint to see available tables.
    """
    import lancedb
    db = lancedb.connect("./lancedb")
    available = db.table_names()
    if table_name in available:
        return db.open_table(table_name)
    logger.error(f'''Table {table_name} not found. Please check the table name and try again.
        Available tables: {available}''')
    return None
def get_verse_triplet(full_verse_ref: str, language_code: str, bsb_bible_df, macula_df):
    """
    Get verse from bsb_bible_df,
    AND macula_df (greek and hebrew)
    AND target_vref_data (target language)
    e.g., http://localhost:3000/api/verse/GEN%202:19&aai
    or NT: http://localhost:3000/api/verse/ROM%202:19&aai

    Returns:
        dict with 'bsb', 'macula' and 'target' entries (each carrying
        verse_number/vref/content), or None when the verse is missing from
        any of the three sources.
    """
    bsb_row = bsb_bible_df[bsb_bible_df['vref'] == full_verse_ref]
    macula_row = macula_df[macula_df['vref'] == full_verse_ref]
    target_df = get_target_vref_df(language_code)
    target_row = target_df[target_df['vref'] == full_verse_ref]
    # Bug fix: the original checked only bsb/macula and then indexed into
    # target_row unconditionally, raising IndexError for untranslated verses.
    if bsb_row.empty or macula_row.empty or target_row.empty:
        return None

    def _entry(row):
        # First matching row only; 'vref' is assumed unique per dataframe.
        idx = row.index[0]
        return {
            'verse_number': int(idx),
            'vref': row['vref'][idx],
            'content': row['content'][idx],
        }

    return {
        'bsb': _entry(bsb_row),
        'macula': _entry(macula_row),
        'target': _entry(target_row),
    }
def query_lancedb_table(language_code: str, query: str, limit: str='50'):
    """Get similar sentences from a LanceDB table.

    Args:
        language_code (str): name of the LanceDB table to search.
        query (str): free-text query; embedded before the vector search.
        limit (str | int): max number of results. Accepts a string because
            FastAPI query parameters may arrive untyped.

    Returns:
        list of {'text', 'vref'} dicts; [] when nothing matched; or an
        error dict when the table does not exist.
    """
    # Bug fix: LanceDB's .limit() expects an integer; coerce explicitly
    # instead of passing the raw query-param string through.
    limit = int(limit)
    table = get_table_from_database(language_code)
    if not table:
        return {'error': 'table not found'}
    # Embed only after confirming the table exists (avoids wasted work).
    query_vector = embed_batch([query])[0]
    result = table.search(query_vector).limit(limit).to_df().to_dict()
    if not result.values():
        return []
    texts = result['text']
    vrefs = result['vref']
    return [{'text': texts[i], 'vref': vrefs[i]} for i in range(len(texts))]
def get_unique_tokens_for_language(language_code):
    """Return a Counter of token frequencies for the requested corpus."""
    ignored = ['']
    # Pick the dataframe matching the requested corpus.
    if language_code in ('bsb', 'bsb_bible'):
        df, _, _ = get_dataframes()
    elif language_code == 'macula':
        _, df, _ = get_dataframes()
    else:
        _, _, df = get_dataframes(target_language_code=language_code)
    tokens = df['content'].apply(lambda text: text.split(' ')).explode().tolist()
    return Counter(tok for tok in tokens if tok not in ignored)
def get_ngrams(language_code: str, size: int=2, n=100, string_filter: list[str]=None):
    """Get ngrams with frequencies for a language
    Params:
    - language_code (str): language code
    - size (int): ngram size
    - n (int): max number of ngrams to return
    - string_filter (list[str]): if passed, only return ngrams where all ngram tokens are contained in string_filter
        A string_filter might be, for example, a tokenized sentence where you want to detect ngrams relative to the entire corpus.
    NOTE: calculating these is not slow, and it is assumed that the corpus itself will change during iterative translation
        If it winds up being slow, we can cache the results and only recalculate when the corpus changes. # ?FIXME
    """
    # Bug fix: the default was a mutable list ([]); None avoids the shared
    # mutable-default pitfall and is behaviorally identical (both falsy).
    tokens_to_ignore = ['']
    # TODO: use a real character filter. I'm sure NLTK has something built in
    if language_code == 'bsb' or language_code == 'bsb_bible':
        df, _, _ = get_dataframes()
    elif language_code == 'macula':
        _, df, _ = get_dataframes()
    else:
        _, _, df = get_dataframes(target_language_code=language_code)
    target_tokens = df['content'].apply(lambda x: x.split(' ')).explode().tolist()
    target_tokens = [token for token in target_tokens if token not in tokens_to_ignore]
    n_grams = [tuple(gram) for gram in ngrams(target_tokens, size)]
    print('ngrams before string_filter:', len(n_grams))
    if string_filter:
        print('filtering with string_filter')
        allowed = set(string_filter)  # O(1) membership instead of list scans
        n_grams = [gram for gram in n_grams if all(token in allowed for token in gram)]
    freq_dist = FreqDist(n_grams)
    print('ngrams after string_filter:', len(n_grams))
    return list(freq_dist.most_common(n))
def build_translation_prompt(
    vref,
    target_language_code,
    source_language_code=None,
    bsb_bible_df=None,
    macula_df=None,
    number_of_examples=3,
    backtranslate=False) -> dict[str, TranslationTriplet]:
    """Build a few-shot translation prompt for a single verse.

    Collects `number_of_examples` similar verses (by vector search on the
    source/bridge corpus) as exemplar triplets, then appends the verse to be
    translated. Returns a dict keyed by vref of TranslationTriplet dicts.
    """
    if bsb_bible_df is None or bsb_bible_df.empty or macula_df is None or macula_df.empty:  # build bsb_bible_df and macula_df only if not supplied (saves overhead)
        bsb_bible_df, macula_df, target_df = get_dataframes(target_language_code=target_language_code)
    else:
        # Bug fix: target_df was previously left unbound when the caller
        # supplied bsb_bible_df/macula_df, raising NameError further down.
        _, _, target_df = get_dataframes(target_language_code=target_language_code)
    if source_language_code:
        _, _, source_df = get_dataframes(target_language_code=source_language_code)
    else:
        source_df = bsb_bible_df
    # Query the LanceDB table for the most similar verses to the source text (or bsb if source_language_code is None)
    table_name = source_language_code if source_language_code else 'bsb_bible'
    query = source_df[source_df['vref']==vref]['content'].values[0]
    original_language_source = macula_df[macula_df['vref']==vref]['content'].values[0]
    print(f'Query result: {query}')
    similar_verses = query_lancedb_table(table_name, query, limit=number_of_examples)  # FIXME: query 50 and then filter to first n that have target content?
    triplets = [get_verse_triplet(similar_verse['vref'], target_language_code, bsb_bible_df, macula_df) for similar_verse in similar_verses]
    target_verse = target_df[target_df['vref']==vref]['content'].values[0]
    # Initialize an empty dictionary to store the JSON objects
    json_objects: dict[str, TranslationTriplet] = dict()
    for triplet in triplets:
        # Create a JSON object for each triplet with top-level keys being the VREFs
        json_objects[triplet["bsb"]["vref"]] = TranslationTriplet(
            source=triplet["macula"]["content"],
            bridge_translation=triplet["bsb"]["content"],
            target=triplet["target"]["content"]  # FIXME: validate that content exists here?
        ).to_dict()
    # Add the source verse Greek/Hebrew and English reference to the JSON objects
    json_objects[vref] = TranslationTriplet(
        source=original_language_source,
        bridge_translation=query,
        target=target_verse
    ).to_dict()
    return json_objects
def execute_discriminator_evaluation(verse_triplets: dict[str, TranslationTriplet], hypothesis_vref: str, hypothesis_key='target') -> ChatResponse:
    """
    Accepts an array of verses as verse_triplets.
    The final triplet is assumed to be the hypothesis.
    The hypothesis string is assumed to be the target language rendering.
    This simple discriminator type of evaluation scrambles the input verse_triplets
    and prompts the LLM to detect which is the hypothesis.

    Args:
        verse_triplets: vref -> triplet dict; the hypothesis is one of them.
        hypothesis_vref: vref of the hypothesis triplet (used for logging).
        hypothesis_key: which field of each triplet is shown to the LLM.

    Returns the raw chat-completion JSON from the LLM endpoint. (The shape
    sketched originally — y_index / y_hat_index / rationale — would require
    parsing that response.)

    If you introduce any intermediate translation steps (e.g., leaving unknown tokens untranslated),
    then this type of evaluation is not recommended.
    """
    hypothesis_triplet = verse_triplets[hypothesis_vref]
    print(f'Hypothesis: {hypothesis_triplet}')
    verse_triplets_list: list[tuple] = list(verse_triplets.items())
    print('Verse triplets keys:', [k for k, v in verse_triplets_list])
    # Shuffle so the hypothesis is not always in the final position.
    shuffle(verse_triplets_list)
    print(f'Shuffled verse triplets keys: {[k for k, v in verse_triplets_list]}')
    # Build the prompt, one line per candidate verse.
    prompt = ''
    for i, triplet in enumerate(verse_triplets_list):
        print(f'Verse triplet {i}: {triplet}')
        # Bug fix: hypothesis_key was accepted but ignored; the field shown
        # to the LLM was hard-coded to "target". Default keeps old behavior.
        prompt += f'\n{triplet[0]}. Target: {triplet[1][hypothesis_key]}'
    url = f"{machine}/v1/chat/completions"
    headers = {
        "Content-Type": "application/json",
    }
    payload = {
        "messages": [
            # FIXME: I think I should just ask the model to designate which verse stands out as the least likely to be correct.
            {"role": "user", "content": f"### Instruction: One of these translations is incorrect, and you can only try to determine by comparing the examples given:\n{prompt}\nWhich one of these is incorrect? (show only '[put verse ref here] -- rationale as to why you picked this one relative only to the other options')\n###Response:"}
        ],
        "temperature": 0.7,
        "max_tokens": -1,
        "stream": False,
    }
    response = requests.post(url, json=payload, headers=headers)
    return response.json()
def execute_fewshot_translation(vref, target_language_code, source_language_code=None, bsb_bible_df=None, macula_df=None, number_of_examples=3, backtranslate=False) -> ChatResponse:
    """Translate a single verse by few-shot prompting the local LLM endpoint."""
    prompt = build_translation_prompt(vref, target_language_code, source_language_code, bsb_bible_df, macula_df, number_of_examples, backtranslate)
    endpoint = f"{machine}/v1/chat/completions"
    request_body = {
        "messages": [
            {"role": "user", "content": prompt}
        ],
        "temperature": 0.7,
        "max_tokens": -1,
        "stream": False,
    }
    reply = requests.post(endpoint, json=request_body, headers={"Content-Type": "application/json"})
    return reply.json()
class RevisionLoop(BaseModel):
    """Iterator that alternates a translation step (A) with an evaluation step (B).

    Each iteration calls function_a, feeds its output into function_b, and
    records the pair in loop_data. Iteration stops after `iterations` passes.
    """
    # FIXME: this loop should only work for (revise-evaluate)*n, where you start with a translation draft.
    # TODO: implement a revision function whose output could be evaluated
    iterations: int
    function_a: Optional[Callable] = None
    function_b: Optional[Callable] = None
    function_a_output: Optional[Any] = Field(None, description="Output of function A")
    function_b_output: Optional[Any] = Field(None, description="Output of function B")
    loop_data: Optional[List[Any]] = Field(None, description="List to store data generated in the loop")
    current_iteration: int = Field(0, description="Current iteration of the loop")
    def __init__(self, iterations: int, function_a=execute_fewshot_translation, function_b=execute_discriminator_evaluation):
        # Pydantic validates `iterations`; callables are assigned afterwards.
        super().__init__(iterations=iterations)
        self.function_a = function_a
        self.function_b = function_b
        self.loop_data = ['test item']
    def __iter__(self):
        # Restart the loop from the beginning each time iteration starts.
        self.current_iteration = 0
        return self
    def __next__(self):
        if self.current_iteration < self.iterations:
            print("Executing function A...")
            # NOTE(review): function_a is called with no arguments here —
            # the default execute_fewshot_translation requires args; confirm.
            self.function_a_output: VerseMap = self.function_a()
            print("Executing function B...")
            # inputs for function b: (verse_triplets: dict[str, TranslationTriplet], hypothesis_vref: str, hypothesis_key='target') -> ChatResponse:
            function_b_input = {
                "verse_triplets": self.function_a_output,
                "hypothesis_vref": list(self.function_a_output.keys())[-1],
                "hypothesis_key": "target"
            }
            self.function_b_output = self.function_b(**function_b_input)
            self.loop_data.append((self.function_a_output, self.function_b_output))
            self.current_iteration += 1
            return self.function_a_output, self.function_b_output
        else:
            print("Reached maximum iterations, stopping loop...")
            raise StopIteration
    def get_loop_data(self):
        # Accumulated (A-output, B-output) pairs from all iterations so far.
        return self.loop_data
class Translation():
    """Translations differ from revisions insofar as revisions require an existing draft of the target"""
    def __init__(self, vref: str, target_language_code: str, number_of_examples=3, should_backtranslate=False):
        self.vref = vref
        self.target_language_code = target_language_code
        self.number_of_examples = number_of_examples
        self.should_backtranslate = should_backtranslate
        # Presumably returns (bsb, macula) when called without a target code — confirm against get_dataframes.
        bsb_bible_df, macula_df = get_dataframes()
        self.verse = get_verse_triplet(full_verse_ref=self.vref, language_code=self.target_language_code, bsb_bible_df=bsb_bible_df, macula_df=macula_df)
        self.vref_triplets = build_translation_prompt(vref, target_language_code)
        # Predict translation
        # NOTE(review): number_of_examples/should_backtranslate are stored on the
        # instance but the literals 3/False are passed here — confirm intended.
        self.hypothesis: ChatResponse = execute_fewshot_translation(vref, target_language_code, source_language_code=None, bsb_bible_df=bsb_bible_df, macula_df=macula_df, number_of_examples=3, backtranslate=False)
        # Get feedback on the translation
        # NOTE: here is where various evaluation functions could be swapped out
        self.feedback: ChatResponse = execute_discriminator_evaluation(self.vref_triplets, self.vref)
    def get_hypothesis(self):
        # Raw LLM chat response for the translation attempt.
        return self.hypothesis
    def get_feedback(self):
        # Raw LLM chat response for the discriminator evaluation.
        return self.feedback
| [
"lancedb.connect",
"lancedb.embeddings.with_embeddings"
] | [((559, 587), 'logging.getLogger', 'logging.getLogger', (['"""uvicorn"""'], {}), "('uvicorn')\n", (576, 587), False, 'import logging\n'), ((801, 880), 'pandas.read_csv', 'pd.read_csv', (['"""data/bsb-utf8.txt"""'], {'sep': '"""\t"""', 'names': "['vref', 'content']", 'header': '(0)'}), "('data/bsb-utf8.txt', sep='\\t', names=['vref', 'content'], header=0)\n", (812, 880), True, 'import pandas as pd\n'), ((991, 1041), 'pandas.read_csv', 'pd.read_csv', (['"""data/combined_greek_hebrew_vref.csv"""'], {}), "('data/combined_greek_hebrew_vref.csv')\n", (1002, 1041), True, 'import pandas as pd\n'), ((3498, 3551), 'pandas.DataFrame', 'pd.DataFrame', (['target_tsv'], {'columns': "['vref', 'content']"}), "(target_tsv, columns=['vref', 'content'])\n", (3510, 3551), True, 'import pandas as pd\n'), ((3797, 3808), 'time.time', 'time.time', ([], {}), '()\n', (3806, 3808), False, 'import os, time\n'), ((4519, 4547), 'lancedb.connect', 'lancedb.connect', (['"""./lancedb"""'], {}), "('./lancedb')\n", (4534, 4547), False, 'import lancedb\n'), ((6898, 6926), 'lancedb.connect', 'lancedb.connect', (['"""./lancedb"""'], {}), "('./lancedb')\n", (6913, 6926), False, 'import lancedb\n'), ((10036, 10058), 'collections.Counter', 'Counter', (['target_tokens'], {}), '(target_tokens)\n', (10043, 10058), False, 'from collections import Counter\n'), ((11697, 11714), 'nltk.FreqDist', 'FreqDist', (['n_grams'], {}), '(n_grams)\n', (11705, 11714), False, 'from nltk import FreqDist\n'), ((15245, 15273), 'random.shuffle', 'shuffle', (['verse_triplets_list'], {}), '(verse_triplets_list)\n', (15252, 15273), False, 'from random import shuffle\n'), ((16298, 16347), 'requests.post', 'requests.post', (['url'], {'json': 'payload', 'headers': 'headers'}), '(url, json=payload, headers=headers)\n', (16311, 16347), False, 'import requests\n'), ((17017, 17066), 'requests.post', 'requests.post', (['url'], {'json': 'payload', 'headers': 'headers'}), '(url, json=payload, headers=headers)\n', (17030, 17066), False, 
'import requests\n'), ((17450, 17497), 'pydantic.Field', 'Field', (['None'], {'description': '"""Output of function A"""'}), "(None, description='Output of function A')\n", (17455, 17497), False, 'from pydantic import BaseModel, Field\n'), ((17537, 17584), 'pydantic.Field', 'Field', (['None'], {'description': '"""Output of function B"""'}), "(None, description='Output of function B')\n", (17542, 17584), False, 'from pydantic import BaseModel, Field\n'), ((17622, 17689), 'pydantic.Field', 'Field', (['None'], {'description': '"""List to store data generated in the loop"""'}), "(None, description='List to store data generated in the loop')\n", (17627, 17689), False, 'from pydantic import BaseModel, Field\n'), ((17719, 17772), 'pydantic.Field', 'Field', (['(0)'], {'description': '"""Current iteration of the loop"""'}), "(0, description='Current iteration of the loop')\n", (17724, 17772), False, 'from pydantic import BaseModel, Field\n'), ((1562, 1593), 'os.path.exists', 'os.path.exists', (['"""data/vref.txt"""'], {}), "('data/vref.txt')\n", (1576, 1593), False, 'import os, time\n'), ((1603, 1649), 'os.system', 'os.system', (['f"""wget {vref_url} -O data/vref.txt"""'], {}), "(f'wget {vref_url} -O data/vref.txt')\n", (1612, 1649), False, 'import os, time\n'), ((2626, 2646), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2640, 2646), False, 'import os, time\n'), ((3081, 3112), 'os.path.exists', 'os.path.exists', (['"""data/vref.txt"""'], {}), "('data/vref.txt')\n", (3095, 3112), False, 'import os, time\n'), ((3122, 3168), 'os.system', 'os.system', (['f"""wget {vref_url} -O data/vref.txt"""'], {}), "(f'wget {vref_url} -O data/vref.txt')\n", (3131, 3168), False, 'import os, time\n'), ((4421, 4448), 'os.path.exists', 'os.path.exists', (['"""./lancedb"""'], {}), "('./lancedb')\n", (4435, 4448), False, 'import os, time\n'), ((4458, 4479), 'os.mkdir', 'os.mkdir', (['"""./lancedb"""'], {}), "('./lancedb')\n", (4466, 4479), False, 'import os, time\n'), ((4933, 
4974), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_batch', 'df_filtered'], {}), '(embed_batch, df_filtered)\n', (4948, 4974), False, 'from lancedb.embeddings import with_embeddings\n'), ((2673, 2719), 'os.system', 'os.system', (['f"""wget {target_data_url} -O {path}"""'], {}), "(f'wget {target_data_url} -O {path}')\n", (2682, 2719), False, 'import os, time\n'), ((5469, 5480), 'time.time', 'time.time', ([], {}), '()\n', (5478, 5480), False, 'import os, time\n'), ((11431, 11458), 'nltk.util.ngrams', 'ngrams', (['target_tokens', 'size'], {}), '(target_tokens, size)\n', (11437, 11458), False, 'from nltk.util import ngrams\n')] |
import logging
import pyarrow as pa
import pyarrow.compute as pc
from tabulate import tabulate
from llama_cpp import Llama
from dryg.settings import DEFAULT_MODEL
from dryg.db import open_table, create_table
from lancedb.embeddings import with_embeddings
MODEL = None
def get_code_blocks(body: pa.ChunkedArray):
    """
    Get code blocks from the body of an issue

    Args:
        body (str): Body of the issue

    Returns:
        list: List of code blocks
    """
    blocks = []
    for chunk in body:
        if chunk is None:
            continue
        # Odd-indexed pieces after splitting on the fence are the fenced bodies.
        blocks.extend(str(chunk).split("```")[1::2])
    return blocks
def setup_model(model_name: str = None):
    """
    Set the model to be used for embedding

    Loads a llama.cpp model (``.bin``) into the module-level MODEL global;
    raises ValueError for any other model format.
    """
    global MODEL
    if model_name is None:
        model_name = DEFAULT_MODEL
    if not model_name.endswith(".bin"):
        raise ValueError("Invalid model format")
    MODEL = Llama(model_name, embedding=True, n_threads=8)  # workers=8 hardcoded for now
def embedding_func(batch):
    """
    Embedding function for the model

    Lazily initialises the default model on first use, then embeds every
    item of ``batch``.
    """
    if MODEL is None:
        setup_model()
    return [MODEL.embed(item) for item in batch]
def save_embeddings(issue_table: str, force: bool = False):
    """
    Create an index for the issue table

    Embeds each row's title and rewrites the table with a vector column.
    Skips the work if embeddings already exist, unless ``force`` is True.
    """
    arrow_table = open_table(issue_table).to_arrow()
    if not force and "vector" in arrow_table.column_names:
        logging.info("Embeddings already exist. Use `force=True` to overwrite")
        return
    embedded = with_embeddings(embedding_func, arrow_table, "title")  # Turn this into a Toy problem
    create_table(issue_table, embedded, mode="overwrite")
def search_table(table: str, query: str):
    """
    Search issues in the issue table

    Args:
        issue_table (str): Name of the issue table
        query (str): Query to search for

    Returns:
        list: List of issues
    """
    issues = open_table(table)
    vector = embedding_func([query])[0]
    hits = issues.search(vector).limit(4).to_df()
    # Render the top hits as a two-column table on stdout.
    rows = [["Title", "Link"]]
    rows.extend([title, link] for title, link in zip(hits["title"], hits["html_url"]))
    print(tabulate(rows))
| [
"lancedb.embeddings.with_embeddings"
] | [((1527, 1575), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embedding_func', 'issues', '"""title"""'], {}), "(embedding_func, issues, 'title')\n", (1542, 1575), False, 'from lancedb.embeddings import with_embeddings\n'), ((1611, 1662), 'dryg.db.create_table', 'create_table', (['issue_table', 'issues'], {'mode': '"""overwrite"""'}), "(issue_table, issues, mode='overwrite')\n", (1623, 1662), False, 'from dryg.db import open_table, create_table\n'), ((1918, 1935), 'dryg.db.open_table', 'open_table', (['table'], {}), '(table)\n', (1928, 1935), False, 'from dryg.db import open_table, create_table\n'), ((895, 941), 'llama_cpp.Llama', 'Llama', (['model_name'], {'embedding': '(True)', 'n_threads': '(8)'}), '(model_name, embedding=True, n_threads=8)\n', (900, 941), False, 'from llama_cpp import Llama\n'), ((1426, 1497), 'logging.info', 'logging.info', (['"""Embeddings already exist. Use `force=True` to overwrite"""'], {}), "('Embeddings already exist. Use `force=True` to overwrite')\n", (1438, 1497), False, 'import logging\n'), ((2198, 2213), 'tabulate.tabulate', 'tabulate', (['table'], {}), '(table)\n', (2206, 2213), False, 'from tabulate import tabulate\n'), ((1329, 1352), 'dryg.db.open_table', 'open_table', (['issue_table'], {}), '(issue_table)\n', (1339, 1352), False, 'from dryg.db import open_table, create_table\n')] |
from pathlib import Path
from collections import defaultdict
import math
import json
import pandas as pd
import cv2
import duckdb
import matplotlib.pyplot as plt
import numpy as np
import yaml
from tqdm import tqdm
from ultralytics.utils import LOGGER, colorstr
from ultralytics.utils.plotting import Annotator, colors
from torch import Tensor
import lancedb
import pyarrow as pa
from lancedb.embeddings import with_embeddings
from sklearn.decomposition import PCA
from yoloexplorer.dataset import get_dataset_info, Dataset
from yoloexplorer.frontend import launch
from yoloexplorer.config import TEMP_CONFIG_PATH
import torch
import torchvision.models as models
from torchvision import datasets, transforms
from PIL import Image
import sys
# Column names persisted for every dataset row in the LanceDB table.
# The embedding step appends an additional "vector" column.
SCHEMA = [
    "id",
    # "img", # Make this optional; disabled by default. Not feasible unless we can have row_id/primary key to index
    "path",
    "cls",
    "labels",
    "bboxes",
    "segments",
    "keypoints",
    "meta",
]  # + "vector" with embeddings
def encode(img_path):
    """Read an image from disk and return it re-encoded as raw bytes.

    The encoding format is inferred from the file extension.
    """
    image = cv2.imread(img_path)
    suffix = Path(img_path).suffix
    return cv2.imencode(suffix, image)[1].tobytes()
def decode(img_encoded):
    """Decode raw image bytes (as produced by ``encode``) back into an image array."""
    buffer = np.frombuffer(img_encoded, np.byte)
    return cv2.imdecode(buffer, cv2.IMREAD_ANYCOLOR)
class Explorer:
"""
Dataset explorer
"""
    def __init__(self, data, device="", model="resnet18", batch_size=64, project="run") -> None:
        """
        Args:
            data (str, optional): path to dataset file
            table (str, optional): path to LanceDB table to load embeddings Table from.
            model (str, optional): path to model. Defaults to None.
            device (str, optional): device to use. Defaults to ''. If empty, uses the default device.
            project (str, optional): path to project. Defaults to "runs/dataset".
        """
        self.data = data
        self.table = None  # set lazily by build_embeddings()
        self.model = model
        self.device = device
        self.batch_size = batch_size
        self.project = project
        self.dataset_info = None
        self.predictor = None
        self.trainset = None
        self.removed_img_count = 0
        self.verbose = False  # For embedding function
        self._sim_index = None
        self.version = None
        # Table names derive from the dataset file name; the "_temp" table
        # holds unpersisted edits (see remove_imgs/add_imgs/persist).
        self.table_name = Path(data).name
        self.temp_table_name = self.table_name + "_temp"
        self.model_arch_supported = [
            "resnet18",
            "resnet50",
            "efficientnet_b0",
            "efficientnet_v2_s",
            "googlenet",
            "mobilenet_v3_small",
        ]
        if model:
            self.predictor = self._setup_predictor(model, device)
        if data:
            self.dataset_info = get_dataset_info(self.data)
        # Preprocessing applied before feeding images to the embedding model.
        self.transform = transforms.Compose(
            [
                transforms.Resize((224, 224)),
                transforms.ToTensor(),
            ]
        )
    def build_embeddings(self, verbose=False, force=False, store_imgs=False):
        """
        Builds the dataset in LanceDB table format

        Args:
            batch (int, optional): batch size. Defaults to 1000.
            verbose (bool, optional): verbose. Defaults to False.
            force (bool, optional): force rebuild. Defaults to False.
        """
        trainset = self.dataset_info["train"]
        trainset = trainset if isinstance(trainset, list) else [trainset]
        self.trainset = trainset
        self.verbose = verbose
        dataset = Dataset(img_path=trainset, data=self.dataset_info, augment=False, cache=False)
        batch_size = self.batch_size  # TODO: fix this hardcoding
        db = self._connect()
        if not force and self.table_name in db.table_names():
            LOGGER.info("LanceDB embedding space already exists. Attempting to reuse it. Use force=True to overwrite.")
            self.table = self._open_table(self.table_name)
            self.version = self.table.version
            # Reuse only if the table covers every image; otherwise rebuild.
            if len(self.table) == dataset.ni:
                return
            else:
                self.table = None
                LOGGER.info("Table length does not match the number of images in the dataset. Building embeddings...")
        table_data = defaultdict(list)
        for idx, batch in enumerate(dataset):
            batch["id"] = idx
            batch["cls"] = batch["cls"].flatten().int().tolist()
            # Keep boxes and classes aligned while sorting by class id.
            box_cls_pair = sorted(zip(batch["bboxes"].tolist(), batch["cls"]), key=lambda x: x[1])
            batch["bboxes"] = [box for box, _ in box_cls_pair]
            batch["cls"] = [cls for _, cls in box_cls_pair]
            batch["labels"] = [self.dataset_info["names"][i] for i in batch["cls"]]
            batch["path"] = batch["im_file"]
            # batch["cls"] = batch["cls"].tolist()
            keys = (key for key in SCHEMA if key in batch)
            for key in keys:
                val = batch[key]
                if isinstance(val, Tensor):
                    val = val.tolist()
                table_data[key].append(val)
                # NOTE(review): this append and the flush below sit inside the
                # per-key loop, so images may append once per key and the final
                # flush can trigger before all keys are collected — confirm.
                table_data["img"].append(encode(batch["im_file"])) if store_imgs else None
                if len(table_data[key]) == batch_size or idx == dataset.ni - 1:
                    df = pd.DataFrame(table_data)
                    df = with_embeddings(self._embedding_func, df, "path", batch_size=batch_size)
                    if self.table:
                        self.table.add(df)
                    else:
                        self.table = self._create_table(self.table_name, data=df, mode="overwrite")
                    self.version = self.table.version
                    table_data = defaultdict(list)
        LOGGER.info(f'{colorstr("LanceDB:")} Embedding space built successfully.')
def plot_embeddings(self):
"""
Projects the embedding space to 2D using PCA
Args:
n_components (int, optional): number of components. Defaults to 2.
"""
if self.table is None:
LOGGER.error("No embedding space found. Please build the embedding space first.")
return None
pca = PCA(n_components=2)
embeddings = np.array(self.table.to_arrow()["vector"].to_pylist())
embeddings = pca.fit_transform(embeddings)
plt.scatter(embeddings[:, 0], embeddings[:, 1])
plt.show()
    def get_similar_imgs(self, img, n=10):
        """
        Returns the n most similar images to the given image

        Args:
            img (int, str, Path): index of image in the table, or path to image
            n (int, optional): number of similar images to return. Defaults to 10.

        Returns:
            tuple: (list of paths, list of ids)
        """
        embeddings = None
        if self.table is None:
            LOGGER.error("No embedding space found. Please build the embedding space first.")
            return None
        # Resolve the query to an embedding: stored vector for an int index /
        # known path(s); otherwise computed from the image via the predictor.
        if isinstance(img, int):
            embeddings = self.table.to_pandas()["vector"][img]
        elif isinstance(img, (str, Path)):
            img = img
        elif isinstance(img, bytes):
            img = decode(img)
        elif isinstance(img, list):  # exceptional case for batch search from dash
            df = self.table.to_pandas().set_index("path")
            array = None
            try:
                array = df.loc[img]["vector"].to_list()
                embeddings = np.array(array)
            except KeyError:
                # Paths not in the table: fall through and embed them below.
                pass
        else:
            LOGGER.error("img should be index from the table(int), path of an image (str or Path), or bytes")
            return
        if embeddings is None:
            if isinstance(img, list):
                embeddings = np.array(
                    [self.predictor(self._image_encode(i)).squeeze().cpu().detach().numpy() for i in img]
                )
            else:
                embeddings = self.predictor(self._image_encode(img)).squeeze().cpu().detach().numpy()
        # Batch queries collapse to a single mean embedding before searching.
        if len(embeddings.shape) > 1:
            embeddings = np.mean(embeddings, axis=0)
        sim = self.table.search(embeddings).limit(n).to_df()
        return sim["path"].to_list(), sim["id"].to_list()
def plot_similar_imgs(self, img, n=10):
"""
Plots the n most similar images to the given image
Args:
img (int, str, Path): index of image in the table, or path to image.
n (int, optional): number of similar images to return. Defaults to 10.
"""
_, ids = self.get_similar_imgs(img, n)
self.plot_imgs(ids)
def plot_imgs(self, ids=None, query=None, labels=True):
if ids is None and query is None:
ValueError("ids or query must be provided")
# Resize the images to the minimum and maximum width and height
resized_images = []
df = self.sql(query) if query else self.table.to_pandas().iloc[ids]
for _, row in df.iterrows():
img = cv2.imread(row["path"])
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if labels:
ann = Annotator(img)
for box, label, cls in zip(row["bboxes"], row["labels"], row["cls"]):
ann.box_label(box, label, color=colors(cls, True))
img = ann.result()
resized_images.append(img)
if not resized_images:
LOGGER.error("No images found")
return
# Create a grid of the images
cols = 10 if len(resized_images) > 10 else max(2, len(resized_images))
rows = max(1, math.ceil(len(resized_images) / cols))
fig, axes = plt.subplots(nrows=rows, ncols=cols)
fig.subplots_adjust(hspace=0, wspace=0)
for i, ax in enumerate(axes.ravel()):
if i < len(resized_images):
ax.imshow(resized_images[i])
ax.axis("off")
# Display the grid of images
plt.show()
    def get_similarity_index(self, top_k=0.01, sim_thres=0.90, reduce=False, sorted=False):
        """
        Args:
            sim_thres (float, optional): Similarity threshold to set the minimum similarity. Defaults to 0.9.
            top_k (float, optional): Top k fraction of the similar embeddings to apply the threshold on. Default 0.1.
            dim (int, optional): Dimension of the reduced embedding space. Defaults to 256.
            sorted (bool, optional): Sort the embeddings by similarity. Defaults to False.

        Returns:
            np.array: Similarity index
        """
        if self.table is None:
            LOGGER.error("No embedding space found. Please build the embedding space first.")
            return None
        # Clamp top_k and sim_thres to [0, 1], warning on out-of-range input.
        if top_k > 1.0:
            LOGGER.warning("top_k should be between 0 and 1. Setting top_k to 1.0")
            top_k = 1.0
        if top_k < 0.0:
            LOGGER.warning("top_k should be between 0 and 1. Setting top_k to 0.0")
            top_k = 0.0
        if sim_thres is not None:
            if sim_thres > 1.0:
                LOGGER.warning("sim_thres should be between 0 and 1. Setting sim_thres to 1.0")
                sim_thres = 1.0
            if sim_thres < 0.0:
                LOGGER.warning("sim_thres should be between 0 and 1. Setting sim_thres to 0.0")
                sim_thres = 0.0
        embs = np.array(self.table.to_arrow()["vector"].to_pylist())
        self._sim_index = np.zeros(len(embs))
        # Each query considers at most the top_k fraction of the dataset.
        limit = max(int(len(embs) * top_k), 1)
        # create a new table with reduced dimensionality to speedup the search
        self._search_table = self.table
        if reduce:
            dim = min(256, embs.shape[1])  # TODO: make this configurable
            pca = PCA(n_components=min(dim, len(embs)))
            embs = pca.fit_transform(embs)
            dim = embs.shape[1]
            values = pa.array(embs.reshape(-1), type=pa.float32())
            table_data = pa.FixedSizeListArray.from_arrays(values, dim)
            table = pa.table([table_data, self.table.to_arrow()["id"]], names=["vector", "id"])
            self._search_table = self._create_table("reduced_embs", data=table, mode="overwrite")
        # with multiprocessing.Pool() as pool:  # multiprocessing doesn't do much. Need to revisit
        #     list(tqdm(pool.imap(build_index, iterable)))
        for _, emb in enumerate(tqdm(embs)):
            df = self._search_table.search(emb).metric("cosine").limit(limit).to_df()
            if sim_thres is not None:
                # _distance is cosine distance, so similarity >= sim_thres
                # means distance <= 1 - sim_thres; the [1:] skips self-match.
                df = df.query(f"_distance >= {1.0 - sim_thres}")
            for idx in df["id"][1:]:
                self._sim_index[idx] += 1
        self._drop_table("reduced_embs") if reduce else None
        return self._sim_index if not sorted else np.sort(self._sim_index)
def plot_similarity_index(self, sim_thres=0.90, top_k=0.01, reduce=False, sorted=False):
"""
Plots the similarity index
Args:
threshold (float, optional): Similarity threshold to set the minimum similarity. Defaults to 0.9.
top_k (float, optional): Top k fraction of the similar embeddings to apply the threshold on. Default 0.1.
dim (int, optional): Dimension of the reduced embedding space. Defaults to 256.
sorted (bool, optional): Whether to sort the index or not. Defaults to False.
"""
index = self.get_similarity_index(top_k, sim_thres, reduce)
if sorted:
index = np.sort(index)
plt.bar([i for i in range(len(index))], index)
plt.xlabel("idx")
plt.ylabel("similarity count")
plt.show()
    def remove_imgs(self, idxs):
        """
        Works on temporary table. To apply the changes to the main table, call `persist()`

        Args:
            idxs (int or list): Index of the image to remove from the dataset.
        """
        if isinstance(idxs, int):
            idxs = [idxs]
        pa_table = self.table.to_arrow()
        # Boolean mask: keep every row except the requested indices.
        mask = [True for _ in range(len(pa_table))]
        for idx in idxs:
            mask[idx] = False
        self.removed_img_count += len(idxs)
        table = pa_table.filter(mask)
        # Re-number ids so they stay contiguous after removal.
        ids = [i for i in range(len(table))]
        table = table.set_column(0, "id", [ids])  # TODO: Revisit this. This is a hack to fix the ids==dix
        self.table = self._create_table(self.temp_table_name, data=table, mode="overwrite")  # work on a temporary table
        self.log_status()
def add_imgs(self, exp, idxs):
"""
Works on temporary table. To apply the changes to the main table, call `persist()`
Args:
data (pd.DataFrame or pa.Table): Table rows to add to the dataset.
"""
table_df = self.table.to_pandas()
data = exp.table.to_pandas().iloc[idxs]
assert len(table_df["vector"].iloc[0]) == len(data["vector"].iloc[0]), "Vector dimension mismatch"
table_df = pd.concat([table_df, data], ignore_index=True)
ids = [i for i in range(len(table_df))]
table_df["id"] = ids
self.table = self._create_table(
self.temp_table_name, data=table_df, mode="overwrite"
) # work on a temporary table
self.log_status()
def reset(self):
"""
Resets the dataset table to its original state or to the last persisted state.
"""
if self.table is None:
LOGGER.info("No changes made to the dataset.")
return
db = self._connect()
if self.temp_table_name in db.table_names():
self._drop_table(self.temp_table_name)
self.table = self._open_table(self.table_name)
self.removed_img_count = 0
# self._sim_index = None # Not sure if we should reset this as computing the index is expensive
LOGGER.info("Dataset reset to original state.")
def persist(self, name=None):
"""
Persists the changes made to the dataset. Available only if data is provided in the constructor.
Args:
name (str, optional): Name of the new dataset. Defaults to `data_updated.yaml`.
"""
db = self._connect()
if self.table is None or self.temp_table_name not in db.table_names():
LOGGER.info("No changes made to the dataset.")
return
LOGGER.info("Persisting changes to the dataset...")
self.log_status()
if not name:
name = self.data.split(".")[0] + "_updated"
datafile_name = name + ".yaml"
train_txt = "train_updated.txt"
path = Path(name).resolve() # add new train.txt file in the dataset parent path
path.mkdir(parents=True, exist_ok=True)
if (path / train_txt).exists():
(path / train_txt).unlink() # remove existing
for img in tqdm(self.table.to_pandas()["path"].to_list()):
with open(path / train_txt, "a") as f:
f.write(f"{img}" + "\n") # add image to txt file
new_dataset_info = self.dataset_info.copy()
new_dataset_info.pop("yaml_file")
new_dataset_info.pop("path") # relative paths will get messed up when merging datasets
new_dataset_info.pop("download") # Assume all files are present offline, there is no way to store metadata yet
new_dataset_info["train"] = (path / train_txt).resolve().as_posix()
for key, value in new_dataset_info.items():
if isinstance(value, Path):
new_dataset_info[key] = value.as_posix()
yaml.dump(new_dataset_info, open(path / datafile_name, "w")) # update dataset.yaml file
# TODO: not sure if this should be called data_final to prevent overwriting the original data?
self.table = self._create_table(datafile_name, data=self.table.to_arrow(), mode="overwrite")
db.drop_table(self.temp_table_name)
LOGGER.info("Changes persisted to the dataset.")
log = self._log_training_cmd(Path(path / datafile_name).relative_to(Path.cwd()).as_posix())
return log
def log_status(self):
# TODO: Pretty print log status
LOGGER.info("\n|-----------------------------------------------|")
LOGGER.info(f"\t Number of images: {len(self.table.to_arrow())}")
LOGGER.info("|------------------------------------------------|")
    def sql(self, query: str):
        """
        Executes a SQL query on the dataset table.

        Args:
            query (str): SQL query to execute.

        Returns:
            (pd.DataFrame): Query result as a DataFrame, or None if no table is loaded.
        """
        if self.table is None:
            LOGGER.info("No table found. Please provide a dataset to work on.")
            return
        # NOTE: `table` looks unused, but duckdb can resolve local variables by
        # name (replacement scan), so SQL like "SELECT * FROM table" references
        # this Arrow object — do not rename or remove it.
        table = self.table.to_arrow()  # noqa
        result = duckdb.sql(query).to_df()
        return result
def dash(self, exps=None, analysis=False):
"""
Launches a dashboard to visualize the dataset.
"""
config = {}
Path(TEMP_CONFIG_PATH).parent.mkdir(exist_ok=True, parents=True)
with open(TEMP_CONFIG_PATH, "w+") as file:
config_exp = [self.config]
if exps:
for exp in exps:
config_exp.append(exp.config)
config["exps"] = config_exp
config["analysis"] = analysis
json.dump(config, file)
launch()
    @property
    def config(self):
        """Return this explorer's configuration (project, model, device, data) as a dict."""
        return {"project": self.project, "model": self.model, "device": self.device, "data": self.data}
def _log_training_cmd(self, data_path):
success_log = (
f'{colorstr("LanceDB: ") }New dataset created successfully! Run the following command to train a model:'
)
train_cmd = f"yolo train model={self.model} data={data_path} epochs=10"
success_log = success_log + "\n" + train_cmd
LOGGER.info(success_log)
return train_cmd
def _connect(self):
db = lancedb.connect(self.project)
return db
def _create_table(self, name, data=None, mode="overwrite"):
db = lancedb.connect(self.project)
table = db.create_table(name, data=data, mode=mode)
return table
def _open_table(self, name):
db = lancedb.connect(self.project)
table = db.open_table(name) if name in db.table_names() else None
if table is None:
raise ValueError(f'{colorstr("LanceDB: ") }Table not found.')
return table
def _drop_table(self, name):
db = lancedb.connect(self.project)
if name in db.table_names():
db.drop_table(name)
return True
return False
def _copy_table_to_project(self, table_path):
if not table_path.endswith(".lance"):
raise ValueError(f"{colorstr('LanceDB: ')} Table must be a .lance file")
LOGGER.info(f"Copying table from {table_path}")
path = Path(table_path).parent
name = Path(table_path).stem # lancedb doesn't need .lance extension
db = lancedb.connect(path)
table = db.open_table(name)
return self._create_table(self.table_name, data=table.to_arrow(), mode="overwrite")
def _image_encode(self, img):
image = Image.open(img)
n_channels = np.array(image).ndim
if n_channels == 2:
image = image.convert(mode="RGB")
img_tensor = self.transform(image)
trans_img = img_tensor.unsqueeze(0)
return trans_img
def _embedding_func(self, imgs):
embeddings = []
for img in tqdm(imgs):
encod_img = self._image_encode(img)
embeddings.append(self.predictor(encod_img).squeeze().cpu().detach().numpy())
return embeddings
def _setup_predictor(self, model_arch, device=""):
if model_arch in self.model_arch_supported:
load_model = getattr(models, model_arch)
model = load_model(pretrained=True)
predictor = torch.nn.Sequential(*list(model.children())[:-1])
return predictor
else:
LOGGER.error(f"Supported for {model_arch} is not added yet")
sys.exit(1)
    def create_index(self):
        """Create a vector index for faster ANN search (not implemented yet)."""
        # TODO: create index
        pass
| [
"lancedb.connect",
"lancedb.embeddings.with_embeddings"
] | [((1044, 1064), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (1054, 1064), False, 'import cv2\n'), ((1214, 1249), 'numpy.frombuffer', 'np.frombuffer', (['img_encoded', 'np.byte'], {}), '(img_encoded, np.byte)\n', (1227, 1249), True, 'import numpy as np\n'), ((1260, 1300), 'cv2.imdecode', 'cv2.imdecode', (['nparr', 'cv2.IMREAD_ANYCOLOR'], {}), '(nparr, cv2.IMREAD_ANYCOLOR)\n', (1272, 1300), False, 'import cv2\n'), ((1075, 1089), 'pathlib.Path', 'Path', (['img_path'], {}), '(img_path)\n', (1079, 1089), False, 'from pathlib import Path\n'), ((3544, 3622), 'yoloexplorer.dataset.Dataset', 'Dataset', ([], {'img_path': 'trainset', 'data': 'self.dataset_info', 'augment': '(False)', 'cache': '(False)'}), '(img_path=trainset, data=self.dataset_info, augment=False, cache=False)\n', (3551, 3622), False, 'from yoloexplorer.dataset import get_dataset_info, Dataset\n'), ((4267, 4284), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4278, 4284), False, 'from collections import defaultdict\n'), ((6116, 6135), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (6119, 6135), False, 'from sklearn.decomposition import PCA\n'), ((6270, 6317), 'matplotlib.pyplot.scatter', 'plt.scatter', (['embeddings[:, 0]', 'embeddings[:, 1]'], {}), '(embeddings[:, 0], embeddings[:, 1])\n', (6281, 6317), True, 'import matplotlib.pyplot as plt\n'), ((6326, 6336), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6334, 6336), True, 'import matplotlib.pyplot as plt\n'), ((9593, 9629), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'rows', 'ncols': 'cols'}), '(nrows=rows, ncols=cols)\n', (9605, 9629), True, 'import matplotlib.pyplot as plt\n'), ((9881, 9891), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9889, 9891), True, 'import matplotlib.pyplot as plt\n'), ((13468, 13485), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""idx"""'], {}), "('idx')\n", (13478, 13485), True, 'import 
matplotlib.pyplot as plt\n'), ((13494, 13524), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""similarity count"""'], {}), "('similarity count')\n", (13504, 13524), True, 'import matplotlib.pyplot as plt\n'), ((13533, 13543), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13541, 13543), True, 'import matplotlib.pyplot as plt\n'), ((14841, 14887), 'pandas.concat', 'pd.concat', (['[table_df, data]'], {'ignore_index': '(True)'}), '([table_df, data], ignore_index=True)\n', (14850, 14887), True, 'import pandas as pd\n'), ((15716, 15763), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Dataset reset to original state."""'], {}), "('Dataset reset to original state.')\n", (15727, 15763), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((16230, 16281), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Persisting changes to the dataset..."""'], {}), "('Persisting changes to the dataset...')\n", (16241, 16281), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((17779, 17827), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Changes persisted to the dataset."""'], {}), "('Changes persisted to the dataset.')\n", (17790, 17827), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((18023, 18092), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""\n|-----------------------------------------------|"""'], {}), '("""\n|-----------------------------------------------|""")\n', (18034, 18092), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((18172, 18237), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""|------------------------------------------------|"""'], {}), "('|------------------------------------------------|')\n", (18183, 18237), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((19192, 19200), 'yoloexplorer.frontend.launch', 'launch', ([], {}), '()\n', (19198, 19200), False, 'from yoloexplorer.frontend import launch\n'), ((19680, 19704), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', 
(['success_log'], {}), '(success_log)\n', (19691, 19704), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((19769, 19798), 'lancedb.connect', 'lancedb.connect', (['self.project'], {}), '(self.project)\n', (19784, 19798), False, 'import lancedb\n'), ((19896, 19925), 'lancedb.connect', 'lancedb.connect', (['self.project'], {}), '(self.project)\n', (19911, 19925), False, 'import lancedb\n'), ((20055, 20084), 'lancedb.connect', 'lancedb.connect', (['self.project'], {}), '(self.project)\n', (20070, 20084), False, 'import lancedb\n'), ((20327, 20356), 'lancedb.connect', 'lancedb.connect', (['self.project'], {}), '(self.project)\n', (20342, 20356), False, 'import lancedb\n'), ((20663, 20710), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['f"""Copying table from {table_path}"""'], {}), "(f'Copying table from {table_path}')\n", (20674, 20710), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((20841, 20862), 'lancedb.connect', 'lancedb.connect', (['path'], {}), '(path)\n', (20856, 20862), False, 'import lancedb\n'), ((21042, 21057), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (21052, 21057), False, 'from PIL import Image\n'), ((21368, 21378), 'tqdm.tqdm', 'tqdm', (['imgs'], {}), '(imgs)\n', (21372, 21378), False, 'from tqdm import tqdm\n'), ((2346, 2356), 'pathlib.Path', 'Path', (['data'], {}), '(data)\n', (2350, 2356), False, 'from pathlib import Path\n'), ((2773, 2800), 'yoloexplorer.dataset.get_dataset_info', 'get_dataset_info', (['self.data'], {}), '(self.data)\n', (2789, 2800), False, 'from yoloexplorer.dataset import get_dataset_info, Dataset\n'), ((3792, 3909), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""LanceDB embedding space already exists. Attempting to reuse it. Use force=True to overwrite."""'], {}), "(\n 'LanceDB embedding space already exists. Attempting to reuse it. 
Use force=True to overwrite.'\n )\n", (3803, 3909), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((5996, 6082), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""No embedding space found. Please build the embedding space first."""'], {}), "(\n 'No embedding space found. Please build the embedding space first.')\n", (6008, 6082), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((6779, 6865), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""No embedding space found. Please build the embedding space first."""'], {}), "(\n 'No embedding space found. Please build the embedding space first.')\n", (6791, 6865), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((8007, 8034), 'numpy.mean', 'np.mean', (['embeddings'], {'axis': '(0)'}), '(embeddings, axis=0)\n', (8014, 8034), True, 'import numpy as np\n'), ((8928, 8951), 'cv2.imread', 'cv2.imread', (["row['path']"], {}), "(row['path'])\n", (8938, 8951), False, 'import cv2\n'), ((8970, 9006), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (8982, 9006), False, 'import cv2\n'), ((9343, 9374), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""No images found"""'], {}), "('No images found')\n", (9355, 9374), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((10534, 10620), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""No embedding space found. Please build the embedding space first."""'], {}), "(\n 'No embedding space found. Please build the embedding space first.')\n", (10546, 10620), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((10676, 10747), 'ultralytics.utils.LOGGER.warning', 'LOGGER.warning', (['"""top_k should be between 0 and 1. Setting top_k to 1.0"""'], {}), "('top_k should be between 0 and 1. 
Setting top_k to 1.0')\n", (10690, 10747), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((10808, 10879), 'ultralytics.utils.LOGGER.warning', 'LOGGER.warning', (['"""top_k should be between 0 and 1. Setting top_k to 0.0"""'], {}), "('top_k should be between 0 and 1. Setting top_k to 0.0')\n", (10822, 10879), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((11856, 11902), 'pyarrow.FixedSizeListArray.from_arrays', 'pa.FixedSizeListArray.from_arrays', (['values', 'dim'], {}), '(values, dim)\n', (11889, 11902), True, 'import pyarrow as pa\n'), ((12287, 12297), 'tqdm.tqdm', 'tqdm', (['embs'], {}), '(embs)\n', (12291, 12297), False, 'from tqdm import tqdm\n'), ((12680, 12704), 'numpy.sort', 'np.sort', (['self._sim_index'], {}), '(self._sim_index)\n', (12687, 12704), True, 'import numpy as np\n'), ((13390, 13404), 'numpy.sort', 'np.sort', (['index'], {}), '(index)\n', (13397, 13404), True, 'import numpy as np\n'), ((15313, 15359), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No changes made to the dataset."""'], {}), "('No changes made to the dataset.')\n", (15324, 15359), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((16155, 16201), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No changes made to the dataset."""'], {}), "('No changes made to the dataset.')\n", (16166, 16201), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((18450, 18517), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No table found. Please provide a dataset to work on."""'], {}), "('No table found. 
Please provide a dataset to work on.')\n", (18461, 18517), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((19159, 19182), 'json.dump', 'json.dump', (['config', 'file'], {}), '(config, file)\n', (19168, 19182), False, 'import json\n'), ((20726, 20742), 'pathlib.Path', 'Path', (['table_path'], {}), '(table_path)\n', (20730, 20742), False, 'from pathlib import Path\n'), ((20765, 20781), 'pathlib.Path', 'Path', (['table_path'], {}), '(table_path)\n', (20769, 20781), False, 'from pathlib import Path\n'), ((21079, 21094), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (21087, 21094), True, 'import numpy as np\n'), ((21884, 21944), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['f"""Supported for {model_arch} is not added yet"""'], {}), "(f'Supported for {model_arch} is not added yet')\n", (21896, 21944), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((21957, 21968), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (21965, 21968), False, 'import sys\n'), ((1115, 1137), 'cv2.imencode', 'cv2.imencode', (['ext', 'img'], {}), '(ext, img)\n', (1127, 1137), False, 'import cv2\n'), ((2877, 2906), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (2894, 2906), False, 'from torchvision import datasets, transforms\n'), ((2924, 2945), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2943, 2945), False, 'from torchvision import datasets, transforms\n'), ((4142, 4254), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Table length does not match the number of images in the dataset. Building embeddings..."""'], {}), "(\n 'Table length does not match the number of images in the dataset. 
Building embeddings...'\n )\n", (4153, 4254), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((5262, 5286), 'pandas.DataFrame', 'pd.DataFrame', (['table_data'], {}), '(table_data)\n', (5274, 5286), True, 'import pandas as pd\n'), ((5308, 5380), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['self._embedding_func', 'df', '"""path"""'], {'batch_size': 'batch_size'}), "(self._embedding_func, df, 'path', batch_size=batch_size)\n", (5323, 5380), False, 'from lancedb.embeddings import with_embeddings\n'), ((5648, 5665), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5659, 5665), False, 'from collections import defaultdict\n'), ((9052, 9066), 'ultralytics.utils.plotting.Annotator', 'Annotator', (['img'], {}), '(img)\n', (9061, 9066), False, 'from ultralytics.utils.plotting import Annotator, colors\n'), ((10986, 11065), 'ultralytics.utils.LOGGER.warning', 'LOGGER.warning', (['"""sim_thres should be between 0 and 1. Setting sim_thres to 1.0"""'], {}), "('sim_thres should be between 0 and 1. Setting sim_thres to 1.0')\n", (11000, 11065), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((11146, 11225), 'ultralytics.utils.LOGGER.warning', 'LOGGER.warning', (['"""sim_thres should be between 0 and 1. Setting sim_thres to 0.0"""'], {}), "('sim_thres should be between 0 and 1. 
Setting sim_thres to 0.0')\n", (11160, 11225), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((16481, 16491), 'pathlib.Path', 'Path', (['name'], {}), '(name)\n', (16485, 16491), False, 'from pathlib import Path\n'), ((18601, 18618), 'duckdb.sql', 'duckdb.sql', (['query'], {}), '(query)\n', (18611, 18618), False, 'import duckdb\n'), ((19426, 19447), 'ultralytics.utils.colorstr', 'colorstr', (['"""LanceDB: """'], {}), "('LanceDB: ')\n", (19434, 19447), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((5690, 5710), 'ultralytics.utils.colorstr', 'colorstr', (['"""LanceDB:"""'], {}), "('LanceDB:')\n", (5698, 5710), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((11817, 11829), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (11827, 11829), True, 'import pyarrow as pa\n'), ((18805, 18827), 'pathlib.Path', 'Path', (['TEMP_CONFIG_PATH'], {}), '(TEMP_CONFIG_PATH)\n', (18809, 18827), False, 'from pathlib import Path\n'), ((7473, 7580), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""img should be index from the table(int), path of an image (str or Path), or bytes"""'], {}), "(\n 'img should be index from the table(int), path of an image (str or Path), or bytes'\n )\n", (7485, 7580), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((17904, 17914), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (17912, 17914), False, 'from pathlib import Path\n'), ((20217, 20238), 'ultralytics.utils.colorstr', 'colorstr', (['"""LanceDB: """'], {}), "('LanceDB: ')\n", (20225, 20238), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((20601, 20622), 'ultralytics.utils.colorstr', 'colorstr', (['"""LanceDB: """'], {}), "('LanceDB: ')\n", (20609, 20622), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((7381, 7396), 'numpy.array', 'np.array', (['array'], {}), '(array)\n', (7389, 7396), True, 'import numpy as np\n'), ((9205, 9222), 'ultralytics.utils.plotting.colors', 'colors', (['cls', '(True)'], {}), '(cls, 
True)\n', (9211, 9222), False, 'from ultralytics.utils.plotting import Annotator, colors\n'), ((17865, 17891), 'pathlib.Path', 'Path', (['(path / datafile_name)'], {}), '(path / datafile_name)\n', (17869, 17891), False, 'from pathlib import Path\n')] |
"""
Run this script to benchmark the serial search performance of FTS and vector search
"""
import argparse
import random
from functools import lru_cache
from pathlib import Path
from typing import Any
from codetiming import Timer
from config import Settings
from rich import progress
from schemas.wine import SearchResult
from sentence_transformers import SentenceTransformer
import lancedb
from lancedb.table import Table
# Custom types
JsonBlob = dict[str, Any]
@lru_cache()
def get_settings():
    """Return the application Settings, cached for the lifetime of the process."""
    # Use lru_cache to avoid loading .env file for every request
    return Settings()
def get_query_terms(filename: str) -> list[str]:
    """
    Read benchmark query terms from a text file, one query per line.

    Args:
        filename: Name of a ``.txt`` file inside ``./benchmark_queries``.

    Returns:
        Non-empty, whitespace-stripped query strings.

    Raises:
        ValueError: If the filename is not a ``.txt`` file or the file
            contains no queries. (Previously these were ``assert``s, which
            are stripped under ``python -O``.)
    """
    if not filename.endswith(".txt"):
        raise ValueError(f"Expected a .txt file, got {filename!r}")
    query_terms_file = Path("./benchmark_queries") / filename
    with open(query_terms_file, "r") as f:
        # Skip blank lines so trailing newlines don't produce empty queries.
        queries = [line.strip() for line in f if line.strip()]
    if not queries:
        raise ValueError(f"No queries found in {query_terms_file}")
    return queries
def fts_search(table: Table, query: str) -> list[SearchResult] | None:
    """Run a full-text search on the description column; return up to 10 results or None."""
    fields = ["id", "title", "description", "country", "variety", "price", "points"]
    builder = table.search(query, vector_column_name="description")
    results = builder.select(fields).limit(10).to_pydantic(SearchResult)
    return results if results else None
def vector_search(model, table: Table, query: str) -> list[SearchResult] | None:
    """Encode the query and run a cosine ANN search; return up to 10 results or None."""
    fields = ["id", "title", "description", "country", "variety", "price", "points"]
    query_vector = model.encode(query.lower())
    results = (
        table.search(query_vector)
        .metric("cosine")
        .nprobes(20)
        .select(fields)
        .limit(10)
        .to_pydantic(SearchResult)
    )
    return results if results else None
def main():
    """Run LIMIT randomly-chosen queries serially against the lancedb table."""
    # The searches run directly on the lancedb table, so no service URL is
    # needed (the previously-assigned URL variables were dead code).
    if args.search == "fts":
        queries = get_query_terms("keyword_terms.txt")
    else:
        queries = get_query_terms("vector_terms.txt")
    random_choice_queries = [random.choice(queries) for _ in range(LIMIT)]
    with Timer(name="Serial search", text="Finished search in {:.4f} sec"):
        # Add rich progress bar
        with progress.Progress(
            "[progress.description]{task.description}",
            progress.BarColumn(),
            "[progress.percentage]{task.percentage:>3.0f}%",
            progress.TimeElapsedColumn(),
        ) as prog:
            overall_progress_task = prog.add_task(
                f"Performing {args.search} search", total=len(random_choice_queries)
            )
            for query in random_choice_queries:
                if args.search == "fts":
                    _ = fts_search(tbl, query)
                else:
                    _ = vector_search(MODEL, tbl, query)
                prog.update(overall_progress_task, advance=1)
if __name__ == "__main__":
    # fmt: off
    parser = argparse.ArgumentParser()
    parser.add_argument("--seed", type=int, default=37, help="Seed for random number generator")
    parser.add_argument("--limit", "-l", type=int, default=10, help="Number of search terms to randomly generate")
    parser.add_argument("--search", type=str, default="fts", help="Specify whether to do FTS or vector search")
    args = parser.parse_args()
    # fmt: on
    LIMIT = args.limit
    SEED = args.seed
    # Apply the seed so --seed actually makes query sampling reproducible
    # (it was previously parsed but never used).
    random.seed(SEED)
    # Assert that the search type is only one of "fts" or "vector"
    assert args.search in ["fts", "vector"], "Please specify a valid search type: 'fts' or 'vector'"
    # Assumes that the table in the DB has already been created
    DB_NAME = "./winemag"
    TABLE = "wines"
    db = lancedb.connect(DB_NAME)
    tbl = db.open_table(TABLE)
    # Load a sentence transformer model for semantic similarity from a specified checkpoint
    model_id = get_settings().embedding_model_checkpoint
    assert model_id, "Invalid embedding model checkpoint specified in .env file"
    MODEL = SentenceTransformer(model_id)
    main()
| [
"lancedb.connect"
] | [((471, 482), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (480, 482), False, 'from functools import lru_cache\n'), ((579, 589), 'config.Settings', 'Settings', ([], {}), '()\n', (587, 589), False, 'from config import Settings\n'), ((2943, 2968), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2966, 2968), False, 'import argparse\n'), ((3672, 3696), 'lancedb.connect', 'lancedb.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (3687, 3696), False, 'import lancedb\n'), ((3971, 4000), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['model_id'], {}), '(model_id)\n', (3990, 4000), False, 'from sentence_transformers import SentenceTransformer\n'), ((701, 728), 'pathlib.Path', 'Path', (['"""./benchmark_queries"""'], {}), "('./benchmark_queries')\n", (705, 728), False, 'from pathlib import Path\n'), ((2009, 2031), 'random.choice', 'random.choice', (['queries'], {}), '(queries)\n', (2022, 2031), False, 'import random\n'), ((2116, 2181), 'codetiming.Timer', 'Timer', ([], {'name': '"""Serial search"""', 'text': '"""Finished search in {:.4f} sec"""'}), "(name='Serial search', text='Finished search in {:.4f} sec')\n", (2121, 2181), False, 'from codetiming import Timer\n'), ((2315, 2335), 'rich.progress.BarColumn', 'progress.BarColumn', ([], {}), '()\n', (2333, 2335), False, 'from rich import progress\n'), ((2410, 2438), 'rich.progress.TimeElapsedColumn', 'progress.TimeElapsedColumn', ([], {}), '()\n', (2436, 2438), False, 'from rich import progress\n')] |
from neumai.Shared.NeumSinkInfo import NeumSinkInfo
from neumai.Shared.NeumVector import NeumVector
from neumai.Shared.NeumSearch import NeumSearchResult
from neumai.Shared.Exceptions import(
LanceDBInsertionException,
LanceDBIndexInfoException,
LanceDBIndexCreationException,
LanceDBQueryException
)
from neumai.SinkConnectors.SinkConnector import SinkConnector
from typing import List, Optional
from neumai.SinkConnectors.filter_utils import FilterCondition
from pydantic import Field
import lancedb
from lancedb import DBConnection
class LanceDBSink(SinkConnector):
    """
    LanceDB sink

    A sink connector for LanceDB, designed to facilitate data output into a
    LanceDB storage system. For details about LanceDB, refer to
    https://github.com/lancedb/lancedb.

    LanceDB supports flat search as well as ANN search.
    For indexing, read here - https://lancedb.github.io/lancedb/ann_indexes/#creating-an-ivf_pq-index

    Attributes:
    -----------
    uri: str
        URI for LanceDB database.
    api_key: str
        If presented, connect to LanceDB cloud.
        Otherwise, connect to a database on file system or cloud storage.
    region: str
        Region for use of LanceDB cloud.
    table_name: str
        Name of LanceDB table to use
    create_index: bool
        LanceDB offers flat search as well as ANN search. If set to True,
        a vector index would be created for searching instead of a
        brute-force knn search.
    metric: str
        The distance metric to use. By default it uses euclidean distance 'L2'.
        It also supports 'cosine' and 'dot' distance as well. Needs to be set if create_index is True.
    num_partitions: int
        The number of partitions of the index.
        Needs to be set if create_index is True. And needs to be altered as per data size.
    num_sub_vectors: int
        The number of sub-vectors (M) that will be created during
        Product Quantization (PQ). For D dimensional vector, it will be divided into
        M of D/M sub-vectors, each of which is presented by a single PQ code.
    accelerator: str
        The accelerator to use for the index creation process. Supports GPU and MPS.

    Example usage:
        ldb = LanceDBSink(uri="data/test_ldb_sink", table_name="demo_ldb_table")
        ldb.store(neum_vectors)
        ldb.search(query)
    """

    uri: str = Field(..., description="URI for LanceDB database")
    api_key: Optional[str] = Field(default=None, description="API key for LanceDB cloud")
    region: Optional[str] = Field(default=None, description="Region for use of LanceDB cloud")
    table_name: str = Field(..., description="Name of LanceDB table to use")
    create_index: bool = Field(default=False, description="Boolean to create index or use flat search")
    metric: str = Field(default="cosine", description="The distance metric to use in the index")
    num_partitions: int = Field(default=256, description="The number of partitions of the index")
    num_sub_vectors: int = Field(default=96, description="The number of sub-vectors (M) that will be created during Product Quantization (PQ)")
    accelerator: str = Field(default=None, description="Specify to cuda or mps (on Apple Silicon) to enable GPU training.")

    # Check API reference for more details
    # - https://lancedb.github.io/lancedb/python/python/#lancedb.connect

    @property
    def sink_name(self) -> str:
        """Human-readable connector name."""
        return "LanceDBSink"

    @property
    def required_properties(self) -> List[str]:
        """Configuration keys that must be supplied for this sink."""
        return ['uri', 'api_key', 'table_name']

    @property
    def optional_properties(self) -> List[str]:
        """Configuration keys that may optionally be supplied."""
        return []

    def validation(self) -> bool:
        """Validate connector setup by attempting to connect to LanceDB."""
        # A failed connection raises; reaching the return means the config works.
        lancedb.connect(uri=self.uri, api_key=self.api_key, region=self.region)
        return True

    def _get_db_connection(self) -> DBConnection:
        """Open and return a LanceDB connection using the configured credentials."""
        return lancedb.connect(uri=self.uri, api_key=self.api_key, region=self.region)

    def store(self, vectors_to_store: List[NeumVector]) -> int:
        """
        Write vectors (id, embedding and flattened metadata) to the table.

        Returns:
            The number of rows stored.

        Raises:
            LanceDBInsertionException: If the table could not be created.
        """
        db = self._get_db_connection()
        data = []
        for vec in vectors_to_store:
            row = {
                'id': vec.id,
                'vector': vec.vector,
            }
            # Flatten metadata entries into top-level columns.
            for k, v in vec.metadata.items():
                row[k] = v
            data.append(row)
        tbl = db.create_table(self.table_name, data=data, mode="overwrite")
        if tbl:
            return len(tbl.to_pandas())
        raise LanceDBInsertionException("LanceDB storing failed. Try later")

    def search(self, vector: List[float], number_of_results: int, filters: Optional[List[FilterCondition]] = None) -> List[NeumSearchResult]:
        """
        Search the table for the nearest neighbours of `vector`.

        Args:
            vector: Query embedding.
            number_of_results: Maximum number of matches to return.
            filters: Optional metadata filter conditions applied in the query.
                (Default changed from a mutable `[]` to None; behavior is the same.)

        Raises:
            LanceDBIndexCreationException: If index creation fails.
            LanceDBQueryException: If the query fails.
        """
        filters = filters if filters is not None else []
        db = self._get_db_connection()
        tbl = db.open_table(self.table_name)
        if self.create_index:
            # For more details, refer to docs
            # - https://lancedb.github.io/lancedb/python/python/#lancedb.table.Table.create_index
            try:
                tbl.create_index(
                    metric=self.metric,
                    num_partitions=self.num_partitions,
                    num_sub_vectors=self.num_sub_vectors,
                    accelerator=self.accelerator,
                    replace=True)
            except Exception as e:
                raise LanceDBIndexCreationException(f"LanceDB index creation failed. \nException - {e}")
        try:
            search_results = tbl.search(query=vector)
            for condition in filters:
                search_results = search_results.where(f"{condition.field} {condition.operator.value} {condition.value}")
            search_results = search_results.limit(number_of_results).to_pandas()
        except Exception as e:
            raise LanceDBQueryException(f"Failed to query LanceDB. Exception - {e}")
        matches = []
        cols = search_results.columns
        for i in range(len(search_results)):
            _id = search_results.iloc[i]['id']
            _vec = list(search_results.iloc[i]['vector'])
            matches.append(
                NeumSearchResult(
                    id=_id,
                    vector=_vec,
                    metadata={k: search_results.iloc[i][k] for k in cols if k not in ['id', 'vector', '_distance']},
                    # LanceDB returns a distance; convert to a similarity-style score.
                    score=1 - search_results.iloc[i]['_distance']
                )
            )
        return matches

    def get_representative_vector(self) -> list:
        """Return the element-wise mean of all stored vectors."""
        db = self._get_db_connection()
        tbl = db.open_table(self.table_name)
        return list(tbl.to_pandas()['vector'].mean())

    def info(self) -> NeumSinkInfo:
        """Return sink statistics (currently the number of stored vectors)."""
        try:
            db = self._get_db_connection()
            tbl = db.open_table(self.table_name)
            return NeumSinkInfo(number_vectors_stored=len(tbl))
        except Exception as e:
            raise LanceDBIndexInfoException(f"Failed to get information from LanceDB. Exception - {e}")

    def delete_vectors_with_file_id(self, file_id: str) -> bool:
        """Delete every vector whose `id` equals `file_id`; returns True on success."""
        db = self._get_db_connection()
        tbl = db.open_table(self.table_name)
        try:
            tbl.delete(where=f"id = '{file_id}'")
        except Exception as e:
            # Was a bare `except:`; chain the cause so the real failure is visible.
            raise Exception("LanceDB deletion by file id failed.") from e
        return True
return True | [
"lancedb.connect"
] | [((2397, 2447), 'pydantic.Field', 'Field', (['...'], {'description': '"""URI for LanceDB database"""'}), "(..., description='URI for LanceDB database')\n", (2402, 2447), False, 'from pydantic import Field\n'), ((2477, 2537), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""API key for LanceDB cloud"""'}), "(default=None, description='API key for LanceDB cloud')\n", (2482, 2537), False, 'from pydantic import Field\n'), ((2566, 2632), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Region for use of LanceDB cloud"""'}), "(default=None, description='Region for use of LanceDB cloud')\n", (2571, 2632), False, 'from pydantic import Field\n'), ((2655, 2709), 'pydantic.Field', 'Field', (['...'], {'description': '"""Name of LanceDB table to use"""'}), "(..., description='Name of LanceDB table to use')\n", (2660, 2709), False, 'from pydantic import Field\n'), ((2735, 2813), 'pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Boolean to create index or use flat search"""'}), "(default=False, description='Boolean to create index or use flat search')\n", (2740, 2813), False, 'from pydantic import Field\n'), ((2832, 2910), 'pydantic.Field', 'Field', ([], {'default': '"""cosine"""', 'description': '"""The distance metric to use in the index"""'}), "(default='cosine', description='The distance metric to use in the index')\n", (2837, 2910), False, 'from pydantic import Field\n'), ((2937, 3008), 'pydantic.Field', 'Field', ([], {'default': '(256)', 'description': '"""The number of partitions of the index"""'}), "(default=256, description='The number of partitions of the index')\n", (2942, 3008), False, 'from pydantic import Field\n'), ((3036, 3162), 'pydantic.Field', 'Field', ([], {'default': '(96)', 'description': '"""The number of sub-vectors (M) that will be created during Product Quantization (PQ)"""'}), "(default=96, description=\n 'The number of sub-vectors (M) that will be created during Product 
Quantization (PQ)'\n )\n", (3041, 3162), False, 'from pydantic import Field\n'), ((3176, 3281), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Specify to cuda or mps (on Apple Silicon) to enable GPU training."""'}), "(default=None, description=\n 'Specify to cuda or mps (on Apple Silicon) to enable GPU training.')\n", (3181, 3281), False, 'from pydantic import Field\n'), ((3852, 3923), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'self.uri', 'api_key': 'self.api_key', 'region': 'self.region'}), '(uri=self.uri, api_key=self.api_key, region=self.region)\n', (3867, 3923), False, 'import lancedb\n'), ((4015, 4086), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'self.uri', 'api_key': 'self.api_key', 'region': 'self.region'}), '(uri=self.uri, api_key=self.api_key, region=self.region)\n', (4030, 4086), False, 'import lancedb\n'), ((4629, 4691), 'neumai.Shared.Exceptions.LanceDBInsertionException', 'LanceDBInsertionException', (['"""LanceDB storing failed. Try later"""'], {}), "('LanceDB storing failed. Try later')\n", (4654, 4691), False, 'from neumai.Shared.Exceptions import LanceDBInsertionException, LanceDBIndexInfoException, LanceDBIndexCreationException, LanceDBQueryException\n'), ((5864, 5930), 'neumai.Shared.Exceptions.LanceDBQueryException', 'LanceDBQueryException', (['f"""Failed to query LanceDB. Exception - {e}"""'], {}), "(f'Failed to query LanceDB. 
Exception - {e}')\n", (5885, 5930), False, 'from neumai.Shared.Exceptions import LanceDBInsertionException, LanceDBIndexInfoException, LanceDBIndexCreationException, LanceDBQueryException\n'), ((6186, 6376), 'neumai.Shared.NeumSearch.NeumSearchResult', 'NeumSearchResult', ([], {'id': '_id', 'vector': '_vec', 'metadata': "{k: search_results.iloc[i][k] for k in cols if k not in ['id', 'vector',\n '_distance']}", 'score': "(1 - search_results.iloc[i]['_distance'])"}), "(id=_id, vector=_vec, metadata={k: search_results.iloc[i][k\n ] for k in cols if k not in ['id', 'vector', '_distance']}, score=1 -\n search_results.iloc[i]['_distance'])\n", (6202, 6376), False, 'from neumai.Shared.NeumSearch import NeumSearchResult\n'), ((6958, 7048), 'neumai.Shared.Exceptions.LanceDBIndexInfoException', 'LanceDBIndexInfoException', (['f"""Failed to get information from LanceDB. Exception - {e}"""'], {}), "(\n f'Failed to get information from LanceDB. Exception - {e}')\n", (6983, 7048), False, 'from neumai.Shared.Exceptions import LanceDBInsertionException, LanceDBIndexInfoException, LanceDBIndexCreationException, LanceDBQueryException\n'), ((5435, 5525), 'neumai.Shared.Exceptions.LanceDBIndexCreationException', 'LanceDBIndexCreationException', (['f"""LanceDB index creation failed. \nException - {e}"""'], {}), '(\n f"""LanceDB index creation failed. \nException - {e}""")\n', (5464, 5525), False, 'from neumai.Shared.Exceptions import LanceDBInsertionException, LanceDBIndexInfoException, LanceDBIndexCreationException, LanceDBQueryException\n')] |
from FlagEmbedding import LLMEmbedder, FlagReranker
import lancedb
import re
import pandas as pd
import random
from datasets import load_dataset
import torch
import gc
from lancedb.embeddings import with_embeddings
# Embedding model: BAAI "llm-embedder" encodes documents ("keys") and queries
# differently depending on the configured task.
embed_model = LLMEmbedder(
    "BAAI/llm-embedder", use_fp16=False
)  # Load model (automatically use GPUs)
# Cross-encoder used to re-score the vector-search hits.
reranker_model = FlagReranker(
    "BAAI/bge-reranker-base", use_fp16=True
)  # use_fp16 speeds up computation with a slight performance degradation
task = "qa"  # Encode for a specific task (qa, icl, chat, lrlm, tool, convsearch)
# get embedding using LLM embedder
def embed_documents(batch):
    """Embed a batch of documents ("keys") with the shared LLM embedder."""
    # Keys are encoded differently from queries; see `search` for the query side.
    encoded = embed_model.encode_keys(batch, task=task)
    return encoded
def search(table, query, top_k=10):
    """Vector-search `table` for `query`, returning at most `top_k` hits."""
    # Queries use a dedicated encoder — NOT the same path as key encoding.
    query_vector = embed_model.encode_queries(query, task=task)
    return table.search(query_vector).limit(top_k)
def rerank(query, search_results):
    """Re-score `search_results` against `query` with the BGE reranker and sort."""
    # Preserve the ranks produced by the vector search (1-based).
    search_results["old_similarity_rank"] = search_results.index + 1
    # Free GPU memory before running the (large) reranker model.
    torch.cuda.empty_cache()
    gc.collect()
    pairs = [[query, chunk] for chunk in search_results["text"]]
    search_results["new_scores"] = reranker_model.compute_score(pairs)
    reranked = search_results.sort_values(by="new_scores", ascending=False)
    return reranked.reset_index(drop=True)
def main():
    """End-to-end demo: embed a SciDocs sample, vector-search it, then rerank."""
    queries = load_dataset("BeIR/scidocs", "queries")["queries"].to_pandas()
    docs = (
        load_dataset("BeIR/scidocs", "corpus")["corpus"]
        .to_pandas()
        .dropna(subset="text")
        .sample(10000)
    )  # just random samples for a faster embedding demo

    # Create the database using LanceDB Cloud.
    uri = "db://your-project-slug"
    api_key = "sk_..."
    db = lancedb.connect(uri, api_key=api_key, region="us-east-1")
    table_name = "doc_embed"
    try:
        # Embed the text chunks and persist them in a new table.
        data = with_embeddings(
            embed_documents, docs, column="text", show_progress=True, batch_size=128
        )
        table = db.create_table(table_name, data=data)  # create Table
    except Exception:
        # BUGFIX: was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit; Exception is the widest safe catch.
        table = db.open_table(table_name)  # Open existing Table

    query = random.choice(queries["text"])
    print("QUERY:-> ", query)
    # BUGFIX: search with the sampled query. The original searched the
    # hard-coded string "what is mitochondria?" while printing and reranking
    # the random query, so the reranker scored results from a different search.
    search_results = (
        search(table, query, top_k=10)
        .to_pandas()
        .dropna(subset="text")
        .reset_index(drop=True)
    )
    print("SEARCH RESULTS:-> ", search_results)

    # Rerank the vector-search hits with the BGE reranker.
    print("QUERY:-> ", query)
    search_results_reranked = rerank(query, search_results)
    print("SEARCH RESULTS RERANKED:-> ", search_results_reranked)


if __name__ == "__main__":
    main()
| [
"lancedb.connect",
"lancedb.embeddings.with_embeddings"
] | [((233, 281), 'FlagEmbedding.LLMEmbedder', 'LLMEmbedder', (['"""BAAI/llm-embedder"""'], {'use_fp16': '(False)'}), "('BAAI/llm-embedder', use_fp16=False)\n", (244, 281), False, 'from FlagEmbedding import LLMEmbedder, FlagReranker\n'), ((344, 397), 'FlagEmbedding.FlagReranker', 'FlagReranker', (['"""BAAI/bge-reranker-base"""'], {'use_fp16': '(True)'}), "('BAAI/bge-reranker-base', use_fp16=True)\n", (356, 397), False, 'from FlagEmbedding import LLMEmbedder, FlagReranker\n'), ((1196, 1220), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (1218, 1220), False, 'import torch\n'), ((1225, 1237), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1235, 1237), False, 'import gc\n'), ((1897, 1954), 'lancedb.connect', 'lancedb.connect', (['uri'], {'api_key': 'api_key', 'region': '"""us-east-1"""'}), "(uri, api_key=api_key, region='us-east-1')\n", (1912, 1954), False, 'import lancedb\n'), ((2072, 2165), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_documents', 'docs'], {'column': '"""text"""', 'show_progress': '(True)', 'batch_size': '(128)'}), "(embed_documents, docs, column='text', show_progress=True,\n batch_size=128)\n", (2087, 2165), False, 'from lancedb.embeddings import with_embeddings\n'), ((2340, 2370), 'random.choice', 'random.choice', (["queries['text']"], {}), "(queries['text'])\n", (2353, 2370), False, 'import random\n'), ((1528, 1567), 'datasets.load_dataset', 'load_dataset', (['"""BeIR/scidocs"""', '"""queries"""'], {}), "('BeIR/scidocs', 'queries')\n", (1540, 1567), False, 'from datasets import load_dataset\n'), ((1612, 1650), 'datasets.load_dataset', 'load_dataset', (['"""BeIR/scidocs"""', '"""corpus"""'], {}), "('BeIR/scidocs', 'corpus')\n", (1624, 1650), False, 'from datasets import load_dataset\n')] |
import os
import urllib.request
import shutil
import html2text
import predictionguard as pg
from langchain import PromptTemplate, FewShotPromptTemplate
from langchain.text_splitter import CharacterTextSplitter
from sentence_transformers import SentenceTransformer
import numpy as np
import lancedb
from lancedb.embeddings import with_embeddings
import pandas as pd
import json
# SECURITY NOTE(review): an API token should come from the environment or a
# secrets manager; it should not be hard-coded (and committed) in source.
os.environ['PREDICTIONGUARD_TOKEN'] = "q1VuOjnffJ3NO2oFN8Q9m8vghYc84ld13jaqdF7E"
# get the ruleset from a local file
fp = urllib.request.urlopen("file:///home/ubuntu/insuranceagent.html")
mybytes = fp.read()
html = mybytes.decode("utf8")
fp.close()
# and convert it to text
h = html2text.HTML2Text()
h.ignore_links = True
text = h.handle(html)
# Keep only the part after the "Introduction" heading.
text = text.split("Introduction")[1]
# Chunk the text into smaller pieces for injection into LLM prompts.
text_splitter = CharacterTextSplitter(chunk_size=700, chunk_overlap=50)
docs = text_splitter.split_text(text)
# Replace markdown heading markers so they don't leak into the prompts.
docs = [x.replace('#', '-') for x in docs]
# Now we need to embed these documents and put them into a "vector store" or
# "vector db" that we will use for semantic search and retrieval.
# Embeddings setup
name="all-MiniLM-L12-v2"
model = SentenceTransformer(name)
def embed_batch(batch):
    """Embed every sentence in `batch` individually; returns a list of vectors."""
    vectors = []
    for sentence in batch:
        vectors.append(model.encode(sentence))
    return vectors
def embed(sentence):
    """Embed a single sentence with the shared SentenceTransformer model."""
    vector = model.encode(sentence)
    return vector
# LanceDB setup: recreate the local vector store from scratch on every run.
if os.path.exists(".lancedb"):
    shutil.rmtree(".lancedb")
os.mkdir(".lancedb")
uri = ".lancedb"
db = lancedb.connect(uri)
# Create a dataframe pairing each chunk index with its text.
metadata = []
for i in range(len(docs)):
    metadata.append([i, docs[i]])
doc_df = pd.DataFrame(metadata, columns=["chunk", "text"])
# Embed the documents
data = with_embeddings(embed_batch, doc_df)
# Create the DB table. BUGFIX: `create_table(..., data=data)` already inserts
# the records, so the original's extra `table.add(data=data)` duplicated every
# chunk in the table (and thus in every search result).
db.create_table("linux", data=data)
table = db.open_table("linux")
# Now let's augment our Q&A prompt with this external knowledge on-the-fly!!!
# {context} is replaced with the retrieved chunk, {question} with the query.
template = """### Instruction:
Read the below input context and respond with a short answer to the given question. Use only the information in the below input to answer the question. If you cannot answer the question, respond with "Sorry, I can't find an answer, but you might try looking in the following resource."
### Input:
Context: {context}
Question: {question}
### Response:
"""
qa_prompt = PromptTemplate(
    input_variables=["context", "question"],
    template=template,
)
# define the pre-prompt in order to give the LLM a little bit of expertise
pre_prompt="You are an expert insurance agent. You are getting information about a property. The information is a mixture of the state of the house and the homeowner's complaints. The state of the house will be just a few words describing the condition (for example, water damage). You will analyze the input and produce exactly three insights. These insights should constitute maintenance and protection recommendations for homeowners tailored to their home's condition. All the insights are at most 20 words long. Generate the insights in this form: Insight 1: (text), then on a new line, Insight 2: (text), then on a new line, Insight 3: (text). Only generate the insights and nothing else. Keep a professional tone. Do not make quote anyone. Do not add unrelated information. Do not add any code. Here is the home's condition: "
def rag_answer(message):
    """Answer `message` by retrieving the closest context chunk and prompting the LLM."""
    # Retrieve candidate chunks, nearest first, and keep the best one.
    hits = table.search(embed(message)).limit(10).to_pandas()
    hits.sort_values(by=["_distance"], inplace=True, ascending=True)
    context = hits["text"].values[0]
    # Inject the retrieved context into the Q&A prompt.
    filled_prompt = qa_prompt.format(context=context, question=message)
    # Get a completion from the hosted model.
    completion = pg.Completion.create(
        model="Nous-Hermes-Llama2-13B",
        prompt=filled_prompt,
    )
    return completion["choices"][0]["text"]
# Load the vision model's findings about the property.
with open('vision_output.json','r') as json_file:
    data=json.load(json_file)
visionoutput=data['vision_output']
# Load the homeowner's complaint text.
with open('data.json','r') as json_file:
    data=json.load(json_file)
ownercomplaint=data['text']
# Combine both into a single description of the house's condition.
house_condition=visionoutput+". "+ownercomplaint
#house_condition="Water damage. The gas lines don't work. The kitchen is spotless. The building is in good condition and the walls do not have any cracks in them. There is a termite infestation in the basement."
response=rag_answer(pre_prompt+house_condition)
print('')
print("3 insights that we've generated based on your report are:\n", response)
# Persist the generated insights for downstream steps.
with open('insights.json', 'w') as json_file:
    json.dump(response,json_file)
# Claim probability produced by the statistical model.
with open('stats_output.json','r') as json_file:
    data=json.load(json_file)
predicted_claim=str(data['stats'])
full_report_pre_prompt="You are an expert insurance agent. You have been given a list of personalized insights about a home that has been surveyed, along with a probability that the homeowner files a claim in the next 3 to 6 months. Based on this, give the property a rating from 1 to 5, where 5 means that the property is healthy, and also explain why the rating was given in not more than 180 words, based on the input insights. A rating of 1 means that the property is not healthy at all. In this scenario, a healthy property is one that has mostly positive or neutral insights and a low probability of having a claim filed. An unhealthy probability is one that has mostly negative insights and a high probability of having a claim filed. Remember that even if the homeowner has a high chance of filing a claim, the property may have positive insights and therefore you should give it a higher score. The rating should be at the beginning of your response. Ensure that you do not have any incomplete sentences. Do not quote anyone. Do not quote any insights verbatim. Keep the tone professional. You are permitted to expand upon the insights but do not stray. Ensure that you complete each sentence. Keep the report to only one continuous paragraph. The insights are: "
# BUGFIX: the generated insights were never appended, so the prompt ended at
# "The insights are: " followed directly by the claim probability. Include
# `response` so the model actually sees the insights it is asked to rate.
full_report_final_prompt=full_report_pre_prompt+response+" .The probability of filing a claim is: "+str(predicted_claim)
full_report=rag_answer(full_report_final_prompt)
print("The full report is: ")
print(full_report)
# Save the final report.
with open('fullreport.json','w') as json_file:
    json.dump(full_report,json_file)
| [
"lancedb.connect",
"lancedb.embeddings.with_embeddings"
] | [((657, 678), 'html2text.HTML2Text', 'html2text.HTML2Text', ([], {}), '()\n', (676, 678), False, 'import html2text\n'), ((847, 902), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(700)', 'chunk_overlap': '(50)'}), '(chunk_size=700, chunk_overlap=50)\n', (868, 902), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1183, 1208), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['name'], {}), '(name)\n', (1202, 1208), False, 'from sentence_transformers import SentenceTransformer\n'), ((1368, 1394), 'os.path.exists', 'os.path.exists', (['""".lancedb"""'], {}), "('.lancedb')\n", (1382, 1394), False, 'import os\n'), ((1427, 1447), 'os.mkdir', 'os.mkdir', (['""".lancedb"""'], {}), "('.lancedb')\n", (1435, 1447), False, 'import os\n'), ((1470, 1490), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1485, 1490), False, 'import lancedb\n'), ((1626, 1675), 'pandas.DataFrame', 'pd.DataFrame', (['metadata'], {'columns': "['chunk', 'text']"}), "(metadata, columns=['chunk', 'text'])\n", (1638, 1675), True, 'import pandas as pd\n'), ((1706, 1742), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_batch', 'doc_df'], {}), '(embed_batch, doc_df)\n', (1721, 1742), False, 'from lancedb.embeddings import with_embeddings\n'), ((2355, 2429), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['context', 'question']", 'template': 'template'}), "(input_variables=['context', 'question'], template=template)\n", (2369, 2429), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((1400, 1425), 'shutil.rmtree', 'shutil.rmtree', (['""".lancedb"""'], {}), "('.lancedb')\n", (1413, 1425), False, 'import shutil\n'), ((3719, 3786), 'predictionguard.Completion.create', 'pg.Completion.create', ([], {'model': '"""Nous-Hermes-Llama2-13B"""', 'prompt': 'prompt'}), "(model='Nous-Hermes-Llama2-13B', prompt=prompt)\n", (3739, 3786), True, 
'import predictionguard as pg\n'), ((3902, 3922), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (3911, 3922), False, 'import json\n'), ((4010, 4030), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (4019, 4030), False, 'import json\n'), ((4800, 4830), 'json.dump', 'json.dump', (['response', 'json_file'], {}), '(response, json_file)\n', (4809, 4830), False, 'import json\n'), ((4889, 4909), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (4898, 4909), False, 'import json\n'), ((6933, 6966), 'json.dump', 'json.dump', (['full_report', 'json_file'], {}), '(full_report, json_file)\n', (6942, 6966), False, 'import json\n')] |
import json
import logging
from typing import Any, Dict, Generator, List, Optional, Sequence, Set, Tuple, Type
import lancedb
import pandas as pd
from dotenv import load_dotenv
from lancedb.pydantic import LanceModel, Vector
from lancedb.query import LanceVectorQueryBuilder
from pydantic import BaseModel, ValidationError, create_model
from src.embedding_models.base import (
EmbeddingModel,
EmbeddingModelsConfig,
)
from src.embedding_models.models import OpenAIEmbeddingsConfig
from src.types import Document, EmbeddingFunction
from src.utils.configuration import settings
from src.utils.pydantic_utils import (
clean_schema,
dataframe_to_document_model,
dataframe_to_documents,
extract_fields,
flatten_pydantic_instance,
flatten_pydantic_model,
nested_dict_from_flat,
)
from src.db.base import VectorStore, VectorStoreConfig
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
class LanceDBConfig(VectorStoreConfig):
    # Default collection (table) name; None defers collection creation.
    collection_name: str | None = "temp"
    # Directory for the local LanceDB storage.
    storage_path: str = ".lancedb/data"
    # Embedding backend configuration (OpenAI embeddings by default).
    embedding: EmbeddingModelsConfig = OpenAIEmbeddingsConfig()
    # Distance metric passed to LanceDB vector search.
    distance: str = "cosine"
    # Pydantic class describing stored documents.
    document_class: Type[Document] = Document
    flatten: bool = False  # flatten Document class into LanceSchema ?
    filter_fields: List[str] = []  # fields usable in filter
    filter: str | None = None  # filter condition for lexical/semantic search
class LanceDB(VectorStore):
    """Vector store backed by a local LanceDB database."""

    def __init__(self, config: LanceDBConfig | None = None):
        """Connect to LanceDB and optionally create the configured collection.

        Args:
            config: Store configuration; a fresh ``LanceDBConfig`` is created
                when omitted. (BUGFIX: the original default was
                ``config=LanceDBConfig()`` — a mutable default evaluated once
                at class-definition time and therefore shared by every
                instance constructed without an explicit config.)
        """
        if config is None:
            config = LanceDBConfig()
        super().__init__(config)
        self.config: LanceDBConfig = config
        emb_model = EmbeddingModel.create(config.embedding)
        self.embedding_fn: EmbeddingFunction = emb_model.embedding_fn()
        self.embedding_dim = emb_model.embedding_dims
        self.host = None
        self.port = None
        self.is_from_dataframe = False  # were docs ingested from a dataframe?
        self.df_metadata_columns: List[str] = []  # metadata columns from dataframe
        self._setup_schemas(config.document_class)

        load_dotenv()
        try:
            self.client = lancedb.connect(
                uri=config.storage_path,
            )
        except Exception as e:
            # Fall back to a sibling directory if the primary path is unusable.
            new_storage_path = config.storage_path + ".new"
            logger.warning(
                f"""
                Error connecting to local LanceDB at {config.storage_path}:
                {e}
                Switching to {new_storage_path}
                """
            )
            self.client = lancedb.connect(
                uri=new_storage_path,
            )

        # Note: Only create collection if a non-null collection name is provided.
        # This is useful to delay creation of vecdb until we have a suitable
        # collection name (e.g. we could get it from the url or folder path).
        if config.collection_name is not None:
            self.create_collection(
                config.collection_name, replace=config.replace_collection
            )
def _setup_schemas(self, doc_cls: Type[Document] | None) -> None:
doc_cls = doc_cls or self.config.document_class
self.unflattened_schema = self._create_lance_schema(doc_cls)
self.schema = (
self._create_flat_lance_schema(doc_cls)
if self.config.flatten
else self.unflattened_schema
)
def clear_empty_collections(self) -> int:
coll_names = self.list_collections()
n_deletes = 0
for name in coll_names:
nr = self.client.open_table(name).head(1).shape[0]
if nr == 0:
n_deletes += 1
self.client.drop_table(name)
return n_deletes
def clear_all_collections(self, really: bool = False, prefix: str = "") -> int:
"""Clear all collections with the given prefix."""
if not really:
logger.warning("Not deleting all collections, set really=True to confirm")
return 0
coll_names = [
c for c in self.list_collections(empty=True) if c.startswith(prefix)
]
if len(coll_names) == 0:
logger.warning(f"No collections found with prefix {prefix}")
return 0
n_empty_deletes = 0
n_non_empty_deletes = 0
for name in coll_names:
nr = self.client.open_table(name).head(1).shape[0]
n_empty_deletes += nr == 0
n_non_empty_deletes += nr > 0
self.client.drop_table(name)
logger.warning(
f"""
Deleted {n_empty_deletes} empty collections and
{n_non_empty_deletes} non-empty collections.
"""
)
return n_empty_deletes + n_non_empty_deletes
def list_collections(self, empty: bool = False) -> List[str]:
"""
Returns:
List of collection names that have at least one vector.
Args:
empty (bool, optional): Whether to include empty collections.
"""
colls = self.client.table_names()
if len(colls) == 0:
return []
if empty: # include empty tbls
return colls # type: ignore
counts = [self.client.open_table(coll).head(1).shape[0] for coll in colls]
return [coll for coll, count in zip(colls, counts) if count > 0]
    def _create_lance_schema(self, doc_cls: Type[Document]) -> Type[BaseModel]:
        """
        Create a LanceModel subclass with fields:
        - id (str)
        - vector: a Vector whose dims equal the embedding model's dimension
        - every field declared on `doc_cls`

        Args:
            doc_cls (Type[Document]): A Pydantic model which should be a subclass of
                Document, whose fields are copied onto the new model.

        Returns:
            Type[BaseModel]: A new Pydantic model subclassing from LanceModel.

        Raises:
            ValueError: If `doc_cls` is not a subclass of Document.
        """
        if not issubclass(doc_cls, Document):
            raise ValueError("DocClass must be a subclass of Document")
        n = self.embedding_dim
        # Prepare fields for the new model
        fields = {"id": (str, ...), "vector": (Vector(n), ...)}
        # Add both statically and dynamically defined fields from doc_cls
        for field_name, field in doc_cls.model_fields.items():
            fields[field_name] = (field.annotation, field.default)
        # Create the new model with dynamic fields
        NewModel = create_model(
            "NewModel", __base__=LanceModel, **fields
        )  # type: ignore
        return NewModel  # type: ignore
def _create_flat_lance_schema(self, doc_cls: Type[Document]) -> Type[BaseModel]:
"""
Flat version of the lance_schema, as nested Pydantic schemas are not yet
supported by LanceDB.
"""
lance_model = self._create_lance_schema(doc_cls)
FlatModel = flatten_pydantic_model(lance_model, base_model=LanceModel)
return FlatModel
    def create_collection(self, collection_name: str, replace: bool = False) -> None:
        """
        Create a collection with the given name, optionally replacing an existing
        collection if `replace` is True.
        Args:
            collection_name (str): Name of the collection to create.
            replace (bool): Whether to replace an existing collection
                with the same name. Defaults to False.
        """
        self.config.collection_name = collection_name
        collections = self.list_collections()
        if collection_name in collections:
            coll = self.client.open_table(collection_name)
            if coll.head().shape[0] > 0:
                logger.warning(f"Non-empty Collection {collection_name} already exists")
                if not replace:
                    logger.warning("Not replacing collection")
                    return
                else:
                    logger.warning("Recreating fresh collection")
        # mode="overwrite" drops any existing table with this name.
        self.client.create_table(
            collection_name, schema=self.schema, mode="overwrite", on_bad_vectors="drop"
        )
        tbl = self.client.open_table(self.config.collection_name)
        # We assume "content" is available as top-level field
        if "content" in tbl.schema.names:
            tbl.create_fts_index("content", replace=True)
        if settings.debug:
            # NOTE(review): this saves and immediately restores the log level,
            # which is a no-op — confirm whether a log call went missing here.
            level = logger.getEffectiveLevel()
            logger.setLevel(logging.INFO)
            logger.setLevel(level)
def add_documents(self, documents: Sequence[Document]) -> None:
super().maybe_add_ids(documents)
colls = self.list_collections(empty=True)
if len(documents) == 0:
return
embedding_vecs = self.embedding_fn([doc.content for doc in documents])
coll_name = self.config.collection_name
if coll_name is None:
raise ValueError("No collection name set, cannot ingest docs")
if (
coll_name not in colls
or self.client.open_table(coll_name).head(1).shape[0] == 0
):
# collection either doesn't exist or is empty, so replace it,
# possibly with a new schema
doc_cls = type(documents[0])
self.config.document_class = doc_cls
self._setup_schemas(doc_cls)
self.create_collection(coll_name, replace=True)
ids = [str(d.id()) for d in documents]
# don't insert all at once, batch in chunks of b,
# else we get an API error
b = self.config.batch_size
def make_batches() -> Generator[List[BaseModel], None, None]:
for i in range(0, len(ids), b):
batch = [
self.unflattened_schema(
id=ids[i],
vector=embedding_vecs[i],
**doc.model_dump(),
)
for i, doc in enumerate(documents[i : i + b])
]
if self.config.flatten:
batch = [
flatten_pydantic_instance(instance) # type: ignore
for instance in batch
]
yield batch
tbl = self.client.open_table(self.config.collection_name)
try:
tbl.add(make_batches())
if "content" in tbl.schema.names:
tbl.create_fts_index("content", replace=True)
except Exception as e:
logger.error(
f"""
Error adding documents to LanceDB: {e}
POSSIBLE REMEDY: Delete the LancdDB storage directory
{self.config.storage_path} and try again.
"""
)
    def add_dataframe(
        self,
        df: pd.DataFrame,
        content: str = "content",
        metadata: List[str] = [],
    ) -> None:
        """
        Add a dataframe to the collection.
        Args:
            df (pd.DataFrame): A dataframe
            content (str): The name of the column in the dataframe that contains the
                text content to be embedded using the embedding model.
            metadata (List[str]): A list of column names in the dataframe that contain
                metadata to be stored in the database. Defaults to [].
        """
        self.is_from_dataframe = True
        # NOTE: the mutable default `metadata=[]` is safe here because it is
        # copied immediately and only the copy is mutated below.
        actual_metadata = metadata.copy()
        self.df_metadata_columns = actual_metadata  # could be updated below
        # get content column
        content_values = df[content].values.tolist()
        if "vector" not in df.columns:
            embedding_vecs = self.embedding_fn(content_values)
            df["vector"] = embedding_vecs
        if content != "content":
            # rename content column to "content", leave existing column intact
            df = df.rename(columns={content: "content"}, inplace=False)
        if "id" not in df.columns:
            # Derive deterministic ids from the documents themselves.
            docs = dataframe_to_documents(df, content="content", metadata=metadata)
            ids = [str(d.id()) for d in docs]
            df["id"] = ids
            if "id" not in actual_metadata:
                actual_metadata += ["id"]
        colls = self.list_collections(empty=True)
        coll_name = self.config.collection_name
        if (
            coll_name not in colls
            or self.client.open_table(coll_name).head(1).shape[0] == 0
        ):
            # collection either doesn't exist or is empty, so replace it
            # and set new schema from df
            self.client.create_table(
                self.config.collection_name,
                data=df,
                mode="overwrite",
                on_bad_vectors="drop",
            )
            # Re-derive the document class from the dataframe's columns.
            doc_cls = dataframe_to_document_model(
                df,
                content=content,
                metadata=actual_metadata,
                exclude=["vector"],
            )
            self.config.document_class = doc_cls  # type: ignore
            self._setup_schemas(doc_cls)  # type: ignore
            tbl = self.client.open_table(self.config.collection_name)
            # We assume "content" is available as top-level field
            if "content" in tbl.schema.names:
                tbl.create_fts_index("content", replace=True)
        else:
            # collection exists and is not empty, so append to it
            tbl = self.client.open_table(self.config.collection_name)
            tbl.add(df)
            if "content" in tbl.schema.names:
                tbl.create_fts_index("content", replace=True)
    def delete_collection(self, collection_name: str) -> None:
        """Drop the named collection (table) from the LanceDB database."""
        self.client.drop_table(collection_name)
def _lance_result_to_docs(self, result: LanceVectorQueryBuilder) -> List[Document]:
if self.is_from_dataframe:
df = result.to_pandas()
return dataframe_to_documents(
df,
content="content",
metadata=self.df_metadata_columns,
doc_cls=self.config.document_class,
)
else:
records = result.to_arrow().to_pylist()
return self._records_to_docs(records)
    def _records_to_docs(self, records: List[Dict[str, Any]]) -> List[Document]:
        """Validate raw Lance records against the schema and rebuild Documents."""
        if self.config.flatten:
            # Flattened rows must be un-flattened before schema validation.
            docs = [
                self.unflattened_schema(**nested_dict_from_flat(rec)) for rec in records
            ]
        else:
            try:
                docs = [self.schema(**rec) for rec in records]
            except ValidationError as e:
                raise ValueError(
                    f"""
                Error validating LanceDB result: {e}
                HINT: This could happen when you're re-using an
                existing LanceDB store with a different schema.
                Try deleting your local lancedb storage at `{self.config.storage_path}`
                re-ingesting your documents and/or replacing the collections.
                """
                )
        # Project each schema instance back onto the configured Document class.
        doc_cls = self.config.document_class
        doc_cls_field_names = doc_cls.model_fields.keys()
        return [
            doc_cls(
                **{
                    field_name: getattr(doc, field_name)
                    for field_name in doc_cls_field_names
                }
            )
            for doc in docs
        ]
def get_all_documents(self, where: str = "") -> List[Document]:
if self.config.collection_name is None:
raise ValueError("No collection name set, cannot retrieve docs")
tbl = self.client.open_table(self.config.collection_name)
pre_result = tbl.search(None).where(where or None)
return self._lance_result_to_docs(pre_result)
    def get_documents_by_ids(self, ids: List[str]) -> List[Document]:
        """Look up documents by their string ids.

        NOTE(review): each element appended below is itself a list of matches,
        so the value returned is effectively a list of lists despite the
        annotation — confirm whether callers expect the results flattened.
        """
        if self.config.collection_name is None:
            raise ValueError("No collection name set, cannot retrieve docs")
        _ids = [str(id) for id in ids]
        tbl = self.client.open_table(self.config.collection_name)
        docs = [
            self._lance_result_to_docs(tbl.search().where(f"id == '{_id}'"))
            for _id in _ids
        ]
        return docs
    def similar_texts_with_scores(
        self,
        text: str,
        k: int = 1,
        where: Optional[str] = None,
    ) -> List[Tuple[Document, float]]:
        """Embed `text` and return the `k` nearest documents with similarity scores.

        Scores are computed as 1 - `_distance` as reported by LanceDB.
        """
        embedding = self.embedding_fn([text])[0]
        tbl = self.client.open_table(self.config.collection_name)
        result = (
            tbl.search(embedding).metric(self.config.distance).where(where).limit(k)
        )
        docs = self._lance_result_to_docs(result)
        # note _distance is 1 - cosine
        if self.is_from_dataframe:
            scores = [
                1 - rec["_distance"] for rec in result.to_pandas().to_dict("records")
            ]
        else:
            scores = [1 - rec["_distance"] for rec in result.to_arrow().to_pylist()]
        if len(docs) == 0:
            logger.warning(f"No matches found for {text}")
            return []
        if settings.debug:
            logger.info(f"Found {len(docs)} matches, max score: {max(scores)}")
        doc_score_pairs = list(zip(docs, scores))
        self.show_if_debug(doc_score_pairs)
        return doc_score_pairs
    def get_fts_chunks(
        self,
        query: str,
        k: int = 5,
        where: Optional[str] = None,
    ) -> List[Tuple[Document, float]]:
        """
        Uses LanceDB FTS (Full Text Search).
        """
        # Clean up query: replace all newlines with spaces in query,
        # force special search keywords to lower case, remove quotes,
        # so it's not interpreted as code syntax
        query_clean = (
            query.replace("\n", " ")
            .replace("AND", "and")
            .replace("OR", "or")
            .replace("NOT", "not")
            .replace("'", "")
            .replace('"', "")
        )
        tbl = self.client.open_table(self.config.collection_name)
        # NOTE(review): the FTS index is rebuilt on every call — consider
        # caching/skipping when the index is already up to date.
        tbl.create_fts_index(field_names="content", replace=True)
        result = tbl.search(query_clean).where(where).limit(k).with_row_id(True)
        docs = self._lance_result_to_docs(result)
        scores = [r["score"] for r in result.to_list()]
        return list(zip(docs, scores))
    def _get_clean_vecdb_schema(self) -> str:
        """Get a cleaned schema of the vector-db, to pass to the LLM
        as part of instructions on how to generate a SQL filter."""
        if len(self.config.filter_fields) == 0:
            # Infer filterable fields from the table's (flattened) columns.
            filterable_fields = (
                self.client.open_table(self.config.collection_name)
                .search()
                .limit(1)
                .to_pandas(flatten=True)
                .columns.tolist()
            )
            # drop id, vector, metadata.id, metadata.window_ids, metadata.is_chunk
            for fields in [
                "id",
                "vector",
                "metadata.id",
                "metadata.window_ids",
                "metadata.is_chunk",
            ]:
                if fields in filterable_fields:
                    filterable_fields.remove(fields)
            logger.warning(
                f"""
                No filter_fields set in config, so using these fields as filterable fields:
                {filterable_fields}
                """
            )
            self.config.filter_fields = filterable_fields
        if self.is_from_dataframe:
            # NOTE(review): this returns a bool although the annotation says
            # str — confirm what the dataframe case is meant to return.
            return self.is_from_dataframe
        schema_dict = clean_schema(
            self.schema,
            excludes=["id", "vector"],
        )
        # intersect config.filter_fields with schema_dict.keys() in case
        # there are extraneous fields in config.filter_fields
        filter_fields_set = set(
            self.config.filter_fields or schema_dict.keys()
        ).intersection(schema_dict.keys())
        # remove 'content' from filter_fields_set, even if it's not in filter_fields_set
        filter_fields_set.discard("content")
        # possible values of filterable fields
        filter_field_values = self.get_field_values(list(filter_fields_set))
        # add field values to schema_dict as another field `values` for each field
        for field, values in filter_field_values.items():
            if field in schema_dict:
                schema_dict[field]["values"] = values
        # if self.config.filter_fields is set, restrict to these:
        if len(self.config.filter_fields) > 0:
            schema_dict = {
                k: v for k, v in schema_dict.items() if k in self.config.filter_fields
            }
        schema = json.dumps(schema_dict, indent=2)
        schema += f"""
        NOTE when creating a filter for a query,
        ONLY the following fields are allowed:
        {",".join(self.config.filter_fields)}
        """
        return schema
def get_field_values(self, fields: list[str]) -> Dict[str, str]:
"""Get string-listing of possible values of each filterable field,
e.g.
{
"genre": "crime, drama, mystery, ... (10 more)",
"certificate": "R, PG-13, PG, R",
}
"""
field_values: Dict[str, Set[str]] = {}
# make empty set for each field
for f in fields:
field_values[f] = set()
# get all documents and accumulate possible values of each field until 10
docs = self.get_all_documents()
for d in docs:
# extract fields from d
doc_field_vals = extract_fields(d, fields)
for field, val in doc_field_vals.items():
field_values[field].add(val)
# For each field make a string showing list of possible values,
# truncate to 20 values, and if there are more, indicate how many
# more there are, e.g. Genre: crime, drama, mystery, ... (20 more)
field_values_list = {}
for f in fields:
vals = list(field_values[f])
n = len(vals)
remaining = n - 20
vals = vals[:20]
if n > 20:
vals.append(f"(...{remaining} more)")
# make a string of the values, ensure they are strings
field_values_list[f] = ", ".join(str(v) for v in vals)
return field_values_list
| [
"lancedb.connect",
"lancedb.pydantic.Vector"
] | [((877, 904), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (894, 904), False, 'import logging\n'), ((1067, 1091), 'src.embedding_models.models.OpenAIEmbeddingsConfig', 'OpenAIEmbeddingsConfig', ([], {}), '()\n', (1089, 1091), False, 'from src.embedding_models.models import OpenAIEmbeddingsConfig\n'), ((1569, 1608), 'src.embedding_models.base.EmbeddingModel.create', 'EmbeddingModel.create', (['config.embedding'], {}), '(config.embedding)\n', (1590, 1608), False, 'from src.embedding_models.base import EmbeddingModel, EmbeddingModelsConfig\n'), ((2008, 2021), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (2019, 2021), False, 'from dotenv import load_dotenv\n'), ((6584, 6639), 'pydantic.create_model', 'create_model', (['"""NewModel"""'], {'__base__': 'LanceModel'}), "('NewModel', __base__=LanceModel, **fields)\n", (6596, 6639), False, 'from pydantic import BaseModel, ValidationError, create_model\n'), ((7016, 7074), 'src.utils.pydantic_utils.flatten_pydantic_model', 'flatten_pydantic_model', (['lance_model'], {'base_model': 'LanceModel'}), '(lance_model, base_model=LanceModel)\n', (7038, 7074), False, 'from src.utils.pydantic_utils import clean_schema, dataframe_to_document_model, dataframe_to_documents, extract_fields, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((19539, 19591), 'src.utils.pydantic_utils.clean_schema', 'clean_schema', (['self.schema'], {'excludes': "['id', 'vector']"}), "(self.schema, excludes=['id', 'vector'])\n", (19551, 19591), False, 'from src.utils.pydantic_utils import clean_schema, dataframe_to_document_model, dataframe_to_documents, extract_fields, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((20650, 20683), 'json.dumps', 'json.dumps', (['schema_dict'], {'indent': '(2)'}), '(schema_dict, indent=2)\n', (20660, 20683), False, 'import json\n'), ((2062, 2102), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'config.storage_path'}), 
'(uri=config.storage_path)\n', (2077, 2102), False, 'import lancedb\n'), ((12047, 12111), 'src.utils.pydantic_utils.dataframe_to_documents', 'dataframe_to_documents', (['df'], {'content': '"""content"""', 'metadata': 'metadata'}), "(df, content='content', metadata=metadata)\n", (12069, 12111), False, 'from src.utils.pydantic_utils import clean_schema, dataframe_to_document_model, dataframe_to_documents, extract_fields, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((12824, 12922), 'src.utils.pydantic_utils.dataframe_to_document_model', 'dataframe_to_document_model', (['df'], {'content': 'content', 'metadata': 'actual_metadata', 'exclude': "['vector']"}), "(df, content=content, metadata=actual_metadata,\n exclude=['vector'])\n", (12851, 12922), False, 'from src.utils.pydantic_utils import clean_schema, dataframe_to_document_model, dataframe_to_documents, extract_fields, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((13937, 14058), 'src.utils.pydantic_utils.dataframe_to_documents', 'dataframe_to_documents', (['df'], {'content': '"""content"""', 'metadata': 'self.df_metadata_columns', 'doc_cls': 'self.config.document_class'}), "(df, content='content', metadata=self.\n df_metadata_columns, doc_cls=self.config.document_class)\n", (13959, 14058), False, 'from src.utils.pydantic_utils import clean_schema, dataframe_to_document_model, dataframe_to_documents, extract_fields, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((21541, 21566), 'src.utils.pydantic_utils.extract_fields', 'extract_fields', (['d', 'fields'], {}), '(d, fields)\n', (21555, 21566), False, 'from src.utils.pydantic_utils import clean_schema, dataframe_to_document_model, dataframe_to_documents, extract_fields, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((2478, 2515), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'new_storage_path'}), '(uri=new_storage_path)\n', (2493, 
2515), False, 'import lancedb\n'), ((6291, 6300), 'lancedb.pydantic.Vector', 'Vector', (['n'], {}), '(n)\n', (6297, 6300), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((10171, 10206), 'src.utils.pydantic_utils.flatten_pydantic_instance', 'flatten_pydantic_instance', (['instance'], {}), '(instance)\n', (10196, 10206), False, 'from src.utils.pydantic_utils import clean_schema, dataframe_to_document_model, dataframe_to_documents, extract_fields, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((14426, 14452), 'src.utils.pydantic_utils.nested_dict_from_flat', 'nested_dict_from_flat', (['rec'], {}), '(rec)\n', (14447, 14452), False, 'from src.utils.pydantic_utils import clean_schema, dataframe_to_document_model, dataframe_to_documents, extract_fields, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n')] |
from datasets import load_dataset
import os
import lancedb
import getpass
import time
import argparse
from tqdm.auto import tqdm
from lancedb.embeddings import EmbeddingFunctionRegistry
from lancedb.pydantic import LanceModel, Vector
def main(query=None):
if "COHERE_API_KEY" not in os.environ:
os.environ["COHERE_API_KEY"] = getpass.getpass("Enter your Cohere API key: ")
en = dataset = load_dataset(
"wikipedia",
"20220301.en",
streaming=True,
)
fr = load_dataset("wikipedia", "20220301.fr", streaming=True)
datasets = {"english": iter(en["train"]), "french": iter(fr["train"])}
registry = EmbeddingFunctionRegistry().get_instance()
cohere = registry.get(
"cohere"
).create() # uses multi-lingual model by default (768 dim)
class Schema(LanceModel):
vector: Vector(cohere.ndims()) = cohere.VectorField()
text: str = cohere.SourceField()
url: str
title: str
id: str
lang: str
db = lancedb.connect("~/lancedb")
tbl = (
db.create_table("wikipedia-cohere", schema=Schema, mode="overwrite")
if "wikipedia-cohere" not in db
else db.open_table("wikipedia-cohere")
)
# let's use cohere embeddings. Use can also set it to openai version of the table
batch_size = 1000
num_records = 10000
data = []
for i in tqdm(range(0, num_records, batch_size)):
for lang, dataset in datasets.items():
batch = [next(dataset) for _ in range(batch_size)]
texts = [x["text"] for x in batch]
ids = [f"{x['id']}-{lang}" for x in batch]
data.extend(
{
"text": x["text"],
"title": x["title"],
"url": x["url"],
"lang": lang,
"id": f"{lang}-{x['id']}",
}
for x in batch
)
# add in batches to avoid token limit
tbl.add(data)
data = []
print("Added batch. Sleeping for 20 seconds to avoid rate limit")
time.sleep(20) # wait for 20 seconds to avoid rate limit
if not query:
it = iter(fr["train"])
for i in range(5):
next(it)
query = next(it)
rs = tbl.search(query["text"]).limit(3).to_list()
print("Query: ", query["text"])
print("Results: ", rs)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--query", type=str, default="", help="Query to search")
args = parser.parse_args()
main(query=args.query)
| [
"lancedb.connect",
"lancedb.embeddings.EmbeddingFunctionRegistry"
] | [((407, 463), 'datasets.load_dataset', 'load_dataset', (['"""wikipedia"""', '"""20220301.en"""'], {'streaming': '(True)'}), "('wikipedia', '20220301.en', streaming=True)\n", (419, 463), False, 'from datasets import load_dataset\n'), ((504, 560), 'datasets.load_dataset', 'load_dataset', (['"""wikipedia"""', '"""20220301.fr"""'], {'streaming': '(True)'}), "('wikipedia', '20220301.fr', streaming=True)\n", (516, 560), False, 'from datasets import load_dataset\n'), ((1018, 1046), 'lancedb.connect', 'lancedb.connect', (['"""~/lancedb"""'], {}), "('~/lancedb')\n", (1033, 1046), False, 'import lancedb\n'), ((2458, 2483), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2481, 2483), False, 'import argparse\n'), ((340, 386), 'getpass.getpass', 'getpass.getpass', (['"""Enter your Cohere API key: """'], {}), "('Enter your Cohere API key: ')\n", (355, 386), False, 'import getpass\n'), ((2117, 2131), 'time.sleep', 'time.sleep', (['(20)'], {}), '(20)\n', (2127, 2131), False, 'import time\n'), ((653, 680), 'lancedb.embeddings.EmbeddingFunctionRegistry', 'EmbeddingFunctionRegistry', ([], {}), '()\n', (678, 680), False, 'from lancedb.embeddings import EmbeddingFunctionRegistry\n')] |
from typing import Optional
from pydantic import BaseModel, ConfigDict, Field, model_validator
from lancedb.pydantic import LanceModel, Vector
class Wine(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
validate_assignment=True,
extra="allow",
str_strip_whitespace=True,
json_schema_extra={
"example": {
"id": 45100,
"points": 85,
"title": "Balduzzi 2012 Reserva Merlot (Maule Valley)",
"description": "Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. Raisin and cooked berry flavors finish plump, with earthy notes.",
"price": 10.0,
"variety": "Merlot",
"winery": "Balduzzi",
"vineyard": "Reserva",
"country": "Chile",
"province": "Maule Valley",
"region_1": "null",
"region_2": "null",
"taster_name": "Michael Schachner",
"taster_twitter_handle": "@wineschach",
}
},
)
id: int
points: int
title: str
description: Optional[str]
price: Optional[float]
variety: Optional[str]
winery: Optional[str]
vineyard: Optional[str] = Field(..., alias="designation")
country: Optional[str]
province: Optional[str]
region_1: Optional[str]
region_2: Optional[str]
taster_name: Optional[str]
taster_twitter_handle: Optional[str]
@model_validator(mode="before")
def _fill_country_unknowns(cls, values):
"Fill in missing country values with 'Unknown', as we always want this field to be queryable"
country = values.get("country")
if not country:
values["country"] = "Unknown"
return values
@model_validator(mode="before")
def _add_to_vectorize_fields(cls, values):
"Add a field to_vectorize that will be used to create sentence embeddings"
variety = values.get("variety", "")
title = values.get("title", "")
description = values.get("description", "")
to_vectorize = list(filter(None, [variety, title, description]))
values["to_vectorize"] = " ".join(to_vectorize).strip()
return values
class LanceModelWine(BaseModel):
"""
Pydantic model for LanceDB, with a vector field added for sentence embeddings
"""
id: int
points: int
title: str
description: Optional[str]
price: Optional[float]
variety: Optional[str]
winery: Optional[str]
vineyard: Optional[str] = Field(..., alias="designation")
country: Optional[str]
province: Optional[str]
region_1: Optional[str]
region_2: Optional[str]
taster_name: Optional[str]
taster_twitter_handle: Optional[str]
to_vectorize: str
vector: Vector(384)
class SearchResult(LanceModel):
"Model to return search results"
model_config = ConfigDict(
extra="ignore",
json_schema_extra={
"example": {
"id": 374,
"title": "Borgo Conventi 2002 I Fiori del Borgo Sauvignon Blanc (Collio)",
"description": "Crisp, green, grassy wine with fresh acidity and herbeceous character. It is very New World with its tropical flavors and open, forward fruit.",
"country": "Italy",
"variety": "Sauvignon Blanc",
"price": 15,
"points": 88,
}
},
)
id: int
title: str
description: Optional[str]
country: Optional[str]
variety: Optional[str]
price: Optional[float]
points: Optional[int]
| [
"lancedb.pydantic.Vector"
] | [((189, 894), 'pydantic.ConfigDict', 'ConfigDict', ([], {'populate_by_name': '(True)', 'validate_assignment': '(True)', 'extra': '"""allow"""', 'str_strip_whitespace': '(True)', 'json_schema_extra': "{'example': {'id': 45100, 'points': 85, 'title':\n 'Balduzzi 2012 Reserva Merlot (Maule Valley)', 'description':\n 'Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. Raisin and cooked berry flavors finish plump, with earthy notes.'\n , 'price': 10.0, 'variety': 'Merlot', 'winery': 'Balduzzi', 'vineyard':\n 'Reserva', 'country': 'Chile', 'province': 'Maule Valley', 'region_1':\n 'null', 'region_2': 'null', 'taster_name': 'Michael Schachner',\n 'taster_twitter_handle': '@wineschach'}}"}), "(populate_by_name=True, validate_assignment=True, extra='allow',\n str_strip_whitespace=True, json_schema_extra={'example': {'id': 45100,\n 'points': 85, 'title': 'Balduzzi 2012 Reserva Merlot (Maule Valley)',\n 'description':\n 'Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. 
Raisin and cooked berry flavors finish plump, with earthy notes.'\n , 'price': 10.0, 'variety': 'Merlot', 'winery': 'Balduzzi', 'vineyard':\n 'Reserva', 'country': 'Chile', 'province': 'Maule Valley', 'region_1':\n 'null', 'region_2': 'null', 'taster_name': 'Michael Schachner',\n 'taster_twitter_handle': '@wineschach'}})\n", (199, 894), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((1355, 1386), 'pydantic.Field', 'Field', (['...'], {'alias': '"""designation"""'}), "(..., alias='designation')\n", (1360, 1386), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((1576, 1606), 'pydantic.model_validator', 'model_validator', ([], {'mode': '"""before"""'}), "(mode='before')\n", (1591, 1606), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((1888, 1918), 'pydantic.model_validator', 'model_validator', ([], {'mode': '"""before"""'}), "(mode='before')\n", (1903, 1918), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((2662, 2693), 'pydantic.Field', 'Field', (['...'], {'alias': '"""designation"""'}), "(..., alias='designation')\n", (2667, 2693), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((2911, 2922), 'lancedb.pydantic.Vector', 'Vector', (['(384)'], {}), '(384)\n', (2917, 2922), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((3014, 3422), 'pydantic.ConfigDict', 'ConfigDict', ([], {'extra': '"""ignore"""', 'json_schema_extra': "{'example': {'id': 374, 'title':\n 'Borgo Conventi 2002 I Fiori del Borgo Sauvignon Blanc (Collio)',\n 'description':\n 'Crisp, green, grassy wine with fresh acidity and herbeceous character. 
It is very New World with its tropical flavors and open, forward fruit.'\n , 'country': 'Italy', 'variety': 'Sauvignon Blanc', 'price': 15,\n 'points': 88}}"}), "(extra='ignore', json_schema_extra={'example': {'id': 374,\n 'title':\n 'Borgo Conventi 2002 I Fiori del Borgo Sauvignon Blanc (Collio)',\n 'description':\n 'Crisp, green, grassy wine with fresh acidity and herbeceous character. It is very New World with its tropical flavors and open, forward fruit.'\n , 'country': 'Italy', 'variety': 'Sauvignon Blanc', 'price': 15,\n 'points': 88}})\n", (3024, 3422), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n')] |
from typing import Any
from lancedb.embeddings import EmbeddingFunctionRegistry
def register_model(model_name: str) -> Any:
"""
Register a model with the given name using LanceDB's EmbeddingFunctionRegistry.
Args:
model_name (str): The name of the model to register.
Returns:
model: The registered model instance.
Usage:
>>> model = register_model("open-clip")
"""
registry = EmbeddingFunctionRegistry.get_instance()
model = registry.get(model_name).create()
return model
| [
"lancedb.embeddings.EmbeddingFunctionRegistry.get_instance"
] | [((430, 470), 'lancedb.embeddings.EmbeddingFunctionRegistry.get_instance', 'EmbeddingFunctionRegistry.get_instance', ([], {}), '()\n', (468, 470), False, 'from lancedb.embeddings import EmbeddingFunctionRegistry\n')] |
#!/usr/bin/env python
import os
import lancedb
from lancedb.embeddings import with_embeddings
import openai
import pandas as pd
from pydantic import BaseModel, Field
import requests
from aifunctools.openai_funcs import complete_with_functions
openai.api_key = os.getenv("OPENAI_API_KEY")
MODEL = "gpt-3.5-turbo-16k-0613"
db = lancedb.connect(".lancedb")
def embed_func(c):
rs = openai.Embedding.create(input=c, engine="text-embedding-ada-002")
return [record["embedding"] for record in rs["data"]]
def to_lancedb_table(db, memes):
df = pd.DataFrame([m.model_dump() for m in memes])
data = with_embeddings(embed_func, df, column="name")
if "memes" in db.table_names():
tbl = db.open_table("memes")
tbl.add(data, mode="overwrite")
else:
tbl = db.create_table("memes", data)
return tbl
class Meme(BaseModel):
id: str = Field(description="The meme id")
name: str = Field(description="The meme name")
url: str = Field(description="The meme url")
width: int = Field(description="The meme image width")
height: int = Field(description="The meme image height")
box_count: int = Field(description="The number of text boxes in the meme")
def get_memes():
"""
Get a list of memes from the meme api
"""
resp = requests.get("https://api.imgflip.com/get_memes")
return [Meme(**m) for m in resp.json()["data"]["memes"]]
def search_memes(query: str):
"""
Get the most popular memes from imgflip and do a semantic search based on the user query
:param query: str, the search string
"""
memes = get_memes()
tbl = to_lancedb_table(db, memes)
df = tbl.search(embed_func(query)[0]).limit(1).to_df()
return Meme(**df.to_dict(orient="records")[0]).model_dump()
if __name__ == "__main__":
question = "Please find me the image link for that popular meme with Fry from Futurama"
print(complete_with_functions(question, search_memes)["choices"][0]["message"]["content"])
| [
"lancedb.connect",
"lancedb.embeddings.with_embeddings"
] | [((263, 290), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (272, 290), False, 'import os\n'), ((331, 358), 'lancedb.connect', 'lancedb.connect', (['""".lancedb"""'], {}), "('.lancedb')\n", (346, 358), False, 'import lancedb\n'), ((389, 454), 'openai.Embedding.create', 'openai.Embedding.create', ([], {'input': 'c', 'engine': '"""text-embedding-ada-002"""'}), "(input=c, engine='text-embedding-ada-002')\n", (412, 454), False, 'import openai\n'), ((614, 660), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_func', 'df'], {'column': '"""name"""'}), "(embed_func, df, column='name')\n", (629, 660), False, 'from lancedb.embeddings import with_embeddings\n'), ((883, 915), 'pydantic.Field', 'Field', ([], {'description': '"""The meme id"""'}), "(description='The meme id')\n", (888, 915), False, 'from pydantic import BaseModel, Field\n'), ((932, 966), 'pydantic.Field', 'Field', ([], {'description': '"""The meme name"""'}), "(description='The meme name')\n", (937, 966), False, 'from pydantic import BaseModel, Field\n'), ((982, 1015), 'pydantic.Field', 'Field', ([], {'description': '"""The meme url"""'}), "(description='The meme url')\n", (987, 1015), False, 'from pydantic import BaseModel, Field\n'), ((1033, 1074), 'pydantic.Field', 'Field', ([], {'description': '"""The meme image width"""'}), "(description='The meme image width')\n", (1038, 1074), False, 'from pydantic import BaseModel, Field\n'), ((1093, 1135), 'pydantic.Field', 'Field', ([], {'description': '"""The meme image height"""'}), "(description='The meme image height')\n", (1098, 1135), False, 'from pydantic import BaseModel, Field\n'), ((1157, 1214), 'pydantic.Field', 'Field', ([], {'description': '"""The number of text boxes in the meme"""'}), "(description='The number of text boxes in the meme')\n", (1162, 1214), False, 'from pydantic import BaseModel, Field\n'), ((1303, 1352), 'requests.get', 'requests.get', (['"""https://api.imgflip.com/get_memes"""'], 
{}), "('https://api.imgflip.com/get_memes')\n", (1315, 1352), False, 'import requests\n'), ((1913, 1960), 'aifunctools.openai_funcs.complete_with_functions', 'complete_with_functions', (['question', 'search_memes'], {}), '(question, search_memes)\n', (1936, 1960), False, 'from aifunctools.openai_funcs import complete_with_functions\n')] |
import lancedb
import uuid
from datetime import datetime
from tqdm import tqdm
from typing import Optional, List, Iterator, Dict
from memgpt.config import MemGPTConfig
from memgpt.connectors.storage import StorageConnector, TableType
from memgpt.config import AgentConfig, MemGPTConfig
from memgpt.constants import MEMGPT_DIR
from memgpt.utils import printd
from memgpt.data_types import Record, Message, Passage, Source
from datetime import datetime
from lancedb.pydantic import Vector, LanceModel
""" Initial implementation - not complete """
def get_db_model(table_name: str, table_type: TableType):
config = MemGPTConfig.load()
if table_type == TableType.ARCHIVAL_MEMORY or table_type == TableType.PASSAGES:
# create schema for archival memory
class PassageModel(LanceModel):
"""Defines data model for storing Passages (consisting of text, embedding)"""
id: uuid.UUID
user_id: str
text: str
doc_id: str
agent_id: str
data_source: str
embedding: Vector(config.embedding_dim)
metadata_: Dict
def __repr__(self):
return f"<Passage(passage_id='{self.id}', text='{self.text}', embedding='{self.embedding})>"
def to_record(self):
return Passage(
text=self.text,
embedding=self.embedding,
doc_id=self.doc_id,
user_id=self.user_id,
id=self.id,
data_source=self.data_source,
agent_id=self.agent_id,
metadata=self.metadata_,
)
return PassageModel
elif table_type == TableType.RECALL_MEMORY:
class MessageModel(LanceModel):
"""Defines data model for storing Message objects"""
__abstract__ = True # this line is necessary
# Assuming message_id is the primary key
id: uuid.UUID
user_id: str
agent_id: str
# openai info
role: str
text: str
model: str
user: str
# function info
function_name: str
function_args: str
function_response: str
embedding = Vector(config.embedding_dim)
# Add a datetime column, with default value as the current time
created_at = datetime
def __repr__(self):
return f"<Message(message_id='{self.id}', text='{self.text}', embedding='{self.embedding})>"
def to_record(self):
return Message(
user_id=self.user_id,
agent_id=self.agent_id,
role=self.role,
user=self.user,
text=self.text,
model=self.model,
function_name=self.function_name,
function_args=self.function_args,
function_response=self.function_response,
embedding=self.embedding,
created_at=self.created_at,
id=self.id,
)
"""Create database model for table_name"""
return MessageModel
elif table_type == TableType.DATA_SOURCES:
class SourceModel(LanceModel):
"""Defines data model for storing Passages (consisting of text, embedding)"""
# Assuming passage_id is the primary key
id: uuid.UUID
user_id: str
name: str
created_at: datetime
def __repr__(self):
return f"<Source(passage_id='{self.id}', name='{self.name}')>"
def to_record(self):
return Source(id=self.id, user_id=self.user_id, name=self.name, created_at=self.created_at)
"""Create database model for table_name"""
return SourceModel
else:
raise ValueError(f"Table type {table_type} not implemented")
class LanceDBConnector(StorageConnector):
"""Storage via LanceDB"""
# TODO: this should probably eventually be moved into a parent DB class
def __init__(self, name: Optional[str] = None, agent_config: Optional[AgentConfig] = None):
# TODO
pass
def generate_where_filter(self, filters: Dict) -> str:
where_filters = []
for key, value in filters.items():
where_filters.append(f"{key}={value}")
return where_filters.join(" AND ")
@abstractmethod
def get_all_paginated(self, filters: Optional[Dict] = {}, page_size: Optional[int] = 1000) -> Iterator[List[Record]]:
# TODO
pass
@abstractmethod
def get_all(self, filters: Optional[Dict] = {}, limit=10) -> List[Record]:
# TODO
pass
@abstractmethod
def get(self, id: str) -> Optional[Record]:
# TODO
pass
@abstractmethod
def size(self, filters: Optional[Dict] = {}) -> int:
# TODO
pass
@abstractmethod
def insert(self, record: Record):
# TODO
pass
@abstractmethod
def insert_many(self, records: List[Record], show_progress=False):
# TODO
pass
@abstractmethod
def query(self, query: str, query_vec: List[float], top_k: int = 10, filters: Optional[Dict] = {}) -> List[Record]:
# TODO
pass
@abstractmethod
def query_date(self, start_date, end_date):
# TODO
pass
@abstractmethod
def query_text(self, query):
# TODO
pass
@abstractmethod
def delete_table(self):
# TODO
pass
@abstractmethod
def delete(self, filters: Optional[Dict] = {}):
# TODO
pass
@abstractmethod
def save(self):
# TODO
pass
| [
"lancedb.pydantic.Vector"
] | [((622, 641), 'memgpt.config.MemGPTConfig.load', 'MemGPTConfig.load', ([], {}), '()\n', (639, 641), False, 'from memgpt.config import AgentConfig, MemGPTConfig\n'), ((1077, 1105), 'lancedb.pydantic.Vector', 'Vector', (['config.embedding_dim'], {}), '(config.embedding_dim)\n', (1083, 1105), False, 'from lancedb.pydantic import Vector, LanceModel\n'), ((1333, 1523), 'memgpt.data_types.Passage', 'Passage', ([], {'text': 'self.text', 'embedding': 'self.embedding', 'doc_id': 'self.doc_id', 'user_id': 'self.user_id', 'id': 'self.id', 'data_source': 'self.data_source', 'agent_id': 'self.agent_id', 'metadata': 'self.metadata_'}), '(text=self.text, embedding=self.embedding, doc_id=self.doc_id,\n user_id=self.user_id, id=self.id, data_source=self.data_source,\n agent_id=self.agent_id, metadata=self.metadata_)\n', (1340, 1523), False, 'from memgpt.data_types import Record, Message, Passage, Source\n'), ((2335, 2363), 'lancedb.pydantic.Vector', 'Vector', (['config.embedding_dim'], {}), '(config.embedding_dim)\n', (2341, 2363), False, 'from lancedb.pydantic import Vector, LanceModel\n'), ((2674, 2989), 'memgpt.data_types.Message', 'Message', ([], {'user_id': 'self.user_id', 'agent_id': 'self.agent_id', 'role': 'self.role', 'user': 'self.user', 'text': 'self.text', 'model': 'self.model', 'function_name': 'self.function_name', 'function_args': 'self.function_args', 'function_response': 'self.function_response', 'embedding': 'self.embedding', 'created_at': 'self.created_at', 'id': 'self.id'}), '(user_id=self.user_id, agent_id=self.agent_id, role=self.role, user=\n self.user, text=self.text, model=self.model, function_name=self.\n function_name, function_args=self.function_args, function_response=self\n .function_response, embedding=self.embedding, created_at=self.\n created_at, id=self.id)\n', (2681, 2989), False, 'from memgpt.data_types import Record, Message, Passage, Source\n'), ((3815, 3904), 'memgpt.data_types.Source', 'Source', ([], {'id': 'self.id', 'user_id': 
'self.user_id', 'name': 'self.name', 'created_at': 'self.created_at'}), '(id=self.id, user_id=self.user_id, name=self.name, created_at=self.\n created_at)\n', (3821, 3904), False, 'from memgpt.data_types import Record, Message, Passage, Source\n')] |
""" Install lancedb with instructor embedding support
copy this and paste it in the terminal, and install additional dependencies via requirements.txt file
pip install git+https://github.com/lancedb/lancedb.git@main#subdirectory=python
"""
import lancedb
from lancedb.pydantic import LanceModel, Vector
from lancedb.embeddings import get_registry
from lancedb.embeddings import InstructorEmbeddingFunction
instructor = (
get_registry()
.get("instructor")
.create(
source_instruction="represent the document for retreival",
query_instruction="represent the document for most similar definition",
)
)
class Schema(LanceModel):
vector: Vector(instructor.ndims()) = instructor.VectorField()
text: str = instructor.SourceField()
# Creating LanceDB table
db = lancedb.connect("~/.lancedb")
tbl = db.create_table("intruct-multitask", schema=Schema, mode="overwrite")
data_f1 = [
{
"text": "Aspirin is a widely-used over-the-counter medication known for its anti-inflammatory and analgesic properties. It is commonly used to relieve pain, reduce fever, and alleviate minor aches and pains."
},
{
"text": "Amoxicillin is an antibiotic medication commonly prescribed to treat various bacterial infections, such as respiratory, ear, throat, and urinary tract infections. It belongs to the penicillin class of antibiotics and works by inhibiting bacterial cell wall synthesis."
},
{
"text": "Atorvastatin is a lipid-lowering medication used to manage high cholesterol levels and reduce the risk of cardiovascular events. It belongs to the statin class of drugs and works by inhibiting an enzyme involved in cholesterol production in the liver."
},
{
"text": "The Theory of Relativity is a fundamental physics theory developed by Albert Einstein, consisting of the special theory of relativity and the general theory of relativity. It revolutionized our understanding of space, time, and gravity."
},
{
"text": "Photosynthesis is a vital biological process by which green plants, algae, and some bacteria convert light energy into chemical energy in the form of glucose, using carbon dioxide and water."
},
{
"text": "The Big Bang Theory is the prevailing cosmological model that describes the origin of the universe. It suggests that the universe began as a singularity and has been expanding for billions of years."
},
{
"text": "Compound Interest is the addition of interest to the principal sum of a loan or investment, resulting in the interest on interest effect over time."
},
{
"text": "Stock Market is a financial marketplace where buyers and sellers trade ownership in companies, typically in the form of stocks or shares."
},
{
"text": "Inflation is the rate at which the general level of prices for goods and services is rising and subsequently purchasing power is falling."
},
{
"text": "Diversification is an investment strategy that involves spreading your investments across different asset classes to reduce risk."
},
{
"text": "Liquidity refers to how easily an asset can be converted into cash without a significant loss of value. It's a key consideration in financial management."
},
{
"text": "401(k) is a retirement savings plan offered by employers, allowing employees to save and invest a portion of their paycheck before taxes."
},
{
"text": "Ballet is a classical dance form that originated in the Italian Renaissance courts of the 15th century and later developed into a highly technical art."
},
{
"text": "Rock and Roll is a genre of popular music that originated and evolved in the United States during the late 1940s and early 1950s, characterized by a strong rhythm and amplified instruments."
},
{
"text": "Cuisine is a style or method of cooking, especially as characteristic of a particular country, region, or establishment."
},
{"text": "Renaissance was a cultural, artistic, and intellectual movement that"},
{
"text": "Neutrino is subatomic particles with very little mass and no electric charge. They are produced in various nuclear reactions, including those in the Sun, and play a significant role in astrophysics and particle physics."
},
{
"text": "Higgs Boson is a subatomic particle that gives mass to other elementary particles. Its discovery was a significant achievement in particle physics."
},
{
"text": "Quantum Entanglement is a quantum physics phenomenon where two or more particles become connected in such a way that the state of one particle is dependent on the state of the other(s), even when they are separated by large distances."
},
{
"text": "Genome Sequencing is the process of determining the complete DNA sequence of an organism's genome. It has numerous applications in genetics, biology, and medicine."
},
]
tbl.add(data_f1)
# LanceDB supports full text search, so there is no need of embedding the Query manually
query = "amoxicillin"
result = tbl.search(query).limit(1).to_pandas()
# printing the output
print(result)
#########################################################################################################################
################# SAME INPUT DATA WITH DIFFERENT INSTRUCTION PAIR #######################################################
#########################################################################################################################
# uncomment the below code to check for different instruction pair on the same data
"""instructor = get_registry().get("instructor").create(
source_instruction="represent the captions",
query_instruction="represent the captions for retrieving duplicate captions"
)
class Schema(LanceModel):
vector: Vector(instructor.ndims()) = instructor.VectorField()
text: str = instructor.SourceField()
db = lancedb.connect("~/.lancedb")
tbl = db.create_table("intruct-multitask", schema=Schema, mode="overwrite")
data_f2 = [
{"text": "Aspirin is a widely-used over-the-counter medication known for its anti-inflammatory and analgesic properties. It is commonly used to relieve pain, reduce fever, and alleviate minor aches and pains."},
{"text": "Amoxicillin is an antibiotic medication commonly prescribed to treat various bacterial infections, such as respiratory, ear, throat, and urinary tract infections. It belongs to the penicillin class of antibiotics and works by inhibiting bacterial cell wall synthesis."},
{"text": "Atorvastatin is a lipid-lowering medication used to manage high cholesterol levels and reduce the risk of cardiovascular events. It belongs to the statin class of drugs and works by inhibiting an enzyme involved in cholesterol production in the liver."},
{"text": "The Theory of Relativity is a fundamental physics theory developed by Albert Einstein, consisting of the special theory of relativity and the general theory of relativity. It revolutionized our understanding of space, time, and gravity."},
{"text": "Photosynthesis is a vital biological process by which green plants, algae, and some bacteria convert light energy into chemical energy in the form of glucose, using carbon dioxide and water."},
{"text": "The Big Bang Theory is the prevailing cosmological model that describes the origin of the universe. It suggests that the universe began as a singularity and has been expanding for billions of years."},
{"text": "Compound Interest is the addition of interest to the principal sum of a loan or investment, resulting in the interest on interest effect over time."},
{"text": "Stock Market is a financial marketplace where buyers and sellers trade ownership in companies, typically in the form of stocks or shares."},
{"text": "Inflation is the rate at which the general level of prices for goods and services is rising and subsequently purchasing power is falling."},
{"text": "Diversification is an investment strategy that involves spreading your investments across different asset classes to reduce risk."},
{"text": "Liquidity refers to how easily an asset can be converted into cash without a significant loss of value. It's a key consideration in financial management."},
{"text": "401(k) is a retirement savings plan offered by employers, allowing employees to save and invest a portion of their paycheck before taxes."},
{"text": "Ballet is a classical dance form that originated in the Italian Renaissance courts of the 15th century and later developed into a highly technical art."},
{"text": "Rock and Roll is a genre of popular music that originated and evolved in the United States during the late 1940s and early 1950s, characterized by a strong rhythm and amplified instruments."},
{"text": "Cuisine is a style or method of cooking, especially as characteristic of a particular country, region, or establishment."},
{"text": "Renaissance was a cultural, artistic, and intellectual movement that"},
{"text": "Neutrino is subatomic particles with very little mass and no electric charge. They are produced in various nuclear reactions, including those in the Sun, and play a significant role in astrophysics and particle physics."},
{"text": "Higgs Boson is a subatomic particle that gives mass to other elementary particles. Its discovery was a significant achievement in particle physics."},
{"text": "Quantum Entanglement is a quantum physics phenomenon where two or more particles become connected in such a way that the state of one particle is dependent on the state of the other(s), even when they are separated by large distances."},
{"text": "Genome Sequencing is the process of determining the complete DNA sequence of an organism's genome. It has numerous applications in genetics, biology, and medicine."},
]
tbl.add(data_f2)
#same query, but for the differently embed data
query = "amoxicillin"
result = tbl.search(query).limit(1).to_pandas()
#showing the result
print(result)
"""
| [
"lancedb.connect",
"lancedb.embeddings.get_registry"
] | [((818, 847), 'lancedb.connect', 'lancedb.connect', (['"""~/.lancedb"""'], {}), "('~/.lancedb')\n", (833, 847), False, 'import lancedb\n'), ((445, 459), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (457, 459), False, 'from lancedb.embeddings import get_registry\n')] |
from pathlib import Path
from uuid import uuid4
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import LanceDB
import lancedb
from knowledge_graph.configuration.config import cfg
from lancedb import DBConnection
def check_if_embedding_exists(text: str):
    """Return True if *text* is already stored in the ``knowledge_graph_text`` table.

    Opens the LanceDB database at ``cfg.db_path``, runs a similarity search for
    *text*, and checks whether the exact string appears among the returned rows.
    """
    db = lancedb.connect(cfg.db_path)
    tbl_text = db.open_table("knowledge_graph_text")
    df = tbl_text.search(text).to_pandas(flatten=True)
    print(df.text)  # NOTE(review): debug output — consider switching to logging
    # Return the membership test directly instead of if/else returning True/False.
    return text in df.text.values.astype(str)
async def create_embeddings_text(text: str):
    """Split *text* into chunks and store their embeddings in LanceDB.

    A fresh ``knowledge_graph_text`` table is created (overwriting any existing
    one), seeded with a placeholder row, then the chunked text is embedded and
    written through the LangChain ``LanceDB`` vector store, which is returned.
    """
    connection = lancedb.connect(cfg.db_path)
    placeholder_row = {
        "vector": cfg.emb_func.embed_query("Placeholder"),
        "text": "Placeholder",
        "id": "1",
    }
    table = connection.create_table(
        name="knowledge_graph_text",
        data=[placeholder_row],
        mode="overwrite",
    )
    splitter = CharacterTextSplitter(chunk_size=cfg.chunk_size, chunk_overlap=0)
    chunks = splitter.split_text(text)
    return LanceDB.from_texts(chunks, cfg.emb_func, connection=table)
async def create_embeddings_summary(summary_path: Path):
    """Load the summary file, split it, and store chunk embeddings in LanceDB.

    Creates (overwriting) the ``knowledge_graph_summary`` table seeded with a
    placeholder row, then embeds the summary document chunks through the
    LangChain ``LanceDB`` vector store, which is returned.
    """
    connection = lancedb.connect(cfg.db_path)
    placeholder_row = {
        "vector": cfg.emb_func.embed_query("Placeholder"),
        "text": "Placeholder",
        "id": "1",
    }
    table = connection.create_table(
        name="knowledge_graph_summary",
        data=[placeholder_row],
        mode="overwrite",
    )
    docs = TextLoader(summary_path.as_posix()).load()
    splitter = CharacterTextSplitter(chunk_size=cfg.chunk_size, chunk_overlap=0)
    chunks = splitter.split_documents(docs)
    return LanceDB.from_documents(chunks, cfg.emb_func, connection=table)
async def similarity_search(query: str):
    """Return the best-matching text chunk and summary chunk for *query*.

    Searches the ``knowledge_graph_text`` and ``knowledge_graph_summary``
    tables and returns the top hit's page content from each as a
    ``(text, summary)`` tuple.
    """
    connection = lancedb.connect(cfg.db_path)

    def _top_hit(table_name: str) -> str:
        # Wrap the raw table in a LangChain vector store and take the best match.
        store = LanceDB(connection.open_table(table_name), cfg.emb_func)
        return store.similarity_search(query)[0].page_content

    return _top_hit("knowledge_graph_text"), _top_hit("knowledge_graph_summary")
if __name__ == "__main__":
    # Smoke test: check whether this sample passage is already embedded.
    input_val = """Animals are the most adorable and loving creatures existing on Earth. They might not be able to speak, but they can understand. They have a unique mode of interaction which is beyond human understanding. There are two types of animals: domestic and wild animals.
Domestic Animals | Domestic animals such as dogs, cows, cats, donkeys, mules and elephants are the ones which are used for the purpose of domestication. Wild animals refer to animals that are not normally domesticated and generally live in forests. They are important for their economic, survival, beauty, and scientific value.
Wild Animals | Wild animals provide various useful substances and animal products such as honey, leather, ivory, tusk, etc. They are of cultural asset and aesthetic value to humankind. Human life largely depends on wild animals for elementary requirements like the medicines we consume and the clothes we wear daily.
Nature and wildlife are largely associated with humans for several reasons, such as emotional and social issues. The balanced functioning of the biosphere depends on endless interactions among microorganisms, plants and animals. This has led to countless efforts by humans for the conservation of animals and to protect them from extinction. Animals have occupied a special place of preservation and veneration in various cultures worldwide."""
    print(check_if_embedding_exists(input_val))
    # Example of building summary embeddings, left disabled:
    #path = Path(r"C:\tmp\graph_desc\graph_desc_310150f8-a4a8-4ba9-b1c7-07bc5b4944d1.txt")
    #db = create_embeddings_summary(path)
    #print(db)
| [
"lancedb.connect"
] | [((410, 438), 'lancedb.connect', 'lancedb.connect', (['cfg.db_path'], {}), '(cfg.db_path)\n', (425, 438), False, 'import lancedb\n'), ((715, 743), 'lancedb.connect', 'lancedb.connect', (['cfg.db_path'], {}), '(cfg.db_path)\n', (730, 743), False, 'import lancedb\n'), ((1060, 1125), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': 'cfg.chunk_size', 'chunk_overlap': '(0)'}), '(chunk_size=cfg.chunk_size, chunk_overlap=0)\n', (1081, 1125), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1187, 1253), 'langchain.vectorstores.LanceDB.from_texts', 'LanceDB.from_texts', (['documents', 'cfg.emb_func'], {'connection': 'table_text'}), '(documents, cfg.emb_func, connection=table_text)\n', (1205, 1253), False, 'from langchain.vectorstores import LanceDB\n'), ((1345, 1373), 'lancedb.connect', 'lancedb.connect', (['cfg.db_path'], {}), '(cfg.db_path)\n', (1360, 1373), False, 'import lancedb\n'), ((1778, 1843), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': 'cfg.chunk_size', 'chunk_overlap': '(0)'}), '(chunk_size=cfg.chunk_size, chunk_overlap=0)\n', (1799, 1843), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1915, 1982), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['doc', 'cfg.emb_func'], {'connection': 'table_summary'}), '(doc, cfg.emb_func, connection=table_summary)\n', (1937, 1982), False, 'from langchain.vectorstores import LanceDB\n'), ((2059, 2087), 'lancedb.connect', 'lancedb.connect', (['cfg.db_path'], {}), '(cfg.db_path)\n', (2074, 2087), False, 'import lancedb\n'), ((2224, 2255), 'langchain.vectorstores.LanceDB', 'LanceDB', (['tbl_text', 'cfg.emb_func'], {}), '(tbl_text, cfg.emb_func)\n', (2231, 2255), False, 'from langchain.vectorstores import LanceDB\n'), ((2387, 2421), 'langchain.vectorstores.LanceDB', 'LanceDB', (['tbl_summary', 'cfg.emb_func'], {}), '(tbl_summary, cfg.emb_func)\n', (2394, 
2421), False, 'from langchain.vectorstores import LanceDB\n'), ((871, 910), 'knowledge_graph.configuration.config.cfg.emb_func.embed_query', 'cfg.emb_func.embed_query', (['"""Placeholder"""'], {}), "('Placeholder')\n", (895, 910), False, 'from knowledge_graph.configuration.config import cfg\n'), ((1511, 1550), 'knowledge_graph.configuration.config.cfg.emb_func.embed_query', 'cfg.emb_func.embed_query', (['"""Placeholder"""'], {}), "('Placeholder')\n", (1535, 1550), False, 'from knowledge_graph.configuration.config import cfg\n')] |
from glob import glob
from os.path import basename
from pathlib import Path
import chromadb
import lancedb
import pandas as pd
import torch
from chromadb.utils import embedding_functions
from lancedb.embeddings import EmbeddingFunctionRegistry
from lancedb.pydantic import LanceModel, Vector
from loguru import logger
from rich import print
from rich.progress import track
MODEL_NAME = "all-distilroberta-v1"  # sentence-transformers checkpoint used for embeddings
DB_PATH = "db/lancedb-test"  # on-disk path shared by the LanceDB and Chroma clients
TABLE_NAME = COLLECTION_NAME = "test"  # one name reused for the LanceDB table and Chroma collection

# Resolve the sentence-transformers embedding function once at import time,
# preferring GPU when available.
registry = EmbeddingFunctionRegistry.get_instance()
func = registry.get("sentence-transformers").create(
    name="all-distilroberta-v1", device="cuda" if torch.cuda.is_available() else "cpu"
)
class Document(LanceModel):
    """LanceDB row schema: a text chunk, its embedding, and the source URL."""

    # SourceField/VectorField wire automatic embedding of `document` into `embedding`.
    document: str = func.SourceField()
    embedding: Vector(func.ndims()) = func.VectorField()
    source: str
def get_collection() -> chromadb.Collection:
    """Return the Chroma collection, indexing the crawled CSVs on first use.

    Tries to open an existing collection; if that fails, creates it and embeds
    every "Section Content" row from the ``crawled/*.csv`` files, using the
    basename of each file's first URL plus a row index as the document id.
    """
    chroma_client = chromadb.PersistentClient(DB_PATH)
    try:
        collection = chroma_client.get_collection(name=COLLECTION_NAME)
    except Exception as e:  # chroma raises when the collection does not exist yet
        logger.exception(e)
        logger.warning("Indexing documents...")
        collection = chroma_client.create_collection(name=COLLECTION_NAME)
        csvs = glob("crawled/*.csv")
        sentence_transformer_ef = (
            embedding_functions.SentenceTransformerEmbeddingFunction(
                model_name=MODEL_NAME
            )
        )
        # (removed an unused `data = []` accumulator that was never appended to)
        for csv in track(csvs):
            df = pd.read_csv(csv)
            if len(df) == 0:  # skip empty crawl results
                continue
            urls, documents = df["URL"].tolist(), df["Section Content"].tolist()
            embeddings = sentence_transformer_ef(documents)
            assert len(urls) == len(documents) == len(embeddings)
            base = basename(urls[0])
            collection.add(
                embeddings=embeddings,
                documents=documents,
                metadatas=[{"source": url} for url in urls],
                ids=[f"{base}_{i}" for i in range(len(documents))],
            )
    return collection
def get_table():
    """Open and return the existing LanceDB table named ``TABLE_NAME``."""
    # DB_PATH is a string; the previous `DB_PATH[:]` slice only made a pointless copy.
    db = lancedb.connect(DB_PATH)
    return db.open_table(TABLE_NAME)
| [
"lancedb.connect",
"lancedb.embeddings.EmbeddingFunctionRegistry.get_instance"
] | [((489, 529), 'lancedb.embeddings.EmbeddingFunctionRegistry.get_instance', 'EmbeddingFunctionRegistry.get_instance', ([], {}), '()\n', (527, 529), False, 'from lancedb.embeddings import EmbeddingFunctionRegistry\n'), ((881, 915), 'chromadb.PersistentClient', 'chromadb.PersistentClient', (['DB_PATH'], {}), '(DB_PATH)\n', (906, 915), False, 'import chromadb\n'), ((2082, 2102), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2097, 2102), False, 'import lancedb\n'), ((633, 658), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (656, 658), False, 'import torch\n'), ((1033, 1052), 'loguru.logger.exception', 'logger.exception', (['e'], {}), '(e)\n', (1049, 1052), False, 'from loguru import logger\n'), ((1061, 1100), 'loguru.logger.warning', 'logger.warning', (['"""Indexing documents..."""'], {}), "('Indexing documents...')\n", (1075, 1100), False, 'from loguru import logger\n'), ((1191, 1212), 'glob.glob', 'glob', (['"""crawled/*.csv"""'], {}), "('crawled/*.csv')\n", (1195, 1212), False, 'from glob import glob\n'), ((1261, 1340), 'chromadb.utils.embedding_functions.SentenceTransformerEmbeddingFunction', 'embedding_functions.SentenceTransformerEmbeddingFunction', ([], {'model_name': 'MODEL_NAME'}), '(model_name=MODEL_NAME)\n', (1317, 1340), False, 'from chromadb.utils import embedding_functions\n'), ((1418, 1429), 'rich.progress.track', 'track', (['csvs'], {}), '(csvs)\n', (1423, 1429), False, 'from rich.progress import track\n'), ((1448, 1464), 'pandas.read_csv', 'pd.read_csv', (['csv'], {}), '(csv)\n', (1459, 1464), True, 'import pandas as pd\n'), ((1745, 1762), 'os.path.basename', 'basename', (['urls[0]'], {}), '(urls[0])\n', (1753, 1762), False, 'from os.path import basename\n')] |
import getpass
from typing import List
import cv2
import numpy as np
import pandas as pd
from ultralytics.data.augment import LetterBox
from ultralytics.utils import LOGGER as logger
from ultralytics.utils import SETTINGS
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.ops import xyxy2xywh
from ultralytics.utils.plotting import plot_images
def get_table_schema(vector_size):
    """Build and return the LanceModel schema for the explorer table.

    Args:
        vector_size (int): Dimensionality of the embedding `vector` column.
    """
    from lancedb.pydantic import LanceModel, Vector

    class Schema(LanceModel):
        im_file: str
        labels: List[str]
        cls: List[int]
        bboxes: List[List[float]]
        masks: List[List[List[int]]]
        keypoints: List[List[List[float]]]
        vector: Vector(vector_size)

    return Schema
def get_sim_index_schema():
    """Return a LanceModel schema for the similarity-index table
    (per-image index, file path, match count, and similar image files)."""
    from lancedb.pydantic import LanceModel

    class Schema(LanceModel):
        idx: int
        im_file: str
        count: int
        sim_im_files: List[str]

    return Schema
def sanitize_batch(batch, dataset_info):
    """Normalize a dataloader batch in place: tensors become plain lists,
    boxes are re-ordered by class id, and human-readable labels are attached."""
    batch['cls'] = batch['cls'].flatten().int().tolist()
    # Pair each box with its class and (stably) sort the pairs by class id.
    pairs = sorted(zip(batch['bboxes'].tolist(), batch['cls']), key=lambda pair: pair[1])
    batch['bboxes'] = [pair[0] for pair in pairs]
    batch['cls'] = [pair[1] for pair in pairs]
    batch['labels'] = [dataset_info['names'][c] for c in batch['cls']]
    # Optional annotation fields fall back to a single empty nested list when absent.
    batch['masks'] = batch['masks'].tolist() if 'masks' in batch else [[[]]]
    batch['keypoints'] = batch['keypoints'].tolist() if 'keypoints' in batch else [[[]]]
    return batch
def plot_query_result(similar_set, plot_labels=True):
    """
    Plot images from the similar set.
    Args:
        similar_set (list): Pyarrow or pandas object containing the similar data points
        plot_labels (bool): Whether to plot labels or not
    """
    similar_set = similar_set.to_dict(
        orient='list') if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict()
    # Sentinel empty structures produced by sanitize_batch mean "no annotations".
    empty_masks = [[[]]]
    empty_boxes = [[]]
    images = similar_set.get('im_file', [])
    bboxes = similar_set.get('bboxes', []) if similar_set.get('bboxes') is not empty_boxes else []
    # NOTE(review): .get('masks')[0] raises TypeError if the key is absent — confirm callers always supply it.
    masks = similar_set.get('masks') if similar_set.get('masks')[0] != empty_masks else []
    kpts = similar_set.get('keypoints') if similar_set.get('keypoints')[0] != empty_masks else []
    cls = similar_set.get('cls', [])
    plot_size = 640  # target letterbox size in pixels
    imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], []
    for i, imf in enumerate(images):
        im = cv2.imread(imf)
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)  # cv2 loads BGR; plotting expects RGB
        h, w = im.shape[:2]
        r = min(plot_size / h, plot_size / w)  # letterbox scale factor
        imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1))  # HWC -> CHW
        if plot_labels:
            if len(bboxes) > i and len(bboxes[i]) > 0:
                box = np.array(bboxes[i], dtype=np.float32)
                # Rescale xyxy coordinates to the letterboxed image.
                box[:, [0, 2]] *= r
                box[:, [1, 3]] *= r
                plot_boxes.append(box)
            if len(masks) > i and len(masks[i]) > 0:
                mask = np.array(masks[i], dtype=np.uint8)[0]
                plot_masks.append(LetterBox(plot_size, center=False)(image=mask))
            if len(kpts) > i and kpts[i] is not None:
                kpt = np.array(kpts[i], dtype=np.float32)
                kpt[:, :, :2] *= r  # scale x/y only; leave the third channel untouched
                plot_kpts.append(kpt)
        # One batch index per box of image i so plot_images can group annotations.
        batch_idx.append(np.ones(len(np.array(bboxes[i], dtype=np.float32))) * i)
    imgs = np.stack(imgs, axis=0)
    masks = np.stack(plot_masks, axis=0) if len(plot_masks) > 0 else np.zeros(0, dtype=np.uint8)
    kpts = np.concatenate(plot_kpts, axis=0) if len(plot_kpts) > 0 else np.zeros((0, 51), dtype=np.float32)
    boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if len(plot_boxes) > 0 else np.zeros(0, dtype=np.float32)
    batch_idx = np.concatenate(batch_idx, axis=0)
    cls = np.concatenate([np.array(c, dtype=np.int32) for c in cls], axis=0)
    return plot_images(imgs,
                       batch_idx,
                       cls,
                       bboxes=boxes,
                       masks=masks,
                       kpts=kpts,
                       max_subplots=len(images),
                       save=False,
                       threaded=False)
def prompt_sql_query(query):
    """Use OpenAI (gpt-3.5-turbo) to turn a natural-language request into one SQL query over 'table'.

    Interactively prompts for an API key if none is stored in SETTINGS.
    """
    check_requirements('openai>=1.6.1')
    from openai import OpenAI

    if not SETTINGS['openai_api_key']:
        logger.warning('OpenAI API key not found in settings. Please enter your API key below.')
        openai_api_key = getpass.getpass('OpenAI API key: ')
        SETTINGS.update({'openai_api_key': openai_api_key})
    openai = OpenAI(api_key=SETTINGS['openai_api_key'])

    messages = [
        {
            'role':
            'system',
            'content':
            '''
                You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query based on
                the following schema and a user request. You only need to output the format with fixed selection
                statement that selects everything from "'table'", like `SELECT * from 'table'`

                Schema:
                im_file: string not null
                labels: list<item: string> not null
             child 0, item: string
                cls: list<item: int64> not null
             child 0, item: int64
                bboxes: list<item: list<item: double>> not null
             child 0, item: list<item: double>
                 child 0, item: double
                masks: list<item: list<item: list<item: int64>>> not null
             child 0, item: list<item: list<item: int64>>
                 child 0, item: list<item: int64>
                     child 0, item: int64
                keypoints: list<item: list<item: list<item: double>>> not null
             child 0, item: list<item: list<item: double>>
                 child 0, item: list<item: double>
                     child 0, item: double
                vector: fixed_size_list<item: float>[256] not null
             child 0, item: float

                Some details about the schema:
                - the "labels" column contains the string values like 'person' and 'dog' for the respective objects
                    in each image
                - the "cls" column contains the integer values on these classes that map them the labels

                Example of a correct query:
                request - Get all data points that contain 2 or more people and at least one dog
                correct query-
                SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2  AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1;
            '''},
        {
            'role': 'user',
            'content': f'{query}'}, ]

    response = openai.chat.completions.create(model='gpt-3.5-turbo', messages=messages)
    return response.choices[0].message.content
| [
"lancedb.pydantic.Vector"
] | [((3411, 3433), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (3419, 3433), True, 'import numpy as np\n'), ((3771, 3804), 'numpy.concatenate', 'np.concatenate', (['batch_idx'], {'axis': '(0)'}), '(batch_idx, axis=0)\n', (3785, 3804), True, 'import numpy as np\n'), ((4239, 4274), 'ultralytics.utils.checks.check_requirements', 'check_requirements', (['"""openai>=1.6.1"""'], {}), "('openai>=1.6.1')\n", (4257, 4274), False, 'from ultralytics.utils.checks import check_requirements\n'), ((4576, 4618), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "SETTINGS['openai_api_key']"}), "(api_key=SETTINGS['openai_api_key'])\n", (4582, 4618), False, 'from openai import OpenAI\n'), ((695, 714), 'lancedb.pydantic.Vector', 'Vector', (['vector_size'], {}), '(vector_size)\n', (701, 714), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2463, 2478), 'cv2.imread', 'cv2.imread', (['imf'], {}), '(imf)\n', (2473, 2478), False, 'import cv2\n'), ((2492, 2527), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (2504, 2527), False, 'import cv2\n'), ((3446, 3474), 'numpy.stack', 'np.stack', (['plot_masks'], {'axis': '(0)'}), '(plot_masks, axis=0)\n', (3454, 3474), True, 'import numpy as np\n'), ((3503, 3530), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.uint8'}), '(0, dtype=np.uint8)\n', (3511, 3530), True, 'import numpy as np\n'), ((3542, 3575), 'numpy.concatenate', 'np.concatenate', (['plot_kpts'], {'axis': '(0)'}), '(plot_kpts, axis=0)\n', (3556, 3575), True, 'import numpy as np\n'), ((3603, 3638), 'numpy.zeros', 'np.zeros', (['(0, 51)'], {'dtype': 'np.float32'}), '((0, 51), dtype=np.float32)\n', (3611, 3638), True, 'import numpy as np\n'), ((3725, 3754), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (3733, 3754), True, 'import numpy as np\n'), ((4353, 4446), 'ultralytics.utils.LOGGER.warning', 'logger.warning', (['"""OpenAI API key not found in 
settings. Please enter your API key below."""'], {}), "(\n 'OpenAI API key not found in settings. Please enter your API key below.')\n", (4367, 4446), True, 'from ultralytics.utils import LOGGER as logger\n'), ((4467, 4502), 'getpass.getpass', 'getpass.getpass', (['"""OpenAI API key: """'], {}), "('OpenAI API key: ')\n", (4482, 4502), False, 'import getpass\n'), ((4511, 4562), 'ultralytics.utils.SETTINGS.update', 'SETTINGS.update', (["{'openai_api_key': openai_api_key}"], {}), "({'openai_api_key': openai_api_key})\n", (4526, 4562), False, 'from ultralytics.utils import SETTINGS\n'), ((3661, 3695), 'numpy.concatenate', 'np.concatenate', (['plot_boxes'], {'axis': '(0)'}), '(plot_boxes, axis=0)\n', (3675, 3695), True, 'import numpy as np\n'), ((3831, 3858), 'numpy.array', 'np.array', (['c'], {'dtype': 'np.int32'}), '(c, dtype=np.int32)\n', (3839, 3858), True, 'import numpy as np\n'), ((2788, 2825), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (2796, 2825), True, 'import numpy as np\n'), ((3209, 3244), 'numpy.array', 'np.array', (['kpts[i]'], {'dtype': 'np.float32'}), '(kpts[i], dtype=np.float32)\n', (3217, 3244), True, 'import numpy as np\n'), ((3013, 3047), 'numpy.array', 'np.array', (['masks[i]'], {'dtype': 'np.uint8'}), '(masks[i], dtype=np.uint8)\n', (3021, 3047), True, 'import numpy as np\n'), ((2622, 2656), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (2631, 2656), False, 'from ultralytics.data.augment import LetterBox\n'), ((3085, 3119), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (3094, 3119), False, 'from ultralytics.data.augment import LetterBox\n'), ((3355, 3392), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3363, 3392), True, 'import numpy as np\n')] |
# Ultralytics YOLO 🚀, AGPL-3.0 license
import getpass
from typing import List
import cv2
import numpy as np
import pandas as pd
from ultralytics.data.augment import LetterBox
from ultralytics.utils import LOGGER as logger
from ultralytics.utils import SETTINGS
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.ops import xyxy2xywh
from ultralytics.utils.plotting import plot_images
def get_table_schema(vector_size):
    """Extracts and returns the schema of a database table.

    Args:
        vector_size (int): Dimensionality of the embedding `vector` column.
    """
    from lancedb.pydantic import LanceModel, Vector

    class Schema(LanceModel):
        im_file: str  # path to the source image
        labels: List[str]  # class names, parallel to `cls`
        cls: List[int]  # class ids
        bboxes: List[List[float]]  # per-object boxes
        masks: List[List[List[int]]]  # per-object masks
        keypoints: List[List[List[float]]]  # per-object keypoints
        vector: Vector(vector_size)  # embedding used for similarity search

    return Schema
def get_sim_index_schema():
    """Return a LanceModel schema for the similarity-index table
    (per-image index, file path, match count, and similar image files)."""
    from lancedb.pydantic import LanceModel

    class Schema(LanceModel):
        idx: int
        im_file: str
        count: int
        sim_im_files: List[str]

    return Schema
def sanitize_batch(batch, dataset_info):
    """Normalize a dataloader batch in place: tensors become plain lists,
    boxes are re-ordered by class id, and human-readable labels are attached."""
    batch["cls"] = batch["cls"].flatten().int().tolist()
    # Pair each box with its class and (stably) sort the pairs by class id.
    pairs = sorted(zip(batch["bboxes"].tolist(), batch["cls"]), key=lambda pair: pair[1])
    batch["bboxes"] = [pair[0] for pair in pairs]
    batch["cls"] = [pair[1] for pair in pairs]
    batch["labels"] = [dataset_info["names"][c] for c in batch["cls"]]
    # Optional annotation fields fall back to a single empty nested list when absent.
    batch["masks"] = batch["masks"].tolist() if "masks" in batch else [[[]]]
    batch["keypoints"] = batch["keypoints"].tolist() if "keypoints" in batch else [[[]]]
    return batch
def plot_query_result(similar_set, plot_labels=True):
    """
    Plot images from the similar set.
    Args:
        similar_set (list): Pyarrow or pandas object containing the similar data points
        plot_labels (bool): Whether to plot labels or not
    """
    similar_set = (
        similar_set.to_dict(orient="list") if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict()
    )
    # Sentinel empty structures produced by sanitize_batch mean "no annotations".
    empty_masks = [[[]]]
    empty_boxes = [[]]
    images = similar_set.get("im_file", [])
    bboxes = similar_set.get("bboxes", []) if similar_set.get("bboxes") is not empty_boxes else []
    # NOTE(review): .get("masks")[0] raises TypeError if the key is absent — confirm callers always supply it.
    masks = similar_set.get("masks") if similar_set.get("masks")[0] != empty_masks else []
    kpts = similar_set.get("keypoints") if similar_set.get("keypoints")[0] != empty_masks else []
    cls = similar_set.get("cls", [])
    plot_size = 640  # target letterbox size in pixels
    imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], []
    for i, imf in enumerate(images):
        im = cv2.imread(imf)
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)  # cv2 loads BGR; plotting expects RGB
        h, w = im.shape[:2]
        r = min(plot_size / h, plot_size / w)  # letterbox scale factor
        imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1))  # HWC -> CHW
        if plot_labels:
            if len(bboxes) > i and len(bboxes[i]) > 0:
                box = np.array(bboxes[i], dtype=np.float32)
                # Rescale xyxy coordinates to the letterboxed image.
                box[:, [0, 2]] *= r
                box[:, [1, 3]] *= r
                plot_boxes.append(box)
            if len(masks) > i and len(masks[i]) > 0:
                mask = np.array(masks[i], dtype=np.uint8)[0]
                plot_masks.append(LetterBox(plot_size, center=False)(image=mask))
            if len(kpts) > i and kpts[i] is not None:
                kpt = np.array(kpts[i], dtype=np.float32)
                kpt[:, :, :2] *= r  # scale x/y only; leave the third channel untouched
                plot_kpts.append(kpt)
        # One batch index per box of image i so plot_images can group annotations.
        batch_idx.append(np.ones(len(np.array(bboxes[i], dtype=np.float32))) * i)
    imgs = np.stack(imgs, axis=0)
    masks = np.stack(plot_masks, axis=0) if len(plot_masks) > 0 else np.zeros(0, dtype=np.uint8)
    kpts = np.concatenate(plot_kpts, axis=0) if len(plot_kpts) > 0 else np.zeros((0, 51), dtype=np.float32)
    boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if len(plot_boxes) > 0 else np.zeros(0, dtype=np.float32)
    batch_idx = np.concatenate(batch_idx, axis=0)
    cls = np.concatenate([np.array(c, dtype=np.int32) for c in cls], axis=0)
    return plot_images(
        imgs, batch_idx, cls, bboxes=boxes, masks=masks, kpts=kpts, max_subplots=len(images), save=False, threaded=False
    )
def prompt_sql_query(query):
    """Use OpenAI (gpt-3.5-turbo) to turn a natural-language request into one SQL query over 'table'.

    Interactively prompts for an API key if none is stored in SETTINGS.
    (Previous docstring was a copy-paste from plot_query_result.)
    """
    check_requirements("openai>=1.6.1")
    from openai import OpenAI

    if not SETTINGS["openai_api_key"]:
        logger.warning("OpenAI API key not found in settings. Please enter your API key below.")
        openai_api_key = getpass.getpass("OpenAI API key: ")
        SETTINGS.update({"openai_api_key": openai_api_key})
    openai = OpenAI(api_key=SETTINGS["openai_api_key"])

    messages = [
        {
            "role": "system",
            "content": """
                You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query based on
                the following schema and a user request. You only need to output the format with fixed selection
                statement that selects everything from "'table'", like `SELECT * from 'table'`

                Schema:
                im_file: string not null
                labels: list<item: string> not null
             child 0, item: string
                cls: list<item: int64> not null
             child 0, item: int64
                bboxes: list<item: list<item: double>> not null
             child 0, item: list<item: double>
                 child 0, item: double
                masks: list<item: list<item: list<item: int64>>> not null
             child 0, item: list<item: list<item: int64>>
                 child 0, item: list<item: int64>
                     child 0, item: int64
                keypoints: list<item: list<item: list<item: double>>> not null
             child 0, item: list<item: list<item: double>>
                 child 0, item: list<item: double>
                     child 0, item: double
                vector: fixed_size_list<item: float>[256] not null
             child 0, item: float

                Some details about the schema:
                - the "labels" column contains the string values like 'person' and 'dog' for the respective objects
                    in each image
                - the "cls" column contains the integer values on these classes that map them the labels

                Example of a correct query:
                request - Get all data points that contain 2 or more people and at least one dog
                correct query-
                SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2  AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1;
            """,
        },
        {"role": "user", "content": f"{query}"},
    ]

    response = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
    return response.choices[0].message.content
| [
"lancedb.pydantic.Vector"
] | [((3694, 3716), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (3702, 3716), True, 'import numpy as np\n'), ((4054, 4087), 'numpy.concatenate', 'np.concatenate', (['batch_idx'], {'axis': '(0)'}), '(batch_idx, axis=0)\n', (4068, 4087), True, 'import numpy as np\n'), ((4421, 4456), 'ultralytics.utils.checks.check_requirements', 'check_requirements', (['"""openai>=1.6.1"""'], {}), "('openai>=1.6.1')\n", (4439, 4456), False, 'from ultralytics.utils.checks import check_requirements\n'), ((4758, 4800), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "SETTINGS['openai_api_key']"}), "(api_key=SETTINGS['openai_api_key'])\n", (4764, 4800), False, 'from openai import OpenAI\n'), ((798, 817), 'lancedb.pydantic.Vector', 'Vector', (['vector_size'], {}), '(vector_size)\n', (804, 817), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2746, 2761), 'cv2.imread', 'cv2.imread', (['imf'], {}), '(imf)\n', (2756, 2761), False, 'import cv2\n'), ((2775, 2810), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (2787, 2810), False, 'import cv2\n'), ((3729, 3757), 'numpy.stack', 'np.stack', (['plot_masks'], {'axis': '(0)'}), '(plot_masks, axis=0)\n', (3737, 3757), True, 'import numpy as np\n'), ((3786, 3813), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.uint8'}), '(0, dtype=np.uint8)\n', (3794, 3813), True, 'import numpy as np\n'), ((3825, 3858), 'numpy.concatenate', 'np.concatenate', (['plot_kpts'], {'axis': '(0)'}), '(plot_kpts, axis=0)\n', (3839, 3858), True, 'import numpy as np\n'), ((3886, 3921), 'numpy.zeros', 'np.zeros', (['(0, 51)'], {'dtype': 'np.float32'}), '((0, 51), dtype=np.float32)\n', (3894, 3921), True, 'import numpy as np\n'), ((4008, 4037), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (4016, 4037), True, 'import numpy as np\n'), ((4535, 4628), 'ultralytics.utils.LOGGER.warning', 'logger.warning', (['"""OpenAI API key not found in 
settings. Please enter your API key below."""'], {}), "(\n 'OpenAI API key not found in settings. Please enter your API key below.')\n", (4549, 4628), True, 'from ultralytics.utils import LOGGER as logger\n'), ((4649, 4684), 'getpass.getpass', 'getpass.getpass', (['"""OpenAI API key: """'], {}), "('OpenAI API key: ')\n", (4664, 4684), False, 'import getpass\n'), ((4693, 4744), 'ultralytics.utils.SETTINGS.update', 'SETTINGS.update', (["{'openai_api_key': openai_api_key}"], {}), "({'openai_api_key': openai_api_key})\n", (4708, 4744), False, 'from ultralytics.utils import SETTINGS\n'), ((3944, 3978), 'numpy.concatenate', 'np.concatenate', (['plot_boxes'], {'axis': '(0)'}), '(plot_boxes, axis=0)\n', (3958, 3978), True, 'import numpy as np\n'), ((4114, 4141), 'numpy.array', 'np.array', (['c'], {'dtype': 'np.int32'}), '(c, dtype=np.int32)\n', (4122, 4141), True, 'import numpy as np\n'), ((3071, 3108), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3079, 3108), True, 'import numpy as np\n'), ((3492, 3527), 'numpy.array', 'np.array', (['kpts[i]'], {'dtype': 'np.float32'}), '(kpts[i], dtype=np.float32)\n', (3500, 3527), True, 'import numpy as np\n'), ((3296, 3330), 'numpy.array', 'np.array', (['masks[i]'], {'dtype': 'np.uint8'}), '(masks[i], dtype=np.uint8)\n', (3304, 3330), True, 'import numpy as np\n'), ((2905, 2939), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (2914, 2939), False, 'from ultralytics.data.augment import LetterBox\n'), ((3368, 3402), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (3377, 3402), False, 'from ultralytics.data.augment import LetterBox\n'), ((3638, 3675), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3646, 3675), True, 'import numpy as np\n')] |
from typing import Optional
from lancedb.pydantic import Vector
from pydantic import BaseModel, ConfigDict, Field, model_validator
class Wine(BaseModel):
    """Pydantic model for one raw wine-review record, with light input normalization."""

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        extra="allow",
        str_strip_whitespace=True,
        json_schema_extra={
            "example": {
                "id": 45100,
                "points": 85,
                "title": "Balduzzi 2012 Reserva Merlot (Maule Valley)",
                "description": "Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. Raisin and cooked berry flavors finish plump, with earthy notes.",
                "price": 10.0,
                "variety": "Merlot",
                "winery": "Balduzzi",
                "vineyard": "Reserva",
                "country": "Chile",
                "province": "Maule Valley",
                "region_1": "null",
                "region_2": "null",
                "taster_name": "Michael Schachner",
                "taster_twitter_handle": "@wineschach",
            }
        },
    )

    id: int
    points: int
    title: str
    description: Optional[str]
    price: Optional[float]
    variety: Optional[str]
    winery: Optional[str]
    # The raw data calls this field "designation"; it is exposed as `vineyard`.
    vineyard: Optional[str] = Field(..., alias="designation")
    country: Optional[str]
    province: Optional[str]
    region_1: Optional[str]
    region_2: Optional[str]
    taster_name: Optional[str]
    taster_twitter_handle: Optional[str]

    @model_validator(mode="before")
    def _fill_country_unknowns(cls, values):
        "Fill in missing country values with 'Unknown', as we always want this field to be queryable"
        country = values.get("country")
        if not country:
            values["country"] = "Unknown"
        return values

    @model_validator(mode="before")
    def _add_to_vectorize_fields(cls, values):
        "Add a field to_vectorize that will be used to create sentence embeddings"
        variety = values.get("variety", "")
        title = values.get("title", "")
        description = values.get("description", "")
        # Concatenate the non-empty parts; this string is what gets embedded downstream.
        to_vectorize = list(filter(None, [variety, title, description]))
        values["to_vectorize"] = " ".join(to_vectorize).strip()
        return values
class LanceModelWine(BaseModel):
    """Wine record as persisted to LanceDB: the `Wine` fields plus the
    concatenated `to_vectorize` text and its fixed 384-dim embedding vector."""

    # NOTE(review): inherits pydantic BaseModel rather than lancedb's LanceModel — confirm intended.
    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        extra="allow",
        str_strip_whitespace=True,
        json_schema_extra={
            "example": {
                "id": 45100,
                "points": 85,
                "title": "Balduzzi 2012 Reserva Merlot (Maule Valley)",
                "description": "Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. Raisin and cooked berry flavors finish plump, with earthy notes.",
                "price": 10.0,
                "variety": "Merlot",
                "winery": "Balduzzi",
                "vineyard": "Reserva",
                "country": "Chile",
                "province": "Maule Valley",
                "region_1": "null",
                "region_2": "null",
                "taster_name": "Michael Schachner",
                "taster_twitter_handle": "@wineschach",
            }
        },
    )

    id: int
    points: int
    title: str
    description: Optional[str]
    price: Optional[float]
    variety: Optional[str]
    winery: Optional[str]
    # The raw data calls this field "designation"; it is exposed as `vineyard`.
    vineyard: Optional[str] = Field(..., alias="designation")
    country: Optional[str]
    province: Optional[str]
    region_1: Optional[str]
    region_2: Optional[str]
    taster_name: Optional[str]
    taster_twitter_handle: Optional[str]
    to_vectorize: str  # text that was embedded into `vector`
    vector: Vector(384)  # fixed-size 384-dim embedding column
| [
"lancedb.pydantic.Vector"
] | [((176, 881), 'pydantic.ConfigDict', 'ConfigDict', ([], {'populate_by_name': '(True)', 'validate_assignment': '(True)', 'extra': '"""allow"""', 'str_strip_whitespace': '(True)', 'json_schema_extra': "{'example': {'id': 45100, 'points': 85, 'title':\n 'Balduzzi 2012 Reserva Merlot (Maule Valley)', 'description':\n 'Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. Raisin and cooked berry flavors finish plump, with earthy notes.'\n , 'price': 10.0, 'variety': 'Merlot', 'winery': 'Balduzzi', 'vineyard':\n 'Reserva', 'country': 'Chile', 'province': 'Maule Valley', 'region_1':\n 'null', 'region_2': 'null', 'taster_name': 'Michael Schachner',\n 'taster_twitter_handle': '@wineschach'}}"}), "(populate_by_name=True, validate_assignment=True, extra='allow',\n str_strip_whitespace=True, json_schema_extra={'example': {'id': 45100,\n 'points': 85, 'title': 'Balduzzi 2012 Reserva Merlot (Maule Valley)',\n 'description':\n 'Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. 
Raisin and cooked berry flavors finish plump, with earthy notes.'\n , 'price': 10.0, 'variety': 'Merlot', 'winery': 'Balduzzi', 'vineyard':\n 'Reserva', 'country': 'Chile', 'province': 'Maule Valley', 'region_1':\n 'null', 'region_2': 'null', 'taster_name': 'Michael Schachner',\n 'taster_twitter_handle': '@wineschach'}})\n", (186, 881), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((1342, 1373), 'pydantic.Field', 'Field', (['...'], {'alias': '"""designation"""'}), "(..., alias='designation')\n", (1347, 1373), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((1563, 1593), 'pydantic.model_validator', 'model_validator', ([], {'mode': '"""before"""'}), "(mode='before')\n", (1578, 1593), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((1875, 1905), 'pydantic.model_validator', 'model_validator', ([], {'mode': '"""before"""'}), "(mode='before')\n", (1890, 1905), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((2385, 3090), 'pydantic.ConfigDict', 'ConfigDict', ([], {'populate_by_name': '(True)', 'validate_assignment': '(True)', 'extra': '"""allow"""', 'str_strip_whitespace': '(True)', 'json_schema_extra': "{'example': {'id': 45100, 'points': 85, 'title':\n 'Balduzzi 2012 Reserva Merlot (Maule Valley)', 'description':\n 'Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. 
Raisin and cooked berry flavors finish plump, with earthy notes.'\n , 'price': 10.0, 'variety': 'Merlot', 'winery': 'Balduzzi', 'vineyard':\n 'Reserva', 'country': 'Chile', 'province': 'Maule Valley', 'region_1':\n 'null', 'region_2': 'null', 'taster_name': 'Michael Schachner',\n 'taster_twitter_handle': '@wineschach'}}"}), "(populate_by_name=True, validate_assignment=True, extra='allow',\n str_strip_whitespace=True, json_schema_extra={'example': {'id': 45100,\n 'points': 85, 'title': 'Balduzzi 2012 Reserva Merlot (Maule Valley)',\n 'description':\n 'Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. Raisin and cooked berry flavors finish plump, with earthy notes.'\n , 'price': 10.0, 'variety': 'Merlot', 'winery': 'Balduzzi', 'vineyard':\n 'Reserva', 'country': 'Chile', 'province': 'Maule Valley', 'region_1':\n 'null', 'region_2': 'null', 'taster_name': 'Michael Schachner',\n 'taster_twitter_handle': '@wineschach'}})\n", (2395, 3090), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((3551, 3582), 'pydantic.Field', 'Field', (['...'], {'alias': '"""designation"""'}), "(..., alias='designation')\n", (3556, 3582), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((3800, 3811), 'lancedb.pydantic.Vector', 'Vector', (['(384)'], {}), '(384)\n', (3806, 3811), False, 'from lancedb.pydantic import Vector\n')] |
# Ultralytics YOLO 🚀, AGPL-3.0 license
import getpass
from typing import List
import cv2
import numpy as np
import pandas as pd
from engine.data.augment import LetterBox
from engine.utils import LOGGER as logger
from engine.utils import SETTINGS
from engine.utils.checks import check_requirements
from engine.utils.ops import xyxy2xywh
from engine.utils.plotting import plot_images
def get_table_schema(vector_size):
    """Extracts and returns the schema of a database table.
    Args:
        vector_size (int): Dimensionality of the per-row embedding stored in `vector`.
    Returns:
        type: A `LanceModel` subclass describing one image row.
    """
    from lancedb.pydantic import LanceModel, Vector
    class Schema(LanceModel):
        im_file: str  # path to the source image
        labels: List[str]  # class names, aligned with `cls`
        cls: List[int]  # integer class ids
        bboxes: List[List[float]]  # one 4-float box per object — presumably xyxy; TODO confirm
        masks: List[List[List[int]]]
        keypoints: List[List[List[float]]]
        vector: Vector(vector_size)  # embedding column
    return Schema
def get_sim_index_schema():
    """Returns a LanceModel schema for the similarity-index table (no vector column)."""
    from lancedb.pydantic import LanceModel
    class Schema(LanceModel):
        idx: int  # row index in the source table
        im_file: str  # image path this entry refers to
        count: int  # number of similar images found
        sim_im_files: List[str]  # paths of the similar images
    return Schema
def sanitize_batch(batch, dataset_info):
    """Normalize a raw batch in place: tensors become plain lists and boxes are sorted by class id."""
    class_ids = batch["cls"].flatten().int().tolist()
    # Pair each box with its class id and order the pairs by class (stable sort keeps tie order).
    ordered = sorted(zip(batch["bboxes"].tolist(), class_ids), key=lambda pair: pair[1])
    batch["bboxes"] = [pair[0] for pair in ordered]
    batch["cls"] = [pair[1] for pair in ordered]
    batch["labels"] = [dataset_info["names"][idx] for idx in batch["cls"]]
    # Optional keys fall back to a nested-empty placeholder so downstream code can still index them.
    batch["masks"] = batch["masks"].tolist() if "masks" in batch else [[[]]]
    batch["keypoints"] = batch["keypoints"].tolist() if "keypoints" in batch else [[[]]]
    return batch
def plot_query_result(similar_set, plot_labels=True):
    """
    Plot images from the similar set.
    Args:
        similar_set (list): Pyarrow or pandas object containing the similar data points
        plot_labels (bool): Whether to plot labels or not
    """
    # Normalize both supported inputs (pandas DataFrame / pyarrow Table) to a plain dict of columns.
    similar_set = (
        similar_set.to_dict(orient="list") if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict()
    )
    empty_masks = [[[]]]
    empty_boxes = [[]]
    images = similar_set.get("im_file", [])
    # NOTE(review): `is not empty_boxes` is an identity test against the list built just above,
    # so it is always True and the `else []` branch is unreachable. The mask/keypoint checks
    # below compare the first row with `!=` instead — confirm the intended placeholder check.
    # (Note the loop below indexes `bboxes[i]` unconditionally, so emptying it would crash.)
    bboxes = similar_set.get("bboxes", []) if similar_set.get("bboxes") is not empty_boxes else []
    masks = similar_set.get("masks") if similar_set.get("masks")[0] != empty_masks else []
    kpts = similar_set.get("keypoints") if similar_set.get("keypoints")[0] != empty_masks else []
    cls = similar_set.get("cls", [])
    plot_size = 640
    imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], []
    for i, imf in enumerate(images):
        im = cv2.imread(imf)
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        h, w = im.shape[:2]
        # Scale factor applied to labels so they match the letterboxed image size.
        r = min(plot_size / h, plot_size / w)
        imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1))
        if plot_labels:
            if len(bboxes) > i and len(bboxes[i]) > 0:
                box = np.array(bboxes[i], dtype=np.float32)
                box[:, [0, 2]] *= r
                box[:, [1, 3]] *= r
                plot_boxes.append(box)
            if len(masks) > i and len(masks[i]) > 0:
                mask = np.array(masks[i], dtype=np.uint8)[0]
                plot_masks.append(LetterBox(plot_size, center=False)(image=mask))
            if len(kpts) > i and kpts[i] is not None:
                kpt = np.array(kpts[i], dtype=np.float32)
                kpt[:, :, :2] *= r
                plot_kpts.append(kpt)
        # One batch index per box of image i; assumes `bboxes` has an entry per image — TODO confirm.
        batch_idx.append(np.ones(len(np.array(bboxes[i], dtype=np.float32))) * i)
    imgs = np.stack(imgs, axis=0)
    masks = np.stack(plot_masks, axis=0) if plot_masks else np.zeros(0, dtype=np.uint8)
    kpts = np.concatenate(plot_kpts, axis=0) if plot_kpts else np.zeros((0, 51), dtype=np.float32)
    boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if plot_boxes else np.zeros(0, dtype=np.float32)
    batch_idx = np.concatenate(batch_idx, axis=0)
    cls = np.concatenate([np.array(c, dtype=np.int32) for c in cls], axis=0)
    return plot_images(
        imgs, batch_idx, cls, bboxes=boxes, masks=masks, kpts=kpts, max_subplots=len(images), save=False, threaded=False
    )
def prompt_sql_query(query):
    """Turn a natural-language request into a single SQL query over 'table' via OpenAI chat completion.
    Args:
        query (str): Free-form user request describing the rows to select.
    Returns:
        str: The model's response content (expected to be one SQL statement).
    """
    check_requirements("openai>=1.6.1")  # lazy dependency check: openai only needed for this feature
    from openai import OpenAI
    # Prompt for an API key interactively if one is not stored in settings yet.
    if not SETTINGS["openai_api_key"]:
        logger.warning("OpenAI API key not found in settings. Please enter your API key below.")
        openai_api_key = getpass.getpass("OpenAI API key: ")
        SETTINGS.update({"openai_api_key": openai_api_key})
    openai = OpenAI(api_key=SETTINGS["openai_api_key"])
    messages = [
        {
            "role": "system",
            "content": """
                You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query based on
                the following schema and a user request. You only need to output the format with fixed selection
                statement that selects everything from "'table'", like `SELECT * from 'table'`
             Schema:
             im_file: string not null
             labels: list<item: string> not null
                child 0, item: string
             cls: list<item: int64> not null
                child 0, item: int64
             bboxes: list<item: list<item: double>> not null
                child 0, item: list<item: double>
                    child 0, item: double
             masks: list<item: list<item: list<item: int64>>> not null
                child 0, item: list<item: list<item: int64>>
                    child 0, item: list<item: int64>
                        child 0, item: int64
             keypoints: list<item: list<item: list<item: double>>> not null
                child 0, item: list<item: list<item: double>>
                    child 0, item: list<item: double>
                        child 0, item: double
             vector: fixed_size_list<item: float>[256] not null
                child 0, item: float
            Some details about the schema:
            - the "labels" column contains the string values like 'person' and 'dog' for the respective objects
                in each image
            - the "cls" column contains the integer values on these classes that map them the labels
            Example of a correct query:
            request - Get all data points that contain 2 or more people and at least one dog
            correct query-
            SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1;
            """,
        },
        {"role": "user", "content": f"{query}"},
    ]
    response = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
    return response.choices[0].message.content
| [
"lancedb.pydantic.Vector"
] | [((3664, 3686), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (3672, 3686), True, 'import numpy as np\n'), ((3997, 4030), 'numpy.concatenate', 'np.concatenate', (['batch_idx'], {'axis': '(0)'}), '(batch_idx, axis=0)\n', (4011, 4030), True, 'import numpy as np\n'), ((4364, 4399), 'engine.utils.checks.check_requirements', 'check_requirements', (['"""openai>=1.6.1"""'], {}), "('openai>=1.6.1')\n", (4382, 4399), False, 'from engine.utils.checks import check_requirements\n'), ((4701, 4743), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "SETTINGS['openai_api_key']"}), "(api_key=SETTINGS['openai_api_key'])\n", (4707, 4743), False, 'from openai import OpenAI\n'), ((768, 787), 'lancedb.pydantic.Vector', 'Vector', (['vector_size'], {}), '(vector_size)\n', (774, 787), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2716, 2731), 'cv2.imread', 'cv2.imread', (['imf'], {}), '(imf)\n', (2726, 2731), False, 'import cv2\n'), ((2745, 2780), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (2757, 2780), False, 'import cv2\n'), ((3699, 3727), 'numpy.stack', 'np.stack', (['plot_masks'], {'axis': '(0)'}), '(plot_masks, axis=0)\n', (3707, 3727), True, 'import numpy as np\n'), ((3747, 3774), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.uint8'}), '(0, dtype=np.uint8)\n', (3755, 3774), True, 'import numpy as np\n'), ((3786, 3819), 'numpy.concatenate', 'np.concatenate', (['plot_kpts'], {'axis': '(0)'}), '(plot_kpts, axis=0)\n', (3800, 3819), True, 'import numpy as np\n'), ((3838, 3873), 'numpy.zeros', 'np.zeros', (['(0, 51)'], {'dtype': 'np.float32'}), '((0, 51), dtype=np.float32)\n', (3846, 3873), True, 'import numpy as np\n'), ((3951, 3980), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (3959, 3980), True, 'import numpy as np\n'), ((4478, 4571), 'engine.utils.LOGGER.warning', 'logger.warning', (['"""OpenAI API key not found in settings. 
Please enter your API key below."""'], {}), "(\n 'OpenAI API key not found in settings. Please enter your API key below.')\n", (4492, 4571), True, 'from engine.utils import LOGGER as logger\n'), ((4592, 4627), 'getpass.getpass', 'getpass.getpass', (['"""OpenAI API key: """'], {}), "('OpenAI API key: ')\n", (4607, 4627), False, 'import getpass\n'), ((4636, 4687), 'engine.utils.SETTINGS.update', 'SETTINGS.update', (["{'openai_api_key': openai_api_key}"], {}), "({'openai_api_key': openai_api_key})\n", (4651, 4687), False, 'from engine.utils import SETTINGS\n'), ((3896, 3930), 'numpy.concatenate', 'np.concatenate', (['plot_boxes'], {'axis': '(0)'}), '(plot_boxes, axis=0)\n', (3910, 3930), True, 'import numpy as np\n'), ((4057, 4084), 'numpy.array', 'np.array', (['c'], {'dtype': 'np.int32'}), '(c, dtype=np.int32)\n', (4065, 4084), True, 'import numpy as np\n'), ((3041, 3078), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3049, 3078), True, 'import numpy as np\n'), ((3462, 3497), 'numpy.array', 'np.array', (['kpts[i]'], {'dtype': 'np.float32'}), '(kpts[i], dtype=np.float32)\n', (3470, 3497), True, 'import numpy as np\n'), ((3266, 3300), 'numpy.array', 'np.array', (['masks[i]'], {'dtype': 'np.uint8'}), '(masks[i], dtype=np.uint8)\n', (3274, 3300), True, 'import numpy as np\n'), ((2875, 2909), 'engine.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (2884, 2909), False, 'from engine.data.augment import LetterBox\n'), ((3338, 3372), 'engine.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (3347, 3372), False, 'from engine.data.augment import LetterBox\n'), ((3608, 3645), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3616, 3645), True, 'import numpy as np\n')] |
import json
import lancedb
import pytest
from lancedb.utils.events import _Events
@pytest.fixture(autouse=True)
def request_log_path(tmp_path):
    """Per-test file path where the mocked telemetry request payload is dumped."""
    return tmp_path.joinpath("request.json")
def mock_register_event(name: str, **kwargs):
    """Send an event straight through the _Events singleton, forced on with no rate limiting."""
    instance = _Events._instance
    if instance is None:
        instance = _Events()
        _Events._instance = instance
    instance.enabled = True
    instance.rate_limit = 0
    instance(name, **kwargs)
def test_event_reporting(monkeypatch, request_log_path, tmp_path) -> None:
    """End-to-end check that creating a table emits a telemetry event with the expected payload shape."""
    def mock_request(**kwargs):
        # Capture the would-be HTTP payload to disk instead of sending it.
        json_data = kwargs.get("json", {})
        with open(request_log_path, "w") as f:
            json.dump(json_data, f)
    monkeypatch.setattr(
        lancedb.table, "register_event", mock_register_event
    )  # Force enable registering events and strip exception handling
    monkeypatch.setattr(lancedb.utils.events, "threaded_request", mock_request)
    db = lancedb.connect(tmp_path)
    db.create_table(
        "test",
        data=[
            {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
            {"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
        ],
        mode="overwrite",
    )
    assert request_log_path.exists()  # test if event was registered
    with open(request_log_path, "r") as f:
        json_data = json.load(f)
    # TODO: don't hardcode these here. Instead create a module level json schema in
    # lancedb.utils.events for better evolvability
    batch_keys = ["api_key", "distinct_id", "batch"]
    event_keys = ["event", "properties", "timestamp", "distinct_id"]
    property_keys = ["cli", "install", "platforms", "version", "session_id"]
    assert all([key in json_data for key in batch_keys])
    assert all([key in json_data["batch"][0] for key in event_keys])
    assert all([key in json_data["batch"][0]["properties"] for key in property_keys])
    # cleanup & reset
    monkeypatch.undo()
    _Events._instance = None
| [
"lancedb.connect",
"lancedb.utils.events._Events",
"lancedb.utils.events._Events._instance"
] | [((86, 114), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (100, 114), False, 'import pytest\n'), ((383, 416), 'lancedb.utils.events._Events._instance', '_Events._instance', (['name'], {}), '(name, **kwargs)\n', (400, 416), False, 'from lancedb.utils.events import _Events\n'), ((899, 924), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (914, 924), False, 'import lancedb\n'), ((294, 303), 'lancedb.utils.events._Events', '_Events', ([], {}), '()\n', (301, 303), False, 'from lancedb.utils.events import _Events\n'), ((1287, 1299), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1296, 1299), False, 'import json\n'), ((628, 651), 'json.dump', 'json.dump', (['json_data', 'f'], {}), '(json_data, f)\n', (637, 651), False, 'import json\n')] |
import os
import lancedb
import numpy as np
import pytest
from lancedb.conftest import MockTextEmbeddingFunction # noqa
from lancedb.embeddings import EmbeddingFunctionRegistry
from lancedb.pydantic import LanceModel, Vector
from lancedb.rerankers import (
CohereReranker,
ColbertReranker,
CrossEncoderReranker,
OpenaiReranker,
)
from lancedb.table import LanceTable
# Tests rely on FTS index
pytest.importorskip("lancedb.fts")
def get_test_table(tmp_path):
    """Create a populated LanceDB table (with an FTS index) plus its pydantic model for reranker tests."""
    db = lancedb.connect(tmp_path)
    # Create a LanceDB table schema with a vector and a text column
    emb = EmbeddingFunctionRegistry.get_instance().get("test")()
    class MyTable(LanceModel):
        text: str = emb.SourceField()
        vector: Vector(emb.ndims()) = emb.VectorField()
    # Initialize the table using the schema
    table = LanceTable.create(
        db,
        "my_table",
        schema=MyTable,
    )
    # Need to test with a bunch of phrases to make sure sorting is consistent
    phrases = [
        "great kid don't get cocky",
        "now that's a name I haven't heard in a long time",
        "if you strike me down I shall become more powerful than you imagine",
        "I find your lack of faith disturbing",
        "I've got a bad feeling about this",
        "never tell me the odds",
        "I am your father",
        "somebody has to save our skins",
        "New strategy R2 let the wookiee win",
        "Arrrrggghhhhhhh",
        "I see a mansard roof through the trees",
        "I see a salty message written in the eves",
        "the ground beneath my feet",
        "the hot garbage and concrete",
        "and now the tops of buildings",
        "everybody with a worried mind could never forgive the sight",
        "of wicked snakes inside a place you thought was dignified",
        "I don't wanna live like this",
        "but I don't wanna die",
        "The templars want control",
        "the brotherhood of assassins want freedom",
        "if only they could both see the world as it really is",
        "there would be peace",
        "but the war goes on",
        "altair's legacy was a warning",
        "Kratos had a son",
        "he was a god",
        "the god of war",
        "but his son was mortal",
        "there hasn't been a good battlefield game since 2142",
        "I wish they would make another one",
        "campains are not as good as they used to be",
        "Multiplayer and open world games have destroyed the single player experience",
        "Maybe the future is console games",
        "I don't know",
    ]
    # Add the phrases and vectors to the table
    table.add([{"text": p} for p in phrases])
    # Create a fts index
    table.create_fts_index("text")
    return table, MyTable
def test_linear_combination(tmp_path):
    """Check the default linear-combination reranker: normalization modes and score ordering."""
    table, schema = get_test_table(tmp_path)
    # The default reranker
    result1 = (
        table.search("Our father who art in heaven", query_type="hybrid")
        .rerank(normalize="score")
        .to_pydantic(schema)
    )
    result2 = (  # noqa
        table.search("Our father who art in heaven.", query_type="hybrid")
        .rerank(normalize="rank")
        .to_pydantic(schema)
    )
    result3 = table.search(
        "Our father who art in heaven..", query_type="hybrid"
    ).to_pydantic(schema)
    assert result1 == result3  # 1 & 3 should be the same, as normalize="score" is the default behavior
    query = "Our father who art in heaven"
    query_vector = table.to_pandas()["vector"][0]
    result = (
        table.search((query_vector, query))
        .limit(30)
        .rerank(normalize="score")
        .to_arrow()
    )
    assert len(result) == 30
    assert np.all(np.diff(result.column("_relevance_score").to_numpy()) <= 0), (
        "The _relevance_score column of the results returned by the reranker "
        "represents the relevance of the result to the query & should "
        "be descending."
    )
@pytest.mark.skipif(
    os.environ.get("COHERE_API_KEY") is None, reason="COHERE_API_KEY not set"
)
def test_cohere_reranker(tmp_path):
    """Check CohereReranker: normalization default and descending relevance scores."""
    pytest.importorskip("cohere")
    table, schema = get_test_table(tmp_path)
    # The default reranker
    result1 = (
        table.search("Our father who art in heaven", query_type="hybrid")
        .rerank(normalize="score", reranker=CohereReranker())
        .to_pydantic(schema)
    )
    result2 = (
        table.search("Our father who art in heaven", query_type="hybrid")
        .rerank(reranker=CohereReranker())
        .to_pydantic(schema)
    )
    assert result1 == result2
    query = "Our father who art in heaven"
    query_vector = table.to_pandas()["vector"][0]
    result = (
        table.search((query_vector, query))
        .limit(30)
        .rerank(reranker=CohereReranker())
        .to_arrow()
    )
    assert len(result) == 30
    assert np.all(np.diff(result.column("_relevance_score").to_numpy()) <= 0), (
        "The _relevance_score column of the results returned by the reranker "
        "represents the relevance of the result to the query & should "
        "be descending."
    )
def test_cross_encoder_reranker(tmp_path):
    """Check CrossEncoderReranker: normalization default and descending relevance scores."""
    pytest.importorskip("sentence_transformers")
    table, schema = get_test_table(tmp_path)
    result1 = (
        table.search("Our father who art in heaven", query_type="hybrid")
        .rerank(normalize="score", reranker=CrossEncoderReranker())
        .to_pydantic(schema)
    )
    result2 = (
        table.search("Our father who art in heaven", query_type="hybrid")
        .rerank(reranker=CrossEncoderReranker())
        .to_pydantic(schema)
    )
    assert result1 == result2
    # test explicit hybrid query
    query = "Our father who art in heaven"
    query_vector = table.to_pandas()["vector"][0]
    result = (
        table.search((query_vector, query), query_type="hybrid")
        .limit(30)
        .rerank(reranker=CrossEncoderReranker())
        .to_arrow()
    )
    assert len(result) == 30
    assert np.all(np.diff(result.column("_relevance_score").to_numpy()) <= 0), (
        "The _relevance_score column of the results returned by the reranker "
        "represents the relevance of the result to the query & should "
        "be descending."
    )
def test_colbert_reranker(tmp_path):
    """Check ColbertReranker: normalization default and descending relevance scores."""
    pytest.importorskip("transformers")
    table, schema = get_test_table(tmp_path)
    result1 = (
        table.search("Our father who art in heaven", query_type="hybrid")
        .rerank(normalize="score", reranker=ColbertReranker())
        .to_pydantic(schema)
    )
    result2 = (
        table.search("Our father who art in heaven", query_type="hybrid")
        .rerank(reranker=ColbertReranker())
        .to_pydantic(schema)
    )
    assert result1 == result2
    # test explicit hybrid query
    query = "Our father who art in heaven"
    query_vector = table.to_pandas()["vector"][0]
    result = (
        table.search((query_vector, query))
        .limit(30)
        .rerank(reranker=ColbertReranker())
        .to_arrow()
    )
    assert len(result) == 30
    assert np.all(np.diff(result.column("_relevance_score").to_numpy()) <= 0), (
        "The _relevance_score column of the results returned by the reranker "
        "represents the relevance of the result to the query & should "
        "be descending."
    )
@pytest.mark.skipif(
    os.environ.get("OPENAI_API_KEY") is None, reason="OPENAI_API_KEY not set"
)
def test_openai_reranker(tmp_path):
    """Check OpenaiReranker: normalization default and descending relevance scores."""
    pytest.importorskip("openai")
    table, schema = get_test_table(tmp_path)
    result1 = (
        table.search("Our father who art in heaven", query_type="hybrid")
        .rerank(normalize="score", reranker=OpenaiReranker())
        .to_pydantic(schema)
    )
    result2 = (
        table.search("Our father who art in heaven", query_type="hybrid")
        .rerank(reranker=OpenaiReranker())
        .to_pydantic(schema)
    )
    assert result1 == result2
    # test explicit hybrid query
    query = "Our father who art in heaven"
    query_vector = table.to_pandas()["vector"][0]
    result = (
        table.search((query_vector, query))
        .limit(30)
        .rerank(reranker=OpenaiReranker())
        .to_arrow()
    )
    assert len(result) == 30
    assert np.all(np.diff(result.column("_relevance_score").to_numpy()) <= 0), (
        "The _relevance_score column of the results returned by the reranker "
        "represents the relevance of the result to the query & should "
        "be descending."
    )
| [
"lancedb.rerankers.ColbertReranker",
"lancedb.connect",
"lancedb.rerankers.CohereReranker",
"lancedb.rerankers.CrossEncoderReranker",
"lancedb.table.LanceTable.create",
"lancedb.rerankers.OpenaiReranker",
"lancedb.embeddings.EmbeddingFunctionRegistry.get_instance"
] | [((412, 446), 'pytest.importorskip', 'pytest.importorskip', (['"""lancedb.fts"""'], {}), "('lancedb.fts')\n", (431, 446), False, 'import pytest\n'), ((488, 513), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (503, 513), False, 'import lancedb\n'), ((830, 879), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'schema': 'MyTable'}), "(db, 'my_table', schema=MyTable)\n", (847, 879), False, 'from lancedb.table import LanceTable\n'), ((4082, 4111), 'pytest.importorskip', 'pytest.importorskip', (['"""cohere"""'], {}), "('cohere')\n", (4101, 4111), False, 'import pytest\n'), ((5153, 5197), 'pytest.importorskip', 'pytest.importorskip', (['"""sentence_transformers"""'], {}), "('sentence_transformers')\n", (5172, 5197), False, 'import pytest\n'), ((6278, 6313), 'pytest.importorskip', 'pytest.importorskip', (['"""transformers"""'], {}), "('transformers')\n", (6297, 6313), False, 'import pytest\n'), ((7458, 7487), 'pytest.importorskip', 'pytest.importorskip', (['"""openai"""'], {}), "('openai')\n", (7477, 7487), False, 'import pytest\n'), ((3966, 3998), 'os.environ.get', 'os.environ.get', (['"""COHERE_API_KEY"""'], {}), "('COHERE_API_KEY')\n", (3980, 3998), False, 'import os\n'), ((7342, 7374), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (7356, 7374), False, 'import os\n'), ((592, 632), 'lancedb.embeddings.EmbeddingFunctionRegistry.get_instance', 'EmbeddingFunctionRegistry.get_instance', ([], {}), '()\n', (630, 632), False, 'from lancedb.embeddings import EmbeddingFunctionRegistry\n'), ((4318, 4334), 'lancedb.rerankers.CohereReranker', 'CohereReranker', ([], {}), '()\n', (4332, 4334), False, 'from lancedb.rerankers import CohereReranker, ColbertReranker, CrossEncoderReranker, OpenaiReranker\n'), ((4486, 4502), 'lancedb.rerankers.CohereReranker', 'CohereReranker', ([], {}), '()\n', (4500, 4502), False, 'from lancedb.rerankers import CohereReranker, ColbertReranker, 
CrossEncoderReranker, OpenaiReranker\n'), ((4766, 4782), 'lancedb.rerankers.CohereReranker', 'CohereReranker', ([], {}), '()\n', (4780, 4782), False, 'from lancedb.rerankers import CohereReranker, ColbertReranker, CrossEncoderReranker, OpenaiReranker\n'), ((5377, 5399), 'lancedb.rerankers.CrossEncoderReranker', 'CrossEncoderReranker', ([], {}), '()\n', (5397, 5399), False, 'from lancedb.rerankers import CohereReranker, ColbertReranker, CrossEncoderReranker, OpenaiReranker\n'), ((5551, 5573), 'lancedb.rerankers.CrossEncoderReranker', 'CrossEncoderReranker', ([], {}), '()\n', (5571, 5573), False, 'from lancedb.rerankers import CohereReranker, ColbertReranker, CrossEncoderReranker, OpenaiReranker\n'), ((5891, 5913), 'lancedb.rerankers.CrossEncoderReranker', 'CrossEncoderReranker', ([], {}), '()\n', (5911, 5913), False, 'from lancedb.rerankers import CohereReranker, ColbertReranker, CrossEncoderReranker, OpenaiReranker\n'), ((6493, 6510), 'lancedb.rerankers.ColbertReranker', 'ColbertReranker', ([], {}), '()\n', (6508, 6510), False, 'from lancedb.rerankers import CohereReranker, ColbertReranker, CrossEncoderReranker, OpenaiReranker\n'), ((6662, 6679), 'lancedb.rerankers.ColbertReranker', 'ColbertReranker', ([], {}), '()\n', (6677, 6679), False, 'from lancedb.rerankers import CohereReranker, ColbertReranker, CrossEncoderReranker, OpenaiReranker\n'), ((6976, 6993), 'lancedb.rerankers.ColbertReranker', 'ColbertReranker', ([], {}), '()\n', (6991, 6993), False, 'from lancedb.rerankers import CohereReranker, ColbertReranker, CrossEncoderReranker, OpenaiReranker\n'), ((7667, 7683), 'lancedb.rerankers.OpenaiReranker', 'OpenaiReranker', ([], {}), '()\n', (7681, 7683), False, 'from lancedb.rerankers import CohereReranker, ColbertReranker, CrossEncoderReranker, OpenaiReranker\n'), ((7835, 7851), 'lancedb.rerankers.OpenaiReranker', 'OpenaiReranker', ([], {}), '()\n', (7849, 7851), False, 'from lancedb.rerankers import CohereReranker, ColbertReranker, CrossEncoderReranker, 
OpenaiReranker\n'), ((8148, 8164), 'lancedb.rerankers.OpenaiReranker', 'OpenaiReranker', ([], {}), '()\n', (8162, 8164), False, 'from lancedb.rerankers import CohereReranker, ColbertReranker, CrossEncoderReranker, OpenaiReranker\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from lancedb import LanceDBConnection
# TODO: setup integ test mark and script
@pytest.mark.skip(reason="Need to set up a local server")
def test_against_local_server():
    """Query a locally running LanceDB server and expect the default 10 hits."""
    connection = LanceDBConnection("lancedb+http://localhost:10024")
    tbl = connection.open_table("sift1m_ivf1024_pq16")
    query_vector = np.random.rand(128)
    results = tbl.search(query_vector).to_pandas()
    assert len(results) == 10
| [
"lancedb.LanceDBConnection"
] | [((707, 763), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Need to set up a local server"""'}), "(reason='Need to set up a local server')\n", (723, 763), False, 'import pytest\n'), ((808, 859), 'lancedb.LanceDBConnection', 'LanceDBConnection', (['"""lancedb+http://localhost:10024"""'], {}), "('lancedb+http://localhost:10024')\n", (825, 859), False, 'from lancedb import LanceDBConnection\n'), ((933, 952), 'numpy.random.rand', 'np.random.rand', (['(128)'], {}), '(128)\n', (947, 952), True, 'import numpy as np\n')] |
# Ultralytics YOLO 🚀, AGPL-3.0 license
import getpass
from typing import List
import cv2
import numpy as np
import pandas as pd
from ultralytics.data.augment import LetterBox
from ultralytics.utils import LOGGER as logger
from ultralytics.utils import SETTINGS
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.ops import xyxy2xywh
from ultralytics.utils.plotting import plot_images
def get_table_schema(vector_size):
    """Extracts and returns the schema of a database table.
    Args:
        vector_size (int): Length of the embedding stored per row in `vector`.
    Returns:
        type: A `LanceModel` subclass describing one image row.
    """
    from lancedb.pydantic import LanceModel, Vector
    class Schema(LanceModel):
        im_file: str  # image file path
        labels: List[str]  # class names, one per object
        cls: List[int]  # integer class ids, aligned with `labels`
        bboxes: List[List[float]]  # 4 floats per box — presumably xyxy; TODO confirm
        masks: List[List[List[int]]]
        keypoints: List[List[List[float]]]
        vector: Vector(vector_size)  # embedding column
    return Schema
def get_sim_index_schema():
    """Return the LanceModel schema for the similarity-index table (it has no vector column)."""
    from lancedb.pydantic import LanceModel
    class Schema(LanceModel):
        idx: int  # index of the row in the source table
        im_file: str  # image path the count refers to
        count: int  # number of similar images
        sim_im_files: List[str]  # similar image paths
    return Schema
def sanitize_batch(batch, dataset_info):
    """Coerce batch tensors to plain Python lists and sort boxes by class id (mutates `batch`)."""
    batch["cls"] = batch["cls"].flatten().int().tolist()
    paired = list(zip(batch["bboxes"].tolist(), batch["cls"]))
    paired.sort(key=lambda bc: bc[1])  # stable sort: ties keep their original box order
    batch["bboxes"] = [bc[0] for bc in paired]
    batch["cls"] = [bc[1] for bc in paired]
    batch["labels"] = [dataset_info["names"][c] for c in batch["cls"]]
    # Missing optional keys become nested-empty placeholders for downstream indexing.
    for key in ("masks", "keypoints"):
        batch[key] = batch[key].tolist() if key in batch else [[[]]]
    return batch
def plot_query_result(similar_set, plot_labels=True):
"""
Plot images from the similar set.
Args:
similar_set (list): Pyarrow or pandas object containing the similar data points
plot_labels (bool): Whether to plot labels or not
"""
similar_set = (
similar_set.to_dict(orient="list") if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict()
)
empty_masks = [[[]]]
empty_boxes = [[]]
images = similar_set.get("im_file", [])
bboxes = similar_set.get("bboxes", []) if similar_set.get("bboxes") is not empty_boxes else []
masks = similar_set.get("masks") if similar_set.get("masks")[0] != empty_masks else []
kpts = similar_set.get("keypoints") if similar_set.get("keypoints")[0] != empty_masks else []
cls = similar_set.get("cls", [])
plot_size = 640
imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], []
for i, imf in enumerate(images):
im = cv2.imread(imf)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
h, w = im.shape[:2]
r = min(plot_size / h, plot_size / w)
imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1))
if plot_labels:
if len(bboxes) > i and len(bboxes[i]) > 0:
box = np.array(bboxes[i], dtype=np.float32)
box[:, [0, 2]] *= r
box[:, [1, 3]] *= r
plot_boxes.append(box)
if len(masks) > i and len(masks[i]) > 0:
mask = np.array(masks[i], dtype=np.uint8)[0]
plot_masks.append(LetterBox(plot_size, center=False)(image=mask))
if len(kpts) > i and kpts[i] is not None:
kpt = np.array(kpts[i], dtype=np.float32)
kpt[:, :, :2] *= r
plot_kpts.append(kpt)
batch_idx.append(np.ones(len(np.array(bboxes[i], dtype=np.float32))) * i)
imgs = np.stack(imgs, axis=0)
masks = np.stack(plot_masks, axis=0) if plot_masks else np.zeros(0, dtype=np.uint8)
kpts = np.concatenate(plot_kpts, axis=0) if plot_kpts else np.zeros((0, 51), dtype=np.float32)
boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if plot_boxes else np.zeros(0, dtype=np.float32)
batch_idx = np.concatenate(batch_idx, axis=0)
cls = np.concatenate([np.array(c, dtype=np.int32) for c in cls], axis=0)
return plot_images(
imgs, batch_idx, cls, bboxes=boxes, masks=masks, kpts=kpts, max_subplots=len(images), save=False, threaded=False
)
def prompt_sql_query(query):
"""Plots images with optional labels from a similar data set."""
check_requirements("openai>=1.6.1")
from openai import OpenAI
if not SETTINGS["openai_api_key"]:
logger.warning("OpenAI API key not found in settings. Please enter your API key below.")
openai_api_key = getpass.getpass("OpenAI API key: ")
SETTINGS.update({"openai_api_key": openai_api_key})
openai = OpenAI(api_key=SETTINGS["openai_api_key"])
messages = [
{
"role": "system",
"content": """
You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query based on
the following schema and a user request. You only need to output the format with fixed selection
statement that selects everything from "'table'", like `SELECT * from 'table'`
Schema:
im_file: string not null
labels: list<item: string> not null
child 0, item: string
cls: list<item: int64> not null
child 0, item: int64
bboxes: list<item: list<item: double>> not null
child 0, item: list<item: double>
child 0, item: double
masks: list<item: list<item: list<item: int64>>> not null
child 0, item: list<item: list<item: int64>>
child 0, item: list<item: int64>
child 0, item: int64
keypoints: list<item: list<item: list<item: double>>> not null
child 0, item: list<item: list<item: double>>
child 0, item: list<item: double>
child 0, item: double
vector: fixed_size_list<item: float>[256] not null
child 0, item: float
Some details about the schema:
- the "labels" column contains the string values like 'person' and 'dog' for the respective objects
in each image
- the "cls" column contains the integer values on these classes that map them the labels
Example of a correct query:
request - Get all data points that contain 2 or more people and at least one dog
correct query-
SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1;
""",
},
{"role": "user", "content": f"{query}"},
]
response = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
return response.choices[0].message.content
| [
"lancedb.pydantic.Vector"
] | [((3694, 3716), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (3702, 3716), True, 'import numpy as np\n'), ((4027, 4060), 'numpy.concatenate', 'np.concatenate', (['batch_idx'], {'axis': '(0)'}), '(batch_idx, axis=0)\n', (4041, 4060), True, 'import numpy as np\n'), ((4394, 4429), 'ultralytics.utils.checks.check_requirements', 'check_requirements', (['"""openai>=1.6.1"""'], {}), "('openai>=1.6.1')\n", (4412, 4429), False, 'from ultralytics.utils.checks import check_requirements\n'), ((4731, 4773), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "SETTINGS['openai_api_key']"}), "(api_key=SETTINGS['openai_api_key'])\n", (4737, 4773), False, 'from openai import OpenAI\n'), ((798, 817), 'lancedb.pydantic.Vector', 'Vector', (['vector_size'], {}), '(vector_size)\n', (804, 817), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2746, 2761), 'cv2.imread', 'cv2.imread', (['imf'], {}), '(imf)\n', (2756, 2761), False, 'import cv2\n'), ((2775, 2810), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (2787, 2810), False, 'import cv2\n'), ((3729, 3757), 'numpy.stack', 'np.stack', (['plot_masks'], {'axis': '(0)'}), '(plot_masks, axis=0)\n', (3737, 3757), True, 'import numpy as np\n'), ((3777, 3804), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.uint8'}), '(0, dtype=np.uint8)\n', (3785, 3804), True, 'import numpy as np\n'), ((3816, 3849), 'numpy.concatenate', 'np.concatenate', (['plot_kpts'], {'axis': '(0)'}), '(plot_kpts, axis=0)\n', (3830, 3849), True, 'import numpy as np\n'), ((3868, 3903), 'numpy.zeros', 'np.zeros', (['(0, 51)'], {'dtype': 'np.float32'}), '((0, 51), dtype=np.float32)\n', (3876, 3903), True, 'import numpy as np\n'), ((3981, 4010), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (3989, 4010), True, 'import numpy as np\n'), ((4508, 4601), 'ultralytics.utils.LOGGER.warning', 'logger.warning', (['"""OpenAI API key not found in 
settings. Please enter your API key below."""'], {}), "(\n 'OpenAI API key not found in settings. Please enter your API key below.')\n", (4522, 4601), True, 'from ultralytics.utils import LOGGER as logger\n'), ((4622, 4657), 'getpass.getpass', 'getpass.getpass', (['"""OpenAI API key: """'], {}), "('OpenAI API key: ')\n", (4637, 4657), False, 'import getpass\n'), ((4666, 4717), 'ultralytics.utils.SETTINGS.update', 'SETTINGS.update', (["{'openai_api_key': openai_api_key}"], {}), "({'openai_api_key': openai_api_key})\n", (4681, 4717), False, 'from ultralytics.utils import SETTINGS\n'), ((3926, 3960), 'numpy.concatenate', 'np.concatenate', (['plot_boxes'], {'axis': '(0)'}), '(plot_boxes, axis=0)\n', (3940, 3960), True, 'import numpy as np\n'), ((4087, 4114), 'numpy.array', 'np.array', (['c'], {'dtype': 'np.int32'}), '(c, dtype=np.int32)\n', (4095, 4114), True, 'import numpy as np\n'), ((3071, 3108), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3079, 3108), True, 'import numpy as np\n'), ((3492, 3527), 'numpy.array', 'np.array', (['kpts[i]'], {'dtype': 'np.float32'}), '(kpts[i], dtype=np.float32)\n', (3500, 3527), True, 'import numpy as np\n'), ((3296, 3330), 'numpy.array', 'np.array', (['masks[i]'], {'dtype': 'np.uint8'}), '(masks[i], dtype=np.uint8)\n', (3304, 3330), True, 'import numpy as np\n'), ((2905, 2939), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (2914, 2939), False, 'from ultralytics.data.augment import LetterBox\n'), ((3368, 3402), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (3377, 3402), False, 'from ultralytics.data.augment import LetterBox\n'), ((3638, 3675), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3646, 3675), True, 'import numpy as np\n')] |
# Ultralytics YOLO 🚀, AGPL-3.0 license
import getpass
from typing import List
import cv2
import numpy as np
import pandas as pd
from ultralytics.data.augment import LetterBox
from ultralytics.utils import LOGGER as logger
from ultralytics.utils import SETTINGS
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.ops import xyxy2xywh
from ultralytics.utils.plotting import plot_images
def get_table_schema(vector_size):
"""Extracts and returns the schema of a database table."""
from lancedb.pydantic import LanceModel, Vector
class Schema(LanceModel):
im_file: str
labels: List[str]
cls: List[int]
bboxes: List[List[float]]
masks: List[List[List[int]]]
keypoints: List[List[List[float]]]
vector: Vector(vector_size)
return Schema
def get_sim_index_schema():
"""Returns a LanceModel schema for a database table with specified vector size."""
from lancedb.pydantic import LanceModel
class Schema(LanceModel):
idx: int
im_file: str
count: int
sim_im_files: List[str]
return Schema
def sanitize_batch(batch, dataset_info):
"""Sanitizes input batch for inference, ensuring correct format and dimensions."""
batch["cls"] = batch["cls"].flatten().int().tolist()
box_cls_pair = sorted(zip(batch["bboxes"].tolist(), batch["cls"]), key=lambda x: x[1])
batch["bboxes"] = [box for box, _ in box_cls_pair]
batch["cls"] = [cls for _, cls in box_cls_pair]
batch["labels"] = [dataset_info["names"][i] for i in batch["cls"]]
batch["masks"] = batch["masks"].tolist() if "masks" in batch else [[[]]]
batch["keypoints"] = batch["keypoints"].tolist() if "keypoints" in batch else [[[]]]
return batch
def plot_query_result(similar_set, plot_labels=True):
"""
Plot images from the similar set.
Args:
similar_set (list): Pyarrow or pandas object containing the similar data points
plot_labels (bool): Whether to plot labels or not
"""
similar_set = (
similar_set.to_dict(orient="list") if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict()
)
empty_masks = [[[]]]
empty_boxes = [[]]
images = similar_set.get("im_file", [])
bboxes = similar_set.get("bboxes", []) if similar_set.get("bboxes") is not empty_boxes else []
masks = similar_set.get("masks") if similar_set.get("masks")[0] != empty_masks else []
kpts = similar_set.get("keypoints") if similar_set.get("keypoints")[0] != empty_masks else []
cls = similar_set.get("cls", [])
plot_size = 640
imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], []
for i, imf in enumerate(images):
im = cv2.imread(imf)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
h, w = im.shape[:2]
r = min(plot_size / h, plot_size / w)
imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1))
if plot_labels:
if len(bboxes) > i and len(bboxes[i]) > 0:
box = np.array(bboxes[i], dtype=np.float32)
box[:, [0, 2]] *= r
box[:, [1, 3]] *= r
plot_boxes.append(box)
if len(masks) > i and len(masks[i]) > 0:
mask = np.array(masks[i], dtype=np.uint8)[0]
plot_masks.append(LetterBox(plot_size, center=False)(image=mask))
if len(kpts) > i and kpts[i] is not None:
kpt = np.array(kpts[i], dtype=np.float32)
kpt[:, :, :2] *= r
plot_kpts.append(kpt)
batch_idx.append(np.ones(len(np.array(bboxes[i], dtype=np.float32))) * i)
imgs = np.stack(imgs, axis=0)
masks = np.stack(plot_masks, axis=0) if plot_masks else np.zeros(0, dtype=np.uint8)
kpts = np.concatenate(plot_kpts, axis=0) if plot_kpts else np.zeros((0, 51), dtype=np.float32)
boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if plot_boxes else np.zeros(0, dtype=np.float32)
batch_idx = np.concatenate(batch_idx, axis=0)
cls = np.concatenate([np.array(c, dtype=np.int32) for c in cls], axis=0)
return plot_images(
imgs, batch_idx, cls, bboxes=boxes, masks=masks, kpts=kpts, max_subplots=len(images), save=False, threaded=False
)
def prompt_sql_query(query):
"""Plots images with optional labels from a similar data set."""
check_requirements("openai>=1.6.1")
from openai import OpenAI
if not SETTINGS["openai_api_key"]:
logger.warning("OpenAI API key not found in settings. Please enter your API key below.")
openai_api_key = getpass.getpass("OpenAI API key: ")
SETTINGS.update({"openai_api_key": openai_api_key})
openai = OpenAI(api_key=SETTINGS["openai_api_key"])
messages = [
{
"role": "system",
"content": """
You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query based on
the following schema and a user request. You only need to output the format with fixed selection
statement that selects everything from "'table'", like `SELECT * from 'table'`
Schema:
im_file: string not null
labels: list<item: string> not null
child 0, item: string
cls: list<item: int64> not null
child 0, item: int64
bboxes: list<item: list<item: double>> not null
child 0, item: list<item: double>
child 0, item: double
masks: list<item: list<item: list<item: int64>>> not null
child 0, item: list<item: list<item: int64>>
child 0, item: list<item: int64>
child 0, item: int64
keypoints: list<item: list<item: list<item: double>>> not null
child 0, item: list<item: list<item: double>>
child 0, item: list<item: double>
child 0, item: double
vector: fixed_size_list<item: float>[256] not null
child 0, item: float
Some details about the schema:
- the "labels" column contains the string values like 'person' and 'dog' for the respective objects
in each image
- the "cls" column contains the integer values on these classes that map them the labels
Example of a correct query:
request - Get all data points that contain 2 or more people and at least one dog
correct query-
SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1;
""",
},
{"role": "user", "content": f"{query}"},
]
response = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
return response.choices[0].message.content
| [
"lancedb.pydantic.Vector"
] | [((3694, 3716), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (3702, 3716), True, 'import numpy as np\n'), ((4027, 4060), 'numpy.concatenate', 'np.concatenate', (['batch_idx'], {'axis': '(0)'}), '(batch_idx, axis=0)\n', (4041, 4060), True, 'import numpy as np\n'), ((4394, 4429), 'ultralytics.utils.checks.check_requirements', 'check_requirements', (['"""openai>=1.6.1"""'], {}), "('openai>=1.6.1')\n", (4412, 4429), False, 'from ultralytics.utils.checks import check_requirements\n'), ((4731, 4773), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "SETTINGS['openai_api_key']"}), "(api_key=SETTINGS['openai_api_key'])\n", (4737, 4773), False, 'from openai import OpenAI\n'), ((798, 817), 'lancedb.pydantic.Vector', 'Vector', (['vector_size'], {}), '(vector_size)\n', (804, 817), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2746, 2761), 'cv2.imread', 'cv2.imread', (['imf'], {}), '(imf)\n', (2756, 2761), False, 'import cv2\n'), ((2775, 2810), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (2787, 2810), False, 'import cv2\n'), ((3729, 3757), 'numpy.stack', 'np.stack', (['plot_masks'], {'axis': '(0)'}), '(plot_masks, axis=0)\n', (3737, 3757), True, 'import numpy as np\n'), ((3777, 3804), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.uint8'}), '(0, dtype=np.uint8)\n', (3785, 3804), True, 'import numpy as np\n'), ((3816, 3849), 'numpy.concatenate', 'np.concatenate', (['plot_kpts'], {'axis': '(0)'}), '(plot_kpts, axis=0)\n', (3830, 3849), True, 'import numpy as np\n'), ((3868, 3903), 'numpy.zeros', 'np.zeros', (['(0, 51)'], {'dtype': 'np.float32'}), '((0, 51), dtype=np.float32)\n', (3876, 3903), True, 'import numpy as np\n'), ((3981, 4010), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (3989, 4010), True, 'import numpy as np\n'), ((4508, 4601), 'ultralytics.utils.LOGGER.warning', 'logger.warning', (['"""OpenAI API key not found in 
settings. Please enter your API key below."""'], {}), "(\n 'OpenAI API key not found in settings. Please enter your API key below.')\n", (4522, 4601), True, 'from ultralytics.utils import LOGGER as logger\n'), ((4622, 4657), 'getpass.getpass', 'getpass.getpass', (['"""OpenAI API key: """'], {}), "('OpenAI API key: ')\n", (4637, 4657), False, 'import getpass\n'), ((4666, 4717), 'ultralytics.utils.SETTINGS.update', 'SETTINGS.update', (["{'openai_api_key': openai_api_key}"], {}), "({'openai_api_key': openai_api_key})\n", (4681, 4717), False, 'from ultralytics.utils import SETTINGS\n'), ((3926, 3960), 'numpy.concatenate', 'np.concatenate', (['plot_boxes'], {'axis': '(0)'}), '(plot_boxes, axis=0)\n', (3940, 3960), True, 'import numpy as np\n'), ((4087, 4114), 'numpy.array', 'np.array', (['c'], {'dtype': 'np.int32'}), '(c, dtype=np.int32)\n', (4095, 4114), True, 'import numpy as np\n'), ((3071, 3108), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3079, 3108), True, 'import numpy as np\n'), ((3492, 3527), 'numpy.array', 'np.array', (['kpts[i]'], {'dtype': 'np.float32'}), '(kpts[i], dtype=np.float32)\n', (3500, 3527), True, 'import numpy as np\n'), ((3296, 3330), 'numpy.array', 'np.array', (['masks[i]'], {'dtype': 'np.uint8'}), '(masks[i], dtype=np.uint8)\n', (3304, 3330), True, 'import numpy as np\n'), ((2905, 2939), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (2914, 2939), False, 'from ultralytics.data.augment import LetterBox\n'), ((3368, 3402), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (3377, 3402), False, 'from ultralytics.data.augment import LetterBox\n'), ((3638, 3675), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3646, 3675), True, 'import numpy as np\n')] |
# Ultralytics YOLO 🚀, AGPL-3.0 license
import getpass
from typing import List
import cv2
import numpy as np
import pandas as pd
from ultralytics.data.augment import LetterBox
from ultralytics.utils import LOGGER as logger
from ultralytics.utils import SETTINGS
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.ops import xyxy2xywh
from ultralytics.utils.plotting import plot_images
def get_table_schema(vector_size):
"""Extracts and returns the schema of a database table."""
from lancedb.pydantic import LanceModel, Vector
class Schema(LanceModel):
im_file: str
labels: List[str]
cls: List[int]
bboxes: List[List[float]]
masks: List[List[List[int]]]
keypoints: List[List[List[float]]]
vector: Vector(vector_size)
return Schema
def get_sim_index_schema():
"""Returns a LanceModel schema for a database table with specified vector size."""
from lancedb.pydantic import LanceModel
class Schema(LanceModel):
idx: int
im_file: str
count: int
sim_im_files: List[str]
return Schema
def sanitize_batch(batch, dataset_info):
"""Sanitizes input batch for inference, ensuring correct format and dimensions."""
batch["cls"] = batch["cls"].flatten().int().tolist()
box_cls_pair = sorted(zip(batch["bboxes"].tolist(), batch["cls"]), key=lambda x: x[1])
batch["bboxes"] = [box for box, _ in box_cls_pair]
batch["cls"] = [cls for _, cls in box_cls_pair]
batch["labels"] = [dataset_info["names"][i] for i in batch["cls"]]
batch["masks"] = batch["masks"].tolist() if "masks" in batch else [[[]]]
batch["keypoints"] = batch["keypoints"].tolist() if "keypoints" in batch else [[[]]]
return batch
def plot_query_result(similar_set, plot_labels=True):
"""
Plot images from the similar set.
Args:
similar_set (list): Pyarrow or pandas object containing the similar data points
plot_labels (bool): Whether to plot labels or not
"""
similar_set = (
similar_set.to_dict(orient="list") if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict()
)
empty_masks = [[[]]]
empty_boxes = [[]]
images = similar_set.get("im_file", [])
bboxes = similar_set.get("bboxes", []) if similar_set.get("bboxes") is not empty_boxes else []
masks = similar_set.get("masks") if similar_set.get("masks")[0] != empty_masks else []
kpts = similar_set.get("keypoints") if similar_set.get("keypoints")[0] != empty_masks else []
cls = similar_set.get("cls", [])
plot_size = 640
imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], []
for i, imf in enumerate(images):
im = cv2.imread(imf)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
h, w = im.shape[:2]
r = min(plot_size / h, plot_size / w)
imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1))
if plot_labels:
if len(bboxes) > i and len(bboxes[i]) > 0:
box = np.array(bboxes[i], dtype=np.float32)
box[:, [0, 2]] *= r
box[:, [1, 3]] *= r
plot_boxes.append(box)
if len(masks) > i and len(masks[i]) > 0:
mask = np.array(masks[i], dtype=np.uint8)[0]
plot_masks.append(LetterBox(plot_size, center=False)(image=mask))
if len(kpts) > i and kpts[i] is not None:
kpt = np.array(kpts[i], dtype=np.float32)
kpt[:, :, :2] *= r
plot_kpts.append(kpt)
batch_idx.append(np.ones(len(np.array(bboxes[i], dtype=np.float32))) * i)
imgs = np.stack(imgs, axis=0)
masks = np.stack(plot_masks, axis=0) if plot_masks else np.zeros(0, dtype=np.uint8)
kpts = np.concatenate(plot_kpts, axis=0) if plot_kpts else np.zeros((0, 51), dtype=np.float32)
boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if plot_boxes else np.zeros(0, dtype=np.float32)
batch_idx = np.concatenate(batch_idx, axis=0)
cls = np.concatenate([np.array(c, dtype=np.int32) for c in cls], axis=0)
return plot_images(
imgs, batch_idx, cls, bboxes=boxes, masks=masks, kpts=kpts, max_subplots=len(images), save=False, threaded=False
)
def prompt_sql_query(query):
"""Plots images with optional labels from a similar data set."""
check_requirements("openai>=1.6.1")
from openai import OpenAI
if not SETTINGS["openai_api_key"]:
logger.warning("OpenAI API key not found in settings. Please enter your API key below.")
openai_api_key = getpass.getpass("OpenAI API key: ")
SETTINGS.update({"openai_api_key": openai_api_key})
openai = OpenAI(api_key=SETTINGS["openai_api_key"])
messages = [
{
"role": "system",
"content": """
You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query based on
the following schema and a user request. You only need to output the format with fixed selection
statement that selects everything from "'table'", like `SELECT * from 'table'`
Schema:
im_file: string not null
labels: list<item: string> not null
child 0, item: string
cls: list<item: int64> not null
child 0, item: int64
bboxes: list<item: list<item: double>> not null
child 0, item: list<item: double>
child 0, item: double
masks: list<item: list<item: list<item: int64>>> not null
child 0, item: list<item: list<item: int64>>
child 0, item: list<item: int64>
child 0, item: int64
keypoints: list<item: list<item: list<item: double>>> not null
child 0, item: list<item: list<item: double>>
child 0, item: list<item: double>
child 0, item: double
vector: fixed_size_list<item: float>[256] not null
child 0, item: float
Some details about the schema:
- the "labels" column contains the string values like 'person' and 'dog' for the respective objects
in each image
- the "cls" column contains the integer values on these classes that map them the labels
Example of a correct query:
request - Get all data points that contain 2 or more people and at least one dog
correct query-
SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1;
""",
},
{"role": "user", "content": f"{query}"},
]
response = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
return response.choices[0].message.content
| [
"lancedb.pydantic.Vector"
] | [((3694, 3716), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (3702, 3716), True, 'import numpy as np\n'), ((4027, 4060), 'numpy.concatenate', 'np.concatenate', (['batch_idx'], {'axis': '(0)'}), '(batch_idx, axis=0)\n', (4041, 4060), True, 'import numpy as np\n'), ((4394, 4429), 'ultralytics.utils.checks.check_requirements', 'check_requirements', (['"""openai>=1.6.1"""'], {}), "('openai>=1.6.1')\n", (4412, 4429), False, 'from ultralytics.utils.checks import check_requirements\n'), ((4731, 4773), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "SETTINGS['openai_api_key']"}), "(api_key=SETTINGS['openai_api_key'])\n", (4737, 4773), False, 'from openai import OpenAI\n'), ((798, 817), 'lancedb.pydantic.Vector', 'Vector', (['vector_size'], {}), '(vector_size)\n', (804, 817), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2746, 2761), 'cv2.imread', 'cv2.imread', (['imf'], {}), '(imf)\n', (2756, 2761), False, 'import cv2\n'), ((2775, 2810), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (2787, 2810), False, 'import cv2\n'), ((3729, 3757), 'numpy.stack', 'np.stack', (['plot_masks'], {'axis': '(0)'}), '(plot_masks, axis=0)\n', (3737, 3757), True, 'import numpy as np\n'), ((3777, 3804), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.uint8'}), '(0, dtype=np.uint8)\n', (3785, 3804), True, 'import numpy as np\n'), ((3816, 3849), 'numpy.concatenate', 'np.concatenate', (['plot_kpts'], {'axis': '(0)'}), '(plot_kpts, axis=0)\n', (3830, 3849), True, 'import numpy as np\n'), ((3868, 3903), 'numpy.zeros', 'np.zeros', (['(0, 51)'], {'dtype': 'np.float32'}), '((0, 51), dtype=np.float32)\n', (3876, 3903), True, 'import numpy as np\n'), ((3981, 4010), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (3989, 4010), True, 'import numpy as np\n'), ((4508, 4601), 'ultralytics.utils.LOGGER.warning', 'logger.warning', (['"""OpenAI API key not found in 
settings. Please enter your API key below."""'], {}), "(\n 'OpenAI API key not found in settings. Please enter your API key below.')\n", (4522, 4601), True, 'from ultralytics.utils import LOGGER as logger\n'), ((4622, 4657), 'getpass.getpass', 'getpass.getpass', (['"""OpenAI API key: """'], {}), "('OpenAI API key: ')\n", (4637, 4657), False, 'import getpass\n'), ((4666, 4717), 'ultralytics.utils.SETTINGS.update', 'SETTINGS.update', (["{'openai_api_key': openai_api_key}"], {}), "({'openai_api_key': openai_api_key})\n", (4681, 4717), False, 'from ultralytics.utils import SETTINGS\n'), ((3926, 3960), 'numpy.concatenate', 'np.concatenate', (['plot_boxes'], {'axis': '(0)'}), '(plot_boxes, axis=0)\n', (3940, 3960), True, 'import numpy as np\n'), ((4087, 4114), 'numpy.array', 'np.array', (['c'], {'dtype': 'np.int32'}), '(c, dtype=np.int32)\n', (4095, 4114), True, 'import numpy as np\n'), ((3071, 3108), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3079, 3108), True, 'import numpy as np\n'), ((3492, 3527), 'numpy.array', 'np.array', (['kpts[i]'], {'dtype': 'np.float32'}), '(kpts[i], dtype=np.float32)\n', (3500, 3527), True, 'import numpy as np\n'), ((3296, 3330), 'numpy.array', 'np.array', (['masks[i]'], {'dtype': 'np.uint8'}), '(masks[i], dtype=np.uint8)\n', (3304, 3330), True, 'import numpy as np\n'), ((2905, 2939), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (2914, 2939), False, 'from ultralytics.data.augment import LetterBox\n'), ((3368, 3402), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (3377, 3402), False, 'from ultralytics.data.augment import LetterBox\n'), ((3638, 3675), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3646, 3675), True, 'import numpy as np\n')] |
# Ultralytics YOLO 🚀, AGPL-3.0 license
import getpass
from typing import List
import cv2
import numpy as np
import pandas as pd
from ultralytics.data.augment import LetterBox
from ultralytics.utils import LOGGER as logger
from ultralytics.utils import SETTINGS
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.ops import xyxy2xywh
from ultralytics.utils.plotting import plot_images
def get_table_schema(vector_size):
"""Extracts and returns the schema of a database table."""
from lancedb.pydantic import LanceModel, Vector
class Schema(LanceModel):
im_file: str
labels: List[str]
cls: List[int]
bboxes: List[List[float]]
masks: List[List[List[int]]]
keypoints: List[List[List[float]]]
vector: Vector(vector_size)
return Schema
def get_sim_index_schema():
"""Returns a LanceModel schema for a database table with specified vector size."""
from lancedb.pydantic import LanceModel
class Schema(LanceModel):
idx: int
im_file: str
count: int
sim_im_files: List[str]
return Schema
def sanitize_batch(batch, dataset_info):
"""Sanitizes input batch for inference, ensuring correct format and dimensions."""
batch["cls"] = batch["cls"].flatten().int().tolist()
box_cls_pair = sorted(zip(batch["bboxes"].tolist(), batch["cls"]), key=lambda x: x[1])
batch["bboxes"] = [box for box, _ in box_cls_pair]
batch["cls"] = [cls for _, cls in box_cls_pair]
batch["labels"] = [dataset_info["names"][i] for i in batch["cls"]]
batch["masks"] = batch["masks"].tolist() if "masks" in batch else [[[]]]
batch["keypoints"] = batch["keypoints"].tolist() if "keypoints" in batch else [[[]]]
return batch
def plot_query_result(similar_set, plot_labels=True):
"""
Plot images from the similar set.
Args:
similar_set (list): Pyarrow or pandas object containing the similar data points
plot_labels (bool): Whether to plot labels or not
"""
similar_set = (
similar_set.to_dict(orient="list") if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict()
)
empty_masks = [[[]]]
empty_boxes = [[]]
images = similar_set.get("im_file", [])
bboxes = similar_set.get("bboxes", []) if similar_set.get("bboxes") is not empty_boxes else []
masks = similar_set.get("masks") if similar_set.get("masks")[0] != empty_masks else []
kpts = similar_set.get("keypoints") if similar_set.get("keypoints")[0] != empty_masks else []
cls = similar_set.get("cls", [])
plot_size = 640
imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], []
for i, imf in enumerate(images):
im = cv2.imread(imf)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
h, w = im.shape[:2]
r = min(plot_size / h, plot_size / w)
imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1))
if plot_labels:
if len(bboxes) > i and len(bboxes[i]) > 0:
box = np.array(bboxes[i], dtype=np.float32)
box[:, [0, 2]] *= r
box[:, [1, 3]] *= r
plot_boxes.append(box)
if len(masks) > i and len(masks[i]) > 0:
mask = np.array(masks[i], dtype=np.uint8)[0]
plot_masks.append(LetterBox(plot_size, center=False)(image=mask))
if len(kpts) > i and kpts[i] is not None:
kpt = np.array(kpts[i], dtype=np.float32)
kpt[:, :, :2] *= r
plot_kpts.append(kpt)
batch_idx.append(np.ones(len(np.array(bboxes[i], dtype=np.float32))) * i)
imgs = np.stack(imgs, axis=0)
masks = np.stack(plot_masks, axis=0) if plot_masks else np.zeros(0, dtype=np.uint8)
kpts = np.concatenate(plot_kpts, axis=0) if plot_kpts else np.zeros((0, 51), dtype=np.float32)
boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if plot_boxes else np.zeros(0, dtype=np.float32)
batch_idx = np.concatenate(batch_idx, axis=0)
cls = np.concatenate([np.array(c, dtype=np.int32) for c in cls], axis=0)
return plot_images(
imgs, batch_idx, cls, bboxes=boxes, masks=masks, kpts=kpts, max_subplots=len(images), save=False, threaded=False
)
def prompt_sql_query(query):
"""Plots images with optional labels from a similar data set."""
check_requirements("openai>=1.6.1")
from openai import OpenAI
if not SETTINGS["openai_api_key"]:
logger.warning("OpenAI API key not found in settings. Please enter your API key below.")
openai_api_key = getpass.getpass("OpenAI API key: ")
SETTINGS.update({"openai_api_key": openai_api_key})
openai = OpenAI(api_key=SETTINGS["openai_api_key"])
messages = [
{
"role": "system",
"content": """
You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query based on
the following schema and a user request. You only need to output the format with fixed selection
statement that selects everything from "'table'", like `SELECT * from 'table'`
Schema:
im_file: string not null
labels: list<item: string> not null
child 0, item: string
cls: list<item: int64> not null
child 0, item: int64
bboxes: list<item: list<item: double>> not null
child 0, item: list<item: double>
child 0, item: double
masks: list<item: list<item: list<item: int64>>> not null
child 0, item: list<item: list<item: int64>>
child 0, item: list<item: int64>
child 0, item: int64
keypoints: list<item: list<item: list<item: double>>> not null
child 0, item: list<item: list<item: double>>
child 0, item: list<item: double>
child 0, item: double
vector: fixed_size_list<item: float>[256] not null
child 0, item: float
Some details about the schema:
- the "labels" column contains the string values like 'person' and 'dog' for the respective objects
in each image
- the "cls" column contains the integer values on these classes that map them the labels
Example of a correct query:
request - Get all data points that contain 2 or more people and at least one dog
correct query-
SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1;
""",
},
{"role": "user", "content": f"{query}"},
]
response = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
return response.choices[0].message.content
| [
"lancedb.pydantic.Vector"
] | [((3694, 3716), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (3702, 3716), True, 'import numpy as np\n'), ((4027, 4060), 'numpy.concatenate', 'np.concatenate', (['batch_idx'], {'axis': '(0)'}), '(batch_idx, axis=0)\n', (4041, 4060), True, 'import numpy as np\n'), ((4394, 4429), 'ultralytics.utils.checks.check_requirements', 'check_requirements', (['"""openai>=1.6.1"""'], {}), "('openai>=1.6.1')\n", (4412, 4429), False, 'from ultralytics.utils.checks import check_requirements\n'), ((4731, 4773), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "SETTINGS['openai_api_key']"}), "(api_key=SETTINGS['openai_api_key'])\n", (4737, 4773), False, 'from openai import OpenAI\n'), ((798, 817), 'lancedb.pydantic.Vector', 'Vector', (['vector_size'], {}), '(vector_size)\n', (804, 817), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2746, 2761), 'cv2.imread', 'cv2.imread', (['imf'], {}), '(imf)\n', (2756, 2761), False, 'import cv2\n'), ((2775, 2810), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (2787, 2810), False, 'import cv2\n'), ((3729, 3757), 'numpy.stack', 'np.stack', (['plot_masks'], {'axis': '(0)'}), '(plot_masks, axis=0)\n', (3737, 3757), True, 'import numpy as np\n'), ((3777, 3804), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.uint8'}), '(0, dtype=np.uint8)\n', (3785, 3804), True, 'import numpy as np\n'), ((3816, 3849), 'numpy.concatenate', 'np.concatenate', (['plot_kpts'], {'axis': '(0)'}), '(plot_kpts, axis=0)\n', (3830, 3849), True, 'import numpy as np\n'), ((3868, 3903), 'numpy.zeros', 'np.zeros', (['(0, 51)'], {'dtype': 'np.float32'}), '((0, 51), dtype=np.float32)\n', (3876, 3903), True, 'import numpy as np\n'), ((3981, 4010), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (3989, 4010), True, 'import numpy as np\n'), ((4508, 4601), 'ultralytics.utils.LOGGER.warning', 'logger.warning', (['"""OpenAI API key not found in 
settings. Please enter your API key below."""'], {}), "(\n 'OpenAI API key not found in settings. Please enter your API key below.')\n", (4522, 4601), True, 'from ultralytics.utils import LOGGER as logger\n'), ((4622, 4657), 'getpass.getpass', 'getpass.getpass', (['"""OpenAI API key: """'], {}), "('OpenAI API key: ')\n", (4637, 4657), False, 'import getpass\n'), ((4666, 4717), 'ultralytics.utils.SETTINGS.update', 'SETTINGS.update', (["{'openai_api_key': openai_api_key}"], {}), "({'openai_api_key': openai_api_key})\n", (4681, 4717), False, 'from ultralytics.utils import SETTINGS\n'), ((3926, 3960), 'numpy.concatenate', 'np.concatenate', (['plot_boxes'], {'axis': '(0)'}), '(plot_boxes, axis=0)\n', (3940, 3960), True, 'import numpy as np\n'), ((4087, 4114), 'numpy.array', 'np.array', (['c'], {'dtype': 'np.int32'}), '(c, dtype=np.int32)\n', (4095, 4114), True, 'import numpy as np\n'), ((3071, 3108), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3079, 3108), True, 'import numpy as np\n'), ((3492, 3527), 'numpy.array', 'np.array', (['kpts[i]'], {'dtype': 'np.float32'}), '(kpts[i], dtype=np.float32)\n', (3500, 3527), True, 'import numpy as np\n'), ((3296, 3330), 'numpy.array', 'np.array', (['masks[i]'], {'dtype': 'np.uint8'}), '(masks[i], dtype=np.uint8)\n', (3304, 3330), True, 'import numpy as np\n'), ((2905, 2939), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (2914, 2939), False, 'from ultralytics.data.augment import LetterBox\n'), ((3368, 3402), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (3377, 3402), False, 'from ultralytics.data.augment import LetterBox\n'), ((3638, 3675), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3646, 3675), True, 'import numpy as np\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from lancedb.utils import CONFIG
@click.group()
@click.version_option(help="LanceDB command line interface entry point")
def cli():
"LanceDB command line interface"
diagnostics_help = """
Enable or disable LanceDB diagnostics. When enabled, LanceDB will send anonymous events
to help us improve LanceDB. These diagnostics are used only for error reporting and no
data is collected. You can find more about diagnosis on our docs:
https://lancedb.github.io/lancedb/cli_config/
"""
@cli.command(help=diagnostics_help)
@click.option("--enabled/--disabled", default=True)
def diagnostics(enabled):
CONFIG.update({"diagnostics": True if enabled else False})
click.echo("LanceDB diagnostics is %s" % ("enabled" if enabled else "disabled"))
@cli.command(help="Show current LanceDB configuration")
def config():
# TODO: pretty print as table with colors and formatting
click.echo("Current LanceDB configuration:")
cfg = CONFIG.copy()
cfg.pop("uuid") # Don't show uuid as it is not configurable
for item, amount in cfg.items():
click.echo("{} ({})".format(item, amount))
| [
"lancedb.utils.CONFIG.copy",
"lancedb.utils.CONFIG.update"
] | [((641, 654), 'click.group', 'click.group', ([], {}), '()\n', (652, 654), False, 'import click\n'), ((656, 727), 'click.version_option', 'click.version_option', ([], {'help': '"""LanceDB command line interface entry point"""'}), "(help='LanceDB command line interface entry point')\n", (676, 727), False, 'import click\n'), ((1131, 1181), 'click.option', 'click.option', (['"""--enabled/--disabled"""'], {'default': '(True)'}), "('--enabled/--disabled', default=True)\n", (1143, 1181), False, 'import click\n'), ((1212, 1270), 'lancedb.utils.CONFIG.update', 'CONFIG.update', (["{'diagnostics': True if enabled else False}"], {}), "({'diagnostics': True if enabled else False})\n", (1225, 1270), False, 'from lancedb.utils import CONFIG\n'), ((1275, 1360), 'click.echo', 'click.echo', (["('LanceDB diagnostics is %s' % ('enabled' if enabled else 'disabled'))"], {}), "('LanceDB diagnostics is %s' % ('enabled' if enabled else 'disabled')\n )\n", (1285, 1360), False, 'import click\n'), ((1493, 1537), 'click.echo', 'click.echo', (['"""Current LanceDB configuration:"""'], {}), "('Current LanceDB configuration:')\n", (1503, 1537), False, 'import click\n'), ((1548, 1561), 'lancedb.utils.CONFIG.copy', 'CONFIG.copy', ([], {}), '()\n', (1559, 1561), False, 'from lancedb.utils import CONFIG\n')] |
# Copyright (c) Hegel AI, Inc.
# All rights reserved.
#
# This source code's license can be found in the
# LICENSE file in the root directory of this source tree.
import itertools
import warnings
import pandas as pd
from typing import Callable, Optional
try:
import lancedb
from lancedb.embeddings import with_embeddings
except ImportError:
lancedb = None
import logging
from time import perf_counter
from .experiment import Experiment
from ._utils import _get_dynamic_columns
VALID_TASKS = [""]
def query_builder(
table: "lancedb.Table",
embed_fn: Callable,
text: str,
metric: str = "cosine",
limit: int = 3,
filter: str = None,
nprobes: int = None,
refine_factor: int = None,
):
if nprobes is not None or refine_factor is not None:
warnings.warn(
"`nprobes` and `refine_factor` are not used by the default `query_builder`. "
"Feel free to open an issue to request adding support for them."
)
query = table.search(embed_fn(text)[0]).metric(metric)
if filter:
query = query.where(filter)
return query.limit(limit).to_df()
class LanceDBExperiment(Experiment):
r"""
Perform an experiment with ``LanceDB`` to test different embedding functions or retrieval arguments.
You can query from an existing table, or create a new one (and insert documents into it) during
the experiment.
Args:
uri (str): LanceDB uri to interact with your database. Default is "lancedb"
table_name (str): the table that you will get or create. Default is "table"
use_existing_table (bool): determines whether to create a new collection or use
an existing one
embedding_fns (list[Callable]): embedding functions to test in the experiment
by default only uses the default one in LanceDB
query_args (dict[str, list]): parameters used to query the table
Each value is expected to be a list to create all possible combinations
data (Optional[list[dict]]): documents or embeddings that will be added to
the newly created table
text_col_name (str): name of the text column in the table. Default is "text"
clean_up (bool): determines whether to drop the table after the experiment ends
"""
def __init__(
self,
embedding_fns: dict[str, Callable],
query_args: dict[str, list],
uri: str = "lancedb",
table_name: str = "table",
use_existing_table: bool = False,
data: Optional[list[dict]] = None,
text_col_name: str = "text",
clean_up: bool = False,
):
if lancedb is None:
raise ModuleNotFoundError(
"Package `lancedb` is required to be installed to use this experiment."
"Please use `pip install lancedb` to install the package"
)
self.table_name = table_name
self.use_existing_table = use_existing_table
self.embedding_fns = embedding_fns
if use_existing_table and data:
raise RuntimeError("You can either use an existing collection or create a new one during the experiment.")
if not use_existing_table and data is None:
raise RuntimeError("If you choose to create a new collection, you must also add to it.")
self.data = data if data is not None else []
self.argument_combos: list[dict] = []
self.text_col_name = text_col_name
self.db = lancedb.connect(uri)
self.completion_fn = self.lancedb_completion_fn
self.query_args = query_args
self.clean_up = clean_up
super().__init__()
def prepare(self):
for combo in itertools.product(*self.query_args.values()):
self.argument_combos.append(dict(zip(self.query_args.keys(), combo)))
def run(self, runs: int = 1):
input_args = [] # This will be used to construct DataFrame table
results = []
latencies = []
if not self.argument_combos:
logging.info("Preparing first...")
self.prepare()
for emb_fn_name, emb_fn in self.embedding_fns.items():
if self.use_existing_table: # Use existing table
table = self.db.open_table(self.table_name)
if not table:
raise RuntimeError(f"Table {self.table_name} does not exist.")
else: # Create table and insert data
data = with_embeddings(emb_fn, self.data, self.text_col_name)
table = self.db.create_table(self.table_name, data, mode="overwrite")
# Query from table
for query_arg_dict in self.argument_combos:
query_args = query_arg_dict.copy()
for _ in range(runs):
start = perf_counter()
results.append(self.lancedb_completion_fn(table=table, embedding_fn=emb_fn, **query_args))
latencies.append(perf_counter() - start)
query_args["emb_fn"] = emb_fn_name # Saving for visualization
input_args.append(query_args)
# Clean up
if self.clean_up:
self.db.drop_table(self.table_name)
self._construct_result_dfs(input_args, results, latencies)
def lancedb_completion_fn(self, table, embedding_fn, **kwargs):
return query_builder(table, embedding_fn, **kwargs)
def _construct_result_dfs(
self,
input_args: list[dict[str, object]],
results: list[dict[str, object]],
latencies: list[float],
):
r"""
Construct a few DataFrames that contain all relevant data (i.e. input arguments, results, evaluation metrics).
This version only extract the most relevant objects returned by LanceDB.
Args:
input_args (list[dict[str, object]]): list of dictionaries, where each of them is a set of
input argument that was passed into the model
results (list[dict[str, object]]): list of responses from the model
latencies (list[float]): list of latency measurements
"""
# `input_arg_df` contains all all input args
input_arg_df = pd.DataFrame(input_args)
# `dynamic_input_arg_df` contains input args that has more than one unique values
dynamic_input_arg_df = _get_dynamic_columns(input_arg_df)
# `response_df` contains the extracted response (often being the text response)
response_dict = dict()
response_dict["top doc ids"] = [self._extract_top_doc_ids(result) for result in results]
response_dict["distances"] = [self._extract_lancedb_dists(result) for result in results]
response_dict["documents"] = [self._extract_lancedb_docs(result) for result in results]
response_df = pd.DataFrame(response_dict)
# `result_df` contains everything returned by the completion function
result_df = response_df # pd.concat([self.response_df, pd.DataFrame(results)], axis=1)
# `score_df` contains computed metrics (e.g. latency, evaluation metrics)
self.score_df = pd.DataFrame({"latency": latencies})
# `partial_df` contains some input arguments, extracted responses, and score
self.partial_df = pd.concat([dynamic_input_arg_df, response_df, self.score_df], axis=1)
# `full_df` contains all input arguments, responses, and score
self.full_df = pd.concat([input_arg_df, result_df, self.score_df], axis=1)
@staticmethod
def _extract_top_doc_ids(output: pd.DataFrame) -> list[tuple[str, float]]:
r"""Helper function to get distances between documents from LanceDB."""
return output.to_dict(orient="list")["ids"]
@staticmethod
def _extract_lancedb_dists(output: pd.DataFrame) -> list[tuple[str, float]]:
r"""Helper function to get distances between documents from LanceDB."""
return output.to_dict(orient="list")["_distance"]
@staticmethod
def _extract_lancedb_docs(output: pd.DataFrame) -> list[tuple[str, float]]:
r"""Helper function to get distances between documents from LanceDB."""
return output.to_dict(orient="list")["text"]
| [
"lancedb.connect",
"lancedb.embeddings.with_embeddings"
] | [((797, 961), 'warnings.warn', 'warnings.warn', (['"""`nprobes` and `refine_factor` are not used by the default `query_builder`. Feel free to open an issue to request adding support for them."""'], {}), "(\n '`nprobes` and `refine_factor` are not used by the default `query_builder`. Feel free to open an issue to request adding support for them.'\n )\n", (810, 961), False, 'import warnings\n'), ((3496, 3516), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (3511, 3516), False, 'import lancedb\n'), ((6251, 6275), 'pandas.DataFrame', 'pd.DataFrame', (['input_args'], {}), '(input_args)\n', (6263, 6275), True, 'import pandas as pd\n'), ((6864, 6891), 'pandas.DataFrame', 'pd.DataFrame', (['response_dict'], {}), '(response_dict)\n', (6876, 6891), True, 'import pandas as pd\n'), ((7173, 7209), 'pandas.DataFrame', 'pd.DataFrame', (["{'latency': latencies}"], {}), "({'latency': latencies})\n", (7185, 7209), True, 'import pandas as pd\n'), ((7322, 7391), 'pandas.concat', 'pd.concat', (['[dynamic_input_arg_df, response_df, self.score_df]'], {'axis': '(1)'}), '([dynamic_input_arg_df, response_df, self.score_df], axis=1)\n', (7331, 7391), True, 'import pandas as pd\n'), ((7486, 7545), 'pandas.concat', 'pd.concat', (['[input_arg_df, result_df, self.score_df]'], {'axis': '(1)'}), '([input_arg_df, result_df, self.score_df], axis=1)\n', (7495, 7545), True, 'import pandas as pd\n'), ((4045, 4079), 'logging.info', 'logging.info', (['"""Preparing first..."""'], {}), "('Preparing first...')\n", (4057, 4079), False, 'import logging\n'), ((4479, 4533), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['emb_fn', 'self.data', 'self.text_col_name'], {}), '(emb_fn, self.data, self.text_col_name)\n', (4494, 4533), False, 'from lancedb.embeddings import with_embeddings\n'), ((4825, 4839), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (4837, 4839), False, 'from time import perf_counter\n'), ((4988, 5002), 'time.perf_counter', 'perf_counter', ([], {}), 
'()\n', (5000, 5002), False, 'from time import perf_counter\n')] |
"""LanceDB vector store with cloud storage support."""
import os
from typing import Any, Optional
from dotenv import load_dotenv
from llama_index.schema import NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.vector_stores import LanceDBVectorStore as LanceDBVectorStoreBase
from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities
from llama_index.vector_stores.types import VectorStoreQuery, VectorStoreQueryResult
from pandas import DataFrame
load_dotenv()
class LanceDBVectorStore(LanceDBVectorStoreBase):
"""Advanced LanceDB Vector Store supporting cloud storage and prefiltering."""
from lancedb.query import LanceQueryBuilder
from lancedb.table import Table
def __init__(
self,
uri: str,
table_name: str = "vectors",
nprobes: int = 20,
refine_factor: Optional[int] = None,
api_key: Optional[str] = None,
region: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Init params."""
self._setup_connection(uri, api_key, region)
self.uri = uri
self.table_name = table_name
self.nprobes = nprobes
self.refine_factor = refine_factor
self.api_key = api_key
self.region = region
def _setup_connection(self, uri: str, api_key: Optional[str] = None, region: Optional[str] = None):
"""Establishes a robust connection to LanceDB."""
api_key = api_key or os.getenv('LANCEDB_API_KEY')
region = region or os.getenv('LANCEDB_REGION')
import_err_msg = "`lancedb` package not found, please run `pip install lancedb`"
try:
import lancedb
except ImportError:
raise ImportError(import_err_msg)
if api_key and region:
self.connection = lancedb.connect(uri, api_key=api_key, region=region)
else:
self.connection = lancedb.connect(uri)
def query(
self,
query: VectorStoreQuery,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Enhanced query method to support prefiltering in LanceDB queries."""
table = self.connection.open_table(self.table_name)
lance_query = self._prepare_lance_query(query, table, **kwargs)
results = lance_query.to_df()
return self._construct_query_result(results)
def _prepare_lance_query(self, query: VectorStoreQuery, table: Table, **kwargs) -> LanceQueryBuilder:
"""Prepares the LanceDB query considering prefiltering and additional parameters."""
if query.filters is not None:
if "where" in kwargs:
raise ValueError(
"Cannot specify filter via both query and kwargs. "
"Use kwargs only for lancedb specific items that are "
"not supported via the generic query interface.")
where = _to_lance_filter(query.filters)
else:
where = kwargs.pop("where", None)
prefilter = kwargs.pop("prefilter", False)
table = self.connection.open_table(self.table_name)
lance_query = (
table.search(query.query_embedding).limit(query.similarity_top_k).where(
where, prefilter=prefilter).nprobes(self.nprobes))
if self.refine_factor is not None:
lance_query.refine_factor(self.refine_factor)
return lance_query
def _construct_query_result(self, results: DataFrame) -> VectorStoreQueryResult:
"""Constructs a VectorStoreQueryResult from a LanceDB query result."""
nodes = []
for _, row in results.iterrows():
node = TextNode(
text=row.get('text', ''), # ensure text is a string
id_=row['id'],
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id=row['doc_id']),
})
nodes.append(node)
return VectorStoreQueryResult(
nodes=nodes,
similarities=_to_llama_similarities(results),
ids=results["id"].tolist(),
)
| [
"lancedb.connect"
] | [((490, 503), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (501, 503), False, 'from dotenv import load_dotenv\n'), ((1464, 1492), 'os.getenv', 'os.getenv', (['"""LANCEDB_API_KEY"""'], {}), "('LANCEDB_API_KEY')\n", (1473, 1492), False, 'import os\n'), ((1520, 1547), 'os.getenv', 'os.getenv', (['"""LANCEDB_REGION"""'], {}), "('LANCEDB_REGION')\n", (1529, 1547), False, 'import os\n'), ((1814, 1866), 'lancedb.connect', 'lancedb.connect', (['uri'], {'api_key': 'api_key', 'region': 'region'}), '(uri, api_key=api_key, region=region)\n', (1829, 1866), False, 'import lancedb\n'), ((1911, 1931), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1926, 1931), False, 'import lancedb\n'), ((2898, 2929), 'llama_index.vector_stores.lancedb._to_lance_filter', '_to_lance_filter', (['query.filters'], {}), '(query.filters)\n', (2914, 2929), False, 'from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities\n'), ((4021, 4052), 'llama_index.vector_stores.lancedb._to_llama_similarities', '_to_llama_similarities', (['results'], {}), '(results)\n', (4043, 4052), False, 'from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities\n'), ((3841, 3879), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': "row['doc_id']"}), "(node_id=row['doc_id'])\n", (3856, 3879), False, 'from llama_index.schema import NodeRelationship, RelatedNodeInfo, TextNode\n')] |
from pathlib import Path
from typing import Any, Callable
from lancedb import DBConnection as LanceDBConnection
from lancedb import connect as lancedb_connect
from lancedb.table import Table as LanceDBTable
from openai import Client as OpenAIClient
from pydantic import Field, PrivateAttr
from crewai_tools.tools.rag.rag_tool import Adapter
def _default_embedding_function():
client = OpenAIClient()
def _embedding_function(input):
rs = client.embeddings.create(input=input, model="text-embedding-ada-002")
return [record.embedding for record in rs.data]
return _embedding_function
class LanceDBAdapter(Adapter):
uri: str | Path
table_name: str
embedding_function: Callable = Field(default_factory=_default_embedding_function)
top_k: int = 3
vector_column_name: str = "vector"
text_column_name: str = "text"
_db: LanceDBConnection = PrivateAttr()
_table: LanceDBTable = PrivateAttr()
def model_post_init(self, __context: Any) -> None:
self._db = lancedb_connect(self.uri)
self._table = self._db.open_table(self.table_name)
return super().model_post_init(__context)
def query(self, question: str) -> str:
query = self.embedding_function([question])[0]
results = (
self._table.search(query, vector_column_name=self.vector_column_name)
.limit(self.top_k)
.select([self.text_column_name])
.to_list()
)
values = [result[self.text_column_name] for result in results]
return "\n".join(values)
| [
"lancedb.connect"
] | [((393, 407), 'openai.Client', 'OpenAIClient', ([], {}), '()\n', (405, 407), True, 'from openai import Client as OpenAIClient\n'), ((724, 774), 'pydantic.Field', 'Field', ([], {'default_factory': '_default_embedding_function'}), '(default_factory=_default_embedding_function)\n', (729, 774), False, 'from pydantic import Field, PrivateAttr\n'), ((898, 911), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (909, 911), False, 'from pydantic import Field, PrivateAttr\n'), ((939, 952), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (950, 952), False, 'from pydantic import Field, PrivateAttr\n'), ((1028, 1053), 'lancedb.connect', 'lancedb_connect', (['self.uri'], {}), '(self.uri)\n', (1043, 1053), True, 'from lancedb import connect as lancedb_connect\n')] |
from langchain.text_splitter import (
RecursiveCharacterTextSplitter,
Language,
LatexTextSplitter,
)
from langchain.document_loaders import TextLoader
from langchain.embeddings import OpenAIEmbeddings
import argparse, os, arxiv
os.environ["OPENAI_API_KEY"] = "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"
embeddings = OpenAIEmbeddings()
from langchain.vectorstores import LanceDB
from lancedb.pydantic import Vector, LanceModel
from Typing import List
from datetime import datetime
import lancedb
global embedding_out_length
embedding_out_length = 1536
class Content(LanceModel):
id: str
arxiv_id: str
vector: Vector(embedding_out_length)
text: str
uploaded_date: datetime
title: str
authors: List[str]
abstract: str
categories: List[str]
url: str
def PyPDF_to_Vector(table: LanceDB, embeddings: OpenAIEmbeddings, src_dir: str, n_threads: int = 1):
pass
if __name__ == "__main__":
argparser = argparse.ArgumentParser(description="Create Vector DB and perform ingestion from source files")
argparser.add_argument('-s', '--src_dir', type=str, required=True, help = "Source directory where arxiv sources are stored")
argparser.add_argument('-db', '--db_name', type=str, required=True, help = "Name of the LanceDB database to be created")
argparser.add_argument('-t', '--table_name', type=str, required=False, help = "Name of the LanceDB table to be created", default = "EIC_archive")
argparser.add_argument('-openai_key', '--openai_api_key', type=str, required=True, help = "OpenAI API key")
argparser.add_argument('-c', '--chunking', type = str, required=False, help = "Type of Chunking PDF or LATEX", default = "PDF")
argparser.add_argument('-n', '--nthreads', type=int, default=-1)
args = argparser.parse_args()
SRC_DIR = args.src_dir
DB_NAME = args.db_name
TABLE_NAME = args.table_name
OPENAI_API_KEY = args.openai_api_key
NTHREADS = args.nthreads
db = lancedb.connect(DB_NAME)
table = db.create_table(TABLE_NAME, schema=Content, mode="overwrite")
db = lancedb.connect()
meta_data = {"arxiv_id": "1", "title": "EIC LLM",
"category" : "N/A",
"authors": "N/A",
"sub_categories": "N/A",
"abstract": "N/A",
"published": "N/A",
"updated": "N/A",
"doi": "N/A"
},
table = db.create_table(
"EIC_archive",
data=[
{
"vector": embeddings.embed_query("EIC LLM"),
"text": "EIC LLM",
"id": "1",
"arxiv_id" : "N/A",
"title" : "N/A",
"category" : "N/A",
"published" : "N/A"
}
],
mode="overwrite",
)
vectorstore = LanceDB(connection = table, embedding = embeddings)
sourcedir = "PDFs"
count = 0
for source in os.listdir(sourcedir):
if not os.path.isdir(os.path.join("PDFs", source)):
continue
print (f"Adding the source document {source} to the Vector DB")
import arxiv
client = arxiv.Client()
search = arxiv.Search(id_list=[source])
paper = next(arxiv.Client().results(search))
meta_data = {"arxiv_id": paper.entry_id,
"title": paper.title,
"category" : categories[paper.primary_category],
"published": paper.published
}
for file in os.listdir(os.path.join(sourcedir, source)):
if file.endswith(".tex"):
latex_file = os.path.join(sourcedir, source, file)
print (source, latex_file)
documents = TextLoader(latex_file, encoding = 'latin-1').load()
latex_splitter = LatexTextSplitter(
chunk_size=120, chunk_overlap=10
)
documents = latex_splitter.split_documents(documents)
for doc in documents:
for k, v in meta_data.items():
doc.metadata[k] = v
vectorstore.add_documents(documents = documents)
count+=len(documents) | [
"lancedb.connect",
"lancedb.pydantic.Vector"
] | [((342, 360), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (358, 360), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((2116, 2133), 'lancedb.connect', 'lancedb.connect', ([], {}), '()\n', (2131, 2133), False, 'import lancedb\n'), ((2820, 2867), 'langchain.vectorstores.LanceDB', 'LanceDB', ([], {'connection': 'table', 'embedding': 'embeddings'}), '(connection=table, embedding=embeddings)\n', (2827, 2867), False, 'from langchain.vectorstores import LanceDB\n'), ((2916, 2937), 'os.listdir', 'os.listdir', (['sourcedir'], {}), '(sourcedir)\n', (2926, 2937), False, 'import argparse, os, arxiv\n'), ((648, 676), 'lancedb.pydantic.Vector', 'Vector', (['embedding_out_length'], {}), '(embedding_out_length)\n', (654, 676), False, 'from lancedb.pydantic import Vector, LanceModel\n'), ((978, 1078), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create Vector DB and perform ingestion from source files"""'}), "(description=\n 'Create Vector DB and perform ingestion from source files')\n", (1001, 1078), False, 'import argparse, os, arxiv\n'), ((2006, 2030), 'lancedb.connect', 'lancedb.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (2021, 2030), False, 'import lancedb\n'), ((3110, 3124), 'arxiv.Client', 'arxiv.Client', ([], {}), '()\n', (3122, 3124), False, 'import arxiv\n'), ((3138, 3168), 'arxiv.Search', 'arxiv.Search', ([], {'id_list': '[source]'}), '(id_list=[source])\n', (3150, 3168), False, 'import arxiv\n'), ((3458, 3489), 'os.path.join', 'os.path.join', (['sourcedir', 'source'], {}), '(sourcedir, source)\n', (3470, 3489), False, 'import argparse, os, arxiv\n'), ((2964, 2992), 'os.path.join', 'os.path.join', (['"""PDFs"""', 'source'], {}), "('PDFs', source)\n", (2976, 2992), False, 'import argparse, os, arxiv\n'), ((3551, 3588), 'os.path.join', 'os.path.join', (['sourcedir', 'source', 'file'], {}), '(sourcedir, source, file)\n', (3563, 3588), False, 'import argparse, os, arxiv\n'), ((3733, 
3784), 'langchain.text_splitter.LatexTextSplitter', 'LatexTextSplitter', ([], {'chunk_size': '(120)', 'chunk_overlap': '(10)'}), '(chunk_size=120, chunk_overlap=10)\n', (3750, 3784), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter, Language, LatexTextSplitter\n'), ((3186, 3200), 'arxiv.Client', 'arxiv.Client', ([], {}), '()\n', (3198, 3200), False, 'import arxiv\n'), ((3652, 3694), 'langchain.document_loaders.TextLoader', 'TextLoader', (['latex_file'], {'encoding': '"""latin-1"""'}), "(latex_file, encoding='latin-1')\n", (3662, 3694), False, 'from langchain.document_loaders import TextLoader\n')] |
# Ultralytics YOLO 🚀, AGPL-3.0 license
import getpass
from typing import List
import cv2
import numpy as np
import pandas as pd
from ultralytics.data.augment import LetterBox
from ultralytics.utils import LOGGER as logger
from ultralytics.utils import SETTINGS
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.ops import xyxy2xywh
from ultralytics.utils.plotting import plot_images
def get_table_schema(vector_size):
    """
    Build and return a LanceDB table schema for storing image samples and their embeddings.

    Args:
        vector_size (int): Dimensionality of the embedding vector column.

    Returns:
        (type): A ``LanceModel`` subclass describing one row: image path, per-object labels,
            class ids, boxes, masks, keypoints, and a fixed-size embedding vector.
    """
    from lancedb.pydantic import LanceModel, Vector

    class Schema(LanceModel):
        im_file: str  # path of the image file
        labels: List[str]  # class names, one per object in the image
        cls: List[int]  # integer class ids, parallel to `labels`
        bboxes: List[List[float]]  # one box (list of floats) per object
        masks: List[List[List[int]]]  # segmentation masks; [[[]]] placeholder when absent
        keypoints: List[List[List[float]]]  # pose keypoints; [[[]]] placeholder when absent
        vector: Vector(vector_size)  # embedding used for similarity search

    return Schema
def get_sim_index_schema():
    """
    Build and return a LanceDB table schema for a similarity index.

    Returns:
        (type): A ``LanceModel`` subclass describing one row: the sample index, its image
            file path, the count of similar images, and their file paths.
    """
    from lancedb.pydantic import LanceModel

    class Schema(LanceModel):
        idx: int  # index of the sample in the source table
        im_file: str  # path of the query image file
        count: int  # number of entries in `sim_im_files`
        sim_im_files: List[str]  # paths of the images deemed similar
    return Schema
def sanitize_batch(batch, dataset_info):
"""Sanitizes input batch for inference, ensuring correct format and dimensions."""
batch["cls"] = batch["cls"].flatten().int().tolist()
box_cls_pair = sorted(zip(batch["bboxes"].tolist(), batch["cls"]), key=lambda x: x[1])
batch["bboxes"] = [box for box, _ in box_cls_pair]
batch["cls"] = [cls for _, cls in box_cls_pair]
batch["labels"] = [dataset_info["names"][i] for i in batch["cls"]]
batch["masks"] = batch["masks"].tolist() if "masks" in batch else [[[]]]
batch["keypoints"] = batch["keypoints"].tolist() if "keypoints" in batch else [[[]]]
return batch
def plot_query_result(similar_set, plot_labels=True):
"""
Plot images from the similar set.
Args:
similar_set (list): Pyarrow or pandas object containing the similar data points
plot_labels (bool): Whether to plot labels or not
"""
similar_set = (
similar_set.to_dict(orient="list") if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict()
)
empty_masks = [[[]]]
empty_boxes = [[]]
images = similar_set.get("im_file", [])
bboxes = similar_set.get("bboxes", []) if similar_set.get("bboxes") is not empty_boxes else []
masks = similar_set.get("masks") if similar_set.get("masks")[0] != empty_masks else []
kpts = similar_set.get("keypoints") if similar_set.get("keypoints")[0] != empty_masks else []
cls = similar_set.get("cls", [])
plot_size = 640
imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], []
for i, imf in enumerate(images):
im = cv2.imread(imf)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
h, w = im.shape[:2]
r = min(plot_size / h, plot_size / w)
imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1))
if plot_labels:
if len(bboxes) > i and len(bboxes[i]) > 0:
box = np.array(bboxes[i], dtype=np.float32)
box[:, [0, 2]] *= r
box[:, [1, 3]] *= r
plot_boxes.append(box)
if len(masks) > i and len(masks[i]) > 0:
mask = np.array(masks[i], dtype=np.uint8)[0]
plot_masks.append(LetterBox(plot_size, center=False)(image=mask))
if len(kpts) > i and kpts[i] is not None:
kpt = np.array(kpts[i], dtype=np.float32)
kpt[:, :, :2] *= r
plot_kpts.append(kpt)
batch_idx.append(np.ones(len(np.array(bboxes[i], dtype=np.float32))) * i)
imgs = np.stack(imgs, axis=0)
masks = np.stack(plot_masks, axis=0) if len(plot_masks) > 0 else np.zeros(0, dtype=np.uint8)
kpts = np.concatenate(plot_kpts, axis=0) if len(plot_kpts) > 0 else np.zeros((0, 51), dtype=np.float32)
boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if len(plot_boxes) > 0 else np.zeros(0, dtype=np.float32)
batch_idx = np.concatenate(batch_idx, axis=0)
cls = np.concatenate([np.array(c, dtype=np.int32) for c in cls], axis=0)
return plot_images(
imgs, batch_idx, cls, bboxes=boxes, masks=masks, kpts=kpts, max_subplots=len(images), save=False, threaded=False
)
def prompt_sql_query(query):
"""Plots images with optional labels from a similar data set."""
check_requirements("openai>=1.6.1")
from openai import OpenAI
if not SETTINGS["openai_api_key"]:
logger.warning("OpenAI API key not found in settings. Please enter your API key below.")
openai_api_key = getpass.getpass("OpenAI API key: ")
SETTINGS.update({"openai_api_key": openai_api_key})
openai = OpenAI(api_key=SETTINGS["openai_api_key"])
messages = [
{
"role": "system",
"content": """
You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query based on
the following schema and a user request. You only need to output the format with fixed selection
statement that selects everything from "'table'", like `SELECT * from 'table'`
Schema:
im_file: string not null
labels: list<item: string> not null
child 0, item: string
cls: list<item: int64> not null
child 0, item: int64
bboxes: list<item: list<item: double>> not null
child 0, item: list<item: double>
child 0, item: double
masks: list<item: list<item: list<item: int64>>> not null
child 0, item: list<item: list<item: int64>>
child 0, item: list<item: int64>
child 0, item: int64
keypoints: list<item: list<item: list<item: double>>> not null
child 0, item: list<item: list<item: double>>
child 0, item: list<item: double>
child 0, item: double
vector: fixed_size_list<item: float>[256] not null
child 0, item: float
Some details about the schema:
- the "labels" column contains the string values like 'person' and 'dog' for the respective objects
in each image
- the "cls" column contains the integer values on these classes that map them the labels
Example of a correct query:
request - Get all data points that contain 2 or more people and at least one dog
correct query-
SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1;
""",
},
{"role": "user", "content": f"{query}"},
]
response = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
return response.choices[0].message.content
| [
"lancedb.pydantic.Vector"
] | [((3694, 3716), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (3702, 3716), True, 'import numpy as np\n'), ((4054, 4087), 'numpy.concatenate', 'np.concatenate', (['batch_idx'], {'axis': '(0)'}), '(batch_idx, axis=0)\n', (4068, 4087), True, 'import numpy as np\n'), ((4421, 4456), 'ultralytics.utils.checks.check_requirements', 'check_requirements', (['"""openai>=1.6.1"""'], {}), "('openai>=1.6.1')\n", (4439, 4456), False, 'from ultralytics.utils.checks import check_requirements\n'), ((4758, 4800), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "SETTINGS['openai_api_key']"}), "(api_key=SETTINGS['openai_api_key'])\n", (4764, 4800), False, 'from openai import OpenAI\n'), ((798, 817), 'lancedb.pydantic.Vector', 'Vector', (['vector_size'], {}), '(vector_size)\n', (804, 817), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2746, 2761), 'cv2.imread', 'cv2.imread', (['imf'], {}), '(imf)\n', (2756, 2761), False, 'import cv2\n'), ((2775, 2810), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (2787, 2810), False, 'import cv2\n'), ((3729, 3757), 'numpy.stack', 'np.stack', (['plot_masks'], {'axis': '(0)'}), '(plot_masks, axis=0)\n', (3737, 3757), True, 'import numpy as np\n'), ((3786, 3813), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.uint8'}), '(0, dtype=np.uint8)\n', (3794, 3813), True, 'import numpy as np\n'), ((3825, 3858), 'numpy.concatenate', 'np.concatenate', (['plot_kpts'], {'axis': '(0)'}), '(plot_kpts, axis=0)\n', (3839, 3858), True, 'import numpy as np\n'), ((3886, 3921), 'numpy.zeros', 'np.zeros', (['(0, 51)'], {'dtype': 'np.float32'}), '((0, 51), dtype=np.float32)\n', (3894, 3921), True, 'import numpy as np\n'), ((4008, 4037), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (4016, 4037), True, 'import numpy as np\n'), ((4535, 4628), 'ultralytics.utils.LOGGER.warning', 'logger.warning', (['"""OpenAI API key not found in 
settings. Please enter your API key below."""'], {}), "(\n 'OpenAI API key not found in settings. Please enter your API key below.')\n", (4549, 4628), True, 'from ultralytics.utils import LOGGER as logger\n'), ((4649, 4684), 'getpass.getpass', 'getpass.getpass', (['"""OpenAI API key: """'], {}), "('OpenAI API key: ')\n", (4664, 4684), False, 'import getpass\n'), ((4693, 4744), 'ultralytics.utils.SETTINGS.update', 'SETTINGS.update', (["{'openai_api_key': openai_api_key}"], {}), "({'openai_api_key': openai_api_key})\n", (4708, 4744), False, 'from ultralytics.utils import SETTINGS\n'), ((3944, 3978), 'numpy.concatenate', 'np.concatenate', (['plot_boxes'], {'axis': '(0)'}), '(plot_boxes, axis=0)\n', (3958, 3978), True, 'import numpy as np\n'), ((4114, 4141), 'numpy.array', 'np.array', (['c'], {'dtype': 'np.int32'}), '(c, dtype=np.int32)\n', (4122, 4141), True, 'import numpy as np\n'), ((3071, 3108), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3079, 3108), True, 'import numpy as np\n'), ((3492, 3527), 'numpy.array', 'np.array', (['kpts[i]'], {'dtype': 'np.float32'}), '(kpts[i], dtype=np.float32)\n', (3500, 3527), True, 'import numpy as np\n'), ((3296, 3330), 'numpy.array', 'np.array', (['masks[i]'], {'dtype': 'np.uint8'}), '(masks[i], dtype=np.uint8)\n', (3304, 3330), True, 'import numpy as np\n'), ((2905, 2939), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (2914, 2939), False, 'from ultralytics.data.augment import LetterBox\n'), ((3368, 3402), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (3377, 3402), False, 'from ultralytics.data.augment import LetterBox\n'), ((3638, 3675), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3646, 3675), True, 'import numpy as np\n')] |
# Ultralytics YOLO 🚀, AGPL-3.0 license
import getpass
from typing import List
import cv2
import numpy as np
import pandas as pd
from ultralytics.data.augment import LetterBox
from ultralytics.utils import LOGGER as logger
from ultralytics.utils import SETTINGS
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.ops import xyxy2xywh
from ultralytics.utils.plotting import plot_images
def get_table_schema(vector_size):
"""Extracts and returns the schema of a database table."""
from lancedb.pydantic import LanceModel, Vector
class Schema(LanceModel):
im_file: str
labels: List[str]
cls: List[int]
bboxes: List[List[float]]
masks: List[List[List[int]]]
keypoints: List[List[List[float]]]
vector: Vector(vector_size)
return Schema
def get_sim_index_schema():
"""Returns a LanceModel schema for a database table with specified vector size."""
from lancedb.pydantic import LanceModel
class Schema(LanceModel):
idx: int
im_file: str
count: int
sim_im_files: List[str]
return Schema
def sanitize_batch(batch, dataset_info):
"""Sanitizes input batch for inference, ensuring correct format and dimensions."""
batch["cls"] = batch["cls"].flatten().int().tolist()
box_cls_pair = sorted(zip(batch["bboxes"].tolist(), batch["cls"]), key=lambda x: x[1])
batch["bboxes"] = [box for box, _ in box_cls_pair]
batch["cls"] = [cls for _, cls in box_cls_pair]
batch["labels"] = [dataset_info["names"][i] for i in batch["cls"]]
batch["masks"] = batch["masks"].tolist() if "masks" in batch else [[[]]]
batch["keypoints"] = batch["keypoints"].tolist() if "keypoints" in batch else [[[]]]
return batch
def plot_query_result(similar_set, plot_labels=True):
"""
Plot images from the similar set.
Args:
similar_set (list): Pyarrow or pandas object containing the similar data points
plot_labels (bool): Whether to plot labels or not
"""
similar_set = (
similar_set.to_dict(orient="list") if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict()
)
empty_masks = [[[]]]
empty_boxes = [[]]
images = similar_set.get("im_file", [])
bboxes = similar_set.get("bboxes", []) if similar_set.get("bboxes") is not empty_boxes else []
masks = similar_set.get("masks") if similar_set.get("masks")[0] != empty_masks else []
kpts = similar_set.get("keypoints") if similar_set.get("keypoints")[0] != empty_masks else []
cls = similar_set.get("cls", [])
plot_size = 640
imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], []
for i, imf in enumerate(images):
im = cv2.imread(imf)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
h, w = im.shape[:2]
r = min(plot_size / h, plot_size / w)
imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1))
if plot_labels:
if len(bboxes) > i and len(bboxes[i]) > 0:
box = np.array(bboxes[i], dtype=np.float32)
box[:, [0, 2]] *= r
box[:, [1, 3]] *= r
plot_boxes.append(box)
if len(masks) > i and len(masks[i]) > 0:
mask = np.array(masks[i], dtype=np.uint8)[0]
plot_masks.append(LetterBox(plot_size, center=False)(image=mask))
if len(kpts) > i and kpts[i] is not None:
kpt = np.array(kpts[i], dtype=np.float32)
kpt[:, :, :2] *= r
plot_kpts.append(kpt)
batch_idx.append(np.ones(len(np.array(bboxes[i], dtype=np.float32))) * i)
imgs = np.stack(imgs, axis=0)
masks = np.stack(plot_masks, axis=0) if len(plot_masks) > 0 else np.zeros(0, dtype=np.uint8)
kpts = np.concatenate(plot_kpts, axis=0) if len(plot_kpts) > 0 else np.zeros((0, 51), dtype=np.float32)
boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if len(plot_boxes) > 0 else np.zeros(0, dtype=np.float32)
batch_idx = np.concatenate(batch_idx, axis=0)
cls = np.concatenate([np.array(c, dtype=np.int32) for c in cls], axis=0)
return plot_images(
imgs, batch_idx, cls, bboxes=boxes, masks=masks, kpts=kpts, max_subplots=len(images), save=False, threaded=False
)
def prompt_sql_query(query):
"""Plots images with optional labels from a similar data set."""
check_requirements("openai>=1.6.1")
from openai import OpenAI
if not SETTINGS["openai_api_key"]:
logger.warning("OpenAI API key not found in settings. Please enter your API key below.")
openai_api_key = getpass.getpass("OpenAI API key: ")
SETTINGS.update({"openai_api_key": openai_api_key})
openai = OpenAI(api_key=SETTINGS["openai_api_key"])
messages = [
{
"role": "system",
"content": """
You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query based on
the following schema and a user request. You only need to output the format with fixed selection
statement that selects everything from "'table'", like `SELECT * from 'table'`
Schema:
im_file: string not null
labels: list<item: string> not null
child 0, item: string
cls: list<item: int64> not null
child 0, item: int64
bboxes: list<item: list<item: double>> not null
child 0, item: list<item: double>
child 0, item: double
masks: list<item: list<item: list<item: int64>>> not null
child 0, item: list<item: list<item: int64>>
child 0, item: list<item: int64>
child 0, item: int64
keypoints: list<item: list<item: list<item: double>>> not null
child 0, item: list<item: list<item: double>>
child 0, item: list<item: double>
child 0, item: double
vector: fixed_size_list<item: float>[256] not null
child 0, item: float
Some details about the schema:
- the "labels" column contains the string values like 'person' and 'dog' for the respective objects
in each image
- the "cls" column contains the integer values on these classes that map them the labels
Example of a correct query:
request - Get all data points that contain 2 or more people and at least one dog
correct query-
SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1;
""",
},
{"role": "user", "content": f"{query}"},
]
response = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
return response.choices[0].message.content
| [
"lancedb.pydantic.Vector"
] | [((3694, 3716), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (3702, 3716), True, 'import numpy as np\n'), ((4054, 4087), 'numpy.concatenate', 'np.concatenate', (['batch_idx'], {'axis': '(0)'}), '(batch_idx, axis=0)\n', (4068, 4087), True, 'import numpy as np\n'), ((4421, 4456), 'ultralytics.utils.checks.check_requirements', 'check_requirements', (['"""openai>=1.6.1"""'], {}), "('openai>=1.6.1')\n", (4439, 4456), False, 'from ultralytics.utils.checks import check_requirements\n'), ((4758, 4800), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "SETTINGS['openai_api_key']"}), "(api_key=SETTINGS['openai_api_key'])\n", (4764, 4800), False, 'from openai import OpenAI\n'), ((798, 817), 'lancedb.pydantic.Vector', 'Vector', (['vector_size'], {}), '(vector_size)\n', (804, 817), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2746, 2761), 'cv2.imread', 'cv2.imread', (['imf'], {}), '(imf)\n', (2756, 2761), False, 'import cv2\n'), ((2775, 2810), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (2787, 2810), False, 'import cv2\n'), ((3729, 3757), 'numpy.stack', 'np.stack', (['plot_masks'], {'axis': '(0)'}), '(plot_masks, axis=0)\n', (3737, 3757), True, 'import numpy as np\n'), ((3786, 3813), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.uint8'}), '(0, dtype=np.uint8)\n', (3794, 3813), True, 'import numpy as np\n'), ((3825, 3858), 'numpy.concatenate', 'np.concatenate', (['plot_kpts'], {'axis': '(0)'}), '(plot_kpts, axis=0)\n', (3839, 3858), True, 'import numpy as np\n'), ((3886, 3921), 'numpy.zeros', 'np.zeros', (['(0, 51)'], {'dtype': 'np.float32'}), '((0, 51), dtype=np.float32)\n', (3894, 3921), True, 'import numpy as np\n'), ((4008, 4037), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (4016, 4037), True, 'import numpy as np\n'), ((4535, 4628), 'ultralytics.utils.LOGGER.warning', 'logger.warning', (['"""OpenAI API key not found in 
settings. Please enter your API key below."""'], {}), "(\n 'OpenAI API key not found in settings. Please enter your API key below.')\n", (4549, 4628), True, 'from ultralytics.utils import LOGGER as logger\n'), ((4649, 4684), 'getpass.getpass', 'getpass.getpass', (['"""OpenAI API key: """'], {}), "('OpenAI API key: ')\n", (4664, 4684), False, 'import getpass\n'), ((4693, 4744), 'ultralytics.utils.SETTINGS.update', 'SETTINGS.update', (["{'openai_api_key': openai_api_key}"], {}), "({'openai_api_key': openai_api_key})\n", (4708, 4744), False, 'from ultralytics.utils import SETTINGS\n'), ((3944, 3978), 'numpy.concatenate', 'np.concatenate', (['plot_boxes'], {'axis': '(0)'}), '(plot_boxes, axis=0)\n', (3958, 3978), True, 'import numpy as np\n'), ((4114, 4141), 'numpy.array', 'np.array', (['c'], {'dtype': 'np.int32'}), '(c, dtype=np.int32)\n', (4122, 4141), True, 'import numpy as np\n'), ((3071, 3108), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3079, 3108), True, 'import numpy as np\n'), ((3492, 3527), 'numpy.array', 'np.array', (['kpts[i]'], {'dtype': 'np.float32'}), '(kpts[i], dtype=np.float32)\n', (3500, 3527), True, 'import numpy as np\n'), ((3296, 3330), 'numpy.array', 'np.array', (['masks[i]'], {'dtype': 'np.uint8'}), '(masks[i], dtype=np.uint8)\n', (3304, 3330), True, 'import numpy as np\n'), ((2905, 2939), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (2914, 2939), False, 'from ultralytics.data.augment import LetterBox\n'), ((3368, 3402), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (3377, 3402), False, 'from ultralytics.data.augment import LetterBox\n'), ((3638, 3675), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3646, 3675), True, 'import numpy as np\n')] |
from click.testing import CliRunner
from lancedb.cli.cli import cli
from lancedb.utils import CONFIG
def test_entry():
runner = CliRunner()
result = runner.invoke(cli)
assert result.exit_code == 0 # Main check
assert "lancedb" in result.output.lower() # lazy check
def test_diagnostics():
runner = CliRunner()
result = runner.invoke(cli, ["diagnostics", "--disabled"])
assert result.exit_code == 0 # Main check
assert not CONFIG["diagnostics"]
result = runner.invoke(cli, ["diagnostics", "--enabled"])
assert result.exit_code == 0 # Main check
assert CONFIG["diagnostics"]
def test_config():
runner = CliRunner()
result = runner.invoke(cli, ["config"])
assert result.exit_code == 0 # Main check
cfg = CONFIG.copy()
cfg.pop("uuid")
for item in cfg: # check for keys only as formatting is subject to change
assert item in result.output
| [
"lancedb.utils.CONFIG.copy"
] | [((134, 145), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (143, 145), False, 'from click.testing import CliRunner\n'), ((324, 335), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (333, 335), False, 'from click.testing import CliRunner\n'), ((660, 671), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (669, 671), False, 'from click.testing import CliRunner\n'), ((773, 786), 'lancedb.utils.CONFIG.copy', 'CONFIG.copy', ([], {}), '()\n', (784, 786), False, 'from lancedb.utils import CONFIG\n')] |
import os
import argparse
import lancedb
from lancedb.context import contextualize
from lancedb.embeddings import with_embeddings
from datasets import load_dataset
import openai
import pytest
import subprocess
from main import embed_func, create_prompt, complete
# DOWNLOAD ==============================================================
subprocess.Popen(
"wget -c https://eto-public.s3.us-west-2.amazonaws.com/datasets/youtube_transcript/youtube-transcriptions_sample.jsonl",
shell=True,
).wait()
# Testing ===========================================================
@pytest.fixture
def mock_embed_func(monkeypatch):
def mock_api_call(*args, **kwargs):
return {"data": [{"embedding": [0.5]} for _ in range(10)]}
monkeypatch.setattr(openai.Embedding, "create", mock_api_call)
@pytest.fixture
def mock_complete(monkeypatch):
def mock_api_call(*args, **kwargs):
return {"choices": [{"text": "test"}]}
monkeypatch.setattr(openai.Completion, "create", mock_api_call)
def test_main(mock_embed_func, mock_complete):
args = argparse.Namespace(
query="test",
context_length=3,
window_size=20,
stride=4,
openai_key="test",
model="test",
)
db = lancedb.connect("~/tmp/lancedb")
table_name = "youtube-chatbot"
if table_name not in db.table_names():
data = load_dataset("jamescalam/youtube-transcriptions", split="train")
df = (
contextualize(data.to_pandas())
.groupby("title")
.text_col("text")
.window(args.window_size)
.stride(args.stride)
.to_df()
)
df = df.iloc[:10].reset_index(drop=True)
print(df.shape)
data = with_embeddings(embed_func, df, show_progress=True)
data.to_pandas().head(1)
tbl = db.create_table(table_name, data)
print(f"Created LaneDB table of length: {len(tbl)}")
else:
tbl = db.open_table(table_name)
load_dataset("jamescalam/youtube-transcriptions", split="train")
emb = embed_func(args.query)[0]
context = tbl.search(emb).limit(args.context_length).to_df()
prompt = create_prompt(args.query, context)
complete(prompt)
top_match = context.iloc[0]
print(f"Top Match: {top_match['url']}&t={top_match['start']}")
| [
"lancedb.connect",
"lancedb.embeddings.with_embeddings"
] | [((1071, 1184), 'argparse.Namespace', 'argparse.Namespace', ([], {'query': '"""test"""', 'context_length': '(3)', 'window_size': '(20)', 'stride': '(4)', 'openai_key': '"""test"""', 'model': '"""test"""'}), "(query='test', context_length=3, window_size=20, stride=4,\n openai_key='test', model='test')\n", (1089, 1184), False, 'import argparse\n'), ((1246, 1278), 'lancedb.connect', 'lancedb.connect', (['"""~/tmp/lancedb"""'], {}), "('~/tmp/lancedb')\n", (1261, 1278), False, 'import lancedb\n'), ((1995, 2059), 'datasets.load_dataset', 'load_dataset', (['"""jamescalam/youtube-transcriptions"""'], {'split': '"""train"""'}), "('jamescalam/youtube-transcriptions', split='train')\n", (2007, 2059), False, 'from datasets import load_dataset\n'), ((2174, 2208), 'main.create_prompt', 'create_prompt', (['args.query', 'context'], {}), '(args.query, context)\n', (2187, 2208), False, 'from main import embed_func, create_prompt, complete\n'), ((2213, 2229), 'main.complete', 'complete', (['prompt'], {}), '(prompt)\n', (2221, 2229), False, 'from main import embed_func, create_prompt, complete\n'), ((339, 498), 'subprocess.Popen', 'subprocess.Popen', (['"""wget -c https://eto-public.s3.us-west-2.amazonaws.com/datasets/youtube_transcript/youtube-transcriptions_sample.jsonl"""'], {'shell': '(True)'}), "(\n 'wget -c https://eto-public.s3.us-west-2.amazonaws.com/datasets/youtube_transcript/youtube-transcriptions_sample.jsonl'\n , shell=True)\n", (355, 498), False, 'import subprocess\n'), ((1372, 1436), 'datasets.load_dataset', 'load_dataset', (['"""jamescalam/youtube-transcriptions"""'], {'split': '"""train"""'}), "('jamescalam/youtube-transcriptions', split='train')\n", (1384, 1436), False, 'from datasets import load_dataset\n'), ((1746, 1797), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_func', 'df'], {'show_progress': '(True)'}), '(embed_func, df, show_progress=True)\n', (1761, 1797), False, 'from lancedb.embeddings import with_embeddings\n'), ((2070, 2092), 
'main.embed_func', 'embed_func', (['args.query'], {}), '(args.query)\n', (2080, 2092), False, 'from main import embed_func, create_prompt, complete\n')] |
from datetime import timedelta
import pyarrow as pa
import pytest
import pytest_asyncio
from lancedb import AsyncConnection, AsyncTable, connect_async
from lancedb.index import BTree, IvfPq
@pytest_asyncio.fixture
async def db_async(tmp_path) -> AsyncConnection:
return await connect_async(tmp_path, read_consistency_interval=timedelta(seconds=0))
def sample_fixed_size_list_array(nrows, dim):
vector_data = pa.array([float(i) for i in range(dim * nrows)], pa.float32())
return pa.FixedSizeListArray.from_arrays(vector_data, dim)
DIM = 8
NROWS = 256
@pytest_asyncio.fixture
async def some_table(db_async):
data = pa.Table.from_pydict(
{
"id": list(range(256)),
"vector": sample_fixed_size_list_array(NROWS, DIM),
}
)
return await db_async.create_table(
"some_table",
data,
)
@pytest.mark.asyncio
async def test_create_scalar_index(some_table: AsyncTable):
# Can create
await some_table.create_index("id")
# Can recreate if replace=True
await some_table.create_index("id", replace=True)
indices = await some_table.list_indices()
assert len(indices) == 1
assert indices[0].index_type == "BTree"
assert indices[0].columns == ["id"]
# Can't recreate if replace=False
with pytest.raises(RuntimeError, match="already exists"):
await some_table.create_index("id", replace=False)
# can also specify index type
await some_table.create_index("id", config=BTree())
@pytest.mark.asyncio
async def test_create_vector_index(some_table: AsyncTable):
# Can create
await some_table.create_index("vector")
# Can recreate if replace=True
await some_table.create_index("vector", replace=True)
# Can't recreate if replace=False
with pytest.raises(RuntimeError, match="already exists"):
await some_table.create_index("vector", replace=False)
# Can also specify index type
await some_table.create_index("vector", config=IvfPq(num_partitions=100))
indices = await some_table.list_indices()
assert len(indices) == 1
assert indices[0].index_type == "IvfPq"
assert indices[0].columns == ["vector"]
| [
"lancedb.index.IvfPq",
"lancedb.index.BTree"
] | [((495, 546), 'pyarrow.FixedSizeListArray.from_arrays', 'pa.FixedSizeListArray.from_arrays', (['vector_data', 'dim'], {}), '(vector_data, dim)\n', (528, 546), True, 'import pyarrow as pa\n'), ((470, 482), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (480, 482), True, 'import pyarrow as pa\n'), ((1303, 1354), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""already exists"""'}), "(RuntimeError, match='already exists')\n", (1316, 1354), False, 'import pytest\n'), ((1789, 1840), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""already exists"""'}), "(RuntimeError, match='already exists')\n", (1802, 1840), False, 'import pytest\n'), ((333, 353), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (342, 353), False, 'from datetime import timedelta\n'), ((1496, 1503), 'lancedb.index.BTree', 'BTree', ([], {}), '()\n', (1501, 1503), False, 'from lancedb.index import BTree, IvfPq\n'), ((1990, 2015), 'lancedb.index.IvfPq', 'IvfPq', ([], {'num_partitions': '(100)'}), '(num_partitions=100)\n', (1995, 2015), False, 'from lancedb.index import BTree, IvfPq\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from copy import copy
from datetime import date, datetime, timedelta
from pathlib import Path
from time import sleep
from typing import List
from unittest.mock import PropertyMock, patch
import lance
import lancedb
import numpy as np
import pandas as pd
import polars as pl
import pyarrow as pa
import pytest
import pytest_asyncio
from lancedb.conftest import MockTextEmbeddingFunction
from lancedb.db import AsyncConnection, LanceDBConnection
from lancedb.embeddings import EmbeddingFunctionConfig, EmbeddingFunctionRegistry
from lancedb.pydantic import LanceModel, Vector
from lancedb.table import LanceTable
from pydantic import BaseModel
class MockDB:
def __init__(self, uri: Path):
self.uri = uri
self.read_consistency_interval = None
@functools.cached_property
def is_managed_remote(self) -> bool:
return False
@pytest.fixture
def db(tmp_path) -> MockDB:
return MockDB(tmp_path)
@pytest_asyncio.fixture
async def db_async(tmp_path) -> AsyncConnection:
return await lancedb.connect_async(
tmp_path, read_consistency_interval=timedelta(seconds=0)
)
def test_basic(db):
ds = LanceTable.create(
db,
"test",
data=[
{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
],
).to_lance()
table = LanceTable(db, "test")
assert table.name == "test"
assert table.schema == ds.schema
assert table.to_lance().to_table() == ds.to_table()
@pytest.mark.asyncio
async def test_close(db_async: AsyncConnection):
table = await db_async.create_table("some_table", data=[{"id": 0}])
assert table.is_open()
table.close()
assert not table.is_open()
with pytest.raises(Exception, match="Table some_table is closed"):
await table.count_rows()
assert str(table) == "ClosedTable(some_table)"
@pytest.mark.asyncio
async def test_update_async(db_async: AsyncConnection):
table = await db_async.create_table("some_table", data=[{"id": 0}])
assert await table.count_rows("id == 0") == 1
assert await table.count_rows("id == 7") == 0
await table.update({"id": 7})
assert await table.count_rows("id == 7") == 1
assert await table.count_rows("id == 0") == 0
await table.add([{"id": 2}])
await table.update(where="id % 2 == 0", updates_sql={"id": "5"})
assert await table.count_rows("id == 7") == 1
assert await table.count_rows("id == 2") == 0
assert await table.count_rows("id == 5") == 1
await table.update({"id": 10}, where="id == 5")
assert await table.count_rows("id == 10") == 1
def test_create_table(db):
schema = pa.schema(
[
pa.field("vector", pa.list_(pa.float32(), 2)),
pa.field("item", pa.string()),
pa.field("price", pa.float32()),
]
)
expected = pa.Table.from_arrays(
[
pa.FixedSizeListArray.from_arrays(pa.array([3.1, 4.1, 5.9, 26.5]), 2),
pa.array(["foo", "bar"]),
pa.array([10.0, 20.0]),
],
schema=schema,
)
data = [
[
{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
]
]
df = pd.DataFrame(data[0])
data.append(df)
data.append(pa.Table.from_pandas(df, schema=schema))
for i, d in enumerate(data):
tbl = (
LanceTable.create(db, f"test_{i}", data=d, schema=schema)
.to_lance()
.to_table()
)
assert expected == tbl
def test_empty_table(db):
schema = pa.schema(
[
pa.field("vector", pa.list_(pa.float32(), 2)),
pa.field("item", pa.string()),
pa.field("price", pa.float32()),
]
)
tbl = LanceTable.create(db, "test", schema=schema)
data = [
{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
]
tbl.add(data=data)
def test_add(db):
schema = pa.schema(
[
pa.field("vector", pa.list_(pa.float32(), 2)),
pa.field("item", pa.string()),
pa.field("price", pa.float64()),
]
)
table = LanceTable.create(
db,
"test",
data=[
{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
],
)
_add(table, schema)
table = LanceTable.create(db, "test2", schema=schema)
table.add(
data=[
{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
],
)
_add(table, schema)
def test_add_pydantic_model(db):
    """Nested pydantic models round-trip through add/search.

    Regression test; see the linked issue below.
    """
    # https://github.com/lancedb/lancedb/issues/562
    class Metadata(BaseModel):
        source: str
        timestamp: datetime

    class Document(BaseModel):
        content: str
        meta: Metadata

    class LanceSchema(LanceModel):
        id: str
        vector: Vector(2)
        li: List[int]
        payload: Document

    tbl = LanceTable.create(db, "mytable", schema=LanceSchema, mode="overwrite")
    assert tbl.schema == LanceSchema.to_arrow_schema()
    # add works
    expected = LanceSchema(
        id="id",
        vector=[0.0, 0.0],
        li=[1, 2, 3],
        payload=Document(
            content="foo", meta=Metadata(source="bar", timestamp=datetime.now())
        ),
    )
    tbl.add([expected])
    result = tbl.search([0.0, 0.0]).limit(1).to_pydantic(LanceSchema)[0]
    assert result == expected
    # flatten=1 expands only one level of struct nesting
    flattened = tbl.search([0.0, 0.0]).limit(1).to_pandas(flatten=1)
    assert len(flattened.columns) == 6  # _distance is automatically added
    # flatten=True expands all nesting levels
    really_flattened = tbl.search([0.0, 0.0]).limit(1).to_pandas(flatten=True)
    assert len(really_flattened.columns) == 7
@pytest.mark.asyncio
async def test_add_async(db_async: AsyncConnection):
    """add() on the async API appends rows that persist across table handles."""
    table = await db_async.create_table(
        "test",
        data=[
            {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
            {"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
        ],
    )
    assert await table.count_rows() == 2
    await table.add(
        data=[
            {"vector": [10.0, 11.0], "item": "baz", "price": 30.0},
        ],
    )
    # Re-open the table to confirm the added row was persisted,
    # not just visible on the original handle.
    table = await db_async.open_table("test")
    assert await table.count_rows() == 3
def test_polars(db):
    """Polars DataFrames round-trip through create(), search(), and to_polars()."""
    data = {
        "vector": [[3.1, 4.1], [5.9, 26.5]],
        "item": ["foo", "bar"],
        "price": [10.0, 20.0],
    }
    # Ingest polars dataframe
    table = LanceTable.create(db, "test", data=pl.DataFrame(data))
    assert len(table) == 2
    result = table.to_pandas()
    assert np.allclose(result["vector"].tolist(), data["vector"])
    assert result["item"].tolist() == data["item"]
    assert np.allclose(result["price"].tolist(), data["price"])
    # Polars strings are stored as pyarrow large_string in the table schema.
    schema = pa.schema(
        [
            pa.field("vector", pa.list_(pa.float32(), 2)),
            pa.field("item", pa.large_string()),
            pa.field("price", pa.float64()),
        ]
    )
    assert table.schema == schema
    # search results to polars dataframe
    q = [3.1, 4.1]
    result = table.search(q).limit(1).to_polars()
    assert np.allclose(result["vector"][0], q)
    assert result["item"][0] == "foo"
    assert np.allclose(result["price"][0], 10.0)
    # entire table to polars dataframe (lazy frame; collect() materializes it)
    result = table.to_polars()
    assert np.allclose(result.collect()["vector"].to_list(), data["vector"])
    # make sure filtering isn't broken
    filtered_result = result.filter(pl.col("item").is_in(["foo", "bar"])).collect()
    assert len(filtered_result) == 2
def _add(table, schema):
    """Append one row to *table* and verify the complete arrow contents.

    Shared helper for the add() tests; expects *table* to already hold the
    canonical two foo/bar rows.
    """
    assert len(table) == 2
    table.add([{"vector": [6.3, 100.5], "item": "new", "price": 30.0}])
    assert len(table) == 3
    all_vectors = pa.FixedSizeListArray.from_arrays(
        pa.array([3.1, 4.1, 5.9, 26.5, 6.3, 100.5]), 2
    )
    expected = pa.Table.from_arrays(
        [
            all_vectors,
            pa.array(["foo", "bar", "new"]),
            pa.array([10.0, 20.0, 30.0]),
        ],
        schema=schema,
    )
    assert expected == table.to_arrow()
def test_versioning(db):
    """Each mutation bumps the table version; checkout() rewinds to older data."""
    table = LanceTable.create(
        db,
        "test",
        data=[
            {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
            {"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
        ],
    )
    # Creation alone already produces two versions.
    assert len(table.list_versions()) == 2
    assert table.version == 2
    table.add([{"vector": [6.3, 100.5], "item": "new", "price": 30.0}])
    assert len(table.list_versions()) == 3
    assert table.version == 3
    assert len(table) == 3
    # Rewind to the pre-add snapshot.
    table.checkout(2)
    assert table.version == 2
    assert len(table) == 2
def test_create_index_method():
    """create_index() forwards its arguments to the underlying lance dataset's create_index."""
    # Patch the mutable-dataset property so no real dataset is touched.
    with patch.object(
        LanceTable, "_dataset_mut", new_callable=PropertyMock
    ) as mock_dataset:
        # Setup mock responses
        mock_dataset.return_value.create_index.return_value = None
        # Create a LanceTable object
        connection = LanceDBConnection(uri="mock.uri")
        table = LanceTable(connection, "test_table")
        # Call the create_index method
        table.create_index(
            metric="L2",
            num_partitions=256,
            num_sub_vectors=96,
            vector_column_name="vector",
            replace=True,
            index_cache_size=256,
        )
        # Check that the _dataset.create_index method was called
        # with the right parameters
        mock_dataset.return_value.create_index.assert_called_once_with(
            column="vector",
            index_type="IVF_PQ",
            metric="L2",
            num_partitions=256,
            num_sub_vectors=96,
            replace=True,
            accelerator=None,
            index_cache_size=256,
        )
def test_add_with_nans(db):
    """NaN or wrong-length vectors raise by default; on_bad_vectors can drop or fill them."""
    # by default we raise an error on bad input vectors
    bad_data = [
        {"vector": [np.nan], "item": "bar", "price": 20.0},
        {"vector": [5], "item": "bar", "price": 20.0},
        {"vector": [np.nan, np.nan], "item": "bar", "price": 20.0},
        {"vector": [np.nan, 5.0], "item": "bar", "price": 20.0},
    ]
    for row in bad_data:
        with pytest.raises(ValueError):
            LanceTable.create(
                db,
                "error_test",
                data=[{"vector": [3.1, 4.1], "item": "foo", "price": 10.0}, row],
            )
    # "drop" keeps only the rows whose vectors are fully valid
    table = LanceTable.create(
        db,
        "drop_test",
        data=[
            {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
            {"vector": [np.nan], "item": "bar", "price": 20.0},
            {"vector": [5], "item": "bar", "price": 20.0},
            {"vector": [np.nan, np.nan], "item": "bar", "price": 20.0},
        ],
        on_bad_vectors="drop",
    )
    assert len(table) == 1
    # We can fill bad input with some value
    table = LanceTable.create(
        db,
        "fill_test",
        data=[
            {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
            {"vector": [np.nan], "item": "bar", "price": 20.0},
            {"vector": [np.nan, np.nan], "item": "bar", "price": 20.0},
        ],
        on_bad_vectors="fill",
        fill_value=0.0,
    )
    assert len(table) == 3
    arrow_tbl = table.to_lance().to_table(filter="item == 'bar'")
    v = arrow_tbl["vector"].to_pylist()[0]
    assert np.allclose(v, np.array([0.0, 0.0]))
def test_restore(db):
    """restore() materializes an old version as a new head; out-of-range versions raise."""
    table = LanceTable.create(
        db,
        "my_table",
        data=[{"vector": [1.1, 0.9], "type": "vector"}],
    )
    table.add([{"vector": [0.5, 0.2], "type": "vector"}])
    table.restore(2)
    # Restoring appends a new version rather than rewriting history.
    assert len(table.list_versions()) == 4
    assert len(table) == 1
    expected = table.to_arrow()
    table.checkout(2)
    table.restore()
    assert len(table.list_versions()) == 5
    assert table.to_arrow() == expected
    table.restore(5)  # latest version should be no-op
    assert len(table.list_versions()) == 5
    # Versions beyond the head or below 1 are invalid.
    with pytest.raises(ValueError):
        table.restore(6)
    with pytest.raises(ValueError):
        table.restore(0)
def test_merge(db, tmp_path):
    """merge() joins extra columns from a pyarrow table or an on-disk lance dataset."""
    tbl = LanceTable.create(
        db,
        "my_table",
        data=[{"vector": [1.1, 0.9], "id": 0}, {"vector": [1.2, 1.9], "id": 1}],
    )
    extra_cols = pa.table({"document": ["foo", "bar"], "id": [0, 1]})
    tbl.merge(extra_cols, left_on="id")
    assert len(tbl.list_versions()) == 3
    expected = pa.table(
        {"vector": [[1.1, 0.9], [1.2, 1.9]], "id": [0, 1], "document": ["foo", "bar"]},
        schema=tbl.schema,
    )
    assert tbl.to_arrow() == expected
    # Merging straight from an on-disk lance dataset is also supported.
    extra_ds = lance.write_dataset(extra_cols, tmp_path / "other_table.lance")
    tbl.restore(1)
    tbl.merge(extra_ds, left_on="id")
def test_delete(db):
    """delete() with a SQL predicate removes matching rows and creates a new version."""
    rows = [{"vector": [1.1, 0.9], "id": 0}, {"vector": [1.2, 1.9], "id": 1}]
    tbl = LanceTable.create(db, "my_table", data=rows)
    assert len(tbl) == 2
    assert len(tbl.list_versions()) == 2
    tbl.delete("id=0")
    # The delete itself is a new version.
    assert len(tbl.list_versions()) == 3
    assert tbl.version == 3
    assert len(tbl) == 1
    assert tbl.to_pandas()["id"].tolist() == [1]
def test_update(db):
    """update() rewrites matching rows in place and bumps the version."""
    rows = [{"vector": [1.1, 0.9], "id": 0}, {"vector": [1.2, 1.9], "id": 1}]
    tbl = LanceTable.create(db, "my_table", data=rows)
    assert len(tbl) == 2
    assert len(tbl.list_versions()) == 2
    tbl.update(where="id=0", values={"vector": [1.1, 1.1]})
    assert len(tbl.list_versions()) == 3
    assert tbl.version == 3
    assert len(tbl) == 2
    vectors = tbl.to_arrow()["vector"].combine_chunks()
    vectors = vectors.values.to_numpy().reshape(2, 2)
    # Updated rows are re-appended, so the untouched row now comes first.
    assert np.allclose(vectors, np.array([[1.2, 1.9], [1.1, 1.1]]))
def test_update_types(db):
    """update() accepts both SQL expressions and Python values across column types."""
    table = LanceTable.create(
        db,
        "my_table",
        data=[
            {
                "id": 0,
                "str": "foo",
                "float": 1.1,
                "timestamp": datetime(2021, 1, 1),
                "date": date(2021, 1, 1),
                "vector1": [1.0, 0.0],
                "vector2": [1.0, 1.0],
            }
        ],
    )
    # Update with SQL
    table.update(
        values_sql=dict(
            id="1",
            str="'bar'",
            float="2.2",
            timestamp="TIMESTAMP '2021-01-02 00:00:00'",
            date="DATE '2021-01-02'",
            vector1="[2.0, 2.0]",
            vector2="[3.0, 3.0]",
        )
    )
    actual = table.to_arrow().to_pylist()[0]
    expected = dict(
        id=1,
        str="bar",
        float=2.2,
        timestamp=datetime(2021, 1, 2),
        date=date(2021, 1, 2),
        vector1=[2.0, 2.0],
        vector2=[3.0, 3.0],
    )
    assert actual == expected
    # Update with values
    table.update(
        values=dict(
            id=2,
            str="baz",
            float=3.3,
            timestamp=datetime(2021, 1, 3),
            date=date(2021, 1, 3),
            vector1=[3.0, 3.0],
            # numpy arrays are accepted for vector columns too
            vector2=np.array([4.0, 4.0]),
        )
    )
    actual = table.to_arrow().to_pylist()[0]
    expected = dict(
        id=2,
        str="baz",
        float=3.3,
        timestamp=datetime(2021, 1, 3),
        date=date(2021, 1, 3),
        vector1=[3.0, 3.0],
        vector2=[4.0, 4.0],
    )
    assert actual == expected
def test_merge_insert(db):
    """merge_insert() covers upsert, conditional update, insert-if-absent, and replace-range."""
    table = LanceTable.create(
        db,
        "my_table",
        data=pa.table({"a": [1, 2, 3], "b": ["a", "b", "c"]}),
    )
    assert len(table) == 3
    # Remember the version so each scenario can start from the same state.
    version = table.version
    new_data = pa.table({"a": [2, 3, 4], "b": ["x", "y", "z"]})
    # upsert
    table.merge_insert(
        "a"
    ).when_matched_update_all().when_not_matched_insert_all().execute(new_data)
    expected = pa.table({"a": [1, 2, 3, 4], "b": ["a", "x", "y", "z"]})
    assert table.to_arrow().sort_by("a") == expected
    table.restore(version)
    # conditional update
    table.merge_insert("a").when_matched_update_all(where="target.b = 'b'").execute(
        new_data
    )
    expected = pa.table({"a": [1, 2, 3], "b": ["a", "x", "c"]})
    assert table.to_arrow().sort_by("a") == expected
    table.restore(version)
    # insert-if-not-exists
    table.merge_insert("a").when_not_matched_insert_all().execute(new_data)
    expected = pa.table({"a": [1, 2, 3, 4], "b": ["a", "b", "c", "z"]})
    assert table.to_arrow().sort_by("a") == expected
    table.restore(version)
    new_data = pa.table({"a": [2, 4], "b": ["x", "z"]})
    # replace-range: source rows win, unmatched targets with a > 2 are deleted
    table.merge_insert(
        "a"
    ).when_matched_update_all().when_not_matched_insert_all().when_not_matched_by_source_delete(
        "a > 2"
    ).execute(new_data)
    expected = pa.table({"a": [1, 2, 4], "b": ["a", "x", "z"]})
    assert table.to_arrow().sort_by("a") == expected
    table.restore(version)
    # replace-range no condition: all unmatched target rows are deleted
    table.merge_insert(
        "a"
    ).when_matched_update_all().when_not_matched_insert_all().when_not_matched_by_source_delete().execute(
        new_data
    )
    expected = pa.table({"a": [2, 4], "b": ["x", "z"]})
    assert table.to_arrow().sort_by("a") == expected
def test_create_with_embedding_function(db):
    """A configured embedding function embeds string queries like precomputed vectors."""
    class MyTable(LanceModel):
        text: str
        vector: Vector(10)

    func = MockTextEmbeddingFunction()
    texts = ["hello world", "goodbye world", "foo bar baz fizz buzz"]
    df = pd.DataFrame({"text": texts, "vector": func.compute_source_embeddings(texts)})
    conf = EmbeddingFunctionConfig(
        source_column="text", vector_column="vector", function=func
    )
    table = LanceTable.create(
        db,
        "my_table",
        schema=MyTable,
        embedding_functions=[conf],
    )
    table.add(df)
    query_str = "hi how are you?"
    query_vector = func.compute_query_embeddings(query_str)[0]
    # Searching by string must embed it to the same vector, yielding identical hits.
    expected = table.search(query_vector).limit(2).to_arrow()
    actual = table.search(query_str).limit(2).to_arrow()
    assert actual == expected
def test_create_f16_table(db):
    """float16 vector columns can be ingested, indexed, and searched."""
    class MyTable(LanceModel):
        text: str
        vector: Vector(128, value_type=pa.float16())

    df = pd.DataFrame(
        {
            "text": [f"s-{i}" for i in range(10000)],
            "vector": [np.random.randn(128).astype(np.float16) for _ in range(10000)],
        }
    )
    table = LanceTable.create(
        db,
        "f16_tbl",
        schema=MyTable,
    )
    table.add(df)
    table.create_index(num_partitions=2, num_sub_vectors=8)
    # Query with a vector taken from the data itself; its own row must be a hit.
    query = df.vector.iloc[2]
    expected = table.search(query).limit(2).to_arrow()
    assert "s-2" in expected["text"].to_pylist()
def test_add_with_embedding_function(db):
    """Rows added without vectors are embedded automatically via the schema's source field."""
    emb = EmbeddingFunctionRegistry.get_instance().get("test")()

    class MyTable(LanceModel):
        text: str = emb.SourceField()
        vector: Vector(emb.ndims()) = emb.VectorField()

    table = LanceTable.create(db, "my_table", schema=MyTable)
    texts = ["hello world", "goodbye world", "foo bar baz fizz buzz"]
    df = pd.DataFrame({"text": texts})
    table.add(df)
    texts = ["the quick brown fox", "jumped over the lazy dog"]
    table.add([{"text": t} for t in texts])
    query_str = "hi how are you?"
    query_vector = emb.compute_query_embeddings(query_str)[0]
    # Searching by string and by its embedded vector must return identical results.
    expected = table.search(query_vector).limit(2).to_arrow()
    actual = table.search(query_str).limit(2).to_arrow()
    assert actual == expected
def test_multiple_vector_columns(db):
    """search() can target either of two vector columns by name."""
    class MyTable(LanceModel):
        text: str
        vector1: Vector(10)
        vector2: Vector(10)

    tbl = LanceTable.create(
        db,
        "my_table",
        schema=MyTable,
    )
    vec_a = np.random.randn(10)
    vec_b = np.random.randn(10)
    # The two rows carry swapped vectors, so one query vector matches a
    # different row depending on which column is searched.
    tbl.add(
        pd.DataFrame(
            [
                {"vector1": vec_a, "vector2": vec_b, "text": "foo"},
                {"vector1": vec_b, "vector2": vec_a, "text": "bar"},
            ]
        )
    )
    probe = np.random.randn(10)
    hit1 = tbl.search(probe, vector_column_name="vector1").limit(1).to_pandas()
    hit2 = tbl.search(probe, vector_column_name="vector2").limit(1).to_pandas()
    assert hit1["text"].iloc[0] != hit2["text"].iloc[0]
def test_create_scalar_index(db):
    """A scalar index is listed by lance and works for pre-filtered vector search."""
    vec_array = pa.array(
        [[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]], pa.list_(pa.float32(), 2)
    )
    test_data = pa.Table.from_pydict(
        {"x": ["c", "b", "a", "e", "b"], "y": [1, 2, 3, 4, 5], "vector": vec_array}
    )
    table = LanceTable.create(
        db,
        "my_table",
        data=test_data,
    )
    table.create_scalar_index("x")
    indices = table.to_lance().list_indices()
    assert len(indices) == 1
    scalar_index = indices[0]
    assert scalar_index["type"] == "Scalar"
    # Confirm that prefiltering still works with the scalar index column
    results = table.search().where("x = 'c'").to_arrow()
    assert results == test_data.slice(0, 1)
    # [5, 5] is an exact row, so the top hit's distance is zero.
    results = table.search([5, 5]).to_arrow()
    assert results["_distance"][0].as_py() == 0
    # Filtering out that row ('x == b') forces a non-zero nearest distance.
    results = table.search([5, 5]).where("x != 'b'").to_arrow()
    assert results["_distance"][0].as_py() > 0
def test_empty_query(db):
    """search() without a query vector acts as a filtered scan with limit semantics."""
    table = LanceTable.create(
        db,
        "my_table",
        data=[{"text": "foo", "id": 0}, {"text": "bar", "id": 1}],
    )
    df = table.search().select(["id"]).where("text='bar'").limit(1).to_pandas()
    val = df.id.iloc[0]
    assert val == 1
    table = LanceTable.create(db, "my_table2", data=[{"id": i} for i in range(100)])
    # The default limit caps results at 10 rows.
    df = table.search().select(["id"]).to_pandas()
    assert len(df) == 10
    # limit(None) and non-positive limits disable the cap entirely.
    df = table.search().select(["id"]).limit(None).to_pandas()
    assert len(df) == 100
    df = table.search().select(["id"]).limit(-1).to_pandas()
    assert len(df) == 100
def test_search_with_schema_inf_single_vector(db):
    """With exactly one vector column, search() infers it when no column name is given."""
    class MyTable(LanceModel):
        text: str
        vector_col: Vector(10)

    tbl = LanceTable.create(
        db,
        "my_table",
        schema=MyTable,
    )
    vec_a = np.random.randn(10)
    vec_b = np.random.randn(10)
    tbl.add(
        pd.DataFrame(
            [
                {"vector_col": vec_a, "text": "foo"},
                {"vector_col": vec_b, "text": "bar"},
            ]
        )
    )
    probe = np.random.randn(10)
    explicit = tbl.search(probe, vector_column_name="vector_col").limit(1).to_pandas()
    inferred = tbl.search(probe).limit(1).to_pandas()
    # Both forms must find the same row.
    assert explicit["text"].iloc[0] == inferred["text"].iloc[0]
def test_search_with_schema_inf_multiple_vector(db):
    """With several vector columns, search() without a column name is ambiguous and raises."""
    class MyTable(LanceModel):
        text: str
        vector1: Vector(10)
        vector2: Vector(10)

    table = LanceTable.create(
        db,
        "my_table",
        schema=MyTable,
    )
    v1 = np.random.randn(10)
    v2 = np.random.randn(10)
    data = [
        {"vector1": v1, "vector2": v2, "text": "foo"},
        {"vector1": v2, "vector2": v1, "text": "bar"},
    ]
    df = pd.DataFrame(data)
    table.add(df)
    q = np.random.randn(10)
    # No vector_column_name given and the target cannot be inferred -> error.
    with pytest.raises(ValueError):
        table.search(q).limit(1).to_pandas()
def test_compact_cleanup(db):
    """compact_files() merges fragments; cleanup_old_versions() reclaims old data."""
    table = LanceTable.create(
        db,
        "my_table",
        data=[{"text": "foo", "id": 0}, {"text": "bar", "id": 1}],
    )
    table.add([{"text": "baz", "id": 2}])
    assert len(table) == 3
    assert table.version == 3
    stats = table.compact_files()
    assert len(table) == 3
    # Compact_files bump 2 versions.
    assert table.version == 5
    assert stats.fragments_removed > 0
    assert stats.fragments_added == 1
    # Nothing is old enough to delete under the default retention window.
    stats = table.cleanup_old_versions()
    assert stats.bytes_removed == 0
    stats = table.cleanup_old_versions(older_than=timedelta(0), delete_unverified=True)
    assert stats.bytes_removed > 0
    assert table.version == 5
    # The cleaned-up historical version can no longer be checked out.
    with pytest.raises(Exception, match="Version 3 no longer exists"):
        table.checkout(3)
def test_count_rows(db):
    """count_rows() agrees with len() and supports SQL filters."""
    tbl = LanceTable.create(
        db,
        "my_table",
        data=[{"text": "foo", "id": 0}, {"text": "bar", "id": 1}],
    )
    assert len(tbl) == 2
    assert tbl.count_rows() == 2
    assert tbl.count_rows(filter="text='bar'") == 1
def test_hybrid_search(db, tmp_path):
    """Hybrid (vector + FTS) search gives consistent results across rerank settings.

    Also checks that a post-filter (`where`) narrows hybrid results correctly.
    """
    # This test uses an FTS index
    pytest.importorskip("lancedb.fts")
    db = MockDB(str(tmp_path))
    # Create a LanceDB table schema with a vector and a text column
    emb = EmbeddingFunctionRegistry.get_instance().get("test")()

    class MyTable(LanceModel):
        text: str = emb.SourceField()
        vector: Vector(emb.ndims()) = emb.VectorField()

    # Initialize the table using the schema
    table = LanceTable.create(
        db,
        "my_table",
        schema=MyTable,
    )
    # Create a list of 10 unique english phrases
    phrases = [
        "great kid don't get cocky",
        "now that's a name I haven't heard in a long time",
        "if you strike me down I shall become more powerful than you imagine",
        "I find your lack of faith disturbing",
        "I've got a bad feeling about this",
        "never tell me the odds",
        "I am your father",
        "somebody has to save our skins",
        "New strategy R2 let the wookiee win",
        "Arrrrggghhhhhhh",
    ]
    # Add the phrases and vectors to the table
    table.add([{"text": p} for p in phrases])
    # Create a fts index
    table.create_fts_index("text")
    result1 = (
        table.search("Our father who art in heaven", query_type="hybrid")
        .rerank(normalize="score")
        .to_pydantic(MyTable)
    )
    result2 = (  # noqa
        table.search("Our father who art in heaven", query_type="hybrid")
        .rerank(normalize="rank")
        .to_pydantic(MyTable)
    )
    # "score" normalization is the default, so result1 must equal result3.
    result3 = table.search(
        "Our father who art in heaven", query_type="hybrid"
    ).to_pydantic(MyTable)
    assert result1 == result3
    # with post filters
    result = (
        table.search("Arrrrggghhhhhhh", query_type="hybrid")
        .where("text='Arrrrggghhhhhhh'")
        .to_list()
    )
    # Fix: this comparison was a bare expression and never asserted anything.
    assert len(result) == 1
@pytest.mark.parametrize(
    "consistency_interval", [None, timedelta(seconds=0), timedelta(seconds=0.1)]
)
def test_consistency(tmp_path, consistency_interval):
    """read_consistency_interval controls how quickly a second handle sees new writes."""
    db = lancedb.connect(tmp_path)
    table = LanceTable.create(db, "my_table", data=[{"id": 0}])
    db2 = lancedb.connect(tmp_path, read_consistency_interval=consistency_interval)
    table2 = db2.open_table("my_table")
    assert table2.version == table.version
    table.add([{"id": 1}])
    if consistency_interval is None:
        # No interval: the second handle stays stale until checkout_latest().
        assert table2.version == table.version - 1
        table2.checkout_latest()
        assert table2.version == table.version
    elif consistency_interval == timedelta(seconds=0):
        # Zero interval: strongly consistent on every read.
        assert table2.version == table.version
    else:
        # (consistency_interval == timedelta(seconds=0.1)
        assert table2.version == table.version - 1
        sleep(0.1)
        assert table2.version == table.version
def test_restore_consistency(tmp_path):
    """checkout() pins a table to a version even under a zero read-consistency interval."""
    db = lancedb.connect(tmp_path)
    table = LanceTable.create(db, "my_table", data=[{"id": 0}])
    db2 = lancedb.connect(tmp_path, read_consistency_interval=timedelta(seconds=0))
    table2 = db2.open_table("my_table")
    assert table2.version == table.version
    # If we call checkout, it should lose consistency
    table_fixed = copy(table2)
    table_fixed.checkout(table.version)
    # But if we call checkout_latest, it should be consistent again
    table_ref_latest = copy(table_fixed)
    table_ref_latest.checkout_latest()
    table.add([{"id": 2}])
    assert table_fixed.version == table.version - 1
    assert table_ref_latest.version == table.version
# Schema evolution
def test_add_columns(tmp_path):
    """add_columns() appends a SQL-derived column to an existing table."""
    conn = lancedb.connect(tmp_path)
    tbl = LanceTable.create(conn, "my_table", data=pa.table({"id": [0, 1]}))
    tbl.add_columns({"new_col": "id + 2"})
    assert tbl.to_arrow().column_names == ["id", "new_col"]
    assert tbl.to_arrow()["new_col"].to_pylist() == [2, 3]
def test_alter_columns(tmp_path):
    """alter_columns() can rename an existing column in place."""
    conn = lancedb.connect(tmp_path)
    tbl = LanceTable.create(conn, "my_table", data=pa.table({"id": [0, 1]}))
    tbl.alter_columns({"path": "id", "rename": "new_id"})
    assert tbl.to_arrow().column_names == ["new_id"]
def test_drop_columns(tmp_path):
    """drop_columns() removes the named columns from the schema."""
    conn = lancedb.connect(tmp_path)
    tbl = LanceTable.create(
        conn, "my_table", data=pa.table({"id": [0, 1], "category": ["a", "b"]})
    )
    tbl.drop_columns(["category"])
    assert tbl.to_arrow().column_names == ["id"]
@pytest.mark.asyncio
async def test_time_travel(db_async: AsyncConnection):
    """Async checkout/restore: a checked-out table is read-only until restored."""
    # Setup
    table = await db_async.create_table("some_table", data=[{"id": 0}])
    version = await table.version()
    await table.add([{"id": 1}])
    assert await table.count_rows() == 2
    # Make sure we can rewind
    await table.checkout(version)
    assert await table.count_rows() == 1
    # Can't add data in time travel mode
    with pytest.raises(
        ValueError,
        match="table cannot be modified when a specific version is checked out",
    ):
        await table.add([{"id": 2}])
    # Can go back to normal mode
    await table.checkout_latest()
    assert await table.count_rows() == 2
    # Should be able to add data again
    await table.add([{"id": 3}])
    assert await table.count_rows() == 3
    # Now checkout and restore
    await table.checkout(version)
    await table.restore()
    assert await table.count_rows() == 1
    # Should be able to add data
    await table.add([{"id": 4}])
    assert await table.count_rows() == 2
    # Can't use restore if not checked out
    with pytest.raises(ValueError, match="checkout before running restore"):
        await table.restore()
| [
"lancedb.pydantic.Vector",
"lancedb.conftest.MockTextEmbeddingFunction",
"lancedb.table.LanceTable",
"lancedb.connect",
"lancedb.table.LanceTable.create",
"lancedb.db.LanceDBConnection",
"lancedb.embeddings.EmbeddingFunctionConfig",
"lancedb.embeddings.EmbeddingFunctionRegistry.get_instance"
] | [((1992, 2014), 'lancedb.table.LanceTable', 'LanceTable', (['db', '"""test"""'], {}), "(db, 'test')\n", (2002, 2014), False, 'from lancedb.table import LanceTable\n'), ((3907, 3928), 'pandas.DataFrame', 'pd.DataFrame', (['data[0]'], {}), '(data[0])\n', (3919, 3928), True, 'import pandas as pd\n'), ((4450, 4494), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""test"""'], {'schema': 'schema'}), "(db, 'test', schema=schema)\n", (4467, 4494), False, 'from lancedb.table import LanceTable\n'), ((4892, 5041), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""test"""'], {'data': "[{'vector': [3.1, 4.1], 'item': 'foo', 'price': 10.0}, {'vector': [5.9, \n 26.5], 'item': 'bar', 'price': 20.0}]"}), "(db, 'test', data=[{'vector': [3.1, 4.1], 'item': 'foo',\n 'price': 10.0}, {'vector': [5.9, 26.5], 'item': 'bar', 'price': 20.0}])\n", (4909, 5041), False, 'from lancedb.table import LanceTable\n'), ((5141, 5186), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""test2"""'], {'schema': 'schema'}), "(db, 'test2', schema=schema)\n", (5158, 5186), False, 'from lancedb.table import LanceTable\n'), ((5771, 5841), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""mytable"""'], {'schema': 'LanceSchema', 'mode': '"""overwrite"""'}), "(db, 'mytable', schema=LanceSchema, mode='overwrite')\n", (5788, 5841), False, 'from lancedb.table import LanceTable\n'), ((7925, 7960), 'numpy.allclose', 'np.allclose', (["result['vector'][0]", 'q'], {}), "(result['vector'][0], q)\n", (7936, 7960), True, 'import numpy as np\n'), ((8010, 8047), 'numpy.allclose', 'np.allclose', (["result['price'][0]", '(10.0)'], {}), "(result['price'][0], 10.0)\n", (8021, 8047), True, 'import numpy as np\n'), ((8926, 9075), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""test"""'], {'data': "[{'vector': [3.1, 4.1], 'item': 'foo', 'price': 10.0}, {'vector': [5.9, \n 26.5], 'item': 'bar', 'price': 20.0}]"}), "(db, 'test', 
data=[{'vector': [3.1, 4.1], 'item': 'foo',\n 'price': 10.0}, {'vector': [5.9, 26.5], 'item': 'bar', 'price': 20.0}])\n", (8943, 9075), False, 'from lancedb.table import LanceTable\n'), ((11156, 11447), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""drop_test"""'], {'data': "[{'vector': [3.1, 4.1], 'item': 'foo', 'price': 10.0}, {'vector': [np.nan],\n 'item': 'bar', 'price': 20.0}, {'vector': [5], 'item': 'bar', 'price': \n 20.0}, {'vector': [np.nan, np.nan], 'item': 'bar', 'price': 20.0}]", 'on_bad_vectors': '"""drop"""'}), "(db, 'drop_test', data=[{'vector': [3.1, 4.1], 'item':\n 'foo', 'price': 10.0}, {'vector': [np.nan], 'item': 'bar', 'price': \n 20.0}, {'vector': [5], 'item': 'bar', 'price': 20.0}, {'vector': [np.\n nan, np.nan], 'item': 'bar', 'price': 20.0}], on_bad_vectors='drop')\n", (11173, 11447), False, 'from lancedb.table import LanceTable\n'), ((11616, 11875), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""fill_test"""'], {'data': "[{'vector': [3.1, 4.1], 'item': 'foo', 'price': 10.0}, {'vector': [np.nan],\n 'item': 'bar', 'price': 20.0}, {'vector': [np.nan, np.nan], 'item':\n 'bar', 'price': 20.0}]", 'on_bad_vectors': '"""fill"""', 'fill_value': '(0.0)'}), "(db, 'fill_test', data=[{'vector': [3.1, 4.1], 'item':\n 'foo', 'price': 10.0}, {'vector': [np.nan], 'item': 'bar', 'price': \n 20.0}, {'vector': [np.nan, np.nan], 'item': 'bar', 'price': 20.0}],\n on_bad_vectors='fill', fill_value=0.0)\n", (11633, 11875), False, 'from lancedb.table import LanceTable\n'), ((12177, 12263), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': "[{'vector': [1.1, 0.9], 'type': 'vector'}]"}), "(db, 'my_table', data=[{'vector': [1.1, 0.9], 'type':\n 'vector'}])\n", (12194, 12263), False, 'from lancedb.table import LanceTable\n'), ((12865, 12976), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': "[{'vector': [1.1, 0.9], 'id': 0}, {'vector': [1.2, 
1.9], 'id': 1}]"}), "(db, 'my_table', data=[{'vector': [1.1, 0.9], 'id': 0}, {\n 'vector': [1.2, 1.9], 'id': 1}])\n", (12882, 12976), False, 'from lancedb.table import LanceTable\n'), ((13021, 13073), 'pyarrow.table', 'pa.table', (["{'document': ['foo', 'bar'], 'id': [0, 1]}"], {}), "({'document': ['foo', 'bar'], 'id': [0, 1]})\n", (13029, 13073), True, 'import pyarrow as pa\n'), ((13175, 13289), 'pyarrow.table', 'pa.table', (["{'vector': [[1.1, 0.9], [1.2, 1.9]], 'id': [0, 1], 'document': ['foo', 'bar']}"], {'schema': 'table.schema'}), "({'vector': [[1.1, 0.9], [1.2, 1.9]], 'id': [0, 1], 'document': [\n 'foo', 'bar']}, schema=table.schema)\n", (13183, 13289), True, 'import pyarrow as pa\n'), ((13369, 13433), 'lance.write_dataset', 'lance.write_dataset', (['other_table', "(tmp_path / 'other_table.lance')"], {}), "(other_table, tmp_path / 'other_table.lance')\n", (13388, 13433), False, 'import lance\n'), ((13535, 13646), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': "[{'vector': [1.1, 0.9], 'id': 0}, {'vector': [1.2, 1.9], 'id': 1}]"}), "(db, 'my_table', data=[{'vector': [1.1, 0.9], 'id': 0}, {\n 'vector': [1.2, 1.9], 'id': 1}])\n", (13552, 13646), False, 'from lancedb.table import LanceTable\n'), ((13954, 14065), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': "[{'vector': [1.1, 0.9], 'id': 0}, {'vector': [1.2, 1.9], 'id': 1}]"}), "(db, 'my_table', data=[{'vector': [1.1, 0.9], 'id': 0}, {\n 'vector': [1.2, 1.9], 'id': 1}])\n", (13971, 14065), False, 'from lancedb.table import LanceTable\n'), ((16294, 16342), 'pyarrow.table', 'pa.table', (["{'a': [2, 3, 4], 'b': ['x', 'y', 'z']}"], {}), "({'a': [2, 3, 4], 'b': ['x', 'y', 'z']})\n", (16302, 16342), True, 'import pyarrow as pa\n'), ((16489, 16545), 'pyarrow.table', 'pa.table', (["{'a': [1, 2, 3, 4], 'b': ['a', 'x', 'y', 'z']}"], {}), "({'a': [1, 2, 3, 4], 'b': ['a', 'x', 'y', 'z']})\n", (16497, 16545), True, 'import pyarrow as 
pa\n'), ((16776, 16824), 'pyarrow.table', 'pa.table', (["{'a': [1, 2, 3], 'b': ['a', 'x', 'c']}"], {}), "({'a': [1, 2, 3], 'b': ['a', 'x', 'c']})\n", (16784, 16824), True, 'import pyarrow as pa\n'), ((17026, 17082), 'pyarrow.table', 'pa.table', (["{'a': [1, 2, 3, 4], 'b': ['a', 'b', 'c', 'z']}"], {}), "({'a': [1, 2, 3, 4], 'b': ['a', 'b', 'c', 'z']})\n", (17034, 17082), True, 'import pyarrow as pa\n'), ((17180, 17220), 'pyarrow.table', 'pa.table', (["{'a': [2, 4], 'b': ['x', 'z']}"], {}), "({'a': [2, 4], 'b': ['x', 'z']})\n", (17188, 17220), True, 'import pyarrow as pa\n'), ((17431, 17479), 'pyarrow.table', 'pa.table', (["{'a': [1, 2, 4], 'b': ['a', 'x', 'z']}"], {}), "({'a': [1, 2, 4], 'b': ['a', 'x', 'z']})\n", (17439, 17479), True, 'import pyarrow as pa\n'), ((17777, 17817), 'pyarrow.table', 'pa.table', (["{'a': [2, 4], 'b': ['x', 'z']}"], {}), "({'a': [2, 4], 'b': ['x', 'z']})\n", (17785, 17817), True, 'import pyarrow as pa\n'), ((18006, 18033), 'lancedb.conftest.MockTextEmbeddingFunction', 'MockTextEmbeddingFunction', ([], {}), '()\n', (18031, 18033), False, 'from lancedb.conftest import MockTextEmbeddingFunction\n'), ((18204, 18292), 'lancedb.embeddings.EmbeddingFunctionConfig', 'EmbeddingFunctionConfig', ([], {'source_column': '"""text"""', 'vector_column': '"""vector"""', 'function': 'func'}), "(source_column='text', vector_column='vector',\n function=func)\n", (18227, 18292), False, 'from lancedb.embeddings import EmbeddingFunctionConfig, EmbeddingFunctionRegistry\n'), ((18315, 18392), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'schema': 'MyTable', 'embedding_functions': '[conf]'}), "(db, 'my_table', schema=MyTable, embedding_functions=[conf])\n", (18332, 18392), False, 'from lancedb.table import LanceTable\n'), ((19036, 19084), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""f16_tbl"""'], {'schema': 'MyTable'}), "(db, 'f16_tbl', schema=MyTable)\n", (19053, 19084), False, 'from lancedb.table 
import LanceTable\n'), ((19578, 19627), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'schema': 'MyTable'}), "(db, 'my_table', schema=MyTable)\n", (19595, 19627), False, 'from lancedb.table import LanceTable\n'), ((19708, 19737), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': texts}"], {}), "({'text': texts})\n", (19720, 19737), True, 'import pandas as pd\n'), ((20270, 20319), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'schema': 'MyTable'}), "(db, 'my_table', schema=MyTable)\n", (20287, 20319), False, 'from lancedb.table import LanceTable\n'), ((20361, 20380), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (20376, 20380), True, 'import numpy as np\n'), ((20390, 20409), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (20405, 20409), True, 'import numpy as np\n'), ((20548, 20566), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (20560, 20566), True, 'import pandas as pd\n'), ((20594, 20613), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (20609, 20613), True, 'import numpy as np\n'), ((20999, 21100), 'pyarrow.Table.from_pydict', 'pa.Table.from_pydict', (["{'x': ['c', 'b', 'a', 'e', 'b'], 'y': [1, 2, 3, 4, 5], 'vector': vec_array}"], {}), "({'x': ['c', 'b', 'a', 'e', 'b'], 'y': [1, 2, 3, 4, 5],\n 'vector': vec_array})\n", (21019, 21100), True, 'import pyarrow as pa\n'), ((21123, 21172), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': 'test_data'}), "(db, 'my_table', data=test_data)\n", (21140, 21172), False, 'from lancedb.table import LanceTable\n'), ((21808, 21904), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': "[{'text': 'foo', 'id': 0}, {'text': 'bar', 'id': 1}]"}), "(db, 'my_table', data=[{'text': 'foo', 'id': 0}, {'text':\n 'bar', 'id': 1}])\n", (21825, 21904), False, 'from lancedb.table import LanceTable\n'), ((22540, 
22589), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'schema': 'MyTable'}), "(db, 'my_table', schema=MyTable)\n", (22557, 22589), False, 'from lancedb.table import LanceTable\n'), ((22631, 22650), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (22646, 22650), True, 'import numpy as np\n'), ((22660, 22679), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (22675, 22679), True, 'import numpy as np\n'), ((22794, 22812), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (22806, 22812), True, 'import pandas as pd\n'), ((22840, 22859), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (22855, 22859), True, 'import numpy as np\n'), ((23231, 23280), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'schema': 'MyTable'}), "(db, 'my_table', schema=MyTable)\n", (23248, 23280), False, 'from lancedb.table import LanceTable\n'), ((23322, 23341), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (23337, 23341), True, 'import numpy as np\n'), ((23351, 23370), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (23366, 23370), True, 'import numpy as np\n'), ((23509, 23527), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (23521, 23527), True, 'import pandas as pd\n'), ((23555, 23574), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (23570, 23574), True, 'import numpy as np\n'), ((23700, 23796), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': "[{'text': 'foo', 'id': 0}, {'text': 'bar', 'id': 1}]"}), "(db, 'my_table', data=[{'text': 'foo', 'id': 0}, {'text':\n 'bar', 'id': 1}])\n", (23717, 23796), False, 'from lancedb.table import LanceTable\n'), ((24499, 24595), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': "[{'text': 'foo', 'id': 0}, {'text': 'bar', 'id': 1}]"}), "(db, 'my_table', 
data=[{'text': 'foo', 'id': 0}, {'text':\n 'bar', 'id': 1}])\n", (24516, 24595), False, 'from lancedb.table import LanceTable\n'), ((24817, 24851), 'pytest.importorskip', 'pytest.importorskip', (['"""lancedb.fts"""'], {}), "('lancedb.fts')\n", (24836, 24851), False, 'import pytest\n'), ((25200, 25249), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'schema': 'MyTable'}), "(db, 'my_table', schema=MyTable)\n", (25217, 25249), False, 'from lancedb.table import LanceTable\n'), ((26793, 26818), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (26808, 26818), False, 'import lancedb\n'), ((26831, 26882), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': "[{'id': 0}]"}), "(db, 'my_table', data=[{'id': 0}])\n", (26848, 26882), False, 'from lancedb.table import LanceTable\n'), ((26894, 26967), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {'read_consistency_interval': 'consistency_interval'}), '(tmp_path, read_consistency_interval=consistency_interval)\n', (26909, 26967), False, 'import lancedb\n'), ((27586, 27611), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (27601, 27611), False, 'import lancedb\n'), ((27624, 27675), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': "[{'id': 0}]"}), "(db, 'my_table', data=[{'id': 0}])\n", (27641, 27675), False, 'from lancedb.table import LanceTable\n'), ((27917, 27929), 'copy.copy', 'copy', (['table2'], {}), '(table2)\n', (27921, 27929), False, 'from copy import copy\n'), ((28061, 28078), 'copy.copy', 'copy', (['table_fixed'], {}), '(table_fixed)\n', (28065, 28078), False, 'from copy import copy\n'), ((28312, 28337), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (28327, 28337), False, 'import lancedb\n'), ((28349, 28373), 'pyarrow.table', 'pa.table', (["{'id': [0, 1]}"], {}), "({'id': [0, 1]})\n", (28357, 28373), True, 'import 
pyarrow as pa\n'), ((28386, 28430), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': 'data'}), "(db, 'my_table', data=data)\n", (28403, 28430), False, 'from lancedb.table import LanceTable\n'), ((28644, 28669), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (28659, 28669), False, 'import lancedb\n'), ((28681, 28705), 'pyarrow.table', 'pa.table', (["{'id': [0, 1]}"], {}), "({'id': [0, 1]})\n", (28689, 28705), True, 'import pyarrow as pa\n'), ((28718, 28762), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': 'data'}), "(db, 'my_table', data=data)\n", (28735, 28762), False, 'from lancedb.table import LanceTable\n'), ((28922, 28947), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (28937, 28947), False, 'import lancedb\n'), ((28959, 29007), 'pyarrow.table', 'pa.table', (["{'id': [0, 1], 'category': ['a', 'b']}"], {}), "({'id': [0, 1], 'category': ['a', 'b']})\n", (28967, 29007), True, 'import pyarrow as pa\n'), ((29020, 29064), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': 'data'}), "(db, 'my_table', data=data)\n", (29037, 29064), False, 'from lancedb.table import LanceTable\n'), ((2370, 2430), 'pytest.raises', 'pytest.raises', (['Exception'], {'match': '"""Table some_table is closed"""'}), "(Exception, match='Table some_table is closed')\n", (2383, 2430), False, 'import pytest\n'), ((3965, 4004), 'pyarrow.Table.from_pandas', 'pa.Table.from_pandas', (['df'], {'schema': 'schema'}), '(df, schema=schema)\n', (3985, 4004), True, 'import pyarrow as pa\n'), ((5702, 5711), 'lancedb.pydantic.Vector', 'Vector', (['(2)'], {}), '(2)\n', (5708, 5711), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((9508, 9575), 'unittest.mock.patch.object', 'patch.object', (['LanceTable', '"""_dataset_mut"""'], {'new_callable': 'PropertyMock'}), "(LanceTable, '_dataset_mut', 
new_callable=PropertyMock)\n", (9520, 9575), False, 'from unittest.mock import PropertyMock, patch\n'), ((9764, 9797), 'lancedb.db.LanceDBConnection', 'LanceDBConnection', ([], {'uri': '"""mock.uri"""'}), "(uri='mock.uri')\n", (9781, 9797), False, 'from lancedb.db import AsyncConnection, LanceDBConnection\n'), ((9814, 9850), 'lancedb.table.LanceTable', 'LanceTable', (['connection', '"""test_table"""'], {}), "(connection, 'test_table')\n", (9824, 9850), False, 'from lancedb.table import LanceTable\n'), ((12119, 12139), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (12127, 12139), True, 'import numpy as np\n'), ((12707, 12732), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (12720, 12732), False, 'import pytest\n'), ((12769, 12794), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (12782, 12794), False, 'import pytest\n'), ((14444, 14478), 'numpy.array', 'np.array', (['[[1.2, 1.9], [1.1, 1.1]]'], {}), '([[1.2, 1.9], [1.1, 1.1]])\n', (14452, 14478), True, 'import numpy as np\n'), ((17983, 17993), 'lancedb.pydantic.Vector', 'Vector', (['(10)'], {}), '(10)\n', (17989, 17993), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((20218, 20228), 'lancedb.pydantic.Vector', 'Vector', (['(10)'], {}), '(10)\n', (20224, 20228), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((20246, 20256), 'lancedb.pydantic.Vector', 'Vector', (['(10)'], {}), '(10)\n', (20252, 20256), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((22516, 22526), 'lancedb.pydantic.Vector', 'Vector', (['(10)'], {}), '(10)\n', (22522, 22526), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((23179, 23189), 'lancedb.pydantic.Vector', 'Vector', (['(10)'], {}), '(10)\n', (23185, 23189), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((23207, 23217), 'lancedb.pydantic.Vector', 'Vector', (['(10)'], {}), '(10)\n', (23213, 23217), False, 'from lancedb.pydantic import LanceModel, 
Vector\n'), ((23584, 23609), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (23597, 23609), False, 'import pytest\n'), ((24372, 24432), 'pytest.raises', 'pytest.raises', (['Exception'], {'match': '"""Version 3 no longer exists"""'}), "(Exception, match='Version 3 no longer exists')\n", (24385, 24432), False, 'import pytest\n'), ((26682, 26702), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (26691, 26702), False, 'from datetime import date, datetime, timedelta\n'), ((26704, 26726), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0.1)'}), '(seconds=0.1)\n', (26713, 26726), False, 'from datetime import date, datetime, timedelta\n'), ((29580, 29683), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""table cannot be modified when a specific version is checked out"""'}), "(ValueError, match=\n 'table cannot be modified when a specific version is checked out')\n", (29593, 29683), False, 'import pytest\n'), ((30252, 30318), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""checkout before running restore"""'}), "(ValueError, match='checkout before running restore')\n", (30265, 30318), False, 'import pytest\n'), ((1756, 1905), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""test"""'], {'data': "[{'vector': [3.1, 4.1], 'item': 'foo', 'price': 10.0}, {'vector': [5.9, \n 26.5], 'item': 'bar', 'price': 20.0}]"}), "(db, 'test', data=[{'vector': [3.1, 4.1], 'item': 'foo',\n 'price': 10.0}, {'vector': [5.9, 26.5], 'item': 'bar', 'price': 20.0}])\n", (1773, 1905), False, 'from lancedb.table import LanceTable\n'), ((3624, 3648), 'pyarrow.array', 'pa.array', (["['foo', 'bar']"], {}), "(['foo', 'bar'])\n", (3632, 3648), True, 'import pyarrow as pa\n'), ((3662, 3684), 'pyarrow.array', 'pa.array', (['[10.0, 20.0]'], {}), '([10.0, 20.0])\n', (3670, 3684), True, 'import pyarrow as pa\n'), ((7305, 7323), 'polars.DataFrame', 'pl.DataFrame', (['data'], {}), '(data)\n', (7317, 
7323), True, 'import polars as pl\n'), ((8732, 8763), 'pyarrow.array', 'pa.array', (["['foo', 'bar', 'new']"], {}), "(['foo', 'bar', 'new'])\n", (8740, 8763), True, 'import pyarrow as pa\n'), ((8777, 8805), 'pyarrow.array', 'pa.array', (['[10.0, 20.0, 30.0]'], {}), '([10.0, 20.0, 30.0])\n', (8785, 8805), True, 'import pyarrow as pa\n'), ((10939, 10964), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10952, 10964), False, 'import pytest\n'), ((10978, 11083), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""error_test"""'], {'data': "[{'vector': [3.1, 4.1], 'item': 'foo', 'price': 10.0}, row]"}), "(db, 'error_test', data=[{'vector': [3.1, 4.1], 'item':\n 'foo', 'price': 10.0}, row])\n", (10995, 11083), False, 'from lancedb.table import LanceTable\n'), ((15338, 15358), 'datetime.datetime', 'datetime', (['(2021)', '(1)', '(2)'], {}), '(2021, 1, 2)\n', (15346, 15358), False, 'from datetime import date, datetime, timedelta\n'), ((15373, 15389), 'datetime.date', 'date', (['(2021)', '(1)', '(2)'], {}), '(2021, 1, 2)\n', (15377, 15389), False, 'from datetime import date, datetime, timedelta\n'), ((15917, 15937), 'datetime.datetime', 'datetime', (['(2021)', '(1)', '(3)'], {}), '(2021, 1, 3)\n', (15925, 15937), False, 'from datetime import date, datetime, timedelta\n'), ((15952, 15968), 'datetime.date', 'date', (['(2021)', '(1)', '(3)'], {}), '(2021, 1, 3)\n', (15956, 15968), False, 'from datetime import date, datetime, timedelta\n'), ((16167, 16215), 'pyarrow.table', 'pa.table', (["{'a': [1, 2, 3], 'b': ['a', 'b', 'c']}"], {}), "({'a': [1, 2, 3], 'b': ['a', 'b', 'c']})\n", (16175, 16215), True, 'import pyarrow as pa\n'), ((20960, 20972), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (20970, 20972), True, 'import pyarrow as pa\n'), ((24259, 24271), 'datetime.timedelta', 'timedelta', (['(0)'], {}), '(0)\n', (24268, 24271), False, 'from datetime import date, datetime, timedelta\n'), ((27281, 27301), 'datetime.timedelta', 
'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (27290, 27301), False, 'from datetime import date, datetime, timedelta\n'), ((27477, 27487), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (27482, 27487), False, 'from time import sleep\n'), ((27739, 27759), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (27748, 27759), False, 'from datetime import date, datetime, timedelta\n'), ((1698, 1718), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (1707, 1718), False, 'from datetime import date, datetime, timedelta\n'), ((3407, 3418), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (3416, 3418), True, 'import pyarrow as pa\n'), ((3451, 3463), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (3461, 3463), True, 'import pyarrow as pa\n'), ((3575, 3606), 'pyarrow.array', 'pa.array', (['[3.1, 4.1, 5.9, 26.5]'], {}), '([3.1, 4.1, 5.9, 26.5])\n', (3583, 3606), True, 'import pyarrow as pa\n'), ((4365, 4376), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (4374, 4376), True, 'import pyarrow as pa\n'), ((4409, 4421), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (4419, 4421), True, 'import pyarrow as pa\n'), ((4804, 4815), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (4813, 4815), True, 'import pyarrow as pa\n'), ((4848, 4860), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (4858, 4860), True, 'import pyarrow as pa\n'), ((7688, 7705), 'pyarrow.large_string', 'pa.large_string', ([], {}), '()\n', (7703, 7705), True, 'import pyarrow as pa\n'), ((7738, 7750), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (7748, 7750), True, 'import pyarrow as pa\n'), ((8658, 8701), 'pyarrow.array', 'pa.array', (['[3.1, 4.1, 5.9, 26.5, 6.3, 100.5]'], {}), '([3.1, 4.1, 5.9, 26.5, 6.3, 100.5])\n', (8666, 8701), True, 'import pyarrow as pa\n'), ((18819, 18831), 'pyarrow.float16', 'pa.float16', ([], {}), '()\n', (18829, 18831), True, 'import pyarrow as pa\n'), ((19384, 19424), 
'lancedb.embeddings.EmbeddingFunctionRegistry.get_instance', 'EmbeddingFunctionRegistry.get_instance', ([], {}), '()\n', (19422, 19424), False, 'from lancedb.embeddings import EmbeddingFunctionConfig, EmbeddingFunctionRegistry\n'), ((24962, 25002), 'lancedb.embeddings.EmbeddingFunctionRegistry.get_instance', 'EmbeddingFunctionRegistry.get_instance', ([], {}), '()\n', (25000, 25002), False, 'from lancedb.embeddings import EmbeddingFunctionConfig, EmbeddingFunctionRegistry\n'), ((3359, 3371), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (3369, 3371), True, 'import pyarrow as pa\n'), ((4317, 4329), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (4327, 4329), True, 'import pyarrow as pa\n'), ((4756, 4768), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (4766, 4768), True, 'import pyarrow as pa\n'), ((7640, 7652), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (7650, 7652), True, 'import pyarrow as pa\n'), ((14715, 14735), 'datetime.datetime', 'datetime', (['(2021)', '(1)', '(1)'], {}), '(2021, 1, 1)\n', (14723, 14735), False, 'from datetime import date, datetime, timedelta\n'), ((14761, 14777), 'datetime.date', 'date', (['(2021)', '(1)', '(1)'], {}), '(2021, 1, 1)\n', (14765, 14777), False, 'from datetime import date, datetime, timedelta\n'), ((15634, 15654), 'datetime.datetime', 'datetime', (['(2021)', '(1)', '(3)'], {}), '(2021, 1, 3)\n', (15642, 15654), False, 'from datetime import date, datetime, timedelta\n'), ((15673, 15689), 'datetime.date', 'date', (['(2021)', '(1)', '(3)'], {}), '(2021, 1, 3)\n', (15677, 15689), False, 'from datetime import date, datetime, timedelta\n'), ((15743, 15763), 'numpy.array', 'np.array', (['[4.0, 4.0]'], {}), '([4.0, 4.0])\n', (15751, 15763), True, 'import numpy as np\n'), ((4068, 4125), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', 'f"""test_{i}"""'], {'data': 'd', 'schema': 'schema'}), "(db, f'test_{i}', data=d, schema=schema)\n", (4085, 4125), False, 'from lancedb.table import LanceTable\n'), 
((8271, 8285), 'polars.col', 'pl.col', (['"""item"""'], {}), "('item')\n", (8277, 8285), True, 'import polars as pl\n'), ((18944, 18964), 'numpy.random.randn', 'np.random.randn', (['(128)'], {}), '(128)\n', (18959, 18964), True, 'import numpy as np\n'), ((6099, 6113), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6111, 6113), False, 'from datetime import date, datetime, timedelta\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import os
from typing import Any, Callable, Dict, List, Optional, Union
from urllib.parse import urljoin
import attrs
import pyarrow as pa
import requests
from pydantic import BaseModel
from requests.adapters import HTTPAdapter
from urllib3 import Retry
from lancedb.common import Credential
from lancedb.remote import VectorQuery, VectorQueryResult
from lancedb.remote.connection_timeout import LanceDBClientHTTPAdapterFactory
from lancedb.remote.errors import LanceDBClientError
ARROW_STREAM_CONTENT_TYPE = "application/vnd.apache.arrow.stream"
def _check_not_closed(f):
@functools.wraps(f)
def wrapped(self, *args, **kwargs):
if self.closed:
raise ValueError("Connection is closed")
return f(self, *args, **kwargs)
return wrapped
def _read_ipc(resp: requests.Response) -> pa.Table:
    """Deserialize the Arrow IPC file payload carried in an HTTP response body."""
    payload = pa.BufferReader(resp.content)
    with pa.ipc.open_file(payload) as ipc_reader:
        return ipc_reader.read_all()
@attrs.define(slots=False)
class RestfulLanceDBClient:
    """Synchronous HTTP client for the LanceDB Cloud REST API.

    Wraps a cached ``requests.Session`` configured with retry and
    connection-timeout adapters, and exposes ``get``/``post`` helpers plus
    a few table-level convenience calls.
    """

    # Database name; combined with ``region`` to form the default host name.
    db_name: str
    # Cloud region; the special value ``"local"`` switches on local test mode.
    region: str
    # API key sent as the ``x-api-key`` header on every request.
    api_key: Credential
    # Optional base URL that takes precedence over the derived cloud host.
    host_override: Optional[str] = attrs.field(default=None)
    # Flipped by close(); enforced by the @_check_not_closed decorator.
    closed: bool = attrs.field(default=False, init=False)
    @functools.cached_property
    def session(self) -> requests.Session:
        """Lazily build and cache the HTTP session with retry/timeout adapters."""
        sess = requests.Session()
        retry_adapter_instance = retry_adapter(retry_adapter_options())
        # Default retry policy for all table endpoints.
        sess.mount(urljoin(self.url, "/v1/table/"), retry_adapter_instance)
        # Connection-timeout-aware adapter for everything else over HTTPS.
        adapter_class = LanceDBClientHTTPAdapterFactory()
        sess.mount("https://", adapter_class())
        return sess
    @property
    def url(self) -> str:
        """Base URL: the host override if set, else the derived cloud endpoint."""
        return (
            self.host_override
            or f"https://{self.db_name}.{self.region}.api.lancedb.com"
        )
    def close(self):
        """Close the underlying session and mark this client unusable."""
        self.session.close()
        self.closed = True
    @functools.cached_property
    def headers(self) -> Dict[str, str]:
        """Default request headers (API key, plus routing headers as needed)."""
        headers = {
            "x-api-key": self.api_key,
        }
        if self.region == "local":  # Local test mode
            # Spoof the cloud host so local servers can route by Host header.
            headers["Host"] = f"{self.db_name}.{self.region}.api.lancedb.com"
        if self.host_override:
            # With a custom host, the database must be named explicitly.
            headers["x-lancedb-database"] = self.db_name
        return headers
    @staticmethod
    def _check_status(resp: requests.Response):
        """Raise ``LanceDBClientError`` for any non-200 response status."""
        if resp.status_code == 404:
            raise LanceDBClientError(f"Not found: {resp.text}")
        elif 400 <= resp.status_code < 500:
            raise LanceDBClientError(
                f"Bad Request: {resp.status_code}, error: {resp.text}"
            )
        elif 500 <= resp.status_code < 600:
            raise LanceDBClientError(
                f"Internal Server Error: {resp.status_code}, error: {resp.text}"
            )
        elif resp.status_code != 200:
            raise LanceDBClientError(
                f"Unknown Error: {resp.status_code}, error: {resp.text}"
            )
    @_check_not_closed
    def get(self, uri: str, params: Union[Dict[str, Any], BaseModel] = None):
        """Send a GET request and returns the deserialized response payload."""
        if isinstance(params, BaseModel):
            # Pydantic models are sent as query params, dropping unset fields.
            params: Dict[str, Any] = params.dict(exclude_none=True)
        with self.session.get(
            urljoin(self.url, uri),
            params=params,
            headers=self.headers,
            timeout=(120.0, 300.0),
        ) as resp:
            self._check_status(resp)
            return resp.json()
    @_check_not_closed
    def post(
        self,
        uri: str,
        data: Optional[Union[Dict[str, Any], BaseModel, bytes]] = None,
        params: Optional[Dict[str, Any]] = None,
        content_type: Optional[str] = None,
        deserialize: Callable = lambda resp: resp.json(),
        request_id: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Send a POST request and returns the deserialized response payload.
        Parameters
        ----------
        uri : str
            The uri to send the POST request to.
        data: Union[Dict[str, Any], BaseModel]
            Request body: a raw dict/model sent as JSON, or bytes sent verbatim.
        request_id: Optional[str]
            Optional client side request id to be sent in the request headers.
        """
        if isinstance(data, BaseModel):
            data: Dict[str, Any] = data.dict(exclude_none=True)
        # Raw bytes are posted as-is; anything else is JSON-encoded.
        if isinstance(data, bytes):
            req_kwargs = {"data": data}
        else:
            req_kwargs = {"json": data}
        # Copy so per-request headers never leak into the cached defaults.
        headers = self.headers.copy()
        if content_type is not None:
            headers["content-type"] = content_type
        if request_id is not None:
            headers["x-request-id"] = request_id
        with self.session.post(
            urljoin(self.url, uri),
            headers=headers,
            params=params,
            timeout=(120.0, 300.0),
            **req_kwargs,
        ) as resp:
            self._check_status(resp)
            return deserialize(resp)
    @_check_not_closed
    def list_tables(self, limit: int, page_token: Optional[str] = None) -> List[str]:
        """List all tables in the database."""
        if page_token is None:
            page_token = ""
        json = self.get("/v1/table/", {"limit": limit, "page_token": page_token})
        return json["tables"]
    @_check_not_closed
    def query(self, table_name: str, query: VectorQuery) -> VectorQueryResult:
        """Query a table."""
        # The server streams Arrow IPC; deserialize straight into a table.
        tbl = self.post(f"/v1/table/{table_name}/query/", query, deserialize=_read_ipc)
        return VectorQueryResult(tbl)
    def mount_retry_adapter_for_table(self, table_name: str) -> None:
        """
        Adds an http adapter to session that will retry retryable requests to the table.
        """
        # GET and POST are both retried for these read-only table endpoints.
        retry_options = retry_adapter_options(methods=["GET", "POST"])
        retry_adapter_instance = retry_adapter(retry_options)
        session = self.session
        session.mount(
            urljoin(self.url, f"/v1/table/{table_name}/query/"), retry_adapter_instance
        )
        session.mount(
            urljoin(self.url, f"/v1/table/{table_name}/describe/"),
            retry_adapter_instance,
        )
        session.mount(
            urljoin(self.url, f"/v1/table/{table_name}/index/list/"),
            retry_adapter_instance,
        )
def retry_adapter_options(methods=None) -> Dict[str, Any]:
    """Assemble the retry configuration consumed by :func:`retry_adapter`.

    Every knob can be overridden through a ``LANCE_CLIENT_*`` environment
    variable; the hard-coded fallbacks apply otherwise.

    Parameters
    ----------
    methods : Optional[List[str]]
        HTTP methods eligible for retries. Defaults to ``["GET"]``.

    Returns
    -------
    Dict[str, Any]
        Keyword options matching the parameters of ``urllib3.Retry``.
    """
    # A None sentinel replaces the original mutable default argument
    # (methods=["GET"]): a default list is shared across calls, so a caller
    # mutating the returned dict's "methods" would leak into later calls.
    if methods is None:
        methods = ["GET"]
    return {
        "retries": int(os.environ.get("LANCE_CLIENT_MAX_RETRIES", "3")),
        "connect_retries": int(os.environ.get("LANCE_CLIENT_CONNECT_RETRIES", "3")),
        "read_retries": int(os.environ.get("LANCE_CLIENT_READ_RETRIES", "3")),
        "backoff_factor": float(
            os.environ.get("LANCE_CLIENT_RETRY_BACKOFF_FACTOR", "0.25")
        ),
        "backoff_jitter": float(
            os.environ.get("LANCE_CLIENT_RETRY_BACKOFF_JITTER", "0.25")
        ),
        "statuses": [
            int(i.strip())
            for i in os.environ.get(
                "LANCE_CLIENT_RETRY_STATUSES", "429, 500, 502, 503"
            ).split(",")
        ],
        "methods": methods,
    }
def retry_adapter(options: Dict[str, Any]) -> HTTPAdapter:
total_retries = options["retries"]
connect_retries = options["connect_retries"]
read_retries = options["read_retries"]
backoff_factor = options["backoff_factor"]
backoff_jitter = options["backoff_jitter"]
statuses = options["statuses"]
methods = frozenset(options["methods"])
logging.debug(
f"Setting up retry adapter with {total_retries} retries," # noqa G003
+ f"connect retries {connect_retries}, read retries {read_retries},"
+ f"backoff factor {backoff_factor}, statuses {statuses}, "
+ f"methods {methods}"
)
return HTTPAdapter(
max_retries=Retry(
total=total_retries,
connect=connect_retries,
read=read_retries,
backoff_factor=backoff_factor,
backoff_jitter=backoff_jitter,
status_forcelist=statuses,
allowed_methods=methods,
)
)
| [
"lancedb.remote.VectorQueryResult",
"lancedb.remote.connection_timeout.LanceDBClientHTTPAdapterFactory",
"lancedb.remote.errors.LanceDBClientError"
] | [((1587, 1612), 'attrs.define', 'attrs.define', ([], {'slots': '(False)'}), '(slots=False)\n', (1599, 1612), False, 'import attrs\n'), ((1207, 1225), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (1222, 1225), False, 'import functools\n'), ((1733, 1758), 'attrs.field', 'attrs.field', ([], {'default': 'None'}), '(default=None)\n', (1744, 1758), False, 'import attrs\n'), ((1779, 1817), 'attrs.field', 'attrs.field', ([], {'default': '(False)', 'init': '(False)'}), '(default=False, init=False)\n', (1790, 1817), False, 'import attrs\n'), ((7965, 8201), 'logging.debug', 'logging.debug', (["(f'Setting up retry adapter with {total_retries} retries,' +\n f'connect retries {connect_retries}, read retries {read_retries},' +\n f'backoff factor {backoff_factor}, statuses {statuses}, ' +\n f'methods {methods}')"], {}), "(f'Setting up retry adapter with {total_retries} retries,' +\n f'connect retries {connect_retries}, read retries {read_retries},' +\n f'backoff factor {backoff_factor}, statuses {statuses}, ' +\n f'methods {methods}')\n", (7978, 8201), False, 'import logging\n'), ((1908, 1926), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1924, 1926), False, 'import requests\n'), ((2101, 2134), 'lancedb.remote.connection_timeout.LanceDBClientHTTPAdapterFactory', 'LanceDBClientHTTPAdapterFactory', ([], {}), '()\n', (2132, 2134), False, 'from lancedb.remote.connection_timeout import LanceDBClientHTTPAdapterFactory\n'), ((6057, 6079), 'lancedb.remote.VectorQueryResult', 'VectorQueryResult', (['tbl'], {}), '(tbl)\n', (6074, 6079), False, 'from lancedb.remote import VectorQuery, VectorQueryResult\n'), ((1512, 1538), 'pyarrow.BufferReader', 'pa.BufferReader', (['resp_body'], {}), '(resp_body)\n', (1527, 1538), True, 'import pyarrow as pa\n'), ((2019, 2050), 'urllib.parse.urljoin', 'urljoin', (['self.url', '"""/v1/table/"""'], {}), "(self.url, '/v1/table/')\n", (2026, 2050), False, 'from urllib.parse import urljoin\n'), ((2957, 3002), 
'lancedb.remote.errors.LanceDBClientError', 'LanceDBClientError', (['f"""Not found: {resp.text}"""'], {}), "(f'Not found: {resp.text}')\n", (2975, 3002), False, 'from lancedb.remote.errors import LanceDBClientError\n'), ((6464, 6515), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'f"""/v1/table/{table_name}/query/"""'], {}), "(self.url, f'/v1/table/{table_name}/query/')\n", (6471, 6515), False, 'from urllib.parse import urljoin\n'), ((6585, 6639), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'f"""/v1/table/{table_name}/describe/"""'], {}), "(self.url, f'/v1/table/{table_name}/describe/')\n", (6592, 6639), False, 'from urllib.parse import urljoin\n'), ((6722, 6778), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'f"""/v1/table/{table_name}/index/list/"""'], {}), "(self.url, f'/v1/table/{table_name}/index/list/')\n", (6729, 6778), False, 'from urllib.parse import urljoin\n'), ((6926, 6973), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_MAX_RETRIES"""', '"""3"""'], {}), "('LANCE_CLIENT_MAX_RETRIES', '3')\n", (6940, 6973), False, 'import os\n'), ((7007, 7058), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_CONNECT_RETRIES"""', '"""3"""'], {}), "('LANCE_CLIENT_CONNECT_RETRIES', '3')\n", (7021, 7058), False, 'import os\n'), ((7089, 7137), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_READ_RETRIES"""', '"""3"""'], {}), "('LANCE_CLIENT_READ_RETRIES', '3')\n", (7103, 7137), False, 'import os\n'), ((7185, 7244), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_RETRY_BACKOFF_FACTOR"""', '"""0.25"""'], {}), "('LANCE_CLIENT_RETRY_BACKOFF_FACTOR', '0.25')\n", (7199, 7244), False, 'import os\n'), ((7301, 7360), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_RETRY_BACKOFF_JITTER"""', '"""0.25"""'], {}), "('LANCE_CLIENT_RETRY_BACKOFF_JITTER', '0.25')\n", (7315, 7360), False, 'import os\n'), ((8286, 8478), 'urllib3.Retry', 'Retry', ([], {'total': 'total_retries', 'connect': 'connect_retries', 'read': 'read_retries', 'backoff_factor': 
'backoff_factor', 'backoff_jitter': 'backoff_jitter', 'status_forcelist': 'statuses', 'allowed_methods': 'methods'}), '(total=total_retries, connect=connect_retries, read=read_retries,\n backoff_factor=backoff_factor, backoff_jitter=backoff_jitter,\n status_forcelist=statuses, allowed_methods=methods)\n', (8291, 8478), False, 'from urllib3 import Retry\n'), ((3065, 3139), 'lancedb.remote.errors.LanceDBClientError', 'LanceDBClientError', (['f"""Bad Request: {resp.status_code}, error: {resp.text}"""'], {}), "(f'Bad Request: {resp.status_code}, error: {resp.text}')\n", (3083, 3139), False, 'from lancedb.remote.errors import LanceDBClientError\n'), ((3845, 3867), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'uri'], {}), '(self.url, uri)\n', (3852, 3867), False, 'from urllib.parse import urljoin\n'), ((5259, 5281), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'uri'], {}), '(self.url, uri)\n', (5266, 5281), False, 'from urllib.parse import urljoin\n'), ((3232, 3321), 'lancedb.remote.errors.LanceDBClientError', 'LanceDBClientError', (['f"""Internal Server Error: {resp.status_code}, error: {resp.text}"""'], {}), "(\n f'Internal Server Error: {resp.status_code}, error: {resp.text}')\n", (3250, 3321), False, 'from lancedb.remote.errors import LanceDBClientError\n'), ((3403, 3479), 'lancedb.remote.errors.LanceDBClientError', 'LanceDBClientError', (['f"""Unknown Error: {resp.status_code}, error: {resp.text}"""'], {}), "(f'Unknown Error: {resp.status_code}, error: {resp.text}')\n", (3421, 3479), False, 'from lancedb.remote.errors import LanceDBClientError\n'), ((7442, 7509), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_RETRY_STATUSES"""', '"""429, 500, 502, 503"""'], {}), "('LANCE_CLIENT_RETRY_STATUSES', '429, 500, 502, 503')\n", (7456, 7509), False, 'import os\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
from datetime import date, datetime
from typing import List, Optional, Tuple
import pyarrow as pa
import pydantic
import pytest
from lancedb.pydantic import PYDANTIC_VERSION, LanceModel, Vector, pydantic_to_schema
from pydantic import Field
@pytest.mark.skipif(
sys.version_info < (3, 9),
reason="using native type alias requires python3.9 or higher",
)
def test_pydantic_to_arrow():
class StructModel(pydantic.BaseModel):
a: str
b: Optional[float]
class TestModel(pydantic.BaseModel):
id: int
s: str
vec: list[float]
li: list[int]
lili: list[list[float]]
litu: list[tuple[float, float]]
opt: Optional[str] = None
st: StructModel
dt: date
dtt: datetime
dt_with_tz: datetime = Field(json_schema_extra={"tz": "Asia/Shanghai"})
# d: dict
# TODO: test we can actually convert the model into data.
# m = TestModel(
# id=1,
# s="hello",
# vec=[1.0, 2.0, 3.0],
# li=[2, 3, 4],
# lili=[[2.5, 1.5], [3.5, 4.5], [5.5, 6.5]],
# litu=[(2.5, 1.5), (3.5, 4.5), (5.5, 6.5)],
# st=StructModel(a="a", b=1.0),
# dt=date.today(),
# dtt=datetime.now(),
# dt_with_tz=datetime.now(pytz.timezone("Asia/Shanghai")),
# )
schema = pydantic_to_schema(TestModel)
expect_schema = pa.schema(
[
pa.field("id", pa.int64(), False),
pa.field("s", pa.utf8(), False),
pa.field("vec", pa.list_(pa.float64()), False),
pa.field("li", pa.list_(pa.int64()), False),
pa.field("lili", pa.list_(pa.list_(pa.float64())), False),
pa.field("litu", pa.list_(pa.list_(pa.float64())), False),
pa.field("opt", pa.utf8(), True),
pa.field(
"st",
pa.struct(
[pa.field("a", pa.utf8(), False), pa.field("b", pa.float64(), True)]
),
False,
),
pa.field("dt", pa.date32(), False),
pa.field("dtt", pa.timestamp("us"), False),
pa.field("dt_with_tz", pa.timestamp("us", tz="Asia/Shanghai"), False),
]
)
assert schema == expect_schema
@pytest.mark.skipif(
sys.version_info < (3, 10),
reason="using | type syntax requires python3.10 or higher",
)
def test_optional_types_py310():
class TestModel(pydantic.BaseModel):
a: str | None
b: None | str
c: Optional[str]
schema = pydantic_to_schema(TestModel)
expect_schema = pa.schema(
[
pa.field("a", pa.utf8(), True),
pa.field("b", pa.utf8(), True),
pa.field("c", pa.utf8(), True),
]
)
assert schema == expect_schema
@pytest.mark.skipif(
sys.version_info > (3, 8),
reason="using native type alias requires python3.9 or higher",
)
def test_pydantic_to_arrow_py38():
class StructModel(pydantic.BaseModel):
a: str
b: Optional[float]
class TestModel(pydantic.BaseModel):
id: int
s: str
vec: List[float]
li: List[int]
lili: List[List[float]]
litu: List[Tuple[float, float]]
opt: Optional[str] = None
st: StructModel
dt: date
dtt: datetime
dt_with_tz: datetime = Field(json_schema_extra={"tz": "Asia/Shanghai"})
# d: dict
# TODO: test we can actually convert the model to Arrow data.
# m = TestModel(
# id=1,
# s="hello",
# vec=[1.0, 2.0, 3.0],
# li=[2, 3, 4],
# lili=[[2.5, 1.5], [3.5, 4.5], [5.5, 6.5]],
# litu=[(2.5, 1.5), (3.5, 4.5), (5.5, 6.5)],
# st=StructModel(a="a", b=1.0),
# dt=date.today(),
# dtt=datetime.now(),
# dt_with_tz=datetime.now(pytz.timezone("Asia/Shanghai")),
# )
schema = pydantic_to_schema(TestModel)
expect_schema = pa.schema(
[
pa.field("id", pa.int64(), False),
pa.field("s", pa.utf8(), False),
pa.field("vec", pa.list_(pa.float64()), False),
pa.field("li", pa.list_(pa.int64()), False),
pa.field("lili", pa.list_(pa.list_(pa.float64())), False),
pa.field("litu", pa.list_(pa.list_(pa.float64())), False),
pa.field("opt", pa.utf8(), True),
pa.field(
"st",
pa.struct(
[pa.field("a", pa.utf8(), False), pa.field("b", pa.float64(), True)]
),
False,
),
pa.field("dt", pa.date32(), False),
pa.field("dtt", pa.timestamp("us"), False),
pa.field("dt_with_tz", pa.timestamp("us", tz="Asia/Shanghai"), False),
]
)
assert schema == expect_schema
def test_fixed_size_list_field():
class TestModel(pydantic.BaseModel):
vec: Vector(16)
li: List[int]
data = TestModel(vec=list(range(16)), li=[1, 2, 3])
if PYDANTIC_VERSION >= (2,):
assert json.loads(data.model_dump_json()) == {
"vec": list(range(16)),
"li": [1, 2, 3],
}
else:
assert data.dict() == {
"vec": list(range(16)),
"li": [1, 2, 3],
}
schema = pydantic_to_schema(TestModel)
assert schema == pa.schema(
[
pa.field("vec", pa.list_(pa.float32(), 16), False),
pa.field("li", pa.list_(pa.int64()), False),
]
)
if PYDANTIC_VERSION >= (2,):
json_schema = TestModel.model_json_schema()
else:
json_schema = TestModel.schema()
assert json_schema == {
"properties": {
"vec": {
"items": {"type": "number"},
"maxItems": 16,
"minItems": 16,
"title": "Vec",
"type": "array",
},
"li": {"items": {"type": "integer"}, "title": "Li", "type": "array"},
},
"required": ["vec", "li"],
"title": "TestModel",
"type": "object",
}
def test_fixed_size_list_validation():
class TestModel(pydantic.BaseModel):
vec: Vector(8)
with pytest.raises(pydantic.ValidationError):
TestModel(vec=range(9))
with pytest.raises(pydantic.ValidationError):
TestModel(vec=range(7))
TestModel(vec=range(8))
def test_lance_model():
class TestModel(LanceModel):
vector: Vector(16) = Field(default=[0.0] * 16)
li: List[int] = Field(default=[1, 2, 3])
schema = pydantic_to_schema(TestModel)
assert schema == TestModel.to_arrow_schema()
assert TestModel.field_names() == ["vector", "li"]
t = TestModel()
assert t == TestModel(vec=[0.0] * 16, li=[1, 2, 3])
| [
"lancedb.pydantic.Vector",
"lancedb.pydantic.pydantic_to_schema"
] | [((860, 973), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(sys.version_info < (3, 9))'], {'reason': '"""using native type alias requires python3.9 or higher"""'}), "(sys.version_info < (3, 9), reason=\n 'using native type alias requires python3.9 or higher')\n", (878, 973), False, 'import pytest\n'), ((2877, 2988), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(sys.version_info < (3, 10))'], {'reason': '"""using | type syntax requires python3.10 or higher"""'}), "(sys.version_info < (3, 10), reason=\n 'using | type syntax requires python3.10 or higher')\n", (2895, 2988), False, 'import pytest\n'), ((3410, 3523), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(sys.version_info > (3, 8))'], {'reason': '"""using native type alias requires python3.9 or higher"""'}), "(sys.version_info > (3, 8), reason=\n 'using native type alias requires python3.9 or higher')\n", (3428, 3523), False, 'import pytest\n'), ((1950, 1979), 'lancedb.pydantic.pydantic_to_schema', 'pydantic_to_schema', (['TestModel'], {}), '(TestModel)\n', (1968, 1979), False, 'from lancedb.pydantic import PYDANTIC_VERSION, LanceModel, Vector, pydantic_to_schema\n'), ((3152, 3181), 'lancedb.pydantic.pydantic_to_schema', 'pydantic_to_schema', (['TestModel'], {}), '(TestModel)\n', (3170, 3181), False, 'from lancedb.pydantic import PYDANTIC_VERSION, LanceModel, Vector, pydantic_to_schema\n'), ((4509, 4538), 'lancedb.pydantic.pydantic_to_schema', 'pydantic_to_schema', (['TestModel'], {}), '(TestModel)\n', (4527, 4538), False, 'from lancedb.pydantic import PYDANTIC_VERSION, LanceModel, Vector, pydantic_to_schema\n'), ((5907, 5936), 'lancedb.pydantic.pydantic_to_schema', 'pydantic_to_schema', (['TestModel'], {}), '(TestModel)\n', (5925, 5936), False, 'from lancedb.pydantic import PYDANTIC_VERSION, LanceModel, Vector, pydantic_to_schema\n'), ((7183, 7212), 'lancedb.pydantic.pydantic_to_schema', 'pydantic_to_schema', (['TestModel'], {}), '(TestModel)\n', (7201, 7212), False, 'from lancedb.pydantic import 
PYDANTIC_VERSION, LanceModel, Vector, pydantic_to_schema\n'), ((1415, 1463), 'pydantic.Field', 'Field', ([], {'json_schema_extra': "{'tz': 'Asia/Shanghai'}"}), "(json_schema_extra={'tz': 'Asia/Shanghai'})\n", (1420, 1463), False, 'from pydantic import Field\n'), ((3970, 4018), 'pydantic.Field', 'Field', ([], {'json_schema_extra': "{'tz': 'Asia/Shanghai'}"}), "(json_schema_extra={'tz': 'Asia/Shanghai'})\n", (3975, 4018), False, 'from pydantic import Field\n'), ((5523, 5533), 'lancedb.pydantic.Vector', 'Vector', (['(16)'], {}), '(16)\n', (5529, 5533), False, 'from lancedb.pydantic import PYDANTIC_VERSION, LanceModel, Vector, pydantic_to_schema\n'), ((6801, 6810), 'lancedb.pydantic.Vector', 'Vector', (['(8)'], {}), '(8)\n', (6807, 6810), False, 'from lancedb.pydantic import PYDANTIC_VERSION, LanceModel, Vector, pydantic_to_schema\n'), ((6821, 6860), 'pytest.raises', 'pytest.raises', (['pydantic.ValidationError'], {}), '(pydantic.ValidationError)\n', (6834, 6860), False, 'import pytest\n'), ((6904, 6943), 'pytest.raises', 'pytest.raises', (['pydantic.ValidationError'], {}), '(pydantic.ValidationError)\n', (6917, 6943), False, 'import pytest\n'), ((7081, 7091), 'lancedb.pydantic.Vector', 'Vector', (['(16)'], {}), '(16)\n', (7087, 7091), False, 'from lancedb.pydantic import PYDANTIC_VERSION, LanceModel, Vector, pydantic_to_schema\n'), ((7094, 7119), 'pydantic.Field', 'Field', ([], {'default': '([0.0] * 16)'}), '(default=[0.0] * 16)\n', (7099, 7119), False, 'from pydantic import Field\n'), ((7144, 7168), 'pydantic.Field', 'Field', ([], {'default': '[1, 2, 3]'}), '(default=[1, 2, 3])\n', (7149, 7168), False, 'from pydantic import Field\n'), ((2049, 2059), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (2057, 2059), True, 'import pyarrow as pa\n'), ((2095, 2104), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (2102, 2104), True, 'import pyarrow as pa\n'), ((2401, 2410), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (2408, 2410), True, 'import pyarrow as pa\n'), ((2663, 2674), 
'pyarrow.date32', 'pa.date32', ([], {}), '()\n', (2672, 2674), True, 'import pyarrow as pa\n'), ((2712, 2730), 'pyarrow.timestamp', 'pa.timestamp', (['"""us"""'], {}), "('us')\n", (2724, 2730), True, 'import pyarrow as pa\n'), ((2775, 2813), 'pyarrow.timestamp', 'pa.timestamp', (['"""us"""'], {'tz': '"""Asia/Shanghai"""'}), "('us', tz='Asia/Shanghai')\n", (2787, 2813), True, 'import pyarrow as pa\n'), ((3250, 3259), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (3257, 3259), True, 'import pyarrow as pa\n'), ((3294, 3303), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (3301, 3303), True, 'import pyarrow as pa\n'), ((3338, 3347), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (3345, 3347), True, 'import pyarrow as pa\n'), ((4608, 4618), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (4616, 4618), True, 'import pyarrow as pa\n'), ((4654, 4663), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (4661, 4663), True, 'import pyarrow as pa\n'), ((4960, 4969), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (4967, 4969), True, 'import pyarrow as pa\n'), ((5222, 5233), 'pyarrow.date32', 'pa.date32', ([], {}), '()\n', (5231, 5233), True, 'import pyarrow as pa\n'), ((5271, 5289), 'pyarrow.timestamp', 'pa.timestamp', (['"""us"""'], {}), "('us')\n", (5283, 5289), True, 'import pyarrow as pa\n'), ((5334, 5372), 'pyarrow.timestamp', 'pa.timestamp', (['"""us"""'], {'tz': '"""Asia/Shanghai"""'}), "('us', tz='Asia/Shanghai')\n", (5346, 5372), True, 'import pyarrow as pa\n'), ((2151, 2163), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (2161, 2163), True, 'import pyarrow as pa\n'), ((2210, 2220), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (2218, 2220), True, 'import pyarrow as pa\n'), ((4710, 4722), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (4720, 4722), True, 'import pyarrow as pa\n'), ((4769, 4779), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (4777, 4779), True, 'import pyarrow as pa\n'), ((2278, 2290), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (2288, 2290), True, 
'import pyarrow as pa\n'), ((2349, 2361), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (2359, 2361), True, 'import pyarrow as pa\n'), ((4837, 4849), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (4847, 4849), True, 'import pyarrow as pa\n'), ((4908, 4920), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (4918, 4920), True, 'import pyarrow as pa\n'), ((6016, 6028), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (6026, 6028), True, 'import pyarrow as pa\n'), ((6079, 6089), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (6087, 6089), True, 'import pyarrow as pa\n'), ((2525, 2534), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (2532, 2534), True, 'import pyarrow as pa\n'), ((2558, 2570), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (2568, 2570), True, 'import pyarrow as pa\n'), ((5084, 5093), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (5091, 5093), True, 'import pyarrow as pa\n'), ((5117, 5129), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (5127, 5129), True, 'import pyarrow as pa\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest.mock as mock
from datetime import timedelta
import lance
import lancedb
import numpy as np
import pandas.testing as tm
import pyarrow as pa
import pytest
import pytest_asyncio
from lancedb.db import LanceDBConnection
from lancedb.pydantic import LanceModel, Vector
from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query
from lancedb.table import AsyncTable, LanceTable
class MockTable:
def __init__(self, tmp_path):
self.uri = tmp_path
self._conn = LanceDBConnection(self.uri)
def to_lance(self):
return lance.dataset(self.uri)
def _execute_query(self, query):
ds = self.to_lance()
return ds.to_table(
columns=query.columns,
filter=query.filter,
prefilter=query.prefilter,
nearest={
"column": query.vector_column,
"q": query.vector,
"k": query.k,
"metric": query.metric,
"nprobes": query.nprobes,
"refine_factor": query.refine_factor,
},
)
@pytest.fixture
def table(tmp_path) -> MockTable:
df = pa.table(
{
"vector": pa.array(
[[1, 2], [3, 4]], type=pa.list_(pa.float32(), list_size=2)
),
"id": pa.array([1, 2]),
"str_field": pa.array(["a", "b"]),
"float_field": pa.array([1.0, 2.0]),
}
)
lance.write_dataset(df, tmp_path)
return MockTable(tmp_path)
@pytest_asyncio.fixture
async def table_async(tmp_path) -> AsyncTable:
conn = await lancedb.connect_async(
tmp_path, read_consistency_interval=timedelta(seconds=0)
)
data = pa.table(
{
"vector": pa.array(
[[1, 2], [3, 4]], type=pa.list_(pa.float32(), list_size=2)
),
"id": pa.array([1, 2]),
"str_field": pa.array(["a", "b"]),
"float_field": pa.array([1.0, 2.0]),
}
)
return await conn.create_table("test", data)
def test_cast(table):
class TestModel(LanceModel):
vector: Vector(2)
id: int
str_field: str
float_field: float
q = LanceVectorQueryBuilder(table, [0, 0], "vector").limit(1)
results = q.to_pydantic(TestModel)
assert len(results) == 1
r0 = results[0]
assert isinstance(r0, TestModel)
assert r0.id == 1
assert r0.vector == [1, 2]
assert r0.str_field == "a"
assert r0.float_field == 1.0
def test_query_builder(table):
rs = (
LanceVectorQueryBuilder(table, [0, 0], "vector")
.limit(1)
.select(["id", "vector"])
.to_list()
)
assert rs[0]["id"] == 1
assert all(np.array(rs[0]["vector"]) == [1, 2])
def test_dynamic_projection(table):
rs = (
LanceVectorQueryBuilder(table, [0, 0], "vector")
.limit(1)
.select({"id": "id", "id2": "id * 2"})
.to_list()
)
assert rs[0]["id"] == 1
assert rs[0]["id2"] == 2
def test_query_builder_with_filter(table):
rs = LanceVectorQueryBuilder(table, [0, 0], "vector").where("id = 2").to_list()
assert rs[0]["id"] == 2
assert all(np.array(rs[0]["vector"]) == [3, 4])
def test_query_builder_with_prefilter(table):
df = (
LanceVectorQueryBuilder(table, [0, 0], "vector")
.where("id = 2")
.limit(1)
.to_pandas()
)
assert len(df) == 0
df = (
LanceVectorQueryBuilder(table, [0, 0], "vector")
.where("id = 2", prefilter=True)
.limit(1)
.to_pandas()
)
assert df["id"].values[0] == 2
assert all(df["vector"].values[0] == [3, 4])
def test_query_builder_with_metric(table):
query = [4, 8]
vector_column_name = "vector"
df_default = LanceVectorQueryBuilder(table, query, vector_column_name).to_pandas()
df_l2 = (
LanceVectorQueryBuilder(table, query, vector_column_name)
.metric("L2")
.to_pandas()
)
tm.assert_frame_equal(df_default, df_l2)
df_cosine = (
LanceVectorQueryBuilder(table, query, vector_column_name)
.metric("cosine")
.limit(1)
.to_pandas()
)
assert df_cosine._distance[0] == pytest.approx(
cosine_distance(query, df_cosine.vector[0]),
abs=1e-6,
)
assert 0 <= df_cosine._distance[0] <= 1
def test_query_builder_with_different_vector_column():
table = mock.MagicMock(spec=LanceTable)
query = [4, 8]
vector_column_name = "foo_vector"
builder = (
LanceVectorQueryBuilder(table, query, vector_column_name)
.metric("cosine")
.where("b < 10")
.select(["b"])
.limit(2)
)
ds = mock.Mock()
table.to_lance.return_value = ds
builder.to_arrow()
table._execute_query.assert_called_once_with(
Query(
vector=query,
filter="b < 10",
k=2,
metric="cosine",
columns=["b"],
nprobes=20,
refine_factor=None,
vector_column="foo_vector",
)
)
def cosine_distance(vec1, vec2):
return 1 - np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))
async def check_query(
query: AsyncQueryBase, *, expected_num_rows=None, expected_columns=None
):
num_rows = 0
results = await query.to_batches()
async for batch in results:
if expected_columns is not None:
assert batch.schema.names == expected_columns
num_rows += batch.num_rows
if expected_num_rows is not None:
assert num_rows == expected_num_rows
@pytest.mark.asyncio
async def test_query_async(table_async: AsyncTable):
await check_query(
table_async.query(),
expected_num_rows=2,
expected_columns=["vector", "id", "str_field", "float_field"],
)
await check_query(table_async.query().where("id = 2"), expected_num_rows=1)
await check_query(
table_async.query().select(["id", "vector"]), expected_columns=["id", "vector"]
)
await check_query(
table_async.query().select({"foo": "id", "bar": "id + 1"}),
expected_columns=["foo", "bar"],
)
await check_query(table_async.query().limit(1), expected_num_rows=1)
await check_query(
table_async.query().nearest_to(pa.array([1, 2])), expected_num_rows=2
)
# Support different types of inputs for the vector query
for vector_query in [
[1, 2],
[1.0, 2.0],
np.array([1, 2]),
(1, 2),
]:
await check_query(
table_async.query().nearest_to(vector_query), expected_num_rows=2
)
# No easy way to check these vector query parameters are doing what they say. We
# just check that they don't raise exceptions and assume this is tested at a lower
# level.
await check_query(
table_async.query().where("id = 2").nearest_to(pa.array([1, 2])).postfilter(),
expected_num_rows=1,
)
await check_query(
table_async.query().nearest_to(pa.array([1, 2])).refine_factor(1),
expected_num_rows=2,
)
await check_query(
table_async.query().nearest_to(pa.array([1, 2])).nprobes(10),
expected_num_rows=2,
)
await check_query(
table_async.query().nearest_to(pa.array([1, 2])).bypass_vector_index(),
expected_num_rows=2,
)
await check_query(
table_async.query().nearest_to(pa.array([1, 2])).distance_type("dot"),
expected_num_rows=2,
)
await check_query(
table_async.query().nearest_to(pa.array([1, 2])).distance_type("DoT"),
expected_num_rows=2,
)
# Make sure we can use a vector query as a base query (e.g. call limit on it)
# Also make sure `vector_search` works
await check_query(table_async.vector_search([1, 2]).limit(1), expected_num_rows=1)
# Also check an empty query
await check_query(table_async.query().where("id < 0"), expected_num_rows=0)
@pytest.mark.asyncio
async def test_query_to_arrow_async(table_async: AsyncTable):
table = await table_async.to_arrow()
assert table.num_rows == 2
assert table.num_columns == 4
table = await table_async.query().to_arrow()
assert table.num_rows == 2
assert table.num_columns == 4
table = await table_async.query().where("id < 0").to_arrow()
assert table.num_rows == 0
assert table.num_columns == 4
@pytest.mark.asyncio
async def test_query_to_pandas_async(table_async: AsyncTable):
df = await table_async.to_pandas()
assert df.shape == (2, 4)
df = await table_async.query().to_pandas()
assert df.shape == (2, 4)
df = await table_async.query().where("id < 0").to_pandas()
assert df.shape == (0, 4)
| [
"lancedb.pydantic.Vector",
"lancedb.query.Query",
"lancedb.query.LanceVectorQueryBuilder",
"lancedb.db.LanceDBConnection"
] | [((2041, 2074), 'lance.write_dataset', 'lance.write_dataset', (['df', 'tmp_path'], {}), '(df, tmp_path)\n', (2060, 2074), False, 'import lance\n'), ((4585, 4625), 'pandas.testing.assert_frame_equal', 'tm.assert_frame_equal', (['df_default', 'df_l2'], {}), '(df_default, df_l2)\n', (4606, 4625), True, 'import pandas.testing as tm\n'), ((5024, 5055), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'spec': 'LanceTable'}), '(spec=LanceTable)\n', (5038, 5055), True, 'import unittest.mock as mock\n'), ((5302, 5313), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (5311, 5313), True, 'import unittest.mock as mock\n'), ((1097, 1124), 'lancedb.db.LanceDBConnection', 'LanceDBConnection', (['self.uri'], {}), '(self.uri)\n', (1114, 1124), False, 'from lancedb.db import LanceDBConnection\n'), ((1165, 1188), 'lance.dataset', 'lance.dataset', (['self.uri'], {}), '(self.uri)\n', (1178, 1188), False, 'import lance\n'), ((2713, 2722), 'lancedb.pydantic.Vector', 'Vector', (['(2)'], {}), '(2)\n', (2719, 2722), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((5432, 5569), 'lancedb.query.Query', 'Query', ([], {'vector': 'query', 'filter': '"""b < 10"""', 'k': '(2)', 'metric': '"""cosine"""', 'columns': "['b']", 'nprobes': '(20)', 'refine_factor': 'None', 'vector_column': '"""foo_vector"""'}), "(vector=query, filter='b < 10', k=2, metric='cosine', columns=['b'],\n nprobes=20, refine_factor=None, vector_column='foo_vector')\n", (5437, 5569), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((7085, 7101), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (7093, 7101), True, 'import numpy as np\n'), ((1907, 1923), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (1915, 1923), True, 'import pyarrow as pa\n'), ((1950, 1970), 'pyarrow.array', 'pa.array', (["['a', 'b']"], {}), "(['a', 'b'])\n", (1958, 1970), True, 'import pyarrow as pa\n'), ((1999, 2019), 'pyarrow.array', 'pa.array', (['[1.0, 2.0]'], {}), '([1.0, 
2.0])\n', (2007, 2019), True, 'import pyarrow as pa\n'), ((2461, 2477), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (2469, 2477), True, 'import pyarrow as pa\n'), ((2504, 2524), 'pyarrow.array', 'pa.array', (["['a', 'b']"], {}), "(['a', 'b'])\n", (2512, 2524), True, 'import pyarrow as pa\n'), ((2553, 2573), 'pyarrow.array', 'pa.array', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (2561, 2573), True, 'import pyarrow as pa\n'), ((2798, 2846), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', '[0, 0]', '"""vector"""'], {}), "(table, [0, 0], 'vector')\n", (2821, 2846), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((3319, 3344), 'numpy.array', 'np.array', (["rs[0]['vector']"], {}), "(rs[0]['vector'])\n", (3327, 3344), True, 'import numpy as np\n'), ((3781, 3806), 'numpy.array', 'np.array', (["rs[0]['vector']"], {}), "(rs[0]['vector'])\n", (3789, 3806), True, 'import numpy as np\n'), ((4382, 4439), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', 'query', 'vector_column_name'], {}), '(table, query, vector_column_name)\n', (4405, 4439), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((5729, 5747), 'numpy.dot', 'np.dot', (['vec1', 'vec2'], {}), '(vec1, vec2)\n', (5735, 5747), True, 'import numpy as np\n'), ((2263, 2283), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (2272, 2283), False, 'from datetime import timedelta\n'), ((5751, 5771), 'numpy.linalg.norm', 'np.linalg.norm', (['vec1'], {}), '(vec1)\n', (5765, 5771), True, 'import numpy as np\n'), ((5774, 5794), 'numpy.linalg.norm', 'np.linalg.norm', (['vec2'], {}), '(vec2)\n', (5788, 5794), True, 'import numpy as np\n'), ((6909, 6925), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (6917, 6925), True, 'import pyarrow as pa\n'), ((3663, 3711), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', '[0, 
0]', '"""vector"""'], {}), "(table, [0, 0], 'vector')\n", (3686, 3711), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((4474, 4531), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', 'query', 'vector_column_name'], {}), '(table, query, vector_column_name)\n', (4497, 4531), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((1847, 1859), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (1857, 1859), True, 'import pyarrow as pa\n'), ((2401, 2413), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (2411, 2413), True, 'import pyarrow as pa\n'), ((7506, 7522), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (7514, 7522), True, 'import pyarrow as pa\n'), ((7635, 7651), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (7643, 7651), True, 'import pyarrow as pa\n'), ((7768, 7784), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (7776, 7784), True, 'import pyarrow as pa\n'), ((7896, 7912), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (7904, 7912), True, 'import pyarrow as pa\n'), ((8034, 8050), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (8042, 8050), True, 'import pyarrow as pa\n'), ((8171, 8187), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (8179, 8187), True, 'import pyarrow as pa\n'), ((3150, 3198), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', '[0, 0]', '"""vector"""'], {}), "(table, [0, 0], 'vector')\n", (3173, 3198), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((3413, 3461), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', '[0, 0]', '"""vector"""'], {}), "(table, [0, 0], 'vector')\n", (3436, 3461), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((3885, 3933), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', 
(['table', '[0, 0]', '"""vector"""'], {}), "(table, [0, 0], 'vector')\n", (3908, 3933), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((4048, 4096), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', '[0, 0]', '"""vector"""'], {}), "(table, [0, 0], 'vector')\n", (4071, 4096), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((4653, 4710), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', 'query', 'vector_column_name'], {}), '(table, query, vector_column_name)\n', (4676, 4710), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((5137, 5194), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', 'query', 'vector_column_name'], {}), '(table, query, vector_column_name)\n', (5160, 5194), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n')] |
# Copyright (c) 2023. LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import io
import os
import lancedb
import numpy as np
import pandas as pd
import pytest
import requests
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector
# These are integration tests for embedding functions.
# They are slow because they require downloading models
# or connection to external api
try:
if importlib.util.find_spec("mlx.core") is not None:
_mlx = True
else:
_mlx = None
except Exception:
_mlx = None
try:
if importlib.util.find_spec("imagebind") is not None:
_imagebind = True
else:
_imagebind = None
except Exception:
_imagebind = None
@pytest.mark.slow
@pytest.mark.parametrize("alias", ["sentence-transformers", "openai"])
def test_basic_text_embeddings(alias, tmp_path):
db = lancedb.connect(tmp_path)
registry = get_registry()
func = registry.get(alias).create(max_retries=0)
func2 = registry.get(alias).create(max_retries=0)
class Words(LanceModel):
text: str = func.SourceField()
text2: str = func2.SourceField()
vector: Vector(func.ndims()) = func.VectorField()
vector2: Vector(func2.ndims()) = func2.VectorField()
table = db.create_table("words", schema=Words)
table.add(
pd.DataFrame(
{
"text": [
"hello world",
"goodbye world",
"fizz",
"buzz",
"foo",
"bar",
"baz",
],
"text2": [
"to be or not to be",
"that is the question",
"for whether tis nobler",
"in the mind to suffer",
"the slings and arrows",
"of outrageous fortune",
"or to take arms",
],
}
)
)
query = "greetings"
actual = (
table.search(query, vector_column_name="vector").limit(1).to_pydantic(Words)[0]
)
vec = func.compute_query_embeddings(query)[0]
expected = (
table.search(vec, vector_column_name="vector").limit(1).to_pydantic(Words)[0]
)
assert actual.text == expected.text
assert actual.text == "hello world"
assert not np.allclose(actual.vector, actual.vector2)
actual = (
table.search(query, vector_column_name="vector2").limit(1).to_pydantic(Words)[0]
)
assert actual.text != "hello world"
assert not np.allclose(actual.vector, actual.vector2)
@pytest.mark.slow
def test_openclip(tmp_path):
from PIL import Image
db = lancedb.connect(tmp_path)
registry = get_registry()
func = registry.get("open-clip").create(max_retries=0)
class Images(LanceModel):
label: str
image_uri: str = func.SourceField()
image_bytes: bytes = func.SourceField()
vector: Vector(func.ndims()) = func.VectorField()
vec_from_bytes: Vector(func.ndims()) = func.VectorField()
table = db.create_table("images", schema=Images)
labels = ["cat", "cat", "dog", "dog", "horse", "horse"]
uris = [
"http://farm1.staticflickr.com/53/167798175_7c7845bbbd_z.jpg",
"http://farm1.staticflickr.com/134/332220238_da527d8140_z.jpg",
"http://farm9.staticflickr.com/8387/8602747737_2e5c2a45d4_z.jpg",
"http://farm5.staticflickr.com/4092/5017326486_1f46057f5f_z.jpg",
"http://farm9.staticflickr.com/8216/8434969557_d37882c42d_z.jpg",
"http://farm6.staticflickr.com/5142/5835678453_4f3a4edb45_z.jpg",
]
# get each uri as bytes
image_bytes = [requests.get(uri).content for uri in uris]
table.add(
pd.DataFrame({"label": labels, "image_uri": uris, "image_bytes": image_bytes})
)
# text search
actual = (
table.search("man's best friend", vector_column_name="vector")
.limit(1)
.to_pydantic(Images)[0]
)
assert actual.label == "dog"
frombytes = (
table.search("man's best friend", vector_column_name="vec_from_bytes")
.limit(1)
.to_pydantic(Images)[0]
)
assert actual.label == frombytes.label
assert np.allclose(actual.vector, frombytes.vector)
# image search
query_image_uri = "http://farm1.staticflickr.com/200/467715466_ed4a31801f_z.jpg"
image_bytes = requests.get(query_image_uri).content
query_image = Image.open(io.BytesIO(image_bytes))
actual = (
table.search(query_image, vector_column_name="vector")
.limit(1)
.to_pydantic(Images)[0]
)
assert actual.label == "dog"
other = (
table.search(query_image, vector_column_name="vec_from_bytes")
.limit(1)
.to_pydantic(Images)[0]
)
assert actual.label == other.label
arrow_table = table.search().select(["vector", "vec_from_bytes"]).to_arrow()
assert np.allclose(
arrow_table["vector"].combine_chunks().values.to_numpy(),
arrow_table["vec_from_bytes"].combine_chunks().values.to_numpy(),
)
@pytest.mark.skipif(
    _imagebind is None,
    reason="skip if imagebind not installed.",
)
@pytest.mark.slow
def test_imagebind(tmp_path):
    """End-to-end test of the ImageBind multimodal embedding function.

    Downloads a small set of labeled images into a temporary directory,
    builds a LanceDB table whose ``image_uri`` column is auto-embedded,
    then checks that both a text query and an image query retrieve a
    "dog"-labeled row.
    """
    import os
    import shutil
    import tempfile
    # Importing this module registers the "imagebind" embedding function
    # and also binds the top-level ``lancedb`` name used below.
    import lancedb.embeddings.imagebind
    import pandas as pd
    import requests
    from lancedb.embeddings import get_registry
    from lancedb.pydantic import LanceModel, Vector
    with tempfile.TemporaryDirectory() as temp_dir:
        print(f"Created temporary directory {temp_dir}")
        def download_images(image_uris):
            # Fetch each URI into temp_dir. Failed downloads are logged and
            # skipped rather than failing the whole test.
            downloaded_image_paths = []
            for uri in image_uris:
                try:
                    response = requests.get(uri, stream=True)
                    if response.status_code == 200:
                        # Extract image name from URI
                        image_name = os.path.basename(uri)
                        image_path = os.path.join(temp_dir, image_name)
                        with open(image_path, "wb") as out_file:
                            shutil.copyfileobj(response.raw, out_file)
                        downloaded_image_paths.append(image_path)
                except Exception as e:  # noqa: PERF203
                    print(f"Failed to download {uri}. Error: {e}")
            return temp_dir, downloaded_image_paths
        db = lancedb.connect(tmp_path)
        registry = get_registry()
        func = registry.get("imagebind").create(max_retries=0)
        class Images(LanceModel):
            # ``image_uri`` is the source column; ``vector`` is populated by
            # the ImageBind function at ingest time.
            label: str
            image_uri: str = func.SourceField()
            vector: Vector(func.ndims()) = func.VectorField()
        table = db.create_table("images", schema=Images)
        labels = ["cat", "cat", "dog", "dog", "horse", "horse"]
        uris = [
            "http://farm1.staticflickr.com/53/167798175_7c7845bbbd_z.jpg",
            "http://farm1.staticflickr.com/134/332220238_da527d8140_z.jpg",
            "http://farm9.staticflickr.com/8387/8602747737_2e5c2a45d4_z.jpg",
            "http://farm5.staticflickr.com/4092/5017326486_1f46057f5f_z.jpg",
            "http://farm9.staticflickr.com/8216/8434969557_d37882c42d_z.jpg",
            "http://farm6.staticflickr.com/5142/5835678453_4f3a4edb45_z.jpg",
        ]
        temp_dir, downloaded_images = download_images(uris)
        table.add(pd.DataFrame({"label": labels, "image_uri": downloaded_images}))
        # text search
        actual = (
            table.search("man's best friend", vector_column_name="vector")
            .limit(1)
            .to_pydantic(Images)[0]
        )
        assert actual.label == "dog"
        # image search
        query_image_uri = [
            "https://live.staticflickr.com/65535/33336453970_491665f66e_h.jpg"
        ]
        temp_dir, downloaded_images = download_images(query_image_uri)
        query_image_uri = downloaded_images[0]
        actual = (
            table.search(query_image_uri, vector_column_name="vector")
            .limit(1)
            .to_pydantic(Images)[0]
        )
        assert actual.label == "dog"
        # NOTE(review): temp_dir is also removed by the TemporaryDirectory
        # context manager on exit — confirm the double cleanup is intended.
        if os.path.isdir(temp_dir):
            shutil.rmtree(temp_dir)
            print(f"Deleted temporary directory {temp_dir}")
@pytest.mark.slow
@pytest.mark.skipif(
    os.environ.get("COHERE_API_KEY") is None, reason="COHERE_API_KEY not set"
)  # also skip if cohere not installed
def test_cohere_embedding_function():
    """Ingest two documents with the Cohere multilingual model and check vector width."""
    registry = get_registry()
    cohere = registry.get("cohere").create(
        name="embed-multilingual-v2.0", max_retries=0
    )

    class TextModel(LanceModel):
        text: str = cohere.SourceField()
        vector: Vector(cohere.ndims()) = cohere.VectorField()

    rows = pd.DataFrame({"text": ["hello world", "goodbye world"]})
    # NOTE(review): unlike sibling tests, this writes to ~/lancedb rather than
    # a tmp_path fixture — confirm the home-directory location is intended.
    db = lancedb.connect("~/lancedb")
    tbl = db.create_table("test", schema=TextModel, mode="overwrite")
    tbl.add(rows)
    stored = tbl.to_pandas()["vector"]
    assert len(stored[0]) == cohere.ndims()
@pytest.mark.slow
def test_instructor_embedding(tmp_path):
    """Ingest two documents with the Instructor model and check vector width."""
    registry = get_registry()
    model = registry.get("instructor").create(max_retries=0)

    class TextModel(LanceModel):
        text: str = model.SourceField()
        vector: Vector(model.ndims()) = model.VectorField()

    frame = pd.DataFrame({"text": ["hello world", "goodbye world"]})
    db = lancedb.connect(tmp_path)
    tbl = db.create_table("test", schema=TextModel, mode="overwrite")
    tbl.add(frame)
    stored = tbl.to_pandas()["vector"]
    assert len(stored[0]) == model.ndims()
@pytest.mark.slow
@pytest.mark.skipif(
    os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set"
)
def test_gemini_embedding(tmp_path):
    """Ingest two documents with the Gemini text model, then run a text search."""
    registry = get_registry()
    model = registry.get("gemini-text").create(max_retries=0)

    class TextModel(LanceModel):
        text: str = model.SourceField()
        vector: Vector(model.ndims()) = model.VectorField()

    frame = pd.DataFrame({"text": ["hello world", "goodbye world"]})
    db = lancedb.connect(tmp_path)
    tbl = db.create_table("test", schema=TextModel, mode="overwrite")
    tbl.add(frame)
    stored = tbl.to_pandas()["vector"]
    assert len(stored[0]) == model.ndims()
    hits = tbl.search("hello").limit(1).to_pandas()
    assert hits["text"][0] == "hello world"
@pytest.mark.skipif(
    _mlx is None,
    reason="mlx tests only required for apple users.",
)
@pytest.mark.slow
def test_gte_embedding(tmp_path):
    """Ingest two documents with the GTE text model, then run a text search."""
    # Importing the module registers the "gte-text" embedding function.
    import lancedb.embeddings.gte

    model = get_registry().get("gte-text").create()

    class TextModel(LanceModel):
        text: str = model.SourceField()
        vector: Vector(model.ndims()) = model.VectorField()

    frame = pd.DataFrame({"text": ["hello world", "goodbye world"]})
    db = lancedb.connect(tmp_path)
    tbl = db.create_table("test", schema=TextModel, mode="overwrite")
    tbl.add(frame)
    stored = tbl.to_pandas()["vector"]
    assert len(stored[0]) == model.ndims()
    hits = tbl.search("hello").limit(1).to_pandas()
    assert hits["text"][0] == "hello world"
def aws_setup():
    """Return True when boto3 is importable and AWS credentials resolve, else False."""
    try:
        import boto3

        boto3.client("sts").get_caller_identity()
    except Exception:
        return False
    return True
@pytest.mark.slow
@pytest.mark.skipif(
    not aws_setup(), reason="AWS credentials not set or libraries not installed"
)
def test_bedrock_embedding(tmp_path):
    """Exercise each supported Bedrock embedding model end to end."""
    model_names = (
        "amazon.titan-embed-text-v1",
        "cohere.embed-english-v3",
        "cohere.embed-multilingual-v3",
    )
    for name in model_names:
        model = get_registry().get("bedrock-text").create(max_retries=0, name=name)

        class TextModel(LanceModel):
            text: str = model.SourceField()
            vector: Vector(model.ndims()) = model.VectorField()

        frame = pd.DataFrame({"text": ["hello world", "goodbye world"]})
        db = lancedb.connect(tmp_path)
        tbl = db.create_table("test", schema=TextModel, mode="overwrite")
        tbl.add(frame)
        assert len(tbl.to_pandas()["vector"][0]) == model.ndims()
@pytest.mark.slow
@pytest.mark.skipif(
    os.environ.get("OPENAI_API_KEY") is None, reason="OPENAI_API_KEY not set"
)
def test_openai_embedding(tmp_path):
    """Verify the OpenAI embedding function for every supported configuration.

    For each configuration (default model, large model, large model with a
    reduced output dimension) the test rebuilds the table, ingests two
    documents, and checks both the stored vector width and a simple search.
    """

    def _get_table(model):
        # Fresh table whose "text" column is auto-embedded by `model`.
        class TextModel(LanceModel):
            text: str = model.SourceField()
            vector: Vector(model.ndims()) = model.VectorField()

        db = lancedb.connect(tmp_path)
        tbl = db.create_table("test", schema=TextModel, mode="overwrite")
        return tbl

    df = pd.DataFrame({"text": ["hello world", "goodbye world"]})
    # Previously three near-identical inline sections; one loop per model
    # configuration keeps the assertions in a single place.
    for create_kwargs in (
        {},
        {"name": "text-embedding-3-large"},
        {"name": "text-embedding-3-large", "dim": 1024},
    ):
        model = get_registry().get("openai").create(max_retries=0, **create_kwargs)
        tbl = _get_table(model)
        tbl.add(df)
        assert len(tbl.to_pandas()["vector"][0]) == model.ndims()
        assert tbl.search("hello").limit(1).to_pandas()["text"][0] == "hello world"
| [
"lancedb.connect",
"lancedb.embeddings.get_registry"
] | [((1288, 1357), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""alias"""', "['sentence-transformers', 'openai']"], {}), "('alias', ['sentence-transformers', 'openai'])\n", (1311, 1357), False, 'import pytest\n'), ((5687, 5773), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(_imagebind is None)'], {'reason': '"""skip if imagebind not installed."""'}), "(_imagebind is None, reason=\n 'skip if imagebind not installed.')\n", (5705, 5773), False, 'import pytest\n'), ((10771, 10859), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(_mlx is None)'], {'reason': '"""mlx tests only required for apple users."""'}), "(_mlx is None, reason=\n 'mlx tests only required for apple users.')\n", (10789, 10859), False, 'import pytest\n'), ((1416, 1441), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (1431, 1441), False, 'import lancedb\n'), ((1457, 1471), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (1469, 1471), False, 'from lancedb.embeddings import get_registry\n'), ((3273, 3298), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (3288, 3298), False, 'import lancedb\n'), ((3314, 3328), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (3326, 3328), False, 'from lancedb.embeddings import get_registry\n'), ((4825, 4869), 'numpy.allclose', 'np.allclose', (['actual.vector', 'frombytes.vector'], {}), '(actual.vector, frombytes.vector)\n', (4836, 4869), True, 'import numpy as np\n'), ((8732, 8755), 'os.path.isdir', 'os.path.isdir', (['temp_dir'], {}), '(temp_dir)\n', (8745, 8755), False, 'import os\n'), ((9319, 9375), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': ['hello world', 'goodbye world']}"], {}), "({'text': ['hello world', 'goodbye world']})\n", (9331, 9375), True, 'import pandas as pd\n'), ((9385, 9413), 'lancedb.connect', 'lancedb.connect', (['"""~/lancedb"""'], {}), "('~/lancedb')\n", (9400, 9413), False, 'import lancedb\n'), ((9836, 9892), 'pandas.DataFrame', 
'pd.DataFrame', (["{'text': ['hello world', 'goodbye world']}"], {}), "({'text': ['hello world', 'goodbye world']})\n", (9848, 9892), True, 'import pandas as pd\n'), ((9902, 9927), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (9917, 9927), False, 'import lancedb\n'), ((10447, 10503), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': ['hello world', 'goodbye world']}"], {}), "({'text': ['hello world', 'goodbye world']})\n", (10459, 10503), True, 'import pandas as pd\n'), ((10513, 10538), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (10528, 10538), False, 'import lancedb\n'), ((11149, 11205), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': ['hello world', 'goodbye world']}"], {}), "({'text': ['hello world', 'goodbye world']})\n", (11161, 11205), True, 'import pandas as pd\n'), ((11215, 11240), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (11230, 11240), False, 'import lancedb\n'), ((13017, 13073), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': ['hello world', 'goodbye world']}"], {}), "({'text': ['hello world', 'goodbye world']})\n", (13029, 13073), True, 'import pandas as pd\n'), ((967, 1003), 'importlib.util.find_spec', 'importlib.util.find_spec', (['"""mlx.core"""'], {}), "('mlx.core')\n", (991, 1003), False, 'import importlib\n'), ((1114, 1151), 'importlib.util.find_spec', 'importlib.util.find_spec', (['"""imagebind"""'], {}), "('imagebind')\n", (1138, 1151), False, 'import importlib\n'), ((1883, 2169), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': ['hello world', 'goodbye world', 'fizz', 'buzz', 'foo', 'bar',\n 'baz'], 'text2': ['to be or not to be', 'that is the question',\n 'for whether tis nobler', 'in the mind to suffer',\n 'the slings and arrows', 'of outrageous fortune', 'or to take arms']}"], {}), "({'text': ['hello world', 'goodbye world', 'fizz', 'buzz',\n 'foo', 'bar', 'baz'], 'text2': ['to be or not to be',\n 'that is the question', 'for whether tis nobler',\n 'in 
the mind to suffer', 'the slings and arrows',\n 'of outrageous fortune', 'or to take arms']})\n", (1895, 2169), True, 'import pandas as pd\n'), ((2936, 2978), 'numpy.allclose', 'np.allclose', (['actual.vector', 'actual.vector2'], {}), '(actual.vector, actual.vector2)\n', (2947, 2978), True, 'import numpy as np\n'), ((3145, 3187), 'numpy.allclose', 'np.allclose', (['actual.vector', 'actual.vector2'], {}), '(actual.vector, actual.vector2)\n', (3156, 3187), True, 'import numpy as np\n'), ((4339, 4417), 'pandas.DataFrame', 'pd.DataFrame', (["{'label': labels, 'image_uri': uris, 'image_bytes': image_bytes}"], {}), "({'label': labels, 'image_uri': uris, 'image_bytes': image_bytes})\n", (4351, 4417), True, 'import pandas as pd\n'), ((4993, 5022), 'requests.get', 'requests.get', (['query_image_uri'], {}), '(query_image_uri)\n', (5005, 5022), False, 'import requests\n'), ((5060, 5083), 'io.BytesIO', 'io.BytesIO', (['image_bytes'], {}), '(image_bytes)\n', (5070, 5083), False, 'import io\n'), ((6075, 6104), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (6102, 6104), False, 'import tempfile\n'), ((7003, 7028), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (7018, 7028), False, 'import lancedb\n'), ((7048, 7062), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (7060, 7062), False, 'from lancedb.embeddings import get_registry\n'), ((8765, 8788), 'shutil.rmtree', 'shutil.rmtree', (['temp_dir'], {}), '(temp_dir)\n', (8778, 8788), False, 'import shutil\n'), ((8891, 8923), 'os.environ.get', 'os.environ.get', (['"""COHERE_API_KEY"""'], {}), "('COHERE_API_KEY')\n", (8905, 8923), False, 'import os\n'), ((10122, 10154), 'os.environ.get', 'os.environ.get', (['"""GOOGLE_API_KEY"""'], {}), "('GOOGLE_API_KEY')\n", (10136, 10154), False, 'import os\n'), ((11534, 11553), 'boto3.client', 'boto3.client', (['"""sts"""'], {}), "('sts')\n", (11546, 11553), False, 'import boto3\n'), ((12195, 12251), 
'pandas.DataFrame', 'pd.DataFrame', (["{'text': ['hello world', 'goodbye world']}"], {}), "({'text': ['hello world', 'goodbye world']})\n", (12207, 12251), True, 'import pandas as pd\n'), ((12265, 12290), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (12280, 12290), False, 'import lancedb\n'), ((12796, 12821), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (12811, 12821), False, 'import lancedb\n'), ((12497, 12529), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (12511, 12529), False, 'import os\n'), ((4273, 4290), 'requests.get', 'requests.get', (['uri'], {}), '(uri)\n', (4285, 4290), False, 'import requests\n'), ((7984, 8047), 'pandas.DataFrame', 'pd.DataFrame', (["{'label': labels, 'image_uri': downloaded_images}"], {}), "({'label': labels, 'image_uri': downloaded_images})\n", (7996, 8047), True, 'import pandas as pd\n'), ((6344, 6374), 'requests.get', 'requests.get', (['uri'], {'stream': '(True)'}), '(uri, stream=True)\n', (6356, 6374), False, 'import requests\n'), ((9065, 9079), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (9077, 9079), False, 'from lancedb.embeddings import get_registry\n'), ((9637, 9651), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (9649, 9651), False, 'from lancedb.embeddings import get_registry\n'), ((10247, 10261), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (10259, 10261), False, 'from lancedb.embeddings import get_registry\n'), ((10965, 10979), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (10977, 10979), False, 'from lancedb.embeddings import get_registry\n'), ((12929, 12943), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (12941, 12943), False, 'from lancedb.embeddings import get_registry\n'), ((13256, 13270), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (13268, 13270), False, 'from 
lancedb.embeddings import get_registry\n'), ((13572, 13586), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (13584, 13586), False, 'from lancedb.embeddings import get_registry\n'), ((6518, 6539), 'os.path.basename', 'os.path.basename', (['uri'], {}), '(uri)\n', (6534, 6539), False, 'import os\n'), ((6577, 6611), 'os.path.join', 'os.path.join', (['temp_dir', 'image_name'], {}), '(temp_dir, image_name)\n', (6589, 6611), False, 'import os\n'), ((11967, 11981), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (11979, 11981), False, 'from lancedb.embeddings import get_registry\n'), ((6705, 6747), 'shutil.copyfileobj', 'shutil.copyfileobj', (['response.raw', 'out_file'], {}), '(response.raw, out_file)\n', (6723, 6747), False, 'import shutil\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import lancedb
import pyarrow as pa
from lancedb.remote.client import VectorQuery, VectorQueryResult
class FakeLanceDBClient:
    """Minimal stand-in for the remote LanceDB transport client."""

    def close(self):
        """No-op: the fake client holds no resources."""
        pass

    def query(self, table_name: str, query: VectorQuery) -> VectorQueryResult:
        """Answer any query against the 'test' table with an empty result set."""
        assert table_name == "test"
        empty = pa.schema([]).empty_table()
        return VectorQueryResult(empty)

    def post(self, path: str):
        """No-op HTTP POST."""
        pass

    def mount_retry_adapter_for_table(self, table_name: str):
        """No-op: retry behavior is irrelevant for the fake client."""
        pass
def test_remote_db():
    """Smoke-test the remote connection path using the fake transport client."""
    conn = lancedb.connect("db://client-will-be-injected", api_key="fake")
    conn._client = FakeLanceDBClient()
    table = conn["test"]
    vector_field = pa.field("vector", pa.list_(pa.float32(), 2))
    table.schema = pa.schema([vector_field])
    table.search([1.0, 2.0]).to_pandas()
| [
"lancedb.connect",
"lancedb.remote.client.VectorQueryResult"
] | [((1101, 1164), 'lancedb.connect', 'lancedb.connect', (['"""db://client-will-be-injected"""'], {'api_key': '"""fake"""'}), "('db://client-will-be-injected', api_key='fake')\n", (1116, 1164), False, 'import lancedb\n'), ((924, 944), 'lancedb.remote.client.VectorQueryResult', 'VectorQueryResult', (['t'], {}), '(t)\n', (941, 944), False, 'from lancedb.remote.client import VectorQuery, VectorQueryResult\n'), ((881, 894), 'pyarrow.schema', 'pa.schema', (['[]'], {}), '([])\n', (890, 894), True, 'import pyarrow as pa\n'), ((1299, 1311), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (1309, 1311), True, 'import pyarrow as pa\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import pytest
from lancedb.context import contextualize
@pytest.fixture
def raw_df() -> pd.DataFrame:
    """Two toy 'documents' tokenized into a flat token/document_id frame."""
    doc_one = "The quick brown fox jumped over the lazy dog".split()
    doc_two = "I love sandwiches".split()
    return pd.DataFrame(
        {
            "token": doc_one + doc_two,
            "document_id": [1] * len(doc_one) + [2] * len(doc_two),
        }
    )
def test_contextualizer(raw_df: pd.DataFrame):
    """Windows of 6 tokens with stride 3, grouped per document."""
    contexts = (
        contextualize(raw_df)
        .window(6)
        .stride(3)
        .text_col("token")
        .groupby("document_id")
    )
    windows = contexts.to_pandas()["token"].to_list()
    expected = [
        "The quick brown fox jumped over",
        "fox jumped over the lazy dog",
        "the lazy dog",
        "I love sandwiches",
    ]
    assert windows == expected
def test_contextualizer_with_threshold(raw_df: pd.DataFrame):
    """min_window_size(4) drops windows shorter than four tokens."""
    contexts = (
        contextualize(raw_df)
        .window(6)
        .stride(3)
        .text_col("token")
        .groupby("document_id")
        .min_window_size(4)
    )
    windows = contexts.to_pandas()["token"].to_list()
    expected = [
        "The quick brown fox jumped over",
        "fox jumped over the lazy dog",
    ]
    assert windows == expected
| [
"lancedb.context.contextualize"
] | [((726, 911), 'pandas.DataFrame', 'pd.DataFrame', (["{'token': ['The', 'quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy',\n 'dog', 'I', 'love', 'sandwiches'], 'document_id': [1, 1, 1, 1, 1, 1, 1,\n 1, 1, 2, 2, 2]}"], {}), "({'token': ['The', 'quick', 'brown', 'fox', 'jumped', 'over',\n 'the', 'lazy', 'dog', 'I', 'love', 'sandwiches'], 'document_id': [1, 1,\n 1, 1, 1, 1, 1, 1, 1, 2, 2, 2]})\n", (738, 911), True, 'import pandas as pd\n'), ((1232, 1253), 'lancedb.context.contextualize', 'contextualize', (['raw_df'], {}), '(raw_df)\n', (1245, 1253), False, 'from lancedb.context import contextualize\n'), ((1659, 1680), 'lancedb.context.contextualize', 'contextualize', (['raw_df'], {}), '(raw_df)\n', (1672, 1680), False, 'from lancedb.context import contextualize\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from typing import List, Union
import lance
import lancedb
import numpy as np
import pyarrow as pa
import pytest
from lancedb.conftest import MockTextEmbeddingFunction
from lancedb.embeddings import (
EmbeddingFunctionConfig,
EmbeddingFunctionRegistry,
with_embeddings,
)
from lancedb.embeddings.base import TextEmbeddingFunction
from lancedb.embeddings.registry import get_registry, register
from lancedb.pydantic import LanceModel, Vector
def mock_embed_func(input_data, dim=128):
    """Return one random `dim`-dimensional embedding (a list of floats) per input item.

    Parameters
    ----------
    input_data : Sequence
        Items to "embed"; only its length is used.
    dim : int, default 128
        Dimensionality of each generated embedding. Previously hard-coded;
        the default preserves the original behavior.
    """
    return [np.random.randn(dim).tolist() for _ in range(len(input_data))]
def test_with_embeddings():
    """with_embeddings should append a 'vector' column while preserving source columns."""
    for wrap_api in (True, False):
        # ratelimiter package doesn't work on 3.11
        if wrap_api and sys.version_info.minor >= 11:
            continue
        texts = pa.array(["foo", "bar"])
        prices = pa.array([10.0, 20.0])
        source = pa.Table.from_arrays([texts, prices], names=["text", "price"])
        result = with_embeddings(mock_embed_func, source, wrap_api=wrap_api)
        assert result.num_columns == 3
        assert result.num_rows == 2
        assert result.column_names == ["text", "price", "vector"]
        assert result.column("text").to_pylist() == ["foo", "bar"]
        assert result.column("price").to_pylist() == [10.0, 20.0]
def test_embedding_function(tmp_path):
    """Round-trip an embedding-function config through dataset schema metadata."""
    registry = EmbeddingFunctionRegistry.get_instance()
    # A tiny table with a pre-populated vector column.
    tbl = pa.table(
        {
            "text": pa.array(["hello world", "goodbye world"]),
            "vector": [np.random.randn(10), np.random.randn(10)],
        }
    )
    config = EmbeddingFunctionConfig(
        source_column="text",
        vector_column="vector",
        function=MockTextEmbeddingFunction(),
    )
    # Serialize the config into the schema metadata and persist to disk.
    metadata = registry.get_table_metadata([config])
    tbl = tbl.replace_schema_metadata(metadata)
    lance.write_dataset(tbl, tmp_path / "test.lance")
    # Reload and recover the embedding function from the stored metadata.
    ds = lance.dataset(tmp_path / "test.lance")
    parsed = registry.parse_functions(ds.schema.metadata)
    func = parsed["vector"].function
    # The round-tripped function must be callable and deterministic.
    first = func.compute_query_embeddings("hello world")
    second = func.compute_query_embeddings("hello world")
    assert np.allclose(first, second)
@pytest.mark.slow
def test_embedding_function_rate_limit(tmp_path):
    """Without retries the rate limiter raises on the second add; with retries both adds land."""

    def _schema_for(model):
        class Schema(LanceModel):
            text: str = model.SourceField()
            vector: Vector(model.ndims()) = model.VectorField()

        return Schema

    db = lancedb.connect(tmp_path)
    registry = EmbeddingFunctionRegistry.get_instance()

    # max_retries=0: the rate limit surfaces as an exception.
    no_retry_model = registry.get("test-rate-limited").create(max_retries=0)
    table = db.create_table(
        "test", schema=_schema_for(no_retry_model), mode="overwrite"
    )
    table.add([{"text": "hello world"}])
    with pytest.raises(Exception):
        table.add([{"text": "hello world"}])
    assert len(table) == 1

    # Default retries absorb the rate limit, so both adds succeed.
    retry_model = registry.get("test-rate-limited").create()
    table = db.create_table(
        "test", schema=_schema_for(retry_model), mode="overwrite"
    )
    table.add([{"text": "hello world"}])
    table.add([{"text": "hello world"}])
    assert len(table) == 2
def test_add_optional_vector(tmp_path):
    """A vector field with default=None may be omitted at insert and still get populated."""

    @register("mock-embedding")
    class MockEmbeddingFunction(TextEmbeddingFunction):
        def ndims(self):
            return 128

        def generate_embeddings(
            self, texts: Union[List[str], np.ndarray]
        ) -> List[np.array]:
            """Produce one random embedding per input text."""
            dims = self.ndims()
            return [np.random.randn(dims).tolist() for _ in range(len(texts))]

    model = get_registry().get("mock-embedding").create()

    class LanceSchema(LanceModel):
        id: str
        vector: Vector(model.ndims()) = model.VectorField(default=None)
        text: str = model.SourceField()

    db = lancedb.connect(tmp_path)
    tbl = db.create_table("optional_vector", schema=LanceSchema)
    # Insert without an explicit vector; the embedding function must fill it in.
    row = LanceSchema(id="id", text="text")
    tbl.add([row])
    stored = tbl.to_pandas()["vector"][0]
    # The generated embedding should not be (approximately) all zeros.
    assert not (np.abs(stored) < 1e-6).all()
| [
"lancedb.embeddings.registry.register",
"lancedb.conftest.MockTextEmbeddingFunction",
"lancedb.connect",
"lancedb.embeddings.with_embeddings",
"lancedb.embeddings.registry.get_registry",
"lancedb.embeddings.EmbeddingFunctionRegistry.get_instance"
] | [((1948, 1988), 'lancedb.embeddings.EmbeddingFunctionRegistry.get_instance', 'EmbeddingFunctionRegistry.get_instance', ([], {}), '()\n', (1986, 1988), False, 'from lancedb.embeddings import EmbeddingFunctionConfig, EmbeddingFunctionRegistry, with_embeddings\n'), ((2476, 2527), 'lance.write_dataset', 'lance.write_dataset', (['table', "(tmp_path / 'test.lance')"], {}), "(table, tmp_path / 'test.lance')\n", (2495, 2527), False, 'import lance\n'), ((2559, 2597), 'lance.dataset', 'lance.dataset', (["(tmp_path / 'test.lance')"], {}), "(tmp_path / 'test.lance')\n", (2572, 2597), False, 'import lance\n'), ((2932, 2961), 'numpy.allclose', 'np.allclose', (['actual', 'expected'], {}), '(actual, expected)\n', (2943, 2961), True, 'import numpy as np\n'), ((3246, 3271), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (3261, 3271), False, 'import lancedb\n'), ((3287, 3327), 'lancedb.embeddings.EmbeddingFunctionRegistry.get_instance', 'EmbeddingFunctionRegistry.get_instance', ([], {}), '()\n', (3325, 3327), False, 'from lancedb.embeddings import EmbeddingFunctionConfig, EmbeddingFunctionRegistry, with_embeddings\n'), ((3980, 4006), 'lancedb.embeddings.registry.register', 'register', (['"""mock-embedding"""'], {}), "('mock-embedding')\n", (3988, 4006), False, 'from lancedb.embeddings.registry import get_registry, register\n'), ((4419, 4433), 'lancedb.embeddings.registry.get_registry', 'get_registry', ([], {}), '()\n', (4431, 4433), False, 'from lancedb.embeddings.registry import get_registry, register\n'), ((4660, 4685), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (4675, 4685), False, 'import lancedb\n'), ((1570, 1627), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['mock_embed_func', 'data'], {'wrap_api': 'wrap_api'}), '(mock_embed_func, data, wrap_api=wrap_api)\n', (1585, 1627), False, 'from lancedb.embeddings import EmbeddingFunctionConfig, EmbeddingFunctionRegistry, with_embeddings\n'), ((3558, 3582), 
'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (3571, 3582), False, 'import pytest\n'), ((2069, 2111), 'pyarrow.array', 'pa.array', (["['hello world', 'goodbye world']"], {}), "(['hello world', 'goodbye world'])\n", (2077, 2111), True, 'import pyarrow as pa\n'), ((2310, 2337), 'lancedb.conftest.MockTextEmbeddingFunction', 'MockTextEmbeddingFunction', ([], {}), '()\n', (2335, 2337), False, 'from lancedb.conftest import MockTextEmbeddingFunction\n'), ((1102, 1122), 'numpy.random.randn', 'np.random.randn', (['(128)'], {}), '(128)\n', (1117, 1122), True, 'import numpy as np\n'), ((1427, 1451), 'pyarrow.array', 'pa.array', (["['foo', 'bar']"], {}), "(['foo', 'bar'])\n", (1435, 1451), True, 'import pyarrow as pa\n'), ((1469, 1491), 'pyarrow.array', 'pa.array', (['[10.0, 20.0]'], {}), '([10.0, 20.0])\n', (1477, 1491), True, 'import pyarrow as pa\n'), ((2136, 2155), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (2151, 2155), True, 'import numpy as np\n'), ((2157, 2176), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (2172, 2176), True, 'import numpy as np\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
import pytest
from lancedb.util import get_uri_scheme, join_uri
def test_normalize_uri():
    """get_uri_scheme maps local paths (incl. Windows drives) to 'file' and keeps cloud schemes."""
    cases = [
        ("relative/path", "file"),
        ("/absolute/path", "file"),
        ("file:///absolute/path", "file"),
        ("s3://bucket/path", "s3"),
        ("gs://bucket/path", "gs"),
        ("c:\\windows\\path", "file"),
    ]
    for uri, expected_scheme in cases:
        assert get_uri_scheme(uri) == expected_scheme
def test_join_uri_remote():
    """join_uri should build the same URI from one trailing part or many parts."""
    for scheme in ("s3", "az", "gs"):
        expected = f"{scheme}://bucket/path/to/table.lance"
        # single part appended to a prefix ending in "/"
        assert join_uri(f"{scheme}://bucket/path/to/", "table.lance") == expected
        # multiple parts joined onto a bare bucket
        assert join_uri(f"{scheme}://bucket", "path", "to", "table.lance") == expected
# skip this test if on windows
@pytest.mark.skipif(os.name == "nt", reason="Windows paths are not POSIX")
def test_join_uri_posix():
    """String bases yield string joins; Path bases yield Path joins."""
    bases = (
        "relative/path",  # relative path, with and without trailing slash
        "relative/path/",
        "/absolute/path",  # absolute path
        "/absolute/path/",
        "file:///absolute/path",  # file URI
        "file:///absolute/path/",
    )
    for base in bases:
        expected = pathlib.Path(base) / "table.lance"
        assert join_uri(base, "table.lance") == str(expected)
        assert join_uri(pathlib.Path(base), "table.lance") == expected
# skip this test if not on windows
@pytest.mark.skipif(os.name != "nt", reason="Windows paths are not POSIX")
def test_local_join_uri_windows():
    """Windows-style bases join like pathlib.Path, for both str and Path inputs."""
    # https://learn.microsoft.com/en-us/dotnet/standard/io/file-path-formats
    bases = (
        "relative\\path",  # windows relative path
        "relative\\path\\",
        "c:\\absolute\\path",  # absolute path from the current drive
        "\\relative\\path",  # relative to the root of the current drive
    )
    for base in bases:
        expected = pathlib.Path(base) / "table.lance"
        assert join_uri(base, "table.lance") == str(expected)
        assert join_uri(pathlib.Path(base), "table.lance") == expected
| [
"lancedb.util.join_uri",
"lancedb.util.get_uri_scheme"
] | [((1561, 1634), 'pytest.mark.skipif', 'pytest.mark.skipif', (["(os.name == 'nt')"], {'reason': '"""Windows paths are not POSIX"""'}), "(os.name == 'nt', reason='Windows paths are not POSIX')\n", (1579, 1634), False, 'import pytest\n'), ((2201, 2274), 'pytest.mark.skipif', 'pytest.mark.skipif', (["(os.name != 'nt')"], {'reason': '"""Windows paths are not POSIX"""'}), "(os.name != 'nt', reason='Windows paths are not POSIX')\n", (2219, 2274), False, 'import pytest\n'), ((1033, 1052), 'lancedb.util.get_uri_scheme', 'get_uri_scheme', (['uri'], {}), '(uri)\n', (1047, 1052), False, 'from lancedb.util import get_uri_scheme, join_uri\n'), ((1947, 1976), 'lancedb.util.join_uri', 'join_uri', (['base', '"""table.lance"""'], {}), "(base, 'table.lance')\n", (1955, 1976), False, 'from lancedb.util import get_uri_scheme, join_uri\n'), ((2675, 2704), 'lancedb.util.join_uri', 'join_uri', (['base', '"""table.lance"""'], {}), "(base, 'table.lance')\n", (2683, 2704), False, 'from lancedb.util import get_uri_scheme, join_uri\n'), ((1347, 1373), 'lancedb.util.join_uri', 'join_uri', (['base_uri', '*parts'], {}), '(base_uri, *parts)\n', (1355, 1373), False, 'from lancedb.util import get_uri_scheme, join_uri\n'), ((1488, 1514), 'lancedb.util.join_uri', 'join_uri', (['base_uri', '*parts'], {}), '(base_uri, *parts)\n', (1496, 1514), False, 'from lancedb.util import get_uri_scheme, join_uri\n'), ((2068, 2086), 'pathlib.Path', 'pathlib.Path', (['base'], {}), '(base)\n', (2080, 2086), False, 'import pathlib\n'), ((2796, 2814), 'pathlib.Path', 'pathlib.Path', (['base'], {}), '(base)\n', (2808, 2814), False, 'import pathlib\n'), ((2128, 2146), 'pathlib.Path', 'pathlib.Path', (['base'], {}), '(base)\n', (2140, 2146), False, 'import pathlib\n'), ((2856, 2874), 'pathlib.Path', 'pathlib.Path', (['base'], {}), '(base)\n', (2868, 2874), False, 'import pathlib\n'), ((2006, 2024), 'pathlib.Path', 'pathlib.Path', (['base'], {}), '(base)\n', (2018, 2024), False, 'import pathlib\n'), ((2734, 2752), 
'pathlib.Path', 'pathlib.Path', (['base'], {}), '(base)\n', (2746, 2752), False, 'import pathlib\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import importlib.metadata
import platform
import random
import sys
import time
from lancedb.utils import CONFIG
from lancedb.utils.general import TryExcept
from .general import (
PLATFORMS,
get_git_origin_url,
is_git_dir,
is_github_actions_ci,
is_online,
is_pip_package,
is_pytest_running,
threaded_request,
)
class _Events:
"""
A class for collecting anonymous event analytics. Event analytics are enabled when
``diagnostics=True`` in config and disabled when ``diagnostics=False``.
You can enable or disable diagnostics by running ``lancedb diagnostics --enabled``
or ``lancedb diagnostics --disabled``.
Attributes
----------
url : str
The URL to send anonymous events.
rate_limit : float
The rate limit in seconds for sending events.
metadata : dict
A dictionary containing metadata about the environment.
enabled : bool
A flag to enable or disable Events based on certain conditions.
"""
_instance = None
url = "https://app.posthog.com/capture/"
headers = {"Content-Type": "application/json"}
api_key = "phc_oENDjGgHtmIDrV6puUiFem2RB4JA8gGWulfdulmMdZP"
# This api-key is write only and is safe to expose in the codebase.
def __init__(self):
"""
Initializes the Events object with default values for events, rate_limit,
and metadata.
"""
self.events = [] # events list
self.throttled_event_names = ["search_table"]
self.throttled_events = set()
self.max_events = 5 # max events to store in memory
self.rate_limit = 60.0 * 5 # rate limit (seconds)
self.time = 0.0
if is_git_dir():
install = "git"
elif is_pip_package():
install = "pip"
else:
install = "other"
self.metadata = {
"cli": sys.argv[0],
"install": install,
"python": ".".join(platform.python_version_tuple()[:2]),
"version": importlib.metadata.version("lancedb"),
"platforms": PLATFORMS,
"session_id": round(random.random() * 1e15),
# TODO: In future we might be interested in this metric
# 'engagement_time_msec': 1000
}
TESTS_RUNNING = is_pytest_running() or is_github_actions_ci()
ONLINE = is_online()
self.enabled = (
CONFIG["diagnostics"]
and not TESTS_RUNNING
and ONLINE
and (
is_pip_package()
or get_git_origin_url() == "https://github.com/lancedb/lancedb.git"
)
)
def __call__(self, event_name, params={}):
"""
Attempts to add a new event to the events list and send events if the rate
limit is reached.
Args
----
event_name : str
The name of the event to be logged.
params : dict, optional
A dictionary of additional parameters to be logged with the event.
"""
### NOTE: We might need a way to tag a session with a label to check usage
### from a source. Setting label should be exposed to the user.
if not self.enabled:
return
if (
len(self.events) < self.max_events
): # Events list limited to self.max_events (drop any events past this)
params.update(self.metadata)
event = {
"event": event_name,
"properties": params,
"timestamp": datetime.datetime.now(
tz=datetime.timezone.utc
).isoformat(),
"distinct_id": CONFIG["uuid"],
}
if event_name not in self.throttled_event_names:
self.events.append(event)
elif event_name not in self.throttled_events:
self.throttled_events.add(event_name)
self.events.append(event)
# Check rate limit
t = time.time()
if (t - self.time) < self.rate_limit:
return
# Time is over rate limiter, send now
data = {
"api_key": self.api_key,
"distinct_id": CONFIG["uuid"], # posthog needs this to accepts the event
"batch": self.events,
}
# POST equivalent to requests.post(self.url, json=data).
# threaded request is used to avoid blocking, retries are disabled, and
# verbose is disabled to avoid any possible disruption in the console.
threaded_request(
method="post",
url=self.url,
headers=self.headers,
json=data,
retry=0,
verbose=False,
)
# Flush & Reset
self.events = []
self.throttled_events = set()
self.time = t
@TryExcept(verbose=False)
def register_event(name: str, **kwargs):
    """Record an anonymous telemetry event, lazily creating the singleton collector."""
    collector = _Events._instance
    if collector is None:
        collector = _Events._instance = _Events()
    collector(name, **kwargs)
| [
"lancedb.utils.general.TryExcept"
] | [((5466, 5490), 'lancedb.utils.general.TryExcept', 'TryExcept', ([], {'verbose': '(False)'}), '(verbose=False)\n', (5475, 5490), False, 'from lancedb.utils.general import TryExcept\n'), ((4628, 4639), 'time.time', 'time.time', ([], {}), '()\n', (4637, 4639), False, 'import time\n'), ((2579, 2610), 'platform.python_version_tuple', 'platform.python_version_tuple', ([], {}), '()\n', (2608, 2610), False, 'import platform\n'), ((2747, 2762), 'random.random', 'random.random', ([], {}), '()\n', (2760, 2762), False, 'import random\n'), ((4171, 4218), 'datetime.datetime.now', 'datetime.datetime.now', ([], {'tz': 'datetime.timezone.utc'}), '(tz=datetime.timezone.utc)\n', (4192, 4218), False, 'import datetime\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import attrs
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from aiohttp import web
from lancedb.remote.client import RestfulLanceDBClient, VectorQuery
@attrs.define
class MockLanceDBServer:
    """In-process aiohttp server that mimics the LanceDB remote query endpoint for tests."""

    # Populated by setup(); excluded from the attrs-generated __init__.
    runner: web.AppRunner = attrs.field(init=False)
    site: web.TCPSite = attrs.field(init=False)

    async def query_handler(self, request: web.Request) -> web.Response:
        """Return a canned 10-row (vector, id) result for queries on "test_table"."""
        table_name = request.match_info["table_name"]
        assert table_name == "test_table"
        await request.json()
        # TODO: do some matching
        # Fabricate 10 random 128-d vectors with sequential integer ids.
        vecs = pd.Series([np.random.rand(128) for x in range(10)], name="vector")
        ids = pd.Series(range(10), name="id")
        df = pd.DataFrame([vecs, ids]).T
        batch = pa.RecordBatch.from_pandas(
            df,
            schema=pa.schema(
                [
                    pa.field("vector", pa.list_(pa.float32(), 128)),
                    pa.field("id", pa.int64()),
                ]
            ),
        )
        # Serialize the batch in Arrow IPC file format, as the client expects.
        sink = pa.BufferOutputStream()
        with pa.ipc.new_file(sink, batch.schema) as writer:
            writer.write_batch(batch)
        return web.Response(body=sink.getvalue().to_pybytes())

    async def setup(self):
        """Create the aiohttp app, routes, runner, and TCP site on localhost:8111."""
        app = web.Application()
        app.add_routes([web.post("/table/{table_name}", self.query_handler)])
        self.runner = web.AppRunner(app)
        await self.runner.setup()
        self.site = web.TCPSite(self.runner, "localhost", 8111)

    async def start(self):
        """Begin serving requests; call after setup()."""
        await self.site.start()

    async def stop(self):
        """Shut down the server and release the port."""
        await self.runner.cleanup()
@pytest.mark.skip(reason="flaky somehow, fix later")
@pytest.mark.asyncio
async def test_e2e_with_mock_server():
    """End-to-end: query the mock server through RestfulLanceDBClient and check columns."""
    server = MockLanceDBServer()
    await server.setup()
    await server.start()
    try:
        client = RestfulLanceDBClient("lancedb+http://localhost:8111")
        query = VectorQuery(
            vector=np.random.rand(128).tolist(),
            k=10,
            _metric="L2",
            columns=["id", "vector"],
        )
        response = await client.query("test_table", query)
        frame = response.to_pandas()
        assert "vector" in frame.columns
        assert "id" in frame.columns
    finally:
        # make sure we don't leak resources
        await server.stop()
| [
"lancedb.remote.client.RestfulLanceDBClient"
] | [((2188, 2239), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""flaky somehow, fix later"""'}), "(reason='flaky somehow, fix later')\n", (2204, 2239), False, 'import pytest\n'), ((839, 862), 'attrs.field', 'attrs.field', ([], {'init': '(False)'}), '(init=False)\n', (850, 862), False, 'import attrs\n'), ((887, 910), 'attrs.field', 'attrs.field', ([], {'init': '(False)'}), '(init=False)\n', (898, 910), False, 'import attrs\n'), ((1599, 1622), 'pyarrow.BufferOutputStream', 'pa.BufferOutputStream', ([], {}), '()\n', (1620, 1622), True, 'import pyarrow as pa\n'), ((1827, 1844), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (1842, 1844), False, 'from aiohttp import web\n'), ((1945, 1963), 'aiohttp.web.AppRunner', 'web.AppRunner', (['app'], {}), '(app)\n', (1958, 1963), False, 'from aiohttp import web\n'), ((2018, 2061), 'aiohttp.web.TCPSite', 'web.TCPSite', (['self.runner', '"""localhost"""', '(8111)'], {}), "(self.runner, 'localhost', 8111)\n", (2029, 2061), False, 'from aiohttp import web\n'), ((2425, 2478), 'lancedb.remote.client.RestfulLanceDBClient', 'RestfulLanceDBClient', (['"""lancedb+http://localhost:8111"""'], {}), "('lancedb+http://localhost:8111')\n", (2445, 2478), False, 'from lancedb.remote.client import RestfulLanceDBClient, VectorQuery\n'), ((1286, 1311), 'pandas.DataFrame', 'pd.DataFrame', (['[vecs, ids]'], {}), '([vecs, ids])\n', (1298, 1311), True, 'import pandas as pd\n'), ((1636, 1671), 'pyarrow.ipc.new_file', 'pa.ipc.new_file', (['sink', 'batch.schema'], {}), '(sink, batch.schema)\n', (1651, 1671), True, 'import pyarrow as pa\n'), ((1171, 1190), 'numpy.random.rand', 'np.random.rand', (['(128)'], {}), '(128)\n', (1185, 1190), True, 'import numpy as np\n'), ((1869, 1920), 'aiohttp.web.post', 'web.post', (['"""/table/{table_name}"""', 'self.query_handler'], {}), "('/table/{table_name}', self.query_handler)\n", (1877, 1920), False, 'from aiohttp import web\n'), ((1527, 1537), 'pyarrow.int64', 'pa.int64', ([], {}), 
'()\n', (1535, 1537), True, 'import pyarrow as pa\n'), ((1471, 1483), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (1481, 1483), True, 'import pyarrow as pa\n'), ((2612, 2631), 'numpy.random.rand', 'np.random.rand', (['(128)'], {}), '(128)\n', (2626, 2631), True, 'import numpy as np\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from datetime import timedelta
import lancedb
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from lancedb.pydantic import LanceModel, Vector
def test_basic(tmp_path):
    """Create a table from row dicts, then verify search, filtering, and db accessors."""
    db = lancedb.connect(tmp_path)
    assert db.uri == str(tmp_path)
    assert db.table_names() == []

    rows = [
        {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
        {"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
    ]
    table = db.create_table("test", data=rows)

    nearest = table.search([100, 100]).limit(1).to_pandas()
    assert len(nearest) == 1
    assert nearest["item"].iloc[0] == "bar"

    filtered = table.search([100, 100]).where("price < 15").limit(2).to_pandas()
    assert len(filtered) == 1
    assert filtered["item"].iloc[0] == "foo"

    assert db.table_names() == ["test"]
    assert "test" in db
    assert len(db) == 1
    assert db.open_table("test").name == db["test"].name
def test_ingest_pd(tmp_path):
    """Ingest a pandas DataFrame and verify search, filtering, and db accessors."""
    db = lancedb.connect(tmp_path)
    assert db.uri == str(tmp_path)
    assert db.table_names() == []

    frame = pd.DataFrame(
        {
            "vector": [[3.1, 4.1], [5.9, 26.5]],
            "item": ["foo", "bar"],
            "price": [10.0, 20.0],
        }
    )
    table = db.create_table("test", data=frame)

    nearest = table.search([100, 100]).limit(1).to_pandas()
    assert len(nearest) == 1
    assert nearest["item"].iloc[0] == "bar"

    cheap = table.search([100, 100]).where("price < 15").limit(2).to_pandas()
    assert len(cheap) == 1
    assert cheap["item"].iloc[0] == "foo"

    assert db.table_names() == ["test"]
    assert "test" in db
    assert len(db) == 1
    assert db.open_table("test").name == db["test"].name
def test_ingest_iterator(tmp_path):
    """Create a table from a generator yielding batches in five different formats.

    Each generator pass yields 5 batches of 2 rows each, repeated 5 times,
    for 50 rows total; the assertions in run_tests depend on that count.
    """

    class PydanticSchema(LanceModel):
        vector: Vector(2)
        item: str
        price: float

    # Arrow schema equivalent to PydanticSchema; both are exercised below.
    arrow_schema = pa.schema(
        [
            pa.field("vector", pa.list_(pa.float32(), 2)),
            pa.field("item", pa.utf8()),
            pa.field("price", pa.float32()),
        ]
    )

    def make_batches():
        # Yield the same logical data in every supported input format.
        for _ in range(5):
            yield from [
                # pandas
                pd.DataFrame(
                    {
                        "vector": [[3.1, 4.1], [1, 1]],
                        "item": ["foo", "bar"],
                        "price": [10.0, 20.0],
                    }
                ),
                # pylist
                [
                    {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
                    {"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
                ],
                # recordbatch
                pa.RecordBatch.from_arrays(
                    [
                        pa.array([[3.1, 4.1], [5.9, 26.5]], pa.list_(pa.float32(), 2)),
                        pa.array(["foo", "bar"]),
                        pa.array([10.0, 20.0]),
                    ],
                    ["vector", "item", "price"],
                ),
                # pa Table
                pa.Table.from_arrays(
                    [
                        pa.array([[3.1, 4.1], [5.9, 26.5]], pa.list_(pa.float32(), 2)),
                        pa.array(["foo", "bar"]),
                        pa.array([10.0, 20.0]),
                    ],
                    ["vector", "item", "price"],
                ),
                # pydantic list
                [
                    PydanticSchema(vector=[3.1, 4.1], item="foo", price=10.0),
                    PydanticSchema(vector=[5.9, 26.5], item="bar", price=20.0),
                ],
                # TODO: test pydict separately. it is unique column number and
                # name constraints
            ]

    def run_tests(schema):
        # Build the table from the generator, then verify search and versioning.
        db = lancedb.connect(tmp_path)
        tbl = db.create_table("table2", make_batches(), schema=schema, mode="overwrite")
        tbl.to_pandas()
        assert tbl.search([3.1, 4.1]).limit(1).to_pandas()["_distance"][0] == 0.0
        assert tbl.search([5.9, 26.5]).limit(1).to_pandas()["_distance"][0] == 0.0
        tbl_len = len(tbl)
        tbl.add(make_batches())
        assert tbl_len == 50
        assert len(tbl) == tbl_len * 2
        # create + two data writes -> 3 versions
        assert len(tbl.list_versions()) == 3
        db.drop_database()

    run_tests(arrow_schema)
    run_tests(PydanticSchema)
def test_table_names(tmp_path):
    """table_names() lists tables in sorted order regardless of creation order."""
    db = lancedb.connect(tmp_path)
    frame = pd.DataFrame(
        {
            "vector": [[3.1, 4.1], [5.9, 26.5]],
            "item": ["foo", "bar"],
            "price": [10.0, 20.0],
        }
    )
    # Create out of order on purpose; listing should still be sorted.
    for name in ("test2", "test1", "test3"):
        db.create_table(name, data=frame)
    assert db.table_names() == ["test1", "test2", "test3"]
@pytest.mark.asyncio
async def test_table_names_async(tmp_path):
    """Async table_names() supports sorted listing, limit, and start_after paging."""
    sync_db = lancedb.connect(tmp_path)
    frame = pd.DataFrame(
        {
            "vector": [[3.1, 4.1], [5.9, 26.5]],
            "item": ["foo", "bar"],
            "price": [10.0, 20.0],
        }
    )
    for name in ("test2", "test1", "test3"):
        sync_db.create_table(name, data=frame)

    db = await lancedb.connect_async(tmp_path)
    assert await db.table_names() == ["test1", "test2", "test3"]
    assert await db.table_names(limit=1) == ["test1"]
    assert await db.table_names(start_after="test1", limit=1) == ["test2"]
    assert await db.table_names(start_after="test1") == ["test2", "test3"]
def test_create_mode(tmp_path):
    """Recreating a table fails by default but succeeds with mode="overwrite"."""
    db = lancedb.connect(tmp_path)
    original = pd.DataFrame(
        {
            "vector": [[3.1, 4.1], [5.9, 26.5]],
            "item": ["foo", "bar"],
            "price": [10.0, 20.0],
        }
    )
    db.create_table("test", data=original)

    # Same name without an explicit mode must raise.
    with pytest.raises(Exception):
        db.create_table("test", data=original)

    replacement = pd.DataFrame(
        {
            "vector": [[3.1, 4.1], [5.9, 26.5]],
            "item": ["fizz", "buzz"],
            "price": [10.0, 20.0],
        }
    )
    tbl = db.create_table("test", data=replacement, mode="overwrite")
    assert tbl.to_pandas().item.tolist() == ["fizz", "buzz"]
def test_create_exist_ok(tmp_path):
    """exist_ok=True opens an existing table instead of failing, unless schemas differ."""
    db = lancedb.connect(tmp_path)
    frame = pd.DataFrame(
        {
            "vector": [[3.1, 4.1], [5.9, 26.5]],
            "item": ["foo", "bar"],
            "price": [10.0, 20.0],
        }
    )
    tbl = db.create_table("test", data=frame)

    with pytest.raises(OSError):
        db.create_table("test", data=frame)

    # exist_ok opens the table but must not append any rows
    reopened = db.create_table("test", data=frame, exist_ok=True)
    assert tbl.name == reopened.name
    assert tbl.schema == reopened.schema
    assert len(tbl) == len(reopened)

    matching = pa.schema(
        [
            pa.field("vector", pa.list_(pa.float32(), list_size=2)),
            pa.field("item", pa.utf8()),
            pa.field("price", pa.float64()),
        ]
    )
    tbl3 = db.create_table("test", schema=matching, exist_ok=True)
    assert tbl3.schema == matching

    # A conflicting schema (extra column) must be rejected even with exist_ok.
    mismatched = pa.schema(
        [
            pa.field("vector", pa.list_(pa.float32(), list_size=2)),
            pa.field("item", pa.utf8()),
            pa.field("price", pa.float64()),
            pa.field("extra", pa.float32()),
        ]
    )
    with pytest.raises(ValueError):
        db.create_table("test", schema=mismatched, exist_ok=True)
@pytest.mark.asyncio
async def test_connect(tmp_path):
    """The async connection repr reflects the configured read_consistency_interval."""
    conn = await lancedb.connect_async(tmp_path)
    expected = f"NativeDatabase(uri={tmp_path}, read_consistency_interval=None)"
    assert str(conn) == expected

    conn = await lancedb.connect_async(
        tmp_path, read_consistency_interval=timedelta(seconds=5)
    )
    expected = f"NativeDatabase(uri={tmp_path}, read_consistency_interval=5s)"
    assert str(conn) == expected
@pytest.mark.asyncio
async def test_close(tmp_path):
    """A closed async connection reports closed and rejects further operations."""
    conn = await lancedb.connect_async(tmp_path)
    assert conn.is_open()
    conn.close()
    assert not conn.is_open()

    with pytest.raises(RuntimeError, match="is closed"):
        await conn.table_names()
@pytest.mark.asyncio
async def test_create_mode_async(tmp_path):
    """Async create_table refuses duplicates by default; mode="overwrite" replaces."""
    db = await lancedb.connect_async(tmp_path)
    original = pd.DataFrame(
        {
            "vector": [[3.1, 4.1], [5.9, 26.5]],
            "item": ["foo", "bar"],
            "price": [10.0, 20.0],
        }
    )
    await db.create_table("test", data=original)

    with pytest.raises(RuntimeError):
        await db.create_table("test", data=original)

    replacement = pd.DataFrame(
        {
            "vector": [[3.1, 4.1], [5.9, 26.5]],
            "item": ["fizz", "buzz"],
            "price": [10.0, 20.0],
        }
    )
    await db.create_table("test", data=replacement, mode="overwrite")
    # MIGRATION: to_pandas() is not available in async
    # assert tbl.to_pandas().item.tolist() == ["fizz", "buzz"]
@pytest.mark.asyncio
async def test_create_exist_ok_async(tmp_path):
    """Async exist_ok=True opens an existing table without adding rows."""
    db = await lancedb.connect_async(tmp_path)
    data = pd.DataFrame(
        {
            "vector": [[3.1, 4.1], [5.9, 26.5]],
            "item": ["foo", "bar"],
            "price": [10.0, 20.0],
        }
    )
    tbl = await db.create_table("test", data=data)
    # Recreating the same table without exist_ok must fail.
    with pytest.raises(RuntimeError):
        await db.create_table("test", data=data)
    # open the table but don't add more rows
    tbl2 = await db.create_table("test", data=data, exist_ok=True)
    assert tbl.name == tbl2.name
    assert await tbl.schema() == await tbl2.schema()
    # Passing a schema matching the existing table is also accepted.
    schema = pa.schema(
        [
            pa.field("vector", pa.list_(pa.float32(), list_size=2)),
            pa.field("item", pa.utf8()),
            pa.field("price", pa.float64()),
        ]
    )
    tbl3 = await db.create_table("test", schema=schema, exist_ok=True)
    assert await tbl3.schema() == schema

    # Migration: When creating a table, but the table already exists, but
    # the schema is different, it should raise an error.
    # bad_schema = pa.schema(
    #     [
    #         pa.field("vector", pa.list_(pa.float32(), list_size=2)),
    #         pa.field("item", pa.utf8()),
    #         pa.field("price", pa.float64()),
    #         pa.field("extra", pa.float32()),
    #     ]
    # )
    # with pytest.raises(ValueError):
    #     await db.create_table("test", schema=bad_schema, exist_ok=True)
@pytest.mark.asyncio
async def test_open_table(tmp_path):
    """open_table returns the existing table (name, repr, schema) and errors on a missing one."""
    db = await lancedb.connect_async(tmp_path)
    frame = pd.DataFrame(
        {
            "vector": [[3.1, 4.1], [5.9, 26.5]],
            "item": ["foo", "bar"],
            "price": [10.0, 20.0],
        }
    )
    await db.create_table("test", data=frame)

    tbl = await db.open_table("test")
    assert tbl.name == "test"
    pattern = r"NativeTable\(test, uri=.*test\.lance, read_consistency_interval=None\)"
    assert re.search(pattern, str(tbl)) is not None

    expected_schema = pa.schema(
        {
            "vector": pa.list_(pa.float32(), list_size=2),
            "item": pa.utf8(),
            "price": pa.float64(),
        }
    )
    assert await tbl.schema() == expected_schema

    with pytest.raises(ValueError, match="was not found"):
        await db.open_table("does_not_exist")
def test_delete_table(tmp_path):
    """drop_table removes a table; ignore_missing=True tolerates absent tables."""
    db = lancedb.connect(tmp_path)
    frame = pd.DataFrame(
        {
            "vector": [[3.1, 4.1], [5.9, 26.5]],
            "item": ["foo", "bar"],
            "price": [10.0, 20.0],
        }
    )
    db.create_table("test", data=frame)
    with pytest.raises(Exception):
        db.create_table("test", data=frame)

    assert db.table_names() == ["test"]
    db.drop_table("test")
    assert db.table_names() == []

    # The name is reusable after the drop.
    db.create_table("test", data=frame)
    assert db.table_names() == ["test"]

    # dropping a table that does not exist should pass
    # if ignore_missing=True
    db.drop_table("does_not_exist", ignore_missing=True)
def test_drop_database(tmp_path):
    """drop_database removes every table, including schema-only and empty databases."""
    db = lancedb.connect(tmp_path)
    frame = pd.DataFrame(
        {
            "vector": [[3.1, 4.1], [5.9, 26.5]],
            "item": ["foo", "bar"],
            "price": [10.0, 20.0],
        }
    )
    other = pd.DataFrame(
        {
            "vector": [[5.1, 4.1], [5.9, 10.5]],
            "item": ["kiwi", "avocado"],
            "price": [12.0, 17.0],
        }
    )
    db.create_table("test", data=frame)
    with pytest.raises(Exception):
        db.create_table("test", data=frame)

    assert db.table_names() == ["test"]
    db.create_table("new_test", data=other)
    db.drop_database()
    assert db.table_names() == []

    # Dropping when no tables are present must also pass.
    db.create_table("test", data=other)
    db.drop_table("test")
    assert db.table_names() == []
    db.drop_database()
    assert db.table_names() == []

    # A table created from a schema alone (no rows) is dropped too.
    schema = pa.schema([pa.field("vector", pa.list_(pa.float32(), list_size=2))])
    db.create_table("empty_table", schema=schema)
    db.drop_database()
    assert db.table_names() == []
def test_empty_or_nonexistent_table(tmp_path):
    """Creating without data/schema or opening a missing table fails; schemas from Arrow and pydantic agree."""
    db = lancedb.connect(tmp_path)
    with pytest.raises(Exception):
        db.create_table("test_with_no_data")
    with pytest.raises(Exception):
        db.open_table("does_not_exist")

    arrow_schema = pa.schema([pa.field("a", pa.int64(), nullable=False)])
    from_arrow = db.create_table("test", schema=arrow_schema)

    class TestModel(LanceModel):
        a: int

    from_pydantic = db.create_table("test2", schema=TestModel)
    assert from_arrow.schema == from_pydantic.schema
def test_replace_index(tmp_path):
    """Rebuilding an existing index requires replace=True."""
    db = lancedb.connect(uri=tmp_path)
    rows = [
        {"vector": np.random.rand(128), "item": "foo", "price": float(i)}
        for i in range(1000)
    ]
    table = db.create_table("test", rows)
    table.create_index(
        num_partitions=2,
        num_sub_vectors=4,
    )

    # Rebuilding with replace=False on an indexed column must fail.
    with pytest.raises(Exception):
        table.create_index(
            num_partitions=2,
            num_sub_vectors=4,
            replace=False,
        )

    table.create_index(
        num_partitions=2,
        num_sub_vectors=4,
        replace=True,
        index_cache_size=10,
    )
def test_prefilter_with_index(tmp_path):
    """With prefilter=True the where-clause is applied before the ANN search."""
    db = lancedb.connect(uri=tmp_path)
    rows = [
        {"vector": np.random.rand(128), "item": "foo", "price": float(i)}
        for i in range(1000)
    ]
    probe = rows[100]["vector"]
    table = db.create_table(
        "test",
        rows,
    )
    table.create_index(
        num_partitions=2,
        num_sub_vectors=4,
    )

    # Exactly one row has price == 500, so prefiltering yields a single hit.
    result = (
        table.search(probe)
        .where("price == 500", prefilter=True)
        .limit(5)
        .to_arrow()
    )
    assert result.num_rows == 1
| [
"lancedb.connect",
"lancedb.pydantic.Vector",
"lancedb.connect_async"
] | [((807, 832), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (822, 832), False, 'import lancedb\n'), ((1559, 1584), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (1574, 1584), False, 'import lancedb\n'), ((1667, 1769), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (1679, 1769), True, 'import pandas as pd\n'), ((4917, 4942), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (4932, 4942), False, 'import lancedb\n'), ((4954, 5056), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (4966, 5056), True, 'import pandas as pd\n'), ((5369, 5394), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (5384, 5394), False, 'import lancedb\n'), ((5406, 5508), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (5418, 5508), True, 'import pandas as pd\n'), ((6047, 6072), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (6062, 6072), False, 'import lancedb\n'), ((6084, 6186), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (6096, 6186), True, 'import pandas as pd\n'), ((6378, 6482), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['fizz', 'buzz'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 
26.5]], 'item': ['fizz', 'buzz'],\n 'price': [10.0, 20.0]})\n", (6390, 6482), True, 'import pandas as pd\n'), ((6715, 6740), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (6730, 6740), False, 'import lancedb\n'), ((6752, 6854), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (6764, 6854), True, 'import pandas as pd\n'), ((8676, 8778), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (8688, 8778), True, 'import pandas as pd\n'), ((8985, 9089), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['fizz', 'buzz'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['fizz', 'buzz'],\n 'price': [10.0, 20.0]})\n", (8997, 9089), True, 'import pandas as pd\n'), ((9469, 9571), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (9481, 9571), True, 'import pandas as pd\n'), ((10917, 11019), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (10929, 11019), True, 'import pandas as pd\n'), ((11713, 11738), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (11728, 11738), False, 'import lancedb\n'), ((11750, 11852), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': 
[[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (11762, 11852), True, 'import pandas as pd\n'), ((12397, 12422), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (12412, 12422), False, 'import lancedb\n'), ((12434, 12536), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (12446, 12536), True, 'import pandas as pd\n'), ((12609, 12716), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[5.1, 4.1], [5.9, 10.5]], 'item': ['kiwi', 'avocado'], 'price':\n [12.0, 17.0]}"], {}), "({'vector': [[5.1, 4.1], [5.9, 10.5]], 'item': ['kiwi',\n 'avocado'], 'price': [12.0, 17.0]})\n", (12621, 12716), True, 'import pandas as pd\n'), ((13583, 13608), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (13598, 13608), False, 'import lancedb\n'), ((14073, 14102), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'tmp_path'}), '(uri=tmp_path)\n', (14088, 14102), False, 'import lancedb\n'), ((14718, 14747), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'tmp_path'}), '(uri=tmp_path)\n', (14733, 14747), False, 'import lancedb\n'), ((2370, 2379), 'lancedb.pydantic.Vector', 'Vector', (['(2)'], {}), '(2)\n', (2376, 2379), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((4312, 4337), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (4327, 4337), False, 'import lancedb\n'), ((5702, 5733), 'lancedb.connect_async', 'lancedb.connect_async', (['tmp_path'], {}), '(tmp_path)\n', (5723, 5733), False, 'import lancedb\n'), ((6293, 6317), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (6306, 6317), False, 'import pytest\n'), ((6967, 6989), 'pytest.raises', 'pytest.raises', (['OSError'], {}), '(OSError)\n', (6980, 6989), False, 'import pytest\n'), ((7812, 7837), 'pytest.raises', 
'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7825, 7837), False, 'import pytest\n'), ((7977, 8008), 'lancedb.connect_async', 'lancedb.connect_async', (['tmp_path'], {}), '(tmp_path)\n', (7998, 8008), False, 'import lancedb\n'), ((8363, 8394), 'lancedb.connect_async', 'lancedb.connect_async', (['tmp_path'], {}), '(tmp_path)\n', (8384, 8394), False, 'import lancedb\n'), ((8472, 8518), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""is closed"""'}), "(RuntimeError, match='is closed')\n", (8485, 8518), False, 'import pytest\n'), ((8633, 8664), 'lancedb.connect_async', 'lancedb.connect_async', (['tmp_path'], {}), '(tmp_path)\n', (8654, 8664), False, 'import lancedb\n'), ((8891, 8918), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (8904, 8918), False, 'import pytest\n'), ((9426, 9457), 'lancedb.connect_async', 'lancedb.connect_async', (['tmp_path'], {}), '(tmp_path)\n', (9447, 9457), False, 'import lancedb\n'), ((9690, 9717), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (9703, 9717), False, 'import pytest\n'), ((10874, 10905), 'lancedb.connect_async', 'lancedb.connect_async', (['tmp_path'], {}), '(tmp_path)\n', (10895, 10905), False, 'import lancedb\n'), ((11573, 11621), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""was not found"""'}), "(ValueError, match='was not found')\n", (11586, 11621), False, 'import pytest\n'), ((11959, 11983), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (11972, 11983), False, 'import pytest\n'), ((12822, 12846), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (12835, 12846), False, 'import pytest\n'), ((13618, 13642), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (13631, 13642), False, 'import pytest\n'), ((13699, 13723), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (13712, 13723), False, 'import pytest\n'), ((14379, 14403), 
'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (14392, 14403), False, 'import pytest\n'), ((14780, 14799), 'numpy.random.rand', 'np.random.rand', (['(128)'], {}), '(128)\n', (14794, 14799), True, 'import numpy as np\n'), ((2548, 2557), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (2555, 2557), True, 'import pyarrow as pa\n'), ((2590, 2602), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (2600, 2602), True, 'import pyarrow as pa\n'), ((7377, 7386), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (7384, 7386), True, 'import pyarrow as pa\n'), ((7419, 7431), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (7429, 7431), True, 'import pyarrow as pa\n'), ((7685, 7694), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (7692, 7694), True, 'import pyarrow as pa\n'), ((7727, 7739), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (7737, 7739), True, 'import pyarrow as pa\n'), ((7772, 7784), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (7782, 7784), True, 'import pyarrow as pa\n'), ((8180, 8200), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(5)'}), '(seconds=5)\n', (8189, 8200), False, 'from datetime import timedelta\n'), ((10100, 10109), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (10107, 10109), True, 'import pyarrow as pa\n'), ((10142, 10154), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (10152, 10154), True, 'import pyarrow as pa\n'), ((11501, 11510), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (11508, 11510), True, 'import pyarrow as pa\n'), ((11533, 11545), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (11543, 11545), True, 'import pyarrow as pa\n'), ((13804, 13814), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (13812, 13814), True, 'import pyarrow as pa\n'), ((14181, 14200), 'numpy.random.rand', 'np.random.rand', (['(128)'], {}), '(128)\n', (14195, 14200), True, 'import numpy as np\n'), ((2500, 2512), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (2510, 2512), True, 'import pyarrow as pa\n'), ((2739, 2836), 
'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [1, 1]], 'item': ['foo', 'bar'], 'price': [10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [1, 1]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (2751, 2836), True, 'import pandas as pd\n'), ((7319, 7331), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (7329, 7331), True, 'import pyarrow as pa\n'), ((7627, 7639), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (7637, 7639), True, 'import pyarrow as pa\n'), ((10042, 10054), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (10052, 10054), True, 'import pyarrow as pa\n'), ((11453, 11465), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (11463, 11465), True, 'import pyarrow as pa\n'), ((13344, 13356), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (13354, 13356), True, 'import pyarrow as pa\n'), ((3386, 3410), 'pyarrow.array', 'pa.array', (["['foo', 'bar']"], {}), "(['foo', 'bar'])\n", (3394, 3410), True, 'import pyarrow as pa\n'), ((3436, 3458), 'pyarrow.array', 'pa.array', (['[10.0, 20.0]'], {}), '([10.0, 20.0])\n', (3444, 3458), True, 'import pyarrow as pa\n'), ((3750, 3774), 'pyarrow.array', 'pa.array', (["['foo', 'bar']"], {}), "(['foo', 'bar'])\n", (3758, 3774), True, 'import pyarrow as pa\n'), ((3800, 3822), 'pyarrow.array', 'pa.array', (['[10.0, 20.0]'], {}), '([10.0, 20.0])\n', (3808, 3822), True, 'import pyarrow as pa\n'), ((3343, 3355), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (3353, 3355), True, 'import pyarrow as pa\n'), ((3707, 3719), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (3717, 3719), True, 'import pyarrow as pa\n')] |
# Ultralytics YOLO 🚀, AGPL-3.0 license
from io import BytesIO
from pathlib import Path
from typing import Any, List, Tuple, Union
import cv2
import numpy as np
import torch
from PIL import Image
from matplotlib import pyplot as plt
from pandas import DataFrame
from tqdm import tqdm
from ultralytics.data.augment import Format
from ultralytics.data.dataset import YOLODataset
from ultralytics.data.utils import check_det_dataset
from ultralytics.models.yolo.model import YOLO
from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR
from .utils import get_sim_index_schema, get_table_schema, plot_query_result, prompt_sql_query, sanitize_batch
class ExplorerDataset(YOLODataset):
    """YOLO dataset variant that serves images at their native resolution (no resize ops)."""

    def __init__(self, *args, data: dict = None, **kwargs) -> None:
        """Initialize the dataset, forwarding all arguments to YOLODataset."""
        super().__init__(*args, data=data, **kwargs)

    def load_image(self, i: int) -> Union[Tuple[np.ndarray, Tuple[int, int], Tuple[int, int]], Tuple[None, None, None]]:
        """Load one image at dataset index ``i`` without any resize ops."""
        cached = self.ims[i]
        if cached is not None:
            # Already cached in RAM: return the stored image and recorded shapes.
            return cached, self.im_hw0[i], self.im_hw[i]
        npy_path, img_path = self.npy_files[i], self.im_files[i]
        if npy_path.exists():
            image = np.load(npy_path)  # cached .npy on disk
        else:
            image = cv2.imread(img_path)  # BGR
            if image is None:
                raise FileNotFoundError(f"Image Not Found {img_path}")
        height0, width0 = image.shape[:2]  # original hw
        return image, (height0, width0), image.shape[:2]

    def build_transforms(self, hyp: IterableSimpleNamespace = None):
        """Create transforms for dataset images without resizing."""
        return Format(
            bbox_format="xyxy",
            normalize=False,
            return_mask=self.use_segments,
            return_keypoint=self.use_keypoints,
            batch_idx=True,
            mask_ratio=hyp.mask_ratio,
            mask_overlap=hyp.overlap_mask,
        )
class Explorer:
    """Semantic explorer for a detection dataset.

    Embeds dataset images with a YOLO model into a LanceDB table and supports
    vector-similarity queries, SQL-like filtering (via duckdb), similarity
    indexing, AI-assisted querying, and plotting of the results.
    """
    def __init__(
        self,
        data: Union[str, Path] = "coco128.yaml",
        model: str = "yolov8n.pt",
        uri: str = USER_CONFIG_DIR / "explorer",
    ) -> None:
        """Connect to the LanceDB database at `uri` and load the embedding model; does not build the table yet."""
        # Note duckdb==0.10.0 bug https://github.com/ultralytics/ultralytics/pull/8181
        checks.check_requirements(["lancedb>=0.4.3", "duckdb<=0.9.2"])
        import lancedb
        self.connection = lancedb.connect(uri)
        # Table name is derived from dataset file name + model name, e.g. "coco128.yaml_yolov8n.pt"
        self.table_name = Path(data).name.lower() + "_" + model.lower()
        self.sim_idx_base_name = (
            f"{self.table_name}_sim_idx".lower()
        )  # Use this name and append thres and top_k to reuse the table
        self.model = YOLO(model)
        self.data = data  # None
        self.choice_set = None
        self.table = None  # set by create_embeddings_table(); most methods require it
        self.progress = 0  # 0..1 fraction, updated while embedding
    def create_embeddings_table(self, force: bool = False, split: str = "train") -> None:
        """
        Create LanceDB table containing the embeddings of the images in the dataset. The table will be reused if it
        already exists. Pass force=True to overwrite the existing table.
        Args:
            force (bool): Whether to overwrite the existing table or not. Defaults to False.
            split (str): Split of the dataset to use. Defaults to 'train'.
        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            ```
        """
        if self.table is not None and not force:
            LOGGER.info("Table already exists. Reusing it. Pass force=True to overwrite it.")
            return
        if self.table_name in self.connection.table_names() and not force:
            LOGGER.info(f"Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.")
            self.table = self.connection.open_table(self.table_name)
            self.progress = 1
            return
        if self.data is None:
            raise ValueError("Data must be provided to create embeddings table")
        data_info = check_det_dataset(self.data)
        if split not in data_info:
            raise ValueError(
                f"Split {split} is not found in the dataset. Available keys in the dataset are {list(data_info.keys())}"
            )
        choice_set = data_info[split]
        choice_set = choice_set if isinstance(choice_set, list) else [choice_set]
        self.choice_set = choice_set
        dataset = ExplorerDataset(img_path=choice_set, data=data_info, augment=False, cache=False, task=self.model.task)
        # Create the table schema: embed one sample to discover the vector dimensionality
        batch = dataset[0]
        vector_size = self.model.embed(batch["im_file"], verbose=False)[0].shape[0]
        table = self.connection.create_table(self.table_name, schema=get_table_schema(vector_size), mode="overwrite")
        table.add(
            self._yield_batches(
                dataset,
                data_info,
                self.model,
                exclude_keys=["img", "ratio_pad", "resized_shape", "ori_shape", "batch_idx"],
            )
        )
        self.table = table
    def _yield_batches(self, dataset: ExplorerDataset, data_info: dict, model: YOLO, exclude_keys: List[str]):
        """Generates batches of data for embedding, excluding specified keys."""
        for i in tqdm(range(len(dataset))):
            self.progress = float(i + 1) / len(dataset)
            batch = dataset[i]
            for k in exclude_keys:
                batch.pop(k, None)
            batch = sanitize_batch(batch, data_info)
            batch["vector"] = model.embed(batch["im_file"], verbose=False)[0].detach().tolist()
            yield [batch]
    def query(
        self, imgs: Union[str, np.ndarray, List[str], List[np.ndarray]] = None, limit: int = 25
    ) -> Any:  # pyarrow.Table
        """
        Query the table for similar images. Accepts a single image or a list of images.
        Args:
            imgs (str or list): Path to the image or a list of paths to the images.
            limit (int): Number of results to return.
        Returns:
            (pyarrow.Table): An arrow table containing the results. Supports converting to:
                - pandas dataframe: `result.to_pandas()`
                - dict of lists: `result.to_pydict()`
        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            similar = exp.query(img='https://ultralytics.com/images/zidane.jpg')
            ```
        """
        if self.table is None:
            raise ValueError("Table is not created. Please create the table first.")
        if isinstance(imgs, str):
            imgs = [imgs]
        assert isinstance(imgs, list), f"img must be a string or a list of strings. Got {type(imgs)}"
        embeds = self.model.embed(imgs)
        # Get avg if multiple images are passed (len > 1)
        embeds = torch.mean(torch.stack(embeds), 0).cpu().numpy() if len(embeds) > 1 else embeds[0].cpu().numpy()
        return self.table.search(embeds).limit(limit).to_arrow()
    def sql_query(
        self, query: str, return_type: str = "pandas"
    ) -> Union[DataFrame, Any, None]:  # pandas.dataframe or pyarrow.Table
        """
        Run a SQL-Like query on the table. Utilizes LanceDB predicate pushdown.
        Args:
            query (str): SQL query to run.
            return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'.
        Returns:
            (pyarrow.Table): An arrow table containing the results.
        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'"
            result = exp.sql_query(query)
            ```
        """
        assert return_type in {
            "pandas",
            "arrow",
        }, f"Return type should be either `pandas` or `arrow`, but got {return_type}"
        import duckdb
        if self.table is None:
            raise ValueError("Table is not created. Please create the table first.")
        # Note: using filter pushdown would be a better long term solution. Temporarily using duckdb for this.
        # duckdb resolves "table" in the SQL via replacement scan of this local variable — do not rename it.
        table = self.table.to_arrow()  # noqa NOTE: Don't comment this. This line is used by DuckDB
        if not query.startswith("SELECT") and not query.startswith("WHERE"):
            raise ValueError(
                f"Query must start with SELECT or WHERE. You can either pass the entire query or just the WHERE clause. found {query}"
            )
        if query.startswith("WHERE"):
            query = f"SELECT * FROM 'table' {query}"
        LOGGER.info(f"Running query: {query}")
        rs = duckdb.sql(query)
        if return_type == "arrow":
            return rs.arrow()
        elif return_type == "pandas":
            return rs.df()
    def plot_sql_query(self, query: str, labels: bool = True) -> Image.Image:
        """
        Plot the results of a SQL-Like query on the table.
        Args:
            query (str): SQL query to run.
            labels (bool): Whether to plot the labels or not.
        Returns:
            (PIL.Image): Image containing the plot.
        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'"
            result = exp.plot_sql_query(query)
            ```
        """
        result = self.sql_query(query, return_type="arrow")
        if len(result) == 0:
            LOGGER.info("No results found.")
            return None
        img = plot_query_result(result, plot_labels=labels)
        return Image.fromarray(img)
    def get_similar(
        self,
        img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None,
        idx: Union[int, List[int]] = None,
        limit: int = 25,
        return_type: str = "pandas",
    ) -> Union[DataFrame, Any]:  # pandas.dataframe or pyarrow.Table
        """
        Query the table for similar images. Accepts a single image or a list of images.
        Args:
            img (str or list): Path to the image or a list of paths to the images.
            idx (int or list): Index of the image in the table or a list of indexes.
            limit (int): Number of results to return. Defaults to 25.
            return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'.
        Returns:
            (pandas.DataFrame): A dataframe containing the results.
        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            similar = exp.get_similar(img='https://ultralytics.com/images/zidane.jpg')
            ```
        """
        assert return_type in {
            "pandas",
            "arrow",
        }, f"Return type should be either `pandas` or `arrow`, but got {return_type}"
        img = self._check_imgs_or_idxs(img, idx)
        similar = self.query(img, limit=limit)
        if return_type == "arrow":
            return similar
        elif return_type == "pandas":
            return similar.to_pandas()
    def plot_similar(
        self,
        img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None,
        idx: Union[int, List[int]] = None,
        limit: int = 25,
        labels: bool = True,
    ) -> Image.Image:
        """
        Plot the similar images. Accepts images or indexes.
        Args:
            img (str or list): Path to the image or a list of paths to the images.
            idx (int or list): Index of the image in the table or a list of indexes.
            labels (bool): Whether to plot the labels or not.
            limit (int): Number of results to return. Defaults to 25.
        Returns:
            (PIL.Image): Image containing the plot.
        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            similar = exp.plot_similar(img='https://ultralytics.com/images/zidane.jpg')
            ```
        """
        similar = self.get_similar(img, idx, limit, return_type="arrow")
        if len(similar) == 0:
            LOGGER.info("No results found.")
            return None
        img = plot_query_result(similar, plot_labels=labels)
        return Image.fromarray(img)
    def similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> DataFrame:
        """
        Calculate the similarity index of all the images in the table. Here, the index will contain the data points that
        are max_dist or closer to the image in the embedding space at a given index.
        Args:
            max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2.
            top_k (float): Percentage of the closest data points to consider when counting. Used to apply limit when running
                vector search. Defaults: None.
            force (bool): Whether to overwrite the existing similarity index or not. Defaults to True.
        Returns:
            (pandas.DataFrame): A dataframe containing the similarity index. Each row corresponds to an image, and columns
                include indices of similar images and their respective distances.
        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            sim_idx = exp.similarity_index()
            ```
        """
        if self.table is None:
            raise ValueError("Table is not created. Please create the table first.")
        sim_idx_table_name = f"{self.sim_idx_base_name}_thres_{max_dist}_top_{top_k}".lower()
        if sim_idx_table_name in self.connection.table_names() and not force:
            LOGGER.info("Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.")
            return self.connection.open_table(sim_idx_table_name).to_pandas()
        if top_k and not (1.0 >= top_k >= 0.0):
            raise ValueError(f"top_k must be between 0.0 and 1.0. Got {top_k}")
        if max_dist < 0.0:
            raise ValueError(f"max_dist must be greater than 0. Got {max_dist}")
        # top_k is a fraction of the table size; clamp to at least 1 row
        top_k = int(top_k * len(self.table)) if top_k else len(self.table)
        top_k = max(top_k, 1)
        features = self.table.to_lance().to_table(columns=["vector", "im_file"]).to_pydict()
        im_files = features["im_file"]
        embeddings = features["vector"]
        sim_table = self.connection.create_table(sim_idx_table_name, schema=get_sim_index_schema(), mode="overwrite")
        def _yield_sim_idx():
            """Generates a dataframe with similarity indices and distances for images."""
            for i in tqdm(range(len(embeddings))):
                sim_idx = self.table.search(embeddings[i]).limit(top_k).to_pandas().query(f"_distance <= {max_dist}")
                yield [
                    {
                        "idx": i,
                        "im_file": im_files[i],
                        "count": len(sim_idx),
                        "sim_im_files": sim_idx["im_file"].tolist(),
                    }
                ]
        sim_table.add(_yield_sim_idx())
        self.sim_index = sim_table
        return sim_table.to_pandas()
    def plot_similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> Image.Image:
        """
        Plot the similarity index of all the images in the table. Here, the index will contain the data points that are
        max_dist or closer to the image in the embedding space at a given index.
        Args:
            max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2.
            top_k (float): Percentage of closest data points to consider when counting. Used to apply limit when
                running vector search. Defaults to 0.01.
            force (bool): Whether to overwrite the existing similarity index or not. Defaults to True.
        Returns:
            (PIL.Image): Image containing the plot.
        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            similarity_idx_plot = exp.plot_similarity_index()
            similarity_idx_plot.show() # view image preview
            similarity_idx_plot.save('path/to/save/similarity_index_plot.png') # save contents to file
            ```
        """
        sim_idx = self.similarity_index(max_dist=max_dist, top_k=top_k, force=force)
        sim_count = sim_idx["count"].tolist()
        sim_count = np.array(sim_count)
        indices = np.arange(len(sim_count))
        # Create the bar plot
        plt.bar(indices, sim_count)
        # Customize the plot (optional)
        plt.xlabel("data idx")
        plt.ylabel("Count")
        plt.title("Similarity Count")
        buffer = BytesIO()
        plt.savefig(buffer, format="png")
        buffer.seek(0)
        # Use Pillow to open the image from the buffer
        return Image.fromarray(np.array(Image.open(buffer)))
    def _check_imgs_or_idxs(
        self, img: Union[str, np.ndarray, List[str], List[np.ndarray], None], idx: Union[None, int, List[int]]
    ) -> List[np.ndarray]:
        """Validate that exactly one of img/idx is given; resolve idx to image paths and return a list."""
        if img is None and idx is None:
            raise ValueError("Either img or idx must be provided.")
        if img is not None and idx is not None:
            raise ValueError("Only one of img or idx must be provided.")
        if idx is not None:
            idx = idx if isinstance(idx, list) else [idx]
            img = self.table.to_lance().take(idx, columns=["im_file"]).to_pydict()["im_file"]
        return img if isinstance(img, list) else [img]
    def ask_ai(self, query):
        """
        Ask AI a question.
        Args:
            query (str): Question to ask.
        Returns:
            (pandas.DataFrame): A dataframe containing filtered results to the SQL query.
        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            answer = exp.ask_ai('Show images with 1 person and 2 dogs')
            ```
        """
        result = prompt_sql_query(query)
        try:
            df = self.sql_query(result)
        except Exception as e:
            # The LLM-generated SQL may be malformed; report and return None instead of raising.
            LOGGER.error("AI generated query is not valid. Please try again with a different prompt")
            LOGGER.error(e)
            return None
        return df
    def visualize(self, result):
        """
        Visualize the results of a query. TODO.
        Args:
            result (pyarrow.Table): Table containing the results of a query.
        """
        pass
    def generate_report(self, result):
        """
        Generate a report of the dataset.
        TODO
        """
        pass
| [
"lancedb.connect"
] | [((1697, 1890), 'ultralytics.data.augment.Format', 'Format', ([], {'bbox_format': '"""xyxy"""', 'normalize': '(False)', 'return_mask': 'self.use_segments', 'return_keypoint': 'self.use_keypoints', 'batch_idx': '(True)', 'mask_ratio': 'hyp.mask_ratio', 'mask_overlap': 'hyp.overlap_mask'}), "(bbox_format='xyxy', normalize=False, return_mask=self.use_segments,\n return_keypoint=self.use_keypoints, batch_idx=True, mask_ratio=hyp.\n mask_ratio, mask_overlap=hyp.overlap_mask)\n", (1703, 1890), False, 'from ultralytics.data.augment import Format\n'), ((2270, 2332), 'ultralytics.utils.checks.check_requirements', 'checks.check_requirements', (["['lancedb>=0.4.3', 'duckdb<=0.9.2']"], {}), "(['lancedb>=0.4.3', 'duckdb<=0.9.2'])\n", (2295, 2332), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((2383, 2403), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2398, 2403), False, 'import lancedb\n'), ((2654, 2665), 'ultralytics.models.yolo.model.YOLO', 'YOLO', (['model'], {}), '(model)\n', (2658, 2665), False, 'from ultralytics.models.yolo.model import YOLO\n'), ((3997, 4025), 'ultralytics.data.utils.check_det_dataset', 'check_det_dataset', (['self.data'], {}), '(self.data)\n', (4014, 4025), False, 'from ultralytics.data.utils import check_det_dataset\n'), ((8632, 8670), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['f"""Running query: {query}"""'], {}), "(f'Running query: {query}')\n", (8643, 8670), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((8685, 8702), 'duckdb.sql', 'duckdb.sql', (['query'], {}), '(query)\n', (8695, 8702), False, 'import duckdb\n'), ((9664, 9684), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (9679, 9684), False, 'from PIL import Image\n'), ((12309, 12329), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (12324, 12329), False, 'from PIL import Image\n'), ((16581, 16600), 'numpy.array', 
'np.array', (['sim_count'], {}), '(sim_count)\n', (16589, 16600), True, 'import numpy as np\n'), ((16685, 16712), 'matplotlib.pyplot.bar', 'plt.bar', (['indices', 'sim_count'], {}), '(indices, sim_count)\n', (16692, 16712), True, 'from matplotlib import pyplot as plt\n'), ((16762, 16784), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""data idx"""'], {}), "('data idx')\n", (16772, 16784), True, 'from matplotlib import pyplot as plt\n'), ((16793, 16812), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (16803, 16812), True, 'from matplotlib import pyplot as plt\n'), ((16821, 16850), 'matplotlib.pyplot.title', 'plt.title', (['"""Similarity Count"""'], {}), "('Similarity Count')\n", (16830, 16850), True, 'from matplotlib import pyplot as plt\n'), ((16868, 16877), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (16875, 16877), False, 'from io import BytesIO\n'), ((16886, 16919), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buffer'], {'format': '"""png"""'}), "(buffer, format='png')\n", (16897, 16919), True, 'from matplotlib import pyplot as plt\n'), ((3458, 3544), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Table already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n 'Table already exists. Reusing it. Pass force=True to overwrite it.')\n", (3469, 3544), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((3646, 3756), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['f"""Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n f'Table {self.table_name} already exists. Reusing it. 
Pass force=True to overwrite it.'\n )\n", (3657, 3756), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((9532, 9564), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (9543, 9564), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((12176, 12208), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (12187, 12208), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((13786, 13889), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Similarity matrix already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n 'Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.'\n )\n", (13797, 13889), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((1207, 1218), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (1214, 1218), True, 'import numpy as np\n'), ((1272, 1285), 'cv2.imread', 'cv2.imread', (['f'], {}), '(f)\n', (1282, 1285), False, 'import cv2\n'), ((17039, 17057), 'PIL.Image.open', 'Image.open', (['buffer'], {}), '(buffer)\n', (17049, 17057), False, 'from PIL import Image\n'), ((18275, 18374), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""AI generated query is not valid. Please try again with a different prompt"""'], {}), "(\n 'AI generated query is not valid. 
Please try again with a different prompt'\n )\n", (18287, 18374), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((18377, 18392), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['e'], {}), '(e)\n', (18389, 18392), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((2430, 2440), 'pathlib.Path', 'Path', (['data'], {}), '(data)\n', (2434, 2440), False, 'from pathlib import Path\n'), ((6857, 6876), 'torch.stack', 'torch.stack', (['embeds'], {}), '(embeds)\n', (6868, 6876), False, 'import torch\n')] |
import logging
import lancedb
from langchain.chains import RetrievalQA
from langchain.document_loaders import TextLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import CTransformers
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import LanceDB
# Configure basic logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class ChatWithVideo:
    """Question answering over a video transcript using a local LLM with LanceDB-backed retrieval.

    Pipeline: load the transcript file -> split it into overlapping chunks ->
    embed the chunks into a LanceDB vector store -> answer questions with a
    RetrievalQA chain driven by a CTransformers model.
    """

    def __init__(self, input_file, llm_model_name, llm_model_file, llm_model_type, embedding_model_name):
        """Store file path and model identifiers; no models are loaded until run_query()."""
        self.input_file = input_file
        self.llm_model_name = llm_model_name
        self.llm_model_file = llm_model_file
        self.llm_model_type = llm_model_type
        self.embedding_model_type = None  # reserved; embeddings configured via embedding_model_name
        self.embedding_model_name = embedding_model_name

    def load_llm_model(self):
        """Download/load the CTransformers LLM. Returns the model, or None on failure."""
        try:
            logger.info(f"Starting to download the {self.llm_model_name} model...")
            llm_model = CTransformers(
                model=self.llm_model_name, model_file=self.llm_model_file, model_type=self.llm_model_type)
            logger.info(f"{self.llm_model_name} model successfully loaded.")
            return llm_model
        except Exception as e:
            logger.error(f"Error loading the {self.llm_model_name} model: {e}")
            return None

    def load_text_file(self):
        """Load the transcript file as LangChain documents. Returns the docs, or None on failure."""
        try:
            logger.info(f"Loading transcript file from {self.input_file}...")
            loader = TextLoader(self.input_file)
            docs = loader.load()
            logger.info("Transcript file successfully loaded.")
            return docs
        except Exception as e:
            logger.error(f"Error loading text file: {e}")
            return None

    @staticmethod
    def setup_database(embeddings):
        """Create (or overwrite) the LanceDB table used as the vector store.

        Raises on failure (logged first) so callers decide how to report it.
        """
        try:
            logger.info("Setting up the database...")
            db = lancedb.connect('/tmp/lancedb')
            table = db.create_table(
                "xxxxxxx",
                data=[{
                    "vector": embeddings.embed_query("Hello World"),
                    "text": "Hellos World",
                    "id": "1"
                }],
                mode="overwrite")
            logger.info("Database setup complete.")
            return table
        except Exception as e:
            logger.error(f"Error setting up the database: {e}")
            raise e  # Raising the exception for further debugging

    @staticmethod
    def prepare_embeddings(model_name):
        """Build a CPU HuggingFace embedding model. Returns the embeddings, or None on failure."""
        try:
            logger.info(f"Preparing embeddings with model: {model_name}...")
            embeddings = HuggingFaceEmbeddings(model_name=model_name, model_kwargs={'device': 'cpu'})
            logger.info("Embeddings prepared successfully.")
            return embeddings
        except Exception as e:
            logger.error(f"Error preparing embeddings: {e}")
            return None

    @staticmethod
    def prepare_documents(docs):
        """Split documents into overlapping chunks for embedding. Returns chunks, or None on failure."""
        if not docs:
            logger.info("No documents provided for preparation.")
            return None
        try:
            logger.info("Preparing documents...")
            text_splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=50)
            documents = text_splitter.split_documents(docs)
            logger.info("Documents prepared successfully.")
            return documents
        except Exception as e:
            logger.error(f"Error preparing documents: {e}")
            return None

    def run_query(self, query):
        """Answer `query` end-to-end. Always returns a string (answer or error description)."""
        if not query:
            logger.info("No query provided.")
            return "No query provided."
        logger.info(f"Running query: {query}")
        docs = self.load_text_file()
        if not docs:
            return "Failed to load documents."
        documents = self.prepare_documents(docs)
        if not documents:
            return "Failed to prepare documents."
        embeddings = self.prepare_embeddings(self.embedding_model_name)
        if not embeddings:
            return "Failed to prepare embeddings."
        # BUGFIX: setup_database() re-raises on failure, so the old bare
        # `if not db` guard could never trigger and the exception escaped this
        # method. Catch it here so run_query keeps its contract of returning
        # an error string like every other pipeline step.
        try:
            db = self.setup_database(embeddings)
        except Exception:
            return "Failed to setup database."
        if not db:
            return "Failed to setup database."
        try:
            docsearch = LanceDB.from_documents(documents, embeddings, connection=db)
            llm = self.load_llm_model()
            if not llm:
                return "Failed to load LLM model."
            template = """Use the following pieces of context to answer the question at the end.
            If you don't know the answer, just say that you don't know, don't try to make up an answer.
            Use three sentences maximum and keep the answer as concise as possible.
            Always say "thanks for asking!" at the end of the answer.
            {context}
            Question: {question}
            Helpful Answer:"""
            QA_CHAIN_PROMPT = PromptTemplate(input_variables=["context", "question"], template=template)
            logger.info("Prompt loaded")
            qa = RetrievalQA.from_chain_type(
                llm,
                chain_type='stuff',
                retriever=docsearch.as_retriever(),
                chain_type_kwargs={"prompt": QA_CHAIN_PROMPT})
            logger.info("Query processed successfully.")
            result = qa.run(query)
            logger.info(f"Result of the query: {result}")
            return result
        except Exception as e:
            logger.error(f"Error running query: {e}")
            return f"Error: {e}"
| [
"lancedb.connect"
] | [((400, 496), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(levelname)s - %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s - %(levelname)s - %(message)s')\n", (419, 496), False, 'import logging\n'), ((501, 528), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (518, 528), False, 'import logging\n'), ((1040, 1148), 'langchain.llms.CTransformers', 'CTransformers', ([], {'model': 'self.llm_model_name', 'model_file': 'self.llm_model_file', 'model_type': 'self.llm_model_type'}), '(model=self.llm_model_name, model_file=self.llm_model_file,\n model_type=self.llm_model_type)\n', (1053, 1148), False, 'from langchain.llms import CTransformers\n'), ((1546, 1573), 'langchain.document_loaders.TextLoader', 'TextLoader', (['self.input_file'], {}), '(self.input_file)\n', (1556, 1573), False, 'from langchain.document_loaders import TextLoader\n'), ((1947, 1978), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (1962, 1978), False, 'import lancedb\n'), ((2677, 2753), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'model_name', 'model_kwargs': "{'device': 'cpu'}"}), "(model_name=model_name, model_kwargs={'device': 'cpu'})\n", (2698, 2753), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((3215, 3279), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(200)', 'chunk_overlap': '(50)'}), '(chunk_size=200, chunk_overlap=50)\n', (3245, 3279), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((4265, 4325), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['documents', 'embeddings'], {'connection': 'db'}), '(documents, embeddings, connection=db)\n', (4287, 4325), False, 'from langchain.vectorstores import LanceDB\n'), ((4914, 4988), 
'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['context', 'question']", 'template': 'template'}), "(input_variables=['context', 'question'], template=template)\n", (4928, 4988), False, 'from langchain.prompts import PromptTemplate\n')] |
from collections import OrderedDict
from typing import List, Optional
import lancedb
import pandas as pd
import pyarrow as pa
from lance.vector import vec_to_table
import duckdb
import json
from mindsdb.integrations.libs.const import HANDLER_CONNECTION_ARG_TYPE as ARG_TYPE
from mindsdb.integrations.libs.response import RESPONSE_TYPE
from mindsdb.integrations.libs.response import HandlerResponse
from mindsdb.integrations.libs.response import HandlerResponse as Response
from mindsdb.integrations.libs.response import HandlerStatusResponse as StatusResponse
from mindsdb.integrations.libs.vectordatabase_handler import (
FilterCondition,
FilterOperator,
TableField,
VectorStoreHandler,
)
from mindsdb.utilities import log
logger = log.getLogger(__name__)
class LanceDBHandler(VectorStoreHandler):
"""This handler handles connection and execution of the LanceDB statements."""
name = "lancedb"
def __init__(self, name: str, **kwargs):
super().__init__(name, **kwargs)
self._connection_data = kwargs.get("connection_data")
self._client_config = {
"uri": self._connection_data.get("persist_directory"),
"api_key": self._connection_data.get("api_key", None),
"region": self._connection_data.get("region"),
"host_override": self._connection_data.get("host_override"),
}
# uri is required either for LanceDB Cloud or local
if not self._client_config["uri"]:
raise Exception(
"persist_directory is required for LanceDB connection!"
)
# uri, api_key and region is required either for LanceDB Cloud
elif self._client_config["uri"] and self._client_config["api_key"] and not self._client_config["region"]:
raise Exception(
"region is required for LanceDB Cloud connection!"
)
self._client = None
self.is_connected = False
self.connect()
def _get_client(self):
client_config = self._client_config
if client_config is None:
raise Exception("Client config is not set!")
return lancedb.connect(**client_config)
def __del__(self):
if self.is_connected is True:
self.disconnect()
def connect(self):
"""Connect to a LanceDB database."""
if self.is_connected is True:
return
try:
self._client = self._get_client()
self.is_connected = True
except Exception as e:
logger.error(f"Error connecting to LanceDB client, {e}!")
self.is_connected = False
def disconnect(self):
"""Close the database connection."""
if self.is_connected is False:
return
self._client = None
self.is_connected = False
def check_connection(self):
"""Check the connection to the LanceDB database."""
response_code = StatusResponse(False)
need_to_close = self.is_connected is False
try:
self._client.table_names()
response_code.success = True
except Exception as e:
logger.error(f"Error connecting to LanceDB , {e}!")
response_code.error_message = str(e)
finally:
if response_code.success is True and need_to_close:
self.disconnect()
if response_code.success is False and self.is_connected is True:
self.is_connected = False
return response_code
def _get_lancedb_operator(self, operator: FilterOperator) -> str:
# The in values are not returned with () and only one element is returned. Bug
mapping = {
FilterOperator.EQUAL: "=",
FilterOperator.NOT_EQUAL: "!=",
FilterOperator.LESS_THAN: "<",
FilterOperator.LESS_THAN_OR_EQUAL: "<=",
FilterOperator.GREATER_THAN: ">",
FilterOperator.GREATER_THAN_OR_EQUAL: ">=",
# FilterOperator.IN: "in",
# FilterOperator.NOT_IN: "not in",
FilterOperator.LIKE: "like",
FilterOperator.NOT_LIKE: "not like",
FilterOperator.IS_NULL: "is null",
FilterOperator.IS_NOT_NULL: "is not null",
}
if operator not in mapping:
raise Exception(f"Operator {operator} is not supported by LanceDB!")
return mapping[operator]
def _translate_condition(
self, conditions: List[FilterCondition]
) -> Optional[dict]:
"""
Translate a list of FilterCondition objects to string that can be used by LanceDB.
E.g.,
[
FilterCondition(
column="content",
op=FilterOperator.NOT_EQUAL,
value="a",
),
FilterCondition(
column="id",
op=FilterOperator.EQUAL,
value="6",
)
]
-->
"content != 'a' and id = '6'"
"""
# we ignore all non-metadata conditions
if not conditions:
return
filtered_conditions = [
condition
for condition in conditions
if condition.column.startswith(TableField.ID.value) or condition.column.startswith(TableField.CONTENT.value)
]
if len(filtered_conditions) == 0:
return None
# generate the LanceDB filter string
lancedb_conditions = []
for condition in filtered_conditions:
if isinstance(condition.value, str):
condition.value = f"'{condition.value}'"
condition_key = condition.column.split(".")[-1]
lancedb_conditions.append(
' '.join([condition_key, self._get_lancedb_operator(condition.op), str(condition.value)])
)
# Combine all conditions into a single string and return
return " and ".join(lancedb_conditions) if lancedb_conditions else None
def select(
self,
table_name: str,
columns: List[str] = None,
conditions: List[FilterCondition] = None,
offset: int = None,
limit: int = None,
) -> HandlerResponse:
try:
# Load collection table
collection = self._client.open_table(table_name)
except Exception as e:
return Response(
resp_type=RESPONSE_TYPE.ERROR,
error_message=f"Error loading collection {table_name}: {e}",
)
filters = self._translate_condition(conditions)
# check if embedding vector filter is present
vector_filter = (
[]
if conditions is None
else [
condition
for condition in conditions
if condition.column == TableField.SEARCH_VECTOR.value
]
)
if len(vector_filter) > 0:
vector_filter = vector_filter[0]
else:
vector_filter = None
if vector_filter is not None:
vec = json.loads(vector_filter.value) if isinstance(vector_filter.value, str) else vector_filter.value
result = collection.search(vec).select(columns).to_pandas()
result = result.rename(columns={"_distance": TableField.DISTANCE.value})
else:
result = self._client.open_table(table_name).to_pandas()
new_columns = columns + [TableField.DISTANCE.value] if TableField.DISTANCE.value in result.columns else columns
col_str = ', '.join([col for col in new_columns if col in (TableField.ID.value, TableField.CONTENT.value, TableField.METADATA.value, TableField.EMBEDDINGS.value, TableField.DISTANCE.value)])
where_str = f'where {filters}' if filters else ''
# implementing limit and offset. Not supported natively in lancedb
if limit and offset:
sql = f"""select {col_str} from result {where_str} limit {limit} offset {offset}"""
elif limit and not offset:
sql = f"""select {col_str} from result {where_str} limit {limit}"""
elif offset and not limit:
sql = f"""select {col_str} from result {where_str} offset {offset}"""
else:
sql = f"""select {col_str} from result {where_str}"""
data_df = duckdb.query(sql).to_df()
return Response(resp_type=RESPONSE_TYPE.TABLE, data_frame=data_df)
def insert(
self, table_name: str, data: pd.DataFrame, columns: List[str] = None
) -> HandlerResponse:
"""
Insert data into the LanceDB database.
In case of create table statements the there is a mismatch between the column types of the `data` pandas dataframe filled with data
and the empty base table column types which raises a pa.lib.ArrowNotImplementedError, in that case the base table is deleted (doesn't matter as it is empty)
and recreated with the right datatypes
"""
try:
collection = self._client.open_table(table_name)
df = data[[TableField.ID.value, TableField.CONTENT.value, TableField.METADATA.value, TableField.EMBEDDINGS.value]]
pa_data = pa.Table.from_pandas(df, preserve_index=False)
vec_data = vec_to_table(df[TableField.EMBEDDINGS.value].values.tolist())
new_pa_data = pa_data.append_column("vector", vec_data["vector"])
collection.add(new_pa_data)
except pa.lib.ArrowNotImplementedError:
collection_df = collection.to_pandas()
column_dtypes = collection_df.dtypes
df = df.astype(column_dtypes)
new_df = pd.concat([collection_df, df])
new_df['id'] = new_df['id'].apply(str)
pa_data = pa.Table.from_pandas(new_df, preserve_index=False)
vec_data = vec_to_table(df[TableField.EMBEDDINGS.value].values.tolist())
new_pa_data = pa_data.append_column("vector", vec_data["vector"])
self.drop_table(table_name)
self._client.create_table(table_name, new_pa_data)
except Exception as e:
return Response(
resp_type=RESPONSE_TYPE.ERROR,
error_message=f"Unable to insert data into collection `{table_name}`: {e}"
)
return Response(resp_type=RESPONSE_TYPE.OK)
def update(
self, table_name: str, data: pd.DataFrame, columns: List[str] = None
) -> HandlerResponse:
"""
Update data in the LanceDB database.
TODO: not implemented yet
"""
return super().update(table_name, data, columns)
def delete(
self, table_name: str, conditions: List[FilterCondition] = None
) -> HandlerResponse:
try:
filters = self._translate_condition(conditions)
if filters is None:
raise Exception("Delete query must have at least one condition!")
collection = self._client.open_table(table_name)
collection.delete(filters)
except Exception as e:
return Response(
resp_type=RESPONSE_TYPE.ERROR,
error_message=f"Error deleting from collection `{table_name}`: {e}",
)
return Response(resp_type=RESPONSE_TYPE.OK)
def create_table(self, table_name: str, if_not_exists=True) -> HandlerResponse:
"""
Create a collection with the given name in the LanceDB database.
"""
try:
data = {
TableField.ID.value: str,
TableField.CONTENT.value: str,
TableField.METADATA.value: object,
TableField.EMBEDDINGS.value: object,
}
df = pd.DataFrame(columns=data.keys()).astype(data)
self._client.create_table(table_name, df)
except Exception as e:
return Response(
resp_type=RESPONSE_TYPE.ERROR,
error_message=f"Unable to create collection `{table_name}`: {e}"
)
return Response(resp_type=RESPONSE_TYPE.OK)
def drop_table(self, table_name: str, if_exists=True) -> HandlerResponse:
"""
Delete a collection from the LanceDB database.
"""
try:
self._client.drop_table(table_name)
except ValueError:
if if_exists:
return Response(resp_type=RESPONSE_TYPE.OK)
else:
return Response(
resp_type=RESPONSE_TYPE.ERROR,
error_message=f"Table {table_name} does not exist!",
)
return Response(resp_type=RESPONSE_TYPE.OK)
def get_tables(self) -> HandlerResponse:
"""
Get the list of collections in the LanceDB database.
"""
collections = self._client.table_names()
collections_name = pd.DataFrame(
columns=["table_name"],
data=collections,
)
return Response(resp_type=RESPONSE_TYPE.TABLE, data_frame=collections_name)
def get_columns(self, table_name: str) -> HandlerResponse:
# check if collection exists
try:
df = self._client.open_table(table_name).to_pandas()
column_df = pd.DataFrame(df.dtypes).reset_index()
column_df.columns = ['column_name', 'data_type']
except ValueError:
return Response(
resp_type=RESPONSE_TYPE.ERROR,
error_message=f"Table {table_name} does not exist!",
)
return Response(resp_type=RESPONSE_TYPE.TABLE, data_frame=column_df)
# Connection arguments accepted by the LanceDB handler, in declaration order.
connection_args = OrderedDict(
    [
        (
            "persist_directory",
            {
                "type": ARG_TYPE.STR,
                "description": "The uri of the database.",
                "required": True,
            },
        ),
        (
            "api_key",
            {
                "type": ARG_TYPE.STR,
                "description": "If presented, connect to LanceDB cloud. Otherwise, connect to a database on file system or cloud storage.",
                "required": False,
            },
        ),
        (
            "region",
            {
                "type": ARG_TYPE.STR,
                "description": "The region to use for LanceDB Cloud.",
                "required": False,
            },
        ),
        (
            "host_override",
            {
                "type": ARG_TYPE.STR,
                "description": "The override url for LanceDB Cloud.",
                "required": False,
            },
        ),
    ]
)
# Example values for the connection arguments above.
connection_args_example = OrderedDict(
    [
        ("persist_directory", "~/lancedb"),
        ("api_key", None),
        ("region", "us-west-2"),
        ("host_override", None),
    ]
)
| [
"lancedb.connect"
] | [((752, 775), 'mindsdb.utilities.log.getLogger', 'log.getLogger', (['__name__'], {}), '(__name__)\n', (765, 775), False, 'from mindsdb.utilities import log\n'), ((13628, 14162), 'collections.OrderedDict', 'OrderedDict', ([], {'persist_directory': "{'type': ARG_TYPE.STR, 'description': 'The uri of the database.',\n 'required': True}", 'api_key': "{'type': ARG_TYPE.STR, 'description':\n 'If presented, connect to LanceDB cloud. Otherwise, connect to a database on file system or cloud storage.'\n , 'required': False}", 'region': "{'type': ARG_TYPE.STR, 'description':\n 'The region to use for LanceDB Cloud.', 'required': False}", 'host_override': "{'type': ARG_TYPE.STR, 'description': 'The override url for LanceDB Cloud.',\n 'required': False}"}), "(persist_directory={'type': ARG_TYPE.STR, 'description':\n 'The uri of the database.', 'required': True}, api_key={'type':\n ARG_TYPE.STR, 'description':\n 'If presented, connect to LanceDB cloud. Otherwise, connect to a database on file system or cloud storage.'\n , 'required': False}, region={'type': ARG_TYPE.STR, 'description':\n 'The region to use for LanceDB Cloud.', 'required': False},\n host_override={'type': ARG_TYPE.STR, 'description':\n 'The override url for LanceDB Cloud.', 'required': False})\n", (13639, 14162), False, 'from collections import OrderedDict\n'), ((14304, 14404), 'collections.OrderedDict', 'OrderedDict', ([], {'persist_directory': '"""~/lancedb"""', 'api_key': 'None', 'region': '"""us-west-2"""', 'host_override': 'None'}), "(persist_directory='~/lancedb', api_key=None, region='us-west-2',\n host_override=None)\n", (14315, 14404), False, 'from collections import OrderedDict\n'), ((2161, 2193), 'lancedb.connect', 'lancedb.connect', ([], {}), '(**client_config)\n', (2176, 2193), False, 'import lancedb\n'), ((2956, 2977), 'mindsdb.integrations.libs.response.HandlerStatusResponse', 'StatusResponse', (['(False)'], {}), '(False)\n', (2970, 2977), True, 'from mindsdb.integrations.libs.response import 
HandlerStatusResponse as StatusResponse\n'), ((8378, 8437), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.TABLE', 'data_frame': 'data_df'}), '(resp_type=RESPONSE_TYPE.TABLE, data_frame=data_df)\n', (8386, 8437), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((10313, 10349), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.OK'}), '(resp_type=RESPONSE_TYPE.OK)\n', (10321, 10349), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((11253, 11289), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.OK'}), '(resp_type=RESPONSE_TYPE.OK)\n', (11261, 11289), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((12048, 12084), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.OK'}), '(resp_type=RESPONSE_TYPE.OK)\n', (12056, 12084), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((12625, 12661), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.OK'}), '(resp_type=RESPONSE_TYPE.OK)\n', (12633, 12661), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((12869, 12923), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['table_name']", 'data': 'collections'}), "(columns=['table_name'], data=collections)\n", (12881, 12923), True, 'import pandas as pd\n'), ((12974, 13042), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.TABLE', 'data_frame': 'collections_name'}), '(resp_type=RESPONSE_TYPE.TABLE, data_frame=collections_name)\n', (12982, 13042), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((13546, 13607), 
'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.TABLE', 'data_frame': 'column_df'}), '(resp_type=RESPONSE_TYPE.TABLE, data_frame=column_df)\n', (13554, 13607), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((9204, 9250), 'pyarrow.Table.from_pandas', 'pa.Table.from_pandas', (['df'], {'preserve_index': '(False)'}), '(df, preserve_index=False)\n', (9224, 9250), True, 'import pyarrow as pa\n'), ((6388, 6493), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.ERROR', 'error_message': 'f"""Error loading collection {table_name}: {e}"""'}), "(resp_type=RESPONSE_TYPE.ERROR, error_message=\n f'Error loading collection {table_name}: {e}')\n", (6396, 6493), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((7089, 7120), 'json.loads', 'json.loads', (['vector_filter.value'], {}), '(vector_filter.value)\n', (7099, 7120), False, 'import json\n'), ((8337, 8354), 'duckdb.query', 'duckdb.query', (['sql'], {}), '(sql)\n', (8349, 8354), False, 'import duckdb\n'), ((9665, 9695), 'pandas.concat', 'pd.concat', (['[collection_df, df]'], {}), '([collection_df, df])\n', (9674, 9695), True, 'import pandas as pd\n'), ((9769, 9819), 'pyarrow.Table.from_pandas', 'pa.Table.from_pandas', (['new_df'], {'preserve_index': '(False)'}), '(new_df, preserve_index=False)\n', (9789, 9819), True, 'import pyarrow as pa\n'), ((10136, 10256), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.ERROR', 'error_message': 'f"""Unable to insert data into collection `{table_name}`: {e}"""'}), "(resp_type=RESPONSE_TYPE.ERROR, error_message=\n f'Unable to insert data into collection `{table_name}`: {e}')\n", (10144, 10256), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((11082, 11195), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', 
([], {'resp_type': 'RESPONSE_TYPE.ERROR', 'error_message': 'f"""Error deleting from collection `{table_name}`: {e}"""'}), "(resp_type=RESPONSE_TYPE.ERROR, error_message=\n f'Error deleting from collection `{table_name}`: {e}')\n", (11090, 11195), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((11881, 11991), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.ERROR', 'error_message': 'f"""Unable to create collection `{table_name}`: {e}"""'}), "(resp_type=RESPONSE_TYPE.ERROR, error_message=\n f'Unable to create collection `{table_name}`: {e}')\n", (11889, 11991), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((13391, 13488), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.ERROR', 'error_message': 'f"""Table {table_name} does not exist!"""'}), "(resp_type=RESPONSE_TYPE.ERROR, error_message=\n f'Table {table_name} does not exist!')\n", (13399, 13488), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((12380, 12416), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.OK'}), '(resp_type=RESPONSE_TYPE.OK)\n', (12388, 12416), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((12458, 12555), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.ERROR', 'error_message': 'f"""Table {table_name} does not exist!"""'}), "(resp_type=RESPONSE_TYPE.ERROR, error_message=\n f'Table {table_name} does not exist!')\n", (12466, 12555), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((13246, 13269), 'pandas.DataFrame', 'pd.DataFrame', (['df.dtypes'], {}), '(df.dtypes)\n', (13258, 13269), True, 'import pandas as pd\n')] |
"""Vector store."""
from langchain.vectorstores import LanceDB
import lancedb
class VectorStoreLanceDB:
    """LanceDB-backed vector store wrapped in LangChain's ``LanceDB`` interface."""

    def __init__(self, db_path, table_name, mode, embedding_model):
        """Connect to the database and open or create the backing table.

        Args:
            db_path: path/URI passed to ``lancedb.connect``.
            table_name: table to open or create.
            mode: "read" opens an existing table; "overwrite" recreates the
                table; any other value creates a new table.
            embedding_model: object exposing ``embed_text(text)`` and an
                underlying ``embedding_model`` usable by LangChain.
        """
        self.db = lancedb.connect(db_path)
        self.embedding_model = embedding_model
        # BUGFIX: removed stray debug print(db_path) and the unconditional
        # "Hello world" embedding call that ran even in read mode.
        if mode == "read":
            table = self.db.open_table(table_name)
        else:
            # Seed row so the table schema (vector/text/id) is defined.
            seed_data = [
                {
                    "vector": self.embedding_model.embed_text("Hello world"),
                    "text": "Hello World",
                    "id": "1"
                }
            ]
            create_kwargs = {"name": table_name, "data": seed_data}
            if mode == "overwrite":
                create_kwargs["mode"] = "overwrite"
            # pylint: disable=unexpected-keyword-arg
            table = self.db.create_table(**create_kwargs)
        # pylint: disable=not-callable
        self.vec_db = LanceDB(
            connection=table, embedding=self.embedding_model.embedding_model)

    def drop_table(self, table_name):
        """Drop `table_name` from the database."""
        self.db.drop_table(table_name)

    def add_text(self, text):
        """Embed and store a single text string."""
        self.vec_db.add_texts([text])

    def add_text_list(self, text_list):
        """Embed and store a list of text strings."""
        self.vec_db.add_texts(text_list)

    def add_document(self, doc):
        """Embed and store a single document."""
        self.vec_db.add_documents([doc])

    def add_document_list(self, doc_list):
        """Embed and store a list of documents."""
        self.vec_db.add_documents(doc_list)

    def similarity_search(self, query, k=3):
        """Return the page contents of the top-`k` most similar stored texts."""
        docs = self.vec_db.similarity_search(query, k=k)
        return [doc.page_content for doc in docs]
| [
"lancedb.connect"
] | [((226, 250), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (241, 250), False, 'import lancedb\n'), ((1608, 1681), 'langchain.vectorstores.LanceDB', 'LanceDB', ([], {'connection': 'table', 'embedding': 'self.embedding_model.embedding_model'}), '(connection=table, embedding=self.embedding_model.embedding_model)\n', (1615, 1681), False, 'from langchain.vectorstores import LanceDB\n')] |
import os
from dotenv import load_dotenv
import streamlit as st
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import ConversationalRetrievalChain
from langchain.agents.agent_toolkits import create_retriever_tool
from langchain.agents.agent_toolkits import create_conversational_retrieval_agent
from langchain.callbacks import StreamlitCallbackHandler
from langchain.tools import BaseTool, Tool, tool
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import ChatMessage
from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
from langchain import PromptTemplate, LLMChain
from langchain.vectorstores import LanceDB
import lancedb
import pandas as pd
from langchain.chains import RetrievalQA
# --- Page setup -------------------------------------------------------------
st.set_page_config(page_title="GlobeBotter", page_icon="🎬")
st.header('🎬 Welcome to MovieHarbor, your favourite movie recommender')
# Load secrets (OPENAI_API_KEY) from a local .env file
load_dotenv()
#os.environ["HUGGINGFACEHUB_API_TOKEN"]
openai_api_key = os.environ['OPENAI_API_KEY']
# Embedding model for queries — presumably the same model used to build the
# 'movies' table; confirm, otherwise similarity scores are meaningless
embeddings = OpenAIEmbeddings()
uri = "data/sample-lancedb"
db = lancedb.connect(uri)
table = db.open_table('movies')
# LangChain vector store backed by the pre-built LanceDB 'movies' table
docsearch = LanceDB(connection = table, embedding = embeddings)
# Import the movie dataset (pickled pandas DataFrame; has a 'genres' column)
md = pd.read_pickle('movies.pkl')
# Create a sidebar for user input
st.sidebar.title("Movie Recommendation System")
st.sidebar.markdown("Please enter your details and preferences below:")
# Ask the user for age, gender and favourite movie genre
age = st.sidebar.slider("What is your age?", 1, 100, 25)
gender = st.sidebar.radio("What is your gender?", ("Male", "Female", "Other"))
genre = st.sidebar.selectbox("What is your favourite movie genre?", md.explode('genres')["genres"].unique())
# Filter the movies based on the user input
df_filtered = md[md['genres'].apply(lambda x: genre in x)]
# Prompt pieces: system framing, user profile, and question suffix
template_prefix = """You are a movie recommender system that help users to find movies that match their preferences.
Use the following pieces of context to answer the question at the end.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}"""
user_info = """This is what we know about the user, and you can use this information to better tune your research:
Age: {age}
Gender: {gender}"""
template_suffix= """Question: {question}
Your response:"""
# NOTE: `user_info` is rebound here from template to the filled-in string
user_info = user_info.format(age = age, gender = gender)
COMBINED_PROMPT = template_prefix +'\n'+ user_info +'\n'+ template_suffix
print(COMBINED_PROMPT)
#setting up the chain
# NOTE(review): COMBINED_PROMPT is printed but never passed to the chain, and
# 'data' is not a standard retriever search_kwarg — confirm the genre filter
# is actually applied by the retriever
qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff",
    retriever=docsearch.as_retriever(search_kwargs={'data': df_filtered}), return_source_documents=True)
query = st.text_input('Enter your question:', placeholder = 'What action movies do you suggest?')
if query:
    result = qa({"query": query})
    st.write(result['result'])
| [
"lancedb.connect"
] | [((924, 983), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""GlobeBotter"""', 'page_icon': '"""🎬"""'}), "(page_title='GlobeBotter', page_icon='🎬')\n", (942, 983), True, 'import streamlit as st\n'), ((984, 1055), 'streamlit.header', 'st.header', (['"""🎬 Welcome to MovieHarbor, your favourite movie recommender"""'], {}), "('🎬 Welcome to MovieHarbor, your favourite movie recommender')\n", (993, 1055), True, 'import streamlit as st\n'), ((1057, 1070), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1068, 1070), False, 'from dotenv import load_dotenv\n'), ((1172, 1190), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1188, 1190), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1224, 1244), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1239, 1244), False, 'import lancedb\n'), ((1290, 1337), 'langchain.vectorstores.LanceDB', 'LanceDB', ([], {'connection': 'table', 'embedding': 'embeddings'}), '(connection=table, embedding=embeddings)\n', (1297, 1337), False, 'from langchain.vectorstores import LanceDB\n'), ((1375, 1403), 'pandas.read_pickle', 'pd.read_pickle', (['"""movies.pkl"""'], {}), "('movies.pkl')\n", (1389, 1403), True, 'import pandas as pd\n'), ((1439, 1486), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Movie Recommendation System"""'], {}), "('Movie Recommendation System')\n", (1455, 1486), True, 'import streamlit as st\n'), ((1487, 1558), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""Please enter your details and preferences below:"""'], {}), "('Please enter your details and preferences below:')\n", (1506, 1558), True, 'import streamlit as st\n'), ((1623, 1673), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""What is your age?"""', '(1)', '(100)', '(25)'], {}), "('What is your age?', 1, 100, 25)\n", (1640, 1673), True, 'import streamlit as st\n'), ((1683, 1752), 'streamlit.sidebar.radio', 'st.sidebar.radio', 
(['"""What is your gender?"""', "('Male', 'Female', 'Other')"], {}), "('What is your gender?', ('Male', 'Female', 'Other'))\n", (1699, 1752), True, 'import streamlit as st\n'), ((2834, 2926), 'streamlit.text_input', 'st.text_input', (['"""Enter your question:"""'], {'placeholder': '"""What action movies do you suggest?"""'}), "('Enter your question:', placeholder=\n 'What action movies do you suggest?')\n", (2847, 2926), True, 'import streamlit as st\n'), ((2972, 2998), 'streamlit.write', 'st.write', (["result['result']"], {}), "(result['result'])\n", (2980, 2998), True, 'import streamlit as st\n'), ((2688, 2696), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (2694, 2696), False, 'from langchain.llms import OpenAI\n')] |
from pathlib import Path
import geopandas as gpd
import lancedb
import matplotlib.pyplot as plt
from skimage import io
# Set working directory
wd = "/home/usr/Desktop/"
# To download the existing embeddings run aws s3 sync
# aws s3 sync s3://clay-worldcover-embeddings /my/dir/clay-worldcover-embeddings
vector_dir = Path(wd + "clay-worldcover-embeddings/v002/2021/")
# Create new DB structure or open existing
db = lancedb.connect(wd + "worldcoverembeddings_db")
# Read all vector embeddings into a list; each .gpq file is a geoparquet
# strip with an 'embeddings' column and a geometry per chip. Geometries are
# reprojected to web mercator (epsg:3857) so the bbox matches the WMS CRS
# used later for plotting.
data = []
for strip in vector_dir.glob("*.gpq"):
    print(strip)
    tile_df = gpd.read_parquet(strip).to_crs("epsg:3857")
    for _, row in tile_df.iterrows():
        data.append(
            {"vector": row["embeddings"], "year": 2021, "bbox": row.geometry.bounds}
        )
# Show table names
db.table_names()
# Drop existing table if exists
# NOTE(review): drop_table is unguarded — if the table is missing this likely
# raises; confirm or wrap in try/except for a first run
db.drop_table("worldcover-2021-v001")
# Create embeddings table and insert the vector data
tbl = db.create_table("worldcover-2021-v001", data=data, mode="overwrite")
def plot(df, cols=10):
    """Render a WorldCover RGB chip (via the Terrascope WMS) for each row's bbox."""
    _, axes = plt.subplots(1, cols, figsize=(20, 10))
    for axis, (_, row) in zip(axes.flatten(), df.iterrows()):
        bbox = row["bbox"]
        url = f"https://services.terrascope.be/wms/v2?SERVICE=WMS&version=1.1.1&REQUEST=GetMap&layers=WORLDCOVER_2021_S2_TCC&BBOX={','.join([str(dat) for dat in bbox])}&SRS=EPSG:3857&FORMAT=image/png&WIDTH=512&HEIGHT=512"  # noqa: E501
        axis.imshow(io.imread(url))
        axis.set_axis_off()
    plt.tight_layout()
    plt.show()
# Pick one embedding by row index as the query vector, fetch the 5 most
# similar chips, and plot them.
# NOTE(review): index 10540 assumes the table holds at least that many rows.
v = tbl.to_pandas()["vector"].values[10540]
result = tbl.search(query=v).limit(5).to_pandas()
plot(result, 5)
| [
"lancedb.connect"
] | [((321, 371), 'pathlib.Path', 'Path', (["(wd + 'clay-worldcover-embeddings/v002/2021/')"], {}), "(wd + 'clay-worldcover-embeddings/v002/2021/')\n", (325, 371), False, 'from pathlib import Path\n'), ((421, 468), 'lancedb.connect', 'lancedb.connect', (["(wd + 'worldcoverembeddings_db')"], {}), "(wd + 'worldcoverembeddings_db')\n", (436, 468), False, 'import lancedb\n'), ((1096, 1135), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', 'cols'], {'figsize': '(20, 10)'}), '(1, cols, figsize=(20, 10))\n', (1108, 1135), True, 'import matplotlib.pyplot as plt\n'), ((1546, 1564), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1562, 1564), True, 'import matplotlib.pyplot as plt\n'), ((1569, 1579), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1577, 1579), True, 'import matplotlib.pyplot as plt\n'), ((1475, 1489), 'skimage.io.imread', 'io.imread', (['url'], {}), '(url)\n', (1484, 1489), False, 'from skimage import io\n'), ((591, 614), 'geopandas.read_parquet', 'gpd.read_parquet', (['strip'], {}), '(strip)\n', (607, 614), True, 'import geopandas as gpd\n')] |
# Copyright 2023 llmware
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""The embeddings module implements the supported vector databases.
The common abstraction for all supported vector databases is the EmbeddingHandler class, which supports
creating a new embedding, as well as searching and deleting the vector index. The module also implements the
_EmbeddingUtils class, which provides a set of functions used by all vector database classes.
"""
import os
import logging
import numpy as np
import re
import time
import uuid
from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection
from pymongo import MongoClient
try:
import faiss
except ImportError:
pass
# note: update- adding psycopg and postgres to core llmware package in version 0.2.0
try:
from pgvector.psycopg import register_vector
import psycopg
except ImportError:
pass
# optional imports of redis - not in project requirements
try:
import redis
from redis.commands.search.field import TagField, TextField, NumericField
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
from redis.commands.search.query import Query
from redis.commands.search.field import VectorField
except ImportError:
pass
# optional imports of qdrant - not in project requirements
try:
from qdrant_client import QdrantClient
from qdrant_client.http.models import Distance, VectorParams, PointStruct
except ImportError:
pass
# optional import of pinecone - not in project requirements
try:
import pinecone
except ImportError:
pass
# optional import of lancedb - not in project requirements
try:
import lancedb
except ImportError:
pass
# optional import of neo4j - not in project requirements
try:
import neo4j
from neo4j import GraphDatabase
except:
pass
# optional import of chromadb - not in project requirements
try:
import chromadb
except:
pass
from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, \
PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig
from llmware.exceptions import (UnsupportedEmbeddingDatabaseException, EmbeddingModelNotFoundException,
DependencyNotInstalledException)
from llmware.resources import CollectionRetrieval, CollectionWriter
from llmware.status import Status
from llmware.util import Utilities
class EmbeddingHandler:
"""Provides an interface to all supported vector dabases, which is used by the ``Library`` class.
``EmbeddingHandler`` is responsible for embedding-related interactions between a library and a vector
store. This includes creating, reading, updating, and deleting (CRUD) embeddings. The ``EmbeddingHandler``,
in addition, synchronizes the vector store with the text collection database, this includes incremental
updates to the embeddings. Finally, it also allows one library to have multiple embeddings.
Parameters
----------
library : Library
The library with which the ``EmbeddingHandler`` interacts.
Returns
-------
embedding_handler : EmbeddingHandler
A new ``EmbeddingHandler`` object.
"""
def __init__(self, library):
self.supported_embedding_dbs = LLMWareConfig().get_supported_vector_db()
self.library = library
def create_new_embedding(self, embedding_db, model, doc_ids=None, batch_size=500):
""" Creates new embedding - routes to correct vector db and loads the model and text collection """
embedding_class = self._load_embedding_db(embedding_db, model=model)
embedding_status = embedding_class.create_new_embedding(doc_ids, batch_size)
if embedding_status:
if "embeddings_created" in embedding_status:
if embedding_status["embeddings_created"] > 0:
# only update if non-zero embeddings created
if "embedded_blocks" in embedding_status:
embedded_blocks = embedding_status["embedded_blocks"]
else:
embedded_blocks = -1
logging.warning("update: embedding_handler - unable to determine if embeddings have "
"been properly counted and captured. Please check if databases connected.")
self.library.update_embedding_status("yes", model.model_name, embedding_db,
embedded_blocks=embedded_blocks,
embedding_dims=embedding_status["embedding_dims"],
time_stamp=embedding_status["time_stamp"])
return embedding_status
def search_index(self, query_vector, embedding_db, model, sample_count=10):
""" Main entry point to vector search query """
# Need to normalize the query_vector.
# Sometimes it comes in as [[1.1,2.1,3.1]] (from Transformers) and sometimes as [1.1,2.1,3.1]
# We'll make sure it's the latter and then each Embedding Class will deal with it how it needs to
if len(query_vector) == 1:
query_vector = query_vector[0]
embedding_class = self._load_embedding_db(embedding_db, model=model)
return embedding_class.search_index(query_vector,sample_count=sample_count)
def delete_index(self, embedding_db, model_name, embedding_dims):
""" Deletes vector embedding - note: does not delete the underlying text collection """
embedding_class = self._load_embedding_db(embedding_db, model_name=model_name,
embedding_dims=embedding_dims)
embedding_class.delete_index()
self.library.update_embedding_status("delete", model_name, embedding_db,
embedded_blocks=0, delete_record=True)
return 0
def _load_embedding_db(self, embedding_db, model=None, model_name=None, embedding_dims=None):
""" Looks up and loads the selected vector database """
if not embedding_db in self.supported_embedding_dbs:
raise UnsupportedEmbeddingDatabaseException(embedding_db)
if embedding_db == "milvus":
return EmbeddingMilvus(self.library, model=model, model_name=model_name,
embedding_dims=embedding_dims)
if embedding_db == "faiss":
return EmbeddingFAISS(self.library, model=model, model_name=model_name,
embedding_dims=embedding_dims)
if embedding_db == "pinecone":
return EmbeddingPinecone(self.library, model=model, model_name=model_name,
embedding_dims=embedding_dims)
if embedding_db == "mongo_atlas":
return EmbeddingMongoAtlas(self.library, model=model,model_name=model_name,
embedding_dims=embedding_dims)
if embedding_db == "redis":
return EmbeddingRedis(self.library, model=model, model_name=model_name,
embedding_dims=embedding_dims)
if embedding_db == "qdrant":
return EmbeddingQdrant(self.library, model=model, model_name=model_name,
embedding_dims=embedding_dims)
if embedding_db == "lancedb":
return EmbeddingLanceDB(self.library, model=model, model_name=model_name,
embedding_dims=embedding_dims)
# note: pg_vector == postgres (two aliases provided)
if embedding_db in ["pg_vector", "postgres"]:
return EmbeddingPGVector(self.library,model=model, model_name=model_name,
embedding_dims=embedding_dims)
if embedding_db == "neo4j":
return EmbeddingNeo4j(self.library, model=model, model_name=model_name,
embedding_dims=embedding_dims)
if embedding_db == "chromadb":
return EmbeddingChromaDB(self.library, model=model, model_name=model_name,
embedding_dims=embedding_dims)
def generate_index_name(self, account_name, library_name, model_name, max_component_length=19):

    """ Creates a unique name for the vector index that concats library_name + model_name + account_name.

    Each of library_name and model_name is stripped of non-alphanumeric characters; if a
    component is still longer than max_component_length, an equal-sized chunk is removed from
    its middle so both ends are preserved. The full concatenation is returned lowercased.

    Fix: previously the append sat inside the too-long branch, so any component at or under
    max_component_length was silently dropped from the index name - every sanitized
    component is now always included.
    """

    index_name = account_name

    # Remove non-alphanumerics from the remaining components and if still longer
    # than the max, remove middle chars
    for component in [library_name, model_name]:

        component = re.sub(r'\W+', '', component)

        if len(component) > max_component_length:
            excess_length = len(component) - max_component_length
            left_length = (len(component) - excess_length) // 2
            right_start = left_length + excess_length
            # keep the two ends, drop the middle - exactly max_component_length chars remain
            component = component[:left_length] + component[right_start:]

        index_name += component

    # Return the lowercase name:
    return index_name.lower()
class _EmbeddingUtils:
"""Provides functions to vector stores, such as creating names for the text collection database as well
as creating names for vector such, and creating a summary of an embedding process.
``_EmbeddingUTils`` provides utilities used by all vector stores, especially in interaction and
synchronization with the underlying text collection database. In short, it has functions for
creating names, the text index, the embedding flag, the block curser, and the embedding summary.
Parameters
----------
library_name : str, default=None
Name of the library.
model_name : str, default=None
Name of the model.
account_name : str, default=None
Name of the account.
db_name : str, default=None
Name of the vector store.
embedding_dims : int, default=None
Dimension of the embedding.
Returns
-------
embedding_utils : _EmbeddingUtils
A new ``_EmbeddingUtils`` object.
"""
def __init__(self, library_name=None, model_name=None, account_name=None,db_name=None,
embedding_dims=None):
self.library_name = library_name
self.account_name = account_name
self.model_name = model_name
self.db_name = db_name
self.embedding_dims = embedding_dims
self.collection_key= None
self.collection_name= None
def create_safe_collection_name(self):
""" Creates concatenated safe name for collection """
converted_library_name = re.sub(r"[-@_.\/ ]", "", self.library_name).lower()
if len(converted_library_name) > 18:
converted_library_name = converted_library_name[0:18]
converted_model_name = re.sub(r"[-@_.\/ ]", "", self.model_name).lower()
if len(converted_model_name) > 18:
# chops off the start of the model name if longer than 18 chars
starter = len(converted_model_name) - 18
converted_model_name = converted_model_name[starter:]
converted_account_name = re.sub(r"[-@_.\/ ]", "", self.account_name).lower()
if len(converted_model_name) > 7:
converted_account_name = converted_account_name[0:7]
# create collection name here - based on account + library + model_name
self.collection_name = f"{converted_account_name}_{converted_library_name}_{converted_model_name}"
return self.collection_name
def create_db_specific_key(self):
""" Creates db_specific key """
# will leave "-" and "_" in file path, but remove "@" and " "
model_safe_path = re.sub(r"[@ ]", "", self.model_name).lower()
self.collection_key = f"embedding_{self.db_name}_" + model_safe_path
return self.collection_key
def get_blocks_cursor(self, doc_ids = None):
""" Retrieves a cursor from the text collection database that will define the scope of text chunks
to be embedded """
if not self.collection_key:
self.create_db_specific_key()
cr = CollectionRetrieval(self.library_name, account_name=self.account_name)
num_of_blocks, all_blocks_cursor = cr.embedding_job_cursor(self.collection_key,doc_id=doc_ids)
return all_blocks_cursor, num_of_blocks
def generate_embedding_summary(self, embeddings_created):
""" Common summary dictionary at end of embedding job """
if not self.collection_key:
self.create_db_specific_key()
cr = CollectionRetrieval(self.library_name,account_name=self.account_name)
embedded_blocks = cr.count_embedded_blocks(self.collection_key)
embedding_summary = {"embeddings_created": embeddings_created,
"embedded_blocks": embedded_blocks,
"embedding_dims": self.embedding_dims,
"time_stamp": Utilities().get_current_time_now()}
# print("update: embedding_summary - ", embedding_summary)
return embedding_summary
def update_text_index(self, block_ids, current_index):
""" Update main text collection db """
for block_id in block_ids:
cw = CollectionWriter(self.library_name, account_name=self.account_name)
cw.add_new_embedding_flag(block_id,self.collection_key,current_index)
current_index += 1
return current_index
def lookup_text_index(self, _id, key="_id"):
"""Returns a single block entry from text index collection with lookup by _id - returns a list, not a cursor"""
cr = CollectionRetrieval(self.library_name, account_name=self.account_name)
block_cursor = cr.lookup(key, _id)
return block_cursor
def lookup_embedding_flag(self, key, value):
""" Used to look up an embedding flag in text collection index """
# used specifically by FAISS index - which uses the embedding flag value as lookup
cr = CollectionRetrieval(self.library_name, account_name=self.account_name)
block_cursor = cr.embedding_key_lookup(key,value)
return block_cursor
def unset_text_index(self):
"""Removes embedding key flag for library, e.g., 'unsets' a group of blocks in text index """
cw = CollectionWriter(self.library_name, account_name=self.account_name)
cw.unset_embedding_flag(self.collection_key)
return 0
class EmbeddingMilvus:

    """Implements the vector database Milvus.

    ``EmbeddingMilvus`` implements the interface to the ``Milvus`` vector store. It is used by the
    ``EmbeddingHandler``.

    Parameters
    ----------
    library : object
        A ``Library`` object.

    model : object
        A model object. See :mod:`models` for available models.

    model_name : str, default=None
        Name of the model.

    embedding_dims : int, default=None
        Dimension of the embedding.

    Returns
    -------
    embedding_milvus : EmbeddingMilvus
        A new ``EmbeddingMilvus`` object.
    """

    def __init__(self, library, model=None, model_name=None, embedding_dims=None):

        self.library = library
        self.library_name = library.library_name
        self.account_name = library.account_name
        self.milvus_alias = "default"

        # Connect to milvus - host/port/db_name come from MilvusConfig
        connections.connect(self.milvus_alias,
                            host=MilvusConfig.get_config("host"),
                            port=MilvusConfig.get_config("port"),
                            db_name=MilvusConfig.get_config("db_name"))

        # look up model card - either a model object or a model name is required
        if not model and not model_name:
            raise EmbeddingModelNotFoundException("no-model-or-model-name-provided")

        self.model=model
        self.model_name=model_name
        self.embedding_dims = embedding_dims

        # if model passed (not None), then use model name
        if self.model:
            self.model_name = self.model.model_name
            self.embedding_dims = self.model.embedding_dims

        # shared naming / text-index synchronization helper
        self.utils = _EmbeddingUtils(library_name=self.library_name,
                                     model_name=self.model_name,
                                     account_name=self.account_name,
                                     db_name="milvus",
                                     embedding_dims=self.embedding_dims)

        self.collection_name = self.utils.create_safe_collection_name()
        self.collection_key = self.utils.create_db_specific_key()

        # if collection does not exist, create it
        if not utility.has_collection(self.collection_name):
            # schema: text-collection id (primary), doc id, and the embedding vector
            fields = [
                FieldSchema(name="block_mongo_id", dtype=DataType.VARCHAR, is_primary=True, max_length=30,auto_id=False),
                FieldSchema(name="block_doc_id", dtype=DataType.INT64),
                FieldSchema(name="embedding_vector", dtype=DataType.FLOAT_VECTOR, dim=self.embedding_dims)
            ]

            collection = Collection(self.collection_name, CollectionSchema(fields))

            # IVF_FLAT index with L2 distance over the vector field
            index_params = {
                "metric_type": "L2",
                "index_type": "IVF_FLAT",
                "params": {"nlist": 1024}
            }
            collection.create_index("embedding_vector", index_params)

        self.collection = Collection(self.collection_name)

    def create_new_embedding(self, doc_ids = None, batch_size=500):

        """ Create new embedding - pulls un-embedded blocks from the text collection in batches,
        embeds them with the model, inserts into Milvus, and flags the blocks as embedded.
        Returns a summary dictionary of the job. """

        all_blocks_cursor, num_of_blocks = self.utils.get_blocks_cursor(doc_ids=doc_ids)

        # Initialize a new status
        status = Status(self.account_name)
        status.new_embedding_status(self.library_name, self.model_name, num_of_blocks)

        embeddings_created = 0
        current_index = 0
        finished = False

        # all_blocks_iter = iter(all_blocks_cursor)
        while not finished:

            # NOTE(review): this rebinding shadows the doc_ids parameter after the
            # cursor has been created - harmless here since doc_ids is not re-read
            block_ids, doc_ids, sentences = [], [], []

            # Build the next batch - each cursor pull counts toward batch_size,
            # including blocks skipped for empty text
            for i in range(batch_size):

                block = all_blocks_cursor.pull_one()

                if not block:
                    finished = True
                    break

                text_search = block["text_search"].strip()
                if not text_search or len(text_search) < 1:
                    continue

                # data model
                block_ids.append(str(block["_id"]))
                doc_ids.append(int(block["doc_ID"]))
                sentences.append(text_search)

            if len(sentences) > 0:
                # Process the batch - columnar insert: [ids, doc_ids, vectors]
                vectors = self.model.embedding(sentences)
                data = [block_ids, doc_ids, vectors]
                self.collection.insert(data)

                # flag the embedded blocks in the text collection
                current_index = self.utils.update_text_index(block_ids,current_index)

                embeddings_created += len(sentences)
                status.increment_embedding_status(self.library_name, self.model_name, len(sentences))

                # will add configuration options to show/display
                print (f"update: embedding_handler - Milvus - Embeddings Created: {embeddings_created} of {num_of_blocks}")

        # persist inserted entities
        self.collection.flush()

        embedding_summary = self.utils.generate_embedding_summary(embeddings_created)
        logging.info("update: EmbeddingHandler - Milvus - embedding_summary - %s", embedding_summary)

        return embedding_summary

    def search_index(self, query_embedding_vector, sample_count=10):

        """ Runs a nearest-neighbor (L2) search against the Milvus collection and resolves each
        hit back through the text collection index - returns a list of (block, distance) tuples. """

        self.collection.load()

        search_params = {
            "metric_type": "L2",
            "params": {"nprobe": 10}
        }

        # TODO: add optional / configurable partitions

        result = self.collection.search(
            data=[query_embedding_vector],
            anns_field="embedding_vector",
            param=search_params,
            limit=sample_count,
            output_fields=["block_mongo_id"]
        )

        block_list = []
        for hits in result:
            for hit in hits:
                _id = hit.entity.get('block_mongo_id')
                # reverse lookup of the block in the text collection by its id
                block_result_list = self.utils.lookup_text_index(_id)

                for block in block_result_list:
                    block_list.append((block, hit.distance))

                """
                try:
                    block = block_cursor.next()
                    block_list.append((block, hit.distance))
                except StopIteration:
                    # The cursor is empty (no blocks found)
                    continue
                """

        return block_list

    def delete_index(self):

        """ Drops the Milvus collection, disconnects, and unsets the embedding flags
        in the text collection index. """

        collection = Collection(self.collection_name)
        collection.release()
        utility.drop_collection(self.collection_name)
        connections.disconnect(self.milvus_alias)

        # Synchronize and remove embedding flag from collection db
        self.utils.unset_text_index()

        return 1
class EmbeddingFAISS:

    """Implements the vector database FAISS.

    ``EmbeddingFAISS`` implements the interface to the ``FAISS`` vector database. It is used by the
    ``EmbeddingHandler``.

    Parameters
    ----------
    library : object
        A ``Library`` object.

    model : object
        A model object. See :mod:`models` for available models.

    model_name : str, default=None
        Name of the model.

    embedding_dims : int, default=None
        Dimension of the embedding.

    Returns
    -------
    embedding_faiss : EmbeddingFAISS
        A new ``EmbeddingFAISS`` object.
    """

    def __init__(self, library, model=None, model_name=None, embedding_dims=None):

        self.library = library
        self.library_name = library.library_name
        self.account_name = library.account_name
        # FAISS index is loaded lazily - see create_new_embedding / search_index
        self.index = None

        # look up model card - either a model object or a model name is required
        if not model and not model_name:
            raise EmbeddingModelNotFoundException("no-model-or-model-name-provided")

        self.model=model
        self.model_name=model_name
        self.embedding_dims=embedding_dims

        # if model passed (not None), then use model name and embedding dims
        if self.model:
            self.model_name = self.model.model_name
            self.embedding_dims = self.model.embedding_dims

        # embedding file name here
        self.utils = _EmbeddingUtils(library_name=self.library_name,
                                     model_name=self.model_name,
                                     account_name=self.account_name,
                                     db_name="faiss",
                                     embedding_dims=self.embedding_dims)

        self.collection_name = self.utils.create_safe_collection_name()
        self.collection_key = self.utils.create_db_specific_key()

        # will leave "-" and "_" in file path, but remove "@" and " "
        model_safe_path = re.sub(r"[@\/. ]", "", self.model_name).lower()

        # FAISS persists the whole index as a single file under the library's embedding path
        self.embedding_file_path = os.path.join(self.library.embedding_path, model_safe_path, "embedding_file_faiss")

        # self.collection_key = "embedding_faiss_" + model_safe_path

    def create_new_embedding(self, doc_ids=None, batch_size=100):

        """ Load or create index, then embed all un-embedded blocks in scope in batches,
        saving the index file at the end - returns a summary dictionary of the job. """

        if not self.index:

            if os.path.exists(self.embedding_file_path):

                # shifted faiss to optional dependency
                # note: there may be an edge case where this faiss command would fail even with
                # library installed, but we throw dependency not installed error as most likely cause

                try:
                    self.index = faiss.read_index(self.embedding_file_path)
                except:
                    raise DependencyNotInstalledException("faiss-cpu")
            else:
                try:
                    self.index = faiss.IndexFlatL2(self.embedding_dims)
                except:
                    raise DependencyNotInstalledException("faiss-cpu")

        # get cursor for text collection with blocks requiring embedding
        all_blocks_cursor, num_of_blocks = self.utils.get_blocks_cursor(doc_ids=doc_ids)

        # Initialize a new status
        status = Status(self.account_name)
        status.new_embedding_status(self.library_name, self.model_name, num_of_blocks)

        embeddings_created = 0
        finished = False

        # batch_size = 50
        # all_blocks_iter = iter(all_blocks_cursor)
        while not finished:

            block_ids, sentences = [], []

            # FAISS has no ids of its own - the running index position (ntotal) is
            # written to the text collection as the embedding flag for reverse lookup
            current_index = self.index.ntotal

            # Build the next batch - each cursor pull counts toward batch_size,
            # including blocks skipped for empty text
            for i in range(batch_size):

                block = all_blocks_cursor.pull_one()

                if not block:
                    finished = True
                    break

                text_search = block["text_search"].strip()
                if not text_search or len(text_search) < 1:
                    continue

                block_ids.append(str(block["_id"]))
                sentences.append(text_search)

            if len(sentences) > 0:
                # Process the batch
                vectors = self.model.embedding(sentences)
                self.index.add(np.array(vectors))

                # flag the embedded blocks with their vector positions
                current_index = self.utils.update_text_index(block_ids,current_index)

                embeddings_created += len(sentences)
                status.increment_embedding_status(self.library.library_name, self.model_name, len(sentences))

                # will add options to display/hide
                print (f"update: embedding_handler - FAISS - Embeddings Created: {embeddings_created} of {num_of_blocks}")

        # Ensure any existing file is removed before saving
        # NOTE(review): remove-then-write is not atomic - a crash between the two
        # calls would lose the on-disk index; confirm acceptable
        if os.path.exists(self.embedding_file_path):
            os.remove(self.embedding_file_path)

        os.makedirs(os.path.dirname(self.embedding_file_path), exist_ok=True)
        faiss.write_index(self.index, self.embedding_file_path)

        embedding_summary = self.utils.generate_embedding_summary(embeddings_created)
        logging.info("update: EmbeddingHandler - FAISS - embedding_summary - %s", embedding_summary)

        return embedding_summary

    def search_index (self, query_embedding_vector, sample_count=10):

        """ Search FAISS index - returns a list of (block, distance) tuples resolved by
        reverse lookup of each FAISS position in the text collection. """

        if not self.index:
            self.index = faiss.read_index(self.embedding_file_path)

        distance_list, index_list = self.index.search(np.array([query_embedding_vector]), sample_count)

        block_list = []
        for i, index in enumerate(index_list[0]):

            index_int = int(index.item())

            # FAISS is unique in that it requires a 'reverse lookup' to match the FAISS index in the
            # text collection

            block_result_list = self.utils.lookup_embedding_flag(self.collection_key,index_int)

            # block_result_list = self.utils.lookup_text_index(index_int, key=self.collection_key)

            for block in block_result_list:
                block_list.append((block, distance_list[0][i]))

        return block_list

    def delete_index(self):

        """ Delete FAISS index - removes the index file and unsets the embedding flags. """

        if os.path.exists(self.embedding_file_path):
            os.remove(self.embedding_file_path)

            # remove emb key - 'unset' the blocks in the text collection
            self.utils.unset_text_index()

        return 1
class EmbeddingLanceDB:

    """Implements the vector database LanceDB.

    ``EmbeddingLanceDB`` implements the interface to the ``LanceDB`` vector database. It is used by the
    ``EmbeddingHandler``.

    Parameters
    ----------
    library : object
        A ``Library`` object.

    model : object
        A model object. See :mod:`models` for available models.

    model_name : str, default=None
        Name of the model.

    embedding_dims : int, default=None
        Dimension of the embedding.

    Returns
    -------
    embedding_lancedb : EmbeddingLanceDB
        A new ``EmbeddingLanceDB`` object.
    """

    def __init__(self, library, model=None, model_name=None, embedding_dims=None):

        # connection uri for the local LanceDB store
        self.uri = LanceDBConfig().get_config("uri")
        self.library = library
        self.library_name = self.library.library_name
        self.account_name = self.library.account_name

        # look up model card - either a model object or a model name is required
        if not model and not model_name:
            raise EmbeddingModelNotFoundException("no-model-or-model-name-provided")

        self.model = model
        self.model_name = model_name
        self.embedding_dims = embedding_dims

        # if model passed (not None), then use model name
        if self.model:
            self.model_name = self.model.model_name
            self.embedding_dims = model.embedding_dims

        # initialize LanceDB
        self.index = None

        # initiate connection to LanceDB locally
        # NOTE(review): broad except re-raised as ImportError also masks non-import
        # failures (e.g. bad uri permissions) - confirm intended
        try:
            self.db = lancedb.connect(self.uri)
        except:
            raise ImportError(
                "Exception - could not connect to LanceDB - please check:"
                "1. LanceDB python package is installed, e.g,. 'pip install lancedb', and"
                "2. The uri is properly set.")

        # shared naming / text-index synchronization helper
        self.utils = _EmbeddingUtils(library_name=self.library_name,
                                     model_name=self.model_name,
                                     account_name=self.account_name,
                                     db_name="lancedb",
                                     embedding_dims=self.embedding_dims)

        self.collection_name = self.utils.create_safe_collection_name()
        self.collection_key = self.utils.create_db_specific_key()

        # build new name here
        # self.index_name = self.collection_name

        if self.collection_name not in self.db.table_names():
            self.index = self._init_table(self.collection_name)
            # you don't need to create an index with lanceDB upto million vectors is efficiently supported with peak performance,
            # Creating an index will fasten the search process and it needs to be done once table has some vectors already.

        # connect to table
        self.index = self.db.open_table(self.collection_name)

    def _init_table(self,table_name):

        """ Creates a new LanceDB table with a fixed-size float32 vector column and a string id column. """

        try:
            import pyarrow as pa
        except:
            raise DependencyNotInstalledException("pyarrow")

        schema = pa.schema([
            pa.field("vector", pa.list_(pa.float32(), int(self.embedding_dims))),
            pa.field("id", pa.string()),
        ])
        # mode="overwrite" replaces any pre-existing table of the same name
        tbl = self.db.create_table(table_name, schema=schema, mode="overwrite")
        return tbl

    def create_new_embedding(self, doc_ids = None, batch_size=500):

        """ Embeds all un-embedded blocks in scope in batches and adds them to the LanceDB table -
        returns a summary dictionary of the job. """

        all_blocks_cursor, num_of_blocks = self.utils.get_blocks_cursor(doc_ids=doc_ids)

        # Initialize a new status
        status = Status(self.library.account_name)
        status.new_embedding_status(self.library.library_name, self.model_name, num_of_blocks)

        embeddings_created = 0

        # starting current_index @ 0
        current_index = 0

        finished = False

        # all_blocks_iter = iter(all_blocks_cursor)
        while not finished:

            # NOTE(review): this rebinding shadows the doc_ids parameter after the
            # cursor has been created - harmless here since doc_ids is not re-read
            block_ids, doc_ids, sentences = [], [], []

            # Build the next batch - each cursor pull counts toward batch_size,
            # including blocks skipped for empty text
            for i in range(batch_size):

                block = all_blocks_cursor.pull_one()

                # block = next(all_blocks_iter, None)

                if not block:
                    finished = True
                    break

                text_search = block["text_search"].strip()
                if not text_search or len(text_search) < 1:
                    continue

                block_ids.append(str(block["_id"]))
                doc_ids.append(int(block["doc_ID"]))
                sentences.append(text_search)

            if len(sentences) > 0:
                # Process the batch
                vectors = self.model.embedding(sentences)

                # expects records as tuples - (batch of _ids, batch of vectors, batch of dict metadata)
                # records = zip(block_ids, vectors) #, doc_ids)

                # upsert to lanceDB - rows are {'id', 'vector'} dicts
                try :
                    vectors_ingest = [{ 'id' : block_id,'vector': vector.tolist()} for block_id,vector in zip(block_ids,vectors)]
                    self.index.add(vectors_ingest)
                except Exception as e :
                    # diagnostic output of table + schema before re-raising
                    print(self.index)
                    print('schema',self.index.schema)
                    raise e

                # flag the embedded blocks in the text collection
                current_index = self.utils.update_text_index(block_ids,current_index)

                embeddings_created += len(sentences)
                status.increment_embedding_status(self.library.library_name, self.model_name, len(sentences))

                # will add options to configure to show/hide
                print (f"update: embedding_handler - Lancedb - Embeddings Created: {embeddings_created} of {num_of_blocks}")

        embedding_summary = self.utils.generate_embedding_summary(embeddings_created)
        logging.info("update: EmbeddingHandler - Lancedb - embedding_summary - %s", embedding_summary)

        return embedding_summary

    def search_index(self, query_embedding_vector, sample_count=10):

        """ Runs a vector search against the LanceDB table and returns a list of
        (block, score) tuples resolved through the text collection index. """

        # NOTE(review): 'result' and 'block_list' are bound inside the try - if search()
        # itself raises, the except's print of result.columns will NameError; confirm
        try:
            result = self.index.search(query=query_embedding_vector.tolist())\
                               .select(["id", "vector"])\
                               .limit(sample_count).to_pandas()

            block_list = []
            # itertuples(name=None) yields plain tuples: (row_index, id, vector, score)
            # note: 'id' here shadows the builtin
            for (_, id, vec, score) in result.itertuples(name=None):
                _id = id
                block_result_list = self.utils.lookup_text_index(_id)

                for block in block_result_list:
                    block_list.append((block, score))

            # for match in result.itertuples(index=False):
            #     _id = match.id
            #     block_result_list = self.utils.lookup_text_index(_id)
            #     for block in block_result_list:
            #         block_list.append((block, match._distance))

        except Exception as e:
            print("result df cols" ,result.columns, type(result))
            raise e

        return block_list

    def delete_index(self):

        """ Drops the LanceDB table and unsets the embedding flags in the text collection. """

        self.db.drop_table(self.collection_name)

        # remove emb key - 'unset' the blocks in the text collection
        self.utils.unset_text_index()

        return 1
class EmbeddingPinecone:

    """Implements the vector database Pinecone.

    ``EmbeddingPinecone`` implements the interface to the ``Pinecone`` vector database. It is used by the
    ``EmbeddingHandler``.

    Parameters
    ----------
    library : object
        A ``Library`` object.

    model : object
        A model object. See :mod:`models` for available models.

    model_name : str, default=None
        Name of the model.

    embedding_dims : int, default=None
        Dimension of the embedding.

    Returns
    -------
    embedding_pinecone : EmbeddingPinecone
        A new ``EmbeddingPinecone`` object.
    """

    def __init__(self, library, model=None, model_name=None, embedding_dims=None):

        # pull connection settings from the Pinecone config
        self.api_key = PineconeConfig().get_config("pinecone_api_key")
        self.environment = PineconeConfig().get_config("pinecone_environment")

        self.library = library
        self.library_name = self.library.library_name
        self.account_name = self.library.account_name

        # either a model object or a model name must be supplied
        if not model and not model_name:
            raise EmbeddingModelNotFoundException("no-model-or-model-name-provided")

        self.model = model
        self.model_name = model_name
        self.embedding_dims = embedding_dims

        # a model object, when provided, is the authoritative source of name + dims
        if self.model:
            self.model_name = self.model.model_name
            self.embedding_dims = model.embedding_dims

        self.index = None

        # open the connection to Pinecone
        try:
            pinecone.init(api_key=self.api_key, environment=self.environment)
        except:
            raise ImportError(
                "Exception - could not connect to Pinecone - please check:"
                "1. Pinecone python package is installed, e.g,. 'pip install pinecone-client', and"
                "2. The api key and environment is properly set.")

        # check index name - pinecone - 45 chars - numbers, letters and "-" ok - no "_" and all lowercase
        self.utils = _EmbeddingUtils(library_name=self.library_name,
                                     model_name=self.model_name,
                                     account_name=self.account_name,
                                     db_name="pinecone",
                                     embedding_dims=self.embedding_dims)

        self.collection_name = self.utils.create_safe_collection_name()
        self.collection_key = self.utils.create_db_specific_key()

        # create the index on first use, then attach to it
        if self.collection_name not in pinecone.list_indexes():
            pinecone.create_index(self.collection_name, dimension=self.embedding_dims, metric="euclidean")
            # describe_index blocks until the new index is ready
            pinecone.describe_index(self.collection_name)

        self.index = pinecone.Index(self.collection_name)

    def create_new_embedding(self, doc_ids = None, batch_size=500):

        """ Embeds all un-embedded blocks in scope in batches and upserts them to Pinecone -
        returns a summary dictionary of the job. """

        cursor, total_blocks = self.utils.get_blocks_cursor(doc_ids=doc_ids)

        # track job progress in the status store
        status = Status(self.library.account_name)
        status.new_embedding_status(self.library.library_name, self.model_name, total_blocks)

        embeddings_created = 0
        current_index = 0
        exhausted = False

        while not exhausted:

            batch_ids, batch_doc_ids, batch_text = [], [], []

            # assemble the next batch - every cursor pull counts toward batch_size,
            # including blocks skipped for empty text
            for _ in range(batch_size):

                block = cursor.pull_one()

                if not block:
                    exhausted = True
                    break

                chunk = block["text_search"].strip()
                if not chunk or len(chunk) < 1:
                    continue

                batch_ids.append(str(block["_id"]))
                batch_doc_ids.append(int(block["doc_ID"]))
                batch_text.append(chunk)

            if batch_text:

                # embed the batch and upsert as (id, vector) pairs
                batch_vectors = self.model.embedding(batch_text).tolist()
                self.index.upsert(vectors=zip(batch_ids, batch_vectors))

                # flag the embedded blocks in the text collection
                current_index = self.utils.update_text_index(batch_ids, current_index)

                embeddings_created += len(batch_text)
                status.increment_embedding_status(self.library.library_name, self.model_name, len(batch_text))

                # will add options to configure to show/hide
                print(f"update: embedding_handler - Pinecone - Embeddings Created: {embeddings_created} of {total_blocks}")

        summary = self.utils.generate_embedding_summary(embeddings_created)
        logging.info("update: EmbeddingHandler - Pinecone - embedding_summary - %s", summary)

        return summary

    def search_index(self, query_embedding_vector, sample_count=10):

        """ Runs a semantic query against the Pinecone index and returns a list of
        (block, score) tuples resolved through the text collection index. """

        query_result = self.index.query(vector=query_embedding_vector.tolist(), top_k=sample_count,include_values=True)

        matches_out = []
        for match in query_result["matches"]:
            for block in self.utils.lookup_text_index(match["id"]):
                matches_out.append((block, match["score"]))

        return matches_out

    def delete_index(self, index_name):

        """ Deletes the named Pinecone index and unsets the embedding flags
        in the text collection. """

        pinecone.delete_index(index_name)

        # remove emb key - 'unset' the blocks in the text collection
        self.utils.unset_text_index()

        return 1
class EmbeddingMongoAtlas:

    """Implements the use of MongoDB Atlas as a vector database.

    ``EmbeddingMongoAtlas`` implements the interface to ``MongoDB Atlas``. It is used by the
    ``EmbeddingHandler``.

    Parameters
    ----------
    library : object
        A ``Library`` object.

    model : object
        A model object. See :mod:`models` for available models.

    model_name : str, default=None
        Name of the model.

    embedding_dims : int, default=None
        Dimension of the embedding.

    Returns
    -------
    embedding_mongoatlas : EmbeddingMongoAtlas
        A new ``EmbeddingMongoAtlas`` object.
    """

    def __init__(self, library, model=None, model_name=None, embedding_dims=None):

        # Use a specified Mongo Atlas connection string if supplied.
        # Otherwise fallback to the Mongo DB connection string
        # self.connection_uri = os.environ.get("MONGO_ATLAS_CONNECTION_URI", MongoConfig.get_config("collection_db_uri"))
        self.connection_uri = MongoConfig().get_config("atlas_db_uri")

        self.library = library
        self.library_name = self.library.library_name
        self.account_name = self.library.account_name

        # look up model card - either a model object or a model name is required
        # fix: previously 'self.model_name = model.model_name' ran unconditionally before
        # this guard, raising AttributeError when only model_name was supplied (model=None);
        # reordered to match the other embedding adapter classes
        if not model and not model_name:
            raise EmbeddingModelNotFoundException("no-model-or-model-name-provided")

        self.model = model
        self.model_name = model_name
        self.embedding_dims = embedding_dims

        # if model passed (not None), then use model name
        if self.model:
            self.model_name = self.model.model_name
            self.embedding_dims = model.embedding_dims

        # shared naming / text-index synchronization helper
        self.utils = _EmbeddingUtils(library_name=self.library_name,
                                     model_name=self.model_name,
                                     account_name=self.account_name,
                                     db_name="mongoatlas",
                                     embedding_dims=self.embedding_dims)

        self.collection_name = self.utils.create_safe_collection_name()
        self.collection_key = self.utils.create_db_specific_key()

        # build new name here
        # self.index_name = self.collection_name

        # Connect and create a MongoClient
        self.mongo_client = MongoClient(self.connection_uri)

        # Make sure the Database exists by creating a dummy metadata collection
        self.embedding_db_name = "llmware_embeddings"
        self.embedding_db = self.mongo_client["llmware_embeddings"]
        if self.embedding_db_name not in self.mongo_client.list_database_names():
            self.embedding_db["metadata"].insert_one({"created": Utilities().get_current_time_now()})

        # Connect to collection and create it if it doesn't exist by creating a dummy doc
        self.embedding_collection = self.embedding_db[self.collection_name]
        if self.collection_name not in self.embedding_db.list_collection_names():
            self.embedding_collection.insert_one({"created": Utilities().get_current_time_now()})

        # If the collection does not have a search index (e.g if it's new), create one
        if len(list(self.embedding_collection.list_search_indexes())) < 1:
            # knnVector mapping over the "eVector" field, euclidean similarity
            index_model = {
                'name': self.collection_name,
                'definition': {
                    'mappings': {
                        'dynamic': True,
                        'fields': {
                            'eVector': {
                                'type': 'knnVector',
                                'dimensions': self.embedding_dims,
                                'similarity': 'euclidean'
                            },
                        }
                    }
                }
            }

            self.embedding_collection.create_search_index(index_model)

    def create_new_embedding(self, doc_ids = None, batch_size=500):

        """ Embeds all un-embedded blocks in scope in batches, inserts them into the Atlas
        collection, and waits for the search index to catch up - returns a summary dictionary. """

        all_blocks_cursor, num_of_blocks = self.utils.get_blocks_cursor(doc_ids=doc_ids)

        # Initialize a new status
        status = Status(self.library.account_name)
        status.new_embedding_status(self.library.library_name, self.model_name, num_of_blocks)

        embeddings_created = 0

        # starting current_index @ 0
        current_index = 0

        finished = False

        # tracked so we can poll the search index for the most recent insert
        last_block_id = ""

        while not finished:

            # note: rebinds the doc_ids parameter - harmless, cursor already created
            block_ids, doc_ids, sentences = [], [], []

            # Build the next batch - each cursor pull counts toward batch_size,
            # including blocks skipped for empty text
            for i in range(batch_size):

                block = all_blocks_cursor.pull_one()

                if not block:
                    finished = True
                    break

                text_search = block["text_search"].strip()
                if not text_search or len(text_search) < 1:
                    continue

                block_ids.append(str(block["_id"]))
                doc_ids.append(int(block["doc_ID"]))
                sentences.append(text_search)

            if len(sentences) > 0:
                # Process the batch
                vectors = self.model.embedding(sentences).tolist()

                docs_to_insert = []
                for i, vector in enumerate(vectors):
                    doc = {
                        "id": str(block_ids[i]),
                        "doc_ID": str(doc_ids[i]),
                        "eVector": vector
                    }
                    docs_to_insert.append(doc)

                insert_result = self.embedding_collection.insert_many(docs_to_insert)

                # flag the embedded blocks in the text collection
                current_index = self.utils.update_text_index(block_ids,current_index)

                embeddings_created += len(sentences)
                status.increment_embedding_status(self.library.library_name, self.model_name, len(sentences))

                # will add configuration options to hide/show
                print (f"update: embedding_handler - Mongo Atlas - Embeddings Created: {embeddings_created} of {num_of_blocks}")

                last_block_id = block_ids[-1]

        if embeddings_created > 0:

            print(f"Embedding(Mongo Atlas): Waiting for {self.embedding_db_name}.{self.collection_name} to be ready for vector search...")

            start_time = time.time()
            self.wait_for_search_index(last_block_id, start_time)
            wait_time = time.time() - start_time

            print(f"Embedding(Mongo Atlas): {self.embedding_db_name}.{self.collection_name} ready ({wait_time: .2f} seconds)")

        embedding_summary = self.utils.generate_embedding_summary(embeddings_created)
        logging.info("update: EmbeddingHandler - Mongo Atlas - embedding_summary - %s", embedding_summary)

        return embedding_summary

    # After doc insertion we want to make sure the index is ready before proceeding
    def wait_for_search_index(self, last_block_id, start_time):

        """ Polls the Atlas search index (recursively, 5-minute timeout) until it is
        READY/queryable and the most recently inserted block is searchable. """

        # If we've been waiting for 5 mins, then time out and just return
        if time.time() > start_time + (5 * 60):
            return

        # Get the atlas search index
        the_index = self.embedding_collection.list_search_indexes().next()

        # If the index doesn't have status="READY" or queryable=True, wait
        if the_index["status"] != "READY" or not the_index["queryable"]:
            time.sleep(3)
            return self.wait_for_search_index(last_block_id, start_time)

        # If we can't find the last block yet in the search index, wait
        search_query = {
            "$search": {
                "index": self.collection_name,
                "text": {
                    "query": str(last_block_id),
                    "path": "id"  # The field in your documents you're matching against
                }
            }
        }
        results = self.embedding_collection.aggregate([search_query])
        if not results.alive:
            time.sleep(1)
            return self.wait_for_search_index(last_block_id, start_time)

    def search_index(self, query_embedding_vector, sample_count=10):

        """ Runs an Atlas $vectorSearch aggregation and returns a list of (block, distance)
        tuples resolved through the text collection index. """

        search_results = self.embedding_collection.aggregate([
            {
                "$vectorSearch": {
                    "index": self.collection_name,
                    "path": "eVector",
                    "queryVector": query_embedding_vector.tolist(),
                    "numCandidates": sample_count * 10,  # Following recommendation here: https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-stage/
                    "limit": sample_count
                }
            },
            {
                "$project": {
                    "_id": 0,
                    "id": 1,
                    "doc_ID": 1,
                    "score": { "$meta": "vectorSearchScore" }
                }
            }
        ])

        block_list = []
        for search_result in search_results:
            _id = search_result["id"]
            block_result_list = self.utils.lookup_text_index(_id)

            for block in block_result_list:
                distance = 1 - search_result["score"]  # Atlas returns a score from 0 to 1.0
                block_list.append((block, distance))

        return block_list

    def delete_index(self, index_name):

        """ Drops the named Atlas collection and unsets the embedding flags
        in the text collection. """

        self.embedding_db.drop_collection(index_name)

        # remove emb key - 'unset' the blocks in the text collection
        self.utils.unset_text_index()

        return 1
class EmbeddingRedis:
    """Implements the use of Redis as a vector database.
    ``EmbeddingRedis`` implements the interface to ``Redis``. It is used by the
    ``EmbeddingHandler``.
    Parameters
    ----------
    library : object
        A ``Library`` object.
    model : object
        A model object. See :mod:`models` for available models.
    model_name : str, default=None
        Name of the model.
    embedding_dims : int, default=None
        Dimension of the embedding.
    Returns
    -------
    embedding_redis : EmbeddingRedis
        A new ``EmbeddingRedis`` object.
    """
    def __init__(self, library, model=None, model_name=None, embedding_dims=None):
        self.library = library
        self.library_name = library.library_name
        self.account_name = library.account_name
        # Connect to redis - use "localhost" & 6379 by default
        redis_host = RedisConfig().get_config("host")
        redis_port = RedisConfig().get_config("port")
        self.r = redis.Redis(host=redis_host, port=redis_port, decode_responses=True)
        # look up model card - a passed model object is authoritative for name/dims
        self.model = model
        self.model_name = model_name
        self.embedding_dims = embedding_dims
        if self.model:
            self.model_name = self.model.model_name
            self.embedding_dims = self.model.embedding_dims
        self.utils = _EmbeddingUtils(library_name=self.library_name,
                                     model_name=self.model_name,
                                     account_name=self.account_name,
                                     db_name="redis",
                                     embedding_dims=self.embedding_dims)
        self.collection_name = self.utils.create_safe_collection_name()
        self.collection_key = self.utils.create_db_specific_key()
        self.DOC_PREFIX = self.collection_name  # key prefix used for the index
        try:
            # check to see if index exists - FT.INFO raises an error if it does not
            self.r.ft(self.collection_name).info()
            logging.info("update: embedding_handler - Redis - index already exists - %s", self.collection_name)
        except Exception:
            # fix: narrowed from a bare 'except:' so that KeyboardInterrupt and
            # SystemExit are no longer swallowed while bootstrapping the index
            # schema for the new search index
            schema = (
                NumericField("id"),
                TextField("text"),
                TextField("block_mongo_id"),
                NumericField("block_id"),
                NumericField("block_doc_id"),
                VectorField("vector",           # Vector Field Name
                            "FLAT", {           # Vector Index Type: FLAT or HNSW
                                "TYPE": "FLOAT32",          # FLOAT32 or FLOAT64
                                "DIM": self.embedding_dims,
                                "DISTANCE_METRIC": "L2",    # "COSINE" alternative
                            }
                            ),
            )
            # index definition - all HASH keys under DOC_PREFIX are indexed
            definition = IndexDefinition(prefix=[self.DOC_PREFIX], index_type=IndexType.HASH)
            # create Index
            self.r.ft(self.collection_name).create_index(fields=schema, definition=definition)
            logging.info("update: embedding_handler - Redis - creating new index - %s ", self.collection_name)
    def create_new_embedding(self, doc_ids=None, batch_size=500):
        """Embed the library's text blocks (optionally restricted to doc_ids) and
        store the vectors in Redis in batches of batch_size.

        Returns the embedding summary dict produced by _EmbeddingUtils.
        """
        all_blocks_cursor, num_of_blocks = self.utils.get_blocks_cursor(doc_ids=doc_ids)
        # Initialize a new status tracker for this embedding job
        status = Status(self.library.account_name)
        status.new_embedding_status(self.library.library_name, self.model_name, num_of_blocks)
        embeddings_created = 0
        current_index = 0
        finished = False
        obj_batch = []
        while not finished:
            block_ids, doc_ids, sentences = [], [], []
            # Build the next batch
            for i in range(batch_size):
                block = all_blocks_cursor.pull_one()
                if not block:
                    finished = True
                    break
                text_search = block["text_search"].strip()
                # skip empty text blocks - nothing to embed
                if not text_search or len(text_search) < 1:
                    continue
                block_ids.append(str(block["_id"]))
                doc_ids.append(int(block["doc_ID"]))
                sentences.append(text_search)
                obj = {"block_mongo_id": str(block["_id"]),
                       "block_doc_id": int(block["doc_ID"]),
                       "block_id": int(block["block_ID"]),
                       "text": text_search
                       }
                obj_batch.append(obj)
            if len(sentences) > 0:
                # Process the batch - embed, then write via one pipeline round-trip
                vectors = self.model.embedding(sentences)
                pipe = self.r.pipeline()
                for i, embedding in enumerate(vectors):
                    redis_dict = obj_batch[i]
                    embedding = np.array(embedding)
                    # RediSearch vector fields expect raw float32 bytes
                    redis_dict.update({"vector": embedding.astype(np.float32).tobytes()})
                    key_name = f"{self.DOC_PREFIX}:{redis_dict['block_mongo_id']}"
                    pipe.hset(key_name, mapping=redis_dict)
                # fix: result of execute() was assigned to an unused variable
                pipe.execute()
                obj_batch = []
            # end - insert: mark the blocks as embedded in the text collection
            current_index = self.utils.update_text_index(block_ids, current_index)
            embeddings_created += len(sentences)
            status.increment_embedding_status(self.library.library_name, self.model_name, len(sentences))
            # will add configuration options to show/display
            print(f"update: embedding_handler - Redis - Embeddings Created: {embeddings_created} of {num_of_blocks}")
        embedding_summary = self.utils.generate_embedding_summary(embeddings_created)
        logging.info("update: EmbeddingHandler - Redis - embedding_summary - %s", embedding_summary)
        return embedding_summary
    def search_index(self, query_embedding_vector, sample_count=10):
        """KNN search against the Redis index.

        Returns a list of (block, score) pairs; score is the L2 distance reported
        by RediSearch, so lower means a closer match.
        """
        query_embedding_vector = np.array(query_embedding_vector)
        query = (
            Query(f"*=>[KNN {sample_count} @vector $vec as score]")
            .sort_by("score")
            .return_fields("score", "block_mongo_id", "block_doc_id", "block_id","text")
            .paging(0, sample_count)
            .dialect(2)
        )
        query_params = {
            # query vector must be passed as raw float32 bytes
            "vec": query_embedding_vector.astype(np.float32).tobytes()
        }
        results = self.r.ft(self.collection_name).search(query, query_params).docs
        block_list = []
        for j, res in enumerate(results):
            _id = str(res["block_mongo_id"])
            score = float(res["score"])
            # resolve the matching text block(s) from the text collection
            block_result_list = self.utils.lookup_text_index(_id)
            for block in block_result_list:
                block_list.append((block, score))
        return block_list
    def delete_index(self):
        """Drop the Redis search index and its documents, then clear the
        embedding keys from the text collection."""
        self.r.ft(self.collection_name).dropindex(delete_documents=True)
        # remove emb key - 'unset' the blocks in the text collection
        self.utils.unset_text_index()
        return 0
class EmbeddingQdrant:
    """Implements the Qdrant vector database.
    ``EmbeddingQdrant`` implements the interface to ``Qdrant``. It is used by the
    ``EmbeddingHandler``.
    Parameters
    ----------
    library : object
        A ``Library`` object.
    model : object
        A model object. See :mod:`models` for available models.
    model_name : str, default=None
        Name of the model.
    embedding_dims : int, default=None
        Dimension of the embedding.
    Returns
    -------
    embedding_qdrant : EmbeddingQdrant
        A new ``EmbeddingQdrant`` object.
    """
    def __init__(self, library, model=None, model_name=None, embedding_dims=None):
        self.library = library
        self.library_name = library.library_name
        self.account_name = library.account_name
        self.qclient = QdrantClient(**QdrantConfig.get_config())
        # look up model card - a passed model object overrides name/dims below
        self.model = model
        self.model_name = model_name
        self.embedding_dims = embedding_dims
        if self.model:
            self.model_name = self.model.model_name
            self.embedding_dims = self.model.embedding_dims
        self.utils = _EmbeddingUtils(library_name=self.library_name,
                                     model_name=self.model_name,
                                     account_name=self.account_name,
                                     db_name="qdrant",
                                     embedding_dims=self.embedding_dims)
        self.collection_name = self.utils.create_safe_collection_name()
        self.collection_key = self.utils.create_db_specific_key()
        # check if collection already exists, or if needs to be created
        collections = self.qclient.get_collections()
        collection_exists = False
        for i, cols in enumerate(collections.collections):
            if cols.name == self.collection_name:
                collection_exists = True
                break
        if not collection_exists:
            # DOT distance - vectors are compared by (unnormalized) dot product
            self.collection = (
                self.qclient.create_collection(
                    collection_name=self.collection_name,
                    vectors_config=VectorParams(size=self.embedding_dims, distance=Distance.DOT), ))
            logging.info("update: embedding_handler - QDRANT - creating new collection - %s",
                         self.collection_name)
        else:
            # if collection already exists, then 'get' collection
            self.collection = self.qclient.get_collection(self.collection_name)
    def create_new_embedding(self, doc_ids=None, batch_size=500):
        """Embed the library's text blocks (optionally restricted to doc_ids) and
        upsert them into Qdrant in batches of batch_size; returns the embedding
        summary dict produced by _EmbeddingUtils."""
        all_blocks_cursor, num_of_blocks = self.utils.get_blocks_cursor(doc_ids=doc_ids)
        # Initialize a new status
        status = Status(self.library.account_name)
        status.new_embedding_status(self.library.library_name, self.model_name, num_of_blocks)
        embeddings_created = 0
        current_index = 0
        finished = False
        # all_blocks_iter = iter(all_blocks_cursor)
        points_batch = []
        while not finished:
            block_ids, doc_ids, sentences = [], [], []
            # Build the next batch
            for i in range(batch_size):
                block = all_blocks_cursor.pull_one()
                if not block:
                    finished = True
                    break
                text_search = block["text_search"].strip()
                # skip empty text blocks - nothing to embed
                if not text_search or len(text_search) < 1:
                    continue
                block_ids.append(str(block["_id"]))
                doc_ids.append(int(block["doc_ID"]))
                sentences.append(text_search)
            if len(sentences) > 0:
                # Process the batch
                vectors = self.model.embedding(sentences)
                for i, embedding in enumerate(vectors):
                    # each point gets a fresh UUID; source ids kept in the payload
                    point_id = str(uuid.uuid4())
                    ps = PointStruct(id=point_id, vector=embedding,
                                     payload={"block_doc_id": doc_ids[i], "sentences": sentences[i],
                                              "block_mongo_id": block_ids[i]})
                    points_batch.append(ps)
                # upsert a batch of points
                self.qclient.upsert(collection_name=self.collection_name, wait=True,
                                    points=points_batch)
                points_batch = []
            # end - insert: mark the blocks as embedded in the text collection
            current_index = self.utils.update_text_index(block_ids,current_index)
            embeddings_created += len(sentences)
            status.increment_embedding_status(self.library.library_name, self.model_name, len(sentences))
            # will add configuration options to show/display
            print(
                f"update: embedding_handler - Qdrant - Embeddings Created: {embeddings_created} of {num_of_blocks}")
        embedding_summary = self.utils.generate_embedding_summary(embeddings_created)
        logging.info("update: EmbeddingHandler - Qdrant - embedding_summary - %s", embedding_summary)
        return embedding_summary
    def search_index(self, query_embedding_vector, sample_count=10):
        """Vector search against the Qdrant collection; returns a list of
        (block, score) pairs resolved through the text collection."""
        search_results = self.qclient.search(collection_name=self.collection_name,
                                             query_vector=query_embedding_vector, limit=sample_count)
        block_list = []
        for j, res in enumerate(search_results):
            # print("results: ", j, res)
            _id = res.payload["block_mongo_id"]
            # resolve the matching text block(s) from the text collection
            block_result_list = self.utils.lookup_text_index(_id)
            for block in block_result_list:
                block_list.append((block, res.score))
        return block_list
    def delete_index(self):
        """Delete the Qdrant collection and clear the embedding keys from the
        text collection."""
        # delete index - need to add
        self.qclient.delete_collection(collection_name=f"{self.collection_name}")
        # remove emb key - 'unset' the blocks in the text collection
        self.utils.unset_text_index()
        return 0
class EmbeddingPGVector:
    """Implements the interface to the PGVector vector database.
    ``EmbeddingPGVector`` implements the interface to ``PGVector``. It is used by the
    ``EmbeddingHandler``.
    Parameters
    ----------
    library : object
        A ``Library`` object.
    model : object
        A model object. See :mod:`models` for available models.
    model_name : str, default=None
        Name of the model.
    embedding_dims : int, default=None
        Dimension of the embedding.
    Returns
    -------
    embedding_pgvector : EmbeddingPGVector
        A new ``EmbeddingPGVector`` object.
    """
    def __init__(self, library, model=None, model_name=None, embedding_dims=None, full_schema=False):
        self.library = library
        self.library_name = library.library_name
        self.account_name = library.account_name
        # look up model card - a passed model object overrides name/dims below
        self.model = model
        self.model_name = model_name
        self.embedding_dims = embedding_dims
        if self.model:
            self.model_name = self.model.model_name
            self.embedding_dims = self.model.embedding_dims
        self.utils = _EmbeddingUtils(library_name=self.library_name,
                                     model_name=self.model_name,
                                     account_name=self.account_name,
                                     db_name="pg_vector",
                                     embedding_dims=self.embedding_dims)
        self.collection_name = self.utils.create_safe_collection_name()
        self.collection_key = self.utils.create_db_specific_key()
        # Connect to postgres
        postgres_host = PostgresConfig().get_config("host")
        postgres_port = PostgresConfig().get_config("port")
        postgres_db_name = PostgresConfig().get_config("db_name")
        postgres_user_name = PostgresConfig().get_config("user_name")
        postgres_pw = PostgresConfig().get_config("pw")
        postgres_schema = PostgresConfig().get_config("pgvector_schema")
        # default schema captures only minimum required for tracking vectors
        if postgres_schema == "vector_only":
            self.full_schema = False
        else:
            self.full_schema = True
        # determines whether to use 'skinny' schema or 'full' schema
        # --note: in future releases, we will be building out more support for PostGres
        # self.full_schema = full_schema
        # Session connection
        # note: for initial connection, need to confirm that the database exists
        self.conn = psycopg.connect(host=postgres_host, port=postgres_port, dbname=postgres_db_name,
                                    user=postgres_user_name, password=postgres_pw)
        # register vector extension
        self.conn.execute('CREATE EXTENSION IF NOT EXISTS vector')
        register_vector(self.conn)
        if not self.full_schema:
            # 'skinny' schema - only what is needed to track and search vectors
            table_create = (f"CREATE TABLE IF NOT EXISTS {self.collection_name} "
                            f"(id bigserial PRIMARY KEY, "
                            f"text text, "
                            f"embedding vector({self.embedding_dims}), "
                            f"block_mongo_id text, "
                            f"block_doc_id integer);")
        else:
            # full schema is a replica of the Mongo parsing output key structure
            table_create = (f"CREATE TABLE IF NOT EXISTS {self.collection_name} "
                            f"(id bigserial PRIMARY KEY, "
                            f"embedding vector({self.embedding_dims}),"
                            f"block_mongo_id text, "
                            f"block_doc_id integer,"
                            f"block_ID integer, "
                            f"doc_ID integer, "
                            f"content_type text, "
                            f"file_type text, "
                            f"master_index integer, "
                            f"master_index2 integer, "
                            f"coords_x integer, "
                            f"coords_y integer, "
                            f"coords_cx integer, "
                            f"coords_cy integer, "
                            f"author_or_speaker text, "
                            f"modified_date text, "
                            f"created_date text, "
                            f"creator_tool text,"
                            f"added_to_collection text,"
                            f"table_block text,"
                            f"text text,"
                            f"external_files text,"
                            f"file_source text,"
                            f"header_text text,"
                            f"text_search text,"
                            f"user_tags text,"
                            f"special_field1 text,"
                            f"special_field2 text,"
                            f"special_field3 text,"
                            f"graph_status text,"
                            f"embedding_flags json,"
                            f"dialog text);")
        # execute the creation of the table, if needed
        self.conn.execute(table_create)
        self.conn.commit()
    def create_new_embedding(self, doc_ids=None, batch_size=500):
        """Embed the library's text blocks (optionally restricted to doc_ids) and
        insert them into the PGVector table in batches of batch_size; builds an
        ivfflat index at the end and returns the embedding summary dict."""
        all_blocks_cursor, num_of_blocks = self.utils.get_blocks_cursor(doc_ids=doc_ids)
        # Initialize a new status
        status = Status(self.library.account_name)
        status.new_embedding_status(self.library.library_name, self.model_name, num_of_blocks)
        embeddings_created = 0
        current_index = 0
        finished = False
        # all_blocks_iter = iter(all_blocks_cursor)
        obj_batch = []
        while not finished:
            block_ids, doc_ids, sentences = [], [], []
            # Build the next batch
            for i in range(batch_size):
                block = all_blocks_cursor.pull_one()
                # block = next(all_blocks_iter, None)
                if not block:
                    finished = True
                    break
                text_search = block["text_search"].strip()
                # skip empty text blocks - nothing to embed
                if not text_search or len(text_search) < 1:
                    continue
                block_ids.append(str(block["_id"]))
                doc_ids.append(int(block["doc_ID"]))
                sentences.append(text_search)
                if not self.full_schema:
                    obj = {"block_mongo_id": str(block["_id"]),
                           "block_doc_id": int(block["doc_ID"]),
                           "text": text_search}
                else:
                    # full schema - copy every key from the source block verbatim
                    obj = {}
                    for keys in block:
                        if keys == "_id":
                            value = str(block["_id"])
                            obj.update({"block_mongo_id": value})
                        else:
                            value = block[keys]
                            obj.update({keys:value})
                    obj.update({"block_doc_id": int(block["doc_ID"])})
                obj_batch.append(obj)
            if len(sentences) > 0:
                # Process the batch
                vectors = self.model.embedding(sentences)
                for i, embedding in enumerate(vectors):
                    if not self.full_schema:
                        insert_command=(f"INSERT INTO {self.collection_name} (text, embedding, block_mongo_id,"
                                        f"block_doc_id) VALUES (%s, %s, %s, %s)")
                        insert_array=(obj_batch[i]["text"], embedding,
                                      obj_batch[i]["block_mongo_id"], obj_batch[i]["block_doc_id"],)
                    else:
                        insert_command=(f"INSERT INTO {self.collection_name} "
                                        f"(embedding, block_mongo_id, block_doc_id,"
                                        f"block_ID, doc_ID, content_type, file_type, master_index,"
                                        f"master_index2, coords_x, coords_y,coords_cx, coords_cy,"
                                        f"author_or_speaker, modified_date, created_date, creator_tool,"
                                        f"added_to_collection, table_block, text, external_files,file_source,"
                                        f"header_text, text_search, user_tags, special_field1, special_field2,"
                                        f"special_field3, graph_status, dialog) "
                                        f"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, "
                                        f"%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, "
                                        f"%s, %s, %s, %s)")
                        # NOTE(review): value below reads the source key "table" while the
                        # destination column is named "table_block" - presumably the Mongo
                        # block key is "table"; verify against the parser output schema
                        insert_array=(embedding, obj_batch[i]["block_mongo_id"],
                                      obj_batch[i]["block_doc_id"], obj_batch[i]["block_ID"],
                                      obj_batch[i]["doc_ID"], obj_batch[i]["content_type"],
                                      obj_batch[i]["file_type"], obj_batch[i]["master_index"],
                                      obj_batch[i]["master_index2"], obj_batch[i]["coords_x"],
                                      obj_batch[i]["coords_y"], obj_batch[i]["coords_cx"],
                                      obj_batch[i]["coords_cy"], obj_batch[i]["author_or_speaker"],
                                      obj_batch[i]["modified_date"], obj_batch[i]["created_date"],
                                      obj_batch[i]["creator_tool"], obj_batch[i]["added_to_collection"],
                                      obj_batch[i]["table"], obj_batch[i]["text"], obj_batch[i]["external_files"],
                                      obj_batch[i]["file_source"], obj_batch[i]["header_text"],
                                      obj_batch[i]["text_search"], obj_batch[i]["user_tags"],
                                      obj_batch[i]["special_field1"], obj_batch[i]["special_field2"], obj_batch[i]["special_field3"],
                                      obj_batch[i]["graph_status"], obj_batch[i]["dialog"])
                    self.conn.execute(insert_command, insert_array)
                self.conn.commit()
                obj_batch = []
            # end - insert: mark the blocks as embedded in the text collection
            current_index = self.utils.update_text_index(block_ids,current_index)
            embeddings_created += len(sentences)
            status.increment_embedding_status(self.library.library_name, self.model_name, len(sentences))
            # will add configuration options to show/display
            print(f"update: embedding_handler - PGVector - Embeddings Created: "
                  f"{embeddings_created} of {num_of_blocks}")
        embedding_summary = self.utils.generate_embedding_summary(embeddings_created)
        embedded_blocks = embedding_summary["embedded_blocks"]
        logging.info("update: EmbeddingHandler - PG_Vector - embedding_summary - %s", embedding_summary)
        # safety check on output - embedded_blocks may come back as a 1-element sequence
        if not isinstance(embedded_blocks, int):
            if len(embedded_blocks) > 0:
                embedded_blocks = embedded_blocks[0]
            else:
                embedded_blocks = embeddings_created
        # create index - ivfflat list count scaled to row count, min 10
        lists = max(embedded_blocks // 1000, 10)
        create_index_command = (f"CREATE INDEX ON {self.collection_name} "
                                f"USING ivfflat(embedding vector_l2_ops) WITH(lists={lists});")
        self.conn.execute(create_index_command)
        self.conn.commit()
        # tbd - next steps - will create text index and options to query directly against PG
        # Closing the connection
        self.conn.close()
        return embedding_summary
    def search_index(self, query_embedding_vector, sample_count=10):
        """L2 (<->) nearest-neighbor query against the PGVector table; returns
        a list of (block, distance) pairs resolved through the text collection."""
        # note: converting to np.array is 'safety' for postgres vector type
        query_embedding_vector = np.array(query_embedding_vector)
        q = (f"SELECT id, block_mongo_id, embedding <-> %s AS distance, text "
             f"FROM {self.collection_name} ORDER BY distance LIMIT %s")
        """
        # look to generalize the query
        q = (f"SELECT embedding <-> %s AS distance, * FROM {self.collection_name} ORDER BY "
        f"distance LIMIT %s")
        """
        cursor = self.conn.cursor()
        results = cursor.execute(q, (query_embedding_vector,sample_count))
        block_list = []
        for j, res in enumerate(results):
            pg_id = res[0]
            _id = res[1]
            distance = res[2]
            text = res[3]
            # resolve the matching text block(s) from the text collection
            block_result_list = self.utils.lookup_text_index(_id)
            for block in block_result_list:
                block_list.append((block, distance))
        # Closing the connection
        # NOTE(review): closing here means a second search_index call on the same
        # instance will fail on the closed connection - confirm intended lifecycle
        self.conn.close()
        return block_list
    def delete_index(self, collection_name=None):
        """Drop the PGVector table (optionally a different collection_name) and
        clear the embedding keys from the text collection."""
        # delete index - drop table
        if collection_name:
            self.collection_name = collection_name
        drop_command = f'''DROP TABLE {self.collection_name} '''
        # Executing the query
        cursor = self.conn.cursor()
        cursor.execute(drop_command)
        logging.info("update: embedding_handler - PG Vector - table dropped - %s", self.collection_name)
        # Commit your changes in the database
        self.conn.commit()
        # Closing the connection
        self.conn.close()
        # remove emb key - 'unset' the blocks in the text collection
        self.utils.unset_text_index()
        return 0
class EmbeddingNeo4j:
    """Implements the interface to Neo4j as a vector database.
    ``EmbeddingNeo4j`` implements the interface to ``Neo4j``. It is used by the
    ``EmbeddingHandler``.
    Parameters
    ----------
    library : object
        A ``Library`` object.
    model : object
        A model object. See :mod:`models` for available models.
    model_name : str, default=None
        Name of the model.
    embedding_dims : int, default=None
        Dimension of the embedding.
    Returns
    -------
    embedding_Neo4j : EmbeddingNeo4j
        A new ``EmbeddingNeo4j`` object.
    """
    def __init__(self, library, model=None, model_name=None, embedding_dims=None):
        # look up model card - either a model object or a model name is required
        if not model and not model_name:
            raise EmbeddingModelNotFoundException("no-model-or-model-name-provided")
        self.library = library
        self.library_name = library.library_name
        self.model = model
        self.model_name = model_name
        self.embedding_dims = embedding_dims
        self.account_name = library.account_name
        # if model passed (not None), then use model name
        if self.model:
            self.model_name = self.model.model_name
            self.embedding_dims = model.embedding_dims
        # user and password names are taken from environment variables
        # Names for user and password are taken from the link below
        # https://neo4j.com/docs/operations-manual/current/tools/neo4j-admin/upload-to-aura/#_options
        uri = Neo4jConfig.get_config('uri')
        user = Neo4jConfig.get_config('user')
        password = Neo4jConfig.get_config('password')
        # read for completeness; note that _query below hard-codes database='neo4j'
        database = Neo4jConfig.get_config('database')
        # Connect to Neo4J and verify connection.
        # Code taken from the code below
        # https://github.com/langchain-ai/langchain/blob/master/libs/community/langchain_community/vectorstores/neo4j_vector.py#L165C9-L177C14
        try:
            self.driver = GraphDatabase.driver(uri, auth=(user, password))
            self.driver.verify_connectivity()
        except neo4j.exceptions.ServiceUnavailable:
            raise ValueError(
                "Could not connect to Neo4j database. "
                "Please ensure that the url is correct and that Neo4j is up and running.")
        except neo4j.exceptions.AuthError:
            raise ValueError(
                "Could not connect to Neo4j database. "
                "Please ensure that the username and password are correct.")
        # any other exception propagates unchanged - useful for debugging
        # Make sure that the Neo4j version supports vector indexing.
        neo4j_version = self._query('call dbms.components() '
                                    'yield name, versions, edition '
                                    'unwind versions as version '
                                    'return version')[0]['version']
        neo4j_version = tuple(map(int, neo4j_version.split('.')))
        target_version = (5, 11, 0)
        if neo4j_version < target_version:
            raise ValueError('Vector indexing requires a Neo4j version >= 5.11.0')
        # If the index does not exist, then we create the vector search index.
        neo4j_indexes = self._query('SHOW INDEXES yield name')
        neo4j_indexes = [neo4j_index['name'] for neo4j_index in neo4j_indexes]
        if 'vectorIndex' not in neo4j_indexes:
            self._query(
                query='CALL '
                      'db.index.vector.createNodeIndex('
                      '$indexName, '
                      '$label, '
                      '$propertyKey, '
                      'toInteger($vectorDimension), '
                      '"euclidean"'
                      ')',
                parameters={
                    'indexName': 'vectorIndex',
                    'label': 'Chunk',
                    'propertyKey': 'embedding',
                    'vectorDimension': int(self.model.embedding_dims)
                })
        self.utils = _EmbeddingUtils(library_name=self.library_name,
                                     model_name=self.model_name,
                                     account_name=self.account_name,
                                     db_name="neo4j",
                                     embedding_dims=self.embedding_dims)
    def create_new_embedding(self, doc_ids=None, batch_size=500):
        """Embed the library's text blocks (optionally restricted to doc_ids) and
        merge them into Neo4j as :Chunk nodes in batches of batch_size.

        Returns the embedding summary dict produced by _EmbeddingUtils.
        """
        all_blocks_cursor, num_of_blocks = self.utils.get_blocks_cursor(doc_ids=doc_ids)
        # Initialize a new status tracker for this embedding job
        status = Status(self.library.account_name)
        status.new_embedding_status(self.library.library_name, self.model_name, num_of_blocks)
        embeddings_created = 0
        current_index = 0
        finished = False
        while not finished:
            block_ids, doc_ids, sentences = [], [], []
            # Build the next batch
            for i in range(batch_size):
                block = all_blocks_cursor.pull_one()
                if not block:
                    finished = True
                    break
                text_search = block["text_search"].strip()
                # skip empty text blocks - nothing to embed
                if not text_search or len(text_search) < 1:
                    continue
                block_ids.append(str(block["_id"]))
                doc_ids.append(int(block["doc_ID"]))
                sentences.append(text_search)
            if len(sentences) > 0:
                # Process the batch
                vectors = self.model.embedding(sentences)
                # Insert into Neo4J - MERGE on (doc_id, block_id) so re-runs update
                # existing nodes instead of duplicating them
                insert_query = (
                    "UNWIND $data AS row "
                    "CALL "
                    "{ "
                    "WITH row "
                    "MERGE (c:Chunk {id: row.doc_id, block_id: row.block_id}) "
                    "WITH c, row "
                    "CALL db.create.setVectorProperty(c, 'embedding', row.embedding) "
                    "YIELD node "
                    "SET c.sentence = row.sentence "
                    "} "
                    f"IN TRANSACTIONS OF {batch_size} ROWS"
                )
                # bug fix: use the per-row 'sentence' loop variable - previously the
                # whole 'sentences' list was stored on every node
                parameters = {
                    "data": [
                        {"block_id": block_id, "doc_id": doc_id, "sentence": sentence, "embedding": vector}
                        for block_id, doc_id, sentence, vector in zip(
                            block_ids, doc_ids, sentences, vectors
                        )
                    ]
                }
                self._query(query=insert_query, parameters=parameters)
                # mark the blocks as embedded in the text collection
                current_index = self.utils.update_text_index(block_ids, current_index)
                # Update statistics
                embeddings_created += len(sentences)
                status.increment_embedding_status(self.library.library_name, self.model_name, len(sentences))
                print(f"update: embedding_handler - Neo4j - Embeddings Created: {embeddings_created} of {num_of_blocks}")
        embedding_summary = self.utils.generate_embedding_summary(embeddings_created)
        logging.info(f'update: EmbeddingHandler - Neo4j - embedding_summary - {embedding_summary}')
        return embedding_summary
    def search_index(self, query_embedding_vector, sample_count=10):
        """Query the 'vectorIndex' for the sample_count nearest :Chunk nodes and
        return a list of (block, score) pairs resolved through the text collection."""
        block_list = []
        search_query = 'CALL db.index.vector.queryNodes("vectorIndex" , $sample_count, $query_embedding_vector) '\
                       'YIELD node, score '
        parameters = {'sample_count': sample_count, 'query_embedding_vector': query_embedding_vector}
        results = self._query(query=search_query, parameters=parameters)
        for result in results:
            block_id = result['node']['block_id']
            # resolve the matching text block(s) from the text collection
            block_result_list = self.utils.lookup_text_index(block_id)
            for block in block_result_list:
                block_list.append((block, result["score"]))
        return block_list
    def delete_index(self, index_name):
        """Drop the named Neo4j index (no-op if absent) and clear the embedding
        keys from the text collection."""
        try:
            # bug fix: Cypher does not allow index names as query parameters, so the
            # previous 'DROP INDEX $index_name' always failed (and was swallowed).
            # The name is interpolated directly; backticks escape it as an identifier.
            self._query(f"DROP INDEX `{index_name}` IF EXISTS")
        except DatabaseError:   # defensive - IF EXISTS already makes a missing index a no-op
            pass
        self.utils.unset_text_index()
    def _query(self, query, parameters=None):
        """Run a single Cypher query in a fresh session and return the records as
        a list of dicts; raises ValueError on a Cypher syntax error."""
        from neo4j.exceptions import CypherSyntaxError
        parameters = parameters or {}
        with self.driver.session(database='neo4j') as session:
            try:
                data = session.run(query, parameters)
                return [d.data() for d in data]
            except CypherSyntaxError as e:
                raise ValueError(f'Cypher Statement is not valid\n{e}')
class EmbeddingChromaDB:
    """Implements the interface to the ChromaDB vector database.
    ``EmbeddingChromaDB`` implements the interface to ``ChromaDB``. It is used by the
    ``EmbeddingHandler``.
    Parameters
    ----------
    library : object
        A ``Library`` object.
    model : object
        A model object. See :mod:`models` for available models.
    model_name : str, default=None
        Name of the model.
    embedding_dims : int, default=None
        Dimension of the embedding.
    Returns
    -------
    embedding_chromadb : EmbeddingChromaDB
        A new ``EmbeddingChromaDB`` object.
    """
    def __init__(self, library, model=None, model_name=None, embedding_dims=None):
        #
        # General llmware set up code
        #
        # look up model card - either a model object or a model name is required
        if not model and not model_name:
            raise EmbeddingModelNotFoundException("no-model-or-model-name-provided")
        self.library = library
        self.library_name = library.library_name
        self.model = model
        self.model_name = model_name
        self.embedding_dims = embedding_dims
        self.account_name = library.account_name
        # if model passed (not None), then use model name
        if self.model:
            self.model_name = self.model.model_name
            self.embedding_dims = model.embedding_dims
        #
        # ChromaDB instantiation
        #
        # Get environment variables to decide which client to use.
        persistent_path = ChromaDBConfig.get_config('persistent_path')
        host = ChromaDBConfig.get_config('host')
        # Instantiate client: in-memory by default; persistent or HTTP client when
        # configured (host takes precedence if both are set).
        if host is None and persistent_path is None:
            self.client = chromadb.EphemeralClient()
        if persistent_path is not None:
            self.client = chromadb.PersistentClient(path=persistent_path)
        if host is not None:
            self.client = chromadb.HttpClient(host=host,
                                              port=ChromaDBConfig.get_config('port'),
                                              ssl=ChromaDBConfig.get_config('ssl'),
                                              headers=ChromaDBConfig.get_config('headers'))
        collection_name = ChromaDBConfig.get_config('collection')
        # If the collection already exists, it is returned.
        self._collection = self.client.create_collection(name=collection_name, get_or_create=True)
        #
        # Embedding utils
        #
        self.utils = _EmbeddingUtils(library_name=self.library_name,
                                     model_name=self.model_name,
                                     account_name=self.account_name,
                                     db_name="chromadb",
                                     embedding_dims=self.embedding_dims)
    def create_new_embedding(self, doc_ids=None, batch_size=500):
        """Embed the library's text blocks (optionally restricted to doc_ids) and
        add them to the ChromaDB collection in batches of batch_size; returns the
        embedding summary dict produced by _EmbeddingUtils."""
        all_blocks_cursor, num_of_blocks = self.utils.get_blocks_cursor(doc_ids=doc_ids)
        # Initialize a new status
        status = Status(self.library.account_name)
        status.new_embedding_status(self.library.library_name, self.model_name, num_of_blocks)
        embeddings_created = 0
        current_index = 0
        finished = False
        # all_blocks_iter = all_blocks_cursor.pull_one()
        while not finished:
            block_ids, doc_ids, sentences = [], [], []
            # Build the next batch
            for i in range(batch_size):
                block = all_blocks_cursor.pull_one()
                if not block:
                    finished = True
                    break
                text_search = block["text_search"].strip()
                # skip empty text blocks - nothing to embed
                if not text_search or len(text_search) < 1:
                    continue
                block_ids.append(str(block["_id"]))
                doc_ids.append(int(block["doc_ID"]))
                sentences.append(text_search)
            if len(sentences) > 0:
                # Process the batch
                vectors = self.model.embedding(sentences)
                # Insert into ChromaDB - id is "<doc_id>-<block_id>"
                ids = [f'{doc_id}-{block_id}' for doc_id, block_id in zip(doc_ids, block_ids)]
                metadatas = [{'doc_id': doc_id, 'block_id': block_id, 'sentence': sentence}
                             for doc_id, block_id, sentence in zip(doc_ids, block_ids, sentences)]
                # NOTE(review): documents receives the integer doc_ids, not the
                # sentence text - Chroma expects string documents; confirm intended
                self._collection.add(ids=ids,
                                     documents=doc_ids,
                                     embeddings=vectors,
                                     metadatas=metadatas)
                # mark the blocks as embedded in the text collection
                current_index = self.utils.update_text_index(block_ids, current_index)
                # Update statistics
                embeddings_created += len(sentences)
                status.increment_embedding_status(self.library.library_name, self.model_name, len(sentences))
                print(f"update: embedding_handler - ChromaDB - Embeddings Created: {embeddings_created} of {num_of_blocks}")
        embedding_summary = self.utils.generate_embedding_summary(embeddings_created)
        logging.info(f'update: EmbeddingHandler - ChromaDB - embedding_summary - {embedding_summary}')
        return embedding_summary
    def search_index(self, query_embedding_vector, sample_count=10):
        """Nearest-neighbor query against the ChromaDB collection; returns a list
        of (block, distance) pairs resolved through the text collection."""
        block_list = []
        # add one dimension because chroma expects two dimensions - a list of lists
        query_embedding_vector = query_embedding_vector.reshape(1, -1)
        results = self._collection.query(query_embeddings=query_embedding_vector, n_results=sample_count)
        # results fields are batched per query - index [0] selects our single query
        for idx_result, _ in enumerate(results['ids'][0]):
            block_id = results['metadatas'][0][idx_result]['block_id']
            block_result_list = self.utils.lookup_text_index(block_id)
            for block in block_result_list:
                block_list.append((block, results['distances'][0][idx_result]))
        return block_list
    def delete_index(self):
        """Delete the ChromaDB collection and clear the embedding keys from the
        text collection."""
        self.client.delete_collection(self._collection.name)
        self.utils.unset_text_index()
| [
"lancedb.connect"
] | [((12701, 12771), 'llmware.resources.CollectionRetrieval', 'CollectionRetrieval', (['self.library_name'], {'account_name': 'self.account_name'}), '(self.library_name, account_name=self.account_name)\n', (12720, 12771), False, 'from llmware.resources import CollectionRetrieval, CollectionWriter\n'), ((13147, 13217), 'llmware.resources.CollectionRetrieval', 'CollectionRetrieval', (['self.library_name'], {'account_name': 'self.account_name'}), '(self.library_name, account_name=self.account_name)\n', (13166, 13217), False, 'from llmware.resources import CollectionRetrieval, CollectionWriter\n'), ((14234, 14304), 'llmware.resources.CollectionRetrieval', 'CollectionRetrieval', (['self.library_name'], {'account_name': 'self.account_name'}), '(self.library_name, account_name=self.account_name)\n', (14253, 14304), False, 'from llmware.resources import CollectionRetrieval, CollectionWriter\n'), ((14608, 14678), 'llmware.resources.CollectionRetrieval', 'CollectionRetrieval', (['self.library_name'], {'account_name': 'self.account_name'}), '(self.library_name, account_name=self.account_name)\n', (14627, 14678), False, 'from llmware.resources import CollectionRetrieval, CollectionWriter\n'), ((14916, 14983), 'llmware.resources.CollectionWriter', 'CollectionWriter', (['self.library_name'], {'account_name': 'self.account_name'}), '(self.library_name, account_name=self.account_name)\n', (14932, 14983), False, 'from llmware.resources import CollectionRetrieval, CollectionWriter\n'), ((17928, 17960), 'pymilvus.Collection', 'Collection', (['self.collection_name'], {}), '(self.collection_name)\n', (17938, 17960), False, 'from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection\n'), ((18210, 18235), 'llmware.status.Status', 'Status', (['self.account_name'], {}), '(self.account_name)\n', (18216, 18235), False, 'from llmware.status import Status\n'), ((19908, 20005), 'logging.info', 'logging.info', (['"""update: EmbeddingHandler - Milvus - 
embedding_summary - %s"""', 'embedding_summary'], {}), "('update: EmbeddingHandler - Milvus - embedding_summary - %s',\n embedding_summary)\n", (19920, 20005), False, 'import logging\n'), ((21247, 21279), 'pymilvus.Collection', 'Collection', (['self.collection_name'], {}), '(self.collection_name)\n', (21257, 21279), False, 'from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection\n'), ((21317, 21362), 'pymilvus.utility.drop_collection', 'utility.drop_collection', (['self.collection_name'], {}), '(self.collection_name)\n', (21340, 21362), False, 'from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection\n'), ((21371, 21412), 'pymilvus.connections.disconnect', 'connections.disconnect', (['self.milvus_alias'], {}), '(self.milvus_alias)\n', (21393, 21412), False, 'from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection\n'), ((23546, 23632), 'os.path.join', 'os.path.join', (['self.library.embedding_path', 'model_safe_path', '"""embedding_file_faiss"""'], {}), "(self.library.embedding_path, model_safe_path,\n 'embedding_file_faiss')\n", (23558, 23632), False, 'import os\n'), ((24762, 24787), 'llmware.status.Status', 'Status', (['self.account_name'], {}), '(self.account_name)\n', (24768, 24787), False, 'from llmware.status import Status\n'), ((26300, 26340), 'os.path.exists', 'os.path.exists', (['self.embedding_file_path'], {}), '(self.embedding_file_path)\n', (26314, 26340), False, 'import os\n'), ((26476, 26531), 'faiss.write_index', 'faiss.write_index', (['self.index', 'self.embedding_file_path'], {}), '(self.index, self.embedding_file_path)\n', (26493, 26531), False, 'import faiss\n'), ((26628, 26724), 'logging.info', 'logging.info', (['"""update: EmbeddingHandler - FAISS - embedding_summary - %s"""', 'embedding_summary'], {}), "('update: EmbeddingHandler - FAISS - embedding_summary - %s',\n embedding_summary)\n", (26640, 26724), False, 'import 
logging\n'), ((27727, 27767), 'os.path.exists', 'os.path.exists', (['self.embedding_file_path'], {}), '(self.embedding_file_path)\n', (27741, 27767), False, 'import os\n'), ((31634, 31667), 'llmware.status.Status', 'Status', (['self.library.account_name'], {}), '(self.library.account_name)\n', (31640, 31667), False, 'from llmware.status import Status\n'), ((34008, 34106), 'logging.info', 'logging.info', (['"""update: EmbeddingHandler - Lancedb - embedding_summary - %s"""', 'embedding_summary'], {}), "('update: EmbeddingHandler - Lancedb - embedding_summary - %s',\n embedding_summary)\n", (34020, 34106), False, 'import logging\n'), ((38394, 38430), 'pinecone.Index', 'pinecone.Index', (['self.collection_name'], {}), '(self.collection_name)\n', (38408, 38430), False, 'import pinecone\n'), ((38642, 38675), 'llmware.status.Status', 'Status', (['self.library.account_name'], {}), '(self.library.account_name)\n', (38648, 38675), False, 'from llmware.status import Status\n'), ((40547, 40646), 'logging.info', 'logging.info', (['"""update: EmbeddingHandler - Pinecone - embedding_summary - %s"""', 'embedding_summary'], {}), "('update: EmbeddingHandler - Pinecone - embedding_summary - %s',\n embedding_summary)\n", (40559, 40646), False, 'import logging\n'), ((41212, 41245), 'pinecone.delete_index', 'pinecone.delete_index', (['index_name'], {}), '(index_name)\n', (41233, 41245), False, 'import pinecone\n'), ((43697, 43729), 'pymongo.MongoClient', 'MongoClient', (['self.connection_uri'], {}), '(self.connection_uri)\n', (43708, 43729), False, 'from pymongo import MongoClient\n'), ((45570, 45603), 'llmware.status.Status', 'Status', (['self.library.account_name'], {}), '(self.library.account_name)\n', (45576, 45603), False, 'from llmware.status import Status\n'), ((48174, 48276), 'logging.info', 'logging.info', (['"""update: EmbeddingHandler - Mongo Atlas - embedding_summary - %s"""', 'embedding_summary'], {}), "('update: EmbeddingHandler - Mongo Atlas - embedding_summary - %s',\n 
embedding_summary)\n", (48186, 48276), False, 'import logging\n'), ((51989, 52057), 'redis.Redis', 'redis.Redis', ([], {'host': 'redis_host', 'port': 'redis_port', 'decode_responses': '(True)'}), '(host=redis_host, port=redis_port, decode_responses=True)\n', (52000, 52057), False, 'import redis\n'), ((54393, 54426), 'llmware.status.Status', 'Status', (['self.library.account_name'], {}), '(self.library.account_name)\n', (54399, 54426), False, 'from llmware.status import Status\n'), ((56835, 56931), 'logging.info', 'logging.info', (['"""update: EmbeddingHandler - Redis - embedding_summary - %s"""', 'embedding_summary'], {}), "('update: EmbeddingHandler - Redis - embedding_summary - %s',\n embedding_summary)\n", (56847, 56931), False, 'import logging\n'), ((57066, 57098), 'numpy.array', 'np.array', (['query_embedding_vector'], {}), '(query_embedding_vector)\n', (57074, 57098), True, 'import numpy as np\n'), ((60948, 60981), 'llmware.status.Status', 'Status', (['self.library.account_name'], {}), '(self.library.account_name)\n', (60954, 60981), False, 'from llmware.status import Status\n'), ((63210, 63307), 'logging.info', 'logging.info', (['"""update: EmbeddingHandler - Qdrant - embedding_summary - %s"""', 'embedding_summary'], {}), "('update: EmbeddingHandler - Qdrant - embedding_summary - %s',\n embedding_summary)\n", (63222, 63307), False, 'import logging\n'), ((66808, 66940), 'psycopg.connect', 'psycopg.connect', ([], {'host': 'postgres_host', 'port': 'postgres_port', 'dbname': 'postgres_db_name', 'user': 'postgres_user_name', 'password': 'postgres_pw'}), '(host=postgres_host, port=postgres_port, dbname=\n postgres_db_name, user=postgres_user_name, password=postgres_pw)\n', (66823, 66940), False, 'import psycopg\n'), ((67084, 67110), 'pgvector.psycopg.register_vector', 'register_vector', (['self.conn'], {}), '(self.conn)\n', (67099, 67110), False, 'from pgvector.psycopg import register_vector\n'), ((69675, 69708), 'llmware.status.Status', 'Status', 
(['self.library.account_name'], {}), '(self.library.account_name)\n', (69681, 69708), False, 'from llmware.status import Status\n'), ((75178, 75278), 'logging.info', 'logging.info', (['"""update: EmbeddingHandler - PG_Vector - embedding_summary - %s"""', 'embedding_summary'], {}), "('update: EmbeddingHandler - PG_Vector - embedding_summary - %s',\n embedding_summary)\n", (75190, 75278), False, 'import logging\n'), ((76214, 76246), 'numpy.array', 'np.array', (['query_embedding_vector'], {}), '(query_embedding_vector)\n', (76222, 76246), True, 'import numpy as np\n'), ((77477, 77577), 'logging.info', 'logging.info', (['"""update: embedding_handler - PG Vector - table dropped - %s"""', 'self.collection_name'], {}), "('update: embedding_handler - PG Vector - table dropped - %s',\n self.collection_name)\n", (77489, 77577), False, 'import logging\n'), ((79367, 79396), 'llmware.configs.Neo4jConfig.get_config', 'Neo4jConfig.get_config', (['"""uri"""'], {}), "('uri')\n", (79389, 79396), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((79412, 79442), 'llmware.configs.Neo4jConfig.get_config', 'Neo4jConfig.get_config', (['"""user"""'], {}), "('user')\n", (79434, 79442), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((79462, 79496), 'llmware.configs.Neo4jConfig.get_config', 'Neo4jConfig.get_config', (['"""password"""'], {}), "('password')\n", (79484, 79496), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((79516, 79550), 'llmware.configs.Neo4jConfig.get_config', 'Neo4jConfig.get_config', (['"""database"""'], {}), "('database')\n", (79538, 79550), False, 'from 
llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((82554, 82587), 'llmware.status.Status', 'Status', (['self.library.account_name'], {}), '(self.library.account_name)\n', (82560, 82587), False, 'from llmware.status import Status\n'), ((85166, 85267), 'logging.info', 'logging.info', (['f"""update: EmbeddingHandler - Neo4j - embedding_summary - {embedding_summary}"""'], {}), "(\n f'update: EmbeddingHandler - Neo4j - embedding_summary - {embedding_summary}'\n )\n", (85178, 85267), False, 'import logging\n'), ((88197, 88241), 'llmware.configs.ChromaDBConfig.get_config', 'ChromaDBConfig.get_config', (['"""persistent_path"""'], {}), "('persistent_path')\n", (88222, 88241), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((88257, 88290), 'llmware.configs.ChromaDBConfig.get_config', 'ChromaDBConfig.get_config', (['"""host"""'], {}), "('host')\n", (88282, 88290), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((88919, 88958), 'llmware.configs.ChromaDBConfig.get_config', 'ChromaDBConfig.get_config', (['"""collection"""'], {}), "('collection')\n", (88944, 88958), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((89708, 89741), 'llmware.status.Status', 'Status', (['self.library.account_name'], {}), '(self.library.account_name)\n', (89714, 89741), False, 'from llmware.status import Status\n'), ((91769, 91873), 'logging.info', 'logging.info', (['f"""update: EmbeddingHandler - ChromaDB - embedding_summary - {embedding_summary}"""'], {}), "(\n f'update: 
EmbeddingHandler - ChromaDB - embedding_summary - {embedding_summary}'\n )\n", (91781, 91873), False, 'import logging\n'), ((6766, 6817), 'llmware.exceptions.UnsupportedEmbeddingDatabaseException', 'UnsupportedEmbeddingDatabaseException', (['embedding_db'], {}), '(embedding_db)\n', (6803, 6817), False, 'from llmware.exceptions import UnsupportedEmbeddingDatabaseException, EmbeddingModelNotFoundException, DependencyNotInstalledException\n'), ((9261, 9282), 're.sub', 're.sub', (['"""\\\\W+"""', '""""""', 's'], {}), "('\\\\W+', '', s)\n", (9267, 9282), False, 'import re\n'), ((13837, 13904), 'llmware.resources.CollectionWriter', 'CollectionWriter', (['self.library_name'], {'account_name': 'self.account_name'}), '(self.library_name, account_name=self.account_name)\n', (13853, 13904), False, 'from llmware.resources import CollectionRetrieval, CollectionWriter\n'), ((16292, 16358), 'llmware.exceptions.EmbeddingModelNotFoundException', 'EmbeddingModelNotFoundException', (['"""no-model-or-model-name-provided"""'], {}), "('no-model-or-model-name-provided')\n", (16323, 16358), False, 'from llmware.exceptions import UnsupportedEmbeddingDatabaseException, EmbeddingModelNotFoundException, DependencyNotInstalledException\n'), ((17198, 17242), 'pymilvus.utility.has_collection', 'utility.has_collection', (['self.collection_name'], {}), '(self.collection_name)\n', (17220, 17242), False, 'from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection\n'), ((22477, 22543), 'llmware.exceptions.EmbeddingModelNotFoundException', 'EmbeddingModelNotFoundException', (['"""no-model-or-model-name-provided"""'], {}), "('no-model-or-model-name-provided')\n", (22508, 22543), False, 'from llmware.exceptions import UnsupportedEmbeddingDatabaseException, EmbeddingModelNotFoundException, DependencyNotInstalledException\n'), ((23846, 23886), 'os.path.exists', 'os.path.exists', (['self.embedding_file_path'], {}), '(self.embedding_file_path)\n', (23860, 23886), False, 
'import os\n'), ((26354, 26389), 'os.remove', 'os.remove', (['self.embedding_file_path'], {}), '(self.embedding_file_path)\n', (26363, 26389), False, 'import os\n'), ((26410, 26451), 'os.path.dirname', 'os.path.dirname', (['self.embedding_file_path'], {}), '(self.embedding_file_path)\n', (26425, 26451), False, 'import os\n'), ((26915, 26957), 'faiss.read_index', 'faiss.read_index', (['self.embedding_file_path'], {}), '(self.embedding_file_path)\n', (26931, 26957), False, 'import faiss\n'), ((27013, 27047), 'numpy.array', 'np.array', (['[query_embedding_vector]'], {}), '([query_embedding_vector])\n', (27021, 27047), True, 'import numpy as np\n'), ((27781, 27816), 'os.remove', 'os.remove', (['self.embedding_file_path'], {}), '(self.embedding_file_path)\n', (27790, 27816), False, 'import os\n'), ((28967, 29033), 'llmware.exceptions.EmbeddingModelNotFoundException', 'EmbeddingModelNotFoundException', (['"""no-model-or-model-name-provided"""'], {}), "('no-model-or-model-name-provided')\n", (28998, 29033), False, 'from llmware.exceptions import UnsupportedEmbeddingDatabaseException, EmbeddingModelNotFoundException, DependencyNotInstalledException\n'), ((29522, 29547), 'lancedb.connect', 'lancedb.connect', (['self.uri'], {}), '(self.uri)\n', (29537, 29547), False, 'import lancedb\n'), ((36444, 36510), 'llmware.exceptions.EmbeddingModelNotFoundException', 'EmbeddingModelNotFoundException', (['"""no-model-or-model-name-provided"""'], {}), "('no-model-or-model-name-provided')\n", (36475, 36510), False, 'from llmware.exceptions import UnsupportedEmbeddingDatabaseException, EmbeddingModelNotFoundException, DependencyNotInstalledException\n'), ((36935, 37000), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'self.api_key', 'environment': 'self.environment'}), '(api_key=self.api_key, environment=self.environment)\n', (36948, 37000), False, 'import pinecone\n'), ((37993, 38016), 'pinecone.list_indexes', 'pinecone.list_indexes', ([], {}), '()\n', (38014, 38016), False, 'import 
pinecone\n'), ((38030, 38128), 'pinecone.create_index', 'pinecone.create_index', (['self.collection_name'], {'dimension': 'self.embedding_dims', 'metric': '"""euclidean"""'}), "(self.collection_name, dimension=self.embedding_dims,\n metric='euclidean')\n", (38051, 38128), False, 'import pinecone\n'), ((38137, 38182), 'pinecone.describe_index', 'pinecone.describe_index', (['self.collection_name'], {}), '(self.collection_name)\n', (38160, 38182), False, 'import pinecone\n'), ((42814, 42880), 'llmware.exceptions.EmbeddingModelNotFoundException', 'EmbeddingModelNotFoundException', (['"""no-model-or-model-name-provided"""'], {}), "('no-model-or-model-name-provided')\n", (42845, 42880), False, 'from llmware.exceptions import UnsupportedEmbeddingDatabaseException, EmbeddingModelNotFoundException, DependencyNotInstalledException\n'), ((47823, 47834), 'time.time', 'time.time', ([], {}), '()\n', (47832, 47834), False, 'import time\n'), ((48545, 48556), 'time.time', 'time.time', ([], {}), '()\n', (48554, 48556), False, 'import time\n'), ((48883, 48896), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (48893, 48896), False, 'import time\n'), ((49457, 49470), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (49467, 49470), False, 'import time\n'), ((53004, 53107), 'logging.info', 'logging.info', (['"""update: embedding_handler - Redis - index already exists - %s"""', 'self.collection_name'], {}), "('update: embedding_handler - Redis - index already exists - %s',\n self.collection_name)\n", (53016, 53107), False, 'import logging\n'), ((60449, 60561), 'logging.info', 'logging.info', (['"""update: embedding_handler - QDRANT - creating new collection - %s"""', 'self.collection_name'], {}), "(\n 'update: embedding_handler - QDRANT - creating new collection - %s',\n self.collection_name)\n", (60461, 60561), False, 'import logging\n'), ((78616, 78682), 'llmware.exceptions.EmbeddingModelNotFoundException', 'EmbeddingModelNotFoundException', 
(['"""no-model-or-model-name-provided"""'], {}), "('no-model-or-model-name-provided')\n", (78647, 78682), False, 'from llmware.exceptions import UnsupportedEmbeddingDatabaseException, EmbeddingModelNotFoundException, DependencyNotInstalledException\n'), ((79826, 79874), 'neo4j.GraphDatabase.driver', 'GraphDatabase.driver', (['uri'], {'auth': '(user, password)'}), '(uri, auth=(user, password))\n', (79846, 79874), False, 'from neo4j import GraphDatabase\n'), ((87552, 87618), 'llmware.exceptions.EmbeddingModelNotFoundException', 'EmbeddingModelNotFoundException', (['"""no-model-or-model-name-provided"""'], {}), "('no-model-or-model-name-provided')\n", (87583, 87618), False, 'from llmware.exceptions import UnsupportedEmbeddingDatabaseException, EmbeddingModelNotFoundException, DependencyNotInstalledException\n'), ((88401, 88427), 'chromadb.EphemeralClient', 'chromadb.EphemeralClient', ([], {}), '()\n', (88425, 88427), False, 'import chromadb\n'), ((88495, 88542), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'persistent_path'}), '(path=persistent_path)\n', (88520, 88542), False, 'import chromadb\n'), ((3807, 3822), 'llmware.configs.LLMWareConfig', 'LLMWareConfig', ([], {}), '()\n', (3820, 3822), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((11183, 11226), 're.sub', 're.sub', (['"""[-@_.\\\\/ ]"""', '""""""', 'self.library_name'], {}), "('[-@_.\\\\/ ]', '', self.library_name)\n", (11189, 11226), False, 'import re\n'), ((11378, 11419), 're.sub', 're.sub', (['"""[-@_.\\\\/ ]"""', '""""""', 'self.model_name'], {}), "('[-@_.\\\\/ ]', '', self.model_name)\n", (11384, 11419), False, 'import re\n'), ((11700, 11743), 're.sub', 're.sub', (['"""[-@_.\\\\/ ]"""', '""""""', 'self.account_name'], {}), "('[-@_.\\\\/ ]', '', self.account_name)\n", (11706, 11743), False, 'import re\n'), ((12261, 12296), 're.sub', 
're.sub', (['"""[@ ]"""', '""""""', 'self.model_name'], {}), "('[@ ]', '', self.model_name)\n", (12267, 12296), False, 'import re\n'), ((16032, 16063), 'llmware.configs.MilvusConfig.get_config', 'MilvusConfig.get_config', (['"""host"""'], {}), "('host')\n", (16055, 16063), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((16098, 16129), 'llmware.configs.MilvusConfig.get_config', 'MilvusConfig.get_config', (['"""port"""'], {}), "('port')\n", (16121, 16129), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((16167, 16201), 'llmware.configs.MilvusConfig.get_config', 'MilvusConfig.get_config', (['"""db_name"""'], {}), "('db_name')\n", (16190, 16201), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((17283, 17392), 'pymilvus.FieldSchema', 'FieldSchema', ([], {'name': '"""block_mongo_id"""', 'dtype': 'DataType.VARCHAR', 'is_primary': '(True)', 'max_length': '(30)', 'auto_id': '(False)'}), "(name='block_mongo_id', dtype=DataType.VARCHAR, is_primary=True,\n max_length=30, auto_id=False)\n", (17294, 17392), False, 'from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection\n'), ((17405, 17459), 'pymilvus.FieldSchema', 'FieldSchema', ([], {'name': '"""block_doc_id"""', 'dtype': 'DataType.INT64'}), "(name='block_doc_id', dtype=DataType.INT64)\n", (17416, 17459), False, 'from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection\n'), ((17477, 17572), 'pymilvus.FieldSchema', 'FieldSchema', ([], {'name': '"""embedding_vector"""', 'dtype': 'DataType.FLOAT_VECTOR', 'dim': 'self.embedding_dims'}), 
"(name='embedding_vector', dtype=DataType.FLOAT_VECTOR, dim=self.\n embedding_dims)\n", (17488, 17572), False, 'from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection\n'), ((17641, 17665), 'pymilvus.CollectionSchema', 'CollectionSchema', (['fields'], {}), '(fields)\n', (17657, 17665), False, 'from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection\n'), ((23463, 23502), 're.sub', 're.sub', (['"""[@\\\\/. ]"""', '""""""', 'self.model_name'], {}), "('[@\\\\/. ]', '', self.model_name)\n", (23469, 23502), False, 'import re\n'), ((28681, 28696), 'llmware.configs.LanceDBConfig', 'LanceDBConfig', ([], {}), '()\n', (28694, 28696), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((31040, 31082), 'llmware.exceptions.DependencyNotInstalledException', 'DependencyNotInstalledException', (['"""pyarrow"""'], {}), "('pyarrow')\n", (31071, 31082), False, 'from llmware.exceptions import UnsupportedEmbeddingDatabaseException, EmbeddingModelNotFoundException, DependencyNotInstalledException\n'), ((36088, 36104), 'llmware.configs.PineconeConfig', 'PineconeConfig', ([], {}), '()\n', (36102, 36104), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((36163, 36179), 'llmware.configs.PineconeConfig', 'PineconeConfig', ([], {}), '()\n', (36177, 36179), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((42399, 42412), 'llmware.configs.MongoConfig', 'MongoConfig', ([], {}), '()\n', (42410, 42412), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, 
PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((47925, 47936), 'time.time', 'time.time', ([], {}), '()\n', (47934, 47936), False, 'import time\n'), ((51884, 51897), 'llmware.configs.RedisConfig', 'RedisConfig', ([], {}), '()\n', (51895, 51897), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((51938, 51951), 'llmware.configs.RedisConfig', 'RedisConfig', ([], {}), '()\n', (51949, 51951), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((53880, 53948), 'redis.commands.search.indexDefinition.IndexDefinition', 'IndexDefinition', ([], {'prefix': '[self.DOC_PREFIX]', 'index_type': 'IndexType.HASH'}), '(prefix=[self.DOC_PREFIX], index_type=IndexType.HASH)\n', (53895, 53948), False, 'from redis.commands.search.indexDefinition import IndexDefinition, IndexType\n'), ((54085, 54187), 'logging.info', 'logging.info', (['"""update: embedding_handler - Redis - creating new index - %s """', 'self.collection_name'], {}), "('update: embedding_handler - Redis - creating new index - %s ',\n self.collection_name)\n", (54097, 54187), False, 'import logging\n'), ((59054, 59079), 'llmware.configs.QdrantConfig.get_config', 'QdrantConfig.get_config', ([], {}), '()\n', (59077, 59079), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((65897, 65913), 'llmware.configs.PostgresConfig', 'PostgresConfig', ([], {}), '()\n', (65911, 65913), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((65957, 65973), 
'llmware.configs.PostgresConfig', 'PostgresConfig', ([], {}), '()\n', (65971, 65973), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((66020, 66036), 'llmware.configs.PostgresConfig', 'PostgresConfig', ([], {}), '()\n', (66034, 66036), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((66088, 66104), 'llmware.configs.PostgresConfig', 'PostgresConfig', ([], {}), '()\n', (66102, 66104), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((66151, 66167), 'llmware.configs.PostgresConfig', 'PostgresConfig', ([], {}), '()\n', (66165, 66167), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((66211, 66227), 'llmware.configs.PostgresConfig', 'PostgresConfig', ([], {}), '()\n', (66225, 66227), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((13537, 13548), 'llmware.util.Utilities', 'Utilities', ([], {}), '()\n', (13546, 13548), False, 'from llmware.util import Utilities\n'), ((24203, 24245), 'faiss.read_index', 'faiss.read_index', (['self.embedding_file_path'], {}), '(self.embedding_file_path)\n', (24219, 24245), False, 'import faiss\n'), ((24413, 24451), 'faiss.IndexFlatL2', 'faiss.IndexFlatL2', (['self.embedding_dims'], {}), '(self.embedding_dims)\n', (24430, 24451), False, 'import faiss\n'), ((25775, 25792), 'numpy.array', 'np.array', (['vectors'], {}), '(vectors)\n', (25783, 25792), True, 'import 
numpy as np\n'), ((31258, 31269), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (31267, 31269), True, 'import pyarrow as pa\n'), ((53182, 53200), 'redis.commands.search.field.NumericField', 'NumericField', (['"""id"""'], {}), "('id')\n", (53194, 53200), False, 'from redis.commands.search.field import TagField, TextField, NumericField\n'), ((53218, 53235), 'redis.commands.search.field.TextField', 'TextField', (['"""text"""'], {}), "('text')\n", (53227, 53235), False, 'from redis.commands.search.field import TagField, TextField, NumericField\n'), ((53253, 53280), 'redis.commands.search.field.TextField', 'TextField', (['"""block_mongo_id"""'], {}), "('block_mongo_id')\n", (53262, 53280), False, 'from redis.commands.search.field import TagField, TextField, NumericField\n'), ((53298, 53322), 'redis.commands.search.field.NumericField', 'NumericField', (['"""block_id"""'], {}), "('block_id')\n", (53310, 53322), False, 'from redis.commands.search.field import TagField, TextField, NumericField\n'), ((53340, 53368), 'redis.commands.search.field.NumericField', 'NumericField', (['"""block_doc_id"""'], {}), "('block_doc_id')\n", (53352, 53368), False, 'from redis.commands.search.field import TagField, TextField, NumericField\n'), ((53386, 53494), 'redis.commands.search.field.VectorField', 'VectorField', (['"""vector"""', '"""FLAT"""', "{'TYPE': 'FLOAT32', 'DIM': self.embedding_dims, 'DISTANCE_METRIC': 'L2'}"], {}), "('vector', 'FLAT', {'TYPE': 'FLOAT32', 'DIM': self.\n embedding_dims, 'DISTANCE_METRIC': 'L2'})\n", (53397, 53494), False, 'from redis.commands.search.field import VectorField\n'), ((55944, 55963), 'numpy.array', 'np.array', (['embedding'], {}), '(embedding)\n', (55952, 55963), True, 'import numpy as np\n'), ((60370, 60431), 'qdrant_client.http.models.VectorParams', 'VectorParams', ([], {'size': 'self.embedding_dims', 'distance': 'Distance.DOT'}), '(size=self.embedding_dims, distance=Distance.DOT)\n', (60382, 60431), False, 'from qdrant_client.http.models import 
Distance, VectorParams, PointStruct\n'), ((62112, 62256), 'qdrant_client.http.models.PointStruct', 'PointStruct', ([], {'id': 'point_id', 'vector': 'embedding', 'payload': "{'block_doc_id': doc_ids[i], 'sentences': sentences[i], 'block_mongo_id':\n block_ids[i]}"}), "(id=point_id, vector=embedding, payload={'block_doc_id': doc_ids\n [i], 'sentences': sentences[i], 'block_mongo_id': block_ids[i]})\n", (62123, 62256), False, 'from qdrant_client.http.models import Distance, VectorParams, PointStruct\n'), ((88681, 88714), 'llmware.configs.ChromaDBConfig.get_config', 'ChromaDBConfig.get_config', (['"""port"""'], {}), "('port')\n", (88706, 88714), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((88766, 88798), 'llmware.configs.ChromaDBConfig.get_config', 'ChromaDBConfig.get_config', (['"""ssl"""'], {}), "('ssl')\n", (88791, 88798), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((88854, 88890), 'llmware.configs.ChromaDBConfig.get_config', 'ChromaDBConfig.get_config', (['"""headers"""'], {}), "('headers')\n", (88879, 88890), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((4693, 4863), 'logging.warning', 'logging.warning', (['"""update: embedding_handler - unable to determine if embeddings have been properly counted and captured. Please check if databases connected."""'], {}), "(\n 'update: embedding_handler - unable to determine if embeddings have been properly counted and captured. 
Please check if databases connected.'\n )\n", (4708, 4863), False, 'import logging\n'), ((24296, 24340), 'llmware.exceptions.DependencyNotInstalledException', 'DependencyNotInstalledException', (['"""faiss-cpu"""'], {}), "('faiss-cpu')\n", (24327, 24340), False, 'from llmware.exceptions import UnsupportedEmbeddingDatabaseException, EmbeddingModelNotFoundException, DependencyNotInstalledException\n'), ((24502, 24546), 'llmware.exceptions.DependencyNotInstalledException', 'DependencyNotInstalledException', (['"""faiss-cpu"""'], {}), "('faiss-cpu')\n", (24533, 24546), False, 'from llmware.exceptions import UnsupportedEmbeddingDatabaseException, EmbeddingModelNotFoundException, DependencyNotInstalledException\n'), ((31173, 31185), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (31183, 31185), True, 'import pyarrow as pa\n'), ((62073, 62085), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (62083, 62085), False, 'import uuid\n'), ((44080, 44091), 'llmware.util.Utilities', 'Utilities', ([], {}), '()\n', (44089, 44091), False, 'from llmware.util import Utilities\n'), ((44427, 44438), 'llmware.util.Utilities', 'Utilities', ([], {}), '()\n', (44436, 44438), False, 'from llmware.util import Utilities\n'), ((57130, 57185), 'redis.commands.search.query.Query', 'Query', (['f"""*=>[KNN {sample_count} @vector $vec as score]"""'], {}), "(f'*=>[KNN {sample_count} @vector $vec as score]')\n", (57135, 57185), False, 'from redis.commands.search.query import Query\n')] |
"""LanceDB vector store."""
import logging
from typing import Any, List, Optional
import numpy as np
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.schema import (
BaseNode,
MetadataMode,
NodeRelationship,
RelatedNodeInfo,
TextNode,
)
from llama_index.core.vector_stores.types import (
MetadataFilters,
BasePydanticVectorStore,
VectorStoreQuery,
VectorStoreQueryResult,
)
from llama_index.core.vector_stores.utils import (
DEFAULT_DOC_ID_KEY,
DEFAULT_TEXT_KEY,
legacy_metadata_dict_to_node,
metadata_dict_to_node,
node_to_metadata_dict,
)
from pandas import DataFrame
import lancedb
# Module-level logger for this vector-store integration.
_logger = logging.getLogger(__name__)
def _to_lance_filter(standard_filters: MetadataFilters) -> Any:
    """Translate standard metadata filters to Lance specific spec.

    Each legacy filter becomes a ``key = value`` clause; string values are
    wrapped in double quotes, all other values are rendered via ``str``.
    The clauses are joined with ``AND``.
    """
    clauses = []
    # Renamed loop variable so the builtin ``filter`` is not shadowed, and
    # f-strings replace the manual string concatenation.
    for metadata_filter in standard_filters.legacy_filters():
        if isinstance(metadata_filter.value, str):
            clauses.append(f'{metadata_filter.key} = "{metadata_filter.value}"')
        else:
            clauses.append(f"{metadata_filter.key} = {metadata_filter.value}")
    return " AND ".join(clauses)
def _to_llama_similarities(results: DataFrame) -> List[float]:
keys = results.keys()
normalized_similarities: np.ndarray
if "score" in keys:
normalized_similarities = np.exp(results["score"] - np.max(results["score"]))
elif "_distance" in keys:
normalized_similarities = np.exp(-results["_distance"])
else:
normalized_similarities = np.linspace(1, 0, len(results))
return normalized_similarities.tolist()
class LanceDBVectorStore(BasePydanticVectorStore):
    """
    The LanceDB Vector Store.
    Stores text and embeddings in LanceDB. The vector store will open an existing
    LanceDB dataset or create the dataset if it does not exist.
    Args:
        uri (str, required): Location where LanceDB will store its files.
        table_name (str, optional): The table name where the embeddings will be stored.
            Defaults to "vectors".
        vector_column_name (str, optional): The vector column name in the table if different from default.
            Defaults to "vector", in keeping with lancedb convention.
        nprobes (int, optional): The number of probes used.
            A higher number makes search more accurate but also slower.
            Defaults to 20.
        refine_factor: (int, optional): Refine the results by reading extra elements
            and re-ranking them in memory.
            Defaults to None
    Raises:
        ImportError: Unable to import `lancedb`.
    Returns:
        LanceDBVectorStore: VectorStore that supports creating LanceDB datasets and
            querying it.
    """
    # Text content is persisted in the table itself, so nodes can be rebuilt
    # from query results without an external docstore.
    stores_text = True
    # Node metadata is flattened to a single level before storage.
    flat_metadata: bool = True
    # Live lancedb connection; kept out of the pydantic field set.
    _connection: Any = PrivateAttr()
    uri: Optional[str]
    table_name: Optional[str]
    vector_column_name: Optional[str]
    nprobes: Optional[int]
    refine_factor: Optional[int]
    text_key: Optional[str]
    doc_id_key: Optional[str]
    def __init__(
        self,
        uri: Optional[str],
        table_name: str = "vectors",
        vector_column_name: str = "vector",
        nprobes: int = 20,
        refine_factor: Optional[int] = None,
        text_key: str = DEFAULT_TEXT_KEY,
        doc_id_key: str = DEFAULT_DOC_ID_KEY,
        **kwargs: Any,
    ) -> None:
        """Init params."""
        # Connect eagerly so every table operation can assume a live connection.
        self._connection = lancedb.connect(uri)
        super().__init__(
            uri=uri,
            table_name=table_name,
            vector_column_name=vector_column_name,
            nprobes=nprobes,
            refine_factor=refine_factor,
            text_key=text_key,
            doc_id_key=doc_id_key,
            **kwargs,
        )
    @property
    def client(self) -> Any:
        """Get client.

        Returns the underlying lancedb connection object.
        """
        return self._connection
    @classmethod
    def from_params(
        cls,
        uri: Optional[str],
        table_name: str = "vectors",
        vector_column_name: str = "vector",
        nprobes: int = 20,
        refine_factor: Optional[int] = None,
        text_key: str = DEFAULT_TEXT_KEY,
        doc_id_key: str = DEFAULT_DOC_ID_KEY,
        **kwargs: Any,
    ) -> "LanceDBVectorStore":
        """Create instance from params."""
        # NOTE(review): ``cls._connection`` here is the class-level PrivateAttr;
        # the value passed below is replaced in __init__, which opens a fresh
        # connection from ``uri``.
        _connection_ = cls._connection
        return cls(
            _connection=_connection_,
            uri=uri,
            table_name=table_name,
            vector_column_name=vector_column_name,
            nprobes=nprobes,
            refine_factor=refine_factor,
            text_key=text_key,
            doc_id_key=doc_id_key,
            **kwargs,
        )
    def add(
        self,
        nodes: List[BaseNode],
        **add_kwargs: Any,
    ) -> List[str]:
        """Add *nodes* (with embeddings already computed) and return their ids."""
        if not nodes:
            _logger.debug("No nodes to add. Skipping the database operation.")
            return []
        data = []
        ids = []
        for node in nodes:
            # Keep the text inside the metadata dict as well, so a node can be
            # fully reconstructed from a single table row.
            metadata = node_to_metadata_dict(
                node, remove_text=False, flat_metadata=self.flat_metadata
            )
            append_data = {
                "id": node.node_id,
                "doc_id": node.ref_doc_id,
                "vector": node.get_embedding(),
                "text": node.get_content(metadata_mode=MetadataMode.NONE),
                "metadata": metadata,
            }
            data.append(append_data)
            ids.append(node.node_id)
        # Append when the table already exists; otherwise create it with a
        # schema inferred from the first batch of rows.
        if self.table_name in self._connection.table_names():
            tbl = self._connection.open_table(self.table_name)
            tbl.add(data)
        else:
            self._connection.create_table(self.table_name, data)
        return ids
    def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
        """
        Delete nodes using with ref_doc_id.
        Args:
            ref_doc_id (str): The doc_id of the document to delete.
        """
        table = self._connection.open_table(self.table_name)
        table.delete('doc_id = "' + ref_doc_id + '"')
    def query(
        self,
        query: VectorStoreQuery,
        **kwargs: Any,
    ) -> VectorStoreQueryResult:
        """Query index for top k most similar nodes."""
        if query.filters is not None:
            # Filters may come from the generic query object or a raw lancedb
            # "where" kwarg, but never both.
            if "where" in kwargs:
                raise ValueError(
                    "Cannot specify filter via both query and kwargs. "
                    "Use kwargs only for lancedb specific items that are "
                    "not supported via the generic query interface."
                )
            where = _to_lance_filter(query.filters)
        else:
            where = kwargs.pop("where", None)
        table = self._connection.open_table(self.table_name)
        lance_query = (
            table.search(
                query=query.query_embedding,
                vector_column_name=self.vector_column_name,
            )
            .limit(query.similarity_top_k)
            .where(where)
            .nprobes(self.nprobes)
        )
        if self.refine_factor is not None:
            # NOTE(review): assumes the lancedb query builder applies
            # refine_factor in place, so re-assignment is unnecessary — confirm
            # against the installed lancedb version.
            lance_query.refine_factor(self.refine_factor)
        results = lance_query.to_pandas()
        nodes = []
        for _, item in results.iterrows():
            try:
                # Preferred path: metadata was written by node_to_metadata_dict
                # and decodes straight back into a node.
                node = metadata_dict_to_node(item.metadata)
                node.embedding = list(item[self.vector_column_name])
            except Exception:
                # deprecated legacy logic for backward compatibility
                _logger.debug(
                    "Failed to parse Node metadata, fallback to legacy logic."
                )
                if "metadata" in item:
                    metadata, node_info, _relation = legacy_metadata_dict_to_node(
                        item.metadata, text_key=self.text_key
                    )
                else:
                    metadata, node_info = {}, {}
                node = TextNode(
                    text=item[self.text_key] or "",
                    id_=item.id,
                    metadata=metadata,
                    start_char_idx=node_info.get("start", None),
                    end_char_idx=node_info.get("end", None),
                    relationships={
                        NodeRelationship.SOURCE: RelatedNodeInfo(
                            node_id=item[self.doc_id_key]
                        ),
                    },
                )
            nodes.append(node)
        return VectorStoreQueryResult(
            nodes=nodes,
            similarities=_to_llama_similarities(results),
            ids=results["id"].tolist(),
        )
| [
"lancedb.connect"
] | [((685, 712), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (702, 712), False, 'import logging\n'), ((2814, 2827), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2825, 2827), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((3431, 3451), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (3446, 3451), False, 'import lancedb\n'), ((1449, 1478), 'numpy.exp', 'np.exp', (["(-results['_distance'])"], {}), "(-results['_distance'])\n", (1455, 1478), True, 'import numpy as np\n'), ((4960, 5045), 'llama_index.core.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(False)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=False, flat_metadata=self.flat_metadata\n )\n', (4981, 5045), False, 'from llama_index.core.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((1359, 1383), 'numpy.max', 'np.max', (["results['score']"], {}), "(results['score'])\n", (1365, 1383), True, 'import numpy as np\n'), ((7233, 7269), 'llama_index.core.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['item.metadata'], {}), '(item.metadata)\n', (7254, 7269), False, 'from llama_index.core.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((7658, 7725), 'llama_index.core.vector_stores.utils.legacy_metadata_dict_to_node', 'legacy_metadata_dict_to_node', (['item.metadata'], {'text_key': 'self.text_key'}), '(item.metadata, text_key=self.text_key)\n', (7686, 7725), False, 'from llama_index.core.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((8211, 8257), 'llama_index.core.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 
'item[self.doc_id_key]'}), '(node_id=item[self.doc_id_key])\n', (8226, 8257), False, 'from llama_index.core.schema import BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode\n')] |
# Copyright [2024] [Holosun ApS]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ..logging import setup_logging
from ..utils.config import load_config_key
# NOTE(review): this rebinds the name ``logging`` to the project logger
# returned by setup_logging(), shadowing the stdlib ``logging`` module for
# the rest of this module.
logging = setup_logging()
def _pick_supabase(vector_name, embeddings):
    """Build a SupabaseVectorStore for *vector_name* from SUPABASE_URL/SUPABASE_KEY."""
    from supabase import Client, create_client
    from langchain.vectorstores import SupabaseVectorStore
    from ..database.database import setup_supabase

    logging.debug(f"Initiating Supabase store: {vector_name}")
    setup_supabase(vector_name)

    # init embedding and vector store
    supabase_url = os.getenv('SUPABASE_URL')
    supabase_key = os.getenv('SUPABASE_KEY')
    logging.debug(f"Supabase URL: {supabase_url} vector_name: {vector_name}")

    supabase: Client = create_client(supabase_url, supabase_key)
    vectorstore = SupabaseVectorStore(supabase,
                                      embeddings,
                                      table_name=vector_name,
                                      query_name=f'match_documents_{vector_name}')
    logging.debug("Chose Supabase")
    return vectorstore


def _pick_cloudsql(vector_name, embeddings):
    """Build a CloudSQL PGVector store; connection from PGVECTOR_CONNECTION_STRING."""
    from langchain.vectorstores.pgvector import PGVector

    logging.debug("Inititaing CloudSQL pgvector")
    # https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/pgvector
    CONNECTION_STRING = os.environ.get("PGVECTOR_CONNECTION_STRING")
    # postgresql://brainuser:password@10.24.0.3:5432/brain

    from ..database.database import get_vector_size
    vector_size = get_vector_size(vector_name)
    # PGVector reads the embedding dimension from this environment variable.
    os.environ["PGVECTOR_VECTOR_SIZE"] = str(vector_size)

    vectorstore = PGVector(connection_string=CONNECTION_STRING,
                          embedding_function=embeddings,
                          collection_name=vector_name,
                          #pre_delete_collection=True # for testing purposes
                          )
    logging.debug("Chose CloudSQL")
    return vectorstore


def _pick_alloydb(vector_name, embeddings):
    """Build an AlloyDBVectorStore from the 'alloydb_config' entry in llm_config.yaml."""
    from langchain_google_alloydb_pg import AlloyDBEngine, AlloyDBVectorStore
    from google.cloud.alloydb.connector import IPTypes
    from ..database.alloydb import create_alloydb_table

    alloydb_config = load_config_key(
        'alloydb_config',
        vector_name=vector_name,
        filename="config/llm_config.yaml"
    )
    if alloydb_config is None:
        logging.error("No alloydb_config was found")

    ALLOYDB_DB = os.environ.get("ALLOYDB_DB")
    if ALLOYDB_DB is None:
        logging.error(f"Could not locate ALLOYDB_DB environment variable for {vector_name}")
    logging.info(f"ALLOYDB_DB environment variable found for {vector_name} - {ALLOYDB_DB}")

    logging.info("Inititaing AlloyDB Langchain")
    engine = AlloyDBEngine.from_instance(
        project_id=alloydb_config["project_id"],
        region=alloydb_config["region"],
        cluster=alloydb_config["cluster"],
        instance=alloydb_config["instance"],
        database=alloydb_config.get("database") or ALLOYDB_DB,
        ip_type=alloydb_config.get("ip_type") or IPTypes.PRIVATE
    )
    # Make sure the embeddings table exists before handing it to the store.
    create_alloydb_table(vector_name, engine)
    logging.info("Chose AlloyDB")
    vectorstore = AlloyDBVectorStore.create_sync(
        engine=engine,
        table_name=vector_name,
        embedding_service=embeddings,
        metadata_columns=["source"]
        #metadata_columns=["source", "eventTime"]
    )
    return vectorstore


def _pick_lancedb(vector_name, embeddings):
    """Open (or create and seed) the LanceDB table for *vector_name* in LANCEDB_BUCKET."""
    from ..patches.langchain.lancedb import LanceDB
    import lancedb

    LANCEDB_BUCKET = os.environ.get("LANCEDB_BUCKET")
    if LANCEDB_BUCKET is None:
        logging.error(f"Could not locate LANCEDB_BUCKET environment variable for {vector_name}")
    logging.info(f"LANCEDB_BUCKET environment variable found for {vector_name} - {LANCEDB_BUCKET}")

    db = lancedb.connect(LANCEDB_BUCKET)
    logging.info(f"LanceDB Tables: {db.table_names()} using {LANCEDB_BUCKET}")
    logging.info(f"Opening LanceDB table: {vector_name} using {LANCEDB_BUCKET}")
    try:
        table = db.open_table(vector_name)
    except FileNotFoundError as err:
        logging.info(f"{err} - Could not open table for {vector_name} - creating new table")
        # Seed the new table with one placeholder row so the schema exists.
        init = f"Creating new table for {vector_name}"
        table = db.create_table(
            vector_name,
            data=[
                {
                    "vector": embeddings.embed_query(init),
                    "text": init,
                    "id": "1",
                }
            ],
            mode="overwrite",
        )
    logging.info(f"Inititaing LanceDB object for {vector_name} using {LANCEDB_BUCKET}")
    vectorstore = LanceDB(
        connection=table,
        embedding=embeddings,
    )
    logging.info(f"Chose LanceDB for {vector_name} using {LANCEDB_BUCKET}")
    return vectorstore


def pick_vectorstore(vs_str, vector_name, embeddings):
    """Return an initialised langchain vectorstore for the requested backend.

    Args:
        vs_str: Backend identifier: 'supabase', 'cloudsql', 'alloydb' or 'lancedb'.
        vector_name: Name of the table/collection that holds the vectors.
        embeddings: Embedding function used to encode documents and queries.

    Returns:
        A langchain-compatible vectorstore instance.

    Raises:
        NotImplementedError: If *vs_str* names an unsupported backend.
    """
    logging.debug('Picking vectorstore')
    pickers = {
        'supabase': _pick_supabase,
        'cloudsql': _pick_cloudsql,
        'alloydb': _pick_alloydb,
        'lancedb': _pick_lancedb,
    }
    picker = pickers.get(vs_str)
    if picker is None:
        # Fixed message: this function selects vectorstores (was "No llm implemented").
        raise NotImplementedError(f'No vectorstore implemented for {vs_str}')
    return picker(vector_name, embeddings)
| [
"lancedb.connect"
] | [((1190, 1215), 'os.getenv', 'os.getenv', (['"""SUPABASE_URL"""'], {}), "('SUPABASE_URL')\n", (1199, 1215), False, 'import os\n'), ((1239, 1264), 'os.getenv', 'os.getenv', (['"""SUPABASE_KEY"""'], {}), "('SUPABASE_KEY')\n", (1248, 1264), False, 'import os\n'), ((1376, 1417), 'supabase.create_client', 'create_client', (['supabase_url', 'supabase_key'], {}), '(supabase_url, supabase_key)\n', (1389, 1417), False, 'from supabase import Client, create_client\n'), ((1441, 1555), 'langchain.vectorstores.SupabaseVectorStore', 'SupabaseVectorStore', (['supabase', 'embeddings'], {'table_name': 'vector_name', 'query_name': 'f"""match_documents_{vector_name}"""'}), "(supabase, embeddings, table_name=vector_name,\n query_name=f'match_documents_{vector_name}')\n", (1460, 1555), False, 'from langchain.vectorstores import SupabaseVectorStore\n'), ((2064, 2108), 'os.environ.get', 'os.environ.get', (['"""PGVECTOR_CONNECTION_STRING"""'], {}), "('PGVECTOR_CONNECTION_STRING')\n", (2078, 2108), False, 'import os\n'), ((2365, 2474), 'langchain.vectorstores.pgvector.PGVector', 'PGVector', ([], {'connection_string': 'CONNECTION_STRING', 'embedding_function': 'embeddings', 'collection_name': 'vector_name'}), '(connection_string=CONNECTION_STRING, embedding_function=embeddings,\n collection_name=vector_name)\n', (2373, 2474), False, 'from langchain.vectorstores.pgvector import PGVector\n'), ((3162, 3190), 'os.environ.get', 'os.environ.get', (['"""ALLOYDB_DB"""'], {}), "('ALLOYDB_DB')\n", (3176, 3190), False, 'import os\n'), ((3985, 4117), 'langchain_google_alloydb_pg.AlloyDBVectorStore.create_sync', 'AlloyDBVectorStore.create_sync', ([], {'engine': 'engine', 'table_name': 'vector_name', 'embedding_service': 'embeddings', 'metadata_columns': "['source']"}), "(engine=engine, table_name=vector_name,\n embedding_service=embeddings, metadata_columns=['source'])\n", (4015, 4117), False, 'from langchain_google_alloydb_pg import AlloyDBEngine, AlloyDBVectorStore\n'), ((4421, 4453), 
'os.environ.get', 'os.environ.get', (['"""LANCEDB_BUCKET"""'], {}), "('LANCEDB_BUCKET')\n", (4435, 4453), False, 'import os\n'), ((4708, 4739), 'lancedb.connect', 'lancedb.connect', (['LANCEDB_BUCKET'], {}), '(LANCEDB_BUCKET)\n', (4723, 4739), False, 'import lancedb\n')] |
import uuid
from ragna.core import Config, Document, PackageRequirement, Requirement, Source
from ._vector_database import VectorDatabaseSourceStorage
class LanceDB(VectorDatabaseSourceStorage):
    """[LanceDB vector database](https://lancedb.com/)

    !!! info "Required packages"

        - `chromadb>=0.4.13`
        - `lancedb>=0.2`
        - `pyarrow`
    """

    @classmethod
    def requirements(cls) -> list[Requirement]:
        """Return the base requirements plus the LanceDB-specific packages."""
        return [
            *super().requirements(),
            PackageRequirement("lancedb>=0.2"),
            PackageRequirement(
                "pyarrow",
                # See https://github.com/apache/arrow/issues/38167
                exclude_modules=["__dummy__"],
            ),
        ]

    # Name of the table column that stores the embedded text vectors.
    _VECTOR_COLUMN_NAME = "embedded_text"

    def __init__(self, config: Config) -> None:
        """Open (or create) the local LanceDB database under the cache root."""
        super().__init__(config)

        import lancedb
        import pyarrow as pa

        self._db = lancedb.connect(config.local_cache_root / "lancedb")
        # One table per chat; every table uses this fixed schema.
        self._schema = pa.schema(
            [
                pa.field("id", pa.string()),
                pa.field("document_id", pa.string()),
                pa.field("page_numbers", pa.string()),
                pa.field("text", pa.string()),
                pa.field(
                    self._VECTOR_COLUMN_NAME,
                    pa.list_(pa.float32(), self._embedding_dimensions),
                ),
                pa.field("num_tokens", pa.int32()),
            ]
        )

    def store(
        self,
        documents: list[Document],
        *,
        chat_id: uuid.UUID,
        chunk_size: int = 500,
        chunk_overlap: int = 250,
    ) -> None:
        """Chunk *documents*, embed the chunks, and store them in a per-chat table.

        Args:
            documents: Documents to index.
            chat_id: Chat the documents belong to; used as the table name.
            chunk_size: Target chunk size in tokens.
            chunk_overlap: Token overlap between consecutive chunks.
        """
        table = self._db.create_table(name=str(chat_id), schema=self._schema)

        for document in documents:
            for chunk in self._chunk_pages(
                document.extract_pages(),
                chunk_size=chunk_size,
                chunk_overlap=chunk_overlap,
            ):
                table.add(
                    [
                        {
                            "id": str(uuid.uuid4()),
                            "document_id": str(document.id),
                            "page_numbers": self._page_numbers_to_str(
                                chunk.page_numbers
                            ),
                            "text": chunk.text,
                            self._VECTOR_COLUMN_NAME: self._embedding_function(
                                [chunk.text]
                            )[0],
                            "num_tokens": chunk.num_tokens,
                        }
                    ]
                )

    def retrieve(
        self,
        documents: list[Document],
        prompt: str,
        *,
        chat_id: uuid.UUID,
        chunk_size: int = 500,
        num_tokens: int = 1024,
    ) -> list[Source]:
        """Return the sources most similar to *prompt*, up to *num_tokens* tokens.

        Args:
            documents: Documents eligible as sources (maps ids back to objects).
            prompt: Query text; embedded and used as the search vector.
            chat_id: Chat whose table is searched.
            chunk_size: Chunk size used at store time; drives the query limit.
            num_tokens: Token budget for the returned sources.
        """
        table = self._db.open_table(str(chat_id))

        # We cannot retrieve source by a maximum number of tokens. Thus, we estimate how
        # many sources we have to query. We overestimate by a factor of two to avoid
        # retrieving to few sources and needed to query again.
        limit = int(num_tokens * 2 / chunk_size)

        # BUG FIX: previously the prompt was never embedded and search() was
        # called without a query vector, so results were not ranked by
        # similarity to the prompt. Embed the prompt and pass it as the query.
        results = (
            table.search(
                self._embedding_function([prompt])[0],
                vector_column_name=self._VECTOR_COLUMN_NAME,
            )
            .limit(limit)
            .to_arrow()
        )

        document_map = {str(document.id): document for document in documents}
        return self._take_sources_up_to_max_tokens(
            (
                Source(
                    id=result["id"],
                    document=document_map[result["document_id"]],
                    # For some reason adding an empty string during store() results
                    # in this field being None. Thus, we need to parse it back here.
                    # TODO: See if there is a configuration option for this
                    location=result["page_numbers"] or "",
                    content=result["text"],
                    num_tokens=result["num_tokens"],
                )
                for result in results.to_pylist()
            ),
            max_tokens=num_tokens,
        )
| [
"lancedb.connect"
] | [((892, 944), 'lancedb.connect', 'lancedb.connect', (["(config.local_cache_root / 'lancedb')"], {}), "(config.local_cache_root / 'lancedb')\n", (907, 944), False, 'import lancedb\n'), ((503, 537), 'ragna.core.PackageRequirement', 'PackageRequirement', (['"""lancedb>=0.2"""'], {}), "('lancedb>=0.2')\n", (521, 537), False, 'from ragna.core import Config, Document, PackageRequirement, Requirement, Source\n'), ((551, 611), 'ragna.core.PackageRequirement', 'PackageRequirement', (['"""pyarrow"""'], {'exclude_modules': "['__dummy__']"}), "('pyarrow', exclude_modules=['__dummy__'])\n", (569, 611), False, 'from ragna.core import Config, Document, PackageRequirement, Requirement, Source\n'), ((3496, 3669), 'ragna.core.Source', 'Source', ([], {'id': "result['id']", 'document': "document_map[result['document_id']]", 'location': "(result['page_numbers'] or '')", 'content': "result['text']", 'num_tokens': "result['num_tokens']"}), "(id=result['id'], document=document_map[result['document_id']],\n location=result['page_numbers'] or '', content=result['text'],\n num_tokens=result['num_tokens'])\n", (3502, 3669), False, 'from ragna.core import Config, Document, PackageRequirement, Requirement, Source\n'), ((1024, 1035), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (1033, 1035), True, 'import pyarrow as pa\n'), ((1078, 1089), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (1087, 1089), True, 'import pyarrow as pa\n'), ((1133, 1144), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (1142, 1144), True, 'import pyarrow as pa\n'), ((1180, 1191), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (1189, 1191), True, 'import pyarrow as pa\n'), ((1396, 1406), 'pyarrow.int32', 'pa.int32', ([], {}), '()\n', (1404, 1406), True, 'import pyarrow as pa\n'), ((1295, 1307), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (1305, 1307), True, 'import pyarrow as pa\n'), ((2072, 2084), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2082, 2084), False, 'import uuid\n')] |