vector_bench.py (275 lines, 223 loc, 10 KB)
import os
import sys
import argparse
import warnings

import numpy as np
import pandas as pd
from tabulate import tabulate
from datetime import datetime
from sklearn.metrics.pairwise import euclidean_distances, cosine_similarity

# Suppress annoying library warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
os.environ["PYTHONWARNINGS"] = "ignore"

# --- UTILS & BENCHMARKS IMPORTS ---
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from utils.downloader import get_dataset
from benchmarks.core import EngineTaskRunner
from benchmarks.insertion import benchmark_insertion
from benchmarks.search import benchmark_standard_search, benchmark_sequential_search, benchmark_bulk_search
from benchmarks.filtering import benchmark_filtered_search
from benchmarks.concurrency import benchmark_concurrency

# --- Add Local Paths for Custom Engines ---
# Custom/private engine libraries are expected directly in the ./numerable/ folder
abs_numerable = os.path.abspath("./numerable")
if os.path.isdir(abs_numerable) and abs_numerable not in sys.path:
    sys.path.insert(0, abs_numerable)

# Standard Open Source Engines
from engines.chroma_engine import ChromaEngine
from engines.lance_engine import LanceEngine
from engines.qdrant_engine import QdrantEngine
from engines.qdrant_edge_engine import QdrantEdgeEngine
from engines.usearch_engine import USearchEngine
from engines.faiss_engine import FaissEngine

# Optional Custom Engines (Available upon request)
try:
    from engines.memo_engine import MeMoEngine
    _HAS_MEMO = True
except ImportError:
    _HAS_MEMO = False

try:
    from engines.msearch_engine import MSearchEngine
    _HAS_MSEARCH = True
except ImportError:
    _HAS_MSEARCH = False
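# The two guarded imports above are the extension point for additional private engines.
# As a minimal sketch (module and class names below are hypothetical, not part of this
# repo), a new optional engine would follow the same try/except pattern and later be
# appended to engine_configs in main() behind its _HAS_* flag:
#
#     try:
#         from engines.my_engine import MyEngine
#         _HAS_MY_ENGINE = True
#     except ImportError:
#         _HAS_MY_ENGINE = False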
# --- CONFIGURATION ---
TOP_K = 10
NUM_QUERIES = 100

DATASET_PROPS = {
    "sift": {"dim": 128, "metric": "l2"},
    "glove-25": {"dim": 25, "metric": "cosine"},
    "glove-100": {"dim": 100, "metric": "cosine"},
    "nytimes": {"dim": 256, "metric": "cosine"},
    "deep1b": {"dim": 96, "metric": "cosine"},
    "fashion-mnist": {"dim": 784, "metric": "l2"}
}
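# Each DATASET_PROPS entry carries only the vector dimensionality and the distance
# metric ("l2" or "cosine"); get_dataset() is expected to resolve the name to the
# actual data files. A hypothetical extra entry would look like:
#
#     DATASET_PROPS["my-dataset"] = {"dim": 64, "metric": "cosine"}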
def prepare_context_data(num_docs, dataset_name, vectors_full, queries_full, gt_full):
    """Subsets the data and calculates dynamic ground truth for a specific size."""
    props = DATASET_PROPS[dataset_name]
    dim = props["dim"]
    metric = props["metric"]

    print(f"\n[SCALE] Preparing {dataset_name} ({dim}d, {metric}) with {num_docs} vectors...")

    actual_num = min(num_docs, len(vectors_full))
    vectors = vectors_full[:actual_num].astype(np.float32)
    query_vectors = queries_full[:NUM_QUERIES].astype(np.float32)

    # Prepare metadata (shared across all datasets for benchmark consistency)
    texts = [f"{dataset_name} Doc {i}" for i in range(len(vectors))]
    metadatas = []
    for i in range(len(vectors)):
        metadatas.append({
            "idx": i,
            "category": "special" if i % 10 == 0 else "general",
            "topic": dataset_name
        })

    # Standard Search GT
    # If the dataset already came with ground truth (from HDF5), use it if indices are within bounds.
    # Otherwise, recalculate.
    print(f"Calculating/Validating Top-{TOP_K} Ground Truth...")

    # Simple check: if max GT index < actual_num, we can use existing GT
    use_existing_gt = False
    if gt_full:
        max_idx = max([max(s) if s else 0 for s in gt_full[:NUM_QUERIES]])
        if max_idx < actual_num:
            use_existing_gt = True

    if use_existing_gt:
        ground_truths = gt_full[:NUM_QUERIES]
    else:
        # Recalculate based on current subset
        if metric == "l2":
            dists = euclidean_distances(query_vectors, vectors)
            ground_truths = [set(int(i) for i in np.argsort(dists[qi])[:TOP_K]) for qi in range(NUM_QUERIES)]
        else:  # Cosine / IP
            sims = cosine_similarity(query_vectors, vectors)
            ground_truths = [set(int(i) for i in np.argsort(-sims[qi])[:TOP_K]) for qi in range(NUM_QUERIES)]

    # Filtered Search GT (always recalculate as filters are synthetic)
    print("Calculating Filtered Ground Truth...")
    special_indices = [i for i, m in enumerate(metadatas) if m['category'] == "special"]
    special_vectors = vectors[special_indices]

    if metric == "l2":
        all_dists_filt = euclidean_distances(query_vectors, special_vectors)
        filt_gts = []
        for dists in all_dists_filt:
            top_idx = np.argsort(dists)[:TOP_K]
            fgt = set(int(metadatas[special_indices[j]]['idx']) for j in top_idx)
            filt_gts.append(fgt)
    else:
        all_sims_filt = cosine_similarity(query_vectors, special_vectors)
        filt_gts = []
        for sims in all_sims_filt:
            top_idx = np.argsort(-sims)[:TOP_K]
            fgt = set(int(metadatas[special_indices[j]]['idx']) for j in top_idx)
            filt_gts.append(fgt)

    return (vectors, query_vectors, ground_truths, filt_gts, texts, metadatas)
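# The ground-truth sets returned above are what the downstream benchmark scenarios
# score engine results against. As a minimal sketch (illustrative only, not the
# benchmarks' own implementation), Recall@K for one query is the overlap between
# the ids an engine returns and the ground-truth set:
def _example_recall_at_k(returned_ids, gt_set, k=TOP_K):
    """Illustrative only: fraction of the ground-truth set found in the top-k results."""
    return len(set(returned_ids[:k]) & gt_set) / max(len(gt_set), 1)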
def run_dataset_benchmark(dataset_name, scales, engine_configs, working_dir, scenarios):
    """Executes all scales for a specific dataset."""
    props = DATASET_PROPS[dataset_name]
    dim = props["dim"]
    metric = props["metric"]

    print("\n" + "="*80)
    print(f" DATASET: {dataset_name} ({dim} dimensions, {metric} metric)")
    print("="*80)

    try:
        dataset_full = get_dataset(dataset_name, working_dir)
    except Exception as e:
        print(f"Failed to load dataset {dataset_name}: {e}")
        return []

    dataset_results = []

    # Update engine configs with dataset-specific dimension and metric
    dataset_engine_configs = []
    for engine_class, _, engine_name in engine_configs:
        dataset_engine_configs.append((engine_class, (dim, metric), engine_name))

    for scale in scales:
        vectors_f, queries_f, gt_f = dataset_full
        dataset_context = prepare_context_data(scale, dataset_name, vectors_f, queries_f, gt_f)

        for engine_class, engine_args, engine_name in dataset_engine_configs:
            print(f" -> Benchmarking {engine_name} at scale {scale}...")
            runner = EngineTaskRunner(engine_class, engine_args, dataset_context)
            try:
                results = runner.run_full(scenarios, TOP_K)
                for res in results:
                    res["Dataset"] = dataset_name
                    res["Scale"] = scale
                    res["Database"] = engine_name
                    dataset_results.append(res)
            except Exception as e:
                print(f" Error benchmarking {engine_name}: {e}")

    return dataset_results
def main():
    parser = argparse.ArgumentParser(description="Vector Database Benchmarking Orchestrator")
    parser.add_argument("--docs", type=int, default=1000, help="Number of documents (default 1000)")
    parser.add_argument("--full", action="store_true", help="Run full suite: 1000, 10000, 100000 docs")
    parser.add_argument("--datasets", type=str, default="all", help="Comma-separated list of datasets or 'all'")
    parser.add_argument("--output", type=str, default="benchmark_results.md", help="Output markdown filename")
    args = parser.parse_args()

    # Determine scales
    scales = [1000, 10000, 100000] if args.full else [args.docs]

    # Determine datasets
    if args.datasets.lower() == "all":
        datasets_to_run = list(DATASET_PROPS.keys())
    else:
        datasets_to_run = [d.strip() for d in args.datasets.split(",") if d.strip() in DATASET_PROPS]

    if not datasets_to_run:
        print(f"No valid datasets found. Supported: {list(DATASET_PROPS.keys())}")
        return

    print("="*60)
    print(" VECTOR DATABASE MULTI-DATASET ORCHESTRATOR ")
    print(f" Scales: {scales}")
    print(f" Datasets: {datasets_to_run}")
    print("="*60)

    working_dir = os.getcwd()

    # Engines to test (Base Config)
    engine_configs = [
        (ChromaEngine, None, "ChromaDB"),
        (LanceEngine, None, "LanceDB"),
        (QdrantEngine, None, "Qdrant"),
    ]
    if _HAS_MEMO:
        engine_configs.append((MeMoEngine, None, "MeMo"))
    engine_configs.extend([
        (FaissEngine, None, "FAISS"),
        (USearchEngine, None, "USearch"),
        (QdrantEdgeEngine, None, "Qdrant-Edge"),
    ])
    if _HAS_MSEARCH:
        engine_configs.append((MSearchEngine, None, "mSEARCH"))

    scenarios = [
        benchmark_insertion,
        benchmark_standard_search,
        benchmark_sequential_search,
        benchmark_filtered_search,
        benchmark_bulk_search,
        benchmark_concurrency
    ]

    all_results = []
    for dname in datasets_to_run:
        all_results.extend(run_dataset_benchmark(dname, scales, engine_configs, working_dir, scenarios))

    # FINAL SUMMARY
    if not all_results:
        print("No results to report.")
        return

    df = pd.DataFrame(all_results)
    cols = ["Dataset", "Scale", "Database", "Operation", "Time (ms)", "Ops/sec", "Recall@K (%)", "p95 (ms)", "Peak Mem (MB)"]
    df = df[cols]

    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    md_content = "# Vector Database Quality & Performance Multi-Dataset Benchmark\n"
    md_content += f"*Generated: {timestamp}*\n\n"

    for dname in datasets_to_run:
        md_content += f"## Dataset: {dname.upper()}\n"
        meta = DATASET_PROPS[dname]
        md_content += f"**Config**: {meta['dim']} dimensions, {meta['metric']} distance\n\n"
        d_df = df[df["Dataset"] == dname].drop(columns=["Dataset"])
        for scale in scales:
            md_content += f"### Scale: {scale} Documents\n"
            ds_df = d_df[d_df["Scale"] == scale].drop(columns=["Scale"])
            md_content += tabulate(ds_df, headers='keys', tablefmt='github', showindex=False)
            md_content += "\n\n"

    with open(args.output, "w", encoding="utf-8") as f:
        f.write(md_content)

    print("\n" + "="*120)
    print(" VECTOR DATABASE CONSOLIDATED REPORT")
    print("="*120)
    print(tabulate(df, headers='keys', tablefmt='pretty', showindex=False))
    print(f"\nFinal report saved to: {os.path.abspath(args.output)}")


if __name__ == "__main__":
    main()
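# Example invocations, based on the flags defined in main() above:
#
#   python vector_bench.py                               # quick run: 1000 docs, all datasets
#   python vector_bench.py --docs 50000 --datasets sift  # single scale, single dataset
#   python vector_bench.py --full --output results.md    # 1k/10k/100k docs across all datasets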