Binary Quantization (compression)
Added in v1.23. BQ is available for the `flat` index type from v1.23 onwards, and for the `hnsw` index type from v1.24 onwards.
Binary quantization (BQ) is a vector compression technique that reduces the memory footprint of stored vectors.
To use BQ, enable it as shown below and add data to the collection.
Additional information
- How to set the index type
Simple BQ configuration
Each collection can be configured to use BQ compression. BQ must be enabled at collection creation time, before data is added to it.
This can be done by setting the `vector_index_config` of the collection to enable BQ compression.
- Python (v4)
- Python (v3)
- JS/TS (Beta)
- JS/TS
- Go
- Java
# Python client v4: enable BQ by attaching a quantizer to the vector index.
import weaviate.classes.config as wc

client.collections.create(
    name="YourCollection",
    vectorizer_config=wc.Configure.Vectorizer.text2vec_openai(),
    vector_index_config=wc.Configure.VectorIndex.flat(
        # The presence of a quantizer enables BQ; no `enabled` flag is needed.
        quantizer=wc.Configure.VectorIndex.Quantizer.bq()
    ),
)
client.close()
# Python client v3: enable BQ via the raw class definition.
class_definition = {
    "class": "YourCollection",
    "vectorizer": "text2vec-openai",  # Can be any vectorizer
    "vectorIndexType": "flat",
    "vectorIndexConfig": {
        # In the v3 API, BQ is switched on with the `enabled` flag.
        "bq": {
            "enabled": True,
        },
    },
    # Remainder not shown
}
client.schema.create_class(class_definition)
// JS/TS client (beta): enable BQ by attaching a quantizer to the vector index.
import { configure } from 'weaviate-client';

// The presence of a quantizer on the index configuration enables BQ compression.
const indexWithBQ = configure.vectorIndex.hnsw({
  quantizer: configure.vectorIndex.quantizer.bq(),
});

const collection = await client.collections.create({
  name: collectionName,
  vectorizer: [
    configure.namedVectorizer("default", { vectorIndexConfig: indexWithBQ }),
  ],
});
// JS/TS client (legacy): enable BQ via the raw class definition.
async function enableBQ() {
  const collectionDefinition = {
    class: 'YourCollection',
    vectorizer: 'text2vec-openai', // Can be any vectorizer
    vectorIndexType: 'flat',
    vectorIndexConfig: {
      // In this API, BQ is switched on with the `enabled` flag.
      bq: { enabled: true },
    },
    // Remainder not shown
  };
  const creator = client.schema.classCreator().withClass(collectionDefinition);
  const result = await creator.do();
  console.log(result);
}
await enableBQ();
simple_bq := map[string]interface{}{
"enabled": true,
}
class := &models.Class{
Class: "YourCollection",
Vectorizer: "text2vec-openai",
VectorIndexConfig: map[string]interface{}{
"bq": simple_bq,
},
// Remainder not shown
}
err = client.Schema().ClassCreator().
WithClass(class).Do(context.Background())
if err != nil {
log.Fatalf("create class: %v", err)
}
// Coming soon
BQ with custom settings
The following parameters are available for BQ compression, under `vectorIndexConfig`:
Parameter | Type | Default | Details |
---|---|---|---|
bq : enabled | boolean | false | Enable BQ. Weaviate uses binary quantization (BQ) compression when true . The Python client v4 does not use the enabled parameter. To enable BQ with the v4 client, set a quantizer in the collection definition. |
bq : rescoreLimit | integer | -1 | The minimum number of candidates to fetch before rescoring. |
bq : cache | boolean | false | Whether to use the vector cache. |
vectorCacheMaxObjects | integer | 1e12 | Maximum number of objects in the memory cache. By default, this limit is set to one trillion (1e12 ) objects when a new collection is created. For sizing recommendations, see Vector cache considerations. |
For example:
- Python (v4)
- Python (v3)
- JS/TS (Beta)
- JS/TS
- Go
- Java
# Python client v4: BQ with custom settings on a flat index.
import weaviate.classes.config as wc

client.collections.create(
    name="YourCollection",
    vectorizer_config=wc.Configure.Vectorizer.text2vec_openai(),
    vector_index_config=wc.Configure.VectorIndex.flat(
        distance_metric=wc.VectorDistances.COSINE,
        vector_cache_max_objects=100000,  # Cache size (used if `cache` enabled)
        quantizer=wc.Configure.VectorIndex.Quantizer.bq(
            rescore_limit=200,  # Minimum candidates to fetch before rescoring
            cache=True,  # Default: False
        ),
    ),
)
client.close()
# Python client v3: BQ with custom settings via the raw class definition.
class_definition = {
    "class": "YourCollection",
    "vectorizer": "text2vec-openai",  # Can be any vectorizer
    "vectorIndexType": "flat",
    "vectorIndexConfig": {
        "bq": {
            "enabled": True,
            "rescoreLimit": 200,  # Minimum candidates to fetch before rescoring
            "cache": True,  # Default: False
        },
        "vectorCacheMaxObjects": 100000,  # Cache size (used if `cache` enabled)
    },
    # Remainder not shown
}
client.schema.create_class(class_definition)
// JS/TS client (beta): BQ with custom settings.
import { configure } from 'weaviate-client';

// Quantizer options: cache compressed vectors and rescore at least 200 candidates.
const bqQuantizer = configure.vectorIndex.quantizer.bq({
  cache: true, // Enable caching
  rescoreLimit: 200, // The minimum number of candidates to fetch before rescoring
});

const collection = await client.collections.create({
  name: collectionName,
  vectorizer: [
    configure.namedVectorizer("default", {
      vectorIndexConfig: configure.vectorIndex.hnsw({
        quantizer: bqQuantizer,
        vectorCacheMaxObjects: 100000, // Cache size (used if `cache` enabled)
      }),
    }),
  ],
});
// JS/TS client (legacy): BQ with custom settings via the raw class definition.
async function bqWithOptions() {
  const collectionDefinition = {
    class: 'YourCollection',
    vectorizer: 'text2vec-openai', // Can be any vectorizer
    vectorIndexType: 'flat',
    vectorIndexConfig: {
      bq: {
        enabled: true,
        rescoreLimit: 200, // Minimum candidates to fetch before rescoring
        cache: true, // Default: false
      },
      vectorCacheMaxObjects: 100000, // Cache size (used if `cache` enabled)
    },
    // Remainder not shown
  };
  const creator = client.schema.classCreator().withClass(collectionDefinition);
  const result = await creator.do();
  console.log(result);
}
await bqWithOptions();
custom_bq := map[string]interface{}{
"enabled": true,
"rescoreLimit": 200,
"cache": true,
}
class := &models.Class{
Class: "YourCollection",
Vectorizer: "text2vec-openai",
VectorIndexConfig: map[string]interface{}{
"bq": custom_bq,
"vectorCacheMaxObjects": 100_000,
},
// Remainder not shown
}
err = client.Schema().ClassCreator().
WithClass(class).Do(context.Background())
if err != nil {
log.Fatalf("create class: %v", err)
}
// Coming soon
Multiple vectors
Weaviate collections support multiple, named vectors. Each vector is independent: each vector space has its own index, its own compression settings, and its own vectorizer. This means you can create vectors for different properties, use different vectorization models, and apply different distance metrics to the same object.
You do not have to use multiple vectors in your collections, but if you do, you need to adjust your queries to specify a target vector for vector or hybrid queries.
Similarly, compression must be enabled independently for each vector. The procedure varies slightly by client language, but in each case the idea is the same. Each vector is independent and can use PQ, BQ, or no compression.
Related pages
Questions and feedback
If you have any questions or feedback, let us know in our user forum.