text2vec-openai
Introduction
The text2vec-openai module allows you to use OpenAI embeddings directly in the Weaviate vector search engine as a vectorization module. When you create a Weaviate class that is set to use this module, it will automatically vectorize your data using OpenAI's text-embedding-ada-002 model (the legacy Ada, Babbage, Curie, and Davinci models are also supported).
- Note: this module uses a third-party API and may incur costs.
- Note: make sure to check the OpenAI pricing page before vectorizing large amounts of data.
- Note: Weaviate automatically parallelizes requests to the OpenAI API when using the batch endpoint; see the previous note.
- Note: Check out the demo dataset.
How to enable
Request an OpenAI API key via their website.
Weaviate Cloud Service
This module is enabled by default on the WCS.
Weaviate open source
You can find an example Docker Compose file below, which will spin up Weaviate with the OpenAI module.
---
version: '3.4'
services:
  weaviate:
    image: semitechnologies/weaviate:1.17.2
    restart: on-failure:0
    ports:
      - "8080:8080"
    environment:
      QUERY_DEFAULTS_LIMIT: 20
      AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: 'true'
      PERSISTENCE_DATA_PATH: "./data"
      DEFAULT_VECTORIZER_MODULE: text2vec-openai
      ENABLE_MODULES: text2vec-openai
      OPENAI_APIKEY: sk-foobar # request a key on openai.com; setting this parameter is optional, you can also provide the API key at runtime
      CLUSTER_HOSTNAME: 'node1'
...
- Note: you can also use the Weaviate configuration tool to create a Weaviate setup with this module.
- Note: Starting with v1.11.0, the OPENAI_APIKEY variable is optional; you can instead provide the key at insert/query time as an HTTP header.
How to configure
In your Weaviate schema, you must define how you want this module to vectorize your data. If you are new to Weaviate schemas, you might want to check out the quickstart tutorial on the Weaviate schema first.
For example, the following schema configuration will set Weaviate to vectorize the Document class with text2vec-openai using the text-embedding-ada-002 model.
{
  "classes": [
    {
      "class": "Document",
      "description": "A class called document",
      "vectorizer": "text2vec-openai",
      "moduleConfig": {
        "text2vec-openai": {
          "model": "ada",
          "modelVersion": "002",
          "type": "text"
        }
      },
      "properties": [
        {
          "dataType": [
            "text"
          ],
          "description": "Content that will be vectorized",
          "moduleConfig": {
            "text2vec-openai": {
              "skip": false,
              "vectorizePropertyName": false
            }
          },
          "name": "content"
        }
      ]
    }
  ]
}
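If you manage your schema programmatically, the same configuration can be applied from code. Below is a minimal sketch using the Python client, assuming a local Weaviate instance at http://localhost:8080; the class definition mirrors the example above:

import weaviate

client = weaviate.Client("http://localhost:8080")

# Class definition matching the schema example above
document_class = {
    "class": "Document",
    "description": "A class called document",
    "vectorizer": "text2vec-openai",
    "moduleConfig": {
        "text2vec-openai": {
            "model": "ada",
            "modelVersion": "002",
            "type": "text"
        }
    },
    "properties": [
        {
            "dataType": ["text"],
            "description": "Content that will be vectorized",
            "name": "content"
        }
    ]
}

client.schema.create_class(document_class)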
How to use
- If the OpenAI API key is not set in the text2vec-openai module, you can set the API key at query time by adding it to the HTTP header: X-OpenAI-Api-Key: <openai-api-key>.
- Using this module will enable GraphQL vector search parameters in Weaviate. They can be found here.
Example
- GraphQL
- Python
- JavaScript
- Go
- Java
- Curl
{
  Get {
    Publication(
      nearText: {
        concepts: ["fashion"],
        distance: 0.6 # prior to v1.14 use "certainty" instead of "distance"
        moveAwayFrom: {
          concepts: ["finance"],
          force: 0.45
        },
        moveTo: {
          concepts: ["haute couture"],
          force: 0.85
        }
      }
    ) {
      name
      _additional {
        certainty # only supported if distance==cosine
        distance  # always supported
      }
    }
  }
}
import weaviate

client = weaviate.Client(
    url="http://localhost:8080",
    additional_headers={
        "X-OpenAI-Api-Key": "<THE-KEY>"
    }
)

nearText = {
    "concepts": ["fashion"],
    "distance": 0.6,  # prior to v1.14 use "certainty" instead of "distance"
    "moveAwayFrom": {
        "concepts": ["finance"],
        "force": 0.45
    },
    "moveTo": {
        "concepts": ["haute couture"],
        "force": 0.85
    }
}

result = (
    client.query
    .get("Publication", "name")
    .with_additional(["certainty", "distance"])  # note that certainty is only supported if distance==cosine
    .with_near_text(nearText)
    .do()
)

print(result)
const weaviate = require('weaviate-client');

const client = weaviate.client({
  scheme: 'http',
  host: 'localhost:8080',
  headers: {'X-OpenAI-Api-Key': '<THE-KEY>'},
});

client.graphql
  .get()
  .withClassName('Publication')
  .withFields('name _additional{certainty distance}') // note that certainty is only supported if distance==cosine
  .withNearText({
    concepts: ['fashion'],
    distance: 0.6, // prior to v1.14 use certainty instead of distance
    moveAwayFrom: {
      concepts: ['finance'],
      force: 0.45
    },
    moveTo: {
      concepts: ['haute couture'],
      force: 0.85
    }
  })
  .do()
  .then(console.log)
  .catch(console.error);
package main

import (
  "context"
  "fmt"

  "github.com/weaviate/weaviate-go-client/v4/weaviate"
  "github.com/weaviate/weaviate-go-client/v4/weaviate/graphql"
)

func main() {
  cfg := weaviate.Config{
    Host:    "localhost:8080",
    Scheme:  "http",
    Headers: map[string]string{"X-OpenAI-Api-Key": "<THE-KEY>"},
  }
  client := weaviate.New(cfg)

  className := "Publication"

  name := graphql.Field{Name: "name"}
  _additional := graphql.Field{
    Name: "_additional", Fields: []graphql.Field{
      {Name: "certainty"}, // only supported if distance==cosine
      {Name: "distance"},  // always supported
    },
  }

  concepts := []string{"fashion"}
  distance := float32(0.6)
  moveAwayFrom := &graphql.MoveParameters{
    Concepts: []string{"finance"},
    Force:    0.45,
  }
  moveTo := &graphql.MoveParameters{
    Concepts: []string{"haute couture"},
    Force:    0.85,
  }
  nearText := client.GraphQL().NearTextArgBuilder().
    WithConcepts(concepts).
    WithDistance(distance). // use WithCertainty(certainty) prior to v1.14
    WithMoveTo(moveTo).
    WithMoveAwayFrom(moveAwayFrom)

  ctx := context.Background()
  result, err := client.GraphQL().Get().
    WithClassName(className).
    WithFields(name, _additional).
    WithNearText(nearText).
    Do(ctx)
  if err != nil {
    panic(err)
  }
  fmt.Printf("%v", result)
}
package technology.semi.weaviate;

import technology.semi.weaviate.client.Config;
import technology.semi.weaviate.client.WeaviateClient;
import technology.semi.weaviate.client.base.Result;
import technology.semi.weaviate.client.v1.graphql.model.GraphQLResponse;
import technology.semi.weaviate.client.v1.graphql.query.argument.NearTextArgument;
import technology.semi.weaviate.client.v1.graphql.query.argument.NearTextMoveParameters;
import technology.semi.weaviate.client.v1.graphql.query.fields.Field;
import java.util.HashMap;
import java.util.Map;

public class App {
  public static void main(String[] args) {
    Map<String, String> headers = new HashMap<String, String>() { {
      put("X-OpenAI-Api-Key", "<THE-KEY>");
    } };
    Config config = new Config("http", "localhost:8080", headers);
    WeaviateClient client = new WeaviateClient(config);

    NearTextMoveParameters moveTo = NearTextMoveParameters.builder()
      .concepts(new String[]{ "haute couture" }).force(0.85f).build();

    NearTextMoveParameters moveAway = NearTextMoveParameters.builder()
      .concepts(new String[]{ "finance" }).force(0.45f)
      .build();

    NearTextArgument nearText = client.graphQL().arguments().nearTextArgBuilder()
      .concepts(new String[]{ "fashion" })
      .distance(0.6f) // use .certainty(0.7f) prior to v1.14
      .moveTo(moveTo)
      .moveAwayFrom(moveAway)
      .build();

    Field name = Field.builder().name("name").build();
    Field _additional = Field.builder()
      .name("_additional")
      .fields(new Field[]{
        Field.builder().name("certainty").build(), // only supported if distance==cosine
        Field.builder().name("distance").build(),  // always supported
      }).build();

    Result<GraphQLResponse> result = client.graphQL().get()
      .withClassName("Publication")
      .withFields(name, _additional)
      .withNearText(nearText)
      .run();

    if (result.hasErrors()) {
      System.out.println(result.getError());
      return;
    }
    System.out.println(result.getResult());
  }
}
$ echo '{
  "query": "{
    Get {
      Publication(
        nearText: {
          concepts: [\"fashion\"],
          distance: 0.6, # use certainty instead of distance prior to v1.14
          moveAwayFrom: {
            concepts: [\"finance\"],
            force: 0.45
          },
          moveTo: {
            concepts: [\"haute couture\"],
            force: 0.85
          }
        }
      ) {
        name
        _additional {
          certainty # only supported if distance==cosine
          distance # always supported
        }
      }
    }
  }"
}' | curl \
    -X POST \
    -H 'Content-Type: application/json' \
    -H "X-OpenAI-Api-Key: <THE-KEY>" \
    -d @- \
    http://localhost:8080/v1/graphql
Try out this GraphQL example in the Weaviate Console.
Additional information
Available models
OpenAI has multiple models available with different trade-offs. All the models offered by OpenAI can be used within Weaviate. Note that the more dimensions a model produces, the larger your data footprint will be. To estimate the total size of your dataset use this calculation.
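As a hedged sketch of such an estimate (assuming each vector dimension is stored as a 4-byte float32; the object count below is hypothetical):

# Rough estimate of the raw vector storage footprint of a dataset.
# Assumes 4 bytes (float32) per dimension; object count is hypothetical.
num_objects = 1_000_000
dimensions = 1536  # text-embedding-ada-002 produces 1536-dimensional vectors
bytes_per_dim = 4

total_bytes = num_objects * dimensions * bytes_per_dim
print(f"~{total_bytes / 1024**3:.1f} GiB of raw vector data")  # ~5.7 GiB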
The default model is text-embedding-ada-002, but you can also specify the model in your schema. An example as part of a class definition:
{
  "classes": [
    {
      "class": "Document",
      "vectorizer": "text2vec-openai",
      "moduleConfig": {
        "text2vec-openai": {
          "model": "ada",
          "modelVersion": "002",
          "type": "text"
        }
      }
    }
  ]
}
Legacy models
- For document embeddings you can choose one of the following models: ada, babbage, curie, davinci.
- For code embeddings you can choose one of the following models: ada, babbage.
In the moduleConfig inside a class, you need to set three values:
- model: one of the models mentioned above, e.g. babbage.
- modelVersion: the version string for the chosen model, e.g. 001.
- type: text or code.
Example (as part of a class definition):
{
  "classes": [
    {
      "class": "Document",
      "vectorizer": "text2vec-openai",
      "moduleConfig": {
        "text2vec-openai": {
          "model": "babbage",
          "modelVersion": "001",
          "type": "text"
        }
      }
    }
  ]
}
OpenAI Rate Limits
Because you will be getting embeddings based on your own API key, you will be dealing with the rate limits applied to your account. If you have a low rate limit set, Weaviate will output the error message generated by the OpenAI API. You can request an increase to your rate limit by emailing OpenAI directly at support@openai.com, describing your use case with Weaviate.
Throttle the import inside your application
If you run into rate limits, you can also decide to throttle the import in your application, e.g. in Python or Go using the Weaviate client.
- Python
- Go
import time
from weaviate import Client


def configure_batch(client: Client, batch_size: int, batch_target_rate: int):
    """
    Configure the weaviate client's batch so it creates objects at `batch_target_rate`.

    Parameters
    ----------
    client : Client
        The Weaviate client instance.
    batch_size : int
        The batch size.
    batch_target_rate : int
        The batch target rate as # of objects per second.
    """

    def callback(batch_results: dict) -> None:
        # you could print batch errors here
        time_took_to_create_batch = batch_size * (client.batch.creation_time / client.batch.recommended_num_objects)
        time.sleep(
            max(batch_size / batch_target_rate - time_took_to_create_batch + 1, 0)
        )

    client.batch.configure(
        batch_size=batch_size,
        timeout_retries=5,
        callback=callback,
    )
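For completeness, a hypothetical usage of the helper above (the documents list and its contents are placeholders):

# Hypothetical usage: throttle to ~50 objects per second in batches of 100
configure_batch(client, batch_size=100, batch_target_rate=50)

with client.batch as batch:
    for doc in documents:  # `documents` is a hypothetical list of dicts to import
        batch.add_data_object(doc, class_name="Document")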
package main

import (
  "context"
  "time"

  "github.com/weaviate/weaviate-go-client/v4/weaviate"
  "github.com/weaviate/weaviate/entities/models"
)

var (
  // adjust to your liking
  targetRatePerMin = 600
  batchSize        = 50
)

var cfg = weaviate.Config{
  Host:   "localhost:8080",
  Scheme: "http",
}

var client = weaviate.New(cfg)

// replace those 10000 empty objects with your actual data
var objects = make([]*models.Object, 10000)

func main() {
  // we aim to send one batch every tickInterval.
  tickInterval := time.Minute * time.Duration(batchSize) / time.Duration(targetRatePerMin)
  t := time.NewTicker(tickInterval)

  for i := 0; i < len(objects); i += batchSize {
    // create a fresh batch
    batch := client.Batch().ObjectsBatcher()

    // add batchSize objects to the batch
    for j := i; j < i+batchSize; j++ {
      batch = batch.WithObject(objects[j])
    }

    // send off batch
    res, err := batch.Do(context.Background())
    // TODO: inspect result for individual errors
    _ = res
    // TODO: check request error
    _ = err

    // we wait for the next tick. If the previous batch took longer than
    // tickInterval, we won't need to wait, effectively making this an
    // unthrottled import.
    <-t.C
  }
}
The current rate limit will be displayed in the error message, e.g.:
{
  "message": "Rate limit reached for requests. Limit: 600.000000 / min. Current: 1024.000000 / min. Contact support@openai.com if you continue to have issues."
}
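If you prefer to react to this error rather than pre-throttle, one option is a simple retry with exponential backoff. The sketch below is hypothetical: it assumes the rate-limit text shown above surfaces in the raised exception's message, and the helper name and wait parameters are placeholders:

import time

def with_backoff(fn, max_retries=5, base_wait=1.0):
    """Hypothetical helper: retry `fn` with exponential backoff on rate-limit errors."""
    for attempt in range(max_retries):
        try:
            return fn()
        except Exception as e:
            # assumption: OpenAI's rate-limit message is passed through by Weaviate
            if "Rate limit reached" not in str(e):
                raise
            time.sleep(base_wait * 2 ** attempt)  # wait 1s, 2s, 4s, ...
    raise RuntimeError("still rate limited after retries")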
More resources
If you can't find the answer to your question here, please look at the:
- Frequently Asked Questions. Or,
- Knowledge base of old issues. Or,
- For questions: Stack Overflow. Or,
- For issues: GitHub. Or,
- Ask your question in the Slack channel: Slack.