text2vec-openai

Introduction

The text2vec-openai module allows you to use OpenAI embeddings directly in the Weaviate vector search engine as a vectorization module. When you create a Weaviate class that is set to use this module, it will automatically vectorize your data using OpenAI’s Ada, Babbage, Curie, or Davinci models.

  • Note: make sure to check the OpenAI pricing page before vectorizing large amounts of data.
  • Note: Weaviate automatically parallelizes requests to the OpenAI API when using the batch endpoint, so costs can add up quickly; keep the pricing note above in mind. A minimal batch-import sketch follows this list.
  • Note: check out the demo dataset.
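
For illustration, here is a minimal batch-import sketch with the Python client. It assumes the Document class configured later on this page, and a documents list of strings (an assumption for the example, not part of the module's API):

import weaviate

client = weaviate.Client("http://localhost:8080")

# objects queued here are sent via the batch endpoint; Weaviate vectorizes
# them with parallel requests to the OpenAI API
client.batch.configure(batch_size=100)
with client.batch as batch:
    for doc in documents:  # `documents`: your own list of strings (assumption)
        batch.add_data_object({"content": doc}, "Document")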

How to enable

Request an OpenAI API key via their website.

Weaviate Cloud Service

This module is enabled by default on the WCS.

Weaviate open source

You can find an example Docker-compose file below, which will spin up Weaviate with the OpenAI module.

version: '3.4'
services:
  weaviate:
    image: semitechnologies/weaviate:1.14.1
    restart: on-failure:0
    ports:
     - "8080:8080"
    environment:
      QUERY_DEFAULTS_LIMIT: 20
      AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: 'true'
      PERSISTENCE_DATA_PATH: "./data"
      DEFAULT_VECTORIZER_MODULE: text2vec-openai
      ENABLE_MODULES: text2vec-openai
      OPENAI_APIKEY: sk-foobar # request a key on openai.com; setting this parameter is optional, you can also provide the API key at runtime
      CLUSTER_HOSTNAME: 'node1'
  • Note: you can also use the Weaviate configuration tool to create a Weaviate setup with this module.
  • Note: starting with v1.11.0, the OPENAI_APIKEY variable is optional; you can instead provide the key at insert/query time as an HTTP header, as shown below.
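
For example, with the Python client you can pass the key as an extra header when creating the client (a sketch; reading the key from an OPENAI_APIKEY environment variable is an assumption of this example):

import os
import weaviate

# the X-OpenAI-Api-Key header is forwarded to OpenAI on every request that
# needs a vector, so no key has to be configured in Weaviate itself
client = weaviate.Client(
    url="http://localhost:8080",
    additional_headers={"X-OpenAI-Api-Key": os.environ["OPENAI_APIKEY"]},
)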

How to configure

In your Weaviate schema, you must define how you want this module to vectorize your data. If you are new to Weaviate schemas, you might want to check out the getting started guide on the Weaviate schema first.

The following schema configuration uses the babbage model.

{
  "classes": [
    {
      "class": "Document",
      "description": "A class called document",
      "moduleConfig": {
        "text2vec-openai": {
          "model": "babbage",
          "type": "text"
        }
      },
      "properties": [
        {
          "dataType": [
            "text"
          ],
          "description": "Content that will be vectorized",
          "moduleConfig": {
            "text2vec-openai": {
              "skip": false,
              "vectorizePropertyName": false
            }
          },
          "name": "content"
        }
      ],
      "vectorizer": "text2vec-openai"
    }
  ]
}
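
You can then register this class with any Weaviate client; a minimal sketch with the Python client, mirroring the JSON above:

import weaviate

client = weaviate.Client("http://localhost:8080")

document_class = {
    "class": "Document",
    "description": "A class called document",
    "moduleConfig": {
        "text2vec-openai": {"model": "babbage", "type": "text"}
    },
    "properties": [
        {
            "dataType": ["text"],
            "description": "Content that will be vectorized",
            "name": "content",
        }
    ],
    "vectorizer": "text2vec-openai",
}

client.schema.create_class(document_class)  # creates the class in Weaviate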

How to use

  • When sending a request to Weaviate, you can set the API key at query time: X-OpenAI-Api-Key: <openai-api-key>.
  • New GraphQL vector search parameters made available by this module can be found here.

Example

{
  Get{
    Publication(
      nearText: {
        concepts: ["fashion"],
        distance: 0.6 # prior to v1.14 use "certainty" instead of "distance"
        moveAwayFrom: {
          concepts: ["finance"],
          force: 0.45
        },
        moveTo: {
          concepts: ["haute couture"],
          force: 0.85
        }
      }
    ){
      name
      _additional {
        certainty # only supported if distance==cosine.
        distance  # always supported
      }
    }
  }
}
import weaviate

client = weaviate.Client("http://localhost:8080")

nearText = {
  "concepts": ["fashion"],
  "distance": 0.6, # prior to v1.14 use "certainty" instead of "distance"
  "moveAwayFrom": {
    "concepts": ["finance"],
    "force": 0.45
  },
  "moveTo": {
    "concepts": ["haute couture"],
    "force": 0.85
  }
}

result = (
  client.query
  .get("Publication", ["name", "_additional {certainty distance} "]) # note that certainty is only supported if distance==cosine
  .with_near_text(nearText)
  .do()
)

print(result)
const weaviate = require("weaviate-client");

const client = weaviate.client({
  scheme: 'http',
  host: 'localhost:8080',
});

client.graphql
  .get()
  .withClassName('Publication')
  .withFields('name _additional{certainty distance}') // note that certainty is only supported if distance==cosine
  .withNearText({
    concepts: ["fashion"],
    distance: 0.6, // prior to v1.14 use certainty instead of distance
    moveAwayFrom: {
      concepts: ["finance"],
      force: 0.45
    },
    moveTo: {
      concepts: ["haute couture"],
      force: 0.85
    }
  })
  .do()
  .then(console.log)
  .catch(console.error);
package main

import (
  "context"
  "fmt"

  "github.com/semi-technologies/weaviate-go-client/v4/weaviate"
  "github.com/semi-technologies/weaviate-go-client/v4/weaviate/graphql"
)

func main() {
  cfg := weaviate.Config{
    Host:   "localhost:8080",
    Scheme: "http",
  }
  client := weaviate.New(cfg)

  className := "Publication"

  name := graphql.Field{Name: "name"}
  _additional := graphql.Field{
    Name: "_additional", Fields: []graphql.Field{
      {Name: "certainty"}, // only supported if distance==cosine
      {Name: "distance"},  // always supported
    },
  }

  concepts := []string{"fashion"}
  distance := float32(0.6)
  moveAwayFrom := &graphql.MoveParameters{
    Concepts: []string{"finance"},
    Force:    0.45,
  }
  moveTo := &graphql.MoveParameters{
    Concepts: []string{"haute couture"},
    Force:    0.85,
  }
  nearText := client.GraphQL().NearTextArgBuilder().
    WithConcepts(concepts).
    WithDistance(distance). // use WithCertainty(certainty) prior to v1.14
    WithMoveTo(moveTo).
    WithMoveAwayFrom(moveAwayFrom)

  ctx := context.Background()

  result, err := client.GraphQL().Get().
    WithClassName(className).
    WithFields(name, _additional).
    WithNearText(nearText).
    Do(ctx)

  if err != nil {
    panic(err)
  }
  fmt.Printf("%v", result)
}
package technology.semi.weaviate;

import technology.semi.weaviate.client.Config;
import technology.semi.weaviate.client.WeaviateClient;
import technology.semi.weaviate.client.base.Result;
import technology.semi.weaviate.client.v1.graphql.model.GraphQLResponse;
import technology.semi.weaviate.client.v1.graphql.query.argument.NearTextArgument;
import technology.semi.weaviate.client.v1.graphql.query.argument.NearTextMoveParameters;
import technology.semi.weaviate.client.v1.graphql.query.fields.Field;

public class App {
  public static void main(String[] args) {
    Config config = new Config("http", "localhost:8080");
    WeaviateClient client = new WeaviateClient(config);

    NearTextMoveParameters moveTo = NearTextMoveParameters.builder()
      .concepts(new String[]{ "haute couture" }).force(0.85f).build();

    NearTextMoveParameters moveAway = NearTextMoveParameters.builder()
      .concepts(new String[]{ "finance" }).force(0.45f)
      .build();

    NearTextArgument nearText = client.graphQL().arguments().nearTextArgBuilder()
      .concepts(new String[]{ "fashion" })
      .distance(0.6f) // use .certainty(0.7f) prior to v1.14
      .moveTo(moveTo)
      .moveAwayFrom(moveAway)
      .build();

    Field name = Field.builder().name("name").build();
    Field _additional = Field.builder()
      .name("_additional")
      .fields(new Field[]{
        Field.builder().name("certainty").build(), // only supported if distance==cosine
        Field.builder().name("distance").build(),  // always supported
      }).build();

    Result<GraphQLResponse> result = client.graphQL().get()
      .withClassName("Publication")
      .withFields(name, _additional)
      .withNearText(nearText)
      .run();

    if (result.hasErrors()) {
      System.out.println(result.getError());
      return;
    }
    System.out.println(result.getResult());
  }
}
$ echo '{
  "query": "{
    Get{
      Publication(
        nearText: {
          concepts: [\"fashion\"],
          distance: 0.6, # use certainty instead of distance prior to v1.14
          moveAwayFrom: {
            concepts: [\"finance\"],
            force: 0.45
          },
          moveTo: {
            concepts: [\"haute couture\"],
            force: 0.85
          }
        }
      ){
        name
        _additional {
          certainty # only supported if distance==cosine
          distance  # always supported
        }
      }
    }
  }"
}' | curl \
    -X POST \
    -H 'Content-Type: application/json' \
    -d @- \
    http://localhost:8080/v1/graphql

Additional information

Available models

OpenAI offers multiple models with different trade-offs, and all of them can be used within Weaviate. Note that the more dimensions a model produces, the larger your data footprint will be. To estimate the total size of your dataset, use this calculation.
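
As a rough illustration (assuming vectors are stored as 4-byte floats): davinci produces 12,288-dimensional vectors, so 1,000,000 objects need about 12,288 × 4 B × 1,000,000 ≈ 49 GB of raw vector storage, whereas the 1,024-dimensional ada vectors for the same objects need about 4 GB.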

  • For document embeddings you can choose one of the following models: ada, babbage, curie, davinci.
  • For code embeddings you can choose one of the following models: ada, babbage.

In the moduleConfig inside a class, you need to set two values:

  1. model – one of the models mentioned above. E.g., babbage.
  2. type – text or code.
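
For example, to vectorize source code with the ada code model, the class-level moduleConfig from the schema above would look like this (a sketch following the same pattern):

"moduleConfig": {
  "text2vec-openai": {
    "model": "ada",
    "type": "code"
  }
}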

OpenAI Rate Limits

Because you will be getting embeddings based on your own API key, you will be dealing with rate limits applied to your account. If you have a low rate limit set, Weaviate will output the error message generated by the OpenAI API. You can request to increase your rate limit by emailing OpenAI directly at support@openai.com, describing your use case with Weaviate.

Throttle the import inside your application

If you run into rate limits, you can also decide to throttle the import in your application.

E.g., in Python and Go using the Weaviate client.

from weaviate import Client
import time


def configure_batch(client: Client, batch_size: int, batch_target_rate: int):
    """
    Configure the Weaviate client's batch so it creates objects at `batch_target_rate`.

    Parameters
    ----------
    client : Client
        The Weaviate client instance.
    batch_size : int
        The batch size.
    batch_target_rate : int
        The batch target rate as # objects per second.
    """

    def callback(batch_results: dict) -> None:

        # you could print batch errors here
        time_took_to_create_batch = batch_size * (client.batch.creation_time/client.batch.recommended_num_objects)
        time.sleep(
            max(batch_size/batch_target_rate - time_took_to_create_batch + 1, 0)
        )

    client.batch.configure(
        batch_size=batch_size,
        timeout_retries=5,
        callback=callback,
    )
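
Usage might then look like this (a sketch; the target rate of 10 objects per second is an arbitrary choice):

client = Client("http://localhost:8080")
# throttle batch imports to roughly 10 objects per second
configure_batch(client, batch_size=100, batch_target_rate=10)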

package main

import (
	"context"
	"time"

	"github.com/semi-technologies/weaviate-go-client/v4/weaviate"
	"github.com/semi-technologies/weaviate/entities/models"
)

var (
	// adjust to your liking
	targetRatePerMin = 600
	batchSize        = 50
)

var cfg = weaviate.Config{
	Host:   "localhost:8080",
	Scheme: "http",
}
var client = weaviate.New(cfg)

// replace those 10000 empty objects with your actual data
var objects = make([]*models.Object, 10000)

func main() {
	// we aim to send one batch every tickInterval, e.g. 50/600 min = 5 s
	tickInterval := time.Duration(float64(batchSize) / float64(targetRatePerMin) * float64(time.Minute))
	t := time.NewTicker(tickInterval)

	for i := 0; i < len(objects); i += batchSize {

		// create a fresh batch
		batch := client.Batch().ObjectsBatcher()

		// add batchSize objects to the batch
		for j := i; j < i+batchSize; j++ {
			batch = batch.WithObject(objects[j])
		}

		// send off batch
		res, err := batch.Do(context.Background())
		// TODO: inspect result for individual errors
		_ = res
		// TODO: check request error
		_ = err

		// we wait for the next tick. If the previous batch took longer than
		// tickInterval, we won't need to wait, effectively making this an
		// unthrottled import.
		<-t.C
	}
}

If you exceed your rate limit, the current limit is displayed in the error message, e.g.:

{
  "message": "Rate limit reached for requests. Limit: 600.000000 / min. Current: 1024.000000 / min. Contact support@openai.com if you continue to have issues."
}

More resources

If you can’t find the answer to your question here, please have a look at:

  1. The Frequently Asked Questions.
  2. The knowledge base of old issues.
  3. Stackoverflow, for questions.
  4. Github, for issues.
  5. The Slack channel, where you can ask your question directly.