# openai-cookbook/apps/file-q-and-a/nextjs-with-flask-server/server/config.yaml

# ----- PINECONE CONFIG -----
PINECONE_API_KEY: "<your Pinecone API key>"
PINECONE_INDEX: "<your Pinecone Index name>" # dimensions: 1536, metric: cosine similarity
PINECONE_ENV: "<your Pinecone env, e.g. us-west1-gcp>"
# ----- SERVER PORT -----
SERVER_PORT: "8080"
# ----- OPENAI CONFIG -----
EMBEDDINGS_MODEL: "text-embedding-ada-002"
GENERATIVE_MODEL: "gpt-3.5-turbo" # use gpt-4 for better results
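# Must match the dimensions of the Pinecone index above (1536 for text-embedding-ada-002)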
EMBEDDING_DIMENSIONS: 1536
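# Size of each text chunk a file is split into before embedding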
TEXT_EMBEDDING_CHUNK_SIZE: 200
# This is the minimum cosine similarity score that a file must have with the search query to be considered relevant
# This is an arbitrary value; vary or remove it depending on the diversity of your dataset
COSINE_SIM_THRESHOLD: 0.7
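# Maximum number of text chunks sent to the embeddings API in a single batch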
MAX_TEXTS_TO_EMBED_BATCH_SIZE: 100
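# Maximum number of vectors upserted to Pinecone in a single request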
MAX_PINECONE_VECTORS_TO_UPSERT_PATCH_SIZE: 100
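# For reference, a minimal sketch of how the Flask server might read this file
# (assumes PyYAML is available; the actual loading logic lives in the server code):
#
#   import yaml
#   with open("config.yaml", "r") as f:
#       config = yaml.safe_load(f)
#   pinecone_api_key = config["PINECONE_API_KEY"]
#   embeddings_model = config["EMBEDDINGS_MODEL"]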