Allow OpenAI API key to be stored in Streamlit secrets as well

main
Gustav von Zitzewitz 1 year ago
parent 20deaa79f4
commit 3ce0e8a292

1
.gitignore vendored

@ -1,4 +1,3 @@
data data
__pycache__ __pycache__
.streamlit/secrets.toml
.env .env

@ -15,6 +15,6 @@ This is an app that lets you ask questions about any data source by leveraging
## Good to know ## Good to know
- As default context this git repository is taken so you can directly start asking questions about its functionality without choosing your own data source. - As default context this git repository is taken so you can directly start asking questions about its functionality without choosing your own data source.
- To run locally or deploy somewhere, execute `cp .env.template .env` and set necessary keys in the newly created secrets file. Other options are manually setting environment variables, or creating a `.streamlit/secrets.toml` file and storing credentials there. - To run locally or deploy somewhere, execute `cp .env.template .env` and set credentials in the newly created .env file. Other options are manually setting system environment variables, or storing them in `.streamlit/secrets.toml`.
- Your data won't load? Feel free to open an Issue or PR and contribute! - Your data won't load? Feel free to open an Issue or PR and contribute!
- Yes, Chad in `DataChad` refers to the well-known [meme](https://www.google.com/search?q=chad+meme) - Yes, Chad in `DataChad` refers to the well-known [meme](https://www.google.com/search?q=chad+meme)

@ -21,7 +21,7 @@ from utils import (
) )
# Page options and header # Page options and header
st.set_option("client.showErrorDetails", True) st.set_option("client.showErrorDetails", False)
st.set_page_config( st.set_page_config(
page_title=APP_NAME, page_icon=PAGE_ICON, initial_sidebar_state="expanded" page_title=APP_NAME, page_icon=PAGE_ICON, initial_sidebar_state="expanded"
) )

@ -1,10 +1,5 @@
from pathlib import Path from pathlib import Path
from dotenv import load_dotenv
# loads environment variables
load_dotenv()
APP_NAME = "DataChad" APP_NAME = "DataChad"
MODEL = "gpt-3.5-turbo" MODEL = "gpt-3.5-turbo"
PAGE_ICON = "🤖" PAGE_ICON = "🤖"

@ -7,6 +7,7 @@ import sys
import deeplake import deeplake
import openai import openai
import streamlit as st import streamlit as st
from dotenv import load_dotenv
from langchain.callbacks import get_openai_callback from langchain.callbacks import get_openai_callback
from langchain.chains import ConversationalRetrievalChain from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI from langchain.chat_models import ChatOpenAI
@ -30,6 +31,9 @@ from langchain.vectorstores import DeepLake
from constants import APP_NAME, DATA_PATH, MODEL, PAGE_ICON from constants import APP_NAME, DATA_PATH, MODEL, PAGE_ICON
# loads environment variables
load_dotenv()
logger = logging.getLogger(APP_NAME) logger = logging.getLogger(APP_NAME)
@ -55,7 +59,11 @@ configure_logger(0)
def authenticate(openai_api_key, activeloop_token, activeloop_org_name): def authenticate(openai_api_key, activeloop_token, activeloop_org_name):
# Validate all credentials are set and correct # Validate all credentials are set and correct
# Check for env variables to enable local dev and deployments with shared credentials # Check for env variables to enable local dev and deployments with shared credentials
openai_api_key = openai_api_key or os.environ.get("OPENAI_API_KEY") openai_api_key = (
openai_api_key
or os.environ.get("OPENAI_API_KEY")
or st.secrets.get("OPENAI_API_KEY")
)
activeloop_token = ( activeloop_token = (
activeloop_token activeloop_token
or os.environ.get("ACTIVELOOP_TOKEN") or os.environ.get("ACTIVELOOP_TOKEN")
@ -238,7 +246,7 @@ def get_chain(data_source):
vector_store = setup_vector_store(data_source) vector_store = setup_vector_store(data_source)
retriever = vector_store.as_retriever() retriever = vector_store.as_retriever()
# Search params "fetch_k" and "k" define how many documents are pulled from the hub # Search params "fetch_k" and "k" define how many documents are pulled from the hub
# and selected after the document matching to build the context # and selected after the document matching to build the context
# that is fed to the model together with your prompt # that is fed to the model together with your prompt
search_kwargs = { search_kwargs = {
"maximal_marginal_relevance": True, "maximal_marginal_relevance": True,

Loading…
Cancel
Save