from pathlib import Path

from langchain.llms import Replicate
from langchain.prompts import ChatPromptTemplate
from langchain.pydantic_v1 import BaseModel
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
from langchain.utilities import SQLDatabase

# Make sure to set REPLICATE_API_TOKEN in your environment.
# Use the llama-2-13b-chat model on Replicate.
replicate_id = "meta/llama-2-13b-chat:f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d"  # noqa: E501
llm = Replicate(
    model=replicate_id,
    model_kwargs={"temperature": 0.01, "max_length": 500, "top_p": 1},
)

# Connect to the SQLite database that sits next to this file.
db_path = Path(__file__).parent / "nba_roster.db"
rel = db_path.relative_to(Path.cwd())
db_string = f"sqlite:///{rel}"
db = SQLDatabase.from_uri(db_string, sample_rows_in_table_info=0)


def get_schema(_):
    return db.get_table_info()


def run_query(query):
    return db.run(query)


template_query = """Based on the table schema below, write a SQL query that would answer the user's question:
{schema}

Question: {question}
SQL Query:"""  # noqa: E501
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "Given an input question, convert it to a SQL query. No pre-amble."),
        ("human", template_query),
    ]
)

# First stage: turn the question plus schema into a SQL query; the stop
# sequence cuts generation before the model writes a "SQLResult:" section
# of its own.
sql_response = (
    RunnablePassthrough.assign(schema=get_schema)
    | prompt
    | llm.bind(stop=["\nSQLResult:"])
    | StrOutputParser()
)

template_response = """Based on the table schema below, question, sql query, and sql response, write a natural language response:
{schema}

Question: {question}
SQL Query: {query}
SQL Response: {response}"""  # noqa: E501
prompt_response = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "Given an input question and SQL response, convert it to a natural "
            "language answer. No pre-amble.",
        ),
        ("human", template_response),
    ]
)


# Supply the input types to the prompt.
class InputType(BaseModel):
    question: str


# Full chain: generate the SQL query, execute it against the database,
# then phrase the result as a natural-language answer.
chain = (
    RunnablePassthrough.assign(query=sql_response).with_types(input_type=InputType)
    | RunnablePassthrough.assign(
        schema=get_schema,
        response=lambda x: db.run(x["query"]),
    )
    | prompt_response
    | llm
)
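

# A minimal usage sketch, assuming REPLICATE_API_TOKEN is exported and
# nba_roster.db is present next to this file; the question below is
# purely illustrative.
if __name__ == "__main__":
    print(chain.invoke({"question": "How many unique teams are there?"}))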