Feature/better usability (#6)

* refactor: api, core

* feat: static uploader

* doc: update readme

* fix: mkdir static file

* doc: typo
ChungHwan Han · commit 3ec0cc786c · parent 264467d660

@@ -1,10 +1,4 @@
BOT_NAME=<your-bot-name>
AWS_ACCESS_KEY_ID=***
AWS_SECRET_ACCESS_KEY=***
AWS_REGION=***
AWS_S3_BUCKET=***
WINEDB_HOST=***
WINEDB_PASSWORD=***
OPENAI_API_KEY=***
BING_SEARCH_URL=***
BING_SUBSCRIPTION_KEY=***

.gitignore

@@ -6,4 +6,6 @@ __pycache__/
image/
audio/
video/
dataframe/
dataframe/
static/*

@@ -20,4 +20,6 @@ RUN poetry install --with tools
COPY . .
ENTRYPOINT ["poetry", "run", "python3", "-m", "uvicorn", "main:app", "--host=0.0.0.0", "--port=8000"]
ENV PORT 8000
ENTRYPOINT ["poetry", "run", "serve"]

@@ -43,52 +43,37 @@ We also don't know what tools EVAL will create. Every day, It will create the ri
## Usage
1. S3 Settings
2. environments settings
3. `docker-compose up -d`
### S3
1. Create a bucket.
2. Turn off the "Block all public access" setting for the bucket. ![image](assets/block_public_access.png)
3. Add the following text to Bucket Policy.
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AllowPublicRead",
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::{your-bucket-name}/*"
}
]
}
```
1. environments settings
2. `docker-compose up -d`
### Environment
These environmental variables are essential, so please set them.
You need to write some environment variables in the `.env` file. Refer to [.env.example](.env.example) if you don't know how to format it.
```
BOT_NAME: your custom bot name
OPENAI_API_KEY: openai api key
AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY
AWS_REGION
AWS_S3_BUCKET
```
**Mandatory**
These environment variables are necessary to use the following tools:
If you want to use it, set it up, and if you don't need it, you don't have to set it up.
Mandatory envs are required in order to serve EVAL.
```
SERPAPI_API_KEY: need to append google search tool
BING_SEARCH_URL, BING_SUBSCRIPTION_KEY: need to append bing search tool
```
- `OPENAI_API_KEY` - OpenAI API key
**Optional**
Each optional env has a default value, so you don't need to set it unless you want to change it.
- `PORT` - port (default: 8000)
- `SERVER` - server address (default: http://localhost:8000)
- `LOG_LEVEL` - INFO | DEBUG (default: INFO)
- `BOT_NAME` - give it a name! (default: Orca)
**For More Tools**
Some tools require environment variables. Set these envs depending on which tools you want to use.
- Google search tool
- `SERPAPI_API_KEY`
- Bing search tool
- `BING_SEARCH_URL`
- `BING_SUBSCRIPTION_KEY`
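Before running `docker-compose up -d`, a quick pre-flight check of the mandatory variable can save a failed start. A minimal sketch, illustrative only and not shipped in this commit, assuming python-dotenv is installed as in the project:
```python
# Illustrative pre-flight check (not part of the repo): confirm the one
# mandatory variable is present in the environment or the .env file.
import os

from dotenv import load_dotenv

load_dotenv()
assert os.getenv("OPENAI_API_KEY"), "OPENAI_API_KEY is mandatory to serve EVAL"
```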
## TODO

@@ -1,35 +1,41 @@
from typing import Dict, List, TypedDict
import re
import uvicorn
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
from s3 import upload
from env import settings
from prompts.error import ERROR_PROMPT
from agents.manager import AgentManager
from tools.base import BaseToolSet
from tools.cpu import (
from core.prompts.error import ERROR_PROMPT
from core.agents.manager import AgentManager
from core.tools.base import BaseToolSet
from core.tools.cpu import (
Terminal,
CodeEditor,
RequestsGet,
WineDB,
ExitConversation,
)
from tools.gpu import (
from core.tools.gpu import (
ImageEditing,
InstructPix2Pix,
Text2Image,
VisualQuestionAnswering,
)
from handlers.base import BaseHandler, FileHandler, FileType
from handlers.image import ImageCaptioning
from handlers.dataframe import CsvToDataframe
from core.handlers.base import BaseHandler, FileHandler, FileType
from core.handlers.image import ImageCaptioning
from core.handlers.dataframe import CsvToDataframe
from core.upload import StaticUploader
from logger import logger
app = FastAPI()
app.mount("/static", StaticFiles(directory=StaticUploader.STATIC_DIR), name="static")
uploader = StaticUploader.from_settings(settings)
toolsets: List[BaseToolSet] = [
Terminal(),
@@ -104,6 +110,10 @@ async def command(request: Request) -> Response:
return {
"response": res["output"],
"files": [upload(image) for image in images]
+ [upload(dataframe) for dataframe in dataframes],
"files": [uploader.upload(image) for image in images]
+ [uploader.upload(dataframe) for dataframe in dataframes],
}
def serve():
uvicorn.run("api.main:app", host="0.0.0.0", port=settings["PORT"])
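The hunk above wires in `StaticUploader` unconditionally. Since the AWS settings become optional in `env.py` (further down in this diff), a deployment that still wants S3 uploads could pick the uploader at startup; a hypothetical sketch, not part of this commit:
```python
# Hypothetical startup logic (not in this commit): prefer S3Uploader when all
# AWS settings are present, otherwise fall back to StaticUploader.
from env import settings
from core.upload import S3Uploader, StaticUploader

AWS_KEYS = ("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION", "AWS_S3_BUCKET")

if all(settings.get(key) for key in AWS_KEYS):
    uploader = S3Uploader.from_settings(settings)
else:
    uploader = StaticUploader.from_settings(settings)
```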

@@ -1,15 +1,15 @@
from langchain.chat_models.base import BaseChatModel
from langchain.output_parsers.base import BaseOutputParser
from prompts.input import EVAL_PREFIX, EVAL_SUFFIX
from env import settings
from tools.base import BaseToolSet
from tools.factory import ToolsFactory
from core.prompts.input import EVAL_PREFIX, EVAL_SUFFIX
from core.tools.base import BaseToolSet
from core.tools.factory import ToolsFactory
from agents.llm import ChatOpenAI
from agents.chat_agent import ConversationalChatAgent
from agents.parser import EvalOutputParser
from .llm import ChatOpenAI
from .chat_agent import ConversationalChatAgent
from .parser import EvalOutputParser
class AgentBuilder:

@@ -20,7 +20,7 @@ from langchain.schema import (
)
from langchain.tools.base import BaseTool
from prompts.input import EVAL_TOOL_RESPONSE
from core.prompts.input import EVAL_TOOL_RESPONSE
class ConversationalChatAgent(Agent):

@@ -1,14 +1,14 @@
from typing import Dict, Any
from typing import Dict
from langchain.agents.tools import BaseTool
from langchain.agents.agent import Agent, AgentExecutor
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.memory.chat_memory import BaseChatMemory
from tools.base import BaseToolSet
from tools.factory import ToolsFactory
from core.tools.base import BaseToolSet
from core.tools.factory import ToolsFactory
from agents.builder import AgentBuilder
from .builder import AgentBuilder
class AgentManager:

@@ -3,7 +3,7 @@ from typing import Dict
from langchain.output_parsers.base import BaseOutputParser
from prompts.input import EVAL_FORMAT_INSTRUCTIONS
from core.prompts.input import EVAL_FORMAT_INSTRUCTIONS
class EvalOutputParser(BaseOutputParser):

@@ -1,8 +1,8 @@
import pandas as pd
from prompts.file import DATAFRAME_PROMPT
from core.prompts.file import DATAFRAME_PROMPT
from handlers.base import BaseHandler
from .base import BaseHandler
class CsvToDataframe(BaseHandler):

@@ -5,9 +5,9 @@ from transformers import (
BlipForConditionalGeneration,
)
from prompts.file import IMAGE_PROMPT
from core.prompts.file import IMAGE_PROMPT
from handlers.base import BaseHandler
from .base import BaseHandler
class ImageCaptioning(BaseHandler):

@@ -1,4 +1,4 @@
from typing import Optional, Callable, Tuple
from typing import Callable, Tuple
from enum import Enum
from langchain.agents.tools import Tool, BaseTool

@@ -9,7 +9,7 @@ from bs4 import BeautifulSoup
import subprocess
from tools.base import tool, BaseToolSet, ToolScope, SessionGetter
from .base import tool, BaseToolSet, ToolScope, SessionGetter
from logger import logger

@@ -3,7 +3,7 @@ from langchain.agents import load_tools
from langchain.agents.tools import BaseTool
from langchain.llms.base import BaseLLM
from tools.base import BaseToolSet, SessionGetter
from .base import BaseToolSet, SessionGetter
class ToolsFactory:

@@ -23,7 +23,7 @@ from diffusers import EulerAncestralDiscreteScheduler
from utils import get_new_image_name
from logger import logger
from tools.base import tool, BaseToolSet
from .base import tool, BaseToolSet
class MaskFormer(BaseToolSet):

@@ -0,0 +1,2 @@
from .s3 import S3Uploader
from .static import StaticUploader

@@ -0,0 +1,15 @@
from abc import ABC, abstractmethod, abstractstaticmethod
from env import DotEnv
STATIC_DIR = "static"
class AbstractUploader(ABC):
@abstractmethod
def upload(self, filepath: str) -> str:
pass
@abstractstaticmethod
def from_settings(settings: DotEnv) -> "AbstractUploader":
pass
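Any new uploader only has to satisfy this two-method contract. A hypothetical example, assuming the base class lives at `core/upload/base.py` as the relative imports later in this diff suggest:
```python
# Hypothetical uploader satisfying the AbstractUploader contract above.
# "EchoUploader" is made up for illustration and is not part of this commit.
from env import DotEnv
from core.upload.base import AbstractUploader


class EchoUploader(AbstractUploader):
    def upload(self, filepath: str) -> str:
        # Pretend the file is already reachable where it sits on disk.
        return f"file://{filepath}"

    @staticmethod
    def from_settings(settings: DotEnv) -> "EchoUploader":
        return EchoUploader()
```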

@@ -0,0 +1,35 @@
import os
import boto3
from env import DotEnv
from .base import AbstractUploader
class S3Uploader(AbstractUploader):
def __init__(self, accessKey: str, secretKey: str, region: str, bucket: str):
self.accessKey = accessKey
self.secretKey = secretKey
self.region = region
self.bucket = bucket
self.client = boto3.client(
"s3",
aws_access_key_id=self.accessKey,
aws_secret_access_key=self.secretKey,
)
@staticmethod
def from_settings(settings: DotEnv) -> "S3Uploader":
return S3Uploader(
settings["AWS_ACCESS_KEY_ID"],
settings["AWS_SECRET_ACCESS_KEY"],
settings["AWS_REGION"],
settings["AWS_S3_BUCKET"],
)
def get_url(self, object_name: str) -> str:
return f"https://{self.bucket}.s3.{self.region}.amazonaws.com/{object_name}"
def upload(self, filepath: str) -> str:
object_name = os.path.basename(filepath)
self.client.upload_file(filepath, self.bucket, object_name)
return self.get_url(object_name)
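A hypothetical usage sketch (the local path is made up); it assumes the four optional `AWS_*` variables are set and the bucket allows public reads:
```python
# Hypothetical usage of S3Uploader; requires the optional AWS_* settings
# and a publicly readable bucket.
from env import settings
from core.upload import S3Uploader

uploader = S3Uploader.from_settings(settings)
url = uploader.upload("image/example.png")  # made-up local file
# url -> https://<AWS_S3_BUCKET>.s3.<AWS_REGION>.amazonaws.com/example.png
```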

@@ -0,0 +1,25 @@
import os
import shutil
from env import DotEnv
from .base import AbstractUploader
class StaticUploader(AbstractUploader):
STATIC_DIR = "static"
def __init__(self, server: str):
self.server = server
@staticmethod
def from_settings(settings: DotEnv) -> "StaticUploader":
return StaticUploader(settings["SERVER"])
def get_url(self, uploaded_path: str) -> str:
return f"{self.server}/{uploaded_path}"
def upload(self, filepath: str):
upload_path = os.path.join(StaticUploader.STATIC_DIR, filepath)
os.makedirs(os.path.dirname(upload_path), exist_ok=True)
shutil.copy(filepath, upload_path)
return f"{self.server}/{upload_path}"

@@ -9,6 +9,7 @@ services:
context: .
volumes: # if you want to decrease your model download time, use this.
- ../.cache/huggingface/:/root/.cache/huggingface/
- ./static/:/app/static/
ports:
- "8000:8000"
env_file:

@@ -7,30 +7,33 @@ load_dotenv()
class DotEnv(TypedDict):
OPENAI_API_KEY: str
PORT: int
SERVER: str
LOG_LEVEL: str # optional
BOT_NAME: str
AWS_ACCESS_KEY_ID: str
AWS_SECRET_ACCESS_KEY: str
AWS_REGION: str
AWS_S3_BUCKET: str
BOT_NAME: str # optional
AWS_ACCESS_KEY_ID: str # optional
AWS_SECRET_ACCESS_KEY: str # optional
AWS_REGION: str # optional
AWS_S3_BUCKET: str # optional
WINEDB_HOST: str # optional
WINEDB_PASSWORD: str # optional
OPENAI_API_KEY: str
BING_SEARCH_URL: str # optional
BING_SUBSCRIPTION_KEY: str # optional
SERPAPI_API_KEY: str # optional
PORT = int(os.getenv("PORT", 8000))
settings: DotEnv = {
"PORT": PORT,
"SERVER": os.getenv("SERVER", f"http://localhost:{PORT}"),
"OPENAI_API_KEY": os.getenv("OPENAI_API_KEY"),
"LOG_LEVEL": os.getenv("LOG_LEVEL", "INFO"),
"BOT_NAME": os.getenv("BOT_NAME", "Orca"),
"AWS_ACCESS_KEY_ID": os.getenv("AWS_ACCESS_KEY_ID"),
"AWS_SECRET_ACCESS_KEY": os.getenv("AWS_SECRET_ACCESS_KEY"),
"AWS_REGION": os.getenv("AWS_REGION"),
"AWS_S3_BUCKET": os.getenv("AWS_S3_BUCKET"),
"WINEDB_HOST": os.getenv("WINEDB_HOST"),
"WINEDB_PASSWORD": os.getenv("WINEDB_PASSWORD"),
"OPENAI_API_KEY": os.getenv("OPENAI_API_KEY"),
"BING_SEARCH_URL": os.getenv("BING_SEARCH_URL"),
"BING_SUBSCRIPTION_KEY": os.getenv("BING_SUBSCRIPTION_KEY"),
"SERPAPI_API_KEY": os.getenv("SERPAPI_API_KEY"),

@@ -4,7 +4,10 @@ version = "0.1.0"
description = ""
authors = ["Taeho Lee <taeho@corca.ai>", "Chung Hwan Han <hanch@corca.ai>"]
readme = "README.md"
packages = [{include = "api"},{include = "core"}]
[tool.poetry.scripts]
serve = "api.main:serve"
[tool.poetry.dependencies]
python = "^3.10"

s3.py

@@ -1,22 +0,0 @@
import os
import boto3
from env import settings
def upload(file_name: str):
return upload_file(file_name, settings["AWS_S3_BUCKET"])
def upload_file(file_name, bucket, object_name=None):
if object_name is None:
object_name = os.path.basename(file_name)
s3_client = boto3.client(
"s3",
aws_access_key_id=settings["AWS_ACCESS_KEY_ID"],
aws_secret_access_key=settings["AWS_SECRET_ACCESS_KEY"],
)
s3_client.upload_file(file_name, bucket, object_name)
return f"https://{bucket}.s3.{settings['AWS_REGION']}.amazonaws.com/{object_name}"