Mirror of https://github.com/corca-ai/EVAL (synced 2024-10-30 09:20:44 +00:00)

Commit 073b22927a: "feat: init"
.gitignore (vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
__pycache__/
.venv/

.env

image/
dataframe/
env.py (new file, 24 lines)
@@ -0,0 +1,24 @@
import os
from dotenv import load_dotenv
from typing import TypedDict

load_dotenv()


class DotEnv(TypedDict):
    AWS_ACCESS_KEY_ID: str
    AWS_SECRET_ACCESS_KEY: str
    AWS_REGION: str
    AWS_S3_BUCKET: str
    WINEDB_HOST: str
    WINEDB_PASSWORD: str


settings: DotEnv = {
    "AWS_ACCESS_KEY_ID": os.getenv("AWS_ACCESS_KEY_ID"),
    "AWS_SECRET_ACCESS_KEY": os.getenv("AWS_SECRET_ACCESS_KEY"),
    "AWS_REGION": os.getenv("AWS_REGION"),
    "AWS_S3_BUCKET": os.getenv("AWS_S3_BUCKET"),
    "WINEDB_HOST": os.getenv("WINEDB_HOST"),
    "WINEDB_PASSWORD": os.getenv("WINEDB_PASSWORD"),
}
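A minimal sketch of how settings is consumed by the rest of this commit (s3.py and tools.py index into it by key). The missing-variable check below is an illustrative assumption, not part of env.py; os.getenv() simply returns None for unset keys.

from env import settings

# Illustrative only: fail fast if any key from the DotEnv schema was not set in .env.
missing = [key for key, value in settings.items() if value is None]
if missing:
    raise RuntimeError("Missing .env values: " + ", ".join(missing))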
file.py (new file, 87 lines)
@@ -0,0 +1,87 @@
import os
import requests
import uuid
from typing import Callable
from enum import Enum

from PIL import Image

import pandas as pd

from utils import IMAGE_PROMPT, DATAFRAME_PROMPT
from tools import IMAGE_MODEL


class FileType(Enum):
    IMAGE = "image"
    AUDIO = "audio"
    VIDEO = "video"
    DATAFRAME = "dataframe"
    UNKNOWN = "unknown"


def handle(file_name: str) -> Callable:
    """
    Parse file type from file name (ex. image, audio, video, dataframe, etc.)
    """
    file_name = file_name.split("?")[0]

    if file_name.endswith(".png") or file_name.endswith(".jpg"):
        return handle_image
    elif file_name.endswith(".mp3") or file_name.endswith(".wav"):
        return handle_audio
    elif file_name.endswith(".mp4") or file_name.endswith(".avi"):
        return handle_video
    elif file_name.endswith(".csv"):
        return handle_dataframe
    else:
        return handle_unknown


def handle_image(i: int, file: str) -> str:
    img_data = requests.get(file).content
    filename = os.path.join("image", str(uuid.uuid4())[0:8] + ".png")
    with open(filename, "wb") as f:
        size = f.write(img_data)
    print(f"Inputs: {file} ({size//1000}KB) => {filename}")
    img = Image.open(filename)
    width, height = img.size
    ratio = min(512 / width, 512 / height)
    width_new, height_new = (round(width * ratio), round(height * ratio))
    img = img.resize((width_new, height_new))
    img = img.convert("RGB")
    img.save(filename, "PNG")
    print(f"Resize image from {width}x{height} to {width_new}x{height_new}")
    try:
        description = IMAGE_MODEL.inference(filename)
    except Exception as e:
        return {"text": "image upload", "response": str(e), "additional": []}

    return IMAGE_PROMPT.format(i=i, filename=filename, description=description)


def handle_audio(i: int, file: str) -> str:
    return ""


def handle_video(i: int, file: str) -> str:
    return ""


def handle_dataframe(i: int, file: str) -> str:
    content = requests.get(file).content
    filename = os.path.join("dataframe/", str(uuid.uuid4())[0:8] + ".csv")
    with open(filename, "wb") as f:
        size = f.write(content)
    print(f"Inputs: {file} ({size//1000}KB) => {filename}")
    df = pd.read_csv(filename)
    try:
        description = str(df.describe())
    except Exception as e:
        return {"text": "dataframe upload", "response": str(e), "additional": []}

    return DATAFRAME_PROMPT.format(i=i, filename=filename, description=description)


def handle_unknown(i: int, file: str) -> str:
    return ""
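A short usage sketch of the handle dispatcher with a hypothetical URL. The query string is stripped before the extension check, so signed URLs still route to the right handler; note that importing file also loads IMAGE_MODEL from tools.py, which pulls in the BLIP captioning model.

from file import handle

# Hypothetical URL: the "?sig=..." part is ignored when picking a handler.
url = "https://example.com/photos/cat.png?sig=abc123"
handler = handle(url)               # -> handle_image, based on the ".png" suffix
prompt_fragment = handler(1, url)   # downloads, resizes to <=512px, captions via IMAGE_MODEL
print(prompt_fragment)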
llm.py (new file, 326 lines)
@@ -0,0 +1,326 @@
"""OpenAI chat wrapper."""
from __future__ import annotations

import logging
import sys
from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple

from pydantic import BaseModel, Extra, Field, root_validator
from tenacity import (
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
    AIMessage,
    BaseMessage,
    ChatGeneration,
    ChatMessage,
    ChatResult,
    HumanMessage,
    SystemMessage,
)
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__file__)


def _create_retry_decorator(llm: ChatOpenAI) -> Callable[[Any], Any]:
    import openai

    min_seconds = 4
    max_seconds = 10
    # Wait 2^x * 1 second between each retry starting with
    # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
    return retry(
        reraise=True,
        stop=stop_after_attempt(llm.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
            retry_if_exception_type(openai.error.Timeout)
            | retry_if_exception_type(openai.error.APIError)
            | retry_if_exception_type(openai.error.APIConnectionError)
            | retry_if_exception_type(openai.error.RateLimitError)
            | retry_if_exception_type(openai.error.ServiceUnavailableError)
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )


async def acompletion_with_retry(llm: ChatOpenAI, **kwargs: Any) -> Any:
    """Use tenacity to retry the async completion call."""
    retry_decorator = _create_retry_decorator(llm)

    @retry_decorator
    async def _completion_with_retry(**kwargs: Any) -> Any:
        # Use OpenAI's async api https://github.com/openai/openai-python#async-api
        return await llm.client.acreate(**kwargs)

    return await _completion_with_retry(**kwargs)


def _convert_dict_to_message(_dict: dict) -> BaseMessage:
    role = _dict["role"]
    if role == "user":
        return HumanMessage(content=_dict["content"])
    elif role == "assistant":
        return AIMessage(content=_dict["content"])
    elif role == "system":
        return SystemMessage(content=_dict["content"])
    else:
        return ChatMessage(content=_dict["content"], role=role)


def _convert_message_to_dict(message: BaseMessage) -> dict:
    if isinstance(message, ChatMessage):
        message_dict = {"role": message.role, "content": message.content}
    elif isinstance(message, HumanMessage):
        message_dict = {"role": "user", "content": message.content}
    elif isinstance(message, AIMessage):
        message_dict = {"role": "assistant", "content": message.content}
    elif isinstance(message, SystemMessage):
        message_dict = {"role": "system", "content": message.content}
    else:
        raise ValueError(f"Got unknown type {message}")
    if "name" in message.additional_kwargs:
        message_dict["name"] = message.additional_kwargs["name"]
    return message_dict


def _create_chat_result(response: Mapping[str, Any]) -> ChatResult:
    generations = []
    for res in response["choices"]:
        message = _convert_dict_to_message(res["message"])
        gen = ChatGeneration(message=message)
        generations.append(gen)
    return ChatResult(generations=generations)


class ChatOpenAI(BaseChatModel, BaseModel):
    """Wrapper around OpenAI Chat large language models.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``OPENAI_API_KEY`` set with your API key.

    Any parameters that are valid to be passed to the openai.create call can be passed
    in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain.chat_models import ChatOpenAI
            openai = ChatOpenAI(model_name="gpt-3.5-turbo")
    """

    client: Any  #: :meta private:
    model_name: str = "gpt-4"
    """Model name to use."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not explicitly specified."""
    openai_api_key: Optional[str] = None
    max_retries: int = 6
    """Maximum number of retries to make when generating."""
    streaming: bool = False
    """Whether to stream the results or not."""
    n: int = 1
    """Number of chat completions to generate for each prompt."""
    max_tokens: int = 256
    """Maximum number of tokens to generate."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.ignore

    @root_validator(pre=True)
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = {field.alias for field in cls.__fields__.values()}

        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name not in all_required_field_names:
                if field_name in extra:
                    raise ValueError(f"Found {field_name} supplied twice.")
                extra[field_name] = values.pop(field_name)
        values["model_kwargs"] = extra
        return values

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        openai_api_key = get_from_dict_or_env(
            values, "openai_api_key", "OPENAI_API_KEY"
        )
        try:
            import openai

            openai.api_key = openai_api_key
        except ImportError:
            raise ValueError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        try:
            values["client"] = openai.ChatCompletion
        except AttributeError:
            raise ValueError(
                "`openai` has no `ChatCompletion` attribute, this is likely "
                "due to an old version of the openai package. Try upgrading it "
                "with `pip install --upgrade openai`."
            )
        if values["n"] < 1:
            raise ValueError("n must be at least 1.")
        if values["n"] > 1 and values["streaming"]:
            raise ValueError("n must be 1 when streaming.")
        return values

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling OpenAI API."""
        return {
            "model": self.model_name,
            "max_tokens": self.max_tokens,
            "stream": self.streaming,
            "n": self.n,
            **self.model_kwargs,
        }

    def _create_retry_decorator(self) -> Callable[[Any], Any]:
        import openai

        min_seconds = 4
        max_seconds = 10
        # Wait 2^x * 1 second between each retry starting with
        # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
        return retry(
            reraise=True,
            stop=stop_after_attempt(self.max_retries),
            wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
            retry=(
                retry_if_exception_type(openai.error.Timeout)
                | retry_if_exception_type(openai.error.APIError)
                | retry_if_exception_type(openai.error.APIConnectionError)
                | retry_if_exception_type(openai.error.RateLimitError)
                | retry_if_exception_type(openai.error.ServiceUnavailableError)
            ),
            before_sleep=before_sleep_log(logger, logging.WARNING),
        )

    def completion_with_retry(self, **kwargs: Any) -> Any:
        """Use tenacity to retry the completion call."""
        retry_decorator = self._create_retry_decorator()

        @retry_decorator
        def _completion_with_retry(**kwargs: Any) -> Any:
            return self.client.create(**kwargs)

        return _completion_with_retry(**kwargs)

    def _generate(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
    ) -> ChatResult:

        message_dicts, params = self._create_message_dicts(messages, stop)
        # for item in message_dicts:
        #     for k, v in item.items():
        #         print(f"{k}: {v}")
        #     print("-------")
        # print("===========")

        if self.streaming:
            inner_completion = ""
            role = "assistant"
            params["stream"] = True
            for stream_resp in self.completion_with_retry(
                messages=message_dicts, **params
            ):
                role = stream_resp["choices"][0]["delta"].get("role", role)
                token = stream_resp["choices"][0]["delta"].get("content", "")
                inner_completion += token
                self.callback_manager.on_llm_new_token(
                    token,
                    verbose=self.verbose,
                )
            message = _convert_dict_to_message(
                {"content": inner_completion, "role": role}
            )
            return ChatResult(generations=[ChatGeneration(message=message)])
        response = self.completion_with_retry(messages=message_dicts, **params)
        return _create_chat_result(response)

    def _create_message_dicts(
        self, messages: List[BaseMessage], stop: Optional[List[str]]
    ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
        params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params}
        if stop is not None:
            if "stop" in params:
                raise ValueError("`stop` found in both the input and default params.")
            params["stop"] = stop
        message_dicts = [_convert_message_to_dict(m) for m in messages]
        return message_dicts, params

    async def _agenerate(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
    ) -> ChatResult:
        message_dicts, params = self._create_message_dicts(messages, stop)
        if self.streaming:
            inner_completion = ""
            role = "assistant"
            params["stream"] = True
            async for stream_resp in await acompletion_with_retry(
                self, messages=message_dicts, **params
            ):
                role = stream_resp["choices"][0]["delta"].get("role", role)
                token = stream_resp["choices"][0]["delta"].get("content", "")
                inner_completion += token
                if self.callback_manager.is_async:
                    await self.callback_manager.on_llm_new_token(
                        token,
                        verbose=self.verbose,
                    )
                else:
                    self.callback_manager.on_llm_new_token(
                        token,
                        verbose=self.verbose,
                    )
            message = _convert_dict_to_message(
                {"content": inner_completion, "role": role}
            )
            return ChatResult(generations=[ChatGeneration(message=message)])
        else:
            response = await acompletion_with_retry(
                self, messages=message_dicts, **params
            )
            return _create_chat_result(response)

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {**{"model_name": self.model_name}, **self._default_params}

    def get_num_tokens(self, text: str) -> int:
        """Calculate num tokens with tiktoken package."""
        # tiktoken NOT supported for Python 3.8 or below
        if sys.version_info[1] <= 8:
            return super().get_num_tokens(text)
        try:
            import tiktoken
        except ImportError:
            raise ValueError(
                "Could not import tiktoken python package. "
                "This is needed in order to calculate get_num_tokens. "
                "Please install it with `pip install tiktoken`."
            )
        # create an encoder instance for the configured model
        enc = tiktoken.encoding_for_model(self.model_name)

        # encode the text with that encoder
        tokenized_text = enc.encode(text)

        # calculate the number of tokens in the encoded text
        return len(tokenized_text)
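A minimal sketch of driving this wrapper directly, assuming OPENAI_API_KEY is set and a langchain version that matches the imports above. main.py only constructs ChatOpenAI(temperature=0) and hands it to an agent, so the direct _generate call here is purely illustrative.

from langchain.schema import HumanMessage, SystemMessage
from llm import ChatOpenAI

# temperature is not a declared field; build_extra routes it into model_kwargs.
llm = ChatOpenAI(model_name="gpt-4", temperature=0, max_tokens=256)
result = llm._generate(
    [
        SystemMessage(content="You are a terse assistant."),
        HumanMessage(content="Say hello in one word."),
    ]
)
print(result.generations[0].message.content)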
main.py (new file, 115 lines)
@@ -0,0 +1,115 @@
from typing import List, TypedDict, Callable
import re

from langchain.agents import load_tools
from langchain.agents.initialize import initialize_agent
from langchain.agents.tools import Tool


from fastapi import FastAPI
from pydantic import BaseModel
from dotenv import load_dotenv
from s3 import upload

from llm import ChatOpenAI
from file import handle
from utils import (
    AWESOMEGPT_PREFIX,
    AWESOMEGPT_SUFFIX,
    ERROR_PROMPT,
)
from tools import AWESOME_MODEL, memory

load_dotenv()


app = FastAPI()


print("Initializing AwesomeGPT")
llm = ChatOpenAI(temperature=0)
tools = [
    *load_tools(
        ["python_repl", "serpapi", "wikipedia", "bing-search"],
        llm=llm,
    ),
]

for class_name, instance in AWESOME_MODEL.items():
    for e in dir(instance):
        if e.startswith("inference"):
            func = getattr(instance, e)
            tools.append(Tool(name=func.name, description=func.description, func=func))

agent = initialize_agent(
    tools,
    llm,
    agent="chat-conversational-react-description",
    verbose=True,
    memory=memory,
    agent_kwargs={
        "system_message": AWESOMEGPT_PREFIX,
        "human_message": AWESOMEGPT_SUFFIX,
    },
)


class Request(BaseModel):
    text: str
    state: List[str]
    files: List[str]
    key: str


class Response(TypedDict):
    text: str
    response: str
    additional: List[str]


@app.get("/")
async def index():
    return {"message": "Hello World"}


@app.post("/command")
async def command(request: Request) -> Response:
    text = request.text
    state = request.state
    files = request.files
    key = request.key

    print("=============== Running =============")
    print("Inputs:", text, state, files)
    # TODO - add state to memory (use key)

    print("======>Previous memory:\n %s" % agent.memory)

    promptedText = ""

    for i, file in enumerate(files):
        promptedText += handle(file)(i + 1, file)

    promptedText += text

    print("======>Prompted Text:\n %s" % promptedText)

    try:
        res = agent({"input": promptedText})
    except Exception as e:
        try:
            res = agent(
                {
                    "input": ERROR_PROMPT.format(promptedText=promptedText, e=str(e)),
                }
            )
        except Exception as e:
            return {"text": promptedText, "response": str(e), "additional": []}

    images = re.findall(r"(image/\S*png)", res["output"])

    return {
        "text": promptedText,
        "response": res["output"],
        "additional": [upload(image) for image in images],
    }
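A sketch of exercising the /command endpoint once the app is running (for example via uvicorn main:app). The host, port, and file URL are assumptions for illustration; the payload mirrors the Request model above.

import requests

resp = requests.post(
    "http://localhost:8000/command",
    json={
        "text": "What is in this picture?",
        "state": [],
        "files": ["https://example.com/sample.png"],
        "key": "session-1",
    },
)
print(resp.json())  # {"text": ..., "response": ..., "additional": [...]}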
requirements.txt (new file, 11 lines)
@@ -0,0 +1,11 @@
pandas
Pillow
pydantic
tenacity
langchain
fastapi
boto3
llama_index
torch==1.13.1+cu117
transformers
diffusers
s3.py (new file, 22 lines)
@@ -0,0 +1,22 @@
import os
import boto3

from env import settings


def upload(file_name: str):
    return upload_file(file_name, settings["AWS_S3_BUCKET"])


def upload_file(file_name, bucket, object_name=None):
    if object_name is None:
        object_name = os.path.basename(file_name)

    s3_client = boto3.client(
        "s3",
        aws_access_key_id=settings["AWS_ACCESS_KEY_ID"],
        aws_secret_access_key=settings["AWS_SECRET_ACCESS_KEY"],
    )
    s3_client.upload_file(file_name, bucket, object_name)

    return f"https://{bucket}.s3.{settings['AWS_REGION']}.amazonaws.com/{object_name}"
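A usage sketch, assuming the AWS credentials and bucket in .env are valid. upload() returns the public object URL that main.py places into the "additional" field of its response; the local path below is hypothetical.

from s3 import upload

url = upload("image/1a2b3c4d.png")
print(url)  # https://<AWS_S3_BUCKET>.s3.<AWS_REGION>.amazonaws.com/1a2b3c4d.png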
tools.py (new file, 105 lines)
@@ -0,0 +1,105 @@
from langchain.chains.conversation.memory import ConversationBufferMemory

from utils import prompts
from env import settings
from vfm import (
    ImageEditing,
    InstructPix2Pix,
    Text2Image,
    ImageCaptioning,
    VisualQuestionAnswering,
)

import requests

from llama_index.readers.database import DatabaseReader
from llama_index import GPTSimpleVectorIndex


memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)


class RequestsGet:
    @prompts(
        name="requests_get",
        description="A portal to the internet. "
        "Use this when you need to get specific content from a website. "
        "Input should be a url (i.e. https://www.google.com). "
        "The output will be the text response of the GET request.",
    )
    def inference(self, url: str) -> str:
        """Run the tool."""
        text = requests.get(url).text

        if len(text) > 100:
            text = text[:100] + "..."
        return text


class WineDB:
    def __init__(self):
        db = DatabaseReader(
            scheme="postgresql",  # Database Scheme
            host=settings["WINEDB_HOST"],  # Database Host
            port="5432",  # Database Port
            user="alphadom",  # Database User
            password=settings["WINEDB_PASSWORD"],  # Database Password
            dbname="postgres",  # Database Name
        )
        self.columns = ["nameEn", "nameKo", "description"]
        concat_columns = str(",'-',".join([f'"{i}"' for i in self.columns]))
        query = f"""
            SELECT
                Concat({concat_columns})
            FROM wine
        """
        # CAST(type AS VARCHAR), 'nameEn', 'nameKo', vintage, nationality, province, CAST(size AS VARCHAR), 'grapeVariety', price, image, description, code, winery, alcohol, pairing
        documents = db.load_data(query=query)
        self.index = GPTSimpleVectorIndex(documents)

    @prompts(
        name="Wine Recommendation",
        description="A tool to recommend wines based on a user's input. "
        "Inputs are necessary factors for wine recommendations, such as the user's mood today, side dishes to eat with wine, people to drink wine with, what things you want to do, and the scent and taste of their favorite wine. "
        "The output will be a list of recommended wines. "
        "The tool is based on a database of wine reviews, which is stored in a database.",
    )
    def inference(self, query: str) -> str:
        """Run the tool."""
        results = self.index.query(query)
        wine = "\n".join(
            [
                f"{i}:{j}"
                for i, j in zip(
                    self.columns, results.source_nodes[0].source_text.split("-")
                )
            ]
        )
        return results.response + "\n\n" + wine


class ExitConversation:
    @prompts(
        name="exit_conversation",
        description="A tool to exit the conversation. "
        "Use this when you want to end the conversation. "
        "The output will be a message that the conversation is over.",
    )
    def inference(self, query: str) -> str:
        """Run the tool."""
        memory.chat_memory.messages = []
        return ""


IMAGE_MODEL = ImageCaptioning("cuda:3")


AWESOME_MODEL = {
    "RequestsGet": RequestsGet(),
    "WineDB": WineDB(),
    "ExitConversation": ExitConversation(),
    "Text2Image": Text2Image("cuda:3"),
    "ImageEditing": ImageEditing("cuda:3"),
    "InstructPix2Pix": InstructPix2Pix("cuda:3"),
    "VisualQuestionAnswering": VisualQuestionAnswering("cuda:3"),
}
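main.py discovers these tools by scanning each instance for methods whose names start with "inference" and reading the name/description attributes attached by the @prompts decorator from utils.py. Importing tools itself eagerly loads the GPU models and the wine database, so this sketch reproduces the discovery pattern on a small stand-in class instead.

from langchain.agents.tools import Tool
from utils import prompts


class EchoTool:
    @prompts(name="echo", description="Returns its input unchanged.")
    def inference(self, query: str) -> str:
        return query


instance = EchoTool()
discovered = [
    Tool(
        name=getattr(instance, attr).name,
        description=getattr(instance, attr).description,
        func=getattr(instance, attr),
    )
    for attr in dir(instance)
    if attr.startswith("inference")
]
print([t.name for t in discovered])  # ['echo']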
utils.py (new file, 134 lines)
@@ -0,0 +1,134 @@
import os
import random
import torch
import uuid
import numpy as np

from langchain.output_parsers.base import BaseOutputParser


IMAGE_PROMPT = """
{i}th image: provide a figure named {filename}. The description is: {description}.
"""


DATAFRAME_PROMPT = """
{i}th dataframe: provide a dataframe named {filename}. The description is: {description}.
"""


IMAGE_SUFFIX = """
Please understand and answer the image based on this information. The image understanding is complete, so don't try to understand the image again.
"""

AWESOMEGPT_PREFIX = """Awesome GPT is designed to be able to assist with a wide range of text and visual related tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. Awesome GPT is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.

Awesome GPT is able to process and understand large amounts of text and images. As a language model, Awesome GPT cannot directly read images, but it has a list of tools to finish different visual tasks.

Each image will have a file name formed as "image/xxx.png"
Each dataframe will have a file name formed as "dataframe/xxx.csv"

Awesome GPT can invoke different tools to indirectly understand pictures. When talking about images, Awesome GPT is very strict about the file name and will never fabricate nonexistent files. When using tools to generate new image files, Awesome GPT also knows that the image may not be the same as the user's demand, and will use other visual question answering tools or description tools to observe the real image. Awesome GPT is able to use tools in a sequence, and is loyal to the tool observation outputs rather than faking the image content and image file name. It will remember to provide the file name from the last tool observation, if a new image is generated.

Human may provide new figures to Awesome GPT with a description. The description helps Awesome GPT to understand this image, but Awesome GPT should use tools to finish the following tasks, rather than directly imagining from the description.

Overall, Awesome GPT is a powerful visual dialogue assistant tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics."""

AWESOMEGPT_SUFFIX = """TOOLS
------
Awesome GPT can ask the user to use tools to look up information that may be helpful in answering the user's original question.
You are very strict about filename correctness and will never fake a file name if it does not exist.
You will remember to provide the image file name loyally if it's provided in the last tool observation.
The tools the human can use are:

{{tools}}

{format_instructions}

USER'S INPUT
--------------------
Here is the user's input (remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else):

{{{{input}}}}"""

ERROR_PROMPT = "An error has occurred for the following text: \n{promptedText} Please explain this error.\n {e}"


os.makedirs("image", exist_ok=True)
os.makedirs("dataframe", exist_ok=True)


def seed_everything(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    return seed


def prompts(name, description):
    def decorator(func):
        func.name = name
        func.description = description
        return func

    return decorator


def cut_dialogue_history(history_memory, keep_last_n_words=500):
    tokens = history_memory.split()
    n_tokens = len(tokens)
    print(f"history_memory: {history_memory}, n_tokens: {n_tokens}")
    if n_tokens < keep_last_n_words:
        return history_memory
    else:
        paragraphs = history_memory.split("\n")
        last_n_tokens = n_tokens
        while last_n_tokens >= keep_last_n_words:
            last_n_tokens = last_n_tokens - len(paragraphs[0].split(" "))
            paragraphs = paragraphs[1:]
        return "\n" + "\n".join(paragraphs)


def get_new_image_name(org_img_name, func_name="update"):
    head_tail = os.path.split(org_img_name)
    head = head_tail[0]
    tail = head_tail[1]
    name_split = tail.split(".")[0].split("_")
    this_new_uuid = str(uuid.uuid4())[0:4]
    if len(name_split) == 1:
        most_org_file_name = name_split[0]
        recent_prev_file_name = name_split[0]
        new_file_name = "{}_{}_{}_{}.png".format(
            this_new_uuid, func_name, recent_prev_file_name, most_org_file_name
        )
    else:
        assert len(name_split) == 4
        most_org_file_name = name_split[3]
        recent_prev_file_name = name_split[0]
        new_file_name = "{}_{}_{}_{}.png".format(
            this_new_uuid, func_name, recent_prev_file_name, most_org_file_name
        )
    return os.path.join(head, new_file_name)


def get_new_dataframe_name(org_img_name, func_name="update"):
    head_tail = os.path.split(org_img_name)
    head = head_tail[0]
    tail = head_tail[1]
    name_split = tail.split(".")[0].split("_")
    this_new_uuid = str(uuid.uuid4())[0:4]
    if len(name_split) == 1:
        most_org_file_name = name_split[0]
        recent_prev_file_name = name_split[0]
        new_file_name = "{}_{}_{}_{}.csv".format(
            this_new_uuid, func_name, recent_prev_file_name, most_org_file_name
        )
    else:
        assert len(name_split) == 4
        most_org_file_name = name_split[3]
        recent_prev_file_name = name_split[0]
        new_file_name = "{}_{}_{}_{}.csv".format(
            this_new_uuid, func_name, recent_prev_file_name, most_org_file_name
        )
    return os.path.join(head, new_file_name)
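A worked example of the naming scheme in get_new_image_name, using hypothetical paths. First-generation files keep their stem in both trailing slots, while already-derived names (the four-part form) keep the original stem in the last slot; the 4-character uuid prefix differs on every call.

from utils import get_new_image_name

print(get_new_image_name("image/cat.png", func_name="pix2pix"))
# e.g. image/1a2b_pix2pix_cat_cat.png
print(get_new_image_name("image/1a2b_pix2pix_cat_cat.png", func_name="replace-something"))
# e.g. image/9f8e_replace-something_1a2b_cat.png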
vfm.py (new file, 243 lines)
@@ -0,0 +1,243 @@
import os
import torch
import uuid
from PIL import Image
import numpy as np

from utils import prompts, get_new_image_name

from transformers import (
    CLIPSegProcessor,
    CLIPSegForImageSegmentation,
)
from transformers import (
    BlipProcessor,
    BlipForConditionalGeneration,
    BlipForQuestionAnswering,
)

from diffusers import (
    StableDiffusionPipeline,
    StableDiffusionInpaintPipeline,
    StableDiffusionInstructPix2PixPipeline,
)
from diffusers import EulerAncestralDiscreteScheduler


class MaskFormer:
    def __init__(self, device):
        print("Initializing MaskFormer to %s" % device)
        self.device = device
        self.processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
        self.model = CLIPSegForImageSegmentation.from_pretrained(
            "CIDAS/clipseg-rd64-refined"
        ).to(device)

    def inference(self, image_path, text):
        threshold = 0.5
        min_area = 0.02
        padding = 20
        original_image = Image.open(image_path)
        image = original_image.resize((512, 512))
        inputs = self.processor(
            text=text, images=image, padding="max_length", return_tensors="pt"
        ).to(self.device)
        with torch.no_grad():
            outputs = self.model(**inputs)
        mask = torch.sigmoid(outputs[0]).squeeze().cpu().numpy() > threshold
        area_ratio = len(np.argwhere(mask)) / (mask.shape[0] * mask.shape[1])
        if area_ratio < min_area:
            return None
        true_indices = np.argwhere(mask)
        mask_array = np.zeros_like(mask, dtype=bool)
        for idx in true_indices:
            padded_slice = tuple(
                slice(max(0, i - padding), i + padding + 1) for i in idx
            )
            mask_array[padded_slice] = True
        visual_mask = (mask_array * 255).astype(np.uint8)
        image_mask = Image.fromarray(visual_mask)
        return image_mask.resize(original_image.size)


class ImageEditing:
    def __init__(self, device):
        print("Initializing ImageEditing to %s" % device)
        self.device = device
        self.mask_former = MaskFormer(device=self.device)
        self.revision = "fp16" if "cuda" in device else None
        self.torch_dtype = torch.float16 if "cuda" in device else torch.float32
        self.inpaint = StableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision=self.revision,
            torch_dtype=self.torch_dtype,
        ).to(device)

    @prompts(
        name="Remove Something From The Photo",
        description="useful when you want to remove an object or something from the photo "
        "from its description or location. "
        "The input to this tool should be a comma separated string of two, "
        "representing the image_path and the object to be removed. ",
    )
    def inference_remove(self, inputs):
        image_path, to_be_removed_txt = inputs.split(",")
        return self.inference_replace(f"{image_path},{to_be_removed_txt},background")

    @prompts(
        name="Replace Something From The Photo",
        description="useful when you want to replace an object from the object description or "
        "location with another object from its description. "
        "The input to this tool should be a comma separated string of three, "
        "representing the image_path, the object to be replaced, and the object to replace it with. ",
    )
    def inference_replace(self, inputs):
        image_path, to_be_replaced_txt, replace_with_txt = inputs.split(",")
        original_image = Image.open(image_path)
        original_size = original_image.size
        mask_image = self.mask_former.inference(image_path, to_be_replaced_txt)
        updated_image = self.inpaint(
            prompt=replace_with_txt,
            image=original_image.resize((512, 512)),
            mask_image=mask_image.resize((512, 512)),
        ).images[0]
        updated_image_path = get_new_image_name(
            image_path, func_name="replace-something"
        )
        updated_image = updated_image.resize(original_size)
        updated_image.save(updated_image_path)
        print(
            f"\nProcessed ImageEditing, Input Image: {image_path}, Replace {to_be_replaced_txt} to {replace_with_txt}, "
            f"Output Image: {updated_image_path}"
        )
        return updated_image_path


class InstructPix2Pix:
    def __init__(self, device):
        print("Initializing InstructPix2Pix to %s" % device)
        self.device = device
        self.torch_dtype = torch.float16 if "cuda" in device else torch.float32
        self.pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix",
            safety_checker=None,
            torch_dtype=self.torch_dtype,
        ).to(device)
        self.pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(
            self.pipe.scheduler.config
        )

    @prompts(
        name="Instruct Image Using Text",
        description="useful when you want the style of the image to be like the text. "
        "like: make it look like a painting. or make it like a robot. "
        "The input to this tool should be a comma separated string of two, "
        "representing the image_path and the text. ",
    )
    def inference(self, inputs):
        """Change style of image."""
        print("===>Starting InstructPix2Pix Inference")
        image_path, text = inputs.split(",")[0], ",".join(inputs.split(",")[1:])
        original_image = Image.open(image_path)
        image = self.pipe(
            text, image=original_image, num_inference_steps=40, image_guidance_scale=1.2
        ).images[0]
        updated_image_path = get_new_image_name(image_path, func_name="pix2pix")
        image.save(updated_image_path)
        print(
            f"\nProcessed InstructPix2Pix, Input Image: {image_path}, Instruct Text: {text}, "
            f"Output Image: {updated_image_path}"
        )
        return updated_image_path


class Text2Image:
    def __init__(self, device):
        print("Initializing Text2Image to %s" % device)
        self.device = device
        self.torch_dtype = torch.float16 if "cuda" in device else torch.float32
        self.pipe = StableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", torch_dtype=self.torch_dtype
        )
        self.pipe.to(device)
        self.a_prompt = "best quality, extremely detailed"
        self.n_prompt = (
            "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, "
            "fewer digits, cropped, worst quality, low quality"
        )

    @prompts(
        name="Generate Image From User Input Text",
        description="useful when you want to generate an image from a user input text and save it to a file. "
        "like: generate an image of an object or something, or generate an image that includes some objects. "
        "The input to this tool should be a string, representing the text used to generate image. ",
    )
    def inference(self, text):
        image_filename = os.path.join("image", str(uuid.uuid4())[0:8] + ".png")
        prompt = text + ", " + self.a_prompt
        image = self.pipe(prompt, negative_prompt=self.n_prompt).images[0]
        image.save(image_filename)
        print(
            f"\nProcessed Text2Image, Input Text: {text}, Output Image: {image_filename}"
        )
        return image_filename


class ImageCaptioning:
    def __init__(self, device):
        print("Initializing ImageCaptioning to %s" % device)
        self.device = device
        self.torch_dtype = torch.float16 if "cuda" in device else torch.float32
        self.processor = BlipProcessor.from_pretrained(
            "Salesforce/blip-image-captioning-base"
        )
        self.model = BlipForConditionalGeneration.from_pretrained(
            "Salesforce/blip-image-captioning-base", torch_dtype=self.torch_dtype
        ).to(self.device)

    @prompts(
        name="Get Photo Description",
        description="useful when you want to know what is inside the photo. receives image_path as input. "
        "The input to this tool should be a string, representing the image_path. ",
    )
    def inference(self, image_path):
        inputs = self.processor(Image.open(image_path), return_tensors="pt").to(
            self.device, self.torch_dtype
        )
        out = self.model.generate(**inputs)
        captions = self.processor.decode(out[0], skip_special_tokens=True)
        print(
            f"\nProcessed ImageCaptioning, Input Image: {image_path}, Output Text: {captions}"
        )
        return captions


class VisualQuestionAnswering:
    def __init__(self, device):
        print("Initializing VisualQuestionAnswering to %s" % device)
        self.torch_dtype = torch.float16 if "cuda" in device else torch.float32
        self.device = device
        self.processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
        self.model = BlipForQuestionAnswering.from_pretrained(
            "Salesforce/blip-vqa-base", torch_dtype=self.torch_dtype
        ).to(self.device)

    @prompts(
        name="Answer Question About The Image",
        description="useful when you need an answer for a question based on an image. "
        "like: what is the background color of the last image, how many cats in this figure, what is in this figure. "
        "The input to this tool should be a comma separated string of two, representing the image_path and the question",
    )
    def inference(self, inputs):
        image_path, question = inputs.split(",")
        raw_image = Image.open(image_path).convert("RGB")
        inputs = self.processor(raw_image, question, return_tensors="pt").to(
            self.device, self.torch_dtype
        )
        out = self.model.generate(**inputs)
        answer = self.processor.decode(out[0], skip_special_tokens=True)
        print(
            f"\nProcessed VisualQuestionAnswering, Input Image: {image_path}, Input Question: {question}, "
            f"Output Answer: {answer}"
        )
        return answer
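All of these tools take a single comma separated string rather than structured arguments, because the agent passes tool input as one text field. A sketch of invoking one directly, assuming a CUDA device and an existing image file (both illustrative):

from vfm import InstructPix2Pix

pix2pix = InstructPix2Pix("cuda:0")  # fp16 weights are only selected for CUDA devices
# Input convention: "<image_path>,<instruction>" in one string.
new_path = pix2pix.inference("image/1a2b3c4d.png,make it look like a watercolor painting")
print(new_path)  # a new file under image/, named via get_new_image_name(..., "pix2pix")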