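"""Anthropic (Claude) implementation of the BaseLLM interface."""
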
from application.llm.base import BaseLLM
from application.core.settings import settings


class AnthropicLLM(BaseLLM):
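    """Claude adapter that talks to Anthropic's text Completions API."""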

    def __init__(self, api_key=None, user_api_key=None, *args, **kwargs):
        # Deferred import: the anthropic SDK is only required when this
        # provider is actually instantiated.
        from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT

        super().__init__(*args, **kwargs)
        self.api_key = (
            api_key or settings.ANTHROPIC_API_KEY
        )  # If not provided, use a default from settings
        self.user_api_key = user_api_key
        self.anthropic = Anthropic(api_key=self.api_key)
        self.HUMAN_PROMPT = HUMAN_PROMPT
        self.AI_PROMPT = AI_PROMPT

    def _raw_gen(
        self, baseself, model, messages, stream=False, max_tokens=300, **kwargs
    ):
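        # The first message is expected to carry retrieved context and the
        # last one the user question; both are folded into a single prompt.
        # `baseself` appears to be supplied by the BaseLLM wrapper and is
        # unused here.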
        context = messages[0]["content"]
        user_question = messages[-1]["content"]
        prompt = f"### Context \n {context} \n ### Question \n {user_question}"
        if stream:
            # Delegate to the streaming path; gen_stream expects the raw
            # messages list (see _raw_gen_stream), not the flattened prompt.
            return self.gen_stream(model, messages, stream, max_tokens, **kwargs)

        completion = self.anthropic.completions.create(
            model=model,
            max_tokens_to_sample=max_tokens,
            stream=stream,
            prompt=f"{self.HUMAN_PROMPT} {prompt}{self.AI_PROMPT}",
        )
        return completion.completion

    def _raw_gen_stream(
        self, baseself, model, messages, stream=True, max_tokens=300, **kwargs
    ):
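        # Same prompt construction as _raw_gen, but the completion is
        # requested with stream=True and yielded back chunk by chunk.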
        context = messages[0]["content"]
        user_question = messages[-1]["content"]
        prompt = f"### Context \n {context} \n ### Question \n {user_question}"
        stream_response = self.anthropic.completions.create(
            model=model,
            prompt=f"{self.HUMAN_PROMPT} {prompt}{self.AI_PROMPT}",
            max_tokens_to_sample=max_tokens,
            stream=True,
        )

        for completion in stream_response:
            yield completion.completion
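

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module. It assumes
    # BaseLLM exposes a public `gen` wrapper around `_raw_gen` (implied by
    # the naming here) and that ANTHROPIC_API_KEY is configured in settings;
    # the model name below is illustrative only.
    llm = AnthropicLLM()
    demo_messages = [
        {"role": "system", "content": "Context retrieved from the docs."},
        {"role": "user", "content": "Summarize the context in one line."},
    ]
    print(llm.gen(model="claude-2", messages=demo_messages))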