fix: changes to llm classes according to base

pull/926/head
Siddhant Rai 2 months ago
parent c1c69ed22b
commit 60a670ce29

@@ -4,9 +4,10 @@ from application.core.settings import settings
 class AnthropicLLM(BaseLLM):
-    def __init__(self, api_key=None):
+    def __init__(self, api_key=None, *args, **kwargs):
         from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
+        super().__init__(*args, **kwargs)
         self.api_key = (
             api_key or settings.ANTHROPIC_API_KEY
         )  # If not provided, use a default from settings

@@ -3,7 +3,7 @@ from application.llm.base import BaseLLM
 class HuggingFaceLLM(BaseLLM):
-    def __init__(self, api_key, llm_name="Arc53/DocsGPT-7B", q=False):
+    def __init__(self, api_key, llm_name="Arc53/DocsGPT-7B", q=False, *args, **kwargs):
         global hf
         from langchain.llms import HuggingFacePipeline
@@ -33,6 +33,8 @@ class HuggingFaceLLM(BaseLLM):
         tokenizer = AutoTokenizer.from_pretrained(llm_name)
         model = AutoModelForCausalLM.from_pretrained(llm_name)
+        super().__init__(*args, **kwargs)
+        self.api_key = api_key
         pipe = pipeline(
             "text-generation",
             model=model,

@@ -4,7 +4,7 @@ from application.core.settings import settings
 class LlamaCpp(BaseLLM):
-    def __init__(self, api_key, llm_name=settings.MODEL_PATH, **kwargs):
+    def __init__(self, api_key, llm_name=settings.MODEL_PATH, *args, **kwargs):
         global llama
         try:
             from llama_cpp import Llama
@@ -13,6 +13,8 @@ class LlamaCpp(BaseLLM):
                 "Please install llama_cpp using pip install llama-cpp-python"
             )
+        super().__init__(*args, **kwargs)
+        self.api_key = api_key
         llama = Llama(model_path=llm_name, n_ctx=2048)

     def _raw_gen(self, model, messages, stream=False, **kwargs):

@@ -4,10 +4,11 @@ from application.core.settings import settings
 class OpenAILLM(BaseLLM):
-    def __init__(self, api_key):
+    def __init__(self, api_key, *args, **kwargs):
         global openai
         from openai import OpenAI
+        super().__init__(*args, **kwargs)
         self.client = OpenAI(
             api_key=api_key,
         )

@@ -4,9 +4,10 @@ from application.core.settings import settings
 class PremAILLM(BaseLLM):
-    def __init__(self, api_key):
+    def __init__(self, api_key, *args, **kwargs):
         from premai import Prem
+        super().__init__(*args, **kwargs)
         self.client = Prem(api_key=api_key)
         self.api_key = api_key
         self.project_id = settings.PREMAI_PROJECT_ID

@@ -60,7 +60,7 @@ class LineIterator:
 class SagemakerAPILLM(BaseLLM):
-    def __init__(self, *args, **kwargs):
+    def __init__(self, api_key, *args, **kwargs):
         import boto3

         runtime = boto3.client(
@@ -70,6 +70,8 @@ class SagemakerAPILLM(BaseLLM):
             region_name="us-west-2",
         )
+        super().__init__(*args, **kwargs)
+        self.api_key = api_key
         self.endpoint = settings.SAGEMAKER_ENDPOINT
         self.runtime = runtime

Loading…
Cancel
Save