forked from Archives/langchain
de6a401a22
OpenLM is a zero-dependency OpenAI-compatible LLM provider that can call different inference endpoints directly via HTTP. It implements the OpenAI Completion class so that it can be used as a drop-in replacement for the OpenAI API. This changeset builds on BaseOpenAI to keep the added code minimal.

Co-authored-by: Dev 2049 <dev.dev2049@gmail.com>
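For context, the openlm package exposes a Completion interface modeled on openai.Completion, dispatching each request over HTTP to the endpoint implied by the model name. A minimal sketch of the underlying client this wrapper binds to (the model string is illustrative, and any required provider API keys are assumed to be set in the environment):

import openlm

# openlm.Completion mirrors the openai.Completion.create interface;
# the model name determines which inference endpoint is called.
completion = openlm.Completion.create(
    model="gpt-3.5-turbo",
    prompt="Say hello.",
)
print(completion)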
27 lines
767 B
Python
from typing import Any, Dict

from pydantic import root_validator

from langchain.llms.openai import BaseOpenAI


class OpenLM(BaseOpenAI):
    """Drop-in OpenAI-compatible LLM that routes calls through openlm."""

    @property
    def _invocation_params(self) -> Dict[str, Any]:
        # Forward the model name to openlm along with the standard
        # invocation parameters inherited from BaseOpenAI.
        return {**{"model": self.model_name}, **super()._invocation_params}

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        try:
            import openlm

            # openlm.Completion mirrors openai.Completion, so it can serve
            # as the client object BaseOpenAI expects.
            values["client"] = openlm.Completion
        except ImportError:
            raise ValueError(
                "Could not import openlm python package. "
                "Please install it with `pip install openlm`."
            )
        if values["streaming"]:
            raise ValueError("Streaming not supported with openlm")
        return values
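Because OpenLM subclasses BaseOpenAI, it can stand in wherever an OpenAI LLM is expected in langchain. A minimal usage sketch, assuming OpenLM is exported from langchain.llms as part of this changeset and the relevant provider API key is set in the environment (the model name is illustrative):

from langchain.llms import OpenLM

# Constructed like the OpenAI wrapper; model_name is forwarded to
# openlm through _invocation_params. Streaming must stay disabled.
llm = OpenLM(model_name="text-davinci-003", temperature=0.0)
print(llm("What is the capital of France?"))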