Mirror of https://github.com/hwchase17/langchain, synced 2024-11-08 07:10:35 +00:00
Commit de6a401a22: OpenLM is a zero-dependency, OpenAI-compatible LLM provider that can call different inference endpoints directly via HTTP. It implements the OpenAI Completion class so that it can be used as a drop-in replacement for the OpenAI API. This changeset builds on BaseOpenAI to keep the added code minimal.

Co-authored-by: Dev 2049 <dev.dev2049@gmail.com>
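To illustrate the "minimal added code" point, here is a rough sketch of how a provider can reuse BaseOpenAI: the subclass swaps in an OpenAI-compatible client and forwards the model name, leaving request handling to the parent class. This is not the file from the changeset; the openlm.Completion attribute, the client and model_name fields, and the _invocation_params override are assumptions based on the description above and on how BaseOpenAI subclasses are typically written.

# Hedged sketch only: shows the BaseOpenAI-reuse pattern described in the
# commit message; attribute names are assumptions, not the changeset's code.
from typing import Any, Dict

from pydantic import root_validator

from langchain.llms.openai import BaseOpenAI


class OpenLMSketch(BaseOpenAI):
    """Drop-in LLM that routes completions through an OpenAI-compatible client."""

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        # Swap the OpenAI client for openlm's Completion class, which the
        # commit description says mirrors the OpenAI Completion interface.
        try:
            import openlm

            values["client"] = openlm.Completion
        except ImportError:
            raise ImportError("Could not import openlm. Install it with `pip install openlm`.")
        return values

    @property
    def _invocation_params(self) -> Dict[str, Any]:
        # Forward the configured model name alongside BaseOpenAI's parameters.
        return {"model": self.model_name, **super()._invocation_params}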
9 lines · 241 B · Python
from langchain.llms.openlm import OpenLM


def test_openlm_call() -> None:
    """Test valid call to openlm."""
    llm = OpenLM(model_name="dolly-v2-7b", max_tokens=10)
    output = llm(prompt="Say foo:")
    assert isinstance(output, str)
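Because OpenLM implements the standard LLM interface, it can slot into an ordinary LangChain chain the same way the OpenAI LLM does. The sketch below reuses the constructor arguments from the test above; the prompt template and chain wiring are illustrative and not part of the changeset.

# Minimal drop-in usage sketch, reusing model_name and max_tokens from the
# test above; the prompt/chain wiring is illustrative, not from the changeset.
from langchain.chains import LLMChain
from langchain.llms.openlm import OpenLM
from langchain.prompts import PromptTemplate

llm = OpenLM(model_name="dolly-v2-7b", max_tokens=10)
prompt = PromptTemplate(input_variables=["subject"], template="Say {subject}:")
chain = LLMChain(llm=llm, prompt=prompt)

print(chain.run(subject="foo"))  # returns a short string completion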