mirror of
https://github.com/hwchase17/langchain
synced 2024-11-10 01:10:59 +00:00
fc196cab12
Add deepinfra chat models support. This is https://github.com/langchain-ai/langchain/pull/14234 re-opened from my branch (so maintainers can edit).
33 lines
999 B
Python
33 lines
999 B
Python
"""Test DeepInfra API wrapper."""
|
|
from langchain_community.llms.deepinfra import DeepInfra
|
|
|
|
|
|
def test_deepinfra_call() -> None:
    """Verify a plain synchronous invoke against DeepInfra yields a string."""
    model = DeepInfra(model_id="meta-llama/Llama-2-7b-chat-hf")
    result = model.invoke("What is 2 + 2?")
    assert isinstance(result, str)
|
|
|
|
|
|
async def test_deepinfra_acall() -> None:
    """Verify the async invoke path and the wrapper's LLM type identifier."""
    model = DeepInfra(model_id="meta-llama/Llama-2-7b-chat-hf")
    result = await model.ainvoke("What is 2 + 2?")
    # Sanity-check the integration identifies itself correctly.
    assert model._llm_type == "deepinfra"
    assert isinstance(result, str)
|
|
|
|
|
|
def test_deepinfra_stream() -> None:
    """Verify synchronous streaming produces at least one chunk."""
    model = DeepInfra(model_id="meta-llama/Llama-2-7b-chat-hf")
    # Consume the whole stream; counting via a generator expression.
    chunk_count = sum(1 for _ in model.stream("[INST] Hello [/INST] "))
    assert chunk_count > 0
|
|
|
|
|
|
async def test_deepinfra_astream() -> None:
    """Verify asynchronous streaming produces at least one chunk."""
    model = DeepInfra(model_id="meta-llama/Llama-2-7b-chat-hf")
    received_chunk = False
    # Drain the async stream fully; flag flips on the first chunk seen.
    async for _ in model.astream("[INST] Hello [/INST] "):
        received_chunk = True
    assert received_chunk
|