"""Test Self-hosted LLMs."""
|
|
import pickle
|
|
from typing import Any, List, Optional
|
|
|
|
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
|
|
|
|
from langchain.llms import SelfHostedHuggingFaceLLM, SelfHostedPipeline
|
|
|
|
model_reqs = ["pip:./", "transformers", "torch"]
|
|
|
|
|
|
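# NOTE: these are live integration tests. get_remote_instance assumes Runhouse
# is installed and configured with cloud credentials, and provisioning the
# on-demand A100 below incurs real cloud usage.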
def get_remote_instance() -> Any:
    """Get remote instance for testing."""
    import runhouse as rh

    return rh.cluster(name="rh-a10x", instance_type="A100:1", use_spot=False)


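# A minimal sketch (not part of the original suite) of sharing one cluster
# across tests with a pytest fixture, assuming pytest is in use:
#
#     import pytest
#
#     @pytest.fixture(scope="module")
#     def gpu() -> Any:
#         return get_remote_instance()
#
# The tests below call get_remote_instance() directly instead, relying on
# Runhouse to reuse the cluster saved under the name above.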
def test_self_hosted_huggingface_pipeline_text_generation() -> None:
    """Test valid call to self-hosted HuggingFace text generation model."""
    gpu = get_remote_instance()
    llm = SelfHostedHuggingFaceLLM(
        model_id="gpt2",
        task="text-generation",
        model_kwargs={"n_positions": 1024},
        hardware=gpu,
        model_reqs=model_reqs,
    )
    output = llm("Say foo:")  # type: ignore
    assert isinstance(output, str)


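# Same flow as the text-generation test, but with a seq2seq model; no
# model_kwargs are passed for flan-t5-small.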
def test_self_hosted_huggingface_pipeline_text2text_generation() -> None:
    """Test valid call to self-hosted HuggingFace text2text generation model."""
    gpu = get_remote_instance()
    llm = SelfHostedHuggingFaceLLM(
        model_id="google/flan-t5-small",
        task="text2text-generation",
        hardware=gpu,
        model_reqs=model_reqs,
    )
    output = llm("Say foo:")  # type: ignore
    assert isinstance(output, str)


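# Helpers for the SelfHostedPipeline tests below: load_pipeline builds a small
# local transformers pipeline (max_new_tokens=10 keeps remote generation
# cheap), and inference_fn is shipped to the cluster to run it.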
def load_pipeline() -> Any:
    """Load pipeline for testing."""
    model_id = "gpt2"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id)
    pipe = pipeline(
        "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10
    )
    return pipe


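# Note: the `pipeline` parameter below receives the loaded pipeline object
# (shadowing the transformers `pipeline` import); `stop` matches the expected
# inference-fn signature but is unused here.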
def inference_fn(pipeline: Any, prompt: str, stop: Optional[List[str]] = None) -> str:
    """Inference function for testing."""
    return pipeline(prompt)[0]["generated_text"]


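# The three tests below exercise three initialization paths: a live pipeline
# object, a path to a pickled pipeline on the cluster, and a load function
# that constructs the pipeline remotely.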
def test_init_with_local_pipeline() -> None:
    """Test initialization with an in-memory HF pipeline object."""
    gpu = get_remote_instance()
    pipeline = load_pipeline()
    llm = SelfHostedPipeline.from_pipeline(
        pipeline=pipeline,
        hardware=gpu,
        model_reqs=model_reqs,
        inference_fn=inference_fn,
    )
    output = llm("Say foo:")  # type: ignore
    assert isinstance(output, str)


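# Save-then-load path: the pipeline is pickled locally, saved as a Runhouse
# blob, and copied to the cluster so from_pipeline can take a path instead of
# an object.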
def test_init_with_pipeline_path() -> None:
    """Test initialization from a pipeline pickled and saved to the cluster."""
    gpu = get_remote_instance()
    pipeline = load_pipeline()
    import runhouse as rh

    rh.blob(pickle.dumps(pipeline), path="models/pipeline.pkl").save().to(
        gpu, path="models"
    )
    llm = SelfHostedPipeline.from_pipeline(
        pipeline="models/pipeline.pkl",
        hardware=gpu,
        model_reqs=model_reqs,
        inference_fn=inference_fn,
    )
    output = llm("Say foo:")  # type: ignore
    assert isinstance(output, str)


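# With model_load_fn, no pipeline object crosses the wire: load_pipeline runs
# on the cluster itself, avoiding a large local pickle.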
def test_init_with_pipeline_fn() -> None:
    """Test initialization with a function that loads the pipeline remotely."""
    gpu = get_remote_instance()
    llm = SelfHostedPipeline(
        model_load_fn=load_pipeline,
        hardware=gpu,
        model_reqs=model_reqs,
        inference_fn=inference_fn,
    )
    output = llm("Say foo:")  # type: ignore
    assert isinstance(output, str)