mirror of
https://github.com/hwchase17/langchain
synced 2024-11-11 19:11:02 +00:00
570d45b2a1
Ollama has a raw option now. https://github.com/ollama/ollama/blob/main/docs/api.md

Co-authored-by: Isaac Francisco <78627776+isahers1@users.noreply.github.com>
Co-authored-by: isaac hershenson <ihershenson@hmc.edu>
202 lines
6.3 KiB
Python
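
For context, Ollama's /api/generate endpoint accepts a top-level `raw` flag that bypasses prompt templating (see the API doc linked in the commit message). Below is a minimal sketch of passing it through the LangChain wrapper; the URL and model name are placeholders, and `raw=True` as a constructor argument is inferred from the "raw" field asserted in the payloads tested below, not confirmed by this file:

from langchain_community.llms.ollama import Ollama

# Hypothetical usage: the `raw` value travels as a top-level field of the
# JSON body POSTed to /api/generate, alongside "model" and "prompt".
llm = Ollama(base_url="http://localhost:11434", model="llama2", raw=True)
print(llm.invoke("[INST] Why is the sky blue? [/INST]"))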
import requests
from pytest import MonkeyPatch

from langchain_community.llms.ollama import Ollama


def mock_response_stream():  # type: ignore[no-untyped-def]
    """Return a fake streaming requests.Response with a single JSON chunk."""
    mock_response = [b'{ "response": "Response chunk 1" }']

    class MockRaw:
        def read(self, chunk_size):  # type: ignore[no-untyped-def]
            try:
                return mock_response.pop()
            except IndexError:
                return None

    response = requests.Response()
    response.status_code = 200
    response.raw = MockRaw()
    return response
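

# A note on the helper above: requests reads `response.raw` chunk by chunk,
# so popping from the one-element list yields a single JSON line and then
# None, which ends the stream. Illustrative behaviour (an assumed call
# pattern, not part of the test run):
#
#     resp = mock_response_stream()
#     resp.raw.read(512)  # -> b'{ "response": "Response chunk 1" }'
#     resp.raw.read(512)  # -> None (stream exhausted)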


def test_pass_headers_if_provided(monkeypatch: MonkeyPatch) -> None:
    """Test that caller-provided headers are merged into the request headers."""
    llm = Ollama(
        base_url="https://ollama-hostname:8000",
        model="foo",
        headers={
            "Authorization": "Bearer TEST-TOKEN-VALUE",
            "Referer": "https://application-host",
        },
        timeout=300,
    )

    def mock_post(url, headers, json, stream, timeout):  # type: ignore[no-untyped-def]
        assert url == "https://ollama-hostname:8000/api/generate"
        assert headers == {
            "Content-Type": "application/json",
            "Authorization": "Bearer TEST-TOKEN-VALUE",
            "Referer": "https://application-host",
        }
        assert json is not None
        assert stream is True
        assert timeout == 300

        return mock_response_stream()

    monkeypatch.setattr(requests, "post", mock_post)

    llm.invoke("Test prompt")
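

# The headers assertion above implies the client merges caller-supplied
# headers on top of its default Content-Type; a sketch of that merge (an
# assumption, not the verbatim implementation):
#
#     headers = {"Content-Type": "application/json", **(self.headers or {})}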


def test_handle_if_headers_not_provided(monkeypatch: MonkeyPatch) -> None:
    """Test that only the default Content-Type header is sent by default."""
    llm = Ollama(base_url="https://ollama-hostname:8000", model="foo", timeout=300)

    def mock_post(url, headers, json, stream, timeout):  # type: ignore[no-untyped-def]
        assert url == "https://ollama-hostname:8000/api/generate"
        assert headers == {
            "Content-Type": "application/json",
        }
        assert json is not None
        assert stream is True
        assert timeout == 300

        return mock_response_stream()

    monkeypatch.setattr(requests, "post", mock_post)

    llm.invoke("Test prompt")


def test_handle_kwargs_top_level_parameters(monkeypatch: MonkeyPatch) -> None:
    """Test that top-level params are sent to the endpoint as top-level params."""
    llm = Ollama(base_url="https://ollama-hostname:8000", model="foo", timeout=300)

    def mock_post(url, headers, json, stream, timeout):  # type: ignore[no-untyped-def]
        assert url == "https://ollama-hostname:8000/api/generate"
        assert headers == {
            "Content-Type": "application/json",
        }
        assert json == {
            "format": None,
            "images": None,
            "model": "test-model",
            "options": {
                "mirostat": None,
                "mirostat_eta": None,
                "mirostat_tau": None,
                "num_ctx": None,
                "num_gpu": None,
                "num_thread": None,
                "num_predict": None,
                "repeat_last_n": None,
                "repeat_penalty": None,
                "stop": None,
                "temperature": None,
                "tfs_z": None,
                "top_k": None,
                "top_p": None,
            },
            "prompt": "Test prompt",
            "system": "Test system prompt",
            "template": None,
            "keep_alive": None,
            "raw": None,
        }
        assert stream is True
        assert timeout == 300

        return mock_response_stream()

    monkeypatch.setattr(requests, "post", mock_post)

    llm.invoke("Test prompt", model="test-model", system="Test system prompt")
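

# Kwargs whose names match known top-level request fields ("model",
# "system", "template", ...) are promoted to the top level of the JSON body
# rather than nested under "options". A sketch of that routing (identifier
# names are assumptions, not the verbatim implementation):
#
#     for key, value in kwargs.items():
#         if key in TOP_LEVEL_PARAMS:
#             payload[key] = value
#         else:
#             payload["options"][key] = value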


def test_handle_kwargs_with_unknown_param(monkeypatch: MonkeyPatch) -> None:
    """
    Test that params that are not top-level params are sent to the endpoint
    as options.
    """
    llm = Ollama(base_url="https://ollama-hostname:8000", model="foo", timeout=300)

    def mock_post(url, headers, json, stream, timeout):  # type: ignore[no-untyped-def]
        assert url == "https://ollama-hostname:8000/api/generate"
        assert headers == {
            "Content-Type": "application/json",
        }
        assert json == {
            "format": None,
            "images": None,
            "model": "foo",
            "options": {
                "mirostat": None,
                "mirostat_eta": None,
                "mirostat_tau": None,
                "num_ctx": None,
                "num_gpu": None,
                "num_thread": None,
                "num_predict": None,
                "repeat_last_n": None,
                "repeat_penalty": None,
                "stop": None,
                "temperature": 0.8,
                "tfs_z": None,
                "top_k": None,
                "top_p": None,
                "unknown": "Unknown parameter value",
            },
            "prompt": "Test prompt",
            "system": None,
            "template": None,
            "keep_alive": None,
            "raw": None,
        }
        assert stream is True
        assert timeout == 300

        return mock_response_stream()

    monkeypatch.setattr(requests, "post", mock_post)

    llm.invoke("Test prompt", unknown="Unknown parameter value", temperature=0.8)


def test_handle_kwargs_with_options(monkeypatch: MonkeyPatch) -> None:
    """
    Test that if options are provided they are sent to the endpoint as options,
    ignoring other params that are not top-level params.
    """
    llm = Ollama(base_url="https://ollama-hostname:8000", model="foo", timeout=300)

    def mock_post(url, headers, json, stream, timeout):  # type: ignore[no-untyped-def]
        assert url == "https://ollama-hostname:8000/api/generate"
        assert headers == {
            "Content-Type": "application/json",
        }
        assert json == {
            "format": None,
            "images": None,
            "model": "test-another-model",
            "options": {"unknown_option": "Unknown option value"},
            "prompt": "Test prompt",
            "system": None,
            "template": None,
            "keep_alive": None,
            "raw": None,
        }
        assert stream is True
        assert timeout == 300

        return mock_response_stream()

    monkeypatch.setattr(requests, "post", mock_post)

    llm.invoke(
        "Test prompt",
        model="test-another-model",
        options={"unknown_option": "Unknown option value"},
        unknown="Unknown parameter value",
        temperature=0.8,
    )
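

# Note that when an explicit `options` dict is supplied it wins wholesale:
# the asserted payload above contains only "unknown_option", while the
# `unknown` and `temperature` kwargs are dropped rather than merged in.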