community[patch]: fix top_p type hint (#15452)

fix: https://github.com/langchain-ai/langchain/issues/15341

@efriis
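For context: `top_p` is a nucleus-sampling probability mass in [0, 1], so an `int` annotation only admits the degenerate values 0 and 1. A minimal sketch of the failure mode (standalone pydantic models mirroring just the patched field, not the full LangChain classes):

```python
from typing import Optional

from pydantic import BaseModel, ValidationError


class BeforeFix(BaseModel):
    top_p: Optional[int] = None  # the old, incorrect annotation


class AfterFix(BaseModel):
    top_p: Optional[float] = None  # the corrected annotation


try:
    # pydantic v1 silently truncates 0.9 to 0; pydantic v2 rejects it outright.
    print(BeforeFix(top_p=0.9))
except ValidationError as exc:
    print(exc)

print(AfterFix(top_p=0.9))  # top_p=0.9 survives intact
```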
chyroc committed d334efc848 (parent 251afda549)

@@ -97,7 +97,7 @@ class OllamaEmbeddings(BaseModel, Embeddings):
     will give more diverse answers, while a lower value (e.g. 10)
     will be more conservative. (Default: 40)"""
-    top_p: Optional[int] = None
+    top_p: Optional[float] = None
     """Works together with top-k. A higher value (e.g., 0.95) will lead
     to more diverse text, while a lower value (e.g., 0.5) will
     generate more focused and conservative text. (Default: 0.9)"""
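With the float annotation, a fractional `top_p` now passes validation as intended. A hedged usage sketch (assumes a local Ollama server and the `langchain_community` import path this patch targets):

```python
from langchain_community.embeddings import OllamaEmbeddings

# top_p=0.5 keeps only the most probable tokens up to 0.5 cumulative mass,
# yielding more focused, conservative output (per the docstring above).
embeddings = OllamaEmbeddings(model="llama2", top_p=0.5)
vector = embeddings.embed_query("What is nucleus sampling?")
```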

@@ -38,7 +38,7 @@ class NLPCloud(LLM):
     """Whether or not to remove the end sequence token."""
     bad_words: List[str] = []
     """List of tokens not allowed to be generated."""
-    top_p: int = 1
+    top_p: float = 1.0
     """Total probability mass of tokens to consider at each step."""
     top_k: int = 50
     """The number of highest probability tokens to keep for top-k filtering."""
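To make "total probability mass" concrete: nucleus (top-p) sampling keeps the smallest set of tokens whose probabilities sum to at least `top_p`. An illustrative sketch, not NLP Cloud's actual implementation:

```python
def nucleus_filter(probs: list[float], top_p: float) -> list[int]:
    """Return indices of the smallest set of tokens whose cumulative
    probability reaches top_p (assumes probs sums to 1.0)."""
    order = sorted(range(len(probs)), key=lambda i: probs[i], reverse=True)
    kept, cumulative = [], 0.0
    for i in order:
        kept.append(i)
        cumulative += probs[i]
        if cumulative >= top_p:
            break
    return kept


# With top_p=0.9, tokens 0 and 1 (mass 0.5 + 0.4) are kept; with the
# NLPCloud default of 1.0, every token stays in play.
print(nucleus_filter([0.5, 0.4, 0.08, 0.02], top_p=0.9))  # [0, 1]
```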

@@ -90,7 +90,7 @@ class _OllamaCommon(BaseLanguageModel):
     will give more diverse answers, while a lower value (e.g. 10)
     will be more conservative. (Default: 40)"""
-    top_p: Optional[int] = None
+    top_p: Optional[float] = None
     """Works together with top-k. A higher value (e.g., 0.95) will lead
     to more diverse text, while a lower value (e.g., 0.5) will
     generate more focused and conservative text. (Default: 0.9)"""
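The same annotation fix carries over to the LLM side. A hedged sketch (assumes a running Ollama server; `Ollama` is the community wrapper built on `_OllamaCommon`):

```python
from langchain_community.llms import Ollama

# A fractional top_p (0.95 here, favoring diverse output) now validates
# instead of being mangled by the int annotation.
llm = Ollama(model="llama2", top_p=0.95, top_k=40)
print(llm.invoke("Why must top_p be a float?"))
```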

@@ -443,7 +443,7 @@ Supported examples:
     top_k: Optional[int] = None
     """Decode using top-k sampling: consider the set of top_k most probable tokens.
     Must be positive."""
-    top_p: Optional[int] = None
+    top_p: Optional[float] = None
     """The maximum cumulative probability of tokens to consider when sampling.
     The model uses combined Top-k and nucleus sampling.
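This docstring describes the composition: top-k first restricts sampling to the k most probable tokens, then top-p trims that set by cumulative mass. An illustrative sketch (including renormalization over the top-k set, which real implementations may or may not do):

```python
def top_k_top_p_filter(probs: list[float], top_k: int, top_p: float) -> list[int]:
    """Apply top-k, then nucleus (top-p) filtering; return kept token indices."""
    # Step 1: keep only the top_k most probable tokens.
    order = sorted(range(len(probs)), key=lambda i: probs[i], reverse=True)[:top_k]
    # Step 2: keep the smallest prefix of that set reaching top_p cumulative mass.
    total = sum(probs[i] for i in order)
    kept, cumulative = [], 0.0
    for i in order:
        kept.append(i)
        cumulative += probs[i] / total
        if cumulative >= top_p:
            break
    return kept


# top_k=3 keeps tokens {0, 1, 2}; top_p=0.9 then trims to {0, 1}.
print(top_k_top_p_filter([0.5, 0.4, 0.06, 0.04], top_k=3, top_p=0.9))
```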
