Mirror of https://github.com/hwchase17/langchain (synced 2024-11-10 01:10:59 +00:00)
openai[patch]: update openai params (#23691)
**Description:** Explicitly add parameters from the OpenAI API.

- [x] **Lint and test**: Run `make format`, `make lint` and `make test` from the root of the package(s) you've modified. See contribution guidelines for more: https://python.langchain.com/docs/contributing/

Co-authored-by: Erick Friis <erick@langchain.dev>
parent f0a7581b50
commit f071581aea
@@ -319,10 +319,26 @@ class BaseChatOpenAI(BaseChatModel):
     None."""
     max_retries: int = 2
     """Maximum number of retries to make when generating."""
+    presence_penalty: Optional[float] = None
+    """Penalizes repeated tokens."""
+    frequency_penalty: Optional[float] = None
+    """Penalizes repeated tokens according to frequency."""
+    seed: Optional[int] = None
+    """Seed for generation"""
+    logprobs: Optional[bool] = False
+    """Whether to return logprobs."""
+    top_logprobs: Optional[int] = None
+    """Number of most likely tokens to return at each token position, each with
+    an associated log probability. `logprobs` must be set to true
+    if this parameter is used."""
+    logit_bias: Optional[Dict[int, int]] = None
+    """Modify the likelihood of specified tokens appearing in the completion."""
     streaming: bool = False
     """Whether to stream the results or not."""
     n: int = 1
     """Number of chat completions to generate for each prompt."""
+    top_p: Optional[float] = None
+    """Total probability mass of tokens to consider at each step."""
     max_tokens: Optional[int] = None
     """Maximum number of tokens to generate."""
     tiktoken_model_name: Optional[str] = None
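With these fields declared on `BaseChatOpenAI`, the common OpenAI sampling parameters can be passed straight to the constructor instead of being tunneled through `model_kwargs`. A minimal sketch of the resulting usage (model name and values are illustrative, and assume `langchain-openai` is installed with `OPENAI_API_KEY` set):

```python
from langchain_openai import ChatOpenAI

# Each of these is now a first-class constructor argument rather than
# an entry in model_kwargs.
llm = ChatOpenAI(
    model="gpt-4o-mini",     # assumed model name, for illustration only
    temperature=0.7,
    presence_penalty=0.5,    # penalize tokens that have already appeared
    frequency_penalty=0.5,   # penalize tokens in proportion to their count
    seed=42,                 # best-effort reproducible sampling
    top_p=0.9,               # nucleus sampling probability mass
    logprobs=True,           # return log probabilities
    top_logprobs=3,          # requires logprobs=True
)

response = llm.invoke("Say hello in exactly one word.")
print(response.content)
```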
@@ -444,6 +460,13 @@ class BaseChatOpenAI(BaseChatModel):
             "stream": self.streaming,
             "n": self.n,
             "temperature": self.temperature,
+            "presence_penalty": self.presence_penalty,
+            "frequency_penalty": self.frequency_penalty,
+            "seed": self.seed,
+            "top_p": self.top_p,
+            "logprobs": self.logprobs,
+            "top_logprobs": self.top_logprobs,
+            "logit_bias": self.logit_bias,
             **self.model_kwargs,
         }
         if self.max_tokens is not None:
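In `_default_params`, every explicit field is folded into the request dict, with `model_kwargs` still spliced in for anything not yet promoted to a field. Parameters left at `None` are not meaningful to send, so a None-filtering step of this general shape (a simplified sketch, not the library's exact code) keeps them out of the payload:

```python
from typing import Any, Dict

def filter_unset(params: Dict[str, Any]) -> Dict[str, Any]:
    """Drop entries whose value is None so the API only receives
    parameters the caller actually set."""
    return {k: v for k, v in params.items() if v is not None}

defaults = {"n": 1, "seed": None, "top_p": 0.9, "logit_bias": None}
print(filter_unset(defaults))  # {'n': 1, 'top_p': 0.9}
```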
@@ -110,6 +110,11 @@ class BaseOpenAI(BaseLLM):
     """Adjust the probability of specific tokens being generated."""
     max_retries: int = 2
     """Maximum number of retries to make when generating."""
+    seed: Optional[int] = None
+    """Seed for generation"""
+    logprobs: Optional[int] = None
+    """Include the log probabilities on the logprobs most likely output tokens,
+    as well the chosen tokens."""
     streaming: bool = False
     """Whether to stream the results or not."""
     allowed_special: Union[Literal["all"], AbstractSet[str]] = set()
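Note the type difference between the two classes: on the chat model `logprobs` is a boolean switch, while on the legacy completions model it is an integer count of top tokens to score per position. A short sketch of the completions side (model name assumed for illustration):

```python
from langchain_openai import OpenAI

llm = OpenAI(
    model="gpt-3.5-turbo-instruct",  # assumed completions-capable model
    seed=7,       # best-effort reproducible sampling
    logprobs=5,   # int here: score the 5 most likely tokens per position
)

print(llm.invoke("The capital of France is"))
```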
@@ -220,6 +225,8 @@ class BaseOpenAI(BaseLLM):
             "presence_penalty": self.presence_penalty,
             "n": self.n,
             "logit_bias": self.logit_bias,
+            "seed": self.seed,
+            "logprobs": self.logprobs,
         }

         if self.max_tokens is not None:
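A practical payoff of exposing `seed` on both classes is best-effort reproducible sampling: per OpenAI's documentation, repeated calls with the same seed and parameters attempt to return the same completion, though determinism is not guaranteed. A quick sketch (model name assumed):

```python
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini", temperature=1.0, seed=123)  # model assumed

# Same prompt, same seed: OpenAI tries (but does not promise) to
# produce identical samples across calls.
first = llm.invoke("Pick a random animal.")
second = llm.invoke("Pick a random animal.")
print(first.content, "|", second.content)
```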