openai[patch]: update openai params (#23691)

**Description:** Explicitly add parameters from the OpenAI API (`presence_penalty`, `frequency_penalty`, `seed`, `logprobs`, `top_logprobs`, `logit_bias`, and `top_p` on the chat model; `seed` and `logprobs` on the completions LLM) instead of requiring them to be passed through `model_kwargs`.
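
For example (a minimal sketch; the model name, token ID, and parameter values are illustrative), these parameters can now be passed directly to `ChatOpenAI` instead of through `model_kwargs`:

```python
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    model="gpt-4o",            # illustrative model name
    presence_penalty=0.5,      # discourage tokens already present
    frequency_penalty=0.5,     # discourage tokens proportional to their frequency
    seed=42,                   # best-effort deterministic sampling
    logprobs=True,             # return log probabilities
    top_logprobs=3,            # top-3 alternatives per token position
    logit_bias={50256: -100},  # hypothetical token ID to suppress
)
```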

- [X] **Lint and test**: Run `make format`, `make lint` and `make test`
from the root of the package(s) you've modified. See contribution
guidelines for more: https://python.langchain.com/docs/contributing/

---------

Co-authored-by: Erick Friis <erick@langchain.dev>
Roman Solomatin, 2024-07-13 04:53:33 +05:00 (committed by GitHub)
commit f071581aea (parent f0a7581b50)
2 changed files with 30 additions and 0 deletions

langchain_openai/chat_models/base.py

@@ -319,10 +319,26 @@ class BaseChatOpenAI(BaseChatModel):
     None."""
     max_retries: int = 2
     """Maximum number of retries to make when generating."""
+    presence_penalty: Optional[float] = None
+    """Penalizes repeated tokens."""
+    frequency_penalty: Optional[float] = None
+    """Penalizes repeated tokens according to frequency."""
+    seed: Optional[int] = None
+    """Seed for generation."""
+    logprobs: Optional[bool] = False
+    """Whether to return logprobs."""
+    top_logprobs: Optional[int] = None
+    """Number of most likely tokens to return at each token position, each with
+    an associated log probability. `logprobs` must be set to true
+    if this parameter is used."""
+    logit_bias: Optional[Dict[int, int]] = None
+    """Modify the likelihood of specified tokens appearing in the completion."""
     streaming: bool = False
     """Whether to stream the results or not."""
     n: int = 1
     """Number of chat completions to generate for each prompt."""
+    top_p: Optional[float] = None
+    """Total probability mass of tokens to consider at each step."""
     max_tokens: Optional[int] = None
     """Maximum number of tokens to generate."""
     tiktoken_model_name: Optional[str] = None
@@ -444,6 +460,13 @@ class BaseChatOpenAI(BaseChatModel):
             "stream": self.streaming,
             "n": self.n,
             "temperature": self.temperature,
+            "presence_penalty": self.presence_penalty,
+            "frequency_penalty": self.frequency_penalty,
+            "seed": self.seed,
+            "top_p": self.top_p,
+            "logprobs": self.logprobs,
+            "top_logprobs": self.top_logprobs,
+            "logit_bias": self.logit_bias,
             **self.model_kwargs,
         }
         if self.max_tokens is not None:
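
With `logprobs` enabled, the log-probability block returned by the API is surfaced on the message's `response_metadata` (a hedged sketch; the model name is illustrative, and the key is only present when the server actually returns logprobs):

```python
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o", logprobs=True, top_logprobs=2)  # illustrative model
msg = llm.invoke("Say hello")
# The raw logprobs block from the API response, if one was returned.
print(msg.response_metadata.get("logprobs"))
```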

langchain_openai/llms/base.py

@@ -110,6 +110,11 @@ class BaseOpenAI(BaseLLM):
     """Adjust the probability of specific tokens being generated."""
     max_retries: int = 2
     """Maximum number of retries to make when generating."""
+    seed: Optional[int] = None
+    """Seed for generation."""
+    logprobs: Optional[int] = None
+    """Include the log probabilities on the `logprobs` most likely output tokens,
+    as well as the chosen tokens."""
     streaming: bool = False
     """Whether to stream the results or not."""
     allowed_special: Union[Literal["all"], AbstractSet[str]] = set()
@@ -220,6 +225,8 @@ class BaseOpenAI(BaseLLM):
             "presence_penalty": self.presence_penalty,
             "n": self.n,
             "logit_bias": self.logit_bias,
+            "seed": self.seed,
+            "logprobs": self.logprobs,
         }
         if self.max_tokens is not None:
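
And a similar sketch for the completions-style `OpenAI` LLM, where `logprobs` is an integer count of most-likely tokens rather than a boolean (model name and values are illustrative):

```python
from langchain_openai import OpenAI

llm = OpenAI(
    model="gpt-3.5-turbo-instruct",  # illustrative completions model
    seed=42,      # best-effort deterministic sampling
    logprobs=2,   # log probabilities for the 2 most likely tokens per position
)
print(llm.invoke("The capital of France is"))
```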