Mirror of https://github.com/hwchase17/langchain, synced 2024-11-10 01:10:59 +00:00
community[patch]: Updating default PPLX model to supported llama-3.1 model. (#25643)
# Issue

As of late July, Perplexity [no longer supports Llama 3 models](https://docs.perplexity.ai/changelog/introducing-new-and-improved-sonar-models).

# Description

This PR updates the default model and doc examples to reflect their latest supported model. (Mostly updating the same places changed by #23723.)

# Twitter handle

`@acompa_` on behalf of the team at Not Diamond. Check us out [here](https://notdiamond.ai).

---------

Co-authored-by: Harrison Chase <hw.chase.17@gmail.com>
Parent: 163ef35dd1
Commit: bcd5842b5d
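For context, here is a minimal sketch of the updated default in use (assuming `langchain-community` is installed and a valid `PPLX_API_KEY` is exported; this example is illustrative and not part of the diff below):

```python
import os

from langchain_community.chat_models import ChatPerplexity

# Assumes PPLX_API_KEY is already exported; ChatPerplexity reads it from the
# environment when no api_key is passed explicitly.
assert "PPLX_API_KEY" in os.environ

# llama-3.1-sonar-small-128k-online is the new default set by this PR, so the
# model argument here is redundant and shown only for clarity.
chat = ChatPerplexity(temperature=0, model="llama-3.1-sonar-small-128k-online")
print(chat.invoke("Tell me a joke about cats").content)
```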
@@ -62,6 +62,12 @@
    "id": "97a8ce3a",
    "metadata": {},
    "source": [
+    "The code provided assumes that your PPLX_API_KEY is set in your environment variables. If you would like to manually specify your API key and also choose a different model, you can use the following code:\n",
+    "\n",
+    "```python\n",
+    "chat = ChatPerplexity(temperature=0, pplx_api_key=\"YOUR_API_KEY\", model=\"llama-3.1-sonar-small-128k-online\")\n",
+    "```\n",
+    "\n",
    "You can check a list of available models [here](https://docs.perplexity.ai/docs/model-cards). For reproducibility, we can set the API key dynamically by taking it as an input in this notebook."
   ]
  },
@@ -92,7 +98,7 @@
   },
   "outputs": [],
   "source": [
-    "chat = ChatPerplexity(temperature=0, model=\"llama-3-sonar-small-32k-online\")"
+    "chat = ChatPerplexity(temperature=0, model=\"llama-3.1-sonar-small-128k-online\")"
   ]
  },
  {
@@ -160,7 +166,7 @@
    }
   ],
   "source": [
-    "chat = ChatPerplexity(temperature=0, model=\"llama-3-sonar-small-32k-online\")\n",
+    "chat = ChatPerplexity(temperature=0, model=\"llama-3.1-sonar-small-128k-online\")\n",
    "prompt = ChatPromptTemplate.from_messages([(\"human\", \"Tell me a joke about {topic}\")])\n",
    "chain = prompt | chat\n",
    "response = chain.invoke({\"topic\": \"cats\"})\n",
@@ -209,7 +215,7 @@
    }
   ],
   "source": [
-    "chat = ChatPerplexity(temperature=0.7, model=\"llama-3-sonar-small-32k-online\")\n",
+    "chat = ChatPerplexity(temperature=0.7, model=\"llama-3.1-sonar-small-128k-online\")\n",
    "prompt = ChatPromptTemplate.from_messages(\n",
    "    [(\"human\", \"Give me a list of famous tourist attractions in Pakistan\")]\n",
    ")\n",
@@ -55,20 +55,20 @@ class ChatPerplexity(BaseChatModel):
            from langchain_community.chat_models import ChatPerplexity

            chat = ChatPerplexity(
-                model="llama-3-sonar-small-32k-online",
+                model="llama-3.1-sonar-small-128k-online",
                temperature=0.7,
            )
    """

    client: Any  #: :meta private:
-    model: str = "llama-3-sonar-small-32k-online"
+    model: str = "llama-3.1-sonar-small-128k-online"
    """Model name."""
    temperature: float = 0.7
    """What sampling temperature to use."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not explicitly specified."""
    pplx_api_key: Optional[str] = Field(None, alias="api_key")
    """Base URL path for API requests,
    leave blank if not using a proxy or service emulator."""
    request_timeout: Optional[Union[float, Tuple[float, float]]] = Field(
        None, alias="timeout"
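Following the note added to the notebook about setting the API key dynamically, one possible pattern looks like this (a sketch only; the `getpass` prompt string is illustrative, not from the PR):

```python
import getpass
import os

from langchain_community.chat_models import ChatPerplexity

# Prompt for the key at runtime so it never lands in the notebook source.
if "PPLX_API_KEY" not in os.environ:
    os.environ["PPLX_API_KEY"] = getpass.getpass("Perplexity API key: ")

chat = ChatPerplexity(temperature=0, model="llama-3.1-sonar-small-128k-online")
```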