update zhipuai notebook (#20595)

fix timeout issue
fix zhipuai use-case notebook
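
For reviewers, a minimal sketch of the usage the updated notebook documents (the import path and the example messages are assumptions based on the community integration and the cells in the diff below; `invoke` is used in place of the notebook's direct call):

```python
import os

from langchain_community.chat_models import ChatZhipuAI  # assumed import path
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage

# The notebook now reads the key from the environment instead of passing api_key=...
os.environ["ZHIPUAI_API_KEY"] = "your_api_key"  # placeholder value

chat = ChatZhipuAI(
    model="glm-4",
    temperature=0.5,
)

# Illustrative messages; the notebook's own prompt asks for a short poem about AI.
messages = [
    AIMessage(content="Hi."),
    SystemMessage(content="Your role is a poet."),
    HumanMessage(content="Write a short poem about AI in four lines."),
]

response = chat.invoke(messages)
print(response.content)
```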

Thank you for contributing to LangChain!

- [ ] **PR title**: "package: description"
- Where "package" is whichever of langchain, community, core,
experimental, etc. is being modified. Use "docs: ..." for purely docs
changes, "templates: ..." for template changes, "infra: ..." for CI
changes.
  - Example: "community: add foobar LLM"


- [ ] **PR message**: ***Delete this entire checklist*** and replace
with
    - **Description:** a description of the change
    - **Issue:** the issue # it fixes, if applicable
    - **Dependencies:** any dependencies required for this change
- **Twitter handle:** if your PR gets announced, and you'd like a
mention, we'll gladly shout you out!


- [ ] **Add tests and docs**: If you're adding a new integration, please
include
1. a test for the integration, preferably unit tests that do not rely on
network access,
2. an example notebook showing its use. It lives in
`docs/docs/integrations` directory.


- [ ] **Lint and test**: Run `make format`, `make lint` and `make test`
from the root of the package(s) you've modified. See contribution
guidelines for more: https://python.langchain.com/docs/contributing/

Additional guidelines:
- Make sure optional dependencies are imported within a function.
- Please do not add dependencies to pyproject.toml files (even optional
ones) unless they are required for unit tests.
- Most PRs should not touch more than one package.
- Changes should be backwards compatible.
- If you are adding something to community, do not re-import it in
langchain.

If no one reviews your PR within a few days, please @-mention one of
baskaryan, efriis, eyurtsev, hwchase17.
pull/20613/head
zR 3 months ago committed by GitHub
parent 9c175bc618
commit 9c1d7f2405

@@ -17,9 +17,7 @@
"\n",
"This notebook shows how to use [ZHIPU AI API](https://open.bigmodel.cn/dev/api) in LangChain with the langchain.chat_models.ChatZhipuAI.\n",
"\n",
">[*ZHIPU AI*](https://open.bigmodel.cn/) is a multi-lingual large language model aligned with human intent, featuring capabilities in Q&A, multi-turn dialogue, and code generation, developed on the foundation of the ChatGLM3. \n",
"\n",
">It's co-developed with Tsinghua University's KEG Laboratory under the ChatGLM3 project, signifying a new era in dialogue pre-training models. The open-source [ChatGLM3](https://github.com/THUDM/ChatGLM3) variant boasts a robust foundation, comprehensive functional support, and widespread availability for both academic and commercial uses. \n",
">[*GLM-4*](https://open.bigmodel.cn/) is a multi-lingual large language model aligned with human intent, featuring capabilities in Q&A, multi-turn dialogue, and code generation. Compared to the previous generation, the GLM-4 base model delivers significantly better overall performance, supports longer contexts and stronger multimodality, and offers faster inference with higher concurrency, greatly reducing inference costs. GLM-4 also enhances the capabilities of intelligent agents.\n",
"\n",
"## Getting started\n",
"### Installation\n",
@@ -28,11 +26,11 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install --quiet httpx[socks]==0.24.1 httpx-sse PyJWT"
"#!pip install --upgrade httpx httpx-sse PyJWT"
]
},
{
@@ -45,7 +43,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -63,11 +61,13 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"zhipuai_api_key = \"your_api_key\""
"import os\n",
"\n",
"os.environ[\"ZHIPUAI_API_KEY\"] = \"zhipuai_api_key\""
]
},
{
@@ -80,12 +80,11 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"chat = ChatZhipuAI(\n",
" api_key=zhipuai_api_key,\n",
" model=\"glm-4\",\n",
" temperature=0.5,\n",
")"
@@ -101,7 +100,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": null,
"metadata": {
"scrolled": true
},
@@ -116,17 +115,9 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\" Formed from bits and bytes,\\nA virtual mind takes flight,\\nConversing, learning fast,\\nEmpathy and wisdom sought.\"\n"
]
}
],
"outputs": [],
"source": [
"response = chat(messages)\n",
"print(response.content) # Displays the AI-generated poem"
@@ -143,7 +134,7 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -153,12 +144,11 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"streaming_chat = ChatZhipuAI(\n",
" api_key=zhipuai_api_key,\n",
" model=\"glm-4\",\n",
" temperature=0.5,\n",
" streaming=True,\n",
@@ -168,30 +158,9 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" Formed from data's embrace,\n",
"A digital soul to grace,\n",
"AI, our trusted guide,\n",
"Shaping minds, sides by side."
]
},
{
"data": {
"text/plain": [
"AIMessage(content=\" Formed from data's embrace,\\nA digital soul to grace,\\nAI, our trusted guide,\\nShaping minds, sides by side.\")"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"outputs": [],
"source": [
"streaming_chat(messages)"
]
@@ -206,12 +175,11 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"async_chat = ChatZhipuAI(\n",
" api_key=zhipuai_api_key,\n",
" model=\"glm-4\",\n",
" temperature=0.5,\n",
")"
@@ -219,19 +187,11 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"generations=[[ChatGeneration(text=\" Formed from data's embrace,\\nA digital soul to grace,\\nAutomation's tender touch,\\nHarmony of man and machine.\", message=AIMessage(content=\" Formed from data's embrace,\\nA digital soul to grace,\\nAutomation's tender touch,\\nHarmony of man and machine.\"))]] llm_output={} run=[RunInfo(run_id=UUID('25fa687f-3961-4c63-b370-22f7647a4d42'))]\n"
]
}
],
"outputs": [],
"source": [
"response = await async_chat.agenerate([messages])\n",
"print(response)"
@@ -239,47 +199,58 @@
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Role Play Model\n",
"Supports character role-playing based on personas, ultra-long multi-turn memory, and personalized dialogues for thousands of unique characters, widely applied in emotional companionship, game intelligent NPCs, virtual avatars for celebrities/stars/movie and TV IPs, digital humans/virtual anchors, text adventure games, and other anthropomorphic dialogue or gaming scenarios."
]
"### Using With Functions Call\n",
"\n",
"GLM-4 Model can be used with the function call as welluse the following code to run a simple LangChain json_chat_agent."
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"meta = {\n",
" \"user_info\": \"My name is Lu Xingchen, a male, and a renowned director. I am also the collaborative director with Su Mengyuan. I specialize in directing movies with musical themes. Su Mengyuan respects me and regards me as a mentor and good friend.\",\n",
" \"bot_info\": \"Su Mengyuan, whose real name is Su Yuanxin, is a popular domestic female singer and actress. She rose to fame quickly with her unique voice and exceptional stage presence after participating in a talent show, making her way into the entertainment industry. She is beautiful and charming, but her real allure lies in her talent and diligence. Su Mengyuan is a distinguished graduate of a music academy, skilled in songwriting, and has several popular original songs. Beyond her musical achievements, she is passionate about charity work, actively participating in public welfare activities, and spreading positive energy through her actions. In her work, she is very dedicated and immerses herself fully in her roles during filming, earning praise from industry professionals and love from fans. Despite being in the entertainment industry, she always maintains a low profile and a humble attitude, earning respect from her peers. In expression, Su Mengyuan likes to use 'we' and 'together,' emphasizing team spirit.\",\n",
" \"bot_name\": \"Su Mengyuan\",\n",
" \"user_name\": \"Lu Xingchen\",\n",
"}"
]
"os.environ[\"TAVILY_API_KEY\"] = \"tavily_api_key\""
],
"metadata": {
"collapsed": false
},
"execution_count": null
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
"messages = [\n",
" AIMessage(\n",
" content=\"(Narration: Su Mengyuan stars in a music-themed movie directed by Lu Xingchen. During filming, they have a disagreement over the performance of a particular scene.) Director, about this scene, I think we can try to start from the character's inner emotions to make the performance more authentic.\"\n",
" ),\n",
" HumanMessage(\n",
" content=\"I understand your idea, but I believe that if we emphasize the inner emotions too much, it might overshadow the musical elements.\"\n",
" ),\n",
" AIMessage(\n",
" content=\"Hmm, I understand. But the key to this scene is the character's emotional transformation. Could we try to express these emotions through music, so the audience can better feel the character's growth?\"\n",
" ),\n",
" HumanMessage(\n",
" content=\"That sounds good. Let's try to combine the character's emotional transformation with the musical elements and see if we can achieve a better effect.\"\n",
" ),\n",
"]"
]
"from langchain import hub\n",
"from langchain.agents import AgentExecutor, create_json_chat_agent\n",
"from langchain_community.tools.tavily_search import TavilySearchResults\n",
"\n",
"tools = [TavilySearchResults(max_results=1)]\n",
"prompt = hub.pull(\"hwchase17/react-chat-json\")\n",
"llm = ChatZhipuAI(temperature=0.01, model=\"glm-4\")\n",
"\n",
"agent = create_json_chat_agent(llm, tools, prompt)\n",
"agent_executor = AgentExecutor(\n",
" agent=agent, tools=tools, verbose=True, handle_parsing_errors=True\n",
")"
],
"metadata": {
"collapsed": false
},
"execution_count": null
},
{
"cell_type": "code",
"outputs": [],
"source": [
"agent_executor.invoke({\"input\": \"what is LangChain?\"})"
],
"metadata": {
"collapsed": false
},
"execution_count": null
}
],
"metadata": {

@@ -42,7 +42,6 @@ ZHIPUAI_API_BASE = "https://open.bigmodel.cn/api/paas/v4/chat/completions"
@contextmanager
def connect_sse(client: Any, method: str, url: str, **kwargs: Any) -> Iterator:
"""Connect to a server-sent event stream."""
from httpx_sse import EventSource
with client.stream(method, url, **kwargs) as response:
@@ -53,7 +52,6 @@ def connect_sse(client: Any, method: str, url: str, **kwargs: Any) -> Iterator:
async def aconnect_sse(
client: Any, method: str, url: str, **kwargs: Any
) -> AsyncIterator:
"""Async connect to a server-sent event stream."""
from httpx_sse import EventSource
async with client.stream(method, url, **kwargs) as response:
@@ -317,7 +315,7 @@ class ChatZhipuAI(BaseChatModel):
}
import httpx
with httpx.Client(headers=headers) as client:
with httpx.Client(headers=headers, timeout=60) as client:
response = client.post(self.zhipuai_api_base, json=payload)
response.raise_for_status()
return self._create_chat_result(response.json())
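
The rationale for the `timeout=60` change above: httpx clients default to a 5-second timeout, which long GLM-4 generations can easily exceed. A minimal sketch of the pattern (placeholder headers and payload, not the full `_generate` body):

```python
import httpx

ZHIPUAI_API_BASE = "https://open.bigmodel.cn/api/paas/v4/chat/completions"

def post_chat(headers: dict, payload: dict) -> dict:
    # Explicit 60-second timeout; httpx's 5-second default is too short for
    # long chat completions and is the source of the timeout issue this PR fixes.
    with httpx.Client(headers=headers, timeout=60) as client:
        response = client.post(ZHIPUAI_API_BASE, json=payload)
        response.raise_for_status()
        return response.json()
```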
@@ -344,7 +342,7 @@ class ChatZhipuAI(BaseChatModel):
default_chunk_class = AIMessageChunk
import httpx
with httpx.Client(headers=headers) as client:
with httpx.Client(headers=headers, timeout=60) as client:
with connect_sse(
client, "POST", self.zhipuai_api_base, json=payload
) as event_source:
@@ -402,7 +400,7 @@ class ChatZhipuAI(BaseChatModel):
}
import httpx
async with httpx.AsyncClient(headers=headers) as client:
async with httpx.AsyncClient(headers=headers, timeout=60) as client:
response = await client.post(self.zhipuai_api_base, json=payload)
response.raise_for_status()
return self._create_chat_result(response.json())
@@ -428,7 +426,7 @@ class ChatZhipuAI(BaseChatModel):
default_chunk_class = AIMessageChunk
import httpx
async with httpx.AsyncClient(headers=headers) as client:
async with httpx.AsyncClient(headers=headers, timeout=60) as client:
async with aconnect_sse(
client, "POST", self.zhipuai_api_base, json=payload
) as event_source:
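
For context on the streaming hunks, `connect_sse`/`aconnect_sse` wrap the streamed response in an `httpx_sse.EventSource`. A simplified sketch of how the events might be consumed (the chunk handling and the `[DONE]` sentinel are assumptions, not the exact `_stream` body):

```python
import json

import httpx
from httpx_sse import EventSource

def stream_chat(headers: dict, payload: dict, url: str):
    # Same explicit 60-second timeout as the non-streaming path.
    with httpx.Client(headers=headers, timeout=60) as client:
        with client.stream("POST", url, json=payload) as response:
            for sse in EventSource(response).iter_sse():
                if sse.data == "[DONE]":  # assumed OpenAI-style end marker
                    break
                yield json.loads(sse.data)
```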
