Merge branch 'master' into fix/support_infinity_max_token_litellm

pull/21534/head
Ayo Ayibiowu committed by GitHub
commit 5ea8898d72

@@ -67,7 +67,7 @@
 "outputs": [],
 "source": [
 "llm = OctoAIEndpoint(\n",
-"    model=\"llama-2-13b-chat-fp16\",\n",
+"    model_name=\"llama-2-13b-chat-fp16\",\n",
 "    max_tokens=200,\n",
 "    presence_penalty=0,\n",
 "    temperature=0.1,\n",
@@ -83,9 +83,9 @@
 "source": [
 "question = \"Who was Leonardo da Vinci?\"\n",
 "\n",
-"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
+"chain = prompt | llm\n",
 "\n",
-"print(llm_chain.run(question))"
+"print(chain.invoke(question))"
 ]
 },
 {
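For reviewers, the updated cell corresponds to roughly the standalone script below. This is a minimal sketch, not the notebook verbatim: it assumes OctoAI credentials are configured in the environment, the PromptTemplate is illustrative, and invoke() is passed a dict keyed by the prompt's input variable.

```python
from langchain_community.llms import OctoAIEndpoint
from langchain_core.prompts import PromptTemplate

# Illustrative prompt; the notebook defines its own `prompt` earlier.
prompt = PromptTemplate.from_template("Answer the question: {question}")

llm = OctoAIEndpoint(
    model_name="llama-2-13b-chat-fp16",
    max_tokens=200,
    presence_penalty=0,
    temperature=0.1,
)

# LCEL pipe composition replaces LLMChain, and .invoke() replaces .run().
chain = prompt | llm

question = "Who was Leonardo da Vinci?"
print(chain.invoke({"question": question}))
```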

@@ -35,7 +35,7 @@ There are two ways to set up parameters for myscale index.
 ```python
 from langchain_community.vectorstores import MyScale, MyScaleSettings
-config = MyScaleSetting(host="<your-backend-url>", port=8443, ...)
+config = MyScaleSettings(host="<your-backend-url>", port=8443, ...)
 index = MyScale(embedding_function, config)
 index.add_documents(...)
 ```
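For context, an end-to-end sketch built on the corrected MyScaleSettings name. The embedding model and sample document are illustrative assumptions, a running MyScale backend at the configured host is assumed, and similarity_search is the standard LangChain vector-store query method.

```python
from langchain_core.documents import Document
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import MyScale, MyScaleSettings

# Illustrative embedding function; any LangChain Embeddings implementation works.
embedding_function = HuggingFaceEmbeddings()

config = MyScaleSettings(host="<your-backend-url>", port=8443)
index = MyScale(embedding_function, config)

# Add a sample document, then query with the standard vector-store API.
index.add_documents([Document(page_content="Leonardo da Vinci was a Renaissance polymath.")])
results = index.similarity_search("Who was Leonardo da Vinci?", k=1)
print(results[0].page_content)
```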
