Name | Last commit message | Last commit date
cassettes | community[minor]: add Kinetica LLM wrapper (#17879) | 2024-02-22 16:02:00 -08:00
__init__.py | |
test_anthropic.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_azure_openai.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_azureml_endpoint.py | community[patch]: Support Streaming in Azure Machine Learning (#18246) | 2024-03-28 23:38:20 +00:00
test_baichuan.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_baiduqianfan.py | community[patch]: fix qianfan chat stream calling caused exception (#13800) | 2024-01-09 15:29:25 -08:00
test_bedrock.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_dappier.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_deepinfra.py | community[minor]: DeepInfra support for chat models (#16380) | 2024-01-22 11:22:17 -08:00
test_edenai.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_ernie.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_fireworks.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_friendli.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_google_palm.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_gpt_router.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_hunyuan.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_jinachat.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_kinetica.py | community[minor]: add Kinetica LLM wrapper (#17879) | 2024-02-22 16:02:00 -08:00
test_konko.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_litellm_router.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_litellm.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_llama_edge.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_octoai.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_openai.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_pai_eas_chat_endpoint.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_premai.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_promptlayer_openai.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_qianfan_endpoint.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_sparkllm.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_tongyi.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_vertexai.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_volcengine_maas.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
test_yuan2.py | community[patch]: fix yuan2 chat model errors while invoke. (#19015) | 2024-03-15 16:28:36 -07:00
test_zhipuai.py | patch: remove usage of llm, chat model __call__ (#20788) | 2024-04-24 19:39:23 -04:00
text_mlx.py | community[minor]: Add support for MLX models (chat & llm) (#18152) | 2024-04-09 14:17:07 +00:00