Mirror of https://github.com/hwchase17/langchain
Updates fireworks (#8765)
Description: Updates to Fireworks documentation
Issue: N/A
Dependencies: N/A
Tag maintainer: @rlancemartin

Co-authored-by: Raj Janardhan <rajjanardhan@Rajs-Laptop.attlocal.net>
parent 8c35fcb571
commit affaaea87b
@@ -7,7 +7,7 @@
 "source": [
 "# Fireworks\n",
 "\n",
-">[Fireworks](https://www.fireworks.ai/) is an AI startup focused on accelerating product development on generative AI by creating an innovative AI experiment and production platform. \n",
+">[Fireworks](https://app.fireworks.ai/) accelerates product development on generative AI by creating an innovative AI experiment and production platform. \n",
 "\n",
 "This example goes over how to use LangChain to interact with `Fireworks` models."
 ]
@@ -37,7 +37,7 @@
 "\n",
 "Contact Fireworks AI for an API key to access our models\n",
 "\n",
-"Set up your model using a model id. If the model is not set, the default model is fireworks-llama-v2-13b-chat."
+"Set up your model using a model id. If the model is not set, the default model is fireworks-llama-v2-7b-chat."
 ]
},
{
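As a minimal sketch of the default-model behaviour this cell describes (assumptions: `FIREWORKS_API_KEY` is available, and `Fireworks` is imported from LangChain's fireworks LLM module, the same module edited further down in this diff):

```python
import os

# Sketch only: the import path is an assumption based on the wrapper classes
# shown later in this diff.
from langchain.llms.fireworks import Fireworks

os.environ["FIREWORKS_API_KEY"] = "<your-api-key>"  # obtained from Fireworks AI

# With no model_id given, the BaseFireworks default applies
# ("accounts/fireworks/models/fireworks-llama-v2-7b-chat" after this change).
llm = Fireworks()
print(llm.model_id)
```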
@@ -49,7 +49,7 @@
 "source": [
 "# Initialize a Fireworks LLM\n",
 "os.environ['FIREWORKS_API_KEY'] = \"\" #change this to your own API KEY\n",
-"llm = Fireworks(model_id=\"fireworks-llama-v2-13b-chat\")"
+"llm = Fireworks(model_id=\"accounts/fireworks/models/fireworks-llama-v2-13b-chat\")"
 ]
},
{
@@ -73,11 +73,10 @@
 "You can use the LLMs to call the model for specified prompt(s). \n",
 "\n",
 "Current Specified Models: \n",
-"* fireworks-falcon-7b, fireworks-falcon-40b-w8a16\n",
-"* fireworks-guanaco-30b, fireworks-guanaco-33b-w8a16\n",
-"* fireworks-llama-7b, fireworks-llama-13b, fireworks-llama-30b-w8a16\n",
-"* fireworks-llama-v2-13b, fireworks-llama-v2-13b-chat, fireworks-llama-v2-13b-w8a16, fireworks-llama-v2-13b-chat-w8a16\n",
-"* fireworks-llama-v2-7b, fireworks-llama-v2-7b-chat, fireworks-llama-v2-7b-w8a16, fireworks-llama-v2-7b-chat-w8a16"
+"* accounts/fireworks/models/fireworks-falcon-7b, accounts/fireworks/models/fireworks-falcon-40b-w8a16\n",
+"* accounts/fireworks/models/fireworks-starcoder-1b-w8a16-1gpu, accounts/fireworks/models/fireworks-starcoder-3b-w8a16-1gpu, accounts/fireworks/models/fireworks-starcoder-7b-w8a16-1gpu, accounts/fireworks/models/fireworks-starcoder-16b-w8a16 \n",
+"* accounts/fireworks/models/fireworks-llama-v2-13b, accounts/fireworks/models/fireworks-llama-v2-13b-chat, accounts/fireworks/models/fireworks-llama-v2-13b-w8a16, accounts/fireworks/models/fireworks-llama-v2-13b-chat-w8a16\n",
+"* accounts/fireworks/models/fireworks-llama-v2-7b, accounts/fireworks/models/fireworks-llama-v2-7b-chat, accounts/fireworks/models/fireworks-llama-v2-7b-w8a16, accounts/fireworks/models/fireworks-llama-v2-7b-chat-w8a16, accounts/fireworks/models/fireworks-llama-v2-70b-chat-4gpu"
 ]
},
{
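As the cell above notes, the wrapper can be called for one or several prompts. A short sketch using one of the listed model ids (prompt strings are illustrative; `generate()` is LangChain's standard batch entry point for LLMs, not something specific to this diff):

```python
from langchain.llms.fireworks import Fireworks  # assumed import path, as above

llm = Fireworks(model_id="accounts/fireworks/models/fireworks-llama-v2-13b-chat")

# Single prompt: the LLM object is callable and returns a completion string.
print(llm("Name 3 sports."))

# Several prompts at once: generate() returns an LLMResult with one list of
# generations per input prompt.
result = llm.generate(["Name 3 sports.", "Name 3 colors."])
for generations in result.generations:
    print(generations[0].text)
```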
@@ -147,7 +146,7 @@
 ],
 "source": [
 "#setting parameters: model_id, temperature, max_tokens, top_p\n",
-"llm = Fireworks(model_id=\"fireworks-llama-v2-13b-chat\", temperature=0.7, max_tokens=15, top_p=1.0)\n",
+"llm = Fireworks(model_id=\"accounts/fireworks/models/fireworks-llama-v2-13b-chat\", temperature=0.7, max_tokens=15, top_p=1.0)\n",
 "print(llm(\"What's the weather like in Kansas City in December?\"))"
 ]
},
@@ -19,4 +19,4 @@ llm = Fireworks(model="fireworks-llama-v2-13b-chat", max_tokens=256, temperature
 llm("Name 3 sports.")
 ```
 
-For a more detailed walkthrough, see [here](/docs/extras/modules/model_io/models/llms/integrations/Fireworks.ipynb).
+For a more detailed walkthrough, see [here](/docs/integrations/llms/Fireworks).
@@ -28,7 +28,9 @@ logger = logging.getLogger(__name__)
 class BaseFireworks(BaseLLM):
     """Wrapper around Fireworks large language models."""
 
-    model_id: str = Field("fireworks-llama-v2-7b-chat", alias="model")
+    model_id: str = Field(
+        "accounts/fireworks/models/fireworks-llama-v2-7b-chat", alias="model"
+    )
     """Model name to use."""
     temperature: float = 0.7
     """What sampling temperature to use."""
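Because `model_id` is declared with `alias="model"`, both keyword spellings used elsewhere in this commit (the notebook passes `model_id=`, the provider snippet passes `model=`) should populate the same field. A sketch of that assumption:

```python
from langchain.llms.fireworks import Fireworks  # assumed import path

# Both spellings appear in this commit's docs; the pydantic alias is what lets
# model= map onto the model_id field.
llm_by_field = Fireworks(model_id="accounts/fireworks/models/fireworks-llama-v2-7b-chat")
llm_by_alias = Fireworks(model="accounts/fireworks/models/fireworks-llama-v2-7b-chat")

assert llm_by_field.model_id == llm_by_alias.model_id
```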
@@ -175,7 +177,7 @@ class FireworksChat(BaseLLM):
             fireworkschat = FireworksChat(model_id=""fireworks-llama-v2-13b-chat"")
     """
 
-    model_id: str = "fireworks-llama-v2-7b-chat"
+    model_id: str = "accounts/fireworks/models/fireworks-llama-v2-7b-chat"
     """Model name to use."""
     temperature: float = 0.7
     """What sampling temperature to use."""
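The chat wrapper follows the same pattern with the new account-prefixed ids; a minimal sketch (import path assumed, prompt text illustrative):

```python
from langchain.llms.fireworks import FireworksChat  # assumed import path

chat = FireworksChat(
    model_id="accounts/fireworks/models/fireworks-llama-v2-13b-chat",
    max_tokens=256,
    temperature=0.7,
)
# FireworksChat subclasses BaseLLM, so it is callable with a plain string prompt.
print(chat("Name 3 sports."))
```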
@@ -21,7 +21,9 @@ from langchain.vectorstores import DeepLake
 
 def test_fireworks_call() -> None:
     """Test valid call to fireworks."""
-    llm = Fireworks(model_id="fireworks-llama-v2-13b-chat", max_tokens=900)
+    llm = Fireworks(
+        model_id="accounts/fireworks/models/fireworks-llama-v2-13b-chat", max_tokens=900
+    )
     output = llm("What is the weather in NYC")
     assert isinstance(output, str)
 
@@ -136,17 +138,17 @@ def test_fireworkschat_chain() -> None:
 
 
 _EXPECTED_NUM_TOKENS = {
-    "fireworks-llama-v2-13b": 17,
-    "fireworks-llama-v2-7b": 17,
-    "fireworks-llama-v2-13b-chat": 17,
-    "fireworks-llama-v2-7b-chat": 17,
+    "accounts/fireworks/models/fireworks-llama-v2-13b": 17,
+    "accounts/fireworks/models/fireworks-llama-v2-7b": 17,
+    "accounts/fireworks/models/fireworks-llama-v2-13b-chat": 17,
+    "accounts/fireworks/models/fireworks-llama-v2-7b-chat": 17,
 }
 
 _MODELS = models = [
-    "fireworks-llama-v2-13b",
-    "fireworks-llama-v2-7b",
-    "fireworks-llama-v2-13b-chat",
-    "fireworks-llama-v2-7b-chat",
+    "accounts/fireworks/models/fireworks-llama-v2-13b",
+    "accounts/fireworks/models/fireworks-llama-v2-7b",
+    "accounts/fireworks/models/fireworks-llama-v2-13b-chat",
+    "accounts/fireworks/models/fireworks-llama-v2-7b-chat",
 ]
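These constants read like fixtures for a parametrized token-count check in the same test module (where `Fireworks` is already imported); the test that consumes them sits outside this hunk, so the sketch below is hypothetical, with a placeholder prompt and LangChain's standard `get_num_tokens()` hook assumed:

```python
import pytest

# Placeholder: the prompt that tokenizes to 17 tokens lives in the real test,
# not in this hunk.
_PROMPT = "<prompt used by the real test>"


@pytest.mark.parametrize("model", _MODELS)
def test_fireworks_num_tokens(model: str) -> None:
    """Hypothetical consumer of _MODELS and _EXPECTED_NUM_TOKENS above."""
    llm = Fireworks(model_id=model)
    assert llm.get_num_tokens(_PROMPT) == _EXPECTED_NUM_TOKENS[model]
```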