diff --git a/docs/extras/integrations/llms/Fireworks.ipynb b/docs/extras/integrations/llms/Fireworks.ipynb index 51648b5a62..3f30ab1923 100644 --- a/docs/extras/integrations/llms/Fireworks.ipynb +++ b/docs/extras/integrations/llms/Fireworks.ipynb @@ -7,7 +7,7 @@ "source": [ "# Fireworks\n", "\n", - ">[Fireworks](https://www.fireworks.ai/) is an AI startup focused on accelerating product development on generative AI by creating an innovative AI experiment and production platform. \n", + ">[Fireworks](https://app.fireworks.ai/) accelerates product development on generative AI by creating an innovative AI experiment and production platform. \n", "\n", "This example goes over how to use LangChain to interact with `Fireworks` models." ] }, { @@ -37,7 +37,7 @@ "\n", "Contact Fireworks AI for an API Key to access our models\n", "\n", - "Set up your model using a model id. If the model is not set, the default model is fireworks-llama-v2-13b-chat." + "Set up your model using a model id. If the model is not set, the default model is accounts/fireworks/models/fireworks-llama-v2-7b-chat." ] }, { @@ -49,7 +49,7 @@ "source": [ "# Initialize a Fireworks LLM\n", "os.environ['FIREWORKS_API_KEY'] = \"\" #change this to your own API KEY\n", - "llm = Fireworks(model_id=\"fireworks-llama-v2-13b-chat\")" + "llm = Fireworks(model_id=\"accounts/fireworks/models/fireworks-llama-v2-13b-chat\")" ] }, { @@ -73,11 +73,10 @@ "You can use the LLMs to call the model for specified prompt(s). 
\n", "\n", "Current Specified Models: \n", - "* fireworks-falcon-7b, fireworks-falcon-40b-w8a16\n", - "* fireworks-guanaco-30b, fireworks-guanaco-33b-w8a16\n", - "* fireworks-llama-7b, fireworks-llama-13b, fireworks-llama-30b-w8a16\n", - "* fireworks-llama-v2-13b, fireworks-llama-v2-13b-chat, fireworks-llama-v2-13b-w8a16, fireworks-llama-v2-13b-chat-w8a16\n", - "* fireworks-llama-v2-7b, fireworks-llama-v2-7b-chat, fireworks-llama-v2-7b-w8a16, fireworks-llama-v2-7b-chat-w8a16" + "* accounts/fireworks/models/fireworks-falcon-7b, accounts/fireworks/models/fireworks-falcon-40b-w8a16\n", + "* accounts/fireworks/models/fireworks-starcoder-1b-w8a16-1gpu, accounts/fireworks/models/fireworks-starcoder-3b-w8a16-1gpu, accounts/fireworks/models/fireworks-starcoder-7b-w8a16-1gpu, accounts/fireworks/models/fireworks-starcoder-16b-w8a16 \n", + "* accounts/fireworks/models/fireworks-llama-v2-13b, accounts/fireworks/models/fireworks-llama-v2-13b-chat, accounts/fireworks/models/fireworks-llama-v2-13b-w8a16, accounts/fireworks/models/fireworks-llama-v2-13b-chat-w8a16\n", + "* accounts/fireworks/models/fireworks-llama-v2-7b, accounts/fireworks/models/fireworks-llama-v2-7b-chat, accounts/fireworks/models/fireworks-llama-v2-7b-w8a16, accounts/fireworks/models/fireworks-llama-v2-7b-chat-w8a16, accounts/fireworks/models/fireworks-llama-v2-70b-chat-4gpu" ] }, { @@ -147,7 +146,7 @@ ], "source": [ "#setting parameters: model_id, temperature, max_tokens, top_p\n", - "llm = Fireworks(model_id=\"fireworks-llama-v2-13b-chat\", temperature=0.7, max_tokens=15, top_p=1.0)\n", + "llm = Fireworks(model_id=\"accounts/fireworks/models/fireworks-llama-v2-13b-chat\", temperature=0.7, max_tokens=15, top_p=1.0)\n", "print(llm(\"What's the weather like in Kansas City in December?\"))" ] }, diff --git a/docs/extras/integrations/providers/fireworks.md b/docs/extras/integrations/providers/fireworks.md index fefe25588e..42c8436c8e 100644 --- a/docs/extras/integrations/providers/fireworks.md +++ 
b/docs/extras/integrations/providers/fireworks.md @@ -19,4 +19,4 @@ llm = Fireworks(model="fireworks-llama-v2-13b-chat", max_tokens=256, temperature llm("Name 3 sports.") ``` -For a more detailed walkthrough, see [here](/docs/extras/modules/model_io/models/llms/integrations/Fireworks.ipynb). \ No newline at end of file +For a more detailed walkthrough, see [here](/docs/integrations/llms/Fireworks). diff --git a/libs/langchain/langchain/llms/fireworks.py b/libs/langchain/langchain/llms/fireworks.py index bc98c03732..90a9c7d4c0 100644 --- a/libs/langchain/langchain/llms/fireworks.py +++ b/libs/langchain/langchain/llms/fireworks.py @@ -28,7 +28,9 @@ logger = logging.getLogger(__name__) class BaseFireworks(BaseLLM): """Wrapper around Fireworks large language models.""" - model_id: str = Field("fireworks-llama-v2-7b-chat", alias="model") + model_id: str = Field( + "accounts/fireworks/models/fireworks-llama-v2-7b-chat", alias="model" + ) """Model name to use.""" temperature: float = 0.7 """What sampling temperature to use.""" @@ -175,7 +177,7 @@ class FireworksChat(BaseLLM): fireworkschat = FireworksChat(model_id="accounts/fireworks/models/fireworks-llama-v2-13b-chat") """ - model_id: str = "fireworks-llama-v2-7b-chat" + model_id: str = "accounts/fireworks/models/fireworks-llama-v2-7b-chat" """Model name to use.""" temperature: float = 0.7 """What sampling temperature to use.""" diff --git a/libs/langchain/tests/integration_tests/llms/test_fireworks.py b/libs/langchain/tests/integration_tests/llms/test_fireworks.py index e9a041c2c7..e0dcb4fe43 100644 --- a/libs/langchain/tests/integration_tests/llms/test_fireworks.py +++ b/libs/langchain/tests/integration_tests/llms/test_fireworks.py @@ -21,7 +21,9 @@ from langchain.vectorstores import DeepLake def test_fireworks_call() -> None: """Test valid call to fireworks.""" - llm = Fireworks(model_id="fireworks-llama-v2-13b-chat", max_tokens=900) + llm = Fireworks( + model_id="accounts/fireworks/models/fireworks-llama-v2-13b-chat", max_tokens=900 + ) 
output = llm("What is the weather in NYC") assert isinstance(output, str) @@ -136,17 +138,17 @@ def test_fireworkschat_chain() -> None: _EXPECTED_NUM_TOKENS = { - "fireworks-llama-v2-13b": 17, - "fireworks-llama-v2-7b": 17, - "fireworks-llama-v2-13b-chat": 17, - "fireworks-llama-v2-7b-chat": 17, + "accounts/fireworks/models/fireworks-llama-v2-13b": 17, + "accounts/fireworks/models/fireworks-llama-v2-7b": 17, + "accounts/fireworks/models/fireworks-llama-v2-13b-chat": 17, + "accounts/fireworks/models/fireworks-llama-v2-7b-chat": 17, } _MODELS = models = [ - "fireworks-llama-v2-13b", - "fireworks-llama-v2-7b", - "fireworks-llama-v2-13b-chat", - "fireworks-llama-v2-7b-chat", + "accounts/fireworks/models/fireworks-llama-v2-13b", + "accounts/fireworks/models/fireworks-llama-v2-7b", + "accounts/fireworks/models/fireworks-llama-v2-13b-chat", + "accounts/fireworks/models/fireworks-llama-v2-7b-chat", ]