Fix Fireworks Callbacks (#12003)

I may be missing something, but it seems we inappropriately overrode the 'stream()' method, losing callbacks in the process. As far as I can tell, overriding it here didn't give us any customization we actually needed.
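
For context, a toy sketch (not the Fireworks or base-class source; all names below are made up) of why the override dropped callbacks: `_stream` only reports tokens when it is handed a run manager, and the override called `self._stream(prompt)` without one, whereas the inherited `stream()` wires a callback manager up before iterating.

```python
# Toy illustration only -- these names are invented, not LangChain APIs.
from typing import Iterator, Optional


class ToyRunManager:
    """Stands in for a callback run manager."""

    def on_llm_new_token(self, token: str) -> None:
        print(f"callback saw: {token!r}")


def _toy_stream(prompt: str, run_manager: Optional[ToyRunManager] = None) -> Iterator[str]:
    for token in ["Hello", ",", " world"]:
        if run_manager:  # same guard pattern as in the real _stream
            run_manager.on_llm_new_token(token)
        yield token


# What the removed override effectively did: tokens flow, callbacks never fire.
print(list(_toy_stream("hi")))

# What the inherited stream() does, roughly: build a run manager and hand it
# to _stream, so callback handlers see every token.
print(list(_toy_stream("hi", run_manager=ToyRunManager())))
```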

See the new trace:

https://smith.langchain.com/public/fbb82825-3a16-446b-8207-35622358db3b/r

which confirms that streaming still works.

Also fixes the stop words issue from #12000.
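
As a quick way to exercise the fix, a minimal sketch (assuming the fireworks-ai client is installed and `FIREWORKS_API_KEY` is set in the environment; the handler name is just for illustration):

```python
from typing import Any

from langchain.callbacks.base import BaseCallbackHandler
from langchain.llms.fireworks import Fireworks


class PrintTokenHandler(BaseCallbackHandler):
    """Illustrative handler: print each streamed token as it arrives."""

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        print(token, end="", flush=True)


llm = Fireworks()  # reads the API key from the environment

# With the override removed, stream() goes through the base LLM implementation,
# so the handler receives on_llm_new_token for each chunk and the stop
# sequence is passed along.
for chunk in llm.stream(
    "Name three colors.",
    config={"callbacks": [PrintTokenHandler()]},
    stop=["\n\n"],
):
    pass  # chunks are also yielded to the caller here
```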
William FH committed via GitHub · commit dfb4baa3f9 (parent 12f8e87a0e)

@@ -6,9 +6,7 @@ from langchain.callbacks.manager import (
 )
 from langchain.llms.base import LLM, create_base_retry_decorator
 from langchain.pydantic_v1 import Field, root_validator
-from langchain.schema.language_model import LanguageModelInput
 from langchain.schema.output import GenerationChunk
-from langchain.schema.runnable.config import RunnableConfig
 from langchain.utils.env import get_from_dict_or_env
@@ -140,42 +138,6 @@ class Fireworks(LLM):
             if run_manager:
                 await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
-
-    def stream(
-        self,
-        input: LanguageModelInput,
-        config: Optional[RunnableConfig] = None,
-        *,
-        stop: Optional[List[str]] = None,
-        **kwargs: Any,
-    ) -> Iterator[str]:
-        prompt = self._convert_input(input).to_string()
-        generation: Optional[GenerationChunk] = None
-        for chunk in self._stream(prompt):
-            yield chunk.text
-            if generation is None:
-                generation = chunk
-            else:
-                generation += chunk
-        assert generation is not None
-
-    async def astream(
-        self,
-        input: LanguageModelInput,
-        config: Optional[RunnableConfig] = None,
-        *,
-        stop: Optional[List[str]] = None,
-        **kwargs: Any,
-    ) -> AsyncIterator[str]:
-        prompt = self._convert_input(input).to_string()
-        generation: Optional[GenerationChunk] = None
-        async for chunk in self._astream(prompt):
-            yield chunk.text
-            if generation is None:
-                generation = chunk
-            else:
-                generation += chunk
-        assert generation is not None

 def completion_with_retry(
     llm: Fireworks,
