"""**Utilities** are the integrations with third-party systems and packages.

Other LangChain classes use **Utilities** to interact with third-party systems
and packages.
"""
import importlib
from typing import Any
_module_lookup = {
|
|
|
|
"AlphaVantageAPIWrapper": "langchain_community.utilities.alpha_vantage",
|
|
|
|
"ApifyWrapper": "langchain_community.utilities.apify",
|
|
|
|
"ArceeWrapper": "langchain_community.utilities.arcee",
|
|
|
|
"ArxivAPIWrapper": "langchain_community.utilities.arxiv",
|
|
|
|
"AudioStream": "langchain_community.utilities.nvidia_riva",
|
|
|
|
"BibtexparserWrapper": "langchain_community.utilities.bibtex",
|
|
|
|
"BingSearchAPIWrapper": "langchain_community.utilities.bing_search",
|
|
|
|
"BraveSearchWrapper": "langchain_community.utilities.brave_search",
|
|
|
|
"DuckDuckGoSearchAPIWrapper": "langchain_community.utilities.duckduckgo_search",
|
|
|
|
"GoldenQueryAPIWrapper": "langchain_community.utilities.golden_query",
|
|
|
|
"GoogleFinanceAPIWrapper": "langchain_community.utilities.google_finance",
|
|
|
|
"GoogleJobsAPIWrapper": "langchain_community.utilities.google_jobs",
|
|
|
|
"GoogleLensAPIWrapper": "langchain_community.utilities.google_lens",
|
|
|
|
"GooglePlacesAPIWrapper": "langchain_community.utilities.google_places_api",
|
|
|
|
"GoogleScholarAPIWrapper": "langchain_community.utilities.google_scholar",
|
|
|
|
"GoogleSearchAPIWrapper": "langchain_community.utilities.google_search",
|
|
|
|
"GoogleSerperAPIWrapper": "langchain_community.utilities.google_serper",
|
|
|
|
"GoogleTrendsAPIWrapper": "langchain_community.utilities.google_trends",
|
|
|
|
"GraphQLAPIWrapper": "langchain_community.utilities.graphql",
|
2024-03-29 19:01:27 +00:00
|
|
|
"InfobipAPIWrapper": "langchain_community.utilities.infobip",
|
2024-03-12 22:18:54 +00:00
|
|
|
"JiraAPIWrapper": "langchain_community.utilities.jira",
|
|
|
|
"LambdaWrapper": "langchain_community.utilities.awslambda",
|
|
|
|
"MaxComputeAPIWrapper": "langchain_community.utilities.max_compute",
|
|
|
|
"MerriamWebsterAPIWrapper": "langchain_community.utilities.merriam_webster",
|
|
|
|
"MetaphorSearchAPIWrapper": "langchain_community.utilities.metaphor_search",
|
|
|
|
"NVIDIARivaASR": "langchain_community.utilities.nvidia_riva",
|
|
|
|
"NVIDIARivaStream": "langchain_community.utilities.nvidia_riva",
|
|
|
|
"NVIDIARivaTTS": "langchain_community.utilities.nvidia_riva",
|
|
|
|
"NasaAPIWrapper": "langchain_community.utilities.nasa",
|
|
|
|
"NutritionAIAPI": "langchain_community.utilities.passio_nutrition_ai",
|
|
|
|
"OpenWeatherMapAPIWrapper": "langchain_community.utilities.openweathermap",
|
|
|
|
"OutlineAPIWrapper": "langchain_community.utilities.outline",
|
|
|
|
"Portkey": "langchain_community.utilities.portkey",
|
|
|
|
"PowerBIDataset": "langchain_community.utilities.powerbi",
|
|
|
|
"PubMedAPIWrapper": "langchain_community.utilities.pubmed",
|
|
|
|
"PythonREPL": "langchain_community.utilities.python",
|
|
|
|
"Requests": "langchain_community.utilities.requests",
|
|
|
|
"RequestsWrapper": "langchain_community.utilities.requests",
|
|
|
|
"RivaASR": "langchain_community.utilities.nvidia_riva",
|
|
|
|
"RivaTTS": "langchain_community.utilities.nvidia_riva",
|
|
|
|
"SQLDatabase": "langchain_community.utilities.sql_database",
|
|
|
|
"SceneXplainAPIWrapper": "langchain_community.utilities.scenexplain",
|
|
|
|
"SearchApiAPIWrapper": "langchain_community.utilities.searchapi",
|
|
|
|
"SearxSearchWrapper": "langchain_community.utilities.searx_search",
|
|
|
|
"SerpAPIWrapper": "langchain_community.utilities.serpapi",
|
|
|
|
"SparkSQL": "langchain_community.utilities.spark_sql",
|
|
|
|
"StackExchangeAPIWrapper": "langchain_community.utilities.stackexchange",
|
|
|
|
"SteamWebAPIWrapper": "langchain_community.utilities.steam",
|
|
|
|
"TensorflowDatasets": "langchain_community.utilities.tensorflow_datasets",
|
|
|
|
"TextRequestsWrapper": "langchain_community.utilities.requests",
|
|
|
|
"TwilioAPIWrapper": "langchain_community.utilities.twilio",
|
|
|
|
"WikipediaAPIWrapper": "langchain_community.utilities.wikipedia",
|
|
|
|
"WolframAlphaAPIWrapper": "langchain_community.utilities.wolfram_alpha",
|
|
|
|
"YouSearchAPIWrapper": "langchain_community.utilities.you",
|
|
|
|
"ZapierNLAWrapper": "langchain_community.utilities.zapier",
|
|
|
|
}


def __getattr__(name: str) -> Any:
    """Lazily import and return the utility class named ``name``.

    Implements PEP 562 module-level ``__getattr__``: on first attribute
    access, look up ``name`` in ``_module_lookup``, import the submodule
    that defines it, and return the attribute from that submodule.  This
    keeps ``import langchain_community.utilities`` cheap, since the many
    third-party integrations are only imported when actually used.

    Args:
        name: The public utility name being accessed on this module.

    Returns:
        The attribute ``name`` from its owning submodule.

    Raises:
        AttributeError: If ``name`` is not registered in ``_module_lookup``.
    """
    if name in _module_lookup:
        module = importlib.import_module(_module_lookup[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__} has no attribute {name}")
# Public API of this package: exactly the lazily-importable names above.
__all__ = list(_module_lookup)
|