From 4092fd21dcabd1de273ad902fae2186ae5347e03 Mon Sep 17 00:00:00 2001
From: Lance Martin <122662504+rlancemartin@users.noreply.github.com>
Date: Tue, 6 Jun 2023 15:15:08 -0700
Subject: [PATCH] YoutubeAudioLoader and updates to OpenAIWhisperParser (#5772)

This introduces the `YoutubeAudioLoader`, which will load blobs from a YouTube url and write them. Blobs are then parsed by `OpenAIWhisperParser()`, as shown in this [PR](https://github.com/hwchase17/langchain/pull/5580), but we extend the parser to split audio such that each chunk meets the 25MB OpenAI size limit.

As shown in the notebook, this enables a very simple UX:

```
# Transcribe the video to text
loader = GenericLoader(YoutubeAudioLoader([url],save_dir),OpenAIWhisperParser())
docs = loader.load()
```

Tested on the full set of Karpathy lecture videos:

```
# Karpathy lecture videos
urls = ["https://youtu.be/VMj-3S1tku0",
        "https://youtu.be/PaCmpygFfXo",
        "https://youtu.be/TCH_1BHY58I",
        "https://youtu.be/P6sfmUTpUmc",
        "https://youtu.be/q8SA3rM6ckI",
        "https://youtu.be/t3YJ5hKiMQ0",
        "https://youtu.be/kCc8FmEb1nY"]

# Directory to save audio files
save_dir = "~/Downloads/YouTube"

# Transcribe the videos to text
loader = GenericLoader(YoutubeAudioLoader(urls,save_dir),OpenAIWhisperParser())
docs = loader.load()
```
---
 .../examples/youtube_audio.ipynb              | 296 ++++++++++++++++++
 .../document_loaders/blob_loaders/__init__.py |   3 +-
 .../blob_loaders/youtube_audio.py             |  50 +++
 langchain/document_loaders/parsers/audio.py   |  43 ++-
 .../blob_loaders/test_public_api.py           |   7 +-
 5 files changed, 393 insertions(+), 6 deletions(-)
 create mode 100644 docs/modules/indexes/document_loaders/examples/youtube_audio.ipynb
 create mode 100644 langchain/document_loaders/blob_loaders/youtube_audio.py

diff --git a/docs/modules/indexes/document_loaders/examples/youtube_audio.ipynb b/docs/modules/indexes/document_loaders/examples/youtube_audio.ipynb
new file mode 100644
index 00000000..c0ce120d
--- /dev/null
+++ b/docs/modules/indexes/document_loaders/examples/youtube_audio.ipynb
@@ -0,0 +1,296 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "e48afb8d",
+ "metadata": {},
+ "source": [
+ "# Loading documents from a YouTube url\n",
+ "\n",
+ "Building chat or QA applications on YouTube videos is a topic of high interest.\n",
+ "\n",
+ "Below we show how to easily go from a YouTube url to text to chat!\n",
+ "\n",
+ "We will use the `OpenAIWhisperParser`, which will use the OpenAI Whisper API to transcribe audio to text.\n",
+ "\n",
+ "Note: You will need to have an `OPENAI_API_KEY` supplied."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "5f34e934",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain.document_loaders.generic import GenericLoader\n",
+ "from langchain.document_loaders.parsers import OpenAIWhisperParser\n",
+ "from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "85fc12bd",
+ "metadata": {},
+ "source": [
+ "We will use `yt_dlp` to download audio for YouTube urls.\n",
+ "\n",
+ "We will use `pydub` to split downloaded audio files (such that we adhere to Whisper API's 25MB file size limit)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "fb5a6606",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "! pip install yt_dlp\n",
+ "! pip install pydub"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b0e119f4",
+ "metadata": {},
+ "source": [
+ "### YouTube url to text\n",
+ "\n",
+ "Use `YoutubeAudioLoader` to fetch / download the audio files.\n",
+ "\n",
+ "Then, use `OpenAIWhisperParser()` to transcribe them to text.\n",
+ "\n",
+ "Let's take the first lecture of Andrej Karpathy's YouTube course as an example! "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "23e1e134",
+ "metadata": {
+ "scrolled": false
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[youtube] Extracting URL: https://youtu.be/kCc8FmEb1nY\n",
+ "[youtube] kCc8FmEb1nY: Downloading webpage\n",
+ "[youtube] kCc8FmEb1nY: Downloading android player API JSON\n",
+ "[info] kCc8FmEb1nY: Downloading 1 format(s): 140\n",
+ "[dashsegments] Total fragments: 11\n",
+ "[download] Destination: /Users/31treehaus/Desktop/AI/langchain-fork/docs/modules/indexes/document_loaders/examples/Let's build GPT: from scratch, in code, spelled out..m4a\n",
+ "[download] 100% of 107.73MiB in 00:00:18 at 5.92MiB/s \n",
+ "[FixupM4a] Correcting container of \"/Users/31treehaus/Desktop/AI/langchain-fork/docs/modules/indexes/document_loaders/examples/Let's build GPT: from scratch, in code, spelled out..m4a\"\n",
+ "[ExtractAudio] Not converting audio /Users/31treehaus/Desktop/AI/langchain-fork/docs/modules/indexes/document_loaders/examples/Let's build GPT: from scratch, in code, spelled out..m4a; file is already in target format m4a\n",
+ "[youtube] Extracting URL: https://youtu.be/VMj-3S1tku0\n",
+ "[youtube] VMj-3S1tku0: Downloading webpage\n",
+ "[youtube] VMj-3S1tku0: Downloading android player API JSON\n",
+ "[info] VMj-3S1tku0: Downloading 1 format(s): 140\n",
+ "[download] /Users/31treehaus/Desktop/AI/langchain-fork/docs/modules/indexes/document_loaders/examples/The spelled-out intro to neural networks and backpropagation: building micrograd.m4a has already been downloaded\n",
+ "[download] 100% of 134.98MiB\n",
+ "[ExtractAudio] Not converting audio /Users/31treehaus/Desktop/AI/langchain-fork/docs/modules/indexes/document_loaders/examples/The spelled-out intro to neural networks and backpropagation: building micrograd.m4a; file is already in target format m4a\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Two Karpathy lecture videos\n",
+ "urls = [\"https://youtu.be/kCc8FmEb1nY\",\n",
+ " \"https://youtu.be/VMj-3S1tku0\"]\n",
+ "\n",
+ "# Directory to save audio files \n",
+ "save_dir = \"~/Downloads/YouTube\"\n",
+ "\n",
+ "# Transcribe the videos to text\n",
+ "loader = GenericLoader(YoutubeAudioLoader(urls,save_dir),OpenAIWhisperParser())\n",
+ "docs = loader.load()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "72a94fd8",
+ "metadata": {
+ "scrolled": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "\"Hello, my name is Andrej and I've been training deep neural networks for a bit more than a decade. And in this lecture I'd like to show you what neural network training looks like under the hood. So in particular we are going to start with a blank Jupyter notebook and by the end of this lecture we will define and train a neural net and you'll get to see everything that goes on under the hood and exactly sort of how that works on an intuitive level. 
Now specifically what I would like to do is I w\"" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Returns a list of Documents, which can be easily viewed or parsed\n", + "docs[0].page_content[0:500]" + ] + }, + { + "cell_type": "markdown", + "id": "93be6b49", + "metadata": {}, + "source": [ + "### Building a chat app from YouTube video\n", + "\n", + "Given `Documents`, we can easily enable chat / question+answering." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "1823f042", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.chains import RetrievalQA\n", + "from langchain.vectorstores import FAISS\n", + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain.text_splitter import RecursiveCharacterTextSplitter" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "7257cda1", + "metadata": {}, + "outputs": [], + "source": [ + "# Combine doc\n", + "combined_docs = [doc.page_content for doc in docs]\n", + "text = \" \".join(combined_docs)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "147c0c55", + "metadata": {}, + "outputs": [], + "source": [ + "# Split them\n", + "text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1500, chunk_overlap = 150)\n", + "splits = text_splitter.split_text(text)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "f3556703", + "metadata": {}, + "outputs": [], + "source": [ + "# Build an index\n", + "embeddings = OpenAIEmbeddings()\n", + "vectordb = FAISS.from_texts(splits,embeddings)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "beaa99db", + "metadata": {}, + "outputs": [], + "source": [ + "# Build a QA chain\n", + "qa_chain = RetrievalQA.from_chain_type(llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0),\n", + " chain_type=\"stuff\",\n", + " retriever=vectordb.as_retriever())" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "f2239a62", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\"We need to zero out the gradient before backprop at each step because the backward pass accumulates gradients in the grad attribute of each parameter. If we don't reset the grad to zero before each backward pass, the gradients will accumulate and add up, leading to incorrect updates and slower convergence. By resetting the grad to zero before each backward pass, we ensure that the gradients are calculated correctly and that the optimization process works as intended.\"" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Ask a question!\n", + "query = \"Why do we need to zero out the gradient before backprop at each step?\"\n", + "qa_chain.run(query)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "a8d01098", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'In the context of transformers, an encoder is a component that reads in a sequence of input tokens and generates a sequence of hidden representations. On the other hand, a decoder is a component that takes in a sequence of hidden representations and generates a sequence of output tokens. The main difference between the two is that the encoder is used to encode the input sequence into a fixed-length representation, while the decoder is used to decode the fixed-length representation into an output sequence. 
In machine translation, for example, the encoder reads in the source language sentence and generates a fixed-length representation, which is then used by the decoder to generate the target language sentence.'" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "query = \"What is the difference between an encoder and decoder?\"\n", + "qa_chain.run(query)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "fe1e77dd", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'For any token, x is the input vector that contains the private information of that token, k and q are the key and query vectors respectively, which are produced by forwarding linear modules on x, and v is the vector that is calculated by propagating the same linear module on x again. The key vector represents what the token contains, and the query vector represents what the token is looking for. The vector v is the information that the token will communicate to other tokens if it finds them interesting, and it gets aggregated for the purposes of the self-attention mechanism.'" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "query = \"For any token, what are x, k, v, and q?\"\n", + "qa_chain.run(query)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.16" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/langchain/document_loaders/blob_loaders/__init__.py b/langchain/document_loaders/blob_loaders/__init__.py index b023cdbc..a96bc407 100644 --- a/langchain/document_loaders/blob_loaders/__init__.py +++ b/langchain/document_loaders/blob_loaders/__init__.py @@ -1,4 +1,5 @@ from langchain.document_loaders.blob_loaders.file_system import FileSystemBlobLoader from langchain.document_loaders.blob_loaders.schema import Blob, BlobLoader +from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader -__all__ = ["BlobLoader", "Blob", "FileSystemBlobLoader"] +__all__ = ["BlobLoader", "Blob", "FileSystemBlobLoader", "YoutubeAudioLoader"] diff --git a/langchain/document_loaders/blob_loaders/youtube_audio.py b/langchain/document_loaders/blob_loaders/youtube_audio.py new file mode 100644 index 00000000..abfcc833 --- /dev/null +++ b/langchain/document_loaders/blob_loaders/youtube_audio.py @@ -0,0 +1,50 @@ +from typing import Iterable, List + +from langchain.document_loaders.blob_loaders import FileSystemBlobLoader +from langchain.document_loaders.blob_loaders.schema import Blob, BlobLoader + + +class YoutubeAudioLoader(BlobLoader): + + """Load YouTube urls as audio file(s).""" + + def __init__(self, urls: List[str], save_dir: str): + if not isinstance(urls, list): + raise TypeError("urls must be a list") + + self.urls = urls + self.save_dir = save_dir + + def yield_blobs(self) -> Iterable[Blob]: + """Yield audio blobs for each url.""" + + try: + import yt_dlp + except ImportError: + raise ValueError( + "yt_dlp package not found, please install it with " + "`pip install yt_dlp`" + ) + + # Use yt_dlp to download audio given a YouTube url + ydl_opts = { + "format": "m4a/bestaudio/best", + "noplaylist": True, + "outtmpl": 
self.save_dir + "/%(title)s.%(ext)s", + "postprocessors": [ + { + "key": "FFmpegExtractAudio", + "preferredcodec": "m4a", + } + ], + } + + for url in self.urls: + # Download file + with yt_dlp.YoutubeDL(ydl_opts) as ydl: + ydl.download(url) + + # Yield the written blobs + loader = FileSystemBlobLoader(self.save_dir, glob="*.m4a") + for blob in loader.yield_blobs(): + yield blob diff --git a/langchain/document_loaders/parsers/audio.py b/langchain/document_loaders/parsers/audio.py index d55918c2..86ad8f8d 100644 --- a/langchain/document_loaders/parsers/audio.py +++ b/langchain/document_loaders/parsers/audio.py @@ -12,10 +12,45 @@ class OpenAIWhisperParser(BaseBlobParser): def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Lazily parse the blob.""" - import openai + import io + + try: + import openai + except ImportError: + raise ValueError( + "openai package not found, please install it with " + "`pip install openai`" + ) + try: + from pydub import AudioSegment + except ImportError: + raise ValueError( + "pydub package not found, please install it with " "`pip install pydub`" + ) + + # Audio file from disk + audio = AudioSegment.from_file(blob.path) + + # Define the duration of each chunk in minutes + # Need to meet 25MB size limit for Whisper API + chunk_duration = 20 + chunk_duration_ms = chunk_duration * 60 * 1000 + + # Split the audio into chunk_duration_ms chunks + for split_number, i in enumerate(range(0, len(audio), chunk_duration_ms)): + # Audio chunk + chunk = audio[i : i + chunk_duration_ms] + file_obj = io.BytesIO(chunk.export(format="mp3").read()) + if blob.source is not None: + file_obj.name = blob.source + f"_part_{split_number}.mp3" + else: + file_obj.name = f"part_{split_number}.mp3" + + # Transcribe + print(f"Transcribing part {split_number+1}!") + transcript = openai.Audio.transcribe("whisper-1", file_obj) - with blob.as_bytes_io() as f: - transcript = openai.Audio.transcribe("whisper-1", f) yield Document( - page_content=transcript.text, metadata={"source": blob.source} + page_content=transcript.text, + metadata={"source": blob.source, "chunk": split_number}, ) diff --git a/tests/unit_tests/document_loaders/blob_loaders/test_public_api.py b/tests/unit_tests/document_loaders/blob_loaders/test_public_api.py index c844f243..cf289141 100644 --- a/tests/unit_tests/document_loaders/blob_loaders/test_public_api.py +++ b/tests/unit_tests/document_loaders/blob_loaders/test_public_api.py @@ -3,4 +3,9 @@ from langchain.document_loaders.blob_loaders import __all__ def test_public_api() -> None: """Hard-code public API to help determine if we have broken it.""" - assert sorted(__all__) == ["Blob", "BlobLoader", "FileSystemBlobLoader"] + assert sorted(__all__) == [ + "Blob", + "BlobLoader", + "FileSystemBlobLoader", + "YoutubeAudioLoader", + ]
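
For reference, the updated `OpenAIWhisperParser` is not tied to YouTube: any directory of audio files can be transcribed by pairing it with the existing `FileSystemBlobLoader`. A minimal usage sketch (illustrative, not part of this patch), assuming `openai`, `pydub`, and ffmpeg are installed and `OPENAI_API_KEY` is set:

```
# Minimal sketch (not part of this patch): transcribe local audio files.
# Assumes openai, pydub, and ffmpeg are installed and OPENAI_API_KEY is set.
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.blob_loaders import FileSystemBlobLoader
from langchain.document_loaders.parsers import OpenAIWhisperParser

# Point the blob loader at any directory of m4a files
# (e.g. the save_dir used by YoutubeAudioLoader)
loader = GenericLoader(
    FileSystemBlobLoader("~/Downloads/YouTube", glob="*.m4a"),
    OpenAIWhisperParser(),
)

# Audio is split into 20-minute chunks so each request stays under the
# Whisper API's 25MB limit; each chunk becomes a Document with
# "source" and "chunk" metadata.
docs = loader.load()
```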