{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Video Captioning\n",
    "\n",
    "This notebook shows how to use VideoCaptioningChain, which combines LangChain's ImageCaptionLoader with AssemblyAI to produce .srt files.\n",
    "\n",
    "The chain autogenerates both subtitles (from the audio transcript) and closed captions (describing the on-screen visuals) for a video given its URL."
   ]
  },
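  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "For reference, .srt is a plain-text subtitle format: a file is a sequence of numbered cues, each consisting of a cue number, a start --> end timestamp line, and one or more lines of caption text, separated by blank lines. The cue below is a generic illustration of the format, not output from this chain:\n",
    "\n",
    "```\n",
    "1\n",
    "00:00:01,000 --> 00:00:04,500\n",
    "Example caption text\n",
    "```"
   ]
  },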
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Installing Dependencies"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# !pip install ffmpeg-python\n",
    "# !pip install assemblyai\n",
    "# !pip install opencv-python\n",
    "# !pip install torch\n",
    "# !pip install pillow\n",
    "# !pip install transformers\n",
    "# !pip install langchain"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Imports"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-11-30T03:39:14.078232Z",
     "start_time": "2023-11-30T03:39:12.534410Z"
    }
   },
   "outputs": [],
   "source": [
    "import getpass\n",
    "\n",
    "from langchain.chains.video_captioning import VideoCaptioningChain\n",
    "from langchain.chat_models.openai import ChatOpenAI"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Setting up API Keys"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-11-30T03:39:17.423806Z",
     "start_time": "2023-11-30T03:39:17.417945Z"
    }
   },
   "outputs": [],
   "source": [
    "OPENAI_API_KEY = getpass.getpass(\"OpenAI API Key:\")\n",
    "\n",
    "ASSEMBLYAI_API_KEY = getpass.getpass(\"AssemblyAI API Key:\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**Required parameters:**\n",
    "\n",
    "* llm: The language model the chain uses to get suggestions on how to refine the closed captions\n",
    "* assemblyai_key: The API key for AssemblyAI, used to generate the subtitles\n",
    "\n",
    "**Optional parameters:**\n",
    "\n",
    "* verbose (Default: True): Sets verbose mode for downstream chain calls\n",
    "* use_logging (Default: True): Logs the chain's processes in the run manager\n",
    "* frame_skip (Default: None): How many video frames to skip during processing. Increasing it gives faster execution but less accurate results. If None, the frame skip is calculated automatically from the framerate. Set this to 0 to sample every frame\n",
    "* image_delta_threshold (Default: 3000000): Sets how sensitive the image processor is to a change of scenery in the video, which is used to delimit closed captions. Higher = less sensitive\n",
    "* closed_caption_char_limit (Default: 20): Sets the character limit on closed captions\n",
    "* closed_caption_similarity_threshold (Default: 80): Sets the percentage threshold for how similar two closed captions must be in order to be clustered into one longer closed caption\n",
    "* use_unclustered_video_models (Default: False): If True, closed captions that could not be clustered are still included. This may produce erratic captions, such as very short-lived or fast-changing ones; enabling it is experimental and not recommended\n",
    "\n",
    "A minimal sketch of passing these optional parameters follows below."
   ]
  },
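  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Below is a minimal sketch of constructing the chain with the optional parameters explicitly set. The parameter names come from the list above; every value shown is illustrative rather than a tuned recommendation."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Minimal sketch: all values below are illustrative, not tuned recommendations.\n",
    "# Omit any keyword argument to keep its default.\n",
    "configured_chain = VideoCaptioningChain(\n",
    "    llm=ChatOpenAI(model=\"gpt-4\", max_tokens=4000, openai_api_key=OPENAI_API_KEY),\n",
    "    assemblyai_key=ASSEMBLYAI_API_KEY,\n",
    "    verbose=True,  # verbose mode for downstream chain calls\n",
    "    use_logging=True,  # log the chain's processes in the run manager\n",
    "    frame_skip=None,  # None: derive frame skip from the framerate; 0 samples every frame\n",
    "    image_delta_threshold=3000000,  # scene-change sensitivity; higher = less sensitive\n",
    "    closed_caption_char_limit=20,  # max characters per closed caption\n",
    "    closed_caption_similarity_threshold=80,  # % similarity needed to cluster captions\n",
    "    use_unclustered_video_models=False,  # experimental; keep False\n",
    ")"
   ]
  },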
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Example run"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sample videos to try:\n",
    "# https://ia804703.us.archive.org/27/items/uh-oh-here-we-go-again/Uh-Oh%2C%20Here%20we%20go%20again.mp4\n",
    "# https://ia601200.us.archive.org/9/items/f58703d4-61e6-4f8f-8c08-b42c7e16f7cb/f58703d4-61e6-4f8f-8c08-b42c7e16f7cb.mp4\n",
    "\n",
    "chain = VideoCaptioningChain(\n",
    "    llm=ChatOpenAI(model=\"gpt-4\", max_tokens=4000, openai_api_key=OPENAI_API_KEY),\n",
    "    assemblyai_key=ASSEMBLYAI_API_KEY,\n",
    ")\n",
    "\n",
    "srt_content = chain.run(\n",
    "    video_file_path=\"https://ia601200.us.archive.org/9/items/f58703d4-61e6-4f8f-8c08-b42c7e16f7cb/f58703d4-61e6-4f8f-8c08-b42c7e16f7cb.mp4\"\n",
    ")\n",
    "\n",
    "print(srt_content)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Writing output to .srt file"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "with open(\"output.srt\", \"w\") as file:\n",
    "    file.write(srt_content)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "myenv",
   "language": "python",
   "name": "myenv"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.6"
  },
  "vscode": {
   "interpreter": {
    "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}