Mirror of https://github.com/hwchase17/langchain, synced 2024-11-06 03:20:49 +00:00
Commit b7d180a70d
Description: Video imagery to text (Closed Captioning). This pull request introduces the VideoCaptioningChain, a tool for automated video captioning. It processes audio and video to generate subtitles and closed captions, merging them into a single SRT output.
Issue: https://github.com/langchain-ai/langchain/issues/11770
Dependencies: opencv-python, ffmpeg-python, assemblyai, transformers, pillow, torch, openai
Tag maintainer: @baskaryan @hwchase17
Hello! We are a group of students from the University of Toronto (@LunarECL, @TomSadan, @nicoledroi1, @A2113S) who want to make a contribution to the LangChain community! We have run make format, make lint, and make test locally before submitting the PR. To our knowledge, our changes do not introduce any new errors. Thank you for taking the time to review our PR!
---------
Co-authored-by: Bagatur <baskaryan@gmail.com>
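For reviewers unfamiliar with the chain, a minimal usage sketch follows. It assumes only what the integration test below shows (a ChatOpenAI llm passed to the constructor and a video_file_path argument to run) and that provider credentials are supplied via the environment; the input path "video.mp4" and the output file name are placeholders, not part of the PR.

from pathlib import Path

from langchain_openai import ChatOpenAI

from langchain_experimental.video_captioning.base import VideoCaptioningChain

# Construct the chain the same way the integration test does; credentials are
# assumed to come from the environment (e.g. OPENAI_API_KEY for ChatOpenAI).
chain = VideoCaptioningChain(llm=ChatOpenAI(model="gpt-4", max_tokens=4000))

# "video.mp4" is a placeholder input; the test below passes a public URL instead.
srt_content = chain.run(video_file_path="video.mp4")

# The chain returns SRT text, which can be written straight to a subtitle file.
Path("captions.srt").write_text(srt_content, encoding="utf-8")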
29 lines
937 B
Python
"""Integration test for video captioning."""
|
|
from langchain_openai import ChatOpenAI
|
|
|
|
from langchain_experimental.video_captioning.base import VideoCaptioningChain
|
|
|
|
|
|
def test_video_captioning_hard() -> None:
|
|
"""Test input that is considered hard for this chain to process."""
|
|
URL = """
|
|
https://ia904700.us.archive.org/22/items/any-chibes/X2Download.com
|
|
-FXX%20USA%20%C2%ABPromo%20Noon%20-%204A%20Every%20Day%EF%BF%BD%EF
|
|
%BF%BD%C2%BB%20November%202021%EF%BF%BD%EF%BF%BD-%281080p60%29.mp4
|
|
"""
|
|
chain = VideoCaptioningChain(
|
|
llm=ChatOpenAI(
|
|
model="gpt-4",
|
|
max_tokens=4000,
|
|
)
|
|
)
|
|
srt_content = chain.run(video_file_path=URL)
|
|
|
|
assert (
|
|
"mustache" in srt_content
|
|
and "Any chives?" in srt_content
|
|
and "How easy? A little tighter." in srt_content
|
|
and "it's a little tight in" in srt_content
|
|
and "every day" in srt_content
|
|
)
|
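As a follow-up thought, the substring assertions could be complemented by a structural sanity check that the output is well-formed SRT. The helper below is an illustrative sketch, not part of the PR; count_srt_blocks is a hypothetical name, and the check relies only on the standard SRT layout (an index line, a HH:MM:SS,mmm --> HH:MM:SS,mmm timing line, then caption text, with blocks separated by blank lines).

import re

# Standard SRT timing line: HH:MM:SS,mmm --> HH:MM:SS,mmm
_TIMING = re.compile(r"^\d{2}:\d{2}:\d{2},\d{3} --> \d{2}:\d{2}:\d{2},\d{3}$")


def count_srt_blocks(srt_text: str) -> int:
    """Count blocks that follow the SRT layout: index, timing line, caption text."""
    count = 0
    for block in srt_text.strip().split("\n\n"):
        lines = [line.strip() for line in block.strip().splitlines()]
        if len(lines) >= 3 and lines[0].isdigit() and _TIMING.match(lines[1]):
            count += 1
    return count


# Example usage inside the test, after srt_content is produced:
# assert count_srt_blocks(srt_content) > 0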