Mirror of https://github.com/hwchase17/langchain (synced 2024-11-10 01:10:59 +00:00)
Commit b7d180a70d
Description: Video imagery to text (closed captioning). This pull request introduces the VideoCaptioningChain, a tool for automated video captioning. It processes audio and video to generate subtitles and closed captions, merging them into a single SRT output.

Issue: https://github.com/langchain-ai/langchain/issues/11770

Dependencies: opencv-python, ffmpeg-python, assemblyai, transformers, pillow, torch, openai

Tag maintainer: @baskaryan @hwchase17

Hello! We are a group of students from the University of Toronto (@LunarECL, @TomSadan, @nicoledroi1, @A2113S) who want to contribute to the LangChain community! We ran make format, make lint, and make test locally before submitting the PR. To our knowledge, our changes do not introduce any new errors. Thank you for taking the time to review our PR!

---------

Co-authored-by: Bagatur <baskaryan@gmail.com>
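For reference, the SRT (SubRip) output mentioned above is a plain-text format in which each caption is a numbered block: a sequence index, a start --> end timestamp pair, and the caption text. An illustrative entry follows (the content is invented for this example, not taken from the PR); the SRTProcessor file shown below assembles such entries into the final output:

    1
    00:00:01,000 --> 00:00:03,500
    [Audio] Hello, world.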
15 lines · 459 B · Python
from typing import List

from langchain_experimental.video_captioning.models import CaptionModel


class SRTProcessor:
    @staticmethod
    def process(caption_models: List[CaptionModel]) -> str:
        """Generates the full SRT content from a list of caption models."""
        srt_entries = []
        for index, model in enumerate(caption_models, start=1):
            srt_entries.append(model.to_srt_entry(index))

        return "\n".join(srt_entries)
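A minimal usage sketch for SRTProcessor.process, assuming the SRTProcessor class from the file above is in scope. DummyCaption is a hypothetical stand-in for CaptionModel (whose real definition in langchain_experimental.video_captioning.models is not shown on this page); it implements only the to_srt_entry(index) method that process calls:

    # DummyCaption: hypothetical stand-in for CaptionModel, for illustration only.
    class DummyCaption:
        def __init__(self, start: str, end: str, text: str) -> None:
            self.start = start
            self.end = end
            self.text = text

        def to_srt_entry(self, index: int) -> str:
            # One SRT block: sequence number, time range, caption text,
            # with a trailing newline so joined entries end up blank-line separated.
            return f"{index}\n{self.start} --> {self.end}\n{self.text}\n"


    captions = [
        DummyCaption("00:00:01,000", "00:00:03,500", "[Audio] Hello, world."),
        DummyCaption("00:00:03,500", "00:00:06,000", "[Video] A person waves."),
    ]
    print(SRTProcessor.process(captions))

Note that process is duck-typed at runtime: despite the List[CaptionModel] annotation, any object exposing a to_srt_entry(index) method will work, which is what makes this stand-in possible.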