- """Classes for producing videos"""
-
- from abc import ABC
- import json
- import logging
- import os
- import subprocess
- import tempfile
-
- # for visualisations:
- import matplotlib.pyplot as plt
-
- # for encoding as JSON
- from pipeline.utils import Feature
-
-
- class Producer(ABC):
- """Generic producer interface."""
- def __init__(self, features):
- """All producers should take a list of features as input"""
- def produce(self):
- """All Producers should produce something!"""
-
- class VideoProducer(Producer):
- """Video producer interface."""


class FfmpegVideoProducer(VideoProducer):
    """Produce videos using ffmpeg"""
    # TODO: consider output filename options

    def _run_no_output(self, cmd: list, cwd: str = ".") -> None:
        """Run a command without capturing its output.

        Defined to be mocked out in tests via unittest.mock.patch
        """
        subprocess.run(cmd, stdout=None, stderr=None, cwd=cwd)
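        # In tests this can be stubbed out, e.g. (hypothetical test sketch):
        #     with unittest.mock.patch.object(FfmpegVideoProducer, "_run_no_output") as run:
        #         FfmpegVideoProducer([some_feature]).produce()
        #         run.assert_called()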

    def __init__(self, features):
        if not features:
            raise ValueError("No features provided")
        # TODO: consider if we want to permit empty features (producing no video)
        self.features = features

    def _ffmpeg_feature_to_clip(self, feature=None, output_filepath=None):
        """Use ffmpeg to produce a video clip from a feature"""
        OVERWRITE = True  # TODO: consider making this a config option
        if not feature or not feature.interval:
            raise ValueError("No feature with an interval provided")

        if not output_filepath:
            raise ValueError("No output filepath provided")

        ffmpeg_prefix = ["ffmpeg", "-y"] if OVERWRITE else ["ffmpeg"]
        ffmpeg_suffix = ["-r", "60", "-c:v", "libx264", "-crf", "26",
                         "-c:a", "aac", "-preset", "ultrafast"]
        # TODO: match framerate of input video
        # TODO: adjustable encoding options
        seek = ["-ss", str(feature.interval.start)]
        duration = ["-t", str(feature.interval.duration)]
        ffmpeg_args = (ffmpeg_prefix + seek + ["-i", feature.source.path] +
                       duration + ffmpeg_suffix + [output_filepath])
        logging.info(f"ffmpeg_args: {ffmpeg_args}")
        self._run_no_output(ffmpeg_args)
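        # For illustration (hypothetical values): a feature starting at 5.0s and
        # lasting 10.0s would run roughly
        #     ffmpeg -y -ss 5.0 -i <source path> -t 10.0 -r 60 -c:v libx264 \
        #         -crf 26 -c:a aac -preset ultrafast <output_filepath>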

    def _ffmpeg_concat_clips(self, clips=None, output_filepath=None):
        """Use ffmpeg to concatenate clips into a single video"""
        OVERWRITE = True
        ffmpeg_prefix = ["ffmpeg"]
        ffmpeg_prefix += ["-y"] if OVERWRITE else []
        ffmpeg_prefix += ["-f", "concat", "-safe", "0", "-i"]

        # there is a method to do this via process substitution, but it's not portable,
        # so we'll use the input file list method

        if not clips:
            raise ValueError("No clips provided")

        if not output_filepath:
            raise ValueError("No output filepath provided")

        # generate a temporary file with the list of clips
        join_file = tempfile.NamedTemporaryFile(mode="w")
        for clip in clips:
            join_file.write(f"file '{clip}'\n")
        join_file.flush()

        ffmpeg_args = ffmpeg_prefix + [join_file.name] + ["-c", "copy", output_filepath]
        logging.info(f"ffmpeg_args: {ffmpeg_args}")
        self._run_no_output(ffmpeg_args)
        join_file.close()
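        # The concat list file written above contains one line per clip, e.g.:
        #     file '/tmp/highlight_0.mp4'
        #     file '/tmp/highlight_1.mp4'
        # giving roughly:
        #     ffmpeg -y -f concat -safe 0 -i <list file> -c copy <output_filepath>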

    def produce(self):
        OUTPUT_DIR = "/tmp"  # TODO: make this a config option

        clips = []
        for num, feature in enumerate(self.features):
            output_filepath = f"{OUTPUT_DIR}/highlight_{num}.mp4"
            self._ffmpeg_feature_to_clip(feature, output_filepath)
            clips.append(output_filepath)

        # concatenate the clips
        output_filepath = f"{OUTPUT_DIR}/highlights.mp4"
        self._ffmpeg_concat_clips(clips, output_filepath)
        logging.info(f"Produced video: {output_filepath}")


class VisualisationProducer(Producer):
    """Visualisation producer -- illustrate the features we have extracted"""

    def __init__(self, features):
        if not features:
            raise ValueError("No features provided")
        self.features = features

    def produce(self):
        """Produce visualisation"""
        # basic idea: use matplotlib to plot:
        # - a wide line segment representing the source video[s]
        # - shorter line segments representing the features extracted where:
        #   + width represents duration
        #   + colour represents feature type
        #   + position represents time
        # - save as image
        plotted_source_videos = []
        bar_labels = []

        fig, ax = plt.subplots()
        for feature in self.features:
            # plot source video line if not done already
            if feature.source not in plotted_source_videos:
                # use video duration as width
                # ax.plot([0, feature.source.duration()], [0, 0], color='black', linewidth=10)
                ax.broken_barh([(0, feature.source.duration())], (0, 5), facecolors='grey')
                plotted_source_videos.append(feature.source)
                bar_labels.append(os.path.basename(feature.source.path))
                # annotate the source video
                ax.text(0.25, 0.25, os.path.basename(feature.source.path), ha='left', va='bottom',
                        fontsize=16)

            # plot feature line
            # ax.plot([feature.interval.start, feature.interval.end], [1, 1], color='red', linewidth=5)
            ax.broken_barh([(feature.interval.start, feature.interval.duration)], (10, 5), facecolors='red')
            if feature.feature_extractor not in bar_labels:
                bar_labels.append(feature.feature_extractor)
                # label bar with feature extractor
                ax.text(0, 8, feature.feature_extractor, ha='left', va='bottom',
                        fontsize=16)

        # label the plot's axes
        ax.set_xlabel('Time')
        # ax.set_yticks([], labels=bar_labels)
        ax.set_yticks([])
        # ax.tick_params(axis='y', labelrotation=90, ha='right')

        # save the plot
        plt.savefig("/tmp/visualisation.png")
        plt.close()


class PipelineJSONEncoder(json.JSONEncoder):
    """JSON encoder that serialises objects via their to_json() method when available."""

    def default(self, obj):
        if hasattr(obj, 'to_json'):
            return obj.to_json()
        else:
            return json.JSONEncoder.default(self, obj)
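
# Note: pipeline.utils.Feature is assumed to provide a to_json() method, so that
# json.dumps(features, cls=PipelineJSONEncoder, indent=4) -- as used by
# JSONProducer below -- serialises each feature via that method; anything else
# falls back to the standard encoder.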


class JSONProducer(Producer):
    """Produce JSON output"""

    def __init__(self, features):
        if not features:
            raise ValueError("No features provided")
        self.features = features

    def produce(self):
        # FIXME: config option for output path
        with open("/tmp/features.json", "w") as jsonfile:
            jsonfile.write(json.dumps(self.features, cls=PipelineJSONEncoder, indent=4))
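

if __name__ == "__main__":
    # Minimal usage sketch, not part of the pipeline proper: it fakes just enough
    # of the Feature shape the producers above rely on (.interval with
    # start/duration/end, .source with a path and duration(), .feature_extractor).
    # The input path and timings below are hypothetical.
    from types import SimpleNamespace

    demo_feature = SimpleNamespace(
        interval=SimpleNamespace(start=5.0, duration=10.0, end=15.0),
        source=SimpleNamespace(path="/tmp/input.mp4", duration=lambda: 60.0),
        feature_extractor="demo",
    )
    FfmpegVideoProducer([demo_feature]).produce()      # writes /tmp/highlights.mp4
    VisualisationProducer([demo_feature]).produce()    # writes /tmp/visualisation.png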