From 65cb224d6b1e8eee02a550971f3a1d8a2692e251 Mon Sep 17 00:00:00 2001
From: Rob Hallam <0504004h@student.gla.ac.uk>
Date: Mon, 16 Sep 2024 03:48:47 +0100
Subject: [PATCH] feat: add two exemplar scripts

Fairly basic, could obviously be refactored but they are there to be
exemplars to give folks an idea of where they can start with the
pipeline.
---
 make_highlight_clips.py | 97 ++++++++++++++++++++++++++++++++++++
 make_highlight_video.py | 106 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 203 insertions(+)
 create mode 100644 make_highlight_clips.py
 create mode 100644 make_highlight_video.py

diff --git a/make_highlight_clips.py b/make_highlight_clips.py
new file mode 100644
index 0000000..4ab9940
--- /dev/null
+++ b/make_highlight_clips.py
@@ -0,0 +1,97 @@
+"""make_highlight_clips.py -- use the pipeline to make highlight clips from a video
+
+Usage:
+    make_highlight_clips.py <input_file> [--output-dir=<output-dir>] [--output-filename=<output-filename>] [--save-json]
+
+Options:
+    -h --help                  Show this screen
+    --output-dir=<output-dir>  Directory to save the highlight clips [default: /tmp/]
+    --save-json                Save the feature data as a JSON file [default: False]
+
+Notes:
+    The input file can be a video or JSON file
+"""
+
+import os
+import sys
+import time
+from pipeline.feature_extractors import RandomFeatureExtractor,\
+        LaughterFeatureExtractor, LoudAudioFeatureExtractor,\
+        VideoActivityFeatureExtractor, JSONFeatureExtractor
+from pipeline.utils import SourceMedia, Source
+from pipeline.producers import FfmpegVideoProducer, JSONProducer
+from pipeline.consolidators import OverlapConsolidator
+
+from docopt import docopt
+
+def main():
+    start_time = time.time()
+    PRINT_SOMETHING = False
+    if PRINT_SOMETHING:
+        print(f"+{'-'*78}+")
+        print(f"| Starting pipeline to make highlight clips at {time.strftime('%Y-%m-%d %H:%M:%S')}{' '*13}|")
+        print(f"+{'-'*78}+")
+        time.sleep(1)
+
+    args = docopt(__doc__)
+    input_file = args['<input_file>']
+    output_dir = args['--output-dir']
+    # expand "./" to current directory
+    if output_dir == "./":
+        output_dir = os.getcwd()
+    save_json = args['--save-json']
+
+    source_media = SourceMedia(sources=[Source(input_file, input_file, "manual")])
+
+    combined_features = []
+
+    # feature extractors: laughter, loudness, video activity, words
+    if input_file.endswith(".json"):
+        feature_extractors = [JSONFeatureExtractor]
+    else:
+        feature_extractors = [LaughterFeatureExtractor, LoudAudioFeatureExtractor,
+                              VideoActivityFeatureExtractor
+                              ]
+        # NOTE: words FE crashes OOM due to noted issues on their GH
+        # so omitted for now
+
+    for fe in feature_extractors:
+        fe_instance = fe(input_files=source_media)
+        fe_instance.setup()
+        fe_instance.run()
+        fe_instance.teardown()
+        if fe_instance.features:
+            combined_features.extend(fe_instance.features)
+
+    if combined_features:
+        if PRINT_SOMETHING:
+            for f in combined_features:
+                print(f"Feature: {f}")
+
+        # consolidate features
+        oc = OverlapConsolidator(features=combined_features)
+        oc.consolidate()
+
+        # output before video
+        if save_json:
+            jsonprod = JSONProducer(features=combined_features)
+            jsonprod.produce()
+
+        # make video from features:
+        ffprod = FfmpegVideoProducer(features=oc.features,
+                                     output_dir=output_dir,
+                                     compile_clips=False)
+        ffprod.produce()
+
+
+
+    end_time = time.time()
+    if PRINT_SOMETHING:
+        print(f"+{'-'*78}+")
+        print(f"| Pipeline finished creating highlight video at {time.strftime('%Y-%m-%d %H:%M:%S')}{' '*12}|")
+        print(f"| Total time: {end_time - start_time:.2f} seconds {' '*51}|")
+        print(f"+{'-'*78}+")
+
+
+if __name__ == '__main__':
+    main()
diff --git a/make_highlight_video.py b/make_highlight_video.py
new file mode 100644
index 0000000..973921c
--- /dev/null
+++ b/make_highlight_video.py
@@ -0,0 +1,106 @@
+"""make_highlight_video.py -- use the pipeline to make a highlight from a video
+
+Usage:
+    make_highlight_video.py <input_file> [--max-duration=<max-duration>] [--output-dir=<output-dir>] [--output-filename=<output-filename>] [--save-json]
+
+Options:
+    -h --help                            Show this screen
+    --max-duration=<max-duration>        Maximum duration of the highlight
clip [default: 60]
+    --output-dir=<output-dir>            Directory to save the highlight clip [default: ./]
+    --output-filename=<output-filename>  Filename of the highlight clip [default: highlights.mp4]
+    --save-json                          Save the feature data as a JSON file [default: False]
+
+Notes:
+    The input file can be a video or JSON file
+"""
+
+import os
+import sys
+import time
+from pipeline.feature_extractors import RandomFeatureExtractor,\
+        LaughterFeatureExtractor, LoudAudioFeatureExtractor,\
+        VideoActivityFeatureExtractor, JSONFeatureExtractor
+from pipeline.utils import SourceMedia, Source
+from pipeline.producers import FfmpegVideoProducer, JSONProducer
+from pipeline.consolidators import OverlapConsolidator
+from pipeline.adjusters import TargetTimeAdjuster
+
+from docopt import docopt
+
+def main():
+    start_time = time.time()
+    PRINT_SOMETHING = False
+    if PRINT_SOMETHING:
+        print(f"+{'-'*78}+")
+        print(f"| Starting pipeline to make highlight video at {time.strftime('%Y-%m-%d %H:%M:%S')}{' '*13}|")
+        print(f"+{'-'*78}+")
+        time.sleep(1)
+
+    args = docopt(__doc__)
+    input_file = args['<input_file>']
+    max_duration = float(args['--max-duration'])
+    output_dir = args['--output-dir']
+    # expand "./" to current directory
+    if output_dir == "./":
+        output_dir = os.getcwd()
+    output_filename = args['--output-filename']
+    save_json = args['--save-json']
+
+    source_media = SourceMedia(sources=[Source(input_file, input_file, "manual")])
+
+    combined_features = []
+
+    # feature extractors: laughter, loudness, video activity, words
+    if input_file.endswith(".json"):
+        feature_extractors = [JSONFeatureExtractor]
+    else:
+        feature_extractors = [LaughterFeatureExtractor, LoudAudioFeatureExtractor,
+                              VideoActivityFeatureExtractor
+                              ]
+        # NOTE: words FE crashes OOM due to noted issues on their GH
+        # so omitted for now
+
+    for fe in feature_extractors:
+        fe_instance = fe(input_files=source_media)
+        fe_instance.setup()
+        fe_instance.run()
+        fe_instance.teardown()
+        if fe_instance.features:
+            combined_features.extend(fe_instance.features)
+
+    if combined_features:
+        if PRINT_SOMETHING:
+            for f in combined_features:
+                print(f"Feature: {f}")
+
+        # consolidate features
+        oc = OverlapConsolidator(features=combined_features)
+        oc.consolidate()
+
+        # adjust features to target time
+        tta = TargetTimeAdjuster(features=oc.features, target_time=max_duration)
+        tta.adjust()
+
+        # output before video
+        if save_json:
+            jsonprod = JSONProducer(features=combined_features)
+            jsonprod.produce()
+
+        # make video from tta features:
+        ffprod = FfmpegVideoProducer(features=tta.features,
+                                     output_dir=output_dir,
+                                     output_filename=output_filename)
+        ffprod.produce()
+
+
+
+    end_time = time.time()
+    if PRINT_SOMETHING:
+        print(f"+{'-'*78}+")
+        print(f"| Pipeline finished creating highlight video at {time.strftime('%Y-%m-%d %H:%M:%S')}{' '*12}|")
+        print(f"| Total time: {end_time - start_time:.2f} seconds {' '*51}|")
+        print(f"+{'-'*78}+")
+
+
+if __name__ == '__main__':
+    main()