|
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697 |
"""make_highlight_clips.py -- use the pipeline to make highlight clips from a video

Usage:
    make_highlight_clips.py <input_file> [--output-dir=<dir>] [--output-filename=<filename>] [--save-json]

Options:
    -h --help                      Show this screen
    --output-dir=<dir>             Directory to save the highlight clips [default: /tmp/]
    --output-filename=<filename>   Filename for the output clip
    --save-json                    Save the feature data as a JSON file [default: False]

Notes:
    <input_file>  The input file can be a video or JSON file
"""
-
- import os
- import sys
- import time
- from pipeline.feature_extractors import RandomFeatureExtractor,\
- LaughterFeatureExtractor, LoudAudioFeatureExtractor,\
- VideoActivityFeatureExtractor, JSONFeatureExtractor
- from pipeline.utils import SourceMedia, Source
- from pipeline.producers import FfmpegVideoProducer, JSONProducer
- from pipeline.consolidators import OverlapConsolidator
-
- from docopt import docopt
-
def _extract_features(input_file, source_media):
    """Run the appropriate feature extractors and return every feature found.

    Args:
        input_file: path given on the command line; a ``.json`` suffix means
            the features were pre-computed and should just be loaded back.
        source_media: SourceMedia wrapping the input for the extractors.

    Returns:
        list of features collected from all extractors (possibly empty).
    """
    if input_file.endswith(".json"):
        # pre-computed features: load them back instead of re-analysing media
        extractor_classes = [JSONFeatureExtractor]
    else:
        # NOTE: words FE crashes OOM due to noted issues on their GH,
        # so it is omitted for now
        extractor_classes = [LaughterFeatureExtractor,
                             LoudAudioFeatureExtractor,
                             VideoActivityFeatureExtractor]

    features = []
    for extractor_cls in extractor_classes:
        extractor = extractor_cls(input_files=source_media)
        extractor.setup()
        extractor.run()
        extractor.teardown()
        if extractor.features:
            features.extend(extractor.features)
    return features


def main():
    """Run the highlight-clip pipeline end to end.

    Parses CLI arguments via docopt, extracts features from the input
    (a video, or a JSON file of pre-computed features), consolidates
    overlapping features, optionally dumps them as JSON, and renders
    one clip per consolidated feature with ffmpeg.
    """
    args = docopt(__doc__)
    input_file = args['<input_file>']
    output_dir = args['--output-dir']
    # expand "./" to the current directory so producers get a real path
    if output_dir == "./":
        output_dir = os.getcwd()
    save_json = args['--save-json']

    source_media = SourceMedia(sources=[Source(input_file, input_file, "manual")])

    combined_features = _extract_features(input_file, source_media)
    if not combined_features:
        # nothing detected -- bail out instead of producing empty output
        print("No features extracted; no clips produced.", file=sys.stderr)
        return

    # consolidate overlapping features into single spans
    oc = OverlapConsolidator(features=combined_features)
    oc.consolidate()

    # output the raw (pre-consolidation) feature data before making video
    if save_json:
        jsonprod = JSONProducer(features=combined_features)
        jsonprod.produce()

    # make one video clip per consolidated feature
    ffprod = FfmpegVideoProducer(features=oc.features,
                                 output_dir=output_dir,
                                 compile_clips=False)
    ffprod.produce()
-
# Script entry point: run the pipeline only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()
|