
feat: add two exemplar scripts

Fairly basic and could obviously be refactored, but they are there as exemplars
to give folks an idea of where they can start with the pipeline.
main
Rob Hallam 2 months ago
commit 65cb224d6b
2 changed files with 203 additions and 0 deletions
  1. +97 -0 make_highlight_clips.py
  2. +106 -0 make_highlight_video.py

+97 -0 make_highlight_clips.py

@@ -0,0 +1,97 @@
"""make_highlight_clips.py -- use the pipeline to make highlight clips from a video

Usage:
make_highlight_clips.py <input_file> [--output-dir=<dir>] [--output-filename=<filename>] [--save-json]

Options:
-h --help Show this screen
--output-dir=<dir> Directory to save the highlight clips [default: /tmp/]
--save-json Save the feature data as a JSON file [default: False]

Notes:
<input_file> The input file can be a video or JSON file
"""

import os
import sys
import time
from pipeline.feature_extractors import RandomFeatureExtractor,\
    LaughterFeatureExtractor, LoudAudioFeatureExtractor,\
    VideoActivityFeatureExtractor, JSONFeatureExtractor
from pipeline.utils import SourceMedia, Source
from pipeline.producers import FfmpegVideoProducer, JSONProducer
from pipeline.consolidators import OverlapConsolidator

from docopt import docopt
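
# Rough flow below: run the feature extractors over the source media, consolidate
# overlapping features, optionally dump the raw feature data to JSON, then hand the
# consolidated features to FfmpegVideoProducer to render the clips.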

def main():
    start_time = time.time()
    PRINT_SOMETHING = False
    if PRINT_SOMETHING:
        print(f"+{'-'*78}+")
        print(f"| Starting pipeline to make highlight clips at {time.strftime('%Y-%m-%d %H:%M:%S')}{' '*13}|")
        print(f"+{'-'*78}+")
        time.sleep(1)

    args = docopt(__doc__)
    input_file = args['<input_file>']
    output_dir = args['--output-dir']
    # expand "./" to current directory
    if output_dir == "./":
        output_dir = os.getcwd()
    save_json = args['--save-json']

    source_media = SourceMedia(sources=[Source(input_file, input_file, "manual")])

    combined_features = []

    # feature extractors: laughter, loudness, video activity, words
    if input_file.endswith(".json"):
        feature_extractors = [JSONFeatureExtractor]
    else:
        feature_extractors = [LaughterFeatureExtractor, LoudAudioFeatureExtractor,
                              VideoActivityFeatureExtractor
                              ]
        # NOTE: words FE crashes OOM due to noted issues on their GH
        # so omitted for now

    for fe in feature_extractors:
        fe_instance = fe(input_files=source_media)
        fe_instance.setup()
        fe_instance.run()
        fe_instance.teardown()
        if fe_instance.features:
            combined_features.extend(fe_instance.features)

    if combined_features:
        if PRINT_SOMETHING:
            for f in combined_features:
                print(f"Feature: {f}")

        # consolidate features
        oc = OverlapConsolidator(features=combined_features)
        oc.consolidate()

        # output before video
        if save_json:
            jsonprod = JSONProducer(features=combined_features)
            jsonprod.produce()

        # make video from features:
        ffprod = FfmpegVideoProducer(features=oc.features,
                                     output_dir=output_dir,
                                     compile_clips=False)
        ffprod.produce()

    end_time = time.time()
    if PRINT_SOMETHING:
        print(f"+{'-'*78}+")
        print(f"| Pipeline finished creating highlight clips at {time.strftime('%Y-%m-%d %H:%M:%S')}{' '*12}|")
        print(f"| Total time: {end_time - start_time:.2f} seconds {' '*51}|")
        print(f"+{'-'*78}+")


if __name__ == '__main__':
    main()

+106 -0 make_highlight_video.py

@@ -0,0 +1,106 @@
"""make_highlight_video.py -- use the pipeline to make a highlight from a video

Usage:
make_highlight_clip.py <input_file> [--max-duration=<seconds>] [--output-dir=<dir>] [--output-filename=<filename>] [--save-json]

Options:
-h --help Show this screen
--max-duration=<seconds> Maximum duration of the highlight clip [default: 60]
--output-dir=<dir> Directory to save the highlight clip [default: ./]
--output-filename=<filename> Filename of the highlight clip [default: highlights.mp4]
--save-json Save the feature data as a JSON file [default: False]

Notes:
<input_file> The input file can be a video or JSON file
"""

import os
import sys
import time
from pipeline.feature_extractors import RandomFeatureExtractor,\
    LaughterFeatureExtractor, LoudAudioFeatureExtractor,\
    VideoActivityFeatureExtractor, JSONFeatureExtractor
from pipeline.utils import SourceMedia, Source
from pipeline.producers import FfmpegVideoProducer, JSONProducer
from pipeline.consolidators import OverlapConsolidator
from pipeline.adjusters import TargetTimeAdjuster

from docopt import docopt
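
# Rough flow below: run the feature extractors over the source media, consolidate
# overlapping features, adjust them toward the target duration with TargetTimeAdjuster,
# optionally dump the raw feature data to JSON, then hand the adjusted features to
# FfmpegVideoProducer to render a single highlight video.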

def main():
    start_time = time.time()
    PRINT_SOMETHING = False
    if PRINT_SOMETHING:
        print(f"+{'-'*78}+")
        print(f"| Starting pipeline to make highlight video at {time.strftime('%Y-%m-%d %H:%M:%S')}{' '*13}|")
        print(f"+{'-'*78}+")
        time.sleep(1)

    args = docopt(__doc__)
    input_file = args['<input_file>']
    max_duration = float(args['--max-duration'])
    output_dir = args['--output-dir']
    # expand "./" to current directory
    if output_dir == "./":
        output_dir = os.getcwd()
    output_filename = args['--output-filename']
    save_json = args['--save-json']

    source_media = SourceMedia(sources=[Source(input_file, input_file, "manual")])

    combined_features = []

    # feature extractors: laughter, loudness, video activity, words
    if input_file.endswith(".json"):
        feature_extractors = [JSONFeatureExtractor]
    else:
        feature_extractors = [LaughterFeatureExtractor, LoudAudioFeatureExtractor,
                              VideoActivityFeatureExtractor
                              ]
        # NOTE: words FE crashes OOM due to noted issues on their GH
        # so omitted for now

    for fe in feature_extractors:
        fe_instance = fe(input_files=source_media)
        fe_instance.setup()
        fe_instance.run()
        fe_instance.teardown()
        if fe_instance.features:
            combined_features.extend(fe_instance.features)

    if combined_features:
        if PRINT_SOMETHING:
            for f in combined_features:
                print(f"Feature: {f}")

        # consolidate features
        oc = OverlapConsolidator(features=combined_features)
        oc.consolidate()

        # adjust features to target time
        tta = TargetTimeAdjuster(features=oc.features, target_time=max_duration)
        tta.adjust()

        # output before video
        if save_json:
            jsonprod = JSONProducer(features=combined_features)
            jsonprod.produce()

        # make video from tta features:
        ffprod = FfmpegVideoProducer(features=tta.features,
                                     output_dir=output_dir,
                                     output_filename=output_filename)
        ffprod.produce()

    end_time = time.time()
    if PRINT_SOMETHING:
        print(f"+{'-'*78}+")
        print(f"| Pipeline finished creating highlight video at {time.strftime('%Y-%m-%d %H:%M:%S')}{' '*12}|")
        print(f"| Total time: {end_time - start_time:.2f} seconds {' '*51}|")
        print(f"+{'-'*78}+")


if __name__ == '__main__':
    main()
