from abc import ABC
import json
import logging
import os
import random
import subprocess
from ast import literal_eval

from pipeline.utils import SourceMedia, Source, Feature, Interval

# for loudness detection
import soundfile
import pyloudnorm

# for word detection
from faster_whisper import WhisperModel, BatchedInferencePipeline

logger = logging.getLogger(__name__)


class FeatureExtractor(ABC):
    """Feature extractor interface."""
    # TODO: #API -- decide if .features will be a member variable

    def _run_get_output(self, cmd: list, cwd: str = ".") -> str:
        """Run a command and return its output as a string.

        Defined here so it can be mocked out in tests via unittest.mock.patch.
        """
        return subprocess.run(cmd, stdout=subprocess.PIPE, cwd=cwd).stdout.decode("utf-8")
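
    # In tests this can be stubbed out along these lines (hypothetical test
    # code, not part of this module):
    #   with unittest.mock.patch.object(FeatureExtractor, "_run_get_output",
    #                                   return_value="instance: (1.0, 2.0)"):
    #       ...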

    def setup(self):
        """Set up the feature extractor -- validate input files & config."""

    def run(self):
        """Run the feature extractor -- extract features."""

    def teardown(self):
        """Tear down the feature extractor -- clean up any temporary files created during setup."""


class LaughterFeatureExtractor(FeatureExtractor):
    """Feature extractor for laughter detection.

    This class is responsible for extracting features corresponding to laughter in media files.

    Here:
        setup() is used to validate input files & config, which may involve processing video files to extract audio
        run() is used to extract features from the audio using jrgillick's laughter-detection
        teardown() is used to clean up any temporary files created during setup according to the config

    See: https://github.com/jrgillick/laughter-detection for the laughter-detection library
    """
    _PREPEND_TIME = 7.0  # seconds before the laugh
    _APPEND_TIME = 3.0   # seconds after the laugh

    def __init__(self, input_files=None, config=None):
        """It is expected that input_files is a SourceMedia object"""
        self.input_files = input_files
        self.config = config
        self.features = []

    def _laughdetect(self, audio_file) -> list:
        """Run laughter detection on the audio file.

        Returns a list of 2-tuples, each representing a laugh instance in the audio file.
        """
        laugh_detector_dir = "/home/robert/mounts/980data/code/laughter-detection/"
        laugh_detector_script = "segment_laughter.py"
        # fake output for testing
        # laugh_detector_path = "tests/fake_segment_laughter.py"
        laugh_detector_cmd = ["python", f"{laugh_detector_dir}{laugh_detector_script}",
                              f"--input_audio_file={audio_file}"]
        # run the command and capture stdout, ignoring the exit status
        # use self._run_get_output to allow mocking in tests
        laugh_output = self._run_get_output(laugh_detector_cmd, laugh_detector_dir)
        # ↑ we have to pass cwd to keep laughter-detection's imports happy;
        # it also complains if no output dir is specified, but the laugh
        # instances are still printed, so that works out fine
        # laughs are lines in stdout that start with "instance:", followed by
        # a space and a 2-tuple of floats -- so skip the first 10 characters
        # ("instance: ") and evaluate the rest of the line
        return [literal_eval(instance[10:])
                for instance in laugh_output.splitlines()
                if instance.startswith("instance: ")]
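
    # Illustrative example: given stdout lines like
    #   instance: (12.04, 14.5)
    #   instance: (101.3, 103.1)
    # _laughdetect returns [(12.04, 14.5), (101.3, 103.1)]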

    def _adjust_features(self):
        """Adjust features according to config.

        Generically, this ensures features conform to config -- min/max feature length, etc.

        In the context of LaughterFeatureExtractor, there is some secret sauce: things that
        cause a laugh generally /precede/ the laugh, so we want more time before the detected
        start than after the end. For example, for a minimum feature length of 15s, we might
        prepend 10 seconds and append 5 seconds, or prepend 12s and append 3s. We may wish to
        do this pre/post adjustment for all laughter features found, regardless of length.

        TODO: figure out how we're going to handle length adjustments
        TODO: config for length adjustments per design doc
        TODO: play with numbers more to see what works best
        """
        for feature in self.features:
            # do the pre & post adjustment
            feature.interval.move_start(-self._PREPEND_TIME, relative=True)
            feature.interval.move_end(self._APPEND_TIME, relative=True)
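
    # e.g. with the defaults above, a detected laugh at (10.0, 12.0) becomes
    # the feature interval (3.0, 15.0): 7s of lead-in, 3s of tail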

    def setup(self):
        """Set up the laughter feature extractor -- validate input files & config.

        jrgillick's laughter-detection library can work with AV files directly.

        TODO: validate input files
        TODO: handle config
        """
        logger.debug("LaughterFeatureExtractor setup")
        # Validate input files
        if not self.input_files:
            raise ValueError("No input files provided")
        # TODO: convert video to audio if needed

    def run(self):
        """Extract laughter features for each input file."""
        if self.input_files:
            for file in self.input_files:
                # call via a method so tests can mock out the detector
                laughs = self._laughdetect(file.path)
                for laugh in laughs:
                    start, end = laugh
                    self.features.append(Feature(interval=Interval(start=start, end=end),
                                                 source=file, feature_extractor="laughter"))
            # TODO: implement options eg minimum feature length
            # adjust features
            self._adjust_features()

    def teardown(self):
        pass


class RandomFeatureExtractor(FeatureExtractor):
    """Feature extractor for random feature generation.

    This class is responsible for generating random features for testing purposes.

    Here:
        setup() is used to validate input files & config
        run() is used to generate random features
        teardown() is used to clean up any temporary files created during setup according to the config
    """
    NUM_FEATURES = 5
    MAX_DURATION = 20.0

    def __init__(self, input_files=None, config=None):
        """It is expected that input_files is a SourceMedia object"""
        self.input_files = input_files
        self.config = config
        self.features = []

    def setup(self):
        """Set up the random feature extractor -- validate input files & config."""
        logger.debug("RandomFeatureExtractor setup")
        # Validate input files
        if not self.input_files:
            raise ValueError("No input files provided")

    def run(self):
        """Generate random features for each input file."""
        # check self.input_files is of type SourceMedia
        if not self.input_files or not isinstance(self.input_files, SourceMedia):
            raise ValueError("No input files provided")
        for file in self.input_files:
            for _ in range(self.NUM_FEATURES):
                # random duration and start, rounded to 3 decimal places;
                # the subtraction is parenthesised so the feature always
                # ends within the file (and start can never go negative)
                duration = round(random.random() * self.MAX_DURATION, 3)
                start = round(random.random() * (file.duration() - duration), 3)
                self.features.append(Feature(interval=Interval(start=start, duration=duration),
                                             source=file, feature_extractor="random"))

    def teardown(self):
        pass


class LoudAudioFeatureExtractor(FeatureExtractor):
    """Feature extractor for loud audio detection.

    This class is responsible for extracting features corresponding to loud audio in media files.

    Here:
        setup() is used to validate input files & config, and to extract audio
        run() uses pyloudnorm to detect loud audio
        teardown() is used to clean up temporary files created during setup (if specified by config)
    """
    _CONFIG_DEFAULT_NUM_FEATURES = 5     # keep the top 5 loudnesses
    _CONFIG_DEFAULT_MIN_DURATION = 5.00  # seconds

    def __init__(self, input_files=None, config=None,
                 num_features=_CONFIG_DEFAULT_NUM_FEATURES,
                 min_duration=_CONFIG_DEFAULT_MIN_DURATION):
        if not input_files:
            raise ValueError("No input files provided!")
        self.input_files = input_files
        self.config = config
        self.features = []
        self._num_features = num_features
        self._min_duration = min_duration

    def _audio_file_from_path(self, path: str) -> str:
        """Return the audio file path given a video file path.

        Example:
            - in  = "/path/to/video.mp4"
            - out = "/tmp/video.mp4.wav"
        """
        OUTPUT_DIR = "/tmp"
        return f"{OUTPUT_DIR}/{os.path.basename(path)}.wav"

    def _get_loudnesses(self, data, meter, rate, window_size, stride_size):
        """Extract loudnesses from the audio data using pyloudnorm.

        Returns a list of 2-tuples, each representing a timecode and loudness value.
        """
        loudnesses = []
        for w in range(0, len(data) - window_size, stride_size):
            window = data[w:w + window_size, 0:2]  # extract window
            loudnesses.append((w / rate, meter.integrated_loudness(window)))
        return loudnesses
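
    # e.g. at rate=48000 with the 0.5s window and stride used below, both
    # window_size and stride_size are 24000 samples, so timecodes land at
    # 0.0, 0.5, 1.0, ... seconds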

    def _loudnorm(self, audio_file):
        """Run pyloudnorm on the audio file"""
        data, rate = soundfile.read(audio_file)  # load audio (with shape (samples, channels))
        meter = pyloudnorm.Meter(rate=rate, block_size=0.3)  # create BS.1770 meter
        loudness_features = []
        window_size = int(rate * 0.5)  # 500ms
        stride_size = int(rate * 0.5)  # 500ms -- no overlap
        # for w in range(data.shape[0]//100):
        #     loudnesses.append(meter.integrated_loudness(data[w:w+int(0.3*rate),0:2]))
        loudnesses = self._get_loudnesses(data, meter, rate, window_size, stride_size)
        # sort by loudness, loudest first, discarding -inf (i.e. silent) windows
        for timecode, loudval in sorted([l for l in loudnesses if float(l[1]) != float("-inf")],
                                        key=lambda x: x[1], reverse=True):
            # print(f"Timecode: {timecode}, Loudness: {loudval}")
            loudness_features.append((timecode, round(loudval, 3)))  # round to 3 DP
        return loudness_features

    def _keep_num(self, features, num=_CONFIG_DEFAULT_NUM_FEATURES) -> list:
        """Keep the top n features (default: 5).

        Approach:
            - for i in 0..n
                + expand the ith top feature to min duration
                  (move start back by 0.5*min_duration, end forward by 0.5*min_duration)
                + drop any other features that now fall within that feature's range
            - return the top n features

        Each feature is a Feature object, with an Interval object.
        """
        for i in range(min(num, len(features))):
            # expand the feature to min_duration
            features[i].interval.move_start(-0.5 * self._min_duration, relative=True)
            features[i].interval.move_end(0.5 * self._min_duration, relative=True)
            # drop any *other* features now contained in this feature's range
            # (features[i] itself fails both range checks, so keep it explicitly)
            features = [f for f in features if f is features[i] or
                        f.interval.start < features[i].interval.start or
                        f.interval.end > features[i].interval.end]
        return features[:num]
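
    # e.g. with min_duration=5.0, a top feature at (10.0, 10.5) expands to
    # (7.5, 13.0) and absorbs any lesser feature lying wholly inside that range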

    def setup(self):
        """Extract audio from video files to be processed by pyloudnorm.

        TODO: config -- hardcoded for now
        """
        # pyloudnorm expects WAV files
        for file in self.input_files:
            audio_file = self._audio_file_from_path(file.path)
            # ffmpeg -i input.mp4 -vn -acodec pcm_s16le output.wav
            subprocess.run(["ffmpeg", "-y", "-i", file.path, "-vn", "-acodec", "pcm_s16le", audio_file],
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    def run(self):
        """Use pyloudnorm to detect loud audio."""
        for file in self.input_files:
            audio_file = self._audio_file_from_path(file.path)
            loudnesses = self._loudnorm(audio_file)
            features = []
            for time, loudness in loudnesses:
                features.append(Feature(interval=Interval(start=time, duration=0.500),
                                        source=file, feature_extractor="loudness",
                                        score=loudness))
            # prune the features list to keep the top self._num_features
            self.features = self._keep_num(features, self._num_features)


class VideoActivityFeatureExtractor(FeatureExtractor):
    """Feature extractor for video activity detection.

    This class is responsible for extracting features corresponding to high activity in video files.

    Uses ffmpeg's scdet filter with a threshold of zero.

    Here:
        setup() is used to validate input files & config
        run() is used to extract features from the video using ffmpeg's scdet filter
        teardown() is used to clean up any temporary files created during setup according to the config

    #TODO: minimum duration -- consider whether to do here, or expand duration post-consolidation
    """
    _CONFIG_DEFAULT_NUM_FEATURES = 5     # keep the top 5 activity moments
    _CONFIG_DEFAULT_MIN_DURATION = 5.00  # seconds

    def __init__(self, input_files=None, config=None,
                 num_features=_CONFIG_DEFAULT_NUM_FEATURES,
                 min_duration=_CONFIG_DEFAULT_MIN_DURATION):
        if not input_files:
            raise ValueError("No input files provided!")
        self.input_files = input_files
        self.config = config
        self.features = []
        self._num_features = num_features
        self._min_duration = min_duration

    def _scdet(self, video_file):
        """Run the scdet filter on the video file"""
        ffmpeg_cmd = ["ffmpeg", "-i", video_file, "-vf", "scdet=threshold=0", "-f", "null", "-"]
        # output (on stderr) is of the form:
        #   [scdet @ 0x7f0798003d00] lavfi.scd.score: 0.031, lavfi.scd.time: 23.65
        #   [scdet @ 0x7f0798003d00] lavfi.scd.score: 0.006, lavfi.scd.time: 23.70
        # capture output, extract time & score
        scdet_output = subprocess.run(ffmpeg_cmd, stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE).stderr.decode("utf-8")
        # extract time & score from each line
        scores = []
        for line in scdet_output.splitlines():
            if "lavfi.scd.score" in line:
                scores.append((float(line.split(",")[1].split(":")[1]),
                               float(line.split(",")[0].split(":")[1])))
        return scores
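
    # e.g. "... lavfi.scd.score: 0.031, lavfi.scd.time: 23.65" parses to
    # (23.65, 0.031) -- note the tuples are (time, score) ordered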

    def _nonoverlap_mean(self, scores, window_size=0.500) -> list:
        """Take the mean of non-overlapping windows of scores.

        Input:  list of tuples in the format (time, score)
        Output: list of tuples in the format (time, mean_score) (reduced set)
        """
        means = []
        current_window = []
        current_window_start = 0.0
        for time, score in scores:
            if time - current_window_start > window_size:
                # calculate the mean of the current window (guard against an
                # empty first window, e.g. when the first score arrives late)
                if current_window:
                    mean_score = sum(s for _, s in current_window) / len(current_window)
                    means.append((current_window_start, round(mean_score, 3)))
                # reset window
                current_window = []
                current_window_start = time
            current_window.append((time, score))
        return means
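
    # e.g. [(0.1, 0.2), (0.3, 0.4), (0.7, 0.6)] with window_size=0.5 yields
    # [(0.0, 0.3)] -- note the trailing partial window is never flushed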

    def _drop_lowest(self, scores, percent=33):
        """Keep the top `percent`% of scores, dropping the rest.

        NB: despite the name, `percent` is the share *kept*, not dropped --
        e.g. the call in run() below passes 66, keeping the top two-thirds.
        """
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        return scores[:int(len(scores) * (percent / 100))]

    def _keep_num(self, features, num=_CONFIG_DEFAULT_NUM_FEATURES) -> list:
        """Keep the top n features (default: 5).

        Approach:
            - for i in 0..n
                + expand the ith top feature to min duration
                  (move start back by 0.5*min_duration, end forward by 0.5*min_duration)
                + drop any other features that now fall within that feature's range
            - return the top n features

        Each feature is a Feature object, with an Interval object.

        NB: duplicates LoudAudioFeatureExtractor._keep_num -- a candidate for
        hoisting into FeatureExtractor.
        """
        for i in range(min(num, len(features))):
            # expand the feature to min_duration
            features[i].interval.move_start(-0.5 * self._min_duration, relative=True)
            features[i].interval.move_end(0.5 * self._min_duration, relative=True)
            # drop any *other* features now contained in this feature's range
            # (features[i] itself fails both range checks, so keep it explicitly)
            features = [f for f in features if f is features[i] or
                        f.interval.start < features[i].interval.start or
                        f.interval.end > features[i].interval.end]
        return features[:num]

    def setup(self):
        pass

    def run(self):
        for file in self.input_files:
            scores = self._scdet(file.path)
            means = sorted(self._nonoverlap_mean(scores), key=lambda x: x[1], reverse=True)
            features = []
            for time, score in self._drop_lowest(means, 66):
                features.append(Feature(interval=Interval(start=time, duration=0.500),
                                        source=file, feature_extractor="videoactivity",
                                        score=score))
            # prune the features list to keep the top self._num_features
            self.features = self._keep_num(features, self._num_features)

    def teardown(self):
        pass


class JSONFeatureExtractor(FeatureExtractor):
    """(Re-)create features from a JSON file.

    The JSON file can have one of two formats:
        - the format produced by the pipeline (@see: video_producers.py:JSONProducer)
        - a simplified format which is easier for manual creation
    """

    def __init__(self, input_files=None, config=None):
        if not input_files:
            raise ValueError("No input files provided!")
        self.input_files = input_files
        self.config = config
        self.features = []

    def setup(self):
        pass

    def _interval_from_dict(self, d):
        return Interval(start=d["start"], duration=d["duration"])

    def _source_from_dict(self, d):
        return Source(d["source"], d["path"], d["provider"])

    def _read_json_from_file(self, file):
        """Read a JSON file and return the contents.

        Method exists to allow for mocking in tests.
        """
        with open(file, "r") as f:
            return json.load(f)
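
    # The pipeline JSON format consumed by run() below is a list of objects
    # shaped like this (field names inferred from the accessors above; the
    # values are illustrative):
    #   [{"interval": {"start": 1.0, "duration": 2.5},
    #     "source": {"source": "...", "path": "...", "provider": "..."},
    #     "feature_extractor": "loudness",
    #     "score": -9.5}]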

    def run(self):
        # only the pipeline JSON format for now
        # TODO: add support for the simplified format
        for file in self.input_files:
            features_from_json = self._read_json_from_file(file.path)
            for feature in features_from_json:
                self.features.append(Feature(interval=self._interval_from_dict(feature["interval"]),
                                             source=self._source_from_dict(feature["source"]),
                                             feature_extractor=feature["feature_extractor"],
                                             score=feature["score"]))

    def teardown(self):
        pass


class WordFeatureExtractor(FeatureExtractor):
    """Feature extractor for specific word detection (uses Whisper)"""
    # set defaults for whisper settings
    DEFAULT_MODEL_SIZE = "medium"
    DEFAULT_DEVICE = "cpu"
    DEFAULT_COMPUTE_TYPE = "int8"
    DEFAULT_BEAM_SIZE = 5
    DEFAULT_BATCH_SIZE = 16
    DEFAULT_PIPELINE_TYPE = "batched"  # or "stream"

    def _transcribe(self, model, file, **kwargs):
        """Defined here to allow for mocking in tests"""
        return model.transcribe(file, **kwargs)

    def _whispermodel(self, model_size=DEFAULT_MODEL_SIZE,
                      device=DEFAULT_DEVICE, compute_type=DEFAULT_COMPUTE_TYPE):
        """Defined here to allow for mocking out in tests"""
        return WhisperModel(model_size, device=device, compute_type=compute_type)

    def _batched_inference_pipeline(self, model):
        """Defined here to allow for mocking out in tests"""
        return BatchedInferencePipeline(model=model)

    def __init__(self, input_files=None, config=None):
        if not input_files:
            raise ValueError("No input files provided!")
        self.input_files = input_files
        self.config = config
        self.features = []
        self.words = []

    def setup(self, words=None):
        """Set up the word feature extractor -- validate input files & config.

        Whisper expects a list of words to search for in the audio.
        """
        logger.debug("WordFeatureExtractor setup")
        # Validate words -- warn if none were provided
        # (default is None rather than a mutable [], the classic Python pitfall)
        if not words:
            logger.warning("No words provided for detection")
        self.words = words or []
        # TODO: consider stripping punctuation since Whisper produces words+punctuation
        # and we might want to strip the punctuation there too

    def run(self):
        """Extract features corresponding to the target words (set in setup) for each input file.

        Uses Whisper to detect words in the audio, then matches those against the
        target words and creates features for the matching segments.

        Note: if no words were supplied we can exit early.
        """
        if len(self.words) == 0:
            return
        batched = self.DEFAULT_PIPELINE_TYPE == "batched"
        # no early exits beyond this point
        # TODO: consider maybe loglevel notice of estimated time! consider also: max execution time config?
        # TODO: config options for model size, device, compute type
        model = self._whispermodel()  # NB uses defaults, TODO: add config options
        # NOTE: batched not available on pypi yet at time of writing
        if batched:
            batched_model = self._batched_inference_pipeline(model)
        for file in self.input_files:
            # transcribe the audio file
            if batched:
                segments, _ = self._transcribe(batched_model, file.path, batch_size=self.DEFAULT_BATCH_SIZE)
            else:
                segments, _ = self._transcribe(model, file.path, beam_size=self.DEFAULT_BEAM_SIZE)
            # process the segments -- each segment has: start, end, text
            for segment in segments:
                # check if any of the target words appear in the segment
                for word in segment.text.split():
                    if word in self.words:
                        self.features.append(Feature(interval=Interval(start=segment.start, end=segment.end),
                                                     source=file, feature_extractor="word",
                                                     score=1.0))
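
# A minimal usage sketch (hypothetical driver code -- the SourceMedia
# construction and the file path below are assumptions, not part of this
# module's API):
#   sources = SourceMedia([Source("demo", "/path/to/video.mp4", "local")])
#   extractor = WordFeatureExtractor(input_files=sources)
#   extractor.setup(words=["hello", "goodbye"])
#   extractor.run()
#   for feature in extractor.features:
#       print(feature.interval.start, feature.interval.end)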