2016-01-10 19:09:53 +00:00
|
|
|
import binascii
|
2022-04-11 22:32:57 +00:00
|
|
|
import io
|
|
|
|
import re
|
2022-06-24 10:54:43 +00:00
|
|
|
import urllib.parse
|
2013-09-23 15:59:27 +00:00
|
|
|
|
2022-04-17 17:18:50 +00:00
|
|
|
from . import get_suitable_downloader
|
2016-05-01 07:56:51 +00:00
|
|
|
from .external import FFmpegFD
|
2022-04-11 22:32:57 +00:00
|
|
|
from .fragment import FragmentFD
|
2021-04-28 10:47:30 +00:00
|
|
|
from .. import webvtt
|
2022-04-20 19:05:57 +00:00
|
|
|
from ..dependencies import Cryptodome_AES
|
2022-04-11 22:32:57 +00:00
|
|
|
from ..utils import bug_reports_message, parse_m3u8_attributes, update_url_query
|
2013-09-23 15:59:27 +00:00
|
|
|
|
|
|
|
|
2016-02-19 18:29:24 +00:00
|
|
|
class HlsFD(FragmentFD):
    """
    Download segments in a m3u8 manifest. External downloaders can take over
    the fragment downloads by supporting the 'm3u8_frag_urls' protocol and
    re-defining the 'supports_manifest' function.
    """

    # Downloader name shown in screen/log output and progress messages
    FD_NAME = 'hlsnative'
|
|
|
|
|
2016-05-01 07:56:51 +00:00
|
|
|
@staticmethod
|
2021-09-17 19:21:27 +00:00
|
|
|
def can_download(manifest, info_dict, allow_unplayable_formats=False):
|
2021-02-12 03:51:59 +00:00
|
|
|
UNSUPPORTED_FEATURES = [
|
2017-04-13 11:21:17 +00:00
|
|
|
# r'#EXT-X-BYTERANGE', # playlists composed of byte ranges of media files [2]
|
2016-06-04 20:16:05 +00:00
|
|
|
|
2016-05-09 14:45:03 +00:00
|
|
|
# Live streams heuristic does not always work (e.g. geo restricted to Germany
|
|
|
|
# http://hls-geo.daserste.de/i/videoportal/Film/c_620000/622873/format,716451,716457,716450,716458,716459,.mp4.csmil/index_4_av.m3u8?null=0)
|
2016-05-09 16:16:33 +00:00
|
|
|
# r'#EXT-X-MEDIA-SEQUENCE:(?!0$)', # live streams [3]
|
2016-06-04 20:16:05 +00:00
|
|
|
|
|
|
|
# This heuristic also is not correct since segments may not be appended as well.
|
2016-06-04 20:31:10 +00:00
|
|
|
# Twitch vods of finished streams have EXT-X-PLAYLIST-TYPE:EVENT despite
|
|
|
|
# no segments will definitely be appended to the end of the playlist.
|
2016-06-04 20:16:05 +00:00
|
|
|
# r'#EXT-X-PLAYLIST-TYPE:EVENT', # media segments may be appended to the end of
|
2016-06-04 20:21:43 +00:00
|
|
|
# # event media playlists [4]
|
2021-02-23 16:00:56 +00:00
|
|
|
# r'#EXT-X-MAP:', # media initialization [5]
|
2016-05-01 07:56:51 +00:00
|
|
|
# 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.2.4
|
|
|
|
# 2. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.2.2
|
|
|
|
# 3. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.2
|
2016-05-09 14:55:37 +00:00
|
|
|
# 4. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.5
|
2021-01-01 12:26:37 +00:00
|
|
|
# 5. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.2.5
|
2021-02-12 03:51:59 +00:00
|
|
|
]
|
|
|
|
if not allow_unplayable_formats:
|
|
|
|
UNSUPPORTED_FEATURES += [
|
|
|
|
r'#EXT-X-KEY:METHOD=(?!NONE|AES-128)', # encrypted streams [1]
|
|
|
|
]
|
2021-03-10 15:26:24 +00:00
|
|
|
|
|
|
|
def check_results():
|
|
|
|
yield not info_dict.get('is_live')
|
|
|
|
for feature in UNSUPPORTED_FEATURES:
|
|
|
|
yield not re.search(feature, manifest)
|
|
|
|
return all(check_results())
|
2016-05-01 07:56:51 +00:00
|
|
|
|
2014-09-24 12:16:56 +00:00
|
|
|
    def real_download(self, filename, info_dict):
        """
        Download the HLS stream described by info_dict['url'] to `filename`.

        Fetches the m3u8 manifest, decides whether to handle it natively,
        delegate entirely to ffmpeg, or delegate fragment fetching to an
        external downloader; then parses the manifest into a fragment list
        and downloads/appends the fragments. Returns False on fatal errors
        (DRM-protected stream, init fragment after media fragments).
        """
        man_url = info_dict['url']
        self.to_screen('[%s] Downloading m3u8 manifest' % self.FD_NAME)

        # Fetch the manifest; re-read the URL after redirects so relative
        # fragment URIs resolve against the final location.
        urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url))
        man_url = urlh.geturl()
        s = urlh.read().decode('utf-8', 'ignore')

        # Decide native vs ffmpeg handling, collecting a user-facing message.
        can_download, message = self.can_download(s, info_dict, self.params.get('allow_unplayable_formats')), None
        if can_download:
            has_ffmpeg = FFmpegFD.available()
            no_crypto = not Cryptodome_AES and '#EXT-X-KEY:METHOD=AES-128' in s
            if no_crypto and has_ffmpeg:
                can_download, message = False, 'The stream has AES-128 encryption and pycryptodomex is not available'
            elif no_crypto:
                message = ('The stream has AES-128 encryption and neither ffmpeg nor pycryptodomex are available; '
                           'Decryption will be performed natively, but will be extremely slow')
            elif info_dict.get('extractor_key') == 'Generic' and re.search(r'(?m)#EXT-X-MEDIA-SEQUENCE:(?!0$)', s):
                # Non-zero media sequence on a Generic extract suggests a livestream
                install_ffmpeg = '' if has_ffmpeg else 'install ffmpeg and '
                message = ('Live HLS streams are not supported by the native downloader. If this is a livestream, '
                           f'please {install_ffmpeg}add "--downloader ffmpeg --hls-use-mpegts" to your command')
        if not can_download:
            # Refuse DRM-protected streams outright unless explicitly allowed
            has_drm = re.search('|'.join([
                r'#EXT-X-FAXS-CM:',  # Adobe Flash Access
                r'#EXT-X-(?:SESSION-)?KEY:.*?URI="skd://',  # Apple FairPlay
            ]), s)
            if has_drm and not self.params.get('allow_unplayable_formats'):
                self.report_error(
                    'This video is DRM protected; Try selecting another format with --format or '
                    'add --check-formats to automatically fallback to the next best format')
                return False
            # Otherwise hand the whole download over to ffmpeg
            message = message or 'Unsupported features have been detected'
            fd = FFmpegFD(self.ydl, self.params)
            self.report_warning(f'{message}; extraction will be delegated to {fd.get_basename()}')
            return fd.real_download(filename, info_dict)
        elif message:
            self.report_warning(message)

        is_webvtt = info_dict['ext'] == 'vtt'
        if is_webvtt:
            real_downloader = None  # Packing the fragments is not currently supported for external downloader
        else:
            real_downloader = get_suitable_downloader(
                info_dict, self.params, None, protocol='m3u8_frag_urls', to_stdout=(filename == '-'))
        if real_downloader and not real_downloader.supports_manifest(s):
            real_downloader = None
        if real_downloader:
            self.to_screen(f'[{self.FD_NAME}] Fragment downloads will be delegated to {real_downloader.get_basename()}')

        # Ad-break markers used by Anvato and Uplynk manifests
        def is_ad_fragment_start(s):
            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s
                    or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',ad'))

        def is_ad_fragment_end(s):
            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=master' in s
                    or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',segment'))

        fragments = []

        # First pass: count media and ad fragments so progress totals are known
        media_frags = 0
        ad_frags = 0
        ad_frag_next = False
        for line in s.splitlines():
            line = line.strip()
            if not line:
                continue
            if line.startswith('#'):
                if is_ad_fragment_start(line):
                    ad_frag_next = True
                elif is_ad_fragment_end(line):
                    ad_frag_next = False
                continue
            if ad_frag_next:
                ad_frags += 1
                continue
            media_frags += 1

        ctx = {
            'filename': filename,
            'total_frags': media_frags,
            'ad_frags': ad_frags,
        }

        if real_downloader:
            self._prepare_external_frag_download(ctx)
        else:
            self._prepare_and_start_frag_download(ctx, info_dict)

        # Cross-fragment state shared with pack_fragment/fin_fragments below
        extra_state = ctx.setdefault('extra_state', {})

        format_index = info_dict.get('format_index')
        extra_query = None
        extra_param_to_segment_url = info_dict.get('extra_param_to_segment_url')
        if extra_param_to_segment_url:
            extra_query = urllib.parse.parse_qs(extra_param_to_segment_url)
        # NOTE(review): `i` is incremented below but never read — dead counter
        i = 0
        media_sequence = 0
        decrypt_info = {'METHOD': 'NONE'}
        byte_range = {}
        discontinuity_count = 0
        frag_index = 0
        ad_frag_next = False
        # Second pass: build the fragment list, tracking decryption keys,
        # byte ranges, media sequence numbers and discontinuities as we go
        for line in s.splitlines():
            line = line.strip()
            if line:
                if not line.startswith('#'):
                    # A media segment URI (relative or absolute)
                    if format_index and discontinuity_count != format_index:
                        continue
                    if ad_frag_next:
                        continue
                    frag_index += 1
                    # Skip fragments already downloaded (resume support)
                    if frag_index <= ctx['fragment_index']:
                        continue
                    frag_url = (
                        line
                        if re.match(r'^https?://', line)
                        else urllib.parse.urljoin(man_url, line))
                    if extra_query:
                        frag_url = update_url_query(frag_url, extra_query)

                    # Each fragment keeps a reference to the decrypt_info dict
                    # that was current when it appeared in the manifest
                    fragments.append({
                        'frag_index': frag_index,
                        'url': frag_url,
                        'decrypt_info': decrypt_info,
                        'byte_range': byte_range,
                        'media_sequence': media_sequence,
                    })
                    media_sequence += 1

                elif line.startswith('#EXT-X-MAP'):
                    # Media initialization section, emitted as fragment 1
                    if format_index and discontinuity_count != format_index:
                        continue
                    if frag_index > 0:
                        self.report_error(
                            'Initialization fragment found after media fragments, unable to download')
                        return False
                    frag_index += 1
                    map_info = parse_m3u8_attributes(line[11:])  # strip '#EXT-X-MAP:'
                    frag_url = (
                        map_info.get('URI')
                        if re.match(r'^https?://', map_info.get('URI'))
                        else urllib.parse.urljoin(man_url, map_info.get('URI')))
                    if extra_query:
                        frag_url = update_url_query(frag_url, extra_query)

                    # BYTERANGE is 'length[@offset]'; without an offset the
                    # range continues from the previous range's end
                    if map_info.get('BYTERANGE'):
                        splitted_byte_range = map_info.get('BYTERANGE').split('@')
                        sub_range_start = int(splitted_byte_range[1]) if len(splitted_byte_range) == 2 else byte_range['end']
                        byte_range = {
                            'start': sub_range_start,
                            'end': sub_range_start + int(splitted_byte_range[0]),
                        }

                    fragments.append({
                        'frag_index': frag_index,
                        'url': frag_url,
                        'decrypt_info': decrypt_info,
                        'byte_range': byte_range,
                        'media_sequence': media_sequence
                    })
                    media_sequence += 1

                elif line.startswith('#EXT-X-KEY'):
                    # New key context; applies to all following fragments
                    decrypt_url = decrypt_info.get('URI')
                    decrypt_info = parse_m3u8_attributes(line[11:])  # strip '#EXT-X-KEY:'
                    if decrypt_info['METHOD'] == 'AES-128':
                        if 'IV' in decrypt_info:
                            # IV is hex ('0x...'); pad to 128 bits
                            decrypt_info['IV'] = binascii.unhexlify(decrypt_info['IV'][2:].zfill(32))
                        if not re.match(r'^https?://', decrypt_info['URI']):
                            decrypt_info['URI'] = urllib.parse.urljoin(
                                man_url, decrypt_info['URI'])
                        if extra_query:
                            decrypt_info['URI'] = update_url_query(decrypt_info['URI'], extra_query)
                        # Invalidate the cached key if the key URI changed
                        if decrypt_url != decrypt_info['URI']:
                            decrypt_info['KEY'] = None

                elif line.startswith('#EXT-X-MEDIA-SEQUENCE'):
                    media_sequence = int(line[22:])  # strip '#EXT-X-MEDIA-SEQUENCE:'
                elif line.startswith('#EXT-X-BYTERANGE'):
                    splitted_byte_range = line[17:].split('@')  # strip '#EXT-X-BYTERANGE:'
                    sub_range_start = int(splitted_byte_range[1]) if len(splitted_byte_range) == 2 else byte_range['end']
                    byte_range = {
                        'start': sub_range_start,
                        'end': sub_range_start + int(splitted_byte_range[0]),
                    }
                elif is_ad_fragment_start(line):
                    ad_frag_next = True
                elif is_ad_fragment_end(line):
                    ad_frag_next = False
                elif line.startswith('#EXT-X-DISCONTINUITY'):
                    discontinuity_count += 1
                i += 1

        # We only download the first fragment during the test
        if self.params.get('test', False):
            fragments = [fragments[0] if fragments else None]

        if real_downloader:
            # Hand the parsed fragment list to the external downloader
            info_dict['fragments'] = fragments
            fd = real_downloader(self.ydl, self.params)
            # TODO: Make progress updates work without hooking twice
            # for ph in self._progress_hooks:
            #     fd.add_progress_hook(ph)
            return fd.real_download(filename, info_dict)

        if is_webvtt:
            def pack_fragment(frag_content, frag_index):
                # Re-pack a WebVTT fragment: adjust cue timestamps using the
                # X-TIMESTAMP-MAP magic, dedupe cues across fragments, and
                # handle 33-bit MPEG-TS timestamp rollover via extra_state.
                output = io.StringIO()
                adjust = 0
                overflow = False
                mpegts_last = None
                for block in webvtt.parse_fragment(frag_content):
                    if isinstance(block, webvtt.CueBlock):
                        extra_state['webvtt_mpegts_last'] = mpegts_last
                        if overflow:
                            extra_state['webvtt_mpegts_adjust'] += 1
                            overflow = False
                        block.start += adjust
                        block.end += adjust

                        dedup_window = extra_state.setdefault('webvtt_dedup_window', [])

                        ready = []

                        i = 0
                        is_new = True
                        while i < len(dedup_window):
                            wcue = dedup_window[i]
                            wblock = webvtt.CueBlock.from_json(wcue)
                            i += 1
                            if wblock.hinges(block):
                                # Contiguous continuation of a windowed cue:
                                # extend it instead of emitting a duplicate
                                wcue['end'] = block.end
                                is_new = False
                                continue
                            if wblock == block:
                                is_new = False
                                continue
                            if wblock.end > block.start:
                                continue
                            # Cue can no longer collide with new ones — emit it
                            ready.append(wblock)
                            i -= 1
                            del dedup_window[i]

                        if is_new:
                            dedup_window.append(block.as_json)
                        for block in ready:
                            block.write_into(output)

                        # we only emit cues once they fall out of the duplicate window
                        continue
                    elif isinstance(block, webvtt.Magic):
                        # take care of MPEG PES timestamp overflow
                        if block.mpegts is None:
                            block.mpegts = 0
                        extra_state.setdefault('webvtt_mpegts_adjust', 0)
                        block.mpegts += extra_state['webvtt_mpegts_adjust'] << 33
                        if block.mpegts < extra_state.get('webvtt_mpegts_last', 0):
                            overflow = True
                            block.mpegts += 1 << 33
                        mpegts_last = block.mpegts

                        if frag_index == 1:
                            # First fragment establishes the timestamp baseline
                            extra_state['webvtt_mpegts'] = block.mpegts or 0
                            extra_state['webvtt_local'] = block.local or 0
                            # XXX: block.local = block.mpegts = None ?
                        else:
                            if block.mpegts is not None and block.local is not None:
                                adjust = (
                                    (block.mpegts - extra_state.get('webvtt_mpegts', 0))
                                    - (block.local - extra_state.get('webvtt_local', 0))
                                )
                            # Only the first fragment's magic header is emitted
                            continue
                    elif isinstance(block, webvtt.HeaderBlock):
                        if frag_index != 1:
                            # XXX: this should probably be silent as well
                            # or verify that all segments contain the same data
                            self.report_warning(bug_reports_message(
                                'Discarding a %s block found in the middle of the stream; '
                                'if the subtitles display incorrectly,'
                                % (type(block).__name__)))
                            continue
                    block.write_into(output)

                return output.getvalue().encode()

            def fin_fragments():
                # Flush cues still held in the dedup window at end of stream
                dedup_window = extra_state.get('webvtt_dedup_window')
                if not dedup_window:
                    return b''

                output = io.StringIO()
                for cue in dedup_window:
                    webvtt.CueBlock.from_json(cue).write_into(output)

                return output.getvalue().encode()

            # NOTE(review): this branch discards download_and_append_fragments'
            # return value (implicitly returns None), unlike the branch below
            self.download_and_append_fragments(
                ctx, fragments, info_dict, pack_func=pack_fragment, finish_func=fin_fragments)
        else:
            return self.download_and_append_fragments(ctx, fragments, info_dict)
|