Merge branch 'master' into gedi-overhaul

commit eb6e27dcf5
nixxo, 2023-01-15 12:33:53 +01:00
4 changed files with 244 additions and 140 deletions

View File

@@ -32,6 +32,7 @@ from ..utils import (
     FormatSorter,
     GeoRestrictedError,
     GeoUtils,
+    HEADRequest,
     LenientJSONDecoder,
     RegexNotFoundError,
     RetryManager,
@@ -80,6 +81,7 @@ from ..utils import (
     update_Request,
     update_url_query,
     url_basename,
+    urlhandle_detect_ext,
     url_or_none,
     urljoin,
     variadic,
@@ -2178,13 +2180,23 @@ class InfoExtractor:
         return self._parse_m3u8_vod_duration(m3u8_vod or '', video_id)

     def _parse_m3u8_vod_duration(self, m3u8_vod, video_id):
-        if '#EXT-X-PLAYLIST-TYPE:VOD' not in m3u8_vod:
+        if '#EXT-X-ENDLIST' not in m3u8_vod:
             return None
         return int(sum(
             float(line[len('#EXTINF:'):].split(',')[0])
             for line in m3u8_vod.splitlines() if line.startswith('#EXTINF:'))) or None

+    def _extract_mpd_vod_duration(
+            self, mpd_url, video_id, note=None, errnote=None, data=None, headers={}, query={}):
+
+        mpd_doc = self._download_xml(
+            mpd_url, video_id,
+            note='Downloading MPD VOD manifest' if note is None else note,
+            errnote='Failed to download VOD manifest' if errnote is None else errnote,
+            fatal=False, data=data, headers=headers, query=query) or {}
+        return int_or_none(parse_duration(mpd_doc.get('mediaPresentationDuration')))
+
     @staticmethod
     def _xpath_ns(path, namespace=None):
         if not namespace:
@@ -2311,7 +2323,8 @@ class InfoExtractor:
             height = int_or_none(medium.get('height'))
             proto = medium.get('proto')
             ext = medium.get('ext')
-            src_ext = determine_ext(src)
+            src_ext = determine_ext(src, default_ext=None) or ext or urlhandle_detect_ext(
+                self._request_webpage(HEADRequest(src), video_id, note='Requesting extension info', fatal=False))
             streamer = medium.get('streamer') or base
             if proto == 'rtmp' or streamer.startswith('rtmp'):
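
Note on the new InfoExtractor helper above: an MPD manifest's root element carries a mediaPresentationDuration attribute as an ISO 8601 duration, which _extract_mpd_vod_duration converts to seconds via parse_duration and int_or_none. A rough standalone sketch of the same computation, independent of yt-dlp internals (function name and regex are illustrative, not part of the patch):

import re
import xml.etree.ElementTree as ET

def mpd_vod_duration(mpd_xml):
    # Read the ISO 8601 duration (e.g. 'PT1H2M3.5S') from the MPD root and return whole seconds.
    value = ET.fromstring(mpd_xml).get('mediaPresentationDuration')
    if not value:
        return None
    match = re.fullmatch(
        r'P(?:(?P<d>\d+)D)?T?(?:(?P<h>\d+)H)?(?:(?P<m>\d+)M)?(?:(?P<s>\d+(?:\.\d+)?)S)?', value)
    if not match:
        return None
    parts = {key: float(val or 0) for key, val in match.groupdict().items()}
    return int(parts['d'] * 86400 + parts['h'] * 3600 + parts['m'] * 60 + parts['s'])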

View File

@@ -184,9 +184,10 @@ class DRTVIE(InfoExtractor):
         data = self._download_json(
             programcard_url, video_id, 'Downloading video JSON', query=query)

-        supplementary_data = self._download_json(
-            SERIES_API % f'/episode/{raw_video_id}', raw_video_id,
-            default={}) if re.search(r'_\d+$', raw_video_id) else {}
+        supplementary_data = {}
+        if re.search(r'_\d+$', raw_video_id):
+            supplementary_data = self._download_json(
+                SERIES_API % f'/episode/{raw_video_id}', raw_video_id, fatal=False) or {}

         title = str_or_none(data.get('Title')) or re.sub(
             r'\s*\|\s*(?:TV\s*\|\s*DR|DRTV)$', '',
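
Note on the DRTV change above: the SERIES_API episode endpoint is only queried for IDs ending in an underscore plus digits, and the request is now non-fatal. A minimal illustration of the guard (the sample IDs below are hypothetical):

import re

def has_episode_suffix(raw_video_id):
    # True only for IDs ending in '_<digits>', the form the episode API understands.
    return bool(re.search(r'_\d+$', raw_video_id))

print(has_episode_suffix('klassen-6_71523'))   # True  (hypothetical ID)
print(has_episode_suffix('dr1-livestream'))    # False (hypothetical ID)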

View File

@@ -8,24 +8,26 @@ from .adobepass import AdobePassIE
 from ..compat import compat_urllib_parse_unquote
 from ..utils import (
     ExtractorError,
+    HEADRequest,
+    RegexNotFoundError,
+    UserNotLive,
+    clean_html,
     int_or_none,
     parse_age_limit,
     parse_duration,
-    RegexNotFoundError,
     smuggle_url,
+    str_or_none,
     traverse_obj,
     try_get,
-    unified_strdate,
+    unescapeHTML,
     unified_timestamp,
     update_url_query,
     url_basename,
-    variadic,
+    xpath_attr,
 )


 class NBCIE(ThePlatformIE):  # XXX: Do not subclass from concrete IE
-    _VALID_URL = r'https?(?P<permalink>://(?:www\.)?nbc\.com/(?:classic-tv/)?[^/]+/video/[^/]+/(?P<id>n?\d+))'
+    _VALID_URL = r'https?(?P<permalink>://(?:www\.)?nbc\.com/(?:classic-tv/)?[^/]+/video/[^/]+/(?P<id>(?:NBCE|n)?\d+))'
     _TESTS = [
         {
@@ -38,10 +40,18 @@ class NBCIE(ThePlatformIE):  # XXX: Do not subclass from concrete IE
                 'timestamp': 1424246400,
                 'upload_date': '20150218',
                 'uploader': 'NBCU-COM',
+                'episode': 'Jimmy Fallon Surprises Fans at Ben & Jerry\'s',
+                'episode_number': 86,
+                'season': 'Season 2',
+                'season_number': 2,
+                'series': 'Tonight Show: Jimmy Fallon',
+                'duration': 237.0,
+                'chapters': 'count:1',
+                'tags': 'count:4',
+                'thumbnail': r're:https?://.+\.jpg',
             },
             'params': {
-                # m3u8 download
-                'skip_download': True,
+                'skip_download': 'm3u8',
             },
         },
         {
@@ -55,11 +65,7 @@ class NBCIE(ThePlatformIE):  # XXX: Do not subclass from concrete IE
                 'upload_date': '20141206',
                 'uploader': 'NBCU-COM',
             },
-            'params': {
-                # m3u8 download
-                'skip_download': True,
-            },
-            'skip': 'Only works from US',
+            'skip': 'page not found',
         },
         {
             # HLS streams requires the 'hdnea3' cookie
@@ -73,10 +79,59 @@ class NBCIE(ThePlatformIE):  # XXX: Do not subclass from concrete IE
                 'upload_date': '20090315',
                 'uploader': 'NBCU-COM',
             },
-            'params': {
-                'skip_download': True,
-            },
-            'skip': 'Only works from US',
+            'skip': 'page not found',
+        },
+        {
+            # manifest url does not have extension
+            'url': 'https://www.nbc.com/the-golden-globe-awards/video/oprah-winfrey-receives-cecil-b-de-mille-award-at-the-2018-golden-globes/3646439',
+            'info_dict': {
+                'id': '3646439',
+                'ext': 'mp4',
+                'title': 'Oprah Winfrey Receives Cecil B. de Mille Award at the 2018 Golden Globes',
+                'episode': 'Oprah Winfrey Receives Cecil B. de Mille Award at the 2018 Golden Globes',
+                'episode_number': 1,
+                'season': 'Season 75',
+                'season_number': 75,
+                'series': 'The Golden Globe Awards',
+                'description': 'Oprah Winfrey receives the Cecil B. de Mille Award at the 75th Annual Golden Globe Awards.',
+                'uploader': 'NBCU-COM',
+                'upload_date': '20180107',
+                'timestamp': 1515312000,
+                'duration': 570.0,
+                'tags': 'count:8',
+                'thumbnail': r're:https?://.+\.jpg',
+                'chapters': 'count:1',
+            },
+            'params': {
+                'skip_download': 'm3u8',
+            },
+        },
+        {
+            # new video_id format
+            'url': 'https://www.nbc.com/quantum-leap/video/bens-first-leap-nbcs-quantum-leap/NBCE125189978',
+            'info_dict': {
+                'id': 'NBCE125189978',
+                'ext': 'mp4',
+                'title': 'Ben\'s First Leap | NBC\'s Quantum Leap',
+                'description': 'md5:a82762449b7ec4bb83291a7b355ebf8e',
+                'uploader': 'NBCU-COM',
+                'series': 'Quantum Leap',
+                'season': 'Season 1',
+                'season_number': 1,
+                'episode': 'Ben\'s First Leap | NBC\'s Quantum Leap',
+                'episode_number': 1,
+                'duration': 170.171,
+                'chapters': [],
+                'timestamp': 1663956155,
+                'upload_date': '20220923',
+                'tags': 'count:10',
+                'age_limit': 0,
+                'thumbnail': r're:https?://.+\.jpg',
+            },
+            'expected_warnings': ['Ignoring subtitle tracks'],
+            'params': {
+                'skip_download': 'm3u8',
+            },
         },
         {
             'url': 'https://www.nbc.com/classic-tv/charles-in-charge/video/charles-in-charge-pilot/n3310',
@@ -600,32 +655,36 @@ class NBCStationsIE(InfoExtractor):
     _TESTS = [{
         'url': 'https://www.nbclosangeles.com/news/local/large-structure-fire-in-downtown-la-prompts-smoke-odor-advisory/2968618/',
-        'md5': '462041d91bd762ef5a38b7d85d6dc18f',
         'info_dict': {
             'id': '2968618',
             'ext': 'mp4',
             'title': 'Large Structure Fire in Downtown LA Prompts Smoke Odor Advisory',
-            'description': None,
+            'description': 'md5:417ed3c2d91fe9d301e6db7b0942f182',
             'timestamp': 1661135892,
-            'upload_date': '20220821',
+            'upload_date': '20220822',
             'uploader': 'NBC 4',
-            'uploader_id': 'KNBC',
+            'channel_id': 'KNBC',
             'channel': 'nbclosangeles',
         },
+        'params': {
+            'skip_download': 'm3u8',
+        },
     }, {
         'url': 'https://www.telemundoarizona.com/responde/huracan-complica-reembolso-para-televidente-de-tucson/2247002/',
-        'md5': '0917dcf7885be1023a9220630d415f67',
         'info_dict': {
             'id': '2247002',
             'ext': 'mp4',
             'title': 'Huracán complica que televidente de Tucson reciba reembolso',
             'description': 'md5:af298dc73aab74d4fca6abfb12acb6cf',
             'timestamp': 1660886507,
             'upload_date': '20220819',
             'uploader': 'Telemundo Arizona',
-            'uploader_id': 'KTAZ',
+            'channel_id': 'KTAZ',
             'channel': 'telemundoarizona',
         },
+        'params': {
+            'skip_download': 'm3u8',
+        },
     }]

     _RESOLUTIONS = {
@@ -644,48 +703,39 @@ class NBCStationsIE(InfoExtractor):
             r'<script>var\s*nbc\s*=', webpage, 'NBC JSON data', video_id)

         pdk_acct = nbc_data.get('pdkAcct') or 'Yh1nAC'
         fw_ssid = traverse_obj(nbc_data, ('video', 'fwSSID'))
-        fw_network_id = traverse_obj(nbc_data, ('video', 'fwNetworkID'), default='382114')

-        video_data = self._parse_json(self._html_search_regex(
-            r'data-videos="([^"]*)"', webpage, 'video data', default='{}'), video_id)
-        video_data = variadic(video_data)[0]
-        video_data.update(self._parse_json(self._html_search_regex(
-            r'data-meta="([^"]*)"', webpage, 'metadata', default='{}'), video_id))
+        video_data = self._search_json(
+            r'data-videos="\[', webpage, 'video data', video_id, default={}, transform_source=unescapeHTML)
+        video_data.update(self._search_json(
+            r'data-meta="', webpage, 'metadata', video_id, default={}, transform_source=unescapeHTML))
+        if not video_data:
+            raise ExtractorError('No video metadata found in webpage', expected=True)

-        formats = []
+        info, formats, subtitles = {}, [], {}
+        is_live = int_or_none(video_data.get('mpx_is_livestream')) == 1
+        query = {
+            'formats': 'MPEG-DASH none,M3U none,MPEG-DASH none,MPEG4,MP3',
+            'format': 'SMIL',
+            'fwsitesection': fw_ssid,
+            'fwNetworkID': traverse_obj(nbc_data, ('video', 'fwNetworkID'), default='382114'),
+            'pprofile': 'ots_desktop_html',
+            'sensitive': 'false',
+            'w': '1920',
+            'h': '1080',
+            'mode': 'LIVE' if is_live else 'on-demand',
+            'vpaid': 'script',
+            'schema': '2.0',
+            'sdk': 'PDK 6.1.3',
+        }

-        if video_data.get('mpx_is_livestream') == '1':
-            live = True
-            player_id = traverse_obj(
-                video_data, 'mpx_m3upid', ('video', 'meta', 'mpx_m3upid'), 'mpx_pid',
-                ('video', 'meta', 'mpx_pid'), 'pid_streaming_web_medium')
-            query = {
-                'mbr': 'true',
-                'assetTypes': 'LegacyRelease',
-                'fwsitesection': fw_ssid,
-                'fwNetworkID': fw_network_id,
-                'pprofile': 'ots_desktop_html',
-                'sensitive': 'false',
-                'w': '1920',
-                'h': '1080',
-                'rnd': '1660303',
-                'mode': 'LIVE',
-                'format': 'SMIL',
-                'tracking': 'true',
-                'formats': 'M3U+none,MPEG-DASH+none,MPEG4,MP3',
-                'vpaid': 'script',
-                'schema': '2.0',
-                'SDK': 'PDK+6.1.3',
-            }
-            info = {
-                'title': f'{channel} livestream',
-            }
-
+        if is_live:
+            player_id = traverse_obj(video_data, ((None, ('video', 'meta')), (
+                'mpx_m3upid', 'mpx_pid', 'pid_streaming_web_medium')), get_all=False)
+            info['title'] = f'{channel} livestream'
         else:
-            live = False
-            player_id = traverse_obj(
-                video_data, ('video', 'meta', 'pid_streaming_web_high'), 'pid_streaming_web_high',
-                ('video', 'meta', 'mpx_pid'), 'mpx_pid')
+            player_id = traverse_obj(video_data, (
+                (None, ('video', 'meta')), ('pid_streaming_web_high', 'mpx_pid')), get_all=False)

             date_string = traverse_obj(video_data, 'date_string', 'date_gmt')
             if date_string:
@@ -693,63 +743,58 @@ class NBCStationsIE(InfoExtractor):
                     r'datetime="([^"]+)"', date_string, 'date string', fatal=False)
             else:
                 date_string = traverse_obj(
-                    nbc_data, ('dataLayer', 'adobe', 'prop70'), ('dataLayer', 'adobe', 'eVar70'),
-                    ('dataLayer', 'adobe', 'eVar59'))
+                    nbc_data, ('dataLayer', 'adobe', ('prop70', 'eVar70', 'eVar59')), get_all=False)

-            video_url = traverse_obj(video_data, ('video', 'meta', 'mp4_url'), 'mp4_url')
+            video_url = traverse_obj(video_data, ((None, ('video', 'meta')), 'mp4_url'), get_all=False)
             if video_url:
-                height = url_basename(video_url).split('-')[1].split('p')[0]
+                height = self._search_regex(r'\d+-(\d+)p', url_basename(video_url), 'height', default=None)
                 formats.append({
                     'url': video_url,
                     'ext': 'mp4',
                     'width': int_or_none(self._RESOLUTIONS.get(height)),
                     'height': int_or_none(height),
-                    'format_id': f'http-{height}',
+                    'format_id': 'http-mp4',
                 })

-            query = {
-                'mbr': 'true',
-                'assetTypes': 'LegacyRelease',
-                'fwsitesection': fw_ssid,
-                'fwNetworkID': fw_network_id,
-                'format': 'redirect',
-                'manifest': 'm3u',
-                'Tracking': 'true',
-                'Embedded': 'true',
-                'formats': 'MPEG4',
-            }
-            info = {
-                'title': video_data.get('title') or traverse_obj(
-                    nbc_data, ('dataLayer', 'contenttitle'), ('dataLayer', 'title'),
-                    ('dataLayer', 'adobe', 'prop22'), ('dataLayer', 'id')),
-                'description': traverse_obj(video_data, 'summary', 'excerpt', 'video_hero_text'),
-                'upload_date': str_or_none(unified_strdate(date_string)),
-                'timestamp': int_or_none(unified_timestamp(date_string)),
-            }
+            info.update({
+                'title': video_data.get('title') or traverse_obj(nbc_data, (
+                    'dataLayer', (None, 'adobe'), ('contenttitle', 'title', 'prop22')), get_all=False),
+                'description':
+                    traverse_obj(video_data, 'summary', 'excerpt', 'video_hero_text')
+                    or clean_html(traverse_obj(nbc_data, ('dataLayer', 'summary'))),
+                'timestamp': unified_timestamp(date_string),
+            })

-        if not player_id:
-            raise ExtractorError(
-                'No video player ID or livestream player ID found in webpage', expected=True)
+        smil = None
+        if player_id and fw_ssid:
+            smil = self._download_xml(
+                f'https://link.theplatform.com/s/{pdk_acct}/{player_id}', video_id,
+                note='Downloading SMIL data', query=query, fatal=is_live)
+        if smil:
+            manifest_url = xpath_attr(smil, './/{*}video', 'src', fatal=is_live)
+            subtitles = self._parse_smil_subtitles(smil, '*')
+            fmts, subs = self._extract_m3u8_formats_and_subtitles(
+                manifest_url, video_id, 'mp4', m3u8_id='hls', fatal=is_live,
+                live=is_live, errnote='No HLS formats found')
+            formats.extend(fmts)
+            self._merge_subtitles(subs, target=subtitles)

-        headers = {'Origin': f'https://www.{channel}.com'}
-        manifest, urlh = self._download_webpage_handle(
-            f'https://link.theplatform.com/s/{pdk_acct}/{player_id}', video_id,
-            headers=headers, query=query, note='Downloading manifest')
-        if live:
-            manifest_url = self._search_regex(r'<video src="([^"]*)', manifest, 'manifest URL')
-        else:
-            manifest_url = urlh.geturl()
-
-        formats.extend(self._extract_m3u8_formats(
-            manifest_url, video_id, 'mp4', headers=headers, m3u8_id='hls',
-            fatal=live, live=live, errnote='No HLS formats found'))
+        if not formats:
+            self.raise_no_formats('No video content found in webpage', expected=True)
+        elif is_live:
+            try:
+                self._request_webpage(
+                    HEADRequest(formats[0]['url']), video_id, note='Checking live status')
+            except ExtractorError:
+                raise UserNotLive(video_id=channel)

         return {
-            'id': str_or_none(video_id),
+            'id': video_id,
             'channel': channel,
-            'uploader': str_or_none(nbc_data.get('on_air_name')),
-            'uploader_id': str_or_none(nbc_data.get('callLetters')),
+            'channel_id': nbc_data.get('callLetters'),
+            'uploader': nbc_data.get('on_air_name'),
             'formats': formats,
-            'is_live': live,
+            'subtitles': subtitles,
+            'is_live': is_live,
             **info,
         }
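
Note on the NBCStations rework above: when the page reports a livestream but no playable manifest is actually being served, the extractor now probes the first format URL with a HEAD request and raises UserNotLive on failure. The idea reduces to a plain HTTP probe; a rough standalone sketch using only the standard library (function name hypothetical, not part of the patch):

import urllib.error
import urllib.request

def stream_is_up(manifest_url, timeout=10):
    # HEAD the manifest; any HTTP or network error is treated as "not currently live".
    request = urllib.request.Request(manifest_url, method='HEAD')
    try:
        with urllib.request.urlopen(request, timeout=timeout) as response:
            return 200 <= response.status < 300
    except urllib.error.URLError:
        return False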

View File

@@ -29,6 +29,7 @@ class SlidesLiveIE(InfoExtractor):
             'thumbnail': r're:^https?://.*\.jpg',
             'thumbnails': 'count:42',
             'chapters': 'count:41',
+            'duration': 1638,
         },
         'params': {
             'skip_download': 'm3u8',
@@ -45,6 +46,7 @@ class SlidesLiveIE(InfoExtractor):
             'thumbnail': r're:^https?://.*\.(?:jpg|png)',
             'thumbnails': 'count:640',
             'chapters': 'count:639',
+            'duration': 9832,
         },
         'params': {
             'skip_download': 'm3u8',
@@ -61,6 +63,7 @@ class SlidesLiveIE(InfoExtractor):
             'timestamp': 1643728135,
             'thumbnails': 'count:3',
             'chapters': 'count:2',
+            'duration': 5889,
         },
         'params': {
             'skip_download': 'm3u8',
@@ -110,6 +113,7 @@ class SlidesLiveIE(InfoExtractor):
             'timestamp': 1629671508,
             'upload_date': '20210822',
             'chapters': 'count:7',
+            'duration': 326,
         },
         'params': {
             'skip_download': 'm3u8',
@@ -126,6 +130,7 @@ class SlidesLiveIE(InfoExtractor):
             'timestamp': 1654714970,
             'upload_date': '20220608',
             'chapters': 'count:6',
+            'duration': 171,
         },
         'params': {
             'skip_download': 'm3u8',
@@ -142,6 +147,7 @@ class SlidesLiveIE(InfoExtractor):
             'timestamp': 1622806321,
             'upload_date': '20210604',
             'chapters': 'count:15',
+            'duration': 306,
         },
         'params': {
             'skip_download': 'm3u8',
@@ -158,6 +164,7 @@ class SlidesLiveIE(InfoExtractor):
             'timestamp': 1654714896,
             'upload_date': '20220608',
             'chapters': 'count:8',
+            'duration': 295,
         },
         'params': {
             'skip_download': 'm3u8',
@@ -174,6 +181,7 @@ class SlidesLiveIE(InfoExtractor):
             'thumbnails': 'count:22',
             'upload_date': '20220608',
             'chapters': 'count:21',
+            'duration': 294,
         },
         'params': {
             'skip_download': 'm3u8',
@@ -196,6 +204,7 @@ class SlidesLiveIE(InfoExtractor):
             'thumbnails': 'count:30',
             'upload_date': '20220608',
             'chapters': 'count:31',
+            'duration': 272,
         },
     }, {
         'info_dict': {
@@ -237,6 +246,7 @@ class SlidesLiveIE(InfoExtractor):
             'thumbnails': 'count:43',
             'upload_date': '20220608',
             'chapters': 'count:43',
+            'duration': 315,
         },
     }, {
         'info_dict': {
@@ -285,6 +295,23 @@ class SlidesLiveIE(InfoExtractor):
         'params': {
             'skip_download': 'm3u8',
         },
+    }, {
+        # /v3/ slides, .png only, service_name = yoda
+        'url': 'https://slideslive.com/38983994',
+        'info_dict': {
+            'id': '38983994',
+            'ext': 'mp4',
+            'title': 'Zero-Shot AutoML with Pretrained Models',
+            'timestamp': 1662384834,
+            'upload_date': '20220905',
+            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
+            'thumbnails': 'count:23',
+            'chapters': 'count:22',
+            'duration': 295,
+        },
+        'params': {
+            'skip_download': 'm3u8',
+        },
     }, {
         # service_name = yoda
         'url': 'https://slideslive.com/38903721/magic-a-scientific-resurrection-of-an-esoteric-legend',
@@ -311,6 +338,7 @@ class SlidesLiveIE(InfoExtractor):
             'timestamp': 1629671508,
             'upload_date': '20210822',
             'chapters': 'count:7',
+            'duration': 326,
         },
         'params': {
             'skip_download': 'm3u8',
@@ -369,15 +397,28 @@ class SlidesLiveIE(InfoExtractor):

         return m3u8_dict

-    def _extract_formats(self, cdn_hostname, path, video_id):
-        formats = []
-        formats.extend(self._extract_m3u8_formats(
+    def _extract_formats_and_duration(self, cdn_hostname, path, video_id, skip_duration=False):
+        formats, duration = [], None
+
+        hls_formats = self._extract_m3u8_formats(
             f'https://{cdn_hostname}/{path}/master.m3u8',
-            video_id, 'mp4', m3u8_id='hls', fatal=False, live=True))
-        formats.extend(self._extract_mpd_formats(
-            f'https://{cdn_hostname}/{path}/master.mpd',
-            video_id, mpd_id='dash', fatal=False))
-        return formats
+            video_id, 'mp4', m3u8_id='hls', fatal=False, live=True)
+        if hls_formats:
+            if not skip_duration:
+                duration = self._extract_m3u8_vod_duration(
+                    hls_formats[0]['url'], video_id, note='Extracting duration from HLS manifest')
+            formats.extend(hls_formats)
+
+        dash_formats = self._extract_mpd_formats(
+            f'https://{cdn_hostname}/{path}/master.mpd', video_id, mpd_id='dash', fatal=False)
+        if dash_formats:
+            if not duration and not skip_duration:
+                duration = self._extract_mpd_vod_duration(
+                    f'https://{cdn_hostname}/{path}/master.mpd', video_id,
+                    note='Extracting duration from DASH manifest')
+            formats.extend(dash_formats)
+
+        return formats, duration

     def _real_extract(self, url):
         video_id = self._match_id(url)
@@ -406,44 +447,42 @@ class SlidesLiveIE(InfoExtractor):
         assert service_name in ('url', 'yoda', 'vimeo', 'youtube')
         service_id = player_info['service_id']

-        slides_info_url = None
-        slides, slides_info = [], []
+        slide_url_template = 'https://slides.slideslive.com/%s/slides/original/%s%s'
+        slides, slides_info = {}, []
+
         if player_info.get('slides_json_url'):
-            slides_info_url = player_info['slides_json_url']
-            slides = traverse_obj(self._download_json(
-                slides_info_url, video_id, fatal=False,
-                note='Downloading slides JSON', errnote=False), 'slides', expected_type=list) or []
-            for slide_id, slide in enumerate(slides, start=1):
+            slides = self._download_json(
+                player_info['slides_json_url'], video_id, fatal=False,
+                note='Downloading slides JSON', errnote=False) or {}
+            slide_ext_default = '.png'
+            slide_quality = traverse_obj(slides, ('slide_qualities', 0))
+            if slide_quality:
+                slide_ext_default = '.jpg'
+                slide_url_template = f'https://cdn.slideslive.com/data/presentations/%s/slides/{slide_quality}/%s%s'
+            for slide_id, slide in enumerate(traverse_obj(slides, ('slides', ...), expected_type=dict), 1):
                 slides_info.append((
                     slide_id, traverse_obj(slide, ('image', 'name')),
+                    traverse_obj(slide, ('image', 'extname'), default=slide_ext_default),
                     int_or_none(slide.get('time'), scale=1000)))

         if not slides and player_info.get('slides_xml_url'):
-            slides_info_url = player_info['slides_xml_url']
             slides = self._download_xml(
-                slides_info_url, video_id, fatal=False,
+                player_info['slides_xml_url'], video_id, fatal=False,
                 note='Downloading slides XML', errnote='Failed to download slides info')
-            for slide_id, slide in enumerate(slides.findall('./slide'), start=1):
+            slide_url_template = 'https://cdn.slideslive.com/data/presentations/%s/slides/big/%s%s'
+            for slide_id, slide in enumerate(slides.findall('./slide') if slides else [], 1):
                 slides_info.append((
-                    slide_id, xpath_text(slide, './slideName', 'name'),
+                    slide_id, xpath_text(slide, './slideName', 'name'), '.jpg',
                     int_or_none(xpath_text(slide, './timeSec', 'time'))))

-        slides_version = int(self._search_regex(
-            r'https?://slides\.slideslive\.com/\d+/v(\d+)/\w+\.(?:json|xml)',
-            slides_info_url, 'slides version', default=0))
-        if slides_version < 4:
-            slide_url_template = 'https://cdn.slideslive.com/data/presentations/%s/slides/big/%s.jpg'
-        else:
-            slide_url_template = 'https://slides.slideslive.com/%s/slides/original/%s.png'
-
         chapters, thumbnails = [], []
         if url_or_none(player_info.get('thumbnail')):
             thumbnails.append({'id': 'cover', 'url': player_info['thumbnail']})
-        for slide_id, slide_path, start_time in slides_info:
+        for slide_id, slide_path, slide_ext, start_time in slides_info:
             if slide_path:
                 thumbnails.append({
                     'id': f'{slide_id:03d}',
-                    'url': slide_url_template % (video_id, slide_path),
+                    'url': slide_url_template % (video_id, slide_path, slide_ext),
                 })
             chapters.append({
                 'title': f'Slide {slide_id:03d}',
@@ -473,7 +512,12 @@ class SlidesLiveIE(InfoExtractor):
         if service_name == 'url':
             info['url'] = service_id
         elif service_name == 'yoda':
-            info['formats'] = self._extract_formats(player_info['video_servers'][0], service_id, video_id)
+            formats, duration = self._extract_formats_and_duration(
+                player_info['video_servers'][0], service_id, video_id)
+            info.update({
+                'duration': duration,
+                'formats': formats,
+            })
         else:
             info.update({
                 '_type': 'url_transparent',
@@ -486,7 +530,7 @@ class SlidesLiveIE(InfoExtractor):
                 f'https://player.vimeo.com/video/{service_id}',
                 {'http_headers': {'Referer': url}})

-        video_slides = traverse_obj(slides, (..., 'video', 'id'))
+        video_slides = traverse_obj(slides, ('slides', ..., 'video', 'id'))
         if not video_slides:
             return info
@@ -500,7 +544,7 @@ class SlidesLiveIE(InfoExtractor):
                     'videos': ','.join(video_slides),
                 }, note='Downloading video slides info', errnote='Failed to download video slides info') or {}

-            for slide_id, slide in enumerate(slides, 1):
+            for slide_id, slide in enumerate(traverse_obj(slides, ('slides', ...)), 1):
                 if not traverse_obj(slide, ('video', 'service')) == 'yoda':
                     continue
                 video_path = traverse_obj(slide, ('video', 'id'))
@@ -508,7 +552,8 @@ class SlidesLiveIE(InfoExtractor):
                     video_path, 'video_servers', ...), get_all=False)
                 if not cdn_hostname or not video_path:
                     continue
-                formats = self._extract_formats(cdn_hostname, video_path, video_id)
+                formats, _ = self._extract_formats_and_duration(
+                    cdn_hostname, video_path, video_id, skip_duration=True)
                 if not formats:
                     continue
                 yield {