# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import (
    compat_parse_qs,
    compat_urllib_parse_urlparse,
)
from ..utils import (
    determine_ext,
    dict_get,
    int_or_none,
    orderedSet,
    strip_or_none,
    try_get,
    urljoin,
    compat_str,
)
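

# Shared base class: _extract_video() converts SVT's JSON video description
# (videoReferences, subtitle references, episode metadata, geo-blocking flags)
# into a youtube-dl info dict used by all extractors below.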
class SVTBaseIE(InfoExtractor):
    _GEO_COUNTRIES = ['SE']

    def _extract_video(self, video_info, video_id):
        is_live = dict_get(video_info, ('live', 'simulcast'), default=False)
        m3u8_protocol = 'm3u8' if is_live else 'm3u8_native'
        formats = []
        for vr in video_info['videoReferences']:
            player_type = vr.get('playerType') or vr.get('format')
            vurl = vr['url']
            ext = determine_ext(vurl)
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    vurl, video_id,
                    ext='mp4', entry_protocol=m3u8_protocol,
                    m3u8_id=player_type, fatal=False))
            elif ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    vurl + '?hdcore=3.3.0', video_id,
                    f4m_id=player_type, fatal=False))
            elif ext == 'mpd':
                if player_type == 'dashhbbtv':
                    formats.extend(self._extract_mpd_formats(
                        vurl, video_id, mpd_id=player_type, fatal=False))
            else:
                formats.append({
                    'format_id': player_type,
                    'url': vurl,
                })
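        # An empty format list combined with the geoBlockedSweden rights flag
        # means the clip is geo-restricted rather than broken, so report that
        # explicitly instead of failing with a generic "no formats" error.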
        if not formats and video_info.get('rights', {}).get('geoBlockedSweden'):
            self.raise_geo_restricted(
                'This video is only available in Sweden',
                countries=self._GEO_COUNTRIES)
        self._sort_formats(formats)

        subtitles = {}
        subtitle_references = dict_get(video_info, ('subtitles', 'subtitleReferences'))
        if isinstance(subtitle_references, list):
            for sr in subtitle_references:
                subtitle_url = sr.get('url')
                subtitle_lang = sr.get('language', 'sv')
                if subtitle_url:
                    if determine_ext(subtitle_url) == 'm3u8':
                        # TODO(yan12125): handle WebVTT in m3u8 manifests
                        continue
                    subtitles.setdefault(subtitle_lang, []).append({'url': subtitle_url})

        title = video_info.get('title')

        series = video_info.get('programTitle')
        season_number = int_or_none(video_info.get('season'))
        episode = video_info.get('episodeTitle')
        episode_number = int_or_none(video_info.get('episodeNumber'))

        duration = int_or_none(dict_get(video_info, ('materialLength', 'contentDuration')))
        age_limit = None
        adult = dict_get(
            video_info, ('inappropriateForChildren', 'blockedForChildren'),
            skip_false_values=False)
        if adult is not None:
            age_limit = 18 if adult else 0

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'subtitles': subtitles,
            'duration': duration,
            'age_limit': age_limit,
            'series': series,
            'season_number': season_number,
            'episode': episode,
            'episode_number': episode_number,
            'is_live': is_live,
        }
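

# SVTIE handles the legacy svt.se "wd" widget embeds; _extract_url() lets other
# extractors locate such embedded iframes inside arbitrary webpages.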
class SVTIE(SVTBaseIE):
    _VALID_URL = r'https?://(?:www\.)?svt\.se/wd\?(?:.*?&)?widgetId=(?P<widget_id>\d+)&.*?\barticleId=(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.svt.se/wd?widgetId=23991&sectionId=541&articleId=2900353&type=embed&contextSectionId=123&autostart=false',
        'md5': '33e9a5d8f646523ce0868ecfb0eed77d',
        'info_dict': {
            'id': '2900353',
            'ext': 'mp4',
            'title': 'Stjärnorna skojar till det - under SVT-intervjun',
            'duration': 27,
            'age_limit': 0,
        },
    }

    @staticmethod
    def _extract_url(webpage):
        mobj = re.search(
            r'(?:<iframe src|href)="(?P<url>%s[^"]*)"' % SVTIE._VALID_URL, webpage)
        if mobj:
            return mobj.group('url')

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        widget_id = mobj.group('widget_id')
        article_id = mobj.group('id')

        info = self._download_json(
            'http://www.svt.se/wd?widgetId=%s&articleId=%s&format=json&type=embed&output=json' % (widget_id, article_id),
            article_id)

        info_dict = self._extract_video(info['video'], article_id)
        info_dict['title'] = info['context']['title']

        return info_dict


class SVTPlayBaseIE(SVTBaseIE):
    _SVTPLAY_RE = r'root\s*\[\s*(["\'])_*svtplay\1\s*\]\s*=\s*(?P<json>{.+?})\s*;\s*\n'
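

# _SVTPLAY_RE captures the JSON state object that svtplay.se embeds in its pages
# as assignments like "root['__svtplay'] = {...};"; both SVTPlayIE and
# SVTSeriesIE parse it.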
class SVTPlayIE(SVTPlayBaseIE):
    IE_DESC = 'SVT Play and Öppet arkiv'
    _VALID_URL = r'''(?x)
                    (?:
                        svt:(?P<svt_id>[^/?#&]+)|
                        https?://(?:www\.)?(?:svtplay|oppetarkiv)\.se/(?:video|klipp|kanaler)/(?P<id>[^/?#&]+)
                    )
                    '''
    _TESTS = [{
        'url': 'http://www.svtplay.se/video/5996901/flygplan-till-haile-selassie/flygplan-till-haile-selassie-2',
        'md5': '2b6704fe4a28801e1a098bbf3c5ac611',
        'info_dict': {
            'id': '5996901',
            'ext': 'mp4',
            'title': 'Flygplan till Haile Selassie',
            'duration': 3527,
            'thumbnail': r're:^https?://.*[\.-]jpg$',
            'age_limit': 0,
            'subtitles': {
                'sv': [{
                    'ext': 'wsrt',
                }]
            },
        },
    }, {
        # geo restricted to Sweden
        'url': 'http://www.oppetarkiv.se/video/5219710/trollflojten',
        'only_matching': True,
    }, {
        'url': 'http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg',
        'only_matching': True,
    }, {
        'url': 'https://www.svtplay.se/kanaler/svt1',
        'only_matching': True,
    }, {
        'url': 'svt:1376446-003A',
        'only_matching': True,
    }, {
        'url': 'svt:14278044',
        'only_matching': True,
    }]

    def _adjust_title(self, info):
        if info['is_live']:
            info['title'] = self._live_title(info['title'])
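
    # _extract_by_video_id() queries the api.svt.se video endpoint directly; it
    # serves both "svt:<id>" URLs and the data-video-id fallback in
    # _real_extract() below.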
    def _extract_by_video_id(self, video_id, webpage=None):
        data = self._download_json(
            'https://api.svt.se/video/%s' % video_id,
            video_id, headers=self.geo_verification_headers())
        info_dict = self._extract_video(data, video_id)
        if not info_dict.get('title'):
            title = dict_get(info_dict, ('episode', 'series'))
            if not title and webpage:
                title = re.sub(
                    r'\s*\|\s*.+?$', '', self._og_search_title(webpage))
            if not title:
                title = video_id
            info_dict['title'] = title
        self._adjust_title(info_dict)
        return info_dict

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id, svt_id = mobj.group('id', 'svt_id')

        if svt_id:
            return self._extract_by_video_id(svt_id)

        webpage = self._download_webpage(url, video_id)

        data = self._parse_json(
            self._search_regex(
                self._SVTPLAY_RE, webpage, 'embedded data', default='{}',
                group='json'),
            video_id, fatal=False)

        thumbnail = self._og_search_thumbnail(webpage)

        if data:
            video_info = try_get(
                data, lambda x: x['context']['dispatcher']['stores']['VideoTitlePageStore']['data']['video'],
                dict)
            if video_info:
                info_dict = self._extract_video(video_info, video_id)
                info_dict.update({
                    'title': data['context']['dispatcher']['stores']['MetaStore']['title'],
                    'thumbnail': thumbnail,
                })
                self._adjust_title(info_dict)
                return info_dict

        svt_id = self._search_regex(
            r'<video[^>]+data-video-id=["\']([\da-zA-Z-]+)',
            webpage, 'video id')

        return self._extract_by_video_id(svt_id, webpage)
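

# SVTSeriesIE builds a playlist for a whole series, or for a single season when
# the URL carries a ?tab=<season-slug> query parameter, from the same embedded
# __svtplay state object.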
class SVTSeriesIE(SVTPlayBaseIE):
    _VALID_URL = r'https?://(?:www\.)?svtplay\.se/(?P<id>[^/?&#]+)'
    _TESTS = [{
        'url': 'https://www.svtplay.se/rederiet',
        'info_dict': {
            'id': 'rederiet',
            'title': 'Rederiet',
            'description': 'md5:505d491a58f4fcf6eb418ecab947e69e',
        },
        'playlist_mincount': 318,
    }, {
        'url': 'https://www.svtplay.se/rederiet?tab=sasong2',
        'info_dict': {
            'id': 'rederiet-sasong2',
            'title': 'Rederiet - Säsong 2',
            'description': 'md5:505d491a58f4fcf6eb418ecab947e69e',
        },
        'playlist_count': 12,
    }]

    @classmethod
    def suitable(cls, url):
        return False if SVTIE.suitable(url) or SVTPlayIE.suitable(url) else super(SVTSeriesIE, cls).suitable(url)

    def _real_extract(self, url):
        series_id = self._match_id(url)

        qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
        season_slug = qs.get('tab', [None])[0]
        if season_slug:
            series_id += '-%s' % season_slug

        webpage = self._download_webpage(
            url, series_id, 'Downloading series page')

        root = self._parse_json(
            self._search_regex(
                self._SVTPLAY_RE, webpage, 'content', group='json'),
            series_id)

        season_name = None

        entries = []
        for season in root['relatedVideoContent']['relatedVideosAccordion']:
            if not isinstance(season, dict):
                continue
            if season_slug:
                if season.get('slug') != season_slug:
                    continue
                season_name = season.get('name')
            videos = season.get('videos')
            if not isinstance(videos, list):
                continue
            for video in videos:
                content_url = video.get('contentUrl')
                if not content_url or not isinstance(content_url, compat_str):
                    continue
                entries.append(
                    self.url_result(
                        urljoin(url, content_url),
                        ie=SVTPlayIE.ie_key(),
                        video_title=video.get('title')
                    ))

        metadata = root.get('metaData')
        if not isinstance(metadata, dict):
            metadata = {}

        title = metadata.get('title')
        season_name = season_name or season_slug
        if title and season_name:
            title = '%s - %s' % (title, season_name)
        elif season_slug:
            title = season_slug

        return self.playlist_result(
            entries, series_id, title, metadata.get('description'))
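

# SVTPageIE turns svt.se article pages into playlists by collecting every
# data-video-id attribute on the page and delegating each video to SVTPlayIE
# via an "svt:<id>" URL.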
class SVTPageIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?svt\.se/(?:[^/]+/)*(?P<id>[^/?&#]+)'
    _TESTS = [{
        'url': 'https://www.svt.se/sport/oseedat/guide-sommartraningen-du-kan-gora-var-och-nar-du-vill',
        'info_dict': {
            'id': 'guide-sommartraningen-du-kan-gora-var-och-nar-du-vill',
            'title': 'GUIDE: Sommarträning du kan göra var och när du vill',
        },
        'playlist_count': 7,
    }, {
        'url': 'https://www.svt.se/nyheter/inrikes/ebba-busch-thor-kd-har-delvis-ratt-om-no-go-zoner',
        'info_dict': {
            'id': 'ebba-busch-thor-kd-har-delvis-ratt-om-no-go-zoner',
            'title': 'Ebba Busch Thor har bara delvis rätt om ”no-go-zoner”',
        },
        'playlist_count': 1,
    }, {
        # only programTitle
        'url': 'http://www.svt.se/sport/ishockey/jagr-tacklar-giroux-under-intervjun',
        'info_dict': {
            'id': '2900353',
            'ext': 'mp4',
            'title': 'Stjärnorna skojar till det - under SVT-intervjun',
            'duration': 27,
            'age_limit': 0,
        },
    }, {
        'url': 'https://www.svt.se/nyheter/lokalt/vast/svt-testar-tar-nagon-upp-skrapet-1',
        'only_matching': True,
    }, {
        'url': 'https://www.svt.se/vader/manadskronikor/maj2018',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        return False if SVTIE.suitable(url) else super(SVTPageIE, cls).suitable(url)

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)

        entries = [
            self.url_result(
                'svt:%s' % video_id, ie=SVTPlayIE.ie_key(), video_id=video_id)
            for video_id in orderedSet(re.findall(
                r'data-video-id=["\'](\d+)', webpage))]

        title = strip_or_none(self._og_search_title(webpage, default=None))

        return self.playlist_result(entries, playlist_id, title)
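

# Usage sketch (illustrative only, not part of the extractor module): these
# classes are exercised through youtube-dl's standard entry points, e.g.
#
#   youtube-dl "http://www.svtplay.se/video/5996901/flygplan-till-haile-selassie/flygplan-till-haile-selassie-2"
#
# or programmatically:
#
#   from youtube_dl import YoutubeDL
#   with YoutubeDL({'skip_download': True}) as ydl:
#       info = ydl.extract_info('svt:14278044', download=False)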