Mirror of https://github.com/yt-dlp/yt-dlp.git
Synced 2024-10-31 23:02:40 +00:00
[ie/PlaySuisse] Add login support (#9077)

Closes #7974
Authored by: chkuendig

This commit is contained in:
parent c91d8b1899
commit cae6e46107

1 changed file with 50 additions and 3 deletions
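With this change, PlaySuisse credentials can be supplied the same way as for other yt-dlp extractors: with --username/--password, or with --netrc and a machine entry named playsuisse (the _NETRC_MACHINE value added below). As an illustration only, not part of the patch, here is a minimal sketch using the Python API with placeholder credentials and a placeholder video ID:

import yt_dlp

# Placeholder credentials and video ID, for illustration only.
opts = {
    'username': 'user@example.com',
    'password': 'correct-horse-battery-staple',
}
with yt_dlp.YoutubeDL(opts) as ydl:
    # With credentials set, PlaySuisseIE._perform_login runs before extraction.
    info = ydl.extract_info('https://www.playsuisse.ch/watch/12345', download=False)
    print(info.get('title'))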
yt_dlp/extractor/playsuisse.py

@@ -1,10 +1,18 @@
 import json
 
 from .common import InfoExtractor
-from ..utils import int_or_none, traverse_obj
+from ..utils import (
+    ExtractorError,
+    int_or_none,
+    parse_qs,
+    traverse_obj,
+    update_url_query,
+    urlencode_postdata,
+)
 
 
 class PlaySuisseIE(InfoExtractor):
+    _NETRC_MACHINE = 'playsuisse'
     _VALID_URL = r'https?://(?:www\.)?playsuisse\.ch/(?:watch|detail)/(?:[^#]*[?&]episodeId=)?(?P<id>[0-9]+)'
     _TESTS = [
         {
@@ -134,12 +142,47 @@ class PlaySuisseIE(InfoExtractor):
             id
             url
         }'''
+    _LOGIN_BASE_URL = 'https://login.srgssr.ch/srgssrlogin.onmicrosoft.com'
+    _LOGIN_PATH = 'B2C_1A__SignInV2'
+    _ID_TOKEN = None
+
+    def _perform_login(self, username, password):
+        login_page = self._download_webpage(
+            'https://www.playsuisse.ch/api/sso/login', None, note='Downloading login page',
+            query={'x': 'x', 'locale': 'de', 'redirectUrl': 'https://www.playsuisse.ch/'})
+        settings = self._search_json(r'var\s+SETTINGS\s*=', login_page, 'settings', None)
+
+        csrf_token = settings['csrf']
+        query = {'tx': settings['transId'], 'p': self._LOGIN_PATH}
+
+        status = traverse_obj(self._download_json(
+            f'{self._LOGIN_BASE_URL}/{self._LOGIN_PATH}/SelfAsserted', None, 'Logging in',
+            query=query, headers={'X-CSRF-TOKEN': csrf_token}, data=urlencode_postdata({
+                'request_type': 'RESPONSE',
+                'signInName': username,
+                'password': password
+            }), expected_status=400), ('status', {int_or_none}))
+        if status == 400:
+            raise ExtractorError('Invalid username or password', expected=True)
+
+        urlh = self._request_webpage(
+            f'{self._LOGIN_BASE_URL}/{self._LOGIN_PATH}/api/CombinedSigninAndSignup/confirmed',
+            None, 'Downloading ID token', query={
+                'rememberMe': 'false',
+                'csrf_token': csrf_token,
+                **query,
+                'diags': '',
+            })
+
+        self._ID_TOKEN = traverse_obj(parse_qs(urlh.url), ('id_token', 0))
+        if not self._ID_TOKEN:
+            raise ExtractorError('Login failed')
+
     def _get_media_data(self, media_id):
         # NOTE In the web app, the "locale" header is used to switch between languages,
         # However this doesn't seem to take effect when passing the header here.
         response = self._download_json(
-            'https://4bbepzm4ef.execute-api.eu-central-1.amazonaws.com/prod/graphql',
+            'https://www.playsuisse.ch/api/graphql',
            media_id, data=json.dumps({
                 'operationName': 'AssetWatch',
                 'query': self._GRAPHQL_QUERY,
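Not part of the patch, but for illustration: the ID token is recovered from the query string of the redirect URL returned by the CombinedSigninAndSignup/confirmed request above. A minimal sketch of that extraction step, reusing the same yt-dlp helpers on a hypothetical redirect URL:

from yt_dlp.utils import parse_qs, traverse_obj

# Hypothetical redirect URL; the real one is produced by the confirmed request
# in _perform_login and carries the token issued by the SRG SSR login service.
redirect_url = 'https://www.playsuisse.ch/?id_token=TEST_ID_TOKEN'

# parse_qs() parses the URL's query string into {name: [values]};
# traverse_obj() safely picks the first value, returning None if it is missing.
id_token = traverse_obj(parse_qs(redirect_url), ('id_token', 0))
print(id_token)  # TEST_ID_TOKEN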
@@ -150,6 +193,9 @@ def _get_media_data(self, media_id):
         return response['data']['assetV2']
 
     def _real_extract(self, url):
+        if not self._ID_TOKEN:
+            self.raise_login_required(method='password')
+
         media_id = self._match_id(url)
         media_data = self._get_media_data(media_id)
         info = self._extract_single(media_data)
@@ -168,7 +214,8 @@ def _extract_single(self, media_data):
             if not media.get('url') or media.get('type') != 'HLS':
                 continue
             f, subs = self._extract_m3u8_formats_and_subtitles(
-                media['url'], media_data['id'], 'mp4', m3u8_id='HLS', fatal=False)
+                update_url_query(media['url'], {'id_token': self._ID_TOKEN}),
+                media_data['id'], 'mp4', m3u8_id='HLS', fatal=False)
             formats.extend(f)
             self._merge_subtitles(subs, target=subtitles)
 
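Also for illustration, not part of the patch: for playback, the stored token has to accompany the HLS manifest request, which the change in _extract_single does by appending it as an id_token query parameter. A small sketch of what update_url_query produces here, using a placeholder manifest URL and token:

from yt_dlp.utils import update_url_query

# Placeholder values; the real manifest URL comes from the GraphQL response
# and the real token from _perform_login.
manifest_url = 'https://example.com/asset/master.m3u8'
print(update_url_query(manifest_url, {'id_token': 'TEST_ID_TOKEN'}))
# https://example.com/asset/master.m3u8?id_token=TEST_ID_TOKEN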