Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2024-11-22 02:15:12 +00:00)
Update to ytdl-commit-4460329
44603290e5

Except: gedi, tmz
7422a2194f
8cb4b71909
d81421af4b
parent 0744a815b7
commit 7c60c33efe

9 changed files with 239 additions and 146 deletions
@@ -6,25 +6,21 @@
 from .common import InfoExtractor
 from ..utils import (
     determine_ext,
-    js_to_json,
+    int_or_none,
     url_or_none,
 )
 
 
 class APAIE(InfoExtractor):
-    _VALID_URL = r'https?://[^/]+\.apa\.at/embed/(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
+    _VALID_URL = r'(?P<base_url>https?://[^/]+\.apa\.at)/embed/(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
     _TESTS = [{
         'url': 'http://uvp.apa.at/embed/293f6d17-692a-44e3-9fd5-7b178f3a1029',
         'md5': '2b12292faeb0a7d930c778c7a5b4759b',
         'info_dict': {
-            'id': 'jjv85FdZ',
+            'id': '293f6d17-692a-44e3-9fd5-7b178f3a1029',
             'ext': 'mp4',
-            'title': '"Blau ist mysteriös": Die Blue Man Group im Interview',
-            'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
+            'title': '293f6d17-692a-44e3-9fd5-7b178f3a1029',
             'thumbnail': r're:^https?://.*\.jpg$',
-            'duration': 254,
-            'timestamp': 1519211149,
-            'upload_date': '20180221',
         },
     }, {
         'url': 'https://uvp-apapublisher.sf.apa.at/embed/2f94e9e6-d945-4db2-9548-f9a41ebf7b78',
@@ -46,9 +42,11 @@ def _extract_urls(webpage):
                 webpage)]
 
     def _real_extract(self, url):
-        video_id = self._match_id(url)
+        mobj = re.match(self._VALID_URL, url)
+        video_id, base_url = mobj.group('id', 'base_url')
 
-        webpage = self._download_webpage(url, video_id)
+        webpage = self._download_webpage(
+            '%s/player/%s' % (base_url, video_id), video_id)
 
         jwplatform_id = self._search_regex(
             r'media[iI]d\s*:\s*["\'](?P<id>[a-zA-Z0-9]{8})', webpage,
@@ -59,16 +57,18 @@ def _real_extract(self, url):
                 'jwplatform:' + jwplatform_id, ie='JWPlatform',
                 video_id=video_id)
 
-        sources = self._parse_json(
-            self._search_regex(
-                r'sources\s*=\s*(\[.+?\])\s*;', webpage, 'sources'),
-            video_id, transform_source=js_to_json)
+        def extract(field, name=None):
+            return self._search_regex(
+                r'\b%s["\']\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1' % field,
+                webpage, name or field, default=None, group='value')
+
+        title = extract('title') or video_id
+        description = extract('description')
+        thumbnail = extract('poster', 'thumbnail')
 
         formats = []
-        for source in sources:
-            if not isinstance(source, dict):
-                continue
-            source_url = url_or_none(source.get('file'))
+        for format_id in ('hls', 'progressive'):
+            source_url = url_or_none(extract(format_id))
             if not source_url:
                 continue
             ext = determine_ext(source_url)
@@ -77,18 +77,19 @@ def _real_extract(self, url):
                     source_url, video_id, 'mp4', entry_protocol='m3u8_native',
                     m3u8_id='hls', fatal=False))
             else:
+                height = int_or_none(self._search_regex(
+                    r'(\d+)\.mp4', source_url, 'height', default=None))
                 formats.append({
                     'url': source_url,
+                    'format_id': format_id,
+                    'height': height,
                 })
         self._sort_formats(formats)
 
-        thumbnail = self._search_regex(
-            r'image\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage,
-            'thumbnail', fatal=False, group='url')
-
         return {
             'id': video_id,
-            'title': video_id,
+            'title': title,
+            'description': description,
             'thumbnail': thumbnail,
             'formats': formats,
         }
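For reference, a minimal stand-alone sketch of the field-scraping pattern the rewritten APA extractor relies on. The page snippet and printed values below are made up; only the two regexes come from the hunk above.

    import re

    # Hypothetical player-page snippet; the field names mirror the ones the new code reads.
    PAGE = ('var config = {"title": "Some clip", "poster": "https://example.invalid/p.jpg", '
            '"hls": "https://example.invalid/master.m3u8", '
            '"progressive": "https://example.invalid/clip_720.mp4"};')

    def extract(field):
        # Same idea as the extract() helper in the diff: grab the quoted value that follows `field`.
        m = re.search(r'\b%s["\']\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1' % field, PAGE)
        return m.group('value') if m else None

    print(extract('title'))                        # Some clip
    url = extract('progressive')
    print(url)                                     # https://example.invalid/clip_720.mp4
    print(re.search(r'(\d+)\.mp4', url).group(1))  # 720, stored as the format's 'height'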
@@ -330,6 +330,7 @@ def _download_video_playback_info(self, disco_base, video_id, headers):
                 'videoId': video_id,
                 'wisteriaProperties': {
                     'platform': 'desktop',
+                    'product': 'dplus_us',
                 },
             }).encode('utf-8'))['data']['attributes']['streaming']
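This hunk appears to touch the Discovery playback-info request; the only visible change is the extra 'product' key in the JSON payload. A sketch of just the body being built, using a placeholder video id, since the endpoint, headers and the rest of the payload are not shown in the hunk:

    import json

    video_id = '123456'  # placeholder
    payload = {
        'videoId': video_id,
        'wisteriaProperties': {
            'platform': 'desktop',
            'product': 'dplus_us',  # the field this commit adds
        },
    }
    data = json.dumps(payload).encode('utf-8')  # sent as the request body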
@@ -1081,6 +1081,7 @@
     SafariApiIE,
     SafariCourseIE,
 )
+from .samplefocus import SampleFocusIE
 from .sapo import SapoIE
 from .savefrom import SaveFromIE
 from .sbs import SBSIE
@@ -413,7 +413,8 @@ class PeerTubeIE(InfoExtractor):
                             peertube3\.cpy\.re|
                             peertube2\.cpy\.re|
                             videos\.tcit\.fr|
-                            peertube\.cpy\.re
+                            peertube\.cpy\.re|
+                            canard\.tube
                         )'''
    _UUID_RE = r'[\da-fA-F]{8}-[\da-fA-F]{4}-[\da-fA-F]{4}-[\da-fA-F]{4}-[\da-fA-F]{12}'
    _API_BASE = 'https://%s/api/v1/videos/%s/%s'
yt_dlp/extractor/samplefocus.py (new file, 100 lines)
@@ -0,0 +1,100 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    extract_attributes,
+    get_element_by_attribute,
+    int_or_none,
+)
+
+
+class SampleFocusIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?samplefocus\.com/samples/(?P<id>[^/?&#]+)'
+    _TESTS = [{
+        'url': 'https://samplefocus.com/samples/lil-peep-sad-emo-guitar',
+        'md5': '48c8d62d60be467293912e0e619a5120',
+        'info_dict': {
+            'id': '40316',
+            'display_id': 'lil-peep-sad-emo-guitar',
+            'ext': 'mp3',
+            'title': 'Lil Peep Sad Emo Guitar',
+            'thumbnail': r're:^https?://.+\.png',
+            'license': 'Standard License',
+            'uploader': 'CapsCtrl',
+            'uploader_id': 'capsctrl',
+            'like_count': int,
+            'comment_count': int,
+            'categories': ['Samples', 'Guitar', 'Electric guitar'],
+        },
+    }, {
+        'url': 'https://samplefocus.com/samples/dababy-style-bass-808',
+        'only_matching': True
+    }, {
+        'url': 'https://samplefocus.com/samples/young-chop-kick',
+        'only_matching': True
+    }]
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+        webpage = self._download_webpage(url, display_id)
+
+        sample_id = self._search_regex(
+            r'<input[^>]+id=(["\'])sample_id\1[^>]+value=(?:["\'])(?P<id>\d+)',
+            webpage, 'sample id', group='id')
+
+        title = self._og_search_title(webpage, fatal=False) or self._html_search_regex(
+            r'<h1>(.+?)</h1>', webpage, 'title')
+
+        mp3_url = self._search_regex(
+            r'<input[^>]+id=(["\'])sample_mp3\1[^>]+value=(["\'])(?P<url>(?:(?!\2).)+)',
+            webpage, 'mp3', fatal=False, group='url') or extract_attributes(self._search_regex(
+            r'<meta[^>]+itemprop=(["\'])contentUrl\1[^>]*>',
+            webpage, 'mp3 url', group=0))['content']
+
+        thumbnail = self._og_search_thumbnail(webpage) or self._html_search_regex(
+            r'<img[^>]+class=(?:["\'])waveform responsive-img[^>]+src=(["\'])(?P<url>(?:(?!\1).)+)',
+            webpage, 'mp3', fatal=False, group='url')
+
+        comments = []
+        for author_id, author, body in re.findall(r'(?s)<p[^>]+class="comment-author"><a[^>]+href="/users/([^"]+)">([^"]+)</a>.+?<p[^>]+class="comment-body">([^>]+)</p>', webpage):
+            comments.append({
+                'author': author,
+                'author_id': author_id,
+                'text': body,
+            })
+
+        uploader_id = uploader = None
+        mobj = re.search(r'>By <a[^>]+href="/users/([^"]+)"[^>]*>([^<]+)', webpage)
+        if mobj:
+            uploader_id, uploader = mobj.groups()
+
+        breadcrumb = get_element_by_attribute('typeof', 'BreadcrumbList', webpage)
+        categories = []
+        if breadcrumb:
+            for _, name in re.findall(r'<span[^>]+property=(["\'])name\1[^>]*>([^<]+)', breadcrumb):
+                categories.append(name)
+
+        def extract_count(klass):
+            return int_or_none(self._html_search_regex(
+                r'<span[^>]+class=(?:["\'])?%s-count[^>]*>(\d+)' % klass,
+                webpage, klass, fatal=False))
+
+        return {
+            'id': sample_id,
+            'title': title,
+            'url': mp3_url,
+            'display_id': display_id,
+            'thumbnail': thumbnail,
+            'uploader': uploader,
+            'license': self._html_search_regex(
+                r'<a[^>]+href=(["\'])/license\1[^>]*>(?P<license>[^<]+)<',
+                webpage, 'license', fatal=False, group='license'),
+            'uploader_id': uploader_id,
+            'like_count': extract_count('sample-%s-favorites' % sample_id),
+            'comment_count': extract_count('comments'),
+            'comments': comments,
+            'categories': categories,
+        }
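A minimal embedding sketch for the new SampleFocus extractor, assuming a yt-dlp build that includes this commit; the expected values are taken from the test case in the file above.

    from yt_dlp import YoutubeDL

    # Resolve a SampleFocus page to its metadata without downloading the MP3.
    with YoutubeDL({'quiet': True}) as ydl:
        info = ydl.extract_info(
            'https://samplefocus.com/samples/lil-peep-sad-emo-guitar', download=False)
    print(info['id'], info['title'], info.get('uploader'))  # e.g. 40316 Lil Peep Sad Emo Guitar CapsCtrl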
@@ -1,92 +1,87 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import json
+import re
+
 from .common import InfoExtractor
-from ..compat import compat_str
+from ..utils import (
+    int_or_none,
+    parse_iso8601,
+    try_get,
+)
 
 
 class TF1IE(InfoExtractor):
-    """TF1 uses the wat.tv player."""
-    _VALID_URL = r'https?://(?:(?:videos|www|lci)\.tf1|(?:www\.)?(?:tfou|ushuaiatv|histoire|tvbreizh))\.fr/(?:[^/]+/)*(?P<id>[^/?#.]+)'
+    _VALID_URL = r'https?://(?:www\.)?tf1\.fr/[^/]+/(?P<program_slug>[^/]+)/videos/(?P<id>[^/?&#]+)\.html'
     _TESTS = [{
-        'url': 'http://videos.tf1.fr/auto-moto/citroen-grand-c4-picasso-2013-presentation-officielle-8062060.html',
-        'info_dict': {
-            'id': '10635995',
-            'ext': 'mp4',
-            'title': 'Citroën Grand C4 Picasso 2013 : présentation officielle',
-            'description': 'Vidéo officielle du nouveau Citroën Grand C4 Picasso, lancé à l\'automne 2013.',
-        },
-        'params': {
-            # Sometimes wat serves the whole file with the --test option
-            'skip_download': True,
-        },
-        'expected_warnings': ['HTTP Error 404'],
-    }, {
-        'url': 'http://www.tfou.fr/chuggington/videos/le-grand-mysterioso-chuggington-7085291-739.html',
-        'info_dict': {
-            'id': 'le-grand-mysterioso-chuggington-7085291-739',
-            'ext': 'mp4',
-            'title': 'Le grand Mystérioso - Chuggington',
-            'description': 'Le grand Mystérioso - Emery rêve qu\'un article lui soit consacré dans le journal.',
-            'upload_date': '20150103',
-        },
-        'params': {
-            # Sometimes wat serves the whole file with the --test option
-            'skip_download': True,
-        },
-        'skip': 'HTTP Error 410: Gone',
-    }, {
-        'url': 'http://www.tf1.fr/tf1/koh-lanta/videos/replay-koh-lanta-22-mai-2015.html',
-        'only_matching': True,
-    }, {
-        'url': 'http://lci.tf1.fr/sept-a-huit/videos/sept-a-huit-du-24-mai-2015-8611550.html',
-        'only_matching': True,
-    }, {
-        'url': 'http://www.tf1.fr/hd1/documentaire/videos/mylene-farmer-d-une-icone.html',
-        'only_matching': True,
-    }, {
         'url': 'https://www.tf1.fr/tmc/quotidien-avec-yann-barthes/videos/quotidien-premiere-partie-11-juin-2019.html',
         'info_dict': {
             'id': '13641379',
             'ext': 'mp4',
             'title': 'md5:f392bc52245dc5ad43771650c96fb620',
-            'description': 'md5:44bc54f0a21322f5b91d68e76a544eae',
+            'description': 'md5:a02cdb217141fb2d469d6216339b052f',
             'upload_date': '20190611',
+            'timestamp': 1560273989,
+            'duration': 1738,
+            'series': 'Quotidien avec Yann Barthès',
+            'tags': ['intégrale', 'quotidien', 'Replay'],
         },
         'params': {
             # Sometimes wat serves the whole file with the --test option
             'skip_download': True,
+            'format': 'bestvideo',
         },
+    }, {
+        'url': 'http://www.tf1.fr/tf1/koh-lanta/videos/replay-koh-lanta-22-mai-2015.html',
+        'only_matching': True,
+    }, {
+        'url': 'http://www.tf1.fr/hd1/documentaire/videos/mylene-farmer-d-une-icone.html',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
-        video_id = self._match_id(url)
-
-        webpage = self._download_webpage(url, video_id)
-
-        wat_id = None
-
-        data = self._parse_json(
-            self._search_regex(
-                r'__APOLLO_STATE__\s*=\s*({.+?})\s*(?:;|</script>)', webpage,
-                'data', default='{}'), video_id, fatal=False)
-
-        if data:
-            try:
-                wat_id = next(
-                    video.get('streamId')
-                    for key, video in data.items()
-                    if isinstance(video, dict)
-                    and video.get('slug') == video_id)
-                if not isinstance(wat_id, compat_str) or not wat_id.isdigit():
-                    wat_id = None
-            except StopIteration:
-                pass
-
-        if not wat_id:
-            wat_id = self._html_search_regex(
-                (r'(["\'])(?:https?:)?//www\.wat\.tv/embedframe/.*?(?P<id>\d{8})\1',
-                 r'(["\']?)streamId\1\s*:\s*(["\']?)(?P<id>\d+)\2'),
-                webpage, 'wat id', group='id')
-
-        return self.url_result('wat:%s' % wat_id, 'Wat')
+        program_slug, slug = re.match(self._VALID_URL, url).groups()
+        video = self._download_json(
+            'https://www.tf1.fr/graphql/web', slug, query={
+                'id': '9b80783950b85247541dd1d851f9cc7fa36574af015621f853ab111a679ce26f',
+                'variables': json.dumps({
+                    'programSlug': program_slug,
+                    'slug': slug,
+                })
+            })['data']['videoBySlug']
+        wat_id = video['streamId']
+
+        tags = []
+        for tag in (video.get('tags') or []):
+            label = tag.get('label')
+            if not label:
+                continue
+            tags.append(label)
+
+        decoration = video.get('decoration') or {}
+
+        thumbnails = []
+        for source in (try_get(decoration, lambda x: x['image']['sources'], list) or []):
+            source_url = source.get('url')
+            if not source_url:
+                continue
+            thumbnails.append({
+                'url': source_url,
+                'width': int_or_none(source.get('width')),
+            })
+
+        return {
+            '_type': 'url_transparent',
+            'id': wat_id,
+            'url': 'wat:' + wat_id,
+            'title': video.get('title'),
+            'thumbnails': thumbnails,
+            'description': decoration.get('description'),
+            'timestamp': parse_iso8601(video.get('date')),
+            'duration': int_or_none(try_get(video, lambda x: x['publicPlayingInfos']['duration'])),
+            'tags': tags,
+            'series': decoration.get('programLabel'),
+            'season_number': int_or_none(video.get('season')),
+            'episode_number': int_or_none(video.get('episode')),
+        }
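The rewritten TF1 extractor resolves a page slug through a GraphQL persisted query and then defers to the WAT extractor via the returned streamId. A rough stand-alone sketch of that first request, built only from values visible in the diff; any extra headers or parameters the site may require are omitted.

    import json
    import urllib.parse
    import urllib.request

    program_slug, slug = 'quotidien-avec-yann-barthes', 'quotidien-premiere-partie-11-juin-2019'
    query = urllib.parse.urlencode({
        'id': '9b80783950b85247541dd1d851f9cc7fa36574af015621f853ab111a679ce26f',
        'variables': json.dumps({'programSlug': program_slug, 'slug': slug}),
    })
    with urllib.request.urlopen('https://www.tf1.fr/graphql/web?' + query) as resp:
        video = json.load(resp)['data']['videoBySlug']
    print(video['streamId'])  # WAT id, e.g. 13641379, handed off as 'wat:<id>'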
@@ -4,9 +4,10 @@
 from .common import InfoExtractor
 from ..compat import compat_str
 from ..utils import (
-    unified_strdate,
-    HEADRequest,
+    ExtractorError,
     int_or_none,
+    try_get,
+    unified_strdate,
 )
 
 
@@ -29,6 +30,7 @@ class WatIE(InfoExtractor):
                 'skip_download': True,
             },
             'expected_warnings': ['HTTP Error 404'],
+            'skip': 'This content is no longer available',
         },
         {
             'url': 'http://www.wat.tv/video/gregory-lemarchal-voix-ange-6z1v7_6ygkj_.html',
@@ -40,8 +42,10 @@ class WatIE(InfoExtractor):
                 'upload_date': '20140816',
             },
             'expected_warnings': ["Ce contenu n'est pas disponible pour l'instant."],
+            'skip': 'This content is no longer available',
         },
     ]
+    _GEO_BYPASS = False
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
@@ -49,71 +53,54 @@ def _real_extract(self, url):
 
         # 'contentv4' is used in the website, but it also returns the related
         # videos, we don't need them
+        # video_data = self._download_json(
+        #     'http://www.wat.tv/interface/contentv4s/' + video_id, video_id)
         video_data = self._download_json(
-            'http://www.wat.tv/interface/contentv4s/' + video_id, video_id)
+            'https://mediainfo.tf1.fr/mediainfocombo/' + video_id,
+            video_id, query={'context': 'MYTF1'})
         video_info = video_data['media']
 
         error_desc = video_info.get('error_desc')
         if error_desc:
-            self.report_warning(
-                '%s returned error: %s' % (self.IE_NAME, error_desc))
+            if video_info.get('error_code') == 'GEOBLOCKED':
+                self.raise_geo_restricted(error_desc, video_info.get('geoList'))
+            raise ExtractorError(error_desc, expected=True)
 
-        chapters = video_info['chapters']
-        if chapters:
-            first_chapter = chapters[0]
-
-            def video_id_for_chapter(chapter):
-                return chapter['tc_start'].split('-')[0]
-
-            if video_id_for_chapter(first_chapter) != video_id:
-                self.to_screen('Multipart video detected')
-                entries = [self.url_result('wat:%s' % video_id_for_chapter(chapter)) for chapter in chapters]
-                return self.playlist_result(entries, video_id, video_info['title'])
-        # Otherwise we can continue and extract just one part, we have to use
-        # the video id for getting the video url
-        else:
-            first_chapter = video_info
-
-        title = first_chapter['title']
-
-        def extract_url(path_template, url_type):
-            req_url = 'http://www.wat.tv/get/%s' % (path_template % video_id)
-            head = self._request_webpage(HEADRequest(req_url), video_id, 'Extracting %s url' % url_type, fatal=False)
-            if head:
-                red_url = head.geturl()
-                if req_url != red_url:
-                    return red_url
-            return None
+        title = video_info['title']
 
         formats = []
-        manifest_urls = self._download_json(
-            'http://www.wat.tv/get/webhtml/' + video_id, video_id)
-        m3u8_url = manifest_urls.get('hls')
-        if m3u8_url:
-            formats.extend(self._extract_m3u8_formats(
-                m3u8_url, video_id, 'mp4',
-                'm3u8_native', m3u8_id='hls', fatal=False))
-        mpd_url = manifest_urls.get('mpd')
-        if mpd_url:
-            formats.extend(self._extract_mpd_formats(
-                mpd_url.replace('://das-q1.tf1.fr/', '://das-q1-ssl.tf1.fr/'),
-                video_id, mpd_id='dash', fatal=False))
-        self._sort_formats(formats)
-
-        date_diffusion = first_chapter.get('date_diffusion') or video_data.get('configv4', {}).get('estatS4')
-        upload_date = unified_strdate(date_diffusion) if date_diffusion else None
-        duration = None
-        files = video_info['files']
-        if files:
-            duration = int_or_none(files[0].get('duration'))
+
+        def extract_formats(manifest_urls):
+            for f, f_url in manifest_urls.items():
+                if not f_url:
+                    continue
+                if f in ('dash', 'mpd'):
+                    formats.extend(self._extract_mpd_formats(
+                        f_url.replace('://das-q1.tf1.fr/', '://das-q1-ssl.tf1.fr/'),
+                        video_id, mpd_id='dash', fatal=False))
+                elif f == 'hls':
+                    formats.extend(self._extract_m3u8_formats(
+                        f_url, video_id, 'mp4',
+                        'm3u8_native', m3u8_id='hls', fatal=False))
+
+        delivery = video_data.get('delivery') or {}
+        extract_formats({delivery.get('format'): delivery.get('url')})
+        if not formats:
+            if delivery.get('drm'):
+                raise ExtractorError('This video is DRM protected.', expected=True)
+            manifest_urls = self._download_json(
+                'http://www.wat.tv/get/webhtml/' + video_id, video_id, fatal=False)
+            if manifest_urls:
+                extract_formats(manifest_urls)
+
+        self._sort_formats(formats)
 
         return {
             'id': video_id,
             'title': title,
-            'thumbnail': first_chapter.get('preview'),
-            'description': first_chapter.get('description'),
-            'view_count': int_or_none(video_info.get('views')),
-            'upload_date': upload_date,
-            'duration': duration,
+            'thumbnail': video_info.get('preview'),
+            'upload_date': unified_strdate(try_get(
+                video_data, lambda x: x['mediametrie']['chapters'][0]['estatS4'])),
+            'duration': int_or_none(video_info.get('duration')),
             'formats': formats,
         }
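The WAT rewrite swaps the old contentv4s and HEAD-redirect flow for the mediainfocombo endpoint plus a small dispatcher over the delivery/webhtml manifest URLs. A rough sketch of the new metadata call, again using only what the diff shows; the example id is borrowed from the TF1 test above.

    import json
    import urllib.request

    wat_id = '13641379'  # example stream id
    url = 'https://mediainfo.tf1.fr/mediainfocombo/%s?context=MYTF1' % wat_id
    with urllib.request.urlopen(url) as resp:
        data = json.load(resp)
    media = data['media']                  # title, preview, duration, error_desc/error_code, ...
    delivery = data.get('delivery') or {}  # format ('hls'/'dash'), url, drm flag
    print(media.get('title'), delivery.get('format'))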
@@ -25,6 +25,7 @@ class YouPornIE(InfoExtractor):
             'title': 'Sex Ed: Is It Safe To Masturbate Daily?',
             'description': 'Love & Sex Answers: http://bit.ly/DanAndJenn -- Is It Unhealthy To Masturbate Daily?',
             'thumbnail': r're:^https?://.*\.jpg$',
+            'duration': 210,
             'uploader': 'Ask Dan And Jennifer',
             'upload_date': '20101217',
             'average_rating': int,
@@ -54,6 +55,7 @@ class YouPornIE(InfoExtractor):
         'params': {
             'skip_download': True,
         },
+        'skip': '404',
     }, {
         'url': 'https://www.youporn.com/embed/505835/sex-ed-is-it-safe-to-masturbate-daily/',
         'only_matching': True,
@@ -153,6 +155,8 @@ def _real_extract(self, url):
         thumbnail = self._search_regex(
             r'(?:imageurl\s*=|poster\s*:)\s*(["\'])(?P<thumbnail>.+?)\1',
             webpage, 'thumbnail', fatal=False, group='thumbnail')
+        duration = int_or_none(self._html_search_meta(
+            'video:duration', webpage, 'duration', fatal=False))
 
         uploader = self._html_search_regex(
             r'(?s)<div[^>]+class=["\']submitByLink["\'][^>]*>(.+?)</div>',
@@ -194,6 +198,7 @@ def extract_tag_box(regex, title):
             'title': title,
             'description': description,
             'thumbnail': thumbnail,
+            'duration': duration,
             'uploader': uploader,
             'upload_date': upload_date,
             'average_rating': average_rating,
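The YouPorn change reads the duration from the page's video:duration meta tag via _html_search_meta. Roughly equivalent stand-alone logic; the regex and snippet below are illustrative, not the library's own implementation.

    import re

    page = '<meta property="video:duration" content="210" />'
    m = re.search(
        r'<meta[^>]+(?:property|name)=["\']video:duration["\'][^>]+content=["\'](\d+)', page)
    duration = int(m.group(1)) if m else None
    print(duration)  # 210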
@@ -1472,7 +1472,7 @@ def _real_extract(self, url):
                     'Refetching age-gated info webpage',
                     'unable to download video info webpage', query={
                         'video_id': video_id,
-                        'eurl': 'https://www.youtube.com/embed/' + video_id,
+                        'eurl': 'https://youtube.googleapis.com/v/' + video_id,
                     }, fatal=False)),
                 lambda x: x['player_response'][0],
                 compat_str) or '{}', video_id)
@@ -1625,6 +1625,8 @@ def feed_entry(name):
                    # Youtube throttles chunks >~10M
                    'http_chunk_size': 10485760,
                }
+            if dct.get('ext'):
+                dct['container'] = dct['ext'] + '_dash'
             formats.append(dct)
 
         hls_manifest_url = streaming_data.get('hlsManifestUrl')
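The second YouTube hunk appears to label the adaptive (DASH) formats with a container string derived from the extension. A trivial illustration with a made-up format dict:

    dct = {'format_id': '137', 'ext': 'mp4', 'url': 'https://example.invalid/video.mp4'}
    if dct.get('ext'):
        dct['container'] = dct['ext'] + '_dash'
    print(dct['container'])  # mp4_dash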