
[extractor/FranceCulture] Fix extractor (#3874)

Closes #3742
Authored by: aurelg, pukkandan
Aurélien Grosdidier authored on 2022-06-11 01:22:34 +02:00, committed by GitHub
Parent: 14c3a98049
Commit: d05460e5fe
2 changed files with 31 additions and 110 deletions

yt_dlp/extractor/common.py

@@ -1188,11 +1188,11 @@ def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, f
             self.report_warning('unable to extract %s' % _name + bug_reports_message())
             return None
 
-    def _search_json(self, start_pattern, string, name, video_id, *, end_pattern='', fatal=True, **kwargs):
+    def _search_json(self, start_pattern, string, name, video_id, *, end_pattern='', contains_pattern='.+', fatal=True, **kwargs):
         """Searches string for the JSON object specified by start_pattern"""
         # NB: end_pattern is only used to reduce the size of the initial match
         return self._parse_json(
-            self._search_regex(rf'{start_pattern}\s*(?P<json>{{.+}})\s*{end_pattern}',
+            self._search_regex(rf'{start_pattern}\s*(?P<json>{{{contains_pattern}}})\s*{end_pattern}',
                                string, name, group='json', fatal=fatal) or '{}',
             video_id, fatal=fatal, ignore_extra=True, **kwargs) or {}

yt_dlp/extractor/franceculture.py

@@ -1,125 +1,46 @@
-import re
-
 from .common import InfoExtractor
-from ..utils import (
-    determine_ext,
-    extract_attributes,
-    int_or_none,
-    traverse_obj,
-    unified_strdate,
-)
+from ..utils import int_or_none, parse_duration, unified_strdate
 
 
 class FranceCultureIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?franceculture\.fr/emissions/(?:[^/]+/)*(?P<id>[^/?#&]+)'
-    _TESTS = [{
-        # playlist
-        'url': 'https://www.franceculture.fr/emissions/serie/hasta-dente',
-        'playlist_count': 12,
-        'info_dict': {
-            'id': 'hasta-dente',
-            'title': 'Hasta Dente',
-            'description': 'md5:57479af50648d14e9bb649e6b1f8f911',
-            'thumbnail': r're:^https?://.*\.jpg$',
-            'upload_date': '20201024',
-        },
-        'playlist': [{
+    _VALID_URL = r'https?://(?:www\.)?radiofrance\.fr/franceculture/podcasts/(?:[^?#]+/)?(?P<display_id>[^?#]+)-(?P<id>\d+)($|[?#])'
+    _TESTS = [
+        {
+            'url': 'https://www.radiofrance.fr/franceculture/podcasts/science-en-questions/la-physique-d-einstein-aiderait-elle-a-comprendre-le-cerveau-8440487',
             'info_dict': {
-                'id': '3c1c2e55-41a0-11e5-9fe0-005056a87c89',
+                'id': '8440487',
+                'display_id': 'la-physique-d-einstein-aiderait-elle-a-comprendre-le-cerveau',
                 'ext': 'mp3',
-                'title': 'Jeudi, vous avez dit bizarre ?',
-                'description': 'md5:47cf1e00cc21c86b0210279996a812c6',
-                'duration': 604,
-                'upload_date': '20201024',
-                'thumbnail': r're:^https?://.*\.jpg$',
-                'timestamp': 1603576680
+                'title': 'La physique d’Einstein aiderait-elle à comprendre le cerveau ?',
+                'description': 'Existerait-il un pont conceptuel entre la physique de l’espace-temps et les neurosciences ?',
+                'thumbnail': 'https://cdn.radiofrance.fr/s3/cruiser-production/2022/05/d184e7a3-4827-4494-bf94-04ed7b120db4/1200x630_gettyimages-200171095-001.jpg',
+                'upload_date': '20220514',
+                'duration': 2750,
             },
         },
-        ],
-    }, {
-        'url': 'https://www.franceculture.fr/emissions/carnet-nomade/rendez-vous-au-pays-des-geeks',
-        'info_dict': {
-            'id': 'rendez-vous-au-pays-des-geeks',
-            'display_id': 'rendez-vous-au-pays-des-geeks',
-            'ext': 'mp3',
-            'title': 'Rendez-vous au pays des geeks',
-            'thumbnail': r're:^https?://.*\.jpg$',
-            'upload_date': '20140301',
-            'vcodec': 'none',
-            'duration': 3569,
-        },
-    }, {
-        # no thumbnail
-        'url': 'https://www.franceculture.fr/emissions/la-recherche-montre-en-main/la-recherche-montre-en-main-du-mercredi-10-octobre-2018',
-        'only_matching': True,
-    }]
+    ]
 
     def _real_extract(self, url):
-        display_id = self._match_id(url)
+        video_id, display_id = self._match_valid_url(url).group('id', 'display_id')
        webpage = self._download_webpage(url, display_id)
 
-        info = {
-            'id': display_id,
-            'title': self._html_search_regex(
-                r'(?s)<h1[^>]*itemprop="[^"]*name[^"]*"[^>]*>(.+?)</h1>',
-                webpage, 'title', default=self._og_search_title(webpage)),
+        # _search_json_ld doesn't correctly handle this. See https://github.com/yt-dlp/yt-dlp/pull/3874#discussion_r891903846
+        video_data = self._search_json('', webpage, 'audio data', display_id, contains_pattern=r'\s*"@type"\s*:\s*"AudioObject"\s*.+')
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'url': video_data['contentUrl'],
+            'ext': video_data.get('encodingFormat'),
+            'vcodec': 'none' if video_data.get('encodingFormat') == 'mp3' else None,
+            'duration': parse_duration(video_data.get('duration')),
+            'title': self._html_search_regex(r'(?s)<h1[^>]*itemprop="[^"]*name[^"]*"[^>]*>(.+?)</h1>',
+                                             webpage, 'title', default=self._og_search_title(webpage)),
             'description': self._html_search_regex(
-                r'(?s)<div[^>]+class="excerpt"[^>]*>(.*?)</div>', webpage, 'description', default=None),
+                r'(?s)<meta name="description"\s*content="([^"]+)', webpage, 'description', default=None),
             'thumbnail': self._og_search_thumbnail(webpage),
             'uploader': self._html_search_regex(
                 r'(?s)<span class="author">(.*?)</span>', webpage, 'uploader', default=None),
-            'upload_date': unified_strdate(self._html_search_regex(
-                r'(?s)class="teaser-text-date".*?(\d{2}/\d{2}/\d{4})', webpage, 'date', default=None)),
-        }
-
-        playlist_data = self._search_regex(
-            r'''(?sx)
-                <section[^>]+data-xiti-place="[^"]*?liste_episodes[^"?]*?"[^>]*>
-                (.*?)
-                </section>
-            ''',
-            webpage, 'playlist data', fatal=False, default=None)
-
-        if playlist_data:
-            entries = []
-            for item, item_description in re.findall(
-                    r'(?s)(<button[^<]*class="[^"]*replay-button[^>]*>).*?<p[^>]*class="[^"]*teaser-text-chapo[^>]*>(.*?)</p>',
-                    playlist_data):
-                item_attributes = extract_attributes(item)
-                entries.append({
-                    'id': item_attributes.get('data-emission-uuid'),
-                    'url': item_attributes.get('data-url'),
-                    'title': item_attributes.get('data-diffusion-title'),
-                    'duration': int_or_none(traverse_obj(item_attributes, 'data-duration-seconds', 'data-duration-seconds')),
-                    'description': item_description,
-                    'timestamp': int_or_none(item_attributes.get('data-start-time')),
-                    'thumbnail': info['thumbnail'],
-                    'uploader': info['uploader'],
-                })
-            return {
-                '_type': 'playlist',
-                'entries': entries,
-                **info
-            }
-
-        video_data = extract_attributes(self._search_regex(
-            r'''(?sx)
-                (?:
-                    </h1>|
-                    <div[^>]+class="[^"]*?(?:title-zone-diffusion|heading-zone-(?:wrapper|player-button))[^"]*?"[^>]*>
-                ).*?
-                (<button[^>]+data-(?:url|asset-source)="[^"]+"[^>]+>)
-            ''',
-            webpage, 'video data'))
-
-        video_url = traverse_obj(video_data, 'data-url', 'data-asset-source')
-        ext = determine_ext(video_url.lower())
-
-        return {
-            'display_id': display_id,
-            'url': video_url,
-            'ext': ext,
-            'vcodec': 'none' if ext == 'mp3' else None,
-            'duration': int_or_none(video_data.get('data-duration')),
-            **info
+            'upload_date': unified_strdate(self._search_regex(
+                r'"datePublished"\s*:\s*"([^"]+)', webpage, 'timestamp', fatal=False))
         }
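
And as a rough, standalone illustration of the rewritten flow (this is not the extractor itself): the new _VALID_URL captures both a display_id and a numeric id from the podcast URL, and the fields of the AudioObject that _search_json extracts map almost directly onto the returned info dict. The AudioObject values below are invented stand-ins.

import re

VALID_URL = r'https?://(?:www\.)?radiofrance\.fr/franceculture/podcasts/(?:[^?#]+/)?(?P<display_id>[^?#]+)-(?P<id>\d+)($|[?#])'
url = 'https://www.radiofrance.fr/franceculture/podcasts/science-en-questions/la-physique-d-einstein-aiderait-elle-a-comprendre-le-cerveau-8440487'

# Same pattern as the new _VALID_URL, applied to the commit's test URL.
video_id, display_id = re.match(VALID_URL, url).group('id', 'display_id')
print(video_id)    # 8440487
print(display_id)  # la-physique-d-einstein-aiderait-elle-a-comprendre-le-cerveau

# Invented stand-in for the parsed AudioObject JSON-LD (contentUrl and duration are made up).
video_data = {
    '@type': 'AudioObject',
    'contentUrl': 'https://media.example.net/franceculture/episode.mp3',
    'encodingFormat': 'mp3',
    'duration': 'PT45M50S',  # ISO 8601; yt-dlp's parse_duration turns this into 2750 seconds
}

info = {
    'id': video_id,
    'display_id': display_id,
    'url': video_data['contentUrl'],
    'ext': video_data.get('encodingFormat'),
    'vcodec': 'none' if video_data.get('encodingFormat') == 'mp3' else None,
}
print(info)

Title, description, thumbnail, uploader and upload_date are still taken from the HTML with regexes, as in the diff above.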