Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2024-11-17 01:25:11 +00:00)
[tube8] Fix metadata extraction (closes #17520)
parent 96dbf70de6
commit 79facb2773

1 changed file with 4 additions and 4 deletions
@@ -45,7 +45,7 @@ def _real_extract(self, url):
             r'videoTitle\s*=\s*"([^"]+)', webpage, 'title')
 
         description = self._html_search_regex(
-            r'>Description:</strong>\s*(.+?)\s*<', webpage, 'description', fatal=False)
+            r'(?s)Description:</dt>\s*<dd>(.+?)</dd>', webpage, 'description', fatal=False)
         uploader = self._html_search_regex(
             r'<span class="username">\s*(.+?)\s*<',
             webpage, 'uploader', fatal=False)
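Tube8 apparently replaced the old <strong>-label markup with a <dt>/<dd> definition list, so the description pattern is updated accordingly and gains (?s) so multi-line descriptions still match. A minimal sketch against hypothetical sample markup (the HTML below is an assumption for illustration, not taken from the commit):

import re

# Hypothetical snippet of the new page markup (assumed for illustration only).
webpage = '<dt>Description:</dt>\n<dd>An example\nmulti-line description</dd>'

old_pattern = r'>Description:</strong>\s*(.+?)\s*<'
new_pattern = r'(?s)Description:</dt>\s*<dd>(.+?)</dd>'

print(re.search(old_pattern, webpage))            # None -- the <strong> markup is gone
print(re.search(new_pattern, webpage).group(1))   # 'An example\nmulti-line description'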
@@ -55,19 +55,19 @@ def _real_extract(self, url):
         dislike_count = int_or_none(self._search_regex(
             r'rdownVar\s*=\s*"(\d+)"', webpage, 'dislike count', fatal=False))
         view_count = str_to_int(self._search_regex(
-            r'<strong>Views: </strong>([\d,\.]+)\s*</li>',
+            r'Views:\s*</dt>\s*<dd>([\d,\.]+)',
             webpage, 'view count', fatal=False))
         comment_count = str_to_int(self._search_regex(
             r'<span id="allCommentsCount">(\d+)</span>',
             webpage, 'comment count', fatal=False))
 
         category = self._search_regex(
-            r'Category:\s*</strong>\s*<a[^>]+href=[^>]+>([^<]+)',
+            r'Category:\s*</dt>\s*<dd>\s*<a[^>]+href=[^>]+>([^<]+)',
             webpage, 'category', fatal=False)
         categories = [category] if category else None
 
         tags_str = self._search_regex(
-            r'(?s)Tags:\s*</strong>(.+?)</(?!a)',
+            r'(?s)Tags:\s*</dt>\s*<dd>(.+?)</(?!a)',
             webpage, 'tags', fatal=False)
         tags = [t for t in re.findall(
             r'<a[^>]+href=[^>]+>([^<]+)', tags_str)] if tags_str else None
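The view count, category and tags fields get the same <strong>-to-<dt>/<dd> adjustment. A minimal sketch of the new patterns against assumed sample markup (the HTML and values below are illustrative, not from the commit); in the extractor, str_to_int and the list comprehension then turn the captured strings into the final fields:

import re

# Assumed sample of the new definition-list markup; not taken from the commit itself.
webpage = (
    '<dt>Views:</dt> <dd>1,234,567</dd>'
    '<dt>Category:</dt> <dd><a href="/cat/example">Example</a></dd>'
    '<dt>Tags:</dt> <dd><a href="/t/one">one</a> <a href="/t/two">two</a></dd>'
)

view_count = re.search(r'Views:\s*</dt>\s*<dd>([\d,\.]+)', webpage).group(1)
category = re.search(r'Category:\s*</dt>\s*<dd>\s*<a[^>]+href=[^>]+>([^<]+)', webpage).group(1)
tags_str = re.search(r'(?s)Tags:\s*</dt>\s*<dd>(.+?)</(?!a)', webpage).group(1)
tags = re.findall(r'<a[^>]+href=[^>]+>([^<]+)', tags_str)

print(view_count)  # '1,234,567' (str_to_int then normalizes this to an int)
print(category)    # 'Example'
print(tags)        # ['one', 'two']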