# yt-dlp extractor module: yt_dlp/extractor/redtube.py
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
merge_dicts,
str_to_int,
unified_strdate,
url_or_none,
)
class RedTubeIE(InfoExtractor):
    """Extract videos from redtube.com pages and embed.redtube.com iframes."""

    _VALID_URL = r'https?://(?:(?:\w+\.)?redtube\.com/|embed\.redtube\.com/\?.*?\bid=)(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://www.redtube.com/66418',
        'md5': 'fc08071233725f26b8f014dba9590005',
        'info_dict': {
            'id': '66418',
            'ext': 'mp4',
            'title': 'Sucked on a toilet',
            'upload_date': '20110811',
            'duration': 596,
            'view_count': int,
            'age_limit': 18,
        }
    }, {
        'url': 'http://embed.redtube.com/?bgcolor=000000&id=1443286',
        'only_matching': True,
    }, {
        'url': 'http://it.redtube.com/66418',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage):
        """Return all embed.redtube.com iframe URLs found in *webpage*."""
        return re.findall(
            r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//embed\.redtube\.com/\?.*?\bid=\d+)',
            webpage)

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Always fetch the canonical www page, even for embed/localized URLs.
        webpage = self._download_webpage(
            'http://www.redtube.com/%s' % video_id, video_id)

        # Surface removed/private videos as expected errors instead of
        # failing later with an opaque "unable to extract" message.
        ERRORS = (
            (('video-deleted-info', '>This video has been removed'), 'has been removed'),
            (('private_video_text', '>This video is private', '>Send a friend request to its owner to be able to view it'), 'is private'),
        )
        for patterns, message in ERRORS:
            if any(p in webpage for p in patterns):
                raise ExtractorError(
                    'Video %s %s' % (video_id, message), expected=True)

        # Prefer JSON-LD metadata; fields missing there are scraped below.
        info = self._search_json_ld(webpage, video_id, default={})

        if not info.get('title'):
            info['title'] = self._html_search_regex(
                (r'<h(\d)[^>]+class="(?:video_title_text|videoTitle|video_title)[^"]*">(?P<title>(?:(?!\1).)+)</h\1>',
                 r'(?:videoTitle|title)\s*:\s*(["\'])(?P<title>(?:(?!\1).)+)\1',),
                webpage, 'title', group='title',
                default=None) or self._og_search_title(webpage)

        formats = []
        # Older page layout: a JS "sources" object mapping quality -> URL.
        sources = self._parse_json(
            self._search_regex(
                r'sources\s*:\s*({.+?})', webpage, 'source', default='{}'),
            video_id, fatal=False)
        if sources and isinstance(sources, dict):
            for format_id, format_url in sources.items():
                # Validate like the mediaDefinition branch below does.
                format_url = url_or_none(format_url)
                if format_url:
                    formats.append({
                        'url': format_url,
                        'format_id': format_id,
                        'height': int_or_none(format_id),
                    })
        # Newer layout: a JS "mediaDefinition" array of media dicts.
        # NOTE: the regex captures a JSON array, so the fallback default
        # is '[]' (was '{}', which parsed to a dict and was discarded).
        medias = self._parse_json(
            self._search_regex(
                r'mediaDefinition["\']?\s*:\s*(\[.+?}\s*\])', webpage,
                'media definitions', default='[]'),
            video_id, fatal=False)
        if medias and isinstance(medias, list):
            for media in medias:
                format_url = url_or_none(media.get('videoUrl'))
                if not format_url:
                    continue
                if media.get('format') == 'hls' or determine_ext(format_url) == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        format_url, video_id, 'mp4',
                        entry_protocol='m3u8_native', m3u8_id='hls',
                        fatal=False))
                    continue
                format_id = media.get('quality')
                formats.append({
                    'url': format_url,
                    'format_id': format_id,
                    'height': int_or_none(format_id),
                })
        if not formats:
            # Last resort: a plain <source> tag embedded in the page.
            video_url = self._html_search_regex(
                r'<source src="(.+?)" type="video/mp4">', webpage, 'video URL')
            formats.append({'url': video_url})
        self._sort_formats(formats)

        thumbnail = self._og_search_thumbnail(webpage)
        upload_date = unified_strdate(self._search_regex(
            r'<span[^>]+>(?:ADDED|Published on) ([^<]+)<',
            webpage, 'upload date', default=None))
        duration = int_or_none(self._og_search_property(
            'video:duration', webpage, default=None) or self._search_regex(
            r'videoDuration\s*:\s*(\d+)', webpage, 'duration', default=None))
        view_count = str_to_int(self._search_regex(
            (r'<div[^>]*>Views</div>\s*<div[^>]*>\s*([\d,.]+)',
             r'<span[^>]*>VIEWS</span>\s*</td>\s*<td>\s*([\d,.]+)',
             r'<span[^>]+\bclass=["\']video_view_count[^>]*>\s*([\d,.]+)'),
            webpage, 'view count', default=None))

        # No self-labeling, but they describe themselves as
        # "Home of Videos Porno"
        age_limit = 18

        # Scraped fields fill in whatever JSON-LD did not provide.
        return merge_dicts(info, {
            'id': video_id,
            'ext': 'mp4',
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'duration': duration,
            'view_count': view_count,
            'age_limit': age_limit,
            'formats': formats,
        })