yt-dlp/yt_dlp/extractor/pornhd.py

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    ExtractorError,
    int_or_none,
    js_to_json,
    merge_dicts,
    urljoin,
)


class PornHdIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?pornhd\.com/(?:[a-z]{2,4}/)?videos/(?P<id>\d+)(?:/(?P<display_id>.+))?'
    _TESTS = [{
        'url': 'http://www.pornhd.com/videos/9864/selfie-restroom-masturbation-fun-with-chubby-cutie-hd-porn-video',
        'md5': '87f1540746c1d32ec7a2305c12b96b25',
        'info_dict': {
            'id': '9864',
            'display_id': 'selfie-restroom-masturbation-fun-with-chubby-cutie-hd-porn-video',
            'ext': 'mp4',
            'title': 'Restroom selfie masturbation',
            'description': 'md5:3748420395e03e31ac96857a8f125b2b',
            'thumbnail': r're:^https?://.*\.jpg',
            'view_count': int,
            'like_count': int,
            'age_limit': 18,
        },
        'skip': 'HTTP Error 404: Not Found',
    }, {
        'url': 'http://www.pornhd.com/videos/1962/sierra-day-gets-his-cum-all-over-herself-hd-porn-video',
        'md5': '1b7b3a40b9d65a8e5b25f7ab9ee6d6de',
        'info_dict': {
            'id': '1962',
            'display_id': 'sierra-day-gets-his-cum-all-over-herself-hd-porn-video',
            'ext': 'mp4',
            'title': 'md5:98c6f8b2d9c229d0f0fde47f61a1a759',
            'description': 'md5:8ff0523848ac2b8f9b065ba781ccf294',
            'thumbnail': r're:^https?://.*\.jpg',
            'view_count': int,
            'like_count': int,
            'age_limit': 18,
        },
    }]

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        video_id = mobj.group('id')
        display_id = mobj.group('display_id')

        webpage = self._download_webpage(url, display_id or video_id)
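
        # Prefer the "video-name" heading; fall back to the page <title>.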
        title = self._html_search_regex(
            [r'<span[^>]+class=["\']video-name["\'][^>]*>([^<]+)',
             r'<title>(.+?) - .*?[Pp]ornHD.*?</title>'], webpage, 'title')
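
        # The player config embeds a JS "sources" object mapping quality labels to
        # URLs. If it is missing, fall back to HTML5 <video>/<source> tags; if both
        # are absent, surface the site's own "no-video" error message instead.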
        sources = self._parse_json(js_to_json(self._search_regex(
            r"(?s)sources'?\s*[:=]\s*(\{.+?\})",
            webpage, 'sources', default='{}')), video_id)

        info = {}
        if not sources:
            entries = self._parse_html5_media_entries(url, webpage, video_id)
            if entries:
                info = entries[0]

        if not sources and not info:
            message = self._html_search_regex(
                r'(?s)<(div|p)[^>]+class="no-video"[^>]*>(?P<value>.+?)</\1',
                webpage, 'error message', group='value')
            raise ExtractorError('%s said: %s' % (self.IE_NAME, message), expected=True)
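
        # Build one format per labelled source: resolve relative URLs against the
        # page URL, infer the height from ids such as '720p', and default to mp4.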
        formats = []
        for format_id, video_url in sources.items():
            video_url = urljoin(url, video_url)
            if not video_url:
                continue
            height = int_or_none(self._search_regex(
                r'^(\d+)[pP]', format_id, 'height', default=None))
            formats.append({
                'url': video_url,
                'ext': determine_ext(video_url, 'mp4'),
                'format_id': format_id,
                'height': height,
            })
        if formats:
            info['formats'] = formats
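
        # The remaining metadata is scraped with non-fatal regexes, so a markup
        # change in any single field does not break extraction as a whole.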
        description = self._html_search_regex(
            (r'(?s)<section[^>]+class=["\']video-description[^>]+>(?P<value>.+?)</section>',
             r'<(div|p)[^>]+class="description"[^>]*>(?P<value>[^<]+)</\1'),
            webpage, 'description', fatal=False,
            group='value') or self._html_search_meta(
            'description', webpage, default=None) or self._og_search_description(webpage)
        view_count = int_or_none(self._html_search_regex(
            r'(\d+) views\s*<', webpage, 'view count', fatal=False))
        thumbnail = self._search_regex(
            r"poster'?\s*:\s*([\"'])(?P<url>(?:(?!\1).)+)\1", webpage,
            'thumbnail', default=None, group='url')
        like_count = int_or_none(self._search_regex(
            (r'(\d+)</span>\s*likes',
             r'(\d+)\s*</11[^>]+>(?:&nbsp;|\s)*\blikes',
             r'class=["\']save-count["\'][^>]*>\s*(\d+)'),
            webpage, 'like count', fatal=False))

        return merge_dicts(info, {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'view_count': view_count,
            'like_count': like_count,
            'formats': formats,
            'age_limit': 18,
        })
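

# A minimal usage sketch (an assumption, not part of the original file). Because this
# module uses relative imports it cannot be run directly; the snippet below is meant
# to live in a separate script with yt-dlp installed. YoutubeDL dispatches to PornHdIE
# once the URL matches _VALID_URL; 'skip_download' and 'quiet' are standard yt-dlp
# options, and the URL is simply the second test case above.
#
#     import yt_dlp
#
#     with yt_dlp.YoutubeDL({'skip_download': True, 'quiet': True}) as ydl:
#         info = ydl.extract_info(
#             'http://www.pornhd.com/videos/1962/sierra-day-gets-his-cum-all-over-herself-hd-porn-video',
#             download=False)
#         print(info.get('title'))
#         for f in info.get('formats') or []:
#             print(f.get('format_id'), f.get('height'))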