# yt_dlp/extractor/tube8.py

import re

from .common import InfoExtractor
from ..aes import aes_decrypt_text
from ..compat import compat_urllib_parse_unquote
from ..utils import (
    determine_ext,
    format_field,
    int_or_none,
    str_to_int,
    strip_or_none,
    url_or_none,
)


class Tube8IE(InfoExtractor):
    _WORKING = False
    _VALID_URL = r'https?://(?:www\.)?tube8\.com/(?:[^/]+/)+(?P<display_id>[^/]+)/(?P<id>\d+)'
    _EMBED_REGEX = [r'<iframe[^>]+\bsrc=["\'](?P<url>(?:https?:)?//(?:www\.)?tube8\.com/embed/(?:[^/]+/)+\d+)']
    _TESTS = [{
        'url': 'http://www.tube8.com/teen/kasia-music-video/229795/',
        'md5': '65e20c48e6abff62ed0c3965fff13a39',
        'info_dict': {
            'id': '229795',
            'display_id': 'kasia-music-video',
            'ext': 'mp4',
            'description': 'hot teen Kasia grinding',
            'uploader': 'unknown',
            'title': 'Kasia music video',
            'age_limit': 18,
            'duration': 230,
            'categories': ['Teen'],
            'tags': ['dancing'],
        },
    }, {
        'url': 'http://www.tube8.com/shemale/teen/blonde-cd-gets-kidnapped-by-two-blacks-and-punished-for-being-a-slutty-girl/19569151/',
        'only_matching': True,
    }]

    def _extract_info(self, url, fatal=True):
        mobj = self._match_valid_url(url)
        video_id = mobj.group('id')
        display_id = (mobj.group('display_id')
                      if 'display_id' in mobj.groupdict()
                      else None) or mobj.group('id')
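
        # Send age_verified=1 so the site serves the full page instead of its
        # age-confirmation interstitial.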
        webpage = self._download_webpage(
            url, display_id, headers={'Cookie': 'age_verified=1'})

        formats = []
        format_urls = set()

        title = None
        thumbnail = None
        duration = None
        encrypted = False
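
        # Validate and deduplicate a candidate format URL, infer tbr/height from
        # path segments such as "_2000k_" or "_720p_", decrypt the URL when the
        # page flags it as encrypted, and append the result to `formats`.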
        def extract_format(format_url, height=None):
            format_url = url_or_none(format_url)
            if not format_url or not format_url.startswith(('http', '//')):
                return
            if format_url in format_urls:
                return
            format_urls.add(format_url)
            tbr = int_or_none(self._search_regex(
                r'[/_](\d+)[kK][/_]', format_url, 'tbr', default=None))
            if not height:
                height = int_or_none(self._search_regex(
                    r'[/_](\d+)[pP][/_]', format_url, 'height', default=None))
            if encrypted:
                format_url = aes_decrypt_text(
                    format_url, title, 32).decode('utf-8')
            formats.append({
                'url': format_url,
                'format_id': format_field(height, None, '%dp'),
                'height': height,
                'tbr': tbr,
            })
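
        # Formats and core metadata (title, thumbnail, duration, encryption
        # flag) are exposed through the page's `flashvars` JSON object.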
        flashvars = self._parse_json(
            self._search_regex(
                r'flashvars\s*=\s*({.+?});', webpage,
                'flashvars', default='{}'),
            display_id, fatal=False)

        if flashvars:
            title = flashvars.get('video_title')
            thumbnail = flashvars.get('image_url')
            duration = int_or_none(flashvars.get('video_duration'))
            encrypted = flashvars.get('encrypted') is True
            for key, value in flashvars.items():
                mobj = re.search(r'quality_(\d+)[pP]', key)
                if mobj:
                    extract_format(value, int(mobj.group(1)))
            video_url = flashvars.get('video_url')
            if video_url and determine_ext(video_url, None):
                extract_format(video_url)
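
        # Fallback: some pages assign flashvars.video_url in inline JavaScript;
        # the value may be percent-encoded, hence the unquote below.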
        video_url = self._html_search_regex(
            r'flashvars\.video_url\s*=\s*(["\'])(?P<url>http.+?)\1',
            webpage, 'video url', default=None, group='url')
        if video_url:
            extract_format(compat_urllib_parse_unquote(video_url))

        if not formats:
            if 'title="This video is no longer available"' in webpage:
                self.raise_no_formats(
                    'Video %s is no longer available' % video_id, expected=True)

        if not title:
            title = self._html_search_regex(
                r'<h1[^>]*>([^<]+)', webpage, 'title')

        return webpage, {
            'id': video_id,
            'display_id': display_id,
            'title': strip_or_none(title),
            'thumbnail': thumbnail,
            'duration': duration,
            'age_limit': 18,
            'formats': formats,
        }

    def _real_extract(self, url):
        webpage, info = self._extract_info(url)

        if not info['title']:
            info['title'] = self._html_search_regex(
                r'videoTitle\s*=\s*"([^"]+)', webpage, 'title')
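
        # Secondary metadata (description, uploader, counts, category, tags) is
        # scraped from the rendered HTML with regexes.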
        description = self._html_search_regex(
            r'(?s)Description:</dt>\s*<dd>(.+?)</dd>', webpage, 'description', fatal=False)
        uploader = self._html_search_regex(
            r'<span class="username">\s*(.+?)\s*<',
            webpage, 'uploader', fatal=False)

        like_count = int_or_none(self._search_regex(
            r'rupVar\s*=\s*"(\d+)"', webpage, 'like count', fatal=False))
        dislike_count = int_or_none(self._search_regex(
            r'rdownVar\s*=\s*"(\d+)"', webpage, 'dislike count', fatal=False))
        view_count = str_to_int(self._search_regex(
            r'Views:\s*</dt>\s*<dd>([\d,\.]+)',
            webpage, 'view count', fatal=False))
        comment_count = str_to_int(self._search_regex(
            r'<span id="allCommentsCount">(\d+)</span>',
            webpage, 'comment count', fatal=False))
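
        # Category is a single <a> inside a definition list; tags are all <a>
        # elements inside the Tags <dd>. Normalize both to lists (or None).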
        category = self._search_regex(
            r'Category:\s*</dt>\s*<dd>\s*<a[^>]+href=[^>]+>([^<]+)',
            webpage, 'category', fatal=False)
        categories = [category] if category else None

        tags_str = self._search_regex(
            r'(?s)Tags:\s*</dt>\s*<dd>(.+?)</(?!a)',
            webpage, 'tags', fatal=False)
        tags = re.findall(
            r'<a[^>]+href=[^>]+>([^<]+)', tags_str) if tags_str else None

        info.update({
            'description': description,
            'uploader': uploader,
            'view_count': view_count,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'comment_count': comment_count,
            'categories': categories,
            'tags': tags,
        })

        return info
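
# Usage sketch (hypothetical; note this extractor is currently marked
# _WORKING = False, so yt-dlp reports it as broken). Extractors are not called
# directly but through YoutubeDL, e.g.:
#
#     from yt_dlp import YoutubeDL
#
#     with YoutubeDL() as ydl:
#         info = ydl.extract_info(
#             'http://www.tube8.com/teen/kasia-music-video/229795/',
#             download=False)
#         print(info.get('title'), info.get('duration'))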