# yt_dlp/extractor/tube8.py

import re

from .keezmovies import KeezMoviesIE
from ..utils import (
    int_or_none,
    str_to_int,
)


class Tube8IE(KeezMoviesIE):
    _VALID_URL = r'https?://(?:www\.)?tube8\.com/(?:[^/]+/)+(?P<display_id>[^/]+)/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.tube8.com/teen/kasia-music-video/229795/',
        'md5': '65e20c48e6abff62ed0c3965fff13a39',
        'info_dict': {
            'id': '229795',
            'display_id': 'kasia-music-video',
            'ext': 'mp4',
            'description': 'hot teen Kasia grinding',
            'uploader': 'unknown',
            'title': 'Kasia music video',
            'age_limit': 18,
            'duration': 230,
            'categories': ['Teen'],
            'tags': ['dancing'],
        },
    }, {
        'url': 'http://www.tube8.com/shemale/teen/blonde-cd-gets-kidnapped-by-two-blacks-and-punished-for-being-a-slutty-girl/19569151/',
        'only_matching': True,
    }]

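    # Used by the generic extractor to discover Tube8 embed iframes on third-party pages.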
    @staticmethod
    def _extract_urls(webpage):
        return re.findall(
            r'<iframe[^>]+\bsrc=["\']((?:https?:)?//(?:www\.)?tube8\.com/embed/(?:[^/]+/)+\d+)',
            webpage)

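    # KeezMoviesIE._extract_info() downloads the page and extracts the formats and base
    # metadata; the regexes below scrape the remaining fields out of the page HTML.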
    def _real_extract(self, url):
        webpage, info = self._extract_info(url)

        if not info['title']:
            info['title'] = self._html_search_regex(
                r'videoTitle\s*=\s*"([^"]+)', webpage, 'title')

        description = self._html_search_regex(
            r'(?s)Description:</dt>\s*<dd>(.+?)</dd>', webpage, 'description', fatal=False)
        uploader = self._html_search_regex(
            r'<span class="username">\s*(.+?)\s*<',
            webpage, 'uploader', fatal=False)

        like_count = int_or_none(self._search_regex(
            r'rupVar\s*=\s*"(\d+)"', webpage, 'like count', fatal=False))
        dislike_count = int_or_none(self._search_regex(
            r'rdownVar\s*=\s*"(\d+)"', webpage, 'dislike count', fatal=False))
        view_count = str_to_int(self._search_regex(
            r'Views:\s*</dt>\s*<dd>([\d,\.]+)',
            webpage, 'view count', fatal=False))
        comment_count = str_to_int(self._search_regex(
            r'<span id="allCommentsCount">(\d+)</span>',
            webpage, 'comment count', fatal=False))

        category = self._search_regex(
            r'Category:\s*</dt>\s*<dd>\s*<a[^>]+href=[^>]+>([^<]+)',
            webpage, 'category', fatal=False)
        categories = [category] if category else None
        tags_str = self._search_regex(
            r'(?s)Tags:\s*</dt>\s*<dd>(.+?)</(?!a)',
            webpage, 'tags', fatal=False)
        tags = [t for t in re.findall(
            r'<a[^>]+href=[^>]+>([^<]+)', tags_str)] if tags_str else None

        info.update({
            'description': description,
            'uploader': uploader,
            'view_count': view_count,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'comment_count': comment_count,
            'categories': categories,
            'tags': tags,
        })

        return info
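

# Usage sketch (an editor-added illustration, not part of the upstream module):
# in practice the extractor is driven through yt-dlp's public API rather than
# imported directly. The URL is the first test URL above; 'skip_download' only
# avoids fetching the media itself.
if __name__ == '__main__':
    import yt_dlp

    with yt_dlp.YoutubeDL({'skip_download': True}) as ydl:
        result = ydl.extract_info(
            'http://www.tube8.com/teen/kasia-music-video/229795/', download=False)
        print(result.get('id'), result.get('title'), result.get('uploader'))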