mirror of
https://github.com/yt-dlp/yt-dlp.git
synced 2024-11-03 23:25:06 +00:00
[ellentv] Simplify and correct tests
This commit is contained in:
parent
266c71f971
commit
1d01f26ab1
1 changed file with 24 additions and 24 deletions
|
@ -4,10 +4,14 @@
|
||||||
import re
|
import re
|
||||||
import json
|
import json
|
||||||
|
|
||||||
from .common import InfoExtractor, ExtractorError
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
ExtractorError,
|
||||||
|
parse_iso8601,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class EllenTVIE(InfoExtractor):
|
class EllenTVIE(InfoExtractor):
|
||||||
IE_NAME = u'ellentv'
|
|
||||||
_VALID_URL = r'https?://(?:www\.)?ellentv\.com/videos/(?P<id>[a-z0-9_-]+)'
|
_VALID_URL = r'https?://(?:www\.)?ellentv\.com/videos/(?P<id>[a-z0-9_-]+)'
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://www.ellentv.com/videos/0-7jqrsr18/',
|
'url': 'http://www.ellentv.com/videos/0-7jqrsr18/',
|
||||||
|
@ -15,43 +19,39 @@ class EllenTVIE(InfoExtractor):
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '0-7jqrsr18',
|
'id': '0-7jqrsr18',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': u'What\'s Wrong with These Photos? A Whole Lot',
|
'title': 'What\'s Wrong with These Photos? A Whole Lot',
|
||||||
# TODO more properties, either as:
|
'timestamp': 1406876400,
|
||||||
# * A value
|
'upload_date': '20140801',
|
||||||
# * MD5 checksum; start the string with md5:
|
|
||||||
# * A regular expression; start the string with re:
|
|
||||||
# * Any Python type (for example int or float)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
mobj = re.match(self._VALID_URL, url)
|
||||||
id = mobj.group('id')
|
video_id = mobj.group('id')
|
||||||
|
|
||||||
webpage = self._download_webpage(url, id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
timestamp = parse_iso8601(self._search_regex(
|
||||||
|
r'<span class="publish-date"><time datetime="([^"]+)">',
|
||||||
|
webpage, 'timestamp'))
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': id,
|
'id': video_id,
|
||||||
'title': self._og_search_title(webpage),
|
'title': self._og_search_title(webpage),
|
||||||
'url': self._html_search_meta('VideoURL', webpage, 'url')
|
'url': self._html_search_meta('VideoURL', webpage, 'url'),
|
||||||
|
'timestamp': timestamp,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
class EllenTVClipsIE(InfoExtractor):
|
class EllenTVClipsIE(InfoExtractor):
|
||||||
IE_NAME = u'ellentv:clips'
|
IE_NAME = 'EllenTV:clips'
|
||||||
_VALID_URL = r'https?://(?:www\.)?ellentv\.com/episodes/(?P<id>[a-z0-9_-]+)'
|
_VALID_URL = r'https?://(?:www\.)?ellentv\.com/episodes/(?P<id>[a-z0-9_-]+)'
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://www.ellentv.com/episodes/meryl-streep-vanessa-hudgens/',
|
'url': 'http://www.ellentv.com/episodes/meryl-streep-vanessa-hudgens/',
|
||||||
'md5': 'TODO: md5 sum of the first 10KiB of the video file',
|
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '0_wf6pizq7',
|
'id': 'meryl-streep-vanessa-hudgens',
|
||||||
'ext': 'mp4',
|
'title': 'Meryl Streep, Vanessa Hudgens',
|
||||||
'title': 'Video title goes here',
|
},
|
||||||
# TODO more properties, either as:
|
'playlist_mincount': 9,
|
||||||
# * A value
|
|
||||||
# * MD5 checksum; start the string with md5:
|
|
||||||
# * A regular expression; start the string with re:
|
|
||||||
# * Any Python type (for example int or float)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
|
@ -76,4 +76,4 @@ def _extract_playlist(self, webpage):
|
||||||
raise ExtractorError('Failed to download JSON', cause=ve)
|
raise ExtractorError('Failed to download JSON', cause=ve)
|
||||||
|
|
||||||
def _extract_entries(self, playlist):
|
def _extract_entries(self, playlist):
|
||||||
return [self.url_result(item[u'url'], 'EllenTV') for item in playlist]
|
return [self.url_result(item['url'], 'EllenTV') for item in playlist]
|
||||||
|
|
Loading…
Reference in a new issue