mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2024-11-29 03:23:02 +00:00)

[refactor] Single quotes consistency

commit 611c1dd96e (parent d800609c62)
59 changed files with 302 additions and 302 deletions
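Every hunk below applies the same mechanical rewrite: a double-quoted Python string literal becomes single-quoted, while strings that themselves contain an apostrophe (for example "Student's epic speech stuns new freshmen") keep their double quotes. As a minimal illustrative sketch — an assumption for illustration only, not part of this commit or of the project's tooling — candidates for such a rewrite can be located with Python's tokenize module:

import io
import tokenize


def double_quoted_literals(source):
    # Yield (line number, literal) for simple double-quoted string literals
    # that could switch to single quotes without adding escapes.
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.type != tokenize.STRING:
            continue
        literal = tok.string.lstrip('rbuRBU')  # strip common string prefixes
        if literal.startswith('"') and not literal.startswith('"""') and "'" not in literal:
            yield tok.start[0], tok.string


if __name__ == '__main__':
    sample = 'title = "Winter Is Coming"\ndescription = "Student\'s epic speech stuns new freshmen"\n'
    for lineno, literal in double_quoted_literals(sample):
        print('line %d: %s' % (lineno, literal))

Run against the sample, only the first literal is reported; the second contains an apostrophe and is left double-quoted, matching the pattern visible throughout the diff.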
@@ -782,7 +782,7 @@ def iter_playlistitems(format):
 entries = ie_entries[playliststart:playlistend]
 n_entries = len(entries)
 self.to_screen(
-"[%s] playlist %s: Collected %d video ids (downloading %d of them)" %
+'[%s] playlist %s: Collected %d video ids (downloading %d of them)' %
 (ie_result['extractor'], playlist, n_all_entries, n_entries))
 elif isinstance(ie_entries, PagedList):
 if playlistitems:
@@ -796,7 +796,7 @@ def iter_playlistitems(format):
 playliststart, playlistend)
 n_entries = len(entries)
 self.to_screen(
-"[%s] playlist %s: Downloading %d videos" %
+'[%s] playlist %s: Downloading %d videos' %
 (ie_result['extractor'], playlist, n_entries))
 else: # iterable
 if playlistitems:
@@ -807,7 +807,7 @@ def iter_playlistitems(format):
 ie_entries, playliststart, playlistend))
 n_entries = len(entries)
 self.to_screen(
-"[%s] playlist %s: Downloading %d videos" %
+'[%s] playlist %s: Downloading %d videos' %
 (ie_result['extractor'], playlist, n_entries))

 if self.params.get('playlistreverse', False):
@@ -7,7 +7,7 @@

 import sys

-if __package__ is None and not hasattr(sys, "frozen"):
+if __package__ is None and not hasattr(sys, 'frozen'):
 # direct call of __main__.py
 import os.path
 path = os.path.realpath(os.path.abspath(__file__))
@@ -181,20 +181,20 @@ def data_open(self, req):
 # parameter := attribute "=" value
 url = req.get_full_url()

-scheme, data = url.split(":", 1)
+scheme, data = url.split(':', 1)
-mediatype, data = data.split(",", 1)
+mediatype, data = data.split(',', 1)

 # even base64 encoded data URLs might be quoted so unquote in any case:
 data = compat_urllib_parse_unquote_to_bytes(data)
-if mediatype.endswith(";base64"):
+if mediatype.endswith(';base64'):
 data = binascii.a2b_base64(data)
 mediatype = mediatype[:-7]

 if not mediatype:
-mediatype = "text/plain;charset=US-ASCII"
+mediatype = 'text/plain;charset=US-ASCII'

 headers = email.message_from_string(
-"Content-type: %s\nContent-length: %d\n" % (mediatype, len(data)))
+'Content-type: %s\nContent-length: %d\n' % (mediatype, len(data)))

 return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url)

@@ -268,7 +268,7 @@ def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
 nv = name_value.split('=', 1)
 if len(nv) != 2:
 if strict_parsing:
-raise ValueError("bad query field: %r" % (name_value,))
+raise ValueError('bad query field: %r' % (name_value,))
 # Handle case of a control-name with no equal sign
 if keep_blank_values:
 nv.append('')
@@ -466,7 +466,7 @@ def compat_socket_create_connection(address, timeout, source_address=None):
 if err is not None:
 raise err
 else:
-raise socket.error("getaddrinfo returns an empty list")
+raise socket.error('getaddrinfo returns an empty list')
 else:
 compat_socket_create_connection = socket.create_connection

@@ -140,8 +140,8 @@ def real_download(self, filename, info_dict):

 if data_len is not None:
 data_len = int(data_len) + resume_len
-min_data_len = self.params.get("min_filesize")
+min_data_len = self.params.get('min_filesize')
-max_data_len = self.params.get("max_filesize")
+max_data_len = self.params.get('max_filesize')
 if min_data_len is not None and data_len < min_data_len:
 self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
 return False
@@ -28,7 +28,7 @@ class AENetworksIE(InfoExtractor):
 'info_dict': {
 'id': 'eg47EERs_JsZ',
 'ext': 'mp4',
-'title': "Winter Is Coming",
+'title': 'Winter Is Coming',
 'description': 'md5:641f424b7a19d8e24f26dea22cf59d74',
 },
 'params': {
@@ -86,7 +86,7 @@ class BBCCoUkIE(InfoExtractor):
 'id': 'b00yng1d',
 'ext': 'flv',
 'title': 'The Voice UK: Series 3: Blind Auditions 5',
-'description': "Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.",
+'description': 'Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.',
 'duration': 5100,
 },
 'params': {
@@ -45,7 +45,7 @@ def _real_extract(self, url):
 title = self._html_search_regex(
 r'(?s)<h1>(.*?)</h1>', webpage, 'title')
 description = self._html_search_regex(
-r"(?s)<h3>About</h3>(.+?)<h3>",
+r'(?s)<h3>About</h3>(.+?)<h3>',
 webpage, 'description', fatal=False)
 upload_date = unified_strdate(self._html_search_regex(
 r"(?s)<span[^>]+class='[^']*fa-calendar-o'[^>]*>(.+?)</span>",
@@ -177,16 +177,16 @@ def _msectotimecode(msec):
 for divider in [1000, 60, 60, 100]:
 components.append(msec % divider)
 msec //= divider
-return "{3:02}:{2:02}:{1:02},{0:03}".format(*components)
+return '{3:02}:{2:02}:{1:02},{0:03}'.format(*components)

 def _fix_subtitle(subtitle):
 for line in subtitle.splitlines():
-m = re.match(r"^\s*([0-9]+);\s*([0-9]+)\s+([0-9]+)\s*$", line)
+m = re.match(r'^\s*([0-9]+);\s*([0-9]+)\s+([0-9]+)\s*$', line)
 if m:
 yield m.group(1)
 start, stop = (_msectotimecode(int(t)) for t in m.groups()[1:])
-yield "{0} --> {1}".format(start, stop)
+yield '{0} --> {1}'.format(start, stop)
 else:
 yield line

-return "\r\n".join(_fix_subtitle(subtitles))
+return '\r\n'.join(_fix_subtitle(subtitles))
@@ -26,14 +26,14 @@ class CNNIE(InfoExtractor):
 'upload_date': '20130609',
 },
 }, {
-"url": "http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29",
+'url': 'http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29',
-"md5": "b5cc60c60a3477d185af8f19a2a26f4e",
+'md5': 'b5cc60c60a3477d185af8f19a2a26f4e',
-"info_dict": {
+'info_dict': {
 'id': 'us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology',
 'ext': 'mp4',
-"title": "Student's epic speech stuns new freshmen",
+'title': "Student's epic speech stuns new freshmen",
-"description": "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"",
+'description': "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"",
-"upload_date": "20130821",
+'upload_date': '20130821',
 }
 }, {
 'url': 'http://www.cnn.com/video/data/2.0/video/living/2014/12/22/growing-america-nashville-salemtown-board-episode-1.hln.html',
@@ -46,9 +46,9 @@ def _real_extract(self, url):
 video_id = self._match_id(url)

 player_options_request = {
-"getPlayerOptionsRequest": {
+'getPlayerOptionsRequest': {
-"ResourceId": video_id,
+'ResourceId': video_id,
-"QueryString": "",
+'QueryString': '',
 }
 }

@@ -195,7 +195,7 @@ def _real_extract(self, url):
 if len(altMovieParams) == 0:
 raise ExtractorError('unable to find Flash URL in webpage ' + url)
 else:
-mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
+mMovieParams = [('http://media.mtvnservices.com/' + altMovieParams[0], altMovieParams[0])]

 uri = mMovieParams[0][1]
 # Correct cc.com in uri
@@ -1497,7 +1497,7 @@ def extract_multisegment_info(element, ms_parent_info):
 def _live_title(self, name):
 """ Generate the title for a live video """
 now = datetime.datetime.now()
-now_str = now.strftime("%Y-%m-%d %H:%M")
+now_str = now.strftime('%Y-%m-%d %H:%M')
 return name + ' ' + now_str

 def _int(self, v, name, fatal=False, **kwargs):
@@ -1570,7 +1570,7 @@ def extract_subtitles(self, *args, **kwargs):
 return {}

 def _get_subtitles(self, *args, **kwargs):
-raise NotImplementedError("This method must be implemented by subclasses")
+raise NotImplementedError('This method must be implemented by subclasses')

 @staticmethod
 def _merge_subtitle_items(subtitle_list1, subtitle_list2):
@@ -1596,7 +1596,7 @@ def extract_automatic_captions(self, *args, **kwargs):
 return {}

 def _get_automatic_captions(self, *args, **kwargs):
-raise NotImplementedError("This method must be implemented by subclasses")
+raise NotImplementedError('This method must be implemented by subclasses')


 class SearchInfoExtractor(InfoExtractor):
@@ -1636,7 +1636,7 @@ def _real_extract(self, query):

 def _get_n_results(self, query, n):
 """Get a specified number of results for a query"""
-raise NotImplementedError("This method must be implemented by subclasses")
+raise NotImplementedError('This method must be implemented by subclasses')

 @property
 def SEARCH_KEY(self):
@@ -180,40 +180,40 @@ def ass_bool(strvalue):
 return assvalue

 output = '[Script Info]\n'
-output += 'Title: %s\n' % sub_root.attrib["title"]
+output += 'Title: %s\n' % sub_root.attrib['title']
 output += 'ScriptType: v4.00+\n'
-output += 'WrapStyle: %s\n' % sub_root.attrib["wrap_style"]
+output += 'WrapStyle: %s\n' % sub_root.attrib['wrap_style']
-output += 'PlayResX: %s\n' % sub_root.attrib["play_res_x"]
+output += 'PlayResX: %s\n' % sub_root.attrib['play_res_x']
-output += 'PlayResY: %s\n' % sub_root.attrib["play_res_y"]
+output += 'PlayResY: %s\n' % sub_root.attrib['play_res_y']
 output += """ScaledBorderAndShadow: yes

 [V4+ Styles]
 Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
 """
 for style in sub_root.findall('./styles/style'):
-output += 'Style: ' + style.attrib["name"]
+output += 'Style: ' + style.attrib['name']
-output += ',' + style.attrib["font_name"]
+output += ',' + style.attrib['font_name']
-output += ',' + style.attrib["font_size"]
+output += ',' + style.attrib['font_size']
-output += ',' + style.attrib["primary_colour"]
+output += ',' + style.attrib['primary_colour']
-output += ',' + style.attrib["secondary_colour"]
+output += ',' + style.attrib['secondary_colour']
-output += ',' + style.attrib["outline_colour"]
+output += ',' + style.attrib['outline_colour']
-output += ',' + style.attrib["back_colour"]
+output += ',' + style.attrib['back_colour']
-output += ',' + ass_bool(style.attrib["bold"])
+output += ',' + ass_bool(style.attrib['bold'])
-output += ',' + ass_bool(style.attrib["italic"])
+output += ',' + ass_bool(style.attrib['italic'])
-output += ',' + ass_bool(style.attrib["underline"])
+output += ',' + ass_bool(style.attrib['underline'])
-output += ',' + ass_bool(style.attrib["strikeout"])
+output += ',' + ass_bool(style.attrib['strikeout'])
-output += ',' + style.attrib["scale_x"]
+output += ',' + style.attrib['scale_x']
-output += ',' + style.attrib["scale_y"]
+output += ',' + style.attrib['scale_y']
-output += ',' + style.attrib["spacing"]
+output += ',' + style.attrib['spacing']
-output += ',' + style.attrib["angle"]
+output += ',' + style.attrib['angle']
-output += ',' + style.attrib["border_style"]
+output += ',' + style.attrib['border_style']
-output += ',' + style.attrib["outline"]
+output += ',' + style.attrib['outline']
-output += ',' + style.attrib["shadow"]
+output += ',' + style.attrib['shadow']
-output += ',' + style.attrib["alignment"]
+output += ',' + style.attrib['alignment']
-output += ',' + style.attrib["margin_l"]
+output += ',' + style.attrib['margin_l']
-output += ',' + style.attrib["margin_r"]
+output += ',' + style.attrib['margin_r']
-output += ',' + style.attrib["margin_v"]
+output += ',' + style.attrib['margin_v']
-output += ',' + style.attrib["encoding"]
+output += ',' + style.attrib['encoding']
 output += '\n'

 output += """
@@ -222,15 +222,15 @@ def ass_bool(strvalue):
 """
 for event in sub_root.findall('./events/event'):
 output += 'Dialogue: 0'
-output += ',' + event.attrib["start"]
+output += ',' + event.attrib['start']
-output += ',' + event.attrib["end"]
+output += ',' + event.attrib['end']
-output += ',' + event.attrib["style"]
+output += ',' + event.attrib['style']
-output += ',' + event.attrib["name"]
+output += ',' + event.attrib['name']
-output += ',' + event.attrib["margin_l"]
+output += ',' + event.attrib['margin_l']
-output += ',' + event.attrib["margin_r"]
+output += ',' + event.attrib['margin_r']
-output += ',' + event.attrib["margin_v"]
+output += ',' + event.attrib['margin_v']
-output += ',' + event.attrib["effect"]
+output += ',' + event.attrib['effect']
-output += ',' + event.attrib["text"]
+output += ',' + event.attrib['text']
 output += '\n'

 return output
@@ -376,7 +376,7 @@ def _real_extract(self, url):


 class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE):
-IE_NAME = "crunchyroll:playlist"
+IE_NAME = 'crunchyroll:playlist'
 _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?(?:\?|$)'

 _TESTS = [{
@@ -87,7 +87,7 @@ def parse_filename_info(url):

 formats = []
 for file in info['Files']:
-if info['Type'] == "Video":
+if info['Type'] == 'Video':
 if file['Type'] in video_types:
 format = parse_filename_info(file['Location'])
 format.update({
@@ -101,10 +101,10 @@ def parse_filename_info(url):
 if '/bonanza/' in rtmp_url:
 format['play_path'] = rtmp_url.split('/bonanza/')[1]
 formats.append(format)
-elif file['Type'] == "Thumb":
+elif file['Type'] == 'Thumb':
 thumbnail = file['Location']
-elif info['Type'] == "Audio":
+elif info['Type'] == 'Audio':
-if file['Type'] == "Audio":
+if file['Type'] == 'Audio':
 format = parse_filename_info(file['Location'])
 format.update({
 'url': file['Location'],
@@ -112,7 +112,7 @@ def parse_filename_info(url):
 'vcodec': 'none',
 })
 formats.append(format)
-elif file['Type'] == "Thumb":
+elif file['Type'] == 'Thumb':
 thumbnail = file['Location']

 description = '%s\n%s\n%s\n' % (
@ -17,85 +17,85 @@ class EightTracksIE(InfoExtractor):
|
||||||
IE_NAME = '8tracks'
|
IE_NAME = '8tracks'
|
||||||
_VALID_URL = r'https?://8tracks\.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$'
|
_VALID_URL = r'https?://8tracks\.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$'
|
||||||
_TEST = {
|
_TEST = {
|
||||||
"name": "EightTracks",
|
'name': 'EightTracks',
|
||||||
"url": "http://8tracks.com/ytdl/youtube-dl-test-tracks-a",
|
'url': 'http://8tracks.com/ytdl/youtube-dl-test-tracks-a',
|
||||||
"info_dict": {
|
'info_dict': {
|
||||||
'id': '1336550',
|
'id': '1336550',
|
||||||
'display_id': 'youtube-dl-test-tracks-a',
|
'display_id': 'youtube-dl-test-tracks-a',
|
||||||
"description": "test chars: \"'/\\ä↭",
|
'description': "test chars: \"'/\\ä↭",
|
||||||
"title": "youtube-dl test tracks \"'/\\ä↭<>",
|
'title': "youtube-dl test tracks \"'/\\ä↭<>",
|
||||||
},
|
},
|
||||||
"playlist": [
|
'playlist': [
|
||||||
{
|
{
|
||||||
"md5": "96ce57f24389fc8734ce47f4c1abcc55",
|
'md5': '96ce57f24389fc8734ce47f4c1abcc55',
|
||||||
"info_dict": {
|
'info_dict': {
|
||||||
"id": "11885610",
|
'id': '11885610',
|
||||||
"ext": "m4a",
|
'ext': 'm4a',
|
||||||
"title": "youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad",
|
'title': "youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad",
|
||||||
"uploader_id": "ytdl"
|
'uploader_id': 'ytdl'
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"md5": "4ab26f05c1f7291ea460a3920be8021f",
|
'md5': '4ab26f05c1f7291ea460a3920be8021f',
|
||||||
"info_dict": {
|
'info_dict': {
|
||||||
"id": "11885608",
|
'id': '11885608',
|
||||||
"ext": "m4a",
|
'ext': 'm4a',
|
||||||
"title": "youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad",
|
'title': "youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad",
|
||||||
"uploader_id": "ytdl"
|
'uploader_id': 'ytdl'
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"md5": "d30b5b5f74217410f4689605c35d1fd7",
|
'md5': 'd30b5b5f74217410f4689605c35d1fd7',
|
||||||
"info_dict": {
|
'info_dict': {
|
||||||
"id": "11885679",
|
'id': '11885679',
|
||||||
"ext": "m4a",
|
'ext': 'm4a',
|
||||||
"title": "youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad",
|
'title': "youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad",
|
||||||
"uploader_id": "ytdl"
|
'uploader_id': 'ytdl'
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"md5": "4eb0a669317cd725f6bbd336a29f923a",
|
'md5': '4eb0a669317cd725f6bbd336a29f923a',
|
||||||
"info_dict": {
|
'info_dict': {
|
||||||
"id": "11885680",
|
'id': '11885680',
|
||||||
"ext": "m4a",
|
'ext': 'm4a',
|
||||||
"title": "youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad",
|
'title': "youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad",
|
||||||
"uploader_id": "ytdl"
|
'uploader_id': 'ytdl'
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"md5": "1893e872e263a2705558d1d319ad19e8",
|
'md5': '1893e872e263a2705558d1d319ad19e8',
|
||||||
"info_dict": {
|
'info_dict': {
|
||||||
"id": "11885682",
|
'id': '11885682',
|
||||||
"ext": "m4a",
|
'ext': 'm4a',
|
||||||
"title": "PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad",
|
'title': "PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad",
|
||||||
"uploader_id": "ytdl"
|
'uploader_id': 'ytdl'
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"md5": "b673c46f47a216ab1741ae8836af5899",
|
'md5': 'b673c46f47a216ab1741ae8836af5899',
|
||||||
"info_dict": {
|
'info_dict': {
|
||||||
"id": "11885683",
|
'id': '11885683',
|
||||||
"ext": "m4a",
|
'ext': 'm4a',
|
||||||
"title": "PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad",
|
'title': "PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad",
|
||||||
"uploader_id": "ytdl"
|
'uploader_id': 'ytdl'
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"md5": "1d74534e95df54986da7f5abf7d842b7",
|
'md5': '1d74534e95df54986da7f5abf7d842b7',
|
||||||
"info_dict": {
|
'info_dict': {
|
||||||
"id": "11885684",
|
'id': '11885684',
|
||||||
"ext": "m4a",
|
'ext': 'm4a',
|
||||||
"title": "phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad",
|
'title': "phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad",
|
||||||
"uploader_id": "ytdl"
|
'uploader_id': 'ytdl'
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"md5": "f081f47af8f6ae782ed131d38b9cd1c0",
|
'md5': 'f081f47af8f6ae782ed131d38b9cd1c0',
|
||||||
"info_dict": {
|
'info_dict': {
|
||||||
"id": "11885685",
|
'id': '11885685',
|
||||||
"ext": "m4a",
|
'ext': 'm4a',
|
||||||
"title": "phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad",
|
'title': "phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad",
|
||||||
"uploader_id": "ytdl"
|
'uploader_id': 'ytdl'
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
|
|
@ -72,7 +72,7 @@ def _real_extract(self, url):
|
||||||
def _extract_playlist(self, webpage):
|
def _extract_playlist(self, webpage):
|
||||||
json_string = self._search_regex(r'playerView.addClips\(\[\{(.*?)\}\]\);', webpage, 'json')
|
json_string = self._search_regex(r'playerView.addClips\(\[\{(.*?)\}\]\);', webpage, 'json')
|
||||||
try:
|
try:
|
||||||
return json.loads("[{" + json_string + "}]")
|
return json.loads('[{' + json_string + '}]')
|
||||||
except ValueError as ve:
|
except ValueError as ve:
|
||||||
raise ExtractorError('Failed to download JSON', cause=ve)
|
raise ExtractorError('Failed to download JSON', cause=ve)
|
||||||
|
|
||||||
|
|
|
@ -14,14 +14,14 @@ class EveryonesMixtapeIE(InfoExtractor):
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://everyonesmixtape.com/#/mix/m7m0jJAbMQi/5',
|
'url': 'http://everyonesmixtape.com/#/mix/m7m0jJAbMQi/5',
|
||||||
"info_dict": {
|
'info_dict': {
|
||||||
'id': '5bfseWNmlds',
|
'id': '5bfseWNmlds',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
"title": "Passion Pit - \"Sleepyhead\" (Official Music Video)",
|
'title': "Passion Pit - \"Sleepyhead\" (Official Music Video)",
|
||||||
"uploader": "FKR.TV",
|
'uploader': 'FKR.TV',
|
||||||
"uploader_id": "frenchkissrecords",
|
'uploader_id': 'frenchkissrecords',
|
||||||
"description": "Music video for \"Sleepyhead\" from Passion Pit's debut EP Chunk Of Change.\nBuy on iTunes: https://itunes.apple.com/us/album/chunk-of-change-ep/id300087641\n\nDirected by The Wilderness.\n\nhttp://www.passionpitmusic.com\nhttp://www.frenchkissrecords.com",
|
'description': "Music video for \"Sleepyhead\" from Passion Pit's debut EP Chunk Of Change.\nBuy on iTunes: https://itunes.apple.com/us/album/chunk-of-change-ep/id300087641\n\nDirected by The Wilderness.\n\nhttp://www.passionpitmusic.com\nhttp://www.frenchkissrecords.com",
|
||||||
"upload_date": "20081015"
|
'upload_date': '20081015'
|
||||||
},
|
},
|
||||||
'params': {
|
'params': {
|
||||||
'skip_download': True, # This is simply YouTube
|
'skip_download': True, # This is simply YouTube
|
||||||
|
|
|
@ -41,7 +41,7 @@ class ExfmIE(InfoExtractor):
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
mobj = re.match(self._VALID_URL, url)
|
||||||
song_id = mobj.group('id')
|
song_id = mobj.group('id')
|
||||||
info_url = "http://ex.fm/api/v3/song/%s" % song_id
|
info_url = 'http://ex.fm/api/v3/song/%s' % song_id
|
||||||
info = self._download_json(info_url, song_id)['song']
|
info = self._download_json(info_url, song_id)['song']
|
||||||
song_url = info['url']
|
song_url = info['url']
|
||||||
if re.match(self._SOUNDCLOUD_URL, song_url) is not None:
|
if re.match(self._SOUNDCLOUD_URL, song_url) is not None:
|
||||||
|
|
|
@ -87,7 +87,7 @@ def _real_extract(self, url):
|
||||||
mimi = hashlib.md5((video_id + '_gGddgPfeaf_gzyr').encode('utf-8')).hexdigest()
|
mimi = hashlib.md5((video_id + '_gGddgPfeaf_gzyr').encode('utf-8')).hexdigest()
|
||||||
|
|
||||||
info_url = (
|
info_url = (
|
||||||
"http://video.fc2.com/ginfo.php?mimi={1:s}&href={2:s}&v={0:s}&fversion=WIN%2011%2C6%2C602%2C180&from=2&otag=0&upid={0:s}&tk=null&".
|
'http://video.fc2.com/ginfo.php?mimi={1:s}&href={2:s}&v={0:s}&fversion=WIN%2011%2C6%2C602%2C180&from=2&otag=0&upid={0:s}&tk=null&'.
|
||||||
format(video_id, mimi, compat_urllib_request.quote(refer, safe=b'').replace('.', '%2E')))
|
format(video_id, mimi, compat_urllib_request.quote(refer, safe=b'').replace('.', '%2E')))
|
||||||
|
|
||||||
info_webpage = self._download_webpage(
|
info_webpage = self._download_webpage(
|
||||||
|
|
|
@ -10,7 +10,7 @@ class FranceInterIE(InfoExtractor):
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://www.franceinter.fr/player/reecouter?play=793962',
|
'url': 'http://www.franceinter.fr/player/reecouter?play=793962',
|
||||||
'md5': '4764932e466e6f6c79c317d2e74f6884',
|
'md5': '4764932e466e6f6c79c317d2e74f6884',
|
||||||
"info_dict": {
|
'info_dict': {
|
||||||
'id': '793962',
|
'id': '793962',
|
||||||
'ext': 'mp3',
|
'ext': 'mp3',
|
||||||
'title': 'L’Histoire dans les jeux vidéo',
|
'title': 'L’Histoire dans les jeux vidéo',
|
||||||
|
|
|
@ -12,8 +12,8 @@ class FreeVideoIE(InfoExtractor):
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'vysukany-zadecek-22033',
|
'id': 'vysukany-zadecek-22033',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
"title": "vysukany-zadecek-22033",
|
'title': 'vysukany-zadecek-22033',
|
||||||
"age_limit": 18,
|
'age_limit': 18,
|
||||||
},
|
},
|
||||||
'skip': 'Blocked outside .cz',
|
'skip': 'Blocked outside .cz',
|
||||||
}
|
}
|
||||||
|
|
|
@ -11,8 +11,8 @@ class HentaiStigmaIE(InfoExtractor):
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'inyouchuu-etsu-bonus',
|
'id': 'inyouchuu-etsu-bonus',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
"title": "Inyouchuu Etsu Bonus",
|
'title': 'Inyouchuu Etsu Bonus',
|
||||||
"age_limit": 18,
|
'age_limit': 18,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -28,7 +28,7 @@ def _real_extract(self, url):
|
||||||
|
|
||||||
title = self._search_regex(r'(?:G_TITLE=|G_MOVIE_TITLE = )[\'"](.+?)[\'"]', webpage, 'video title')
|
title = self._search_regex(r'(?:G_TITLE=|G_MOVIE_TITLE = )[\'"](.+?)[\'"]', webpage, 'video title')
|
||||||
surls = re.search(r'surls:\[\'.+?\'\]|lurl:\'.+?\.flv\'', webpage).group(0)
|
surls = re.search(r'surls:\[\'.+?\'\]|lurl:\'.+?\.flv\'', webpage).group(0)
|
||||||
gcids = re.findall(r"http://.+?/.+?/(.+?)/", surls)
|
gcids = re.findall(r'http://.+?/.+?/(.+?)/', surls)
|
||||||
gcid = gcids[-1]
|
gcid = gcids[-1]
|
||||||
|
|
||||||
info_url = 'http://p2s.cl.kankan.com/getCdnresource_flv?gcid=%s' % gcid
|
info_url = 'http://p2s.cl.kankan.com/getCdnresource_flv?gcid=%s' % gcid
|
||||||
|
|
|
@ -47,7 +47,7 @@ class LiveLeakIE(InfoExtractor):
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '801_1409392012',
|
'id': '801_1409392012',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'description': "Happened on 27.7.2014. \r\nAt 0:53 you can see people still swimming at near beach.",
|
'description': 'Happened on 27.7.2014. \r\nAt 0:53 you can see people still swimming at near beach.',
|
||||||
'uploader': 'bony333',
|
'uploader': 'bony333',
|
||||||
'title': 'Crazy Hungarian tourist films close call waterspout in Croatia'
|
'title': 'Crazy Hungarian tourist films close call waterspout in Croatia'
|
||||||
}
|
}
|
||||||
|
|
|
@ -38,7 +38,7 @@ def _real_extract(self, url):
|
||||||
path = compat_urllib_parse_urlparse(video_url).path
|
path = compat_urllib_parse_urlparse(video_url).path
|
||||||
extension = os.path.splitext(path)[1][1:]
|
extension = os.path.splitext(path)[1][1:]
|
||||||
format = path.split('/')[5].split('_')[:2]
|
format = path.split('/')[5].split('_')[:2]
|
||||||
format = "-".join(format)
|
format = '-'.join(format)
|
||||||
|
|
||||||
age_limit = self._rta_search(webpage)
|
age_limit = self._rta_search(webpage)
|
||||||
|
|
||||||
|
|
|
@ -18,8 +18,8 @@ class MySpassIE(InfoExtractor):
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '11741',
|
'id': '11741',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
"description": "Wer kann in die Fu\u00dfstapfen von Wolfgang Kubicki treten und die Mehrheit der Zuschauer hinter sich versammeln? Wird vielleicht sogar die Absolute Mehrheit geknackt und der Jackpot von 200.000 Euro mit nach Hause genommen?",
|
'description': 'Wer kann in die Fu\u00dfstapfen von Wolfgang Kubicki treten und die Mehrheit der Zuschauer hinter sich versammeln? Wird vielleicht sogar die Absolute Mehrheit geknackt und der Jackpot von 200.000 Euro mit nach Hause genommen?',
|
||||||
"title": "Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2",
|
'title': 'Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2',
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -18,14 +18,14 @@ class NerdCubedFeedIE(InfoExtractor):
|
||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
feed = self._download_json(url, url, "Downloading NerdCubed JSON feed")
|
feed = self._download_json(url, url, 'Downloading NerdCubed JSON feed')
|
||||||
|
|
||||||
entries = [{
|
entries = [{
|
||||||
'_type': 'url',
|
'_type': 'url',
|
||||||
'title': feed_entry['title'],
|
'title': feed_entry['title'],
|
||||||
'uploader': feed_entry['source']['name'] if feed_entry['source'] else None,
|
'uploader': feed_entry['source']['name'] if feed_entry['source'] else None,
|
||||||
'upload_date': datetime.datetime.strptime(feed_entry['date'], '%Y-%m-%d').strftime('%Y%m%d'),
|
'upload_date': datetime.datetime.strptime(feed_entry['date'], '%Y-%m-%d').strftime('%Y%m%d'),
|
||||||
'url': "http://www.youtube.com/watch?v=" + feed_entry['youtube_id'],
|
'url': 'http://www.youtube.com/watch?v=' + feed_entry['youtube_id'],
|
||||||
} for feed_entry in feed]
|
} for feed_entry in feed]
|
||||||
|
|
||||||
return {
|
return {
|
||||||
|
|
|
@ -27,9 +27,9 @@ class PornHubIE(InfoExtractor):
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '648719015',
|
'id': '648719015',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
"uploader": "Babes",
|
'uploader': 'Babes',
|
||||||
"title": "Seductive Indian beauty strips down and fingers her pink pussy",
|
'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
|
||||||
"age_limit": 18
|
'age_limit': 18
|
||||||
}
|
}
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
|
'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
|
||||||
|
@ -95,7 +95,7 @@ def _real_extract(self, url):
|
||||||
path = compat_urllib_parse_urlparse(video_url).path
|
path = compat_urllib_parse_urlparse(video_url).path
|
||||||
extension = os.path.splitext(path)[1][1:]
|
extension = os.path.splitext(path)[1][1:]
|
||||||
format = path.split('/')[5].split('_')[:2]
|
format = path.split('/')[5].split('_')[:2]
|
||||||
format = "-".join(format)
|
format = '-'.join(format)
|
||||||
|
|
||||||
m = re.match(r'^(?P<height>[0-9]+)[pP]-(?P<tbr>[0-9]+)[kK]$', format)
|
m = re.match(r'^(?P<height>[0-9]+)[pP]-(?P<tbr>[0-9]+)[kK]$', format)
|
||||||
if m is None:
|
if m is None:
|
||||||
|
|
|
@ -56,7 +56,7 @@ def _real_extract(self, url):
|
||||||
r'<h1>(.+?)</h1>', webpage, 'title', flags=re.DOTALL)
|
r'<h1>(.+?)</h1>', webpage, 'title', flags=re.DOTALL)
|
||||||
description = self._html_search_regex(
|
description = self._html_search_regex(
|
||||||
r'<article id="descriptif">(.+?)</article>',
|
r'<article id="descriptif">(.+?)</article>',
|
||||||
webpage, "description", fatal=False, flags=re.DOTALL)
|
webpage, 'description', fatal=False, flags=re.DOTALL)
|
||||||
|
|
||||||
thumbnail = self._search_regex(
|
thumbnail = self._search_regex(
|
||||||
r'<div id="mediaspace%s">\s*<img src="/?([^"]+)"' % video_id,
|
r'<div id="mediaspace%s">\s*<img src="/?([^"]+)"' % video_id,
|
||||||
|
|
|
@ -28,16 +28,16 @@ class RadioBremenIE(InfoExtractor):
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
video_id = self._match_id(url)
|
||||||
|
|
||||||
meta_url = "http://www.radiobremen.de/apps/php/mediathek/metadaten.php?id=%s" % video_id
|
meta_url = 'http://www.radiobremen.de/apps/php/mediathek/metadaten.php?id=%s' % video_id
|
||||||
meta_doc = self._download_webpage(
|
meta_doc = self._download_webpage(
|
||||||
meta_url, video_id, 'Downloading metadata')
|
meta_url, video_id, 'Downloading metadata')
|
||||||
title = self._html_search_regex(
|
title = self._html_search_regex(
|
||||||
r"<h1.*>(?P<title>.+)</h1>", meta_doc, "title")
|
r'<h1.*>(?P<title>.+)</h1>', meta_doc, 'title')
|
||||||
description = self._html_search_regex(
|
description = self._html_search_regex(
|
||||||
r"<p>(?P<description>.*)</p>", meta_doc, "description", fatal=False)
|
r'<p>(?P<description>.*)</p>', meta_doc, 'description', fatal=False)
|
||||||
duration = parse_duration(self._html_search_regex(
|
duration = parse_duration(self._html_search_regex(
|
||||||
r"Länge:</td>\s+<td>(?P<duration>[0-9]+:[0-9]+)</td>",
|
r'Länge:</td>\s+<td>(?P<duration>[0-9]+:[0-9]+)</td>',
|
||||||
meta_doc, "duration", fatal=False))
|
meta_doc, 'duration', fatal=False))
|
||||||
|
|
||||||
page_doc = self._download_webpage(
|
page_doc = self._download_webpage(
|
||||||
url, video_id, 'Downloading video information')
|
url, video_id, 'Downloading video information')
|
||||||
|
@ -51,7 +51,7 @@ def _real_extract(self, url):
|
||||||
formats = [{
|
formats = [{
|
||||||
'url': video_url,
|
'url': video_url,
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'width': int(mobj.group("width")),
|
'width': int(mobj.group('width')),
|
||||||
}]
|
}]
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
|
|
|
@ -16,9 +16,9 @@ class RadioFranceIE(InfoExtractor):
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'one-one',
|
'id': 'one-one',
|
||||||
'ext': 'ogg',
|
'ext': 'ogg',
|
||||||
"title": "One to one",
|
'title': 'One to one',
|
||||||
"description": "Plutôt que d'imaginer la radio de demain comme technologie ou comme création de contenu, je veux montrer que quelles que soient ses évolutions, j'ai l'intime conviction que la radio continuera d'être un grand média de proximité pour les auditeurs.",
|
'description': "Plutôt que d'imaginer la radio de demain comme technologie ou comme création de contenu, je veux montrer que quelles que soient ses évolutions, j'ai l'intime conviction que la radio continuera d'être un grand média de proximité pour les auditeurs.",
|
||||||
"uploader": "Thomas Hercouët",
|
'uploader': 'Thomas Hercouët',
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -18,11 +18,11 @@ class RBMARadioIE(InfoExtractor):
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'ford-lopatin-live-at-primavera-sound-2011',
|
'id': 'ford-lopatin-live-at-primavera-sound-2011',
|
||||||
'ext': 'mp3',
|
'ext': 'mp3',
|
||||||
"uploader_id": "ford-lopatin",
|
'uploader_id': 'ford-lopatin',
|
||||||
"location": "Spain",
|
'location': 'Spain',
|
||||||
"description": "Joel Ford and Daniel ’Oneohtrix Point Never’ Lopatin fly their midified pop extravaganza to Spain. Live at Primavera Sound 2011.",
|
'description': 'Joel Ford and Daniel ’Oneohtrix Point Never’ Lopatin fly their midified pop extravaganza to Spain. Live at Primavera Sound 2011.',
|
||||||
"uploader": "Ford & Lopatin",
|
'uploader': 'Ford & Lopatin',
|
||||||
"title": "Live at Primavera Sound 2011",
|
'title': 'Live at Primavera Sound 2011',
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -12,12 +12,12 @@ class ReverbNationIE(InfoExtractor):
|
||||||
'url': 'http://www.reverbnation.com/alkilados/song/16965047-mona-lisa',
|
'url': 'http://www.reverbnation.com/alkilados/song/16965047-mona-lisa',
|
||||||
'md5': '3da12ebca28c67c111a7f8b262d3f7a7',
|
'md5': '3da12ebca28c67c111a7f8b262d3f7a7',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
"id": "16965047",
|
'id': '16965047',
|
||||||
"ext": "mp3",
|
'ext': 'mp3',
|
||||||
"title": "MONA LISA",
|
'title': 'MONA LISA',
|
||||||
"uploader": "ALKILADOS",
|
'uploader': 'ALKILADOS',
|
||||||
"uploader_id": "216429",
|
'uploader_id': '216429',
|
||||||
"thumbnail": "re:^https://gp1\.wac\.edgecastcdn\.net/.*?\.jpg$"
|
'thumbnail': 're:^https://gp1\.wac\.edgecastcdn\.net/.*?\.jpg$'
|
||||||
},
|
},
|
||||||
}]
|
}]
|
||||||
|
|
||||||
|
|
|
@ -8,13 +8,13 @@
|
||||||
class RingTVIE(InfoExtractor):
|
class RingTVIE(InfoExtractor):
|
||||||
_VALID_URL = r'http://(?:www\.)?ringtv\.craveonline\.com/(?P<type>news|videos/video)/(?P<id>[^/?#]+)'
|
_VALID_URL = r'http://(?:www\.)?ringtv\.craveonline\.com/(?P<type>news|videos/video)/(?P<id>[^/?#]+)'
|
||||||
_TEST = {
|
_TEST = {
|
||||||
"url": "http://ringtv.craveonline.com/news/310833-luis-collazo-says-victor-ortiz-better-not-quit-on-jan-30",
|
'url': 'http://ringtv.craveonline.com/news/310833-luis-collazo-says-victor-ortiz-better-not-quit-on-jan-30',
|
||||||
"md5": "d25945f5df41cdca2d2587165ac28720",
|
'md5': 'd25945f5df41cdca2d2587165ac28720',
|
||||||
"info_dict": {
|
'info_dict': {
|
||||||
'id': '857645',
|
'id': '857645',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
"title": 'Video: Luis Collazo says Victor Ortiz "better not quit on Jan. 30" - Ring TV',
|
'title': 'Video: Luis Collazo says Victor Ortiz "better not quit on Jan. 30" - Ring TV',
|
||||||
"description": 'Luis Collazo is excited about his Jan. 30 showdown with fellow former welterweight titleholder Victor Ortiz at Barclays Center in his hometown of Brooklyn. The SuperBowl week fight headlines a Golden Boy Live! card on Fox Sports 1.',
|
'description': 'Luis Collazo is excited about his Jan. 30 showdown with fellow former welterweight titleholder Victor Ortiz at Barclays Center in his hometown of Brooklyn. The SuperBowl week fight headlines a Golden Boy Live! card on Fox Sports 1.',
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -32,8 +32,8 @@ def _real_extract(self, url):
|
||||||
description = self._html_search_regex(
|
description = self._html_search_regex(
|
||||||
r'addthis:description="([^"]+)"',
|
r'addthis:description="([^"]+)"',
|
||||||
webpage, 'description', fatal=False)
|
webpage, 'description', fatal=False)
|
||||||
final_url = "http://ringtv.craveonline.springboardplatform.com/storage/ringtv.craveonline.com/conversion/%s.mp4" % video_id
|
final_url = 'http://ringtv.craveonline.springboardplatform.com/storage/ringtv.craveonline.com/conversion/%s.mp4' % video_id
|
||||||
thumbnail_url = "http://ringtv.craveonline.springboardplatform.com/storage/ringtv.craveonline.com/snapshots/%s.jpg" % video_id
|
thumbnail_url = 'http://ringtv.craveonline.springboardplatform.com/storage/ringtv.craveonline.com/snapshots/%s.jpg' % video_id
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
|
|
|
@ -43,7 +43,7 @@ def _real_extract(self, url):
|
||||||
r'<meta name="thumbnail" content="uri:irus:(.*?)" />', webpage, 'thumbnail')
|
r'<meta name="thumbnail" content="uri:irus:(.*?)" />', webpage, 'thumbnail')
|
||||||
thumbnail = 'http://img.rasset.ie/' + thumbnail_id + '.jpg'
|
thumbnail = 'http://img.rasset.ie/' + thumbnail_id + '.jpg'
|
||||||
|
|
||||||
feeds_url = self._html_search_meta("feeds-prefix", webpage, 'feeds url') + video_id
|
feeds_url = self._html_search_meta('feeds-prefix', webpage, 'feeds url') + video_id
|
||||||
json_string = self._download_json(feeds_url, video_id)
|
json_string = self._download_json(feeds_url, video_id)
|
||||||
|
|
||||||
# f4m_url = server + relative_url
|
# f4m_url = server + relative_url
|
||||||
|
|
|
@ -63,7 +63,7 @@ def _real_extract(self, url):
|
||||||
download_url = video_info['streamurl']
|
download_url = video_info['streamurl']
|
||||||
download_url = download_url.replace('\\', '')
|
download_url = download_url.replace('\\', '')
|
||||||
stream_url = 'mp4:' + self._html_search_regex(r'ondemand/(.*)', download_url, 'stream URL')
|
stream_url = 'mp4:' + self._html_search_regex(r'ondemand/(.*)', download_url, 'stream URL')
|
||||||
rtmp_conn = ["S:connect", "O:1", "NS:pageUrl:" + url, "NB:fpad:0", "NN:videoFunction:1", "O:0"]
|
rtmp_conn = ['S:connect', 'O:1', 'NS:pageUrl:' + url, 'NB:fpad:0', 'NN:videoFunction:1', 'O:0']
|
||||||
|
|
||||||
formats = [{
|
formats = [{
|
||||||
'url': download_url,
|
'url': download_url,
|
||||||
|
|
|
@ -40,7 +40,7 @@ def _real_extract(self, url):
|
||||||
re.sub(
|
re.sub(
|
||||||
r'(?s)/\*.*?\*/', '',
|
r'(?s)/\*.*?\*/', '',
|
||||||
self._search_regex(
|
self._search_regex(
|
||||||
r"sources\s*:\s*(\[[^\]]+?\])", playerconfig,
|
r'sources\s*:\s*(\[[^\]]+?\])', playerconfig,
|
||||||
'sources',
|
'sources',
|
||||||
).replace(
|
).replace(
|
||||||
"' + thisObj.options.videoserver + '",
|
"' + thisObj.options.videoserver + '",
|
||||||
|
|
|
@ -15,37 +15,37 @@
|
||||||
|
|
||||||
class SenateISVPIE(InfoExtractor):
|
class SenateISVPIE(InfoExtractor):
|
||||||
_COMM_MAP = [
|
_COMM_MAP = [
|
||||||
["ag", "76440", "http://ag-f.akamaihd.net"],
|
['ag', '76440', 'http://ag-f.akamaihd.net'],
|
||||||
["aging", "76442", "http://aging-f.akamaihd.net"],
|
['aging', '76442', 'http://aging-f.akamaihd.net'],
|
||||||
["approps", "76441", "http://approps-f.akamaihd.net"],
|
['approps', '76441', 'http://approps-f.akamaihd.net'],
|
||||||
["armed", "76445", "http://armed-f.akamaihd.net"],
|
['armed', '76445', 'http://armed-f.akamaihd.net'],
|
||||||
["banking", "76446", "http://banking-f.akamaihd.net"],
|
['banking', '76446', 'http://banking-f.akamaihd.net'],
|
||||||
["budget", "76447", "http://budget-f.akamaihd.net"],
|
['budget', '76447', 'http://budget-f.akamaihd.net'],
|
||||||
["cecc", "76486", "http://srs-f.akamaihd.net"],
|
['cecc', '76486', 'http://srs-f.akamaihd.net'],
|
||||||
["commerce", "80177", "http://commerce1-f.akamaihd.net"],
|
['commerce', '80177', 'http://commerce1-f.akamaihd.net'],
|
||||||
["csce", "75229", "http://srs-f.akamaihd.net"],
|
['csce', '75229', 'http://srs-f.akamaihd.net'],
|
||||||
["dpc", "76590", "http://dpc-f.akamaihd.net"],
|
['dpc', '76590', 'http://dpc-f.akamaihd.net'],
|
||||||
["energy", "76448", "http://energy-f.akamaihd.net"],
|
['energy', '76448', 'http://energy-f.akamaihd.net'],
|
||||||
["epw", "76478", "http://epw-f.akamaihd.net"],
|
['epw', '76478', 'http://epw-f.akamaihd.net'],
|
||||||
["ethics", "76449", "http://ethics-f.akamaihd.net"],
|
['ethics', '76449', 'http://ethics-f.akamaihd.net'],
|
||||||
["finance", "76450", "http://finance-f.akamaihd.net"],
|
['finance', '76450', 'http://finance-f.akamaihd.net'],
|
||||||
["foreign", "76451", "http://foreign-f.akamaihd.net"],
|
['foreign', '76451', 'http://foreign-f.akamaihd.net'],
|
||||||
["govtaff", "76453", "http://govtaff-f.akamaihd.net"],
|
['govtaff', '76453', 'http://govtaff-f.akamaihd.net'],
|
||||||
["help", "76452", "http://help-f.akamaihd.net"],
|
['help', '76452', 'http://help-f.akamaihd.net'],
|
||||||
["indian", "76455", "http://indian-f.akamaihd.net"],
|
['indian', '76455', 'http://indian-f.akamaihd.net'],
|
||||||
["intel", "76456", "http://intel-f.akamaihd.net"],
|
['intel', '76456', 'http://intel-f.akamaihd.net'],
|
||||||
["intlnarc", "76457", "http://intlnarc-f.akamaihd.net"],
|
['intlnarc', '76457', 'http://intlnarc-f.akamaihd.net'],
|
||||||
["jccic", "85180", "http://jccic-f.akamaihd.net"],
|
['jccic', '85180', 'http://jccic-f.akamaihd.net'],
|
||||||
["jec", "76458", "http://jec-f.akamaihd.net"],
|
['jec', '76458', 'http://jec-f.akamaihd.net'],
|
||||||
["judiciary", "76459", "http://judiciary-f.akamaihd.net"],
|
['judiciary', '76459', 'http://judiciary-f.akamaihd.net'],
|
||||||
["rpc", "76591", "http://rpc-f.akamaihd.net"],
|
['rpc', '76591', 'http://rpc-f.akamaihd.net'],
|
||||||
["rules", "76460", "http://rules-f.akamaihd.net"],
|
['rules', '76460', 'http://rules-f.akamaihd.net'],
|
||||||
["saa", "76489", "http://srs-f.akamaihd.net"],
|
['saa', '76489', 'http://srs-f.akamaihd.net'],
|
||||||
["smbiz", "76461", "http://smbiz-f.akamaihd.net"],
|
['smbiz', '76461', 'http://smbiz-f.akamaihd.net'],
|
||||||
["srs", "75229", "http://srs-f.akamaihd.net"],
|
['srs', '75229', 'http://srs-f.akamaihd.net'],
|
||||||
["uscc", "76487", "http://srs-f.akamaihd.net"],
|
['uscc', '76487', 'http://srs-f.akamaihd.net'],
|
||||||
["vetaff", "76462", "http://vetaff-f.akamaihd.net"],
|
['vetaff', '76462', 'http://vetaff-f.akamaihd.net'],
|
||||||
["arch", "", "http://ussenate-f.akamaihd.net/"]
|
['arch', '', 'http://ussenate-f.akamaihd.net/']
|
||||||
]
|
]
|
||||||
_IE_NAME = 'senate.gov'
|
_IE_NAME = 'senate.gov'
|
||||||
_VALID_URL = r'http://www\.senate\.gov/isvp/?\?(?P<qs>.+)'
|
_VALID_URL = r'http://www\.senate\.gov/isvp/?\?(?P<qs>.+)'
|
||||||
|
|
|
@ -13,8 +13,8 @@ class SlutloadIE(InfoExtractor):
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'TD73btpBqSxc',
|
'id': 'TD73btpBqSxc',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
"title": "virginie baisee en cam",
|
'title': 'virginie baisee en cam',
|
||||||
"age_limit": 18,
|
'age_limit': 18,
|
||||||
'thumbnail': 're:https?://.*?\.jpg'
|
'thumbnail': 're:https?://.*?\.jpg'
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -43,7 +43,7 @@ def _real_extract(self, url):
|
||||||
title = self._og_search_title(webpage)
|
title = self._og_search_title(webpage)
|
||||||
|
|
||||||
description = self._og_search_description(webpage)
|
description = self._og_search_description(webpage)
|
||||||
video_url = "http://cdn.videos.snotr.com/%s.flv" % video_id
|
video_url = 'http://cdn.videos.snotr.com/%s.flv' % video_id
|
||||||
|
|
||||||
view_count = str_to_int(self._html_search_regex(
|
view_count = str_to_int(self._html_search_regex(
|
||||||
r'<p>\n<strong>Views:</strong>\n([\d,\.]+)</p>',
|
r'<p>\n<strong>Views:</strong>\n([\d,\.]+)</p>',
|
||||||
|
|
|
@ -222,7 +222,7 @@ def _real_extract(self, url):
|
||||||
full_title = track_id
|
full_title = track_id
|
||||||
token = mobj.group('secret_token')
|
token = mobj.group('secret_token')
|
||||||
if token:
|
if token:
|
||||||
info_json_url += "&secret_token=" + token
|
info_json_url += '&secret_token=' + token
|
||||||
elif mobj.group('player'):
|
elif mobj.group('player'):
|
||||||
query = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
|
query = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
|
||||||
real_url = query['url'][0]
|
real_url = query['url'][0]
|
||||||
|
|
|
@ -22,23 +22,23 @@ class SteamIE(InfoExtractor):
|
||||||
_VIDEO_PAGE_TEMPLATE = 'http://store.steampowered.com/video/%s/'
|
_VIDEO_PAGE_TEMPLATE = 'http://store.steampowered.com/video/%s/'
|
||||||
_AGECHECK_TEMPLATE = 'http://store.steampowered.com/agecheck/video/%s/?snr=1_agecheck_agecheck__age-gate&ageDay=1&ageMonth=January&ageYear=1970'
|
_AGECHECK_TEMPLATE = 'http://store.steampowered.com/agecheck/video/%s/?snr=1_agecheck_agecheck__age-gate&ageDay=1&ageMonth=January&ageYear=1970'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
"url": "http://store.steampowered.com/video/105600/",
|
'url': 'http://store.steampowered.com/video/105600/',
|
||||||
"playlist": [
|
'playlist': [
|
||||||
{
|
{
|
||||||
"md5": "f870007cee7065d7c76b88f0a45ecc07",
|
'md5': 'f870007cee7065d7c76b88f0a45ecc07',
|
||||||
"info_dict": {
|
'info_dict': {
|
||||||
'id': '81300',
|
'id': '81300',
|
||||||
'ext': 'flv',
|
'ext': 'flv',
|
||||||
"title": "Terraria 1.1 Trailer",
|
'title': 'Terraria 1.1 Trailer',
|
||||||
'playlist_index': 1,
|
'playlist_index': 1,
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"md5": "61aaf31a5c5c3041afb58fb83cbb5751",
|
'md5': '61aaf31a5c5c3041afb58fb83cbb5751',
|
||||||
"info_dict": {
|
'info_dict': {
|
||||||
'id': '80859',
|
'id': '80859',
|
||||||
'ext': 'flv',
|
'ext': 'flv',
|
||||||
"title": "Terraria Trailer",
|
'title': 'Terraria Trailer',
|
||||||
'playlist_index': 2,
|
'playlist_index': 2,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -27,10 +27,10 @@ class TenPlayIE(InfoExtractor):
|
||||||
}
|
}
|
||||||
|
|
||||||
_video_fields = [
|
_video_fields = [
|
||||||
"id", "name", "shortDescription", "longDescription", "creationDate",
|
'id', 'name', 'shortDescription', 'longDescription', 'creationDate',
|
||||||
"publishedDate", "lastModifiedDate", "customFields", "videoStillURL",
|
'publishedDate', 'lastModifiedDate', 'customFields', 'videoStillURL',
|
||||||
"thumbnailURL", "referenceId", "length", "playsTotal",
|
'thumbnailURL', 'referenceId', 'length', 'playsTotal',
|
||||||
"playsTrailingWeek", "renditions", "captioning", "startDate", "endDate"]
|
'playsTrailingWeek', 'renditions', 'captioning', 'startDate', 'endDate']
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
webpage = self._download_webpage(url, url)
|
webpage = self._download_webpage(url, url)
|
||||||
|
|
|
@ -48,22 +48,22 @@ class TheSixtyOneIE(InfoExtractor):
|
||||||
]
|
]
|
||||||
|
|
||||||
_DECODE_MAP = {
|
_DECODE_MAP = {
|
||||||
"x": "a",
|
'x': 'a',
|
||||||
"m": "b",
|
'm': 'b',
|
||||||
"w": "c",
|
'w': 'c',
|
||||||
"q": "d",
|
'q': 'd',
|
||||||
"n": "e",
|
'n': 'e',
|
||||||
"p": "f",
|
'p': 'f',
|
||||||
"a": "0",
|
'a': '0',
|
||||||
"h": "1",
|
'h': '1',
|
||||||
"e": "2",
|
'e': '2',
|
||||||
"u": "3",
|
'u': '3',
|
||||||
"s": "4",
|
's': '4',
|
||||||
"i": "5",
|
'i': '5',
|
||||||
"o": "6",
|
'o': '6',
|
||||||
"y": "7",
|
'y': '7',
|
||||||
"r": "8",
|
'r': '8',
|
||||||
"c": "9"
|
'c': '9'
|
||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
|
|
|
@ -38,12 +38,12 @@ def _real_extract(self, url):
|
||||||
|
|
||||||
# Presence of (no)watchplus function indicates HD quality is available
|
# Presence of (no)watchplus function indicates HD quality is available
|
||||||
if re.search(r'function (no)?watchplus()', webpage):
|
if re.search(r'function (no)?watchplus()', webpage):
|
||||||
fvar = "fvarhd"
|
fvar = 'fvarhd'
|
||||||
else:
|
else:
|
||||||
fvar = "fvar"
|
fvar = 'fvar'
|
||||||
|
|
||||||
info_url = "http://www.traileraddict.com/%s.php?tid=%s" % (fvar, str(video_id))
|
info_url = 'http://www.traileraddict.com/%s.php?tid=%s' % (fvar, str(video_id))
|
||||||
info_webpage = self._download_webpage(info_url, video_id, "Downloading the info webpage")
|
info_webpage = self._download_webpage(info_url, video_id, 'Downloading the info webpage')
|
||||||
|
|
||||||
final_url = self._search_regex(r'&fileurl=(.+)',
|
final_url = self._search_regex(r'&fileurl=(.+)',
|
||||||
info_webpage, 'Download url').replace('%3F', '?')
|
info_webpage, 'Download url').replace('%3F', '?')
|
||||||
|
|
|
@ -49,7 +49,7 @@ def _url_for_id(self, video_id, quality=None):
|
||||||
info_url = 'http://v2.tudou.com/f?id=' + compat_str(video_id)
|
info_url = 'http://v2.tudou.com/f?id=' + compat_str(video_id)
|
||||||
if quality:
|
if quality:
|
||||||
info_url += '&hd' + quality
|
info_url += '&hd' + quality
|
||||||
xml_data = self._download_xml(info_url, video_id, "Opening the info XML page")
|
xml_data = self._download_xml(info_url, video_id, 'Opening the info XML page')
|
||||||
final_url = xml_data.text
|
final_url = xml_data.text
|
||||||
return final_url
|
return final_url
|
||||||
|
|
||||||
|
|
|
@@ -47,7 +47,7 @@ def get_session_id():
 title = self._html_search_regex(r'<title>(.*)</title>',
 webpage, 'title').split('/')[0].strip()

-info_url = "http://vbox7.com/play/magare.do"
+info_url = 'http://vbox7.com/play/magare.do'
 data = compat_urllib_parse.urlencode({'as3': '1', 'vid': video_id})
 info_request = sanitized_Request(info_url, data)
 info_request.add_header('Content-Type', 'application/x-www-form-urlencoded')

@@ -26,7 +26,7 @@ def _real_extract(self, url):
 webpage_url = 'http://videopremium.tv/' + video_id
 webpage = self._download_webpage(webpage_url, video_id)

-if re.match(r"^<html><head><script[^>]*>window.location\s*=", webpage):
+if re.match(r'^<html><head><script[^>]*>window.location\s*=', webpage):
 # Download again, we need a cookie
 webpage = self._download_webpage(
 webpage_url, video_id,

@@ -37,10 +37,10 @@ def _real_extract(self, url):

 return {
 'id': video_id,
-'url': "rtmp://e%d.md.iplay.md/play" % random.randint(1, 16),
+'url': 'rtmp://e%d.md.iplay.md/play' % random.randint(1, 16),
-'play_path': "mp4:%s.f4v" % video_id,
+'play_path': 'mp4:%s.f4v' % video_id,
-'page_url': "http://videopremium.tv/" + video_id,
+'page_url': 'http://videopremium.tv/' + video_id,
-'player_url': "http://videopremium.tv/uplayer/uppod.swf",
+'player_url': 'http://videopremium.tv/uplayer/uppod.swf',
 'ext': 'f4v',
 'title': video_title,
 }

@@ -368,16 +368,16 @@ def _real_extract(self, url):
 {'force_feature_id': True}), 'Vimeo')

 # Extract title
-video_title = config["video"]["title"]
+video_title = config['video']['title']

 # Extract uploader and uploader_id
-video_uploader = config["video"]["owner"]["name"]
+video_uploader = config['video']['owner']['name']
-video_uploader_id = config["video"]["owner"]["url"].split('/')[-1] if config["video"]["owner"]["url"] else None
+video_uploader_id = config['video']['owner']['url'].split('/')[-1] if config['video']['owner']['url'] else None

 # Extract video thumbnail
-video_thumbnail = config["video"].get("thumbnail")
+video_thumbnail = config['video'].get('thumbnail')
 if video_thumbnail is None:
-video_thumbs = config["video"].get("thumbs")
+video_thumbs = config['video'].get('thumbs')
 if video_thumbs and isinstance(video_thumbs, dict):
 _, video_thumbnail = sorted((int(width if width.isdigit() else 0), t_url) for (width, t_url) in video_thumbs.items())[-1]

@@ -401,7 +401,7 @@ def _real_extract(self, url):
 self._downloader.report_warning('Cannot find video description')

 # Extract video duration
-video_duration = int_or_none(config["video"].get("duration"))
+video_duration = int_or_none(config['video'].get('duration'))

 # Extract upload date
 video_upload_date = None

@@ -703,10 +703,10 @@ class VimeoLikesIE(InfoExtractor):
 _TEST = {
 'url': 'https://vimeo.com/user755559/likes/',
 'playlist_mincount': 293,
-"info_dict": {
+'info_dict': {
 'id': 'user755559_likes',
-"description": "See all the videos urza likes",
+'description': 'See all the videos urza likes',
-"title": 'Videos urza likes',
+'title': 'Videos urza likes',
 },
 }

@@ -119,7 +119,7 @@ def _real_extract(self, url):
 class VineUserIE(InfoExtractor):
 IE_NAME = 'vine:user'
 _VALID_URL = r'(?:https?://)?vine\.co/(?P<u>u/)?(?P<user>[^/]+)/?(\?.*)?$'
-_VINE_BASE_URL = "https://vine.co/"
+_VINE_BASE_URL = 'https://vine.co/'
 _TESTS = [
 {
 'url': 'https://vine.co/Visa',

@@ -139,7 +139,7 @@ def _real_extract(self, url):
 user = mobj.group('user')
 u = mobj.group('u')

-profile_url = "%sapi/users/profiles/%s%s" % (
+profile_url = '%sapi/users/profiles/%s%s' % (
 self._VINE_BASE_URL, 'vanity/' if not u else '', user)
 profile_data = self._download_json(
 profile_url, user, note='Downloading user profile data')

@@ -147,7 +147,7 @@ def _real_extract(self, url):
 user_id = profile_data['data']['userId']
 timeline_data = []
 for pagenum in itertools.count(1):
-timeline_url = "%sapi/timelines/users/%s?page=%s&size=100" % (
+timeline_url = '%sapi/timelines/users/%s?page=%s&size=100' % (
 self._VINE_BASE_URL, user_id, pagenum)
 timeline_page = self._download_json(
 timeline_url, user, note='Downloading page %d' % pagenum)

@@ -8,12 +8,12 @@
 class WorldStarHipHopIE(InfoExtractor):
 _VALID_URL = r'https?://(?:www|m)\.worldstar(?:candy|hiphop)\.com/(?:videos|android)/video\.php\?v=(?P<id>.*)'
 _TESTS = [{
-"url": "http://www.worldstarhiphop.com/videos/video.php?v=wshh6a7q1ny0G34ZwuIO",
+'url': 'http://www.worldstarhiphop.com/videos/video.php?v=wshh6a7q1ny0G34ZwuIO',
-"md5": "9d04de741161603bf7071bbf4e883186",
+'md5': '9d04de741161603bf7071bbf4e883186',
-"info_dict": {
+'info_dict': {
-"id": "wshh6a7q1ny0G34ZwuIO",
+'id': 'wshh6a7q1ny0G34ZwuIO',
-"ext": "mp4",
+'ext': 'mp4',
-"title": "KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!"
+'title': 'KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!'
 }
 }, {
 'url': 'http://m.worldstarhiphop.com/android/video.php?v=wshh6a7q1ny0G34ZwuIO',

@@ -21,7 +21,7 @@ class WorldStarHipHopIE(InfoExtractor):
 'info_dict': {
 'id': 'wshh6a7q1ny0G34ZwuIO',
 'ext': 'mp4',
-"title": "KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!"
+'title': 'KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!'
 }
 }]

@@ -16,8 +16,8 @@ class YouJizzIE(InfoExtractor):
 'info_dict': {
 'id': '2189178',
 'ext': 'flv',
-"title": "Zeichentrick 1",
+'title': 'Zeichentrick 1',
-"age_limit": 18,
+'age_limit': 18,
 }
 }

@@ -217,7 +217,7 @@ def retrieve_data(req_url, note):
 video_password = self._downloader.params.get('videopassword')

 # request basic data
-basic_data_url = "http://play.youku.com/play/get.json?vid=%s&ct=12" % video_id
+basic_data_url = 'http://play.youku.com/play/get.json?vid=%s&ct=12' % video_id
 if video_password:
 basic_data_url += '&pwd=%s' % video_password

@@ -85,7 +85,7 @@ def _format_option_string(option):
 if option.takes_value():
 opts.append(' %s' % option.metavar)

-return "".join(opts)
+return ''.join(opts)

 def _comma_separated_values_options_callback(option, opt_str, value, parser):
 setattr(parser.values, option.dest, value.split(','))

@@ -19,7 +19,7 @@ def run(self, information):

 cmd = cmd.replace('{}', shlex_quote(information['filepath']))

-self._downloader.to_screen("[exec] Executing command: %s" % cmd)
+self._downloader.to_screen('[exec] Executing command: %s' % cmd)
 retCode = subprocess.call(cmd, shell=True)
 if retCode != 0:
 raise PostProcessingError(

@@ -24,7 +24,7 @@ def format_to_regex(self, fmt):
 '(?P<title>.+)\ \-\ (?P<artist>.+)'
 """
 lastpos = 0
-regex = ""
+regex = ''
 # replace %(..)s with regex group and escape other string parts
 for match in re.finditer(r'%\((\w+)\)s', fmt):
 regex += re.escape(fmt[lastpos:match.start()])

@@ -80,15 +80,15 @@ def write_xattr(path, key, value):
 assert ':' not in key
 assert os.path.exists(path)

-ads_fn = path + ":" + key
+ads_fn = path + ':' + key
 try:
-with open(ads_fn, "wb") as f:
+with open(ads_fn, 'wb') as f:
 f.write(value)
 except EnvironmentError as e:
 raise XAttrMetadataError(e.errno, e.strerror)
 else:
-user_has_setfattr = check_executable("setfattr", ['--version'])
+user_has_setfattr = check_executable('setfattr', ['--version'])
-user_has_xattr = check_executable("xattr", ['-h'])
+user_has_xattr = check_executable('xattr', ['-h'])

 if user_has_setfattr or user_has_xattr:

@@ -150,7 +150,7 @@ def write_xattr(path, key, value):
 value = info.get(infoname)

 if value:
-if infoname == "upload_date":
+if infoname == 'upload_date':
 value = hyphenate_date(value)

 byte_value = value.encode('utf-8')

@@ -31,12 +31,12 @@ def rsa_verify(message, signature, key):
 def update_self(to_screen, verbose, opener):
 """Update the program file with the latest version from the repository"""

-UPDATE_URL = "https://rg3.github.io/youtube-dl/update/"
+UPDATE_URL = 'https://rg3.github.io/youtube-dl/update/'
 VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
 JSON_URL = UPDATE_URL + 'versions.json'
 UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)

-if not isinstance(globals().get('__loader__'), zipimporter) and not hasattr(sys, "frozen"):
+if not isinstance(globals().get('__loader__'), zipimporter) and not hasattr(sys, 'frozen'):
 to_screen('It looks like you installed youtube-dl with a package manager, pip, setup.py or a tarball. Please use that to update.')
 return

@@ -85,7 +85,7 @@ def version_tuple(version_str):

 filename = sys.argv[0]
 # Py2EXE: Filename could be different
-if hasattr(sys, "frozen") and not os.path.isfile(filename):
+if hasattr(sys, 'frozen') and not os.path.isfile(filename):
 if os.path.isfile(filename + '.exe'):
 filename += '.exe'

@@ -94,7 +94,7 @@ def version_tuple(version_str):
 return

 # Py2EXE
-if hasattr(sys, "frozen"):
+if hasattr(sys, 'frozen'):
 exe = os.path.abspath(filename)
 directory = os.path.dirname(exe)
 if not os.access(directory, os.W_OK):

@@ -248,7 +248,7 @@ def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):

 def get_element_by_id(id, html):
 """Return the content of the tag with the specified ID in the passed HTML document"""
-return get_element_by_attribute("id", id, html)
+return get_element_by_attribute('id', id, html)


 def get_element_by_attribute(attribute, value, html):

@@ -994,7 +994,7 @@ def date_from_str(date_str):
 unit += 's'
 delta = datetime.timedelta(**{unit: time})
 return today + delta
-return datetime.datetime.strptime(date_str, "%Y%m%d").date()
+return datetime.datetime.strptime(date_str, '%Y%m%d').date()


 def hyphenate_date(date_str):

@@ -1074,22 +1074,22 @@ def _windows_write_string(s, out):

 GetStdHandle = ctypes.WINFUNCTYPE(
 ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
-(b"GetStdHandle", ctypes.windll.kernel32))
+(b'GetStdHandle', ctypes.windll.kernel32))
 h = GetStdHandle(WIN_OUTPUT_IDS[fileno])

 WriteConsoleW = ctypes.WINFUNCTYPE(
 ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
 ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
-ctypes.wintypes.LPVOID)((b"WriteConsoleW", ctypes.windll.kernel32))
+ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32))
 written = ctypes.wintypes.DWORD(0)

-GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b"GetFileType", ctypes.windll.kernel32))
+GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32))
 FILE_TYPE_CHAR = 0x0002
 FILE_TYPE_REMOTE = 0x8000
 GetConsoleMode = ctypes.WINFUNCTYPE(
 ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
 ctypes.POINTER(ctypes.wintypes.DWORD))(
-(b"GetConsoleMode", ctypes.windll.kernel32))
+(b'GetConsoleMode', ctypes.windll.kernel32))
 INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value

 def not_a_console(handle):

@@ -1387,7 +1387,7 @@ def fix_xml_ampersands(xml_str):
 def setproctitle(title):
 assert isinstance(title, compat_str)
 try:
-libc = ctypes.cdll.LoadLibrary("libc.so.6")
+libc = ctypes.cdll.LoadLibrary('libc.so.6')
 except OSError:
 return
 title_bytes = title.encode('utf-8')

@@ -1427,7 +1427,7 @@ def url_basename(url):

 class HEADRequest(compat_urllib_request.Request):
 def get_method(self):
-return "HEAD"
+return 'HEAD'


 def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):