
PEP8 applied

commit 5f6a1245ff
parent 598c218f7b
Jouke Waleson, 2014-11-23 20:41:03 +01:00

151 changed files with 419 additions and 343 deletions
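Nearly everything below is a mechanical pycodestyle fix rather than a functional change: a space after the # of a comment (E262/E265), two spaces before an inline comment (E261), two blank lines before a top-level def or class (E302), one-line compound statements split over two lines (E701), spaces after commas and none inside braces (E231/E201/E202), and trailing blank lines removed (W391). A minimal before/after sketch of the recurring patterns (toy code, not taken from the commit):

# Before: the style flagged by pep8
#comment without a space (E265)
def before(x):
    if x == 0: return []  # E701: body on the same line as 'if'
    return { 'a' : x }    # E201/E202/E203: stray whitespace in the literal


# After: the style this commit converges on
# comment with a space
def after(x):
    if x == 0:
        return []
    return {'a': x}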

View file

@@ -9,16 +9,17 @@
 BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
 BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in"

+
 def build_completion(opt_parser):
     opts_flag = []
     for group in opt_parser.option_groups:
         for option in group.option_list:
-            #for every long flag
+            # for every long flag
             opts_flag.append(option.get_opt_string())
     with open(BASH_COMPLETION_TEMPLATE) as f:
         template = f.read()
     with open(BASH_COMPLETION_FILE, "w") as f:
-        #just using the special char
+        # just using the special char
         filled_template = template.replace("{{flags}}", " ".join(opts_flag))
         f.write(filled_template)

View file

@@ -233,6 +233,7 @@ def rmtree(path):
 #==============================================================================

+
 class BuildError(Exception):
     def __init__(self, output, code=500):
         self.output = output
@@ -369,7 +370,7 @@ class Builder(PythonBuilder, GITBuilder, YoutubeDLBuilder, DownloadBuilder, Clea
 class BuildHTTPRequestHandler(BaseHTTPRequestHandler):
-    actionDict = { 'build': Builder, 'download': Builder } # They're the same, no more caching.
+    actionDict = {'build': Builder, 'download': Builder}  # They're the same, no more caching.

     def do_GET(self):
         path = urlparse.urlparse(self.path)

View file

@@ -23,6 +23,7 @@
     'batch-file': ['--require-parameter'],
 }

+
 def build_completion(opt_parser):
     commands = []

View file

@@ -11,22 +11,22 @@
 versions_info = json.load(open('update/versions.json'))
 if 'signature' in versions_info:
     del versions_info['signature']

 print('Enter the PKCS1 private key, followed by a blank line:')
 privkey = b''
 while True:
     try:
         line = input()
     except EOFError:
         break
     if line == '':
         break
     privkey += line.encode('ascii') + b'\n'
 privkey = rsa.PrivateKey.load_pkcs1(privkey)

 signature = hexlify(rsa.pkcs1.sign(json.dumps(versions_info, sort_keys=True).encode('utf-8'), privkey, 'SHA-256')).decode()
 print('signature: ' + signature)

 versions_info['signature'] = signature
 json.dump(versions_info, open('update/versions.json', 'w'), indent=4, sort_keys=True)
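The script above only produces the signature; the matching check on the consumer side verifies the hex-encoded signature against the re-serialized JSON with the same rsa library. A minimal sketch, assuming a PEM public key file (the update/updates_key.pem path is hypothetical):

import json
from binascii import unhexlify

import rsa

versions_info = json.load(open('update/versions.json'))
signature = unhexlify(versions_info.pop('signature'))

# The message must be serialized exactly as it was when signed.
message = json.dumps(versions_info, sort_keys=True).encode('utf-8')

pubkey = rsa.PublicKey.load_pkcs1(open('update/updates_key.pem', 'rb').read())  # hypothetical path
try:
    rsa.verify(message, signature, pubkey)  # raises rsa.VerificationError on a bad signature
    print('signature OK')
except rsa.VerificationError:
    print('signature INVALID')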

View file

@@ -5,7 +5,7 @@
 import datetime
 import glob
-import io # For Python 2 compatibilty
+import io  # For Python 2 compatibilty
 import os
 import re

View file

@@ -73,4 +73,3 @@
 with io.open('update/releases.atom', 'w', encoding='utf-8') as atom_file:
     atom_file.write(atom_template)
-

View file

@@ -9,6 +9,7 @@
 import youtube_dl

+
 def main():
     with open('supportedsites.html.in', 'r', encoding='utf-8') as tmplf:
         template = tmplf.read()

View file

@@ -4,7 +4,7 @@
 try:
     import urllib.request as compat_urllib_request
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import urllib2 as compat_urllib_request

 sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
@@ -12,9 +12,9 @@
 sys.stderr.write(u'The new location of the binaries is https://github.com/rg3/youtube-dl/downloads, not the git repository.\n\n')

 try:
     raw_input()
-except NameError: # Python 3
+except NameError:  # Python 3
     input()

 filename = sys.argv[0]

View file

@@ -9,4 +9,4 @@
     "dll_excludes": ['w9xpopen.exe']
 }

-setup(console=['youtube-dl.py'], options={ "py2exe": py2exe_options }, zipfile=None)
+setup(console=['youtube-dl.py'], options={"py2exe": py2exe_options}, zipfile=None)

View file

@@ -4,13 +4,17 @@
 import urllib2
 import json, hashlib

+
 def rsa_verify(message, signature, key):
     from struct import pack
     from hashlib import sha256
     from sys import version_info

+
     def b(x):
-        if version_info[0] == 2: return x
-        else: return x.encode('latin1')
+        if version_info[0] == 2:
+            return x
+        else:
+            return x.encode('latin1')
     assert(type(message) == type(b('')))
     block_size = 0
     n = key[0]
@@ -23,13 +27,17 @@ def b(x):
         raw_bytes.insert(0, pack("B", signature & 0xFF))
         signature >>= 8
     signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
-    if signature[0:2] != b('\x00\x01'): return False
+    if signature[0:2] != b('\x00\x01'):
+        return False
     signature = signature[2:]
-    if not b('\x00') in signature: return False
+    if not b('\x00') in signature:
+        return False
     signature = signature[signature.index(b('\x00'))+1:]
-    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
+    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')):
+        return False
     signature = signature[19:]
-    if signature != sha256(message).digest(): return False
+    if signature != sha256(message).digest():
+        return False
     return True

 sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
@@ -92,7 +100,7 @@ def b(x):
 ping 127.0.0.1 -n 5 -w 1000 > NUL
 move /Y "%s.new" "%s"
 del "%s"
-\n""" %(exe, exe, bat))
+\n""" % (exe, exe, bat))

 b.close()
 os.startfile(bat)

View file

@@ -59,7 +59,7 @@ def __init__(self, override=None):
         params = get_params(override=override)
         super(FakeYDL, self).__init__(params, auto_init=False)
         self.result = []

     def to_screen(self, s, skip_eol=None):
         print(s)
@@ -72,8 +72,10 @@ def download(self, x):
     def expect_warning(self, regex):
         # Silence an expected warning matching a regex
         old_report_warning = self.report_warning

+
         def report_warning(self, message):
-            if re.match(regex, message): return
+            if re.match(regex, message):
+                return
             old_report_warning(message)
         self.report_warning = types.MethodType(report_warning, self)

View file

@@ -266,6 +266,7 @@ def test_prepare_filename(self):
             'ext': 'mp4',
             'width': None,
         }
+
         def fname(templ):
             ydl = YoutubeDL({'outtmpl': templ})
             return ydl.prepare_filename(info)

View file

@@ -32,19 +32,19 @@ def assertMatch(self, url, ie_list):
     def test_youtube_playlist_matching(self):
         assertPlaylist = lambda url: self.assertMatch(url, ['youtube:playlist'])
         assertPlaylist('ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
-        assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q') #585
+        assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q')  # 585
         assertPlaylist('PL63F0C78739B09958')
         assertPlaylist('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
         assertPlaylist('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
         assertPlaylist('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
-        assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') #668
+        assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')  # 668
         self.assertFalse('youtube:playlist' in self.matching_ies('PLtS2H6bU1M'))
         # Top tracks
         assertPlaylist('https://www.youtube.com/playlist?list=MCUS.20142101')

     def test_youtube_matching(self):
         self.assertTrue(YoutubeIE.suitable('PLtS2H6bU1M'))
-        self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668
+        self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012'))  # 668
         self.assertMatch('http://youtu.be/BaW_jenozKc', ['youtube'])
         self.assertMatch('http://www.youtube.com/v/BaW_jenozKc', ['youtube'])
         self.assertMatch('https://youtube.googleapis.com/v/BaW_jenozKc', ['youtube'])

View file

@@ -40,18 +40,22 @@
 RETRIES = 3

+
 class YoutubeDL(youtube_dl.YoutubeDL):
     def __init__(self, *args, **kwargs):
         self.to_stderr = self.to_screen
         self.processed_info_dicts = []
         super(YoutubeDL, self).__init__(*args, **kwargs)
+
     def report_warning(self, message):
         # Don't accept warnings during tests
         raise ExtractorError(message)
+
     def process_info(self, info_dict):
         self.processed_info_dicts.append(info_dict)
         return super(YoutubeDL, self).process_info(info_dict)

+
 def _file_md5(fn):
     with open(fn, 'rb') as f:
         return hashlib.md5(f.read()).hexdigest()
@@ -61,10 +65,13 @@ def _file_md5(fn):
 class TestDownload(unittest.TestCase):
     maxDiff = None

     def setUp(self):
         self.defs = defs

-### Dynamically generate tests
+
+# Dynamically generate tests
+
+
 def generator(test_case):
     def test_template(self):
@@ -101,6 +108,7 @@ def print_skipping(reason):
         ydl = YoutubeDL(params, auto_init=False)
         ydl.add_default_info_extractors()
         finished_hook_called = set()
+
         def _hook(status):
             if status['status'] == 'finished':
                 finished_hook_called.add(status['filename'])
@@ -111,6 +119,7 @@ def get_tc_filename(tc):
             return tc.get('file') or ydl.prepare_filename(tc.get('info_dict', {}))

         res_dict = None
+
         def try_rm_tcs_files(tcs=None):
             if tcs is None:
                 tcs = test_cases
@@ -206,7 +215,7 @@ def try_rm_tcs_files(tcs=None):
     return test_template

-### And add them to TestDownload
+# And add them to TestDownload
 for n, test_case in enumerate(defs):
     test_method = generator(test_case)
     tname = 'test_' + str(test_case['name'])
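The loop above is the standard dynamic-test idiom: generator() closes over a single test case and returns a fresh test method, which is then attached to the TestCase class under a unique name (the setattr step falls outside this hunk). A self-contained sketch of the same idiom, with hypothetical toy cases:

import unittest


class TestDynamic(unittest.TestCase):
    pass


def generator(case):
    def test_template(self):
        # Each generated method closes over its own 'case' value.
        self.assertEqual(case * 2, case + case)
    return test_template


for n, case in enumerate([1, 2, 3]):
    setattr(TestDynamic, 'test_case_%d' % n, generator(case))

if __name__ == '__main__':
    unittest.main()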

View file

@@ -23,6 +23,7 @@
 class BaseTestSubtitles(unittest.TestCase):
     url = None
     IE = None
+
     def setUp(self):
         self.DL = FakeYDL()
         self.ie = self.IE(self.DL)

View file

@@ -120,16 +120,16 @@ def test_ordered_set(self):
         self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
         self.assertEqual(orderedSet([]), [])
         self.assertEqual(orderedSet([1]), [1])
-        #keep the list ordered
+        # keep the list ordered
         self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])

     def test_unescape_html(self):
         self.assertEqual(unescapeHTML('%20;'), '%20;')
         self.assertEqual(
             unescapeHTML('&eacute;'), 'é')

     def test_daterange(self):
-        _20century = DateRange("19000101","20000101")
+        _20century = DateRange("19000101", "20000101")
         self.assertFalse("17890714" in _20century)
         _ac = DateRange("00010101")
         self.assertTrue("19690721" in _ac)

View file

@@ -31,19 +31,18 @@ def __init__(self, *args, **kwargs):
 })
 TEST_ID = 'gr51aVj-mLg'
 ANNOTATIONS_FILE = TEST_ID + '.flv.annotations.xml'
 EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label']

-
 class TestAnnotations(unittest.TestCase):
     def setUp(self):
         # Clear old files
         self.tearDown()

     def test_info_json(self):
-        expected = list(EXPECTED_ANNOTATIONS) #Two annotations could have the same text.
+        expected = list(EXPECTED_ANNOTATIONS)  # Two annotations could have the same text.
         ie = youtube_dl.extractor.YoutubeIE()
         ydl = YoutubeDL(params)
         ydl.add_info_extractor(ie)
@@ -51,7 +50,7 @@ def test_info_json(self):
         self.assertTrue(os.path.exists(ANNOTATIONS_FILE))
         annoxml = None
         with io.open(ANNOTATIONS_FILE, 'r', encoding='utf-8') as annof:
             annoxml = xml.etree.ElementTree.parse(annof)
         self.assertTrue(annoxml is not None, 'Failed to parse annotations XML')
         root = annoxml.getroot()
         self.assertEqual(root.tag, 'document')
@@ -59,18 +58,17 @@ def test_info_json(self):
         self.assertEqual(annotationsTag.tag, 'annotations')
         annotations = annotationsTag.findall('annotation')

-        #Not all the annotations have TEXT children and the annotations are returned unsorted.
+        # Not all the annotations have TEXT children and the annotations are returned unsorted.
         for a in annotations:
             self.assertEqual(a.tag, 'annotation')
             if a.get('type') == 'text':
                 textTag = a.find('TEXT')
                 text = textTag.text
-                self.assertTrue(text in expected) #assertIn only added in python 2.7
-                #remove the first occurance, there could be more than one annotation with the same text
+                self.assertTrue(text in expected)  # assertIn only added in python 2.7
+                # remove the first occurance, there could be more than one annotation with the same text
                 expected.remove(text)
-        #We should have seen (and removed) all the expected annotation texts.
+        # We should have seen (and removed) all the expected annotation texts.
         self.assertEqual(len(expected), 0, 'Not all expected annotations were found.')

-
     def tearDown(self):
         try_rm(ANNOTATIONS_FILE)

View file

@@ -31,7 +31,7 @@ def test_youtube_playlist_noplaylist(self):
         result = ie.extract('https://www.youtube.com/watch?v=FXxLjLQi3Fg&list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
         self.assertEqual(result['_type'], 'url')
         self.assertEqual(YoutubeIE().extract_id(result['url']), 'FXxLjLQi3Fg')

     def test_youtube_course(self):
         dl = FakeYDL()
         ie = YoutubePlaylistIE(dl)

View file

@@ -552,7 +552,7 @@ def extract_info(self, url, download=True, ie_key=None, extra_info={},
             try:
                 ie_result = ie.extract(url)
-                if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
+                if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
                     break
                 if isinstance(ie_result, list):
                     # Backwards compatibility: old IE result format
@@ -565,7 +565,7 @@ def extract_info(self, url, download=True, ie_key=None, extra_info={},
                     return self.process_ie_result(ie_result, download, extra_info)
                 else:
                     return ie_result
-            except ExtractorError as de: # An error we somewhat expected
+            except ExtractorError as de:  # An error we somewhat expected
                 self.report_error(compat_str(de), de.format_traceback())
                 break
             except MaxDownloadsReached:
@@ -700,6 +700,7 @@ def make_result(embedded_info):
             self.report_warning(
                 'Extractor %s returned a compat_list result. '
                 'It needs to be updated.' % ie_result.get('extractor'))
+
             def _fixup(r):
                 self.add_extra_info(r,
                     {
@@ -1010,7 +1011,7 @@ def process_info(self, info_dict):
                 else:
                     self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
                     with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
                         subfile.write(sub)
             except (OSError, IOError):
                 self.report_error('Cannot write subtitles file ' + sub_filename)
                 return
@@ -1111,7 +1112,7 @@ def download(self, url_list):
         for url in url_list:
             try:
-                #It also downloads the videos
+                # It also downloads the videos
                 res = self.extract_info(url)
             except UnavailableVideoError:
                 self.report_error('unable to download video')
@@ -1428,4 +1429,3 @@ def get_encoding(self):
         if encoding is None:
             encoding = preferredencoding()
         return encoding
-

View file

@@ -128,7 +128,6 @@ def _real_main(argv=None):
             compat_print(desc)
         sys.exit(0)

-
     # Conflicting, missing and erroneous options
     if opts.usenetrc and (opts.username is not None or opts.password is not None):
         parser.error('using .netrc conflicts with giving username/password')
@@ -197,7 +196,7 @@ def _real_main(argv=None):
     # In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems)
     if opts.outtmpl is not None:
         opts.outtmpl = opts.outtmpl.decode(preferredencoding())
-    outtmpl =((opts.outtmpl is not None and opts.outtmpl)
+    outtmpl = ((opts.outtmpl is not None and opts.outtmpl)
               or (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s')
              or (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s')
              or (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s')
@@ -317,7 +316,7 @@ def _real_main(argv=None):
             ydl.add_post_processor(FFmpegAudioFixPP())
         ydl.add_post_processor(AtomicParsleyPP())

         # Please keep ExecAfterDownload towards the bottom as it allows the user to modify the final file in any way.
         # So if the user is able to remove the file before your postprocessor runs it might cause a few problems.
         if opts.exec_cmd:

View file

@@ -7,10 +7,11 @@
 BLOCK_SIZE_BYTES = 16

+
 def aes_ctr_decrypt(data, key, counter):
     """
     Decrypt with aes in counter mode

     @param {int[]} data cipher
     @param {int[]} key 16/24/32-Byte cipher key
     @param {instance} counter Instance whose next_value function (@returns {int[]} 16-Byte block)
@@ -19,23 +20,24 @@ def aes_ctr_decrypt(data, key, counter):
     """
     expanded_key = key_expansion(key)
     block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))

-    decrypted_data=[]
+    decrypted_data = []
     for i in range(block_count):
         counter_block = counter.next_value()
-        block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
+        block = data[i*BLOCK_SIZE_BYTES: (i+1)*BLOCK_SIZE_BYTES]
         block += [0]*(BLOCK_SIZE_BYTES - len(block))

         cipher_counter_block = aes_encrypt(counter_block, expanded_key)
         decrypted_data += xor(block, cipher_counter_block)
     decrypted_data = decrypted_data[:len(data)]

     return decrypted_data

+
 def aes_cbc_decrypt(data, key, iv):
     """
     Decrypt with aes in CBC mode

     @param {int[]} data cipher
     @param {int[]} key 16/24/32-Byte cipher key
     @param {int[]} iv 16-Byte IV
@@ -43,60 +45,62 @@ def aes_cbc_decrypt(data, key, iv):
     """
     expanded_key = key_expansion(key)
     block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))

-    decrypted_data=[]
+    decrypted_data = []
     previous_cipher_block = iv
     for i in range(block_count):
-        block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
+        block = data[i*BLOCK_SIZE_BYTES: (i+1)*BLOCK_SIZE_BYTES]
         block += [0]*(BLOCK_SIZE_BYTES - len(block))

         decrypted_block = aes_decrypt(block, expanded_key)
         decrypted_data += xor(decrypted_block, previous_cipher_block)
         previous_cipher_block = block
     decrypted_data = decrypted_data[:len(data)]

     return decrypted_data

+
 def key_expansion(data):
     """
     Generate key schedule

     @param {int[]} data 16/24/32-Byte cipher key
     @returns {int[]} 176/208/240-Byte expanded key
     """
-    data = data[:] # copy
+    data = data[:]  # copy
     rcon_iteration = 1
     key_size_bytes = len(data)
     expanded_key_size_bytes = (key_size_bytes // 4 + 7) * BLOCK_SIZE_BYTES

     while len(data) < expanded_key_size_bytes:
         temp = data[-4:]
         temp = key_schedule_core(temp, rcon_iteration)
         rcon_iteration += 1
-        data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
+        data += xor(temp, data[-key_size_bytes: 4-key_size_bytes])

         for _ in range(3):
             temp = data[-4:]
-            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
+            data += xor(temp, data[-key_size_bytes: 4-key_size_bytes])

         if key_size_bytes == 32:
             temp = data[-4:]
             temp = sub_bytes(temp)
-            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
+            data += xor(temp, data[-key_size_bytes: 4-key_size_bytes])

         for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
             temp = data[-4:]
-            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
+            data += xor(temp, data[-key_size_bytes: 4-key_size_bytes])
     data = data[:expanded_key_size_bytes]

     return data

+
 def aes_encrypt(data, expanded_key):
     """
     Encrypt one block with aes

     @param {int[]} data 16-Byte state
     @param {int[]} expanded_key 176/208/240-Byte expanded key
     @returns {int[]} 16-Byte cipher
     """
     rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
@@ -107,30 +111,32 @@ def aes_encrypt(data, expanded_key):
         data = shift_rows(data)
         if i != rounds:
             data = mix_columns(data)
-        data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
+        data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES: (i+1)*BLOCK_SIZE_BYTES])

     return data

+
 def aes_decrypt(data, expanded_key):
     """
     Decrypt one block with aes

     @param {int[]} data 16-Byte cipher
     @param {int[]} expanded_key 176/208/240-Byte expanded key
     @returns {int[]} 16-Byte state
     """
     rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1

     for i in range(rounds, 0, -1):
-        data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
+        data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES: (i+1)*BLOCK_SIZE_BYTES])
         if i != rounds:
             data = mix_columns_inv(data)
         data = shift_rows_inv(data)
         data = sub_bytes_inv(data)
     data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])

     return data

+
 def aes_decrypt_text(data, password, key_size_bytes):
     """
     Decrypt text
@@ -138,33 +144,34 @@ def aes_decrypt_text(data, password, key_size_bytes):
     - The cipher key is retrieved by encrypting the first 16 Byte of 'password'
       with the first 'key_size_bytes' Bytes from 'password' (if necessary filled with 0's)
     - Mode of operation is 'counter'

     @param {str} data Base64 encoded string
     @param {str,unicode} password Password (will be encoded with utf-8)
     @param {int} key_size_bytes Possible values: 16 for 128-Bit, 24 for 192-Bit or 32 for 256-Bit
     @returns {str} Decrypted data
     """
     NONCE_LENGTH_BYTES = 8

     data = bytes_to_intlist(base64.b64decode(data))
     password = bytes_to_intlist(password.encode('utf-8'))

     key = password[:key_size_bytes] + [0]*(key_size_bytes - len(password))
     key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES)

     nonce = data[:NONCE_LENGTH_BYTES]
     cipher = data[NONCE_LENGTH_BYTES:]

     class Counter:
         __value = nonce + [0]*(BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
+
         def next_value(self):
             temp = self.__value
             self.__value = inc(self.__value)
             return temp

     decrypted_data = aes_ctr_decrypt(cipher, key, Counter())
     plaintext = intlist_to_bytes(decrypted_data)

     return plaintext

 RCON = (0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36)
@@ -200,14 +207,14 @@ def next_value(self):
             0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
             0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
             0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d)
-MIX_COLUMN_MATRIX = ((0x2,0x3,0x1,0x1),
-                     (0x1,0x2,0x3,0x1),
-                     (0x1,0x1,0x2,0x3),
-                     (0x3,0x1,0x1,0x2))
+MIX_COLUMN_MATRIX = ((0x2, 0x3, 0x1, 0x1),
+                     (0x1, 0x2, 0x3, 0x1),
+                     (0x1, 0x1, 0x2, 0x3),
+                     (0x3, 0x1, 0x1, 0x2))
-MIX_COLUMN_MATRIX_INV = ((0xE,0xB,0xD,0x9),
-                         (0x9,0xE,0xB,0xD),
-                         (0xD,0x9,0xE,0xB),
-                         (0xB,0xD,0x9,0xE))
+MIX_COLUMN_MATRIX_INV = ((0xE, 0xB, 0xD, 0x9),
+                         (0x9, 0xE, 0xB, 0xD),
+                         (0xD, 0x9, 0xE, 0xB),
+                         (0xB, 0xD, 0x9, 0xE))
 RIJNDAEL_EXP_TABLE = (0x01, 0x03, 0x05, 0x0F, 0x11, 0x33, 0x55, 0xFF, 0x1A, 0x2E, 0x72, 0x96, 0xA1, 0xF8, 0x13, 0x35,
                       0x5F, 0xE1, 0x38, 0x48, 0xD8, 0x73, 0x95, 0xA4, 0xF7, 0x02, 0x06, 0x0A, 0x1E, 0x22, 0x66, 0xAA,
                       0xE5, 0x34, 0x5C, 0xE4, 0x37, 0x59, 0xEB, 0x26, 0x6A, 0xBE, 0xD9, 0x70, 0x90, 0xAB, 0xE6, 0x31,
@@ -241,30 +248,37 @@ def next_value(self):
                       0x44, 0x11, 0x92, 0xd9, 0x23, 0x20, 0x2e, 0x89, 0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5,
                       0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07)

+
 def sub_bytes(data):
     return [SBOX[x] for x in data]

+
 def sub_bytes_inv(data):
     return [SBOX_INV[x] for x in data]

+
 def rotate(data):
     return data[1:] + [data[0]]

+
 def key_schedule_core(data, rcon_iteration):
     data = rotate(data)
     data = sub_bytes(data)
     data[0] = data[0] ^ RCON[rcon_iteration]

     return data

+
 def xor(data1, data2):
     return [x^y for x, y in zip(data1, data2)]

+
 def rijndael_mul(a, b):
-    if(a==0 or b==0):
+    if(a == 0 or b == 0):
         return 0
     return RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[a] + RIJNDAEL_LOG_TABLE[b]) % 0xFF]

+
 def mix_column(data, matrix):
     data_mixed = []
     for row in range(4):
@@ -275,33 +289,38 @@ def mix_column(data, matrix):
         data_mixed.append(mixed)
     return data_mixed

+
 def mix_columns(data, matrix=MIX_COLUMN_MATRIX):
     data_mixed = []
     for i in range(4):
-        column = data[i*4 : (i+1)*4]
+        column = data[i*4: (i+1)*4]
         data_mixed += mix_column(column, matrix)
     return data_mixed

+
 def mix_columns_inv(data):
     return mix_columns(data, MIX_COLUMN_MATRIX_INV)

+
 def shift_rows(data):
     data_shifted = []
     for column in range(4):
         for row in range(4):
-            data_shifted.append( data[((column + row) & 0b11) * 4 + row] )
+            data_shifted.append(data[((column + row) & 0b11) * 4 + row])
     return data_shifted

+
 def shift_rows_inv(data):
     data_shifted = []
     for column in range(4):
         for row in range(4):
-            data_shifted.append( data[((column - row) & 0b11) * 4 + row] )
+            data_shifted.append(data[((column - row) & 0b11) * 4 + row])
     return data_shifted

+
 def inc(data):
-    data = data[:] # copy
-    for i in range(len(data)-1,-1,-1):
+    data = data[:]  # copy
+    for i in range(len(data)-1, -1, -1):
         if data[i] == 255:
             data[i] = 0
         else:
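A property worth noting about the CTR helper above: counter-mode decryption and encryption are the same XOR-with-keystream operation, so running aes_ctr_decrypt twice with the same key and a fresh counter returns the original data. A small sanity-check sketch, assuming the module lives at youtube_dl/aes.py as in this tree:

from youtube_dl.aes import aes_ctr_decrypt, inc


class Counter(object):
    """Same next_value() interface as the Counter used by aes_decrypt_text."""
    def __init__(self):
        self.value = [0] * 16

    def next_value(self):
        temp = self.value
        self.value = inc(self.value)
        return temp


key = list(range(16))   # a 16-byte (128-bit) key as a list of ints
data = [0x41] * 20      # arbitrary plaintext, deliberately not block-aligned

once = aes_ctr_decrypt(data, key, Counter())
twice = aes_ctr_decrypt(once, key, Counter())
assert twice == data  # XORing with the same keystream twice is the identity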

View file

@@ -10,47 +10,47 @@
 try:
     import urllib.request as compat_urllib_request
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import urllib2 as compat_urllib_request

 try:
     import urllib.error as compat_urllib_error
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import urllib2 as compat_urllib_error

 try:
     import urllib.parse as compat_urllib_parse
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import urllib as compat_urllib_parse

 try:
     from urllib.parse import urlparse as compat_urllib_parse_urlparse
-except ImportError: # Python 2
+except ImportError:  # Python 2
     from urlparse import urlparse as compat_urllib_parse_urlparse

 try:
     import urllib.parse as compat_urlparse
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import urlparse as compat_urlparse

 try:
     import http.cookiejar as compat_cookiejar
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import cookielib as compat_cookiejar

 try:
     import html.entities as compat_html_entities
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import htmlentitydefs as compat_html_entities

 try:
     import html.parser as compat_html_parser
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import HTMLParser as compat_html_parser

 try:
     import http.client as compat_http_client
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import httplib as compat_http_client

 try:
@@ -111,7 +111,7 @@ def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
 try:
     from urllib.parse import parse_qs as compat_parse_qs
-except ImportError: # Python 2
+except ImportError:  # Python 2
     # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
     # Python 2's version is apparently totally broken
@@ -157,12 +157,12 @@ def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
     return parsed_result

 try:
-    compat_str = unicode # Python 2
+    compat_str = unicode  # Python 2
 except NameError:
     compat_str = str

 try:
-    compat_chr = unichr # Python 2
+    compat_chr = unichr  # Python 2
 except NameError:
     compat_chr = chr
@@ -182,8 +182,10 @@ def shlex_quote(s):

 def compat_ord(c):
-    if type(c) is int: return c
-    else: return ord(c)
+    if type(c) is int:
+        return c
+    else:
+        return ord(c)

 if sys.version_info >= (3, 0):
@@ -254,7 +256,7 @@ def compat_expanduser(path):
             drive = ''
         userhome = os.path.join(drive, compat_getenv('HOMEPATH'))

-        if i != 1: #~user
+        if i != 1:  # ~user
             userhome = os.path.join(os.path.dirname(userhome), path[1:i])

         return userhome + path[i:]

View file

@@ -81,7 +81,7 @@ def calc_eta(start, now, total, current):
         if total is None:
             return None
         dif = now - start
-        if current == 0 or dif < 0.001: # One millisecond
+        if current == 0 or dif < 0.001:  # One millisecond
             return None
         rate = float(current) / dif
         return int((float(total) - float(current)) / rate)
@@ -95,7 +95,7 @@ def format_eta(eta):
     @staticmethod
     def calc_speed(start, now, bytes):
         dif = now - start
-        if bytes == 0 or dif < 0.001: # One millisecond
+        if bytes == 0 or dif < 0.001:  # One millisecond
             return None
         return float(bytes) / dif
@@ -108,7 +108,7 @@ def format_speed(speed):
     @staticmethod
     def best_block_size(elapsed_time, bytes):
         new_min = max(bytes / 2.0, 1.0)
-        new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
+        new_max = min(max(bytes * 2.0, 1.0), 4194304)  # Do not surpass 4 MB
         if elapsed_time < 0.001:
             return int(new_max)
         rate = bytes / elapsed_time

View file

@@ -101,4 +101,3 @@ def real_download(self, filename, info_dict):
         })
         self.try_rename(tmpfilename, filename)
         return True
-

View file

@@ -180,7 +180,7 @@ def run_rtmpdump(args):
         while (retval == RD_INCOMPLETE or retval == RD_FAILED) and not test and not live:
             prevsize = os.path.getsize(encodeFilename(tmpfilename))
             self.to_screen('[rtmpdump] %s bytes' % prevsize)
-            time.sleep(5.0) # This seems to be needed
+            time.sleep(5.0)  # This seems to be needed
             retval = run_rtmpdump(basic_args + ['-e'] + [[], ['-k', '1']][retval == RD_FAILED])
             cursize = os.path.getsize(encodeFilename(tmpfilename))
             if prevsize == cursize and retval == RD_FAILED:

View file

@@ -5,6 +5,7 @@
 from .common import InfoExtractor

+
 class AdultSwimIE(InfoExtractor):
     _VALID_URL = r'https?://video\.adultswim\.com/(?P<path>.+?)(?:\.html)?(?:\?.*)?(?:#.*)?$'
     _TEST = {

View file

@@ -1,4 +1,4 @@
-#coding: utf-8
+# coding: utf-8
 from __future__ import unicode_literals

View file

@@ -70,11 +70,13 @@ def _real_extract(self, url):
         uploader_id = mobj.group('company')

         playlist_url = compat_urlparse.urljoin(url, 'includes/playlists/itunes.inc')
+
         def fix_html(s):
             s = re.sub(r'(?s)<script[^<]*?>.*?</script>', '', s)
             s = re.sub(r'<img ([^<]*?)>', r'<img \1/>', s)
             # The ' in the onClick attributes are not escaped, it couldn't be parsed
             # like: http://trailers.apple.com/trailers/wb/gravity/
+
             def _clean_json(m):
                 return 'iTunes.playURL(%s);' % m.group(1).replace('\'', '&#39;')
             s = re.sub(self._JSON_RE, _clean_json, s)

View file

@@ -192,4 +192,3 @@ def _real_extract(self, url):
             'upload_date': upload_date,
             'thumbnail': thumbnail,
         }
-

View file

@@ -13,7 +13,7 @@
     qualities,
 )

 # There are different sources of video in arte.tv, the extraction process
 # is different for each one. The videos usually expire in 7 days, so we can't
 # add tests.

View file

@@ -12,17 +12,17 @@ class AudiomackIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?audiomack\.com/song/(?P<id>[\w/-]+)'
     IE_NAME = 'audiomack'

     _TESTS = [
-        #hosted on audiomack
+        # hosted on audiomack
         {
             'url': 'http://www.audiomack.com/song/roosh-williams/extraordinary',
             'info_dict':
             {
-                'id' : 'roosh-williams/extraordinary',
+                'id': 'roosh-williams/extraordinary',
                 'ext': 'mp3',
                 'title': 'Roosh Williams - Extraordinary'
             }
         },
-        #hosted on soundcloud via audiomack
+        # hosted on soundcloud via audiomack
         {
             'url': 'http://www.audiomack.com/song/xclusiveszone/take-kare',
             'file': '172419696.mp3',
@@ -49,7 +49,7 @@ def _real_extract(self, url):
             raise ExtractorError("Unable to deduce api url of song")
         realurl = api_response["url"]

-        #Audiomack wraps a lot of soundcloud tracks in their branded wrapper
+        # Audiomack wraps a lot of soundcloud tracks in their branded wrapper
         # - if so, pass the work off to the soundcloud extractor
         if SoundcloudIE.suitable(realurl):
             return {'_type': 'url', 'url': realurl, 'ie_key': 'Soundcloud'}

View file

@@ -18,7 +18,7 @@ class BambuserIE(InfoExtractor):
     _TEST = {
         'url': 'http://bambuser.com/v/4050584',
         # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388
-        #u'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
+        # u'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
         'info_dict': {
             'id': '4050584',
             'ext': 'flv',

View file

@@ -83,12 +83,12 @@ def _real_extract(self, url):
             initial_url = mp3_info['url']
             re_url = r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$'
             m_url = re.match(re_url, initial_url)
-            #We build the url we will use to get the final track url
+            # We build the url we will use to get the final track url
             # This url is build in Bandcamp in the script download_bunde_*.js
             request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), video_id, m_url.group('ts'))
             final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url')
             # If we could correctly generate the .rand field the url would be
-            #in the "download_url" key
+            # in the "download_url" key
             final_url = re.search(r'"retry_url":"(.*?)"', final_url_webpage).group(1)

             return {

View file

@@ -220,4 +220,4 @@ def _real_extract(self, url):
             'duration': duration,
             'formats': formats,
             'subtitles': subtitles,
-        }
\ No newline at end of file
+        }

View file

@@ -40,7 +40,7 @@ def _real_extract(self, url):
         title = self._html_search_regex(
             r'<title>([^<]+)\s*-\s*beeg\.?</title>', webpage, 'title')

         description = self._html_search_regex(
             r'<meta name="description" content="([^"]*)"',
             webpage, 'description', fatal=False)

View file

@@ -1,4 +1,4 @@
-#coding: utf-8
+# coding: utf-8
 from __future__ import unicode_literals

 from .common import InfoExtractor

View file

@@ -112,4 +112,4 @@ def _real_extract(self, url):
             'like_count': int(infos.find('NB_LIKES').text),
             'comment_count': int(infos.find('NB_COMMENTS').text),
             'formats': formats,
-        }
\ No newline at end of file
+        }

View file

@@ -84,4 +84,4 @@ def _real_extract(self, url):
             'thumbnail': thumbnail,
             'duration': duration,
             'formats': formats,
-        }
\ No newline at end of file
+        }

View file

@@ -92,7 +92,7 @@ def _real_extract(self, url):
         req.add_header('Referer', url)
         playlist = self._download_xml(req, video_id)

         formats = []
         for i in playlist.find('smilRoot/body'):
             if 'AD' not in i.attrib['id']:

View file

@@ -5,6 +5,7 @@
 from .common import InfoExtractor
 from ..utils import ExtractorError

+
 class Channel9IE(InfoExtractor):
     '''
     Common extractor for channel9.msdn.com.
@@ -31,7 +32,7 @@ class Channel9IE(InfoExtractor):
                 'session_code': 'KOS002',
                 'session_day': 'Day 1',
                 'session_room': 'Arena 1A',
-                'session_speakers': [ 'Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen' ],
+                'session_speakers': ['Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen'],
             },
         },
         {
@@ -44,7 +45,7 @@ class Channel9IE(InfoExtractor):
                 'description': 'md5:d1e6ecaafa7fb52a2cacdf9599829f5b',
                 'duration': 1540,
                 'thumbnail': 'http://video.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
-                'authors': [ 'Mike Wilmot' ],
+                'authors': ['Mike Wilmot'],
             },
         }
     ]
@@ -83,7 +84,7 @@ def _formats_from_html(self, html):
             'format_id': x.group('quality'),
             'format_note': x.group('note'),
             'format': '%s (%s)' % (x.group('quality'), x.group('note')),
-            'filesize': self._restore_bytes(x.group('filesize')), # File size is approximate
+            'filesize': self._restore_bytes(x.group('filesize')),  # File size is approximate
             'preference': self._known_formats.index(x.group('quality')),
             'vcodec': 'none' if x.group('note') == 'Audio only' else None,
         } for x in list(re.finditer(FORMAT_REGEX, html)) if x.group('quality') in self._known_formats]
@@ -202,17 +203,17 @@ def _extract_content(self, html, content_path):
         if slides is not None:
             d = common.copy()
-            d.update({ 'title': title + '-Slides', 'url': slides })
+            d.update({'title': title + '-Slides', 'url': slides})
             result.append(d)

         if zip_ is not None:
             d = common.copy()
-            d.update({ 'title': title + '-Zip', 'url': zip_ })
+            d.update({'title': title + '-Zip', 'url': zip_})
             result.append(d)

         if len(formats) > 0:
             d = common.copy()
-            d.update({ 'title': title, 'formats': formats })
+            d.update({'title': title, 'formats': formats})
             result.append(d)

         return result
@@ -270,5 +271,5 @@ def _real_extract(self, url):
             else:
                 raise ExtractorError('Unexpected WT.entryid %s' % page_type, expected=True)
-        else: # Assuming list
+        else:  # Assuming list
             return self._extract_list(content_path)

View file

@@ -39,6 +39,7 @@ def _real_extract(self, url):
             transform_source=fix_xml_ampersands)

         track_doc = pdoc.find('trackList/track')
+
         def find_param(name):
             node = find_xpath_attr(track_doc, './/param', 'name', name)
             if node is not None:

View file

@@ -423,17 +423,18 @@ def report_login(self):
         """Report attempt to log in."""
         self.to_screen('Logging in')

-    #Methods for following #608
+    # Methods for following #608
     @staticmethod
     def url_result(url, ie=None, video_id=None):
         """Returns a url that points to a page that should be processed"""
-        #TODO: ie should be the class used for getting the info
+        # TODO: ie should be the class used for getting the info
         video_info = {'_type': 'url',
                       'url': url,
                       'ie_key': ie}
         if video_id is not None:
             video_info['id'] = video_id
         return video_info
+
     @staticmethod
     def playlist_result(entries, playlist_id=None, playlist_title=None):
         """Returns a playlist"""
@@ -517,7 +518,7 @@ def _get_login_info(self):
                 raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
             except (IOError, netrc.NetrcParseError) as err:
                 self._downloader.report_warning('parsing .netrc: %s' % compat_str(err))

         return (username, password)

     def _get_tfa_info(self):
@@ -54,7 +54,7 @@ def _real_extract(self, url):
 return {
 'id': video_id,
-'url':video_url,
+'url': video_url,
 'title': title,
 'description': description,
 'timestamp': timestamp,
@@ -62,4 +62,4 @@ def _real_extract(self, url):
 'comment_count': comment_count,
 'height': height,
 'width': width,
 }
@@ -69,11 +69,9 @@ def _login(self):
 login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
 self._download_webpage(login_request, None, False, 'Wrong login info')
 def _real_initialize(self):
 self._login()
 def _decrypt_subtitles(self, data, iv, id):
 data = bytes_to_intlist(data)
 iv = bytes_to_intlist(iv)
@@ -99,8 +97,10 @@ def obfuscate_key(key):
 return shaHash + [0] * 12
 key = obfuscate_key(id)
 class Counter:
 __value = iv
 def next_value(self):
 temp = self.__value
 self.__value = inc(self.__value)
@@ -183,7 +183,7 @@ def ass_bool(strvalue):
 return output
-def _real_extract(self,url):
+def _real_extract(self, url):
 mobj = re.match(self._VALID_URL, url)
 video_id = mobj.group('video_id')
@@ -1,4 +1,4 @@
-#coding: utf-8
+# coding: utf-8
 from __future__ import unicode_literals
 import re
@@ -18,6 +18,7 @@
 unescapeHTML,
 )
 class DailymotionBaseInfoExtractor(InfoExtractor):
 @staticmethod
 def _build_request(url):
@@ -27,6 +28,7 @@ def _build_request(url):
 request.add_header('Cookie', 'ff=off')
 return request
 class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
 """Information Extractor for Dailymotion"""
@@ -26,13 +26,13 @@ def _real_extract(self, url):
 video_id = self._search_regex(
 r"flashvars.pvg_id=\"(\d+)\";",
 webpage, 'ID')
 json_url = ('http://static.videos.gouv.fr/brightcovehub/export/json/'
 + video_id)
 info = self._download_webpage(json_url, title,
 'Downloading JSON config')
 video_url = json.loads(info)['renditions'][0]['url']
 return {'id': video_id,
 'ext': 'mp4',
 'url': video_url,
@@ -27,7 +27,7 @@ def _real_extract(self, url):
 video_id = mobj.group('id')
 info_url = "https://dotsub.com/api/media/%s/metadata" % video_id
 info = self._download_json(info_url, video_id)
 date = time.gmtime(info['dateCreated']/1000) # The timestamp is in miliseconds
 return {
 'id': video_id,
@@ -40,7 +40,7 @@ def _real_extract(self, url):
 info_url = (
 "http://video.fc2.com/ginfo.php?mimi={1:s}&href={2:s}&v={0:s}&fversion=WIN%2011%2C6%2C602%2C180&from=2&otag=0&upid={0:s}&tk=null&".
-format(video_id, mimi, compat_urllib_request.quote(refer, safe='').replace('.','%2E')))
+format(video_id, mimi, compat_urllib_request.quote(refer, safe='').replace('.', '%2E')))
 info_webpage = self._download_webpage(
 info_url, video_id, note='Downloading info page')
@@ -57,4 +57,4 @@ def _real_extract(self, url):
 'duration': int_or_none(duration),
 'like_count': int_or_none(like_count),
 'dislike_count': int_or_none(dislike_count),
 }
@@ -17,8 +17,8 @@ class FlickrIE(InfoExtractor):
 'info_dict': {
 'id': '5645318632',
 'ext': 'mp4',
 "description": "Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.",
 "uploader_id": "forestwander-nature-pictures",
 "title": "Dark Hollow Waterfalls"
 }
 }
@@ -92,4 +92,4 @@ def _real_extract(self, url):
 'duration': duration,
 'age_limit': 18,
 'webpage_url': webpage_url,
 }
@@ -733,7 +733,7 @@ def _playlist_from_matches(matches, getter, ie=None):
 'title': video_title,
 'id': video_id,
 }
 match = re.search(r'(?:id=["\']wistia_|data-wistia-?id=["\']|Wistia\.embed\(["\'])(?P<id>[^"\']+)', webpage)
 if match:
 return {
@@ -1025,4 +1025,3 @@ def filter_video(urls):
 '_type': 'playlist',
 'entries': entries,
 }
@@ -397,4 +397,4 @@ def _real_extract(self, url):
 'uploader_id': uploader_id,
 'like_count': like_count,
 'formats': formats
 }
@@ -69,7 +69,7 @@ def _real_extract(self, url):
 (?:id="[^"]+"\s+)?
 value="([^"]*)"
 ''', webpage))
 if fields['op'] == 'download1':
 post = compat_urllib_parse.urlencode(fields)
@@ -37,7 +37,7 @@ def _real_extract(self, url):
 webpage2 = self._download_webpage(redirect_url, video_id)
 video_url = self._html_search_regex(
 r'flvMask:(.*?);', webpage2, 'video_url')
 duration = parse_duration(self._search_regex(
 r'<strong>Runtime:</strong>\s*([0-9:]+)</div>',
 webpage, 'duration', fatal=False))
@@ -13,7 +13,7 @@ class HowcastIE(InfoExtractor):
 'info_dict': {
 'id': '390161',
 'ext': 'mp4',
 'description': 'The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here\'s the proper way to tie a square knot.',
 'title': 'How to Tie a Square Knot Properly',
 }
 }
@@ -71,7 +71,7 @@ class ImdbListIE(InfoExtractor):
 },
 'playlist_count': 7,
 }
 def _real_extract(self, url):
 list_id = self._match_id(url)
 webpage = self._download_webpage(url, list_id)
@@ -32,7 +32,7 @@ def _build_url(query):
 def _clean_query(query):
 NEEDED_ARGS = ['publishedid', 'customerid']
 query_dic = compat_urlparse.parse_qs(query)
-cleaned_dic = dict((k,v[0]) for (k,v) in query_dic.items() if k in NEEDED_ARGS)
+cleaned_dic = dict((k, v[0]) for (k, v) in query_dic.items() if k in NEEDED_ARGS)
 # Other player ids return m3u8 urls
 cleaned_dic['playerid'] = '247'
 cleaned_dic['videokbrate'] = '100000'
@@ -102,7 +102,7 @@ def _real_extract(self, url):
 compilation = result['compilation']
 title = result['title']
 title = '%s - %s' % (compilation, title) if compilation is not None else title
 previews = result['preview']
 previews.sort(key=lambda fmt: self._known_thumbnails.index(fmt['content_format']))
@@ -152,17 +152,17 @@ def _real_extract(self, url):
 compilation_id = mobj.group('compilationid')
 season_id = mobj.group('seasonid')
 if season_id is not None: # Season link
 season_page = self._download_webpage(url, compilation_id, 'Downloading season %s web page' % season_id)
 playlist_id = '%s/season%s' % (compilation_id, season_id)
 playlist_title = self._html_search_meta('title', season_page, 'title')
 entries = self._extract_entries(season_page, compilation_id)
 else: # Compilation link
 compilation_page = self._download_webpage(url, compilation_id, 'Downloading compilation web page')
 playlist_id = compilation_id
 playlist_title = self._html_search_meta('title', compilation_page, 'title')
 seasons = re.findall(r'<a href="/watch/%s/season(\d+)">[^<]+</a>' % compilation_id, compilation_page)
 if len(seasons) == 0: # No seasons in this compilation
 entries = self._extract_entries(compilation_page, compilation_id)
 else:
 entries = []
@@ -172,4 +172,4 @@ def _real_extract(self, url):
 compilation_id, 'Downloading season %s web page' % season_id)
 entries.extend(self._extract_entries(season_page, compilation_id))
 return self.playlist_result(entries, playlist_id, playlist_title)
@@ -45,4 +45,3 @@ def _real_extract(self, url):
 'title': title,
 'description': description,
 }
@@ -29,7 +29,7 @@ def _real_extract(self, url):
 xml_link = self._html_search_regex(
 r'<param name="flashvars" value="config=(.*?)" />',
 webpage, 'config URL')
 video_id = self._search_regex(
 r'http://www\.jeuxvideo\.com/config/\w+/\d+/(.*?)/\d+_player\.xml',
 xml_link, 'video ID')
@@ -38,7 +38,7 @@ def _real_extract(self, url):
 xml_link, title, 'Downloading XML config')
 info_json = config.find('format.json').text
 info = json.loads(info_json)['versions'][0]
 video_url = 'http://video720.jeuxvideo.com/' + info['file']
 return {
@@ -10,7 +10,7 @@
 class KankanIE(InfoExtractor):
 _VALID_URL = r'https?://(?:.*?\.)?kankan\.com/.+?/(?P<id>\d+)\.shtml'
 _TEST = {
 'url': 'http://yinyue.kankan.com/vod/48/48863.shtml',
 'file': '48863.flv',
@@ -63,4 +63,4 @@ def _real_extract(self, url):
 'duration': duration,
 'view_count': int_or_none(view_count),
 'comment_count': int_or_none(comment_count),
 }
@@ -30,4 +30,3 @@ def _real_extract(self, url):
 'title': title,
 'url': downloadUrl
 }
@@ -75,4 +75,3 @@ def _real_extract(self, url):
 'categories': categories,
 'ext': 'mp4',
 }
@@ -52,7 +52,7 @@ def _real_extract(self, url):
 r'<div class=\'comments\'>\s*<span class=\'counter\'>(\d+)</span>', webpage, 'comment count', fatal=False)
 upload_date = self._html_search_regex(
-r'<time datetime=\'([^\']+)\'>', webpage, 'upload date',fatal=False)
+r'<time datetime=\'([^\']+)\'>', webpage, 'upload date', fatal=False)
 if upload_date is not None:
 upload_date = unified_strdate(upload_date)
@@ -71,4 +71,4 @@ def make_entry(video_id, media, video_number=None):
 if len(videos) == 1:
 return make_entry(video_id, videos[0])
 else:
 return [make_entry(video_id, media, video_number+1) for video_number, media in enumerate(videos)]
@@ -109,7 +109,7 @@ def _login(self):
 'password': password,
 'remember': 'false',
 'stayPut': 'false'
 }
 request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
 login_page = self._download_webpage(request, None, 'Logging in as %s' % username)
@@ -117,7 +117,7 @@ def _login(self):
 m = re.search(r'loginResultJson = \'(?P<json>[^\']+)\';', login_page)
 if m is not None:
 response = m.group('json')
 response_json = json.loads(response)
 state = response_json['state']
 if state == 'notlogged':
@@ -187,7 +187,7 @@ def _real_extract(self, url):
 mobj = re.match(self._VALID_URL, url)
 course_path = mobj.group('coursepath')
 course_id = mobj.group('courseid')
 page = self._download_webpage('http://www.lynda.com/ajax/player?courseId=%s&type=course' % course_id,
 course_id, 'Downloading course JSON')
 course_json = json.loads(page)
@@ -221,4 +221,4 @@ def _real_extract(self, url):
 course_title = course_json['Title']
 return self.playlist_result(entries, course_id, course_title)
@@ -53,4 +53,4 @@ def _real_extract(self, url):
 'duration': duration,
 'view_count': view_count,
 'formats': formats,
 }
@@ -7,6 +7,7 @@
 compat_urllib_parse,
 )
 class MalemotionIE(InfoExtractor):
 _VALID_URL = r'^(?:https?://)?malemotion\.com/video/(.+?)\.(?P<id>.+?)(#|$)'
 _TEST = {
@@ -7,7 +7,7 @@
 class MDRIE(InfoExtractor):
 _VALID_URL = r'^(?P<domain>https?://(?:www\.)?mdr\.de)/(?:.*)/(?P<type>video|audio)(?P<video_id>[^/_]+)(?:_|\.html)'
 # No tests, MDR regularily deletes its videos
 _TEST = {
 'url': 'http://www.mdr.de/fakt/video189002.html',
@@ -55,4 +55,4 @@ def _real_extract(self, url):
 'title': title,
 'thumbnail': thumbnail,
 'duration': duration,
 }
@@ -54,7 +54,7 @@ def _real_extract(self, url):
 title = os.path.splitext(data['fname'])[0]
-#Could be several links with different quality
+# Could be several links with different quality
 links = re.findall(r'"file" : "?(.+?)",', webpage)
 # Assume the links are ordered in quality
 formats = [{
@@ -111,4 +111,4 @@ def _real_extract(self, url):
 'thumbnail': thumbnail,
 'duration': duration,
 'formats': formats,
 }
@@ -72,7 +72,7 @@ def _real_extract(self, url):
 like_count = str_to_int(self._html_search_regex(
 r'<strong>Favorited</strong>\s+([^<]+)<',
 webpage, 'like count', fatal=False))
 upload_date = self._html_search_regex(
 r'<strong>Uploaded</strong>\s+([^<]+)<', webpage, 'upload date')
 if 'Ago' in upload_date:
@@ -27,7 +27,7 @@ def _real_extract(self, url):
 webpage = self._download_webpage(url, video_id)
 jsplayer = self._download_webpage('http://www.moviezine.se/api/player.js?video=%s' % video_id, video_id, 'Downloading js api player')
-formats =[{
+formats = [{
 'format_id': 'sd',
 'url': self._html_search_regex(r'file: "(.+?)",', jsplayer, 'file'),
 'quality': 0,
@@ -24,4 +24,4 @@ class MovShareIE(NovaMovIE):
 'title': 'dissapeared image',
 'description': 'optical illusion dissapeared image magic illusion',
 }
 }
@@ -44,7 +44,7 @@ def _real_extract(self, url):
 r'_([0-9]+)\.[a-zA-Z0-9]+$', src['src'],
 False, default=None)
 vcodec = src['type'].partition('/')[2]
 formats.append({
 'format_id': encoding_id + '-' + vcodec,
 'url': src['src'],
@@ -60,7 +60,7 @@ def _extract_mobile_video_formats(self, mtvn_id):
 url = response.geturl()
 # Transform the url to get the best quality:
 url = re.sub(r'.+pxE=mp4', 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=0+_pxK=18639+_pxE=mp4', url, 1)
-return [{'url': url,'ext': 'mp4'}]
+return [{'url': url, 'ext': 'mp4'}]
 def _extract_video_formats(self, mdoc, mtvn_id):
 if re.match(r'.*/(error_country_block\.swf|geoblock\.mp4)$', mdoc.find('.//src').text) is not None:
@@ -240,7 +240,7 @@ def _real_extract(self, url):
 uri = mobj.groupdict().get('mgid')
 if uri is None:
 webpage = self._download_webpage(url, video_id)
 # Some videos come from Vevo.com
 m_vevo = re.search(r'isVevoVideo = true;.*?vevoVideoId = "(.*?)";',
 webpage, re.DOTALL)
@@ -248,7 +248,7 @@ def _real_extract(self, url):
 vevo_id = m_vevo.group(1);
 self.to_screen('Vevo video detected: %s' % vevo_id)
 return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
 uri = self._html_search_regex(r'/uri/(.*?)\?', webpage, 'uri')
 return self._get_videos_info(uri)
@@ -73,4 +73,3 @@ def _real_extract(self, url):
 'is_live': True,
 'thumbnail': thumbnail,
 }
@@ -72,4 +72,4 @@ def _real_extract(self, url):
 'duration': int_or_none(duration),
 'view_count': int_or_none(view_count),
 'formats': formats,
 }
@@ -37,7 +37,7 @@ def _real_extract(self, url):
 player_info_page = self._download_webpage('http://player.muzu.tv/player/playerInit?ai=%s' % video_id,
 video_id, u'Downloading player info')
 video_info = json.loads(player_info_page)['videos'][0]
-for quality in ['1080' , '720', '480', '360']:
+for quality in ['1080', '720', '480', '360']:
 if video_info.get('v%s' % quality):
 break
@@ -33,7 +33,7 @@ class MyVideoIE(InfoExtractor):
 # Original Code from: https://github.com/dersphere/plugin.video.myvideo_de.git
 # Released into the Public Domain by Tristan Fischer on 2013-05-19
 # https://github.com/rg3/youtube-dl/pull/842
-def __rc4crypt(self,data, key):
+def __rc4crypt(self, data, key):
 x = 0
 box = list(range(256))
 for i in list(range(256)):
@@ -49,10 +49,10 @@ def __rc4crypt(self,data, key):
 out += chr(compat_ord(char) ^ box[(box[x] + box[y]) % 256])
 return out
-def __md5(self,s):
+def __md5(self, s):
 return hashlib.md5(s).hexdigest().encode()
-def _real_extract(self,url):
+def _real_extract(self, url):
 mobj = re.match(self._VALID_URL, url)
 video_id = mobj.group('id')
@@ -173,4 +173,3 @@ def _real_extract(self,url):
 'play_path': video_playpath,
 'player_url': video_swfobj,
 }
@@ -40,7 +40,7 @@ def _real_extract(self, url):
 raise ExtractorError('couldn\'t extract vid and key')
 vid = m_id.group(1)
 key = m_id.group(2)
-query = compat_urllib_parse.urlencode({'vid': vid, 'inKey': key,})
+query = compat_urllib_parse.urlencode({'vid': vid, 'inKey': key, })
 query_urls = compat_urllib_parse.urlencode({
 'masterVid': vid,
 'protocol': 'p2p',
@@ -65,7 +65,7 @@ def _real_extract(self, url):
 if domain.startswith('rtmp'):
 f.update({
 'ext': 'flv',
 'rtmp_protocol': '1', # rtmpt
 })
 formats.append(f)
 self._sort_formats(formats)
@@ -39,7 +39,6 @@ def _real_extract(self, url):
 duration = parse_duration(
 self._html_search_meta('duration', webpage, 'duration', fatal=False))
 return {
 'id': shortened_video_id,
 'url': video_url,
@@ -91,4 +91,4 @@ def _real_extract(self, url):
 'thumbnail': thumbnail,
 'duration': duration,
 'formats': formats,
 }
@@ -23,12 +23,12 @@ def _real_extract(self, url):
 mobj = re.match(self._VALID_URL, url)
 music_id = mobj.group('id')
 webpage = self._download_webpage(url, music_id)
 title = self._html_search_regex(
 r',"name":"([^"]+)",', webpage, 'music title')
 uploader = self._html_search_regex(
 r',"artist":"([^"]+)",', webpage, 'music uploader')
 music_url_json_string = self._html_search_regex(
 r'({"url":"[^"]+"),', webpage, 'music url') + '}'
 music_url_json = json.loads(music_url_json_string)
@@ -89,4 +89,4 @@ def ns(s):
 'thumbnail': thumbnail,
 'duration': duration,
 'formats': formats,
 }
@@ -93,4 +93,4 @@ def extract_thumbnail(media):
 'uploader': uploader,
 'uploader_id': uploader_id,
 'formats': formats,
 }
@@ -31,7 +31,7 @@ def _extract_video(self, info):
 path_url, video_id, 'Downloading final video url')
 video_url = path_doc.find('path').text
 else:
 video_url = initial_video_url
 join = compat_urlparse.urljoin
 return {
@@ -163,4 +163,4 @@ def _real_extract(self, url):
 'uploader_id': uploader_id,
 'duration': duration,
 'formats': formats,
 }
@@ -66,4 +66,4 @@ def _real_extract(self, url):
 'url': video_url,
 'title': title,
 'description': description
 }
@@ -25,4 +25,4 @@ class NowVideoIE(NovaMovIE):
 'title': 'youtubedl test video _BaW_jenozKc.mp4',
 'description': 'Description',
 }
 }
@@ -145,4 +145,4 @@ def _real_extract(self, url):
 'duration': duration,
 'view_count': view_count,
 'formats': formats,
 }
@@ -71,4 +71,4 @@ def _real_extract(self, url):
 'upload_date': upload_date,
 'age_limit': 18,
 'formats': formats,
 }
@@ -74,4 +74,4 @@ def get_file_size(file_size):
 'duration': duration,
 'formats': formats,
 'thumbnails': thumbnails,
 }
@@ -97,4 +97,3 @@ def _real_extract(self, url):
 }
 else:
 return self._extract_result(videos_info[0], videos_more_info)
@@ -178,4 +178,4 @@ def extract_entry_dict(info, title, subtitle):
 'title': data['title'],
 'description': data['subtitle'],
 'entries': entries
 }
@@ -6,6 +6,7 @@
 from .common import InfoExtractor
 from ..utils import int_or_none
 class PodomaticIE(InfoExtractor):
 IE_NAME = 'podomatic'
 _VALID_URL = r'^(?P<proto>https?)://(?P<channel>[^.]+)\.podomatic\.com/entry/(?P<id>[^?]+)'
@@ -56,7 +56,7 @@ def _real_extract(self, url):
 comment_count = self._extract_count(
 r'All comments \(<var class="videoCommentCount">([\d,\.]+)</var>', webpage, 'comment')
-video_urls = list(map(compat_urllib_parse.unquote , re.findall(r'"quality_[0-9]{3}p":"([^"]+)', webpage)))
+video_urls = list(map(compat_urllib_parse.unquote, re.findall(r'"quality_[0-9]{3}p":"([^"]+)', webpage)))
 if webpage.find('"encrypted":true') != -1:
 password = compat_urllib_parse.unquote_plus(self._html_search_regex(r'"video_title":"([^"]+)', webpage, 'password'))
 video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls))
@@ -38,7 +38,7 @@ def _real_extract(self, url):
 video_url = self._search_regex(VIDEO_URL_RE, webpage, 'video url')
 video_url = compat_urllib_parse.unquote(video_url)
-#Get the uploaded date
+# Get the uploaded date
 VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
 upload_date = self._html_search_regex(VIDEO_UPLOADED_RE, webpage, 'upload date', fatal=False)
 if upload_date:
Some files were not shown because too many files have changed in this diff.