
[cleanup] Add more ruff rules (#10149)

Authored by: seproDev

Reviewed-by: bashonly <88596187+bashonly@users.noreply.github.com>
Reviewed-by: Simon Sawicki <contact@grub4k.xyz>
Committed by sepro on 2024-06-12 01:09:58 +02:00 (via GitHub)
Parent: db50f19d76
Commit: add96eb9f8
GPG key ID: B5690EEEBB952194
915 changed files with 7027 additions and 7246 deletions


@@ -266,7 +266,7 @@ ## Adding support for a new site
 $ hatch fmt --check
 ```
-You can use `hatch fmt` to automatically fix problems.
+You can use `hatch fmt` to automatically fix problems. Rules that the linter/formatter enforces should not be disabled with `# noqa` unless a maintainer requests it. The only exception allowed is for old/printf-style string formatting in GraphQL query templates (use `# noqa: UP031`).
 1. Make sure your code works under all [Python](https://www.python.org/) versions supported by yt-dlp, namely CPython and PyPy for Python 3.8 and above. Backward compatibility is not required for even older versions of Python.
 1. When the tests pass, [add](https://git-scm.com/docs/git-add) the new files, [commit](https://git-scm.com/docs/git-commit) them and [push](https://git-scm.com/docs/git-push) the result, like this:
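As a concrete illustration of that sole `# noqa` exception (a hypothetical sketch, not code from this commit): GraphQL queries use literal braces, so `str.format` or f-strings would need awkward escaping, and the printf-style template is kept with a per-line suppression.

```python
# Hypothetical helper; the query and names are illustrative only.
_GRAPHQL_QUERY_TEMPLATE = '''
query {
    video(id: "%s") {
        title
        duration
    }
}'''

def build_video_query(video_id):
    # Literal { } in the query clash with str.format/f-strings,
    # so printf-style formatting stays, silenced per line.
    return _GRAPHQL_QUERY_TEMPLATE % video_id  # noqa: UP031
```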


@@ -44,7 +44,7 @@ def main():
 'Cryptodome',
 # requests >=2.32.0 breaks py2exe builds due to certifi dependency
 'requests',
-'urllib3'
+'urllib3',
 ],
 'dll_excludes': ['w9xpopen.exe', 'crypt32.dll'],
 # Modules that are only imported dynamically must be added here


@@ -68,7 +68,7 @@ def exe(onedir):
 'dist/',
 onedir and f'{name}/',
 name,
-OS_NAME == 'win32' and '.exe'
+OS_NAME == 'win32' and '.exe',
 )))
@@ -113,7 +113,7 @@ def windows_set_version(exe, version):
 ),
 kids=[
 StringFileInfo([StringTable('040904B0', [
-StringStruct('Comments', 'yt-dlp%s Command Line Interface' % suffix),
+StringStruct('Comments', f'yt-dlp{suffix} Command Line Interface'),
 StringStruct('CompanyName', 'https://github.com/yt-dlp'),
 StringStruct('FileDescription', 'yt-dlp%s' % (MACHINE and f' ({MACHINE})')),
 StringStruct('FileVersion', version),
@@ -123,8 +123,8 @@ def windows_set_version(exe, version):
 StringStruct('ProductName', f'yt-dlp{suffix}'),
 StringStruct(
 'ProductVersion', f'{version}{suffix} on Python {platform.python_version()}'),
-])]), VarFileInfo([VarStruct('Translation', [0, 1200])])
-]
+])]), VarFileInfo([VarStruct('Translation', [0, 1200])]),
+],
 ))


@@ -9,8 +9,8 @@
 import yt_dlp
-BASH_COMPLETION_FILE = "completions/bash/yt-dlp"
-BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in"
+BASH_COMPLETION_FILE = 'completions/bash/yt-dlp'
+BASH_COMPLETION_TEMPLATE = 'devscripts/bash-completion.in'
 def build_completion(opt_parser):
@@ -21,9 +21,9 @@ def build_completion(opt_parser):
 opts_flag.append(option.get_opt_string())
 with open(BASH_COMPLETION_TEMPLATE) as f:
 template = f.read()
-with open(BASH_COMPLETION_FILE, "w") as f:
+with open(BASH_COMPLETION_FILE, 'w') as f:
 # just using the special char
-filled_template = template.replace("{{flags}}", " ".join(opts_flag))
+filled_template = template.replace('{{flags}}', ' '.join(opts_flag))
 f.write(filled_template)


@@ -223,10 +223,10 @@ def format_single_change(self, info: CommitInfo):
 return message if not sep else f'{message}{sep}{rest}'
-def _format_message_link(self, message, hash):
-assert message or hash, 'Improperly defined commit message or override'
-message = message if message else hash[:HASH_LENGTH]
-return f'[{message}]({self.repo_url}/commit/{hash})' if hash else message
+def _format_message_link(self, message, commit_hash):
+assert message or commit_hash, 'Improperly defined commit message or override'
+message = message if message else commit_hash[:HASH_LENGTH]
+return f'[{message}]({self.repo_url}/commit/{commit_hash})' if commit_hash else message
 def _format_issues(self, issues):
 return ', '.join(f'[#{issue}]({self.repo_url}/issues/{issue})' for issue in issues)
@@ -356,7 +356,7 @@ def apply_overrides(self, overrides):
 logger.info(f'CHANGE {self._commits[commit.hash]} -> {commit}')
 self._commits[commit.hash] = commit
-self._commits = {key: value for key, value in reversed(self._commits.items())}
+self._commits = dict(reversed(self._commits.items()))
 def groups(self):
 group_dict = defaultdict(list)


@@ -51,7 +51,7 @@ def apply_patch(text, patch):
 ),
 ( # Headings
 r'(?m)^ (\w.+\n)( (?=\w))?',
-r'## \1'
+r'## \1',
 ),
 ( # Fixup `--date` formatting
 rf'(?m)( --date DATE.+({delim}[^\[]+)*)\[.+({delim}.+)*$',
@@ -61,26 +61,26 @@ def apply_patch(text, patch):
 ),
 ( # Do not split URLs
 rf'({delim[:-1]})? (?P<label>\[\S+\] )?(?P<url>https?({delim})?:({delim})?/({delim})?/(({delim})?\S+)+)\s',
-lambda mobj: ''.join((delim, mobj.group('label') or '', re.sub(r'\s+', '', mobj.group('url')), '\n'))
+lambda mobj: ''.join((delim, mobj.group('label') or '', re.sub(r'\s+', '', mobj.group('url')), '\n')),
 ),
 ( # Do not split "words"
 rf'(?m)({delim}\S+)+$',
-lambda mobj: ''.join((delim, mobj.group(0).replace(delim, '')))
+lambda mobj: ''.join((delim, mobj.group(0).replace(delim, ''))),
 ),
 ( # Allow overshooting last line
 rf'(?m)^(?P<prev>.+)${delim}(?P<current>.+)$(?!{delim})',
 lambda mobj: (mobj.group().replace(delim, ' ')
 if len(mobj.group()) - len(delim) + 1 <= max_width + ALLOWED_OVERSHOOT
-else mobj.group())
+else mobj.group()),
 ),
 ( # Avoid newline when a space is available b/w switch and description
 DISABLE_PATCH, # This creates issues with prepare_manpage
 r'(?m)^(\s{4}-.{%d})(%s)' % (switch_col_width - 6, delim),
-r'\1 '
+r'\1 ',
 ),
 ( # Replace brackets with a Markdown link
 r'SponsorBlock API \((http.+)\)',
-r'[SponsorBlock API](\1)'
+r'[SponsorBlock API](\1)',
 ),
 )


@@ -30,7 +30,7 @@ def property_setter(name, value):
 opts = parse_options()
 transform = compose_functions(
 property_setter('VARIANT', opts.variant),
-property_setter('UPDATE_HINT', opts.update_message)
+property_setter('UPDATE_HINT', opts.update_message),
 )
 write_file(VERSION_FILE, transform(read_file(VERSION_FILE)))


@@ -24,7 +24,7 @@ def get_new_version(version, revision):
 else:
 old_version = read_version().split('.')
 if version.split('.') == old_version[:3]:
-revision = str(int((old_version + [0])[3]) + 1)
+revision = str(int(([*old_version, 0])[3]) + 1)
 return f'{version}.{revision}' if revision else version


@@ -9,15 +9,15 @@
 import yt_dlp
-ZSH_COMPLETION_FILE = "completions/zsh/_yt-dlp"
-ZSH_COMPLETION_TEMPLATE = "devscripts/zsh-completion.in"
+ZSH_COMPLETION_FILE = 'completions/zsh/_yt-dlp'
+ZSH_COMPLETION_TEMPLATE = 'devscripts/zsh-completion.in'
 def build_completion(opt_parser):
 opts = [opt for group in opt_parser.option_groups
 for opt in group.option_list]
-opts_file = [opt for opt in opts if opt.metavar == "FILE"]
-opts_dir = [opt for opt in opts if opt.metavar == "DIR"]
+opts_file = [opt for opt in opts if opt.metavar == 'FILE']
+opts_dir = [opt for opt in opts if opt.metavar == 'DIR']
 fileopts = []
 for opt in opts_file:
@@ -38,11 +38,11 @@ def build_completion(opt_parser):
 with open(ZSH_COMPLETION_TEMPLATE) as f:
 template = f.read()
-template = template.replace("{{fileopts}}", "|".join(fileopts))
-template = template.replace("{{diropts}}", "|".join(diropts))
-template = template.replace("{{flags}}", " ".join(flags))
-with open(ZSH_COMPLETION_FILE, "w") as f:
+template = template.replace('{{fileopts}}', '|'.join(fileopts))
+template = template.replace('{{diropts}}', '|'.join(diropts))
+template = template.replace('{{flags}}', ' '.join(flags))
+with open(ZSH_COMPLETION_FILE, 'w') as f:
 f.write(template)


@@ -183,21 +183,84 @@ line-length = 120
 [tool.ruff.lint]
 ignore = [
-"E402", # module level import not at top of file
-"E501", # line too long
-"E731", # do not assign a lambda expression, use a def
-"E741", # ambiguous variable name
+"E402", # module-import-not-at-top-of-file
+"E501", # line-too-long
+"E731", # lambda-assignment
+"E741", # ambiguous-variable-name
+"UP036", # outdated-version-block
+"B006", # mutable-argument-default
+"B008", # function-call-in-default-argument
+"B011", # assert-false
+"B017", # assert-raises-exception
+"B023", # function-uses-loop-variable (false positives)
+"B028", # no-explicit-stacklevel
+"B904", # raise-without-from-inside-except
+"C401", # unnecessary-generator-set
+"C402", # unnecessary-generator-dict
+"PIE790", # unnecessary-placeholder
+"SIM102", # collapsible-if
+"SIM108", # if-else-block-instead-of-if-exp
+"SIM112", # uncapitalized-environment-variables
+"SIM113", # enumerate-for-loop
+"SIM114", # if-with-same-arms
+"SIM115", # open-file-with-context-handler
+"SIM117", # multiple-with-statements
+"SIM223", # expr-and-false
+"SIM300", # yoda-conditions
+"TD001", # invalid-todo-tag
+"TD002", # missing-todo-author
+"TD003", # missing-todo-link
+"PLE0604", # invalid-all-object (false positives)
+"PLW0603", # global-statement
+"PLW1510", # subprocess-run-without-check
+"PLW2901", # redefined-loop-name
+"RUF001", # ambiguous-unicode-character-string
+"RUF012", # mutable-class-default
+"RUF100", # unused-noqa (flake8 has slightly different behavior)
 ]
 select = [
-"E", # pycodestyle errors
-"W", # pycodestyle warnings
-"F", # pyflakes
-"I", # import order
+"E", # pycodestyle Error
+"W", # pycodestyle Warning
+"F", # Pyflakes
+"I", # isort
+"Q", # flake8-quotes
+"N803", # invalid-argument-name
+"N804", # invalid-first-argument-name-for-class-method
+"UP", # pyupgrade
+"B", # flake8-bugbear
+"A", # flake8-builtins
+"COM", # flake8-commas
+"C4", # flake8-comprehensions
+"FA", # flake8-future-annotations
+"ISC", # flake8-implicit-str-concat
+"ICN003", # banned-import-from
+"PIE", # flake8-pie
+"T20", # flake8-print
+"RSE", # flake8-raise
+"RET504", # unnecessary-assign
+"SIM", # flake8-simplify
+"TID251", # banned-api
+"TD", # flake8-todos
+"PLC", # Pylint Convention
+"PLE", # Pylint Error
+"PLW", # Pylint Warning
+"RUF", # Ruff-specific rules
 ]
 [tool.ruff.lint.per-file-ignores]
-"devscripts/lazy_load_template.py" = ["F401"]
-"!yt_dlp/extractor/**.py" = ["I"]
+"devscripts/lazy_load_template.py" = [
+"F401", # unused-import
+]
+"!yt_dlp/extractor/**.py" = [
+"I", # isort
+"ICN003", # banned-import-from
+"T20", # flake8-print
+"A002", # builtin-argument-shadowing
+"C408", # unnecessary-collection-call
+]
+"yt_dlp/jsinterp.py" = [
+"UP031", # printf-string-formatting
+]
 [tool.ruff.lint.isort]
 known-first-party = [
@@ -207,6 +270,50 @@ known-first-party = [
 ]
 relative-imports-order = "closest-to-furthest"
+[tool.ruff.lint.flake8-quotes]
+docstring-quotes = "double"
+multiline-quotes = "single"
+inline-quotes = "single"
+avoid-escape = false
+[tool.ruff.lint.pep8-naming]
+classmethod-decorators = [
+"yt_dlp.utils.classproperty",
+]
+[tool.ruff.lint.flake8-import-conventions]
+banned-from = [
+"base64",
+"datetime",
+"functools",
+"glob",
+"hashlib",
+"itertools",
+"json",
+"math",
+"os",
+"pathlib",
+"random",
+"re",
+"string",
+"sys",
+"time",
+"urllib",
+"uuid",
+"xml",
+]
+[tool.ruff.lint.flake8-tidy-imports.banned-api]
+"yt_dlp.compat.compat_str".msg = "Use `str` instead."
+"yt_dlp.compat.compat_b64decode".msg = "Use `base64.b64decode` instead."
+"yt_dlp.compat.compat_urlparse".msg = "Use `urllib.parse` instead."
+"yt_dlp.compat.compat_parse_qs".msg = "Use `urllib.parse.parse_qs` instead."
+"yt_dlp.compat.compat_urllib_parse_unquote".msg = "Use `urllib.parse.unquote` instead."
+"yt_dlp.compat.compat_urllib_parse_urlencode".msg = "Use `urllib.parse.urlencode` instead."
+"yt_dlp.compat.compat_urllib_parse_urlparse".msg = "Use `urllib.parse.urlparse` instead."
+"yt_dlp.compat.compat_shlex_quote".msg = "Use `yt_dlp.utils.shell_quote` instead."
+"yt_dlp.utils.error_to_compat_str".msg = "Use `str` instead."
 [tool.autopep8]
 max_line_length = 120
 recursive = true
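As a rough sketch of how the new `banned-api` table surfaces in practice (the URL and variable names below are made up, not from this commit): importing one of the banned compat aliases now triggers a TID251 diagnostic carrying the configured `msg`, which names the stdlib replacement.

```python
import urllib.parse

url = 'https://example.com/watch?v=abc123'

# Before: ruff would flag this import with TID251 and the configured message
# "Use `urllib.parse.urlparse` instead."
# from yt_dlp.compat import compat_urllib_parse_urlparse
# hostname = compat_urllib_parse_urlparse(url).hostname

# After: the plain stdlib call the rule points to
hostname = urllib.parse.urlparse(url).hostname
print(hostname)  # example.com
```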


@@ -22,8 +22,8 @@ def handler(request):
 class HandlerWrapper(handler):
 RH_KEY = handler.RH_KEY
-def __init__(self, *args, **kwargs):
-super().__init__(logger=FakeLogger, *args, **kwargs)
+def __init__(self, **kwargs):
+super().__init__(logger=FakeLogger, **kwargs)
 return HandlerWrapper
@@ -54,11 +54,11 @@ def skip_handlers_if(request, handler):
 def pytest_configure(config):
 config.addinivalue_line(
-"markers", "skip_handler(handler): skip test for the given handler",
+'markers', 'skip_handler(handler): skip test for the given handler',
 )
 config.addinivalue_line(
-"markers", "skip_handler_if(handler): skip test for the given handler if condition is true"
+'markers', 'skip_handler_if(handler): skip test for the given handler if condition is true',
 )
 config.addinivalue_line(
-"markers", "skip_handlers_if(handler): skip test for handlers when the condition is true"
+'markers', 'skip_handlers_if(handler): skip test for handlers when the condition is true',
 )


@@ -16,8 +16,8 @@
 import pytest
 is_download_test = pytest.mark.download
 else:
-def is_download_test(testClass):
-return testClass
+def is_download_test(test_class):
+return test_class
 def get_params(override=None):
@@ -45,10 +45,10 @@ def try_rm(filename):
 def report_warning(message, *args, **kwargs):
-'''
+"""
 Print the message to stderr, it will be prefixed with 'WARNING:'
 If stderr is a tty file the 'WARNING:' will be colored
-'''
+"""
 if sys.stderr.isatty() and compat_os_name != 'nt':
 _msg_header = '\033[0;33mWARNING:\033[0m'
 else:
@@ -138,15 +138,14 @@ def expect_value(self, got, expected, field):
 elif isinstance(expected, list) and isinstance(got, list):
 self.assertEqual(
 len(expected), len(got),
-'Expect a list of length %d, but got a list of length %d for field %s' % (
-len(expected), len(got), field))
+f'Expect a list of length {len(expected)}, but got a list of length {len(got)} for field {field}')
 for index, (item_got, item_expected) in enumerate(zip(got, expected)):
 type_got = type(item_got)
 type_expected = type(item_expected)
 self.assertEqual(
 type_expected, type_got,
-'Type mismatch for list item at index %d for field %s, expected %r, got %r' % (
-index, field, type_expected, type_got))
+f'Type mismatch for list item at index {index} for field {field}, '
+f'expected {type_expected!r}, got {type_got!r}')
 expect_value(self, item_got, item_expected, field)
 else:
 if isinstance(expected, str) and expected.startswith('md5:'):
@@ -224,7 +223,7 @@ def sanitize(key, value):
 test_info_dict.pop('display_id')
 # Remove deprecated fields
-for old in YoutubeDL._deprecated_multivalue_fields.keys():
+for old in YoutubeDL._deprecated_multivalue_fields:
 test_info_dict.pop(old, None)
 # release_year may be generated from release_date
@@ -246,11 +245,11 @@ def expect_info_dict(self, got_dict, expected_dict):
 if expected_dict.get('ext'):
 mandatory_fields.extend(('url', 'ext'))
 for key in mandatory_fields:
-self.assertTrue(got_dict.get(key), 'Missing mandatory field %s' % key)
+self.assertTrue(got_dict.get(key), f'Missing mandatory field {key}')
 # Check for mandatory fields that are automatically set by YoutubeDL
 if got_dict.get('_type', 'video') == 'video':
 for key in ['webpage_url', 'extractor', 'extractor_key']:
-self.assertTrue(got_dict.get(key), 'Missing field: %s' % key)
+self.assertTrue(got_dict.get(key), f'Missing field: {key}')
 test_info_dict = sanitize_got_info_dict(got_dict)
@@ -258,7 +257,7 @@ def expect_info_dict(self, got_dict, expected_dict):
 if missing_keys:
 def _repr(v):
 if isinstance(v, str):
-return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'").replace('\n', '\\n')
+return "'{}'".format(v.replace('\\', '\\\\').replace("'", "\\'").replace('\n', '\\n'))
 elif isinstance(v, type):
 return v.__name__
 else:
@@ -275,8 +274,7 @@ def _repr(v):
 write_string(info_dict_str.replace('\n', '\n '), out=sys.stderr)
 self.assertFalse(
 missing_keys,
-'Missing keys in test definition: %s' % (
-', '.join(sorted(missing_keys))))
+'Missing keys in test definition: {}'.format(', '.join(sorted(missing_keys))))
 def assertRegexpMatches(self, text, regexp, msg=None):
@@ -285,9 +283,9 @@ def assertRegexpMatches(self, text, regexp, msg=None):
 else:
 m = re.match(regexp, text)
 if not m:
-note = 'Regexp didn\'t match: %r not found' % (regexp)
+note = f'Regexp didn\'t match: {regexp!r} not found'
 if len(text) < 1000:
-note += ' in %r' % text
+note += f' in {text!r}'
 if msg is None:
 msg = note
 else:
@@ -310,7 +308,7 @@ def assertLessEqual(self, got, expected, msg=None):
 def assertEqual(self, got, expected, msg=None):
-if not (got == expected):
+if got != expected:
 if msg is None:
 msg = f'{got!r} not equal to {expected!r}'
 self.assertTrue(got == expected, msg)


@@ -262,19 +262,19 @@ def test_search_json_ld_realworld(self):
 ''',
 {
 'chapters': [
-{"title": "Explosie Turnhout", "start_time": 70, "end_time": 440},
-{"title": "Jaarwisseling", "start_time": 440, "end_time": 1179},
-{"title": "Natuurbranden Colorado", "start_time": 1179, "end_time": 1263},
-{"title": "Klimaatverandering", "start_time": 1263, "end_time": 1367},
-{"title": "Zacht weer", "start_time": 1367, "end_time": 1383},
-{"title": "Financiële balans", "start_time": 1383, "end_time": 1484},
-{"title": "Club Brugge", "start_time": 1484, "end_time": 1575},
-{"title": "Mentale gezondheid bij topsporters", "start_time": 1575, "end_time": 1728},
-{"title": "Olympische Winterspelen", "start_time": 1728, "end_time": 1873},
-{"title": "Sober oudjaar in Nederland", "start_time": 1873, "end_time": 2079.23}
+{'title': 'Explosie Turnhout', 'start_time': 70, 'end_time': 440},
+{'title': 'Jaarwisseling', 'start_time': 440, 'end_time': 1179},
+{'title': 'Natuurbranden Colorado', 'start_time': 1179, 'end_time': 1263},
+{'title': 'Klimaatverandering', 'start_time': 1263, 'end_time': 1367},
+{'title': 'Zacht weer', 'start_time': 1367, 'end_time': 1383},
+{'title': 'Financiële balans', 'start_time': 1383, 'end_time': 1484},
+{'title': 'Club Brugge', 'start_time': 1484, 'end_time': 1575},
+{'title': 'Mentale gezondheid bij topsporters', 'start_time': 1575, 'end_time': 1728},
+{'title': 'Olympische Winterspelen', 'start_time': 1728, 'end_time': 1873},
+{'title': 'Sober oudjaar in Nederland', 'start_time': 1873, 'end_time': 2079.23},
 ],
-'title': 'Het journaal - Aflevering 365 (Seizoen 2021)'
-}, {}
+'title': 'Het journaal - Aflevering 365 (Seizoen 2021)',
+}, {},
 ),
 (
 # test multiple thumbnails in a list
@@ -301,13 +301,13 @@ def test_search_json_ld_realworld(self):
 'thumbnails': [{'url': 'https://www.rainews.it/cropgd/640x360/dl/img/2021/12/30/1640886376927_GettyImages.jpg'}],
 },
 {},
-)
+),
 ]
 for html, expected_dict, search_json_ld_kwargs in _TESTS:
 expect_dict(
 self,
 self.ie._search_json_ld(html, None, **search_json_ld_kwargs),
-expected_dict
+expected_dict,
 )
 def test_download_json(self):
@@ -366,7 +366,7 @@ def test_parse_html5_media_entries(self):
 'height': 740,
 'tbr': 1500,
 }],
-'thumbnail': '//pics.r18.com/digital/amateur/mgmr105/mgmr105jp.jpg'
+'thumbnail': '//pics.r18.com/digital/amateur/mgmr105/mgmr105jp.jpg',
 })
 # from https://www.csfd.cz/
@@ -419,9 +419,9 @@ def test_parse_html5_media_entries(self):
 'height': 1080,
 }],
 'subtitles': {
-'cs': [{'url': 'https://video.csfd.cz/files/subtitles/163/344/163344115_4c388b.srt'}]
+'cs': [{'url': 'https://video.csfd.cz/files/subtitles/163/344/163344115_4c388b.srt'}],
 },
-'thumbnail': 'https://img.csfd.cz/files/images/film/video/preview/163/344/163344118_748d20.png?h360'
+'thumbnail': 'https://img.csfd.cz/files/images/film/video/preview/163/344/163344118_748d20.png?h360',
 })
 # from https://tamasha.com/v/Kkdjw
@@ -452,7 +452,7 @@ def test_parse_html5_media_entries(self):
 'ext': 'mp4',
 'format_id': '144p',
 'height': 144,
-}]
+}],
 })
 # from https://www.directvnow.com
@@ -470,7 +470,7 @@ def test_parse_html5_media_entries(self):
 'formats': [{
 'ext': 'mp4',
 'url': 'https://cdn.directv.com/content/dam/dtv/prod/website_directvnow-international/videos/DTVN_hdr_HBO_v3.mp4',
-}]
+}],
 })
 # from https://www.directvnow.com
@@ -488,7 +488,7 @@ def test_parse_html5_media_entries(self):
 'formats': [{
 'url': 'https://cdn.directv.com/content/dam/dtv/prod/website_directvnow-international/videos/DTVN_hdr_HBO_v3.mp4',
 'ext': 'mp4',
-}]
+}],
 })
 # from https://www.klarna.com/uk/
@@ -547,8 +547,8 @@ def test_extract_jwplayer_data_realworld(self):
 'id': 'XEgvuql4',
 'formats': [{
 'url': 'rtmp://192.138.214.154/live/sjclive',
-'ext': 'flv'
-}]
+'ext': 'flv',
+}],
 })
 # from https://www.pornoxo.com/videos/7564/striptease-from-sexy-secretary/
@@ -588,8 +588,8 @@ def test_extract_jwplayer_data_realworld(self):
 'thumbnail': 'https://t03.vipstreamservice.com/thumbs/pxo-full/2009-12/14/a4b2157147afe5efa93ce1978e0265289c193874e02597.flv-full-13.jpg',
 'formats': [{
 'url': 'https://cdn.pornoxo.com/key=MF+oEbaxqTKb50P-w9G3nA,end=1489689259,ip=104.199.146.27/ip=104.199.146.27/speed=6573765/buffer=3.0/2009-12/4b2157147afe5efa93ce1978e0265289c193874e02597.flv',
-'ext': 'flv'
-}]
+'ext': 'flv',
+}],
 })
 # from http://www.indiedb.com/games/king-machine/videos
@@ -610,12 +610,12 @@ def test_extract_jwplayer_data_realworld(self):
 'formats': [{
 'url': 'http://cdn.dbolical.com/cache/videos/games/1/50/49678/encode_mp4/king-machine-trailer.mp4',
 'height': 360,
-'ext': 'mp4'
+'ext': 'mp4',
 }, {
 'url': 'http://cdn.dbolical.com/cache/videos/games/1/50/49678/encode720p_mp4/king-machine-trailer.mp4',
 'height': 720,
-'ext': 'mp4'
-}]
+'ext': 'mp4',
+}],
 })
 def test_parse_m3u8_formats(self):
@@ -866,7 +866,7 @@ def test_parse_m3u8_formats(self):
 'height': 1080,
 'vcodec': 'avc1.64002a',
 }],
-{}
+{},
 ),
 (
 'bipbop_16x9',
@@ -990,45 +990,45 @@ def test_parse_m3u8_formats(self):
 'en': [{
 'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/eng/prog_index.m3u8',
 'ext': 'vtt',
-'protocol': 'm3u8_native'
+'protocol': 'm3u8_native',
 }, {
 'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/eng_forced/prog_index.m3u8',
 'ext': 'vtt',
-'protocol': 'm3u8_native'
+'protocol': 'm3u8_native',
 }],
 'fr': [{
 'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/fra/prog_index.m3u8',
 'ext': 'vtt',
-'protocol': 'm3u8_native'
+'protocol': 'm3u8_native',
 }, {
 'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/fra_forced/prog_index.m3u8',
 'ext': 'vtt',
-'protocol': 'm3u8_native'
+'protocol': 'm3u8_native',
 }],
 'es': [{
 'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/spa/prog_index.m3u8',
 'ext': 'vtt',
-'protocol': 'm3u8_native'
+'protocol': 'm3u8_native',
 }, {
 'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/spa_forced/prog_index.m3u8',
 'ext': 'vtt',
-'protocol': 'm3u8_native'
+'protocol': 'm3u8_native',
 }],
 'ja': [{
 'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/jpn/prog_index.m3u8',
 'ext': 'vtt',
-'protocol': 'm3u8_native'
+'protocol': 'm3u8_native',
 }, {
 'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/jpn_forced/prog_index.m3u8',
 'ext': 'vtt',
-'protocol': 'm3u8_native'
+'protocol': 'm3u8_native',
 }],
-}
+},
 ),
 ]
 for m3u8_file, m3u8_url, expected_formats, expected_subs in _TEST_CASES:
-with open('./test/testdata/m3u8/%s.m3u8' % m3u8_file, encoding='utf-8') as f:
+with open(f'./test/testdata/m3u8/{m3u8_file}.m3u8', encoding='utf-8') as f:
 formats, subs = self.ie._parse_m3u8_formats_and_subtitles(
 f.read(), m3u8_url, ext='mp4')
 self.ie._sort_formats(formats)
@@ -1366,14 +1366,14 @@ def test_parse_mpd_formats(self):
 'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
 'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
 'protocol': 'http_dash_segments',
-}
-]
+},
+],
 },
-)
+),
 ]
 for mpd_file, mpd_url, mpd_base_url, expected_formats, expected_subtitles in _TEST_CASES:
-with open('./test/testdata/mpd/%s.mpd' % mpd_file, encoding='utf-8') as f:
+with open(f'./test/testdata/mpd/{mpd_file}.mpd', encoding='utf-8') as f:
 formats, subtitles = self.ie._parse_mpd_formats_and_subtitles(
 compat_etree_fromstring(f.read().encode()),
 mpd_base_url=mpd_base_url, mpd_url=mpd_url)
@@ -1408,7 +1408,7 @@ def test_parse_ism_formats(self):
 'sampling_rate': 48000,
 'channels': 2,
 'bits_per_sample': 16,
-'nal_unit_length_field': 4
+'nal_unit_length_field': 4,
 },
 }, {
 'format_id': 'video-100',
@@ -1431,7 +1431,7 @@ def test_parse_ism_formats(self):
 'codec_private_data': '00000001674D401FDA0544EFFC2D002CBC40000003004000000C03C60CA80000000168EF32C8',
 'channels': 2,
 'bits_per_sample': 16,
-'nal_unit_length_field': 4
+'nal_unit_length_field': 4,
 },
 }, {
 'format_id': 'video-326',
@@ -1454,7 +1454,7 @@ def test_parse_ism_formats(self):
 'codec_private_data': '00000001674D401FDA0241FE23FFC3BC83BA44000003000400000300C03C60CA800000000168EF32C8',
 'channels': 2,
 'bits_per_sample': 16,
-'nal_unit_length_field': 4
+'nal_unit_length_field': 4,
 },
 }, {
 'format_id': 'video-698',
@@ -1477,7 +1477,7 @@ def test_parse_ism_formats(self):
 'codec_private_data': '00000001674D401FDA0350BFB97FF06AF06AD1000003000100000300300F1832A00000000168EF32C8',
 'channels': 2,
 'bits_per_sample': 16,
-'nal_unit_length_field': 4
+'nal_unit_length_field': 4,
 },
 }, {
 'format_id': 'video-1493',
@@ -1500,7 +1500,7 @@ def test_parse_ism_formats(self):
 'codec_private_data': '00000001674D401FDA011C3DE6FFF0D890D871000003000100000300300F1832A00000000168EF32C8',
 'channels': 2,
 'bits_per_sample': 16,
-'nal_unit_length_field': 4
+'nal_unit_length_field': 4,
 },
 }, {
 'format_id': 'video-4482',
@@ -1523,7 +1523,7 @@ def test_parse_ism_formats(self):
 'codec_private_data': '00000001674D401FDA01A816F97FFC1ABC1AB440000003004000000C03C60CA80000000168EF32C8',
 'channels': 2,
 'bits_per_sample': 16,
-'nal_unit_length_field': 4
+'nal_unit_length_field': 4,
 },
 }],
 {
@@ -1538,10 +1538,10 @@ def test_parse_ism_formats(self):
 'duration': 8880746666,
 'timescale': 10000000,
 'fourcc': 'TTML',
-'codec_private_data': ''
-}
-}
-]
+'codec_private_data': '',
+},
+},
+],
 },
 ),
 (
@@ -1571,7 +1571,7 @@ def test_parse_ism_formats(self):
 'sampling_rate': 48000,
 'channels': 2,
 'bits_per_sample': 16,
-'nal_unit_length_field': 4
+'nal_unit_length_field': 4,
 },
 }, {
 'format_id': 'audio_deu_1-224',
@@ -1597,7 +1597,7 @@ def test_parse_ism_formats(self):
 'sampling_rate': 48000,
 'channels': 6,
 'bits_per_sample': 16,
-'nal_unit_length_field': 4
+'nal_unit_length_field': 4,
 },
 }, {
 'format_id': 'video_deu-23',
@@ -1622,7 +1622,7 @@ def test_parse_ism_formats(self):
 'codec_private_data': '000000016742C00CDB06077E5C05A808080A00000300020000030009C0C02EE0177CC6300F142AE00000000168CA8DC8',
 'channels': 2,
 'bits_per_sample': 16,
-'nal_unit_length_field': 4
+'nal_unit_length_field': 4,
 },
 }, {
 'format_id': 'video_deu-403',
@@ -1647,7 +1647,7 @@ def test_parse_ism_formats(self):
 'codec_private_data': '00000001674D4014E98323B602D4040405000003000100000300320F1429380000000168EAECF2',
 'channels': 2,
 'bits_per_sample': 16,
-'nal_unit_length_field': 4
+'nal_unit_length_field': 4,
 },
 }, {
 'format_id': 'video_deu-680',
@@ -1672,7 +1672,7 @@ def test_parse_ism_formats(self):
 'codec_private_data': '00000001674D401EE981405FF2E02D4040405000000300100000030320F162D3800000000168EAECF2',
 'channels': 2,
 'bits_per_sample': 16,
-'nal_unit_length_field': 4
+'nal_unit_length_field': 4,
 },
 }, {
 'format_id': 'video_deu-1253',
@@ -1698,7 +1698,7 @@ def test_parse_ism_formats(self):
 'codec_private_data': '00000001674D401EE981405FF2E02D4040405000000300100000030320F162D3800000000168EAECF2',
 'channels': 2,
 'bits_per_sample': 16,
-'nal_unit_length_field': 4
+'nal_unit_length_field': 4,
 },
 }, {
 'format_id': 'video_deu-2121',
@@ -1723,7 +1723,7 @@ def test_parse_ism_formats(self):
 'codec_private_data': '00000001674D401EECA0601BD80B50101014000003000400000300C83C58B6580000000168E93B3C80',
 'channels': 2,
 'bits_per_sample': 16,
-'nal_unit_length_field': 4
+'nal_unit_length_field': 4,
 },
 }, {
 'format_id': 'video_deu-3275',
@@ -1748,7 +1748,7 @@ def test_parse_ism_formats(self):
 'codec_private_data': '00000001674D4020ECA02802DD80B501010140000003004000000C83C60C65800000000168E93B3C80',
 'channels': 2,
 'bits_per_sample': 16,
-'nal_unit_length_field': 4
+'nal_unit_length_field': 4,
 },
 }, {
 'format_id': 'video_deu-5300',
@@ -1773,7 +1773,7 @@ def test_parse_ism_formats(self):
 'codec_private_data': '00000001674D4028ECA03C0113F2E02D4040405000000300100000030320F18319600000000168E93B3C80',
 'channels': 2,
 'bits_per_sample': 16,
-'nal_unit_length_field': 4
+'nal_unit_length_field': 4,
 },
 }, {
 'format_id': 'video_deu-8079',
@@ -1798,7 +1798,7 @@ def test_parse_ism_formats(self):
 'codec_private_data': '00000001674D4028ECA03C0113F2E02D4040405000000300100000030320F18319600000000168E93B3C80',
 'channels': 2,
 'bits_per_sample': 16,
-'nal_unit_length_field': 4
+'nal_unit_length_field': 4,
 },
 }],
 {},
@@ -1806,7 +1806,7 @@ def test_parse_ism_formats(self):
 ]
 for ism_file, ism_url, expected_formats, expected_subtitles in _TEST_CASES:
-with open('./test/testdata/ism/%s.Manifest' % ism_file, encoding='utf-8') as f:
+with open(f'./test/testdata/ism/{ism_file}.Manifest', encoding='utf-8') as f:
 formats, subtitles = self.ie._parse_ism_formats_and_subtitles(
 compat_etree_fromstring(f.read().encode()), ism_url=ism_url)
 self.ie._sort_formats(formats)
@@ -1827,12 +1827,12 @@ def test_parse_f4m_formats(self):
 'tbr': 2148,
 'width': 1280,
 'height': 720,
-}]
+}],
 ),
 ]
 for f4m_file, f4m_url, expected_formats in _TEST_CASES:
-with open('./test/testdata/f4m/%s.f4m' % f4m_file, encoding='utf-8') as f:
+with open(f'./test/testdata/f4m/{f4m_file}.f4m', encoding='utf-8') as f:
 formats = self.ie._parse_f4m_formats(
 compat_etree_fromstring(f.read().encode()),
 f4m_url, None)
@@ -1873,13 +1873,13 @@ def test_parse_xspf(self):
 }, {
 'manifest_url': 'https://example.org/src/foo_xspf.xspf',
 'url': 'https://example.com/track3.mp3',
-}]
-}]
+}],
+}],
 ),
 ]
 for xspf_file, xspf_url, expected_entries in _TEST_CASES:
-with open('./test/testdata/xspf/%s.xspf' % xspf_file, encoding='utf-8') as f:
+with open(f'./test/testdata/xspf/{xspf_file}.xspf', encoding='utf-8') as f:
 entries = self.ie._parse_xspf(
 compat_etree_fromstring(f.read().encode()),
 xspf_file, xspf_url=xspf_url, xspf_base_url=xspf_url)
@@ -1902,7 +1902,7 @@ def test_response_with_expected_status_returns_content(self):
 server_thread.start()
 (content, urlh) = self.ie._download_webpage_handle(
-'http://127.0.0.1:%d/teapot' % port, None,
+f'http://127.0.0.1:{port}/teapot', None,
 expected_status=TEAPOT_RESPONSE_STATUS)
 self.assertEqual(content, TEAPOT_RESPONSE_BODY)


@@ -8,6 +8,7 @@
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+import contextlib
 import copy
 import json
@@ -129,8 +130,8 @@ def test(inp, *expected, multi=False):
 'allow_multiple_audio_streams': multi,
 })
 ydl.process_ie_result(info_dict.copy())
-downloaded = map(lambda x: x['format_id'], ydl.downloaded_info_dicts)
-self.assertEqual(list(downloaded), list(expected))
+downloaded = [x['format_id'] for x in ydl.downloaded_info_dicts]
+self.assertEqual(downloaded, list(expected))
 test('20/47', '47')
 test('20/71/worst', '35')
@@ -515,10 +516,8 @@ def test_format_filtering(self):
 self.assertEqual(downloaded_ids, ['D', 'C', 'B'])
 ydl = YDL({'format': 'best[height<40]'})
-try:
+with contextlib.suppress(ExtractorError):
 ydl.process_ie_result(info_dict)
-except ExtractorError:
-pass
 self.assertEqual(ydl.downloaded_info_dicts, [])
 def test_default_format_spec(self):
@@ -652,8 +651,8 @@ def test_add_extra_info(self):
 'formats': [
 {'id': 'id 1', 'height': 1080, 'width': 1920},
 {'id': 'id 2', 'height': 720},
-{'id': 'id 3'}
-]
+{'id': 'id 3'},
+],
 }
 def test_prepare_outtmpl_and_filename(self):
@@ -773,7 +772,7 @@ def expect_same_infodict(out):
 test('%(formats)j', (json.dumps(FORMATS), None))
 test('%(formats)#j', (
 json.dumps(FORMATS, indent=4),
-json.dumps(FORMATS, indent=4).replace(':', '').replace('"', "").replace('\n', ' ')
+json.dumps(FORMATS, indent=4).replace(':', '').replace('"', '').replace('\n', ' '),
 ))
 test('%(title5).3B', 'á')
 test('%(title5)U', 'áéí 𝐀')
@@ -843,8 +842,8 @@ def gen():
 # Empty filename
 test('%(foo|)s-%(bar|)s.%(ext)s', '-.mp4')
-# test('%(foo|)s.%(ext)s', ('.mp4', '_.mp4')) # fixme
-# test('%(foo|)s', ('', '_')) # fixme
+# test('%(foo|)s.%(ext)s', ('.mp4', '_.mp4')) # FIXME: ?
+# test('%(foo|)s', ('', '_')) # FIXME: ?
 # Environment variable expansion for prepare_filename
 os.environ['__yt_dlp_var'] = 'expanded'
@@ -861,7 +860,7 @@ def gen():
 test('Hello %(title1)s', 'Hello $PATH')
 test('Hello %(title2)s', 'Hello %PATH%')
 test('%(title3)s', ('foo/bar\\test', 'foobartest'))
-test('folder/%(title3)s', ('folder/foo/bar\\test', 'folder%sfoobartest' % os.path.sep))
+test('folder/%(title3)s', ('folder/foo/bar\\test', f'folder{os.path.sep}foobartest'))
 def test_format_note(self):
 ydl = YoutubeDL()
@@ -883,22 +882,22 @@ def run(self, info):
 f.write('EXAMPLE')
 return [info['filepath']], info
-def run_pp(params, PP):
+def run_pp(params, pp):
 with open(filename, 'w') as f:
 f.write('EXAMPLE')
 ydl = YoutubeDL(params)
-ydl.add_post_processor(PP())
+ydl.add_post_processor(pp())
 ydl.post_process(filename, {'filepath': filename})
 run_pp({'keepvideo': True}, SimplePP)
-self.assertTrue(os.path.exists(filename), '%s doesn\'t exist' % filename)
-self.assertTrue(os.path.exists(audiofile), '%s doesn\'t exist' % audiofile)
+self.assertTrue(os.path.exists(filename), f'{filename} doesn\'t exist')
+self.assertTrue(os.path.exists(audiofile), f'{audiofile} doesn\'t exist')
 os.unlink(filename)
 os.unlink(audiofile)
 run_pp({'keepvideo': False}, SimplePP)
-self.assertFalse(os.path.exists(filename), '%s exists' % filename)
-self.assertTrue(os.path.exists(audiofile), '%s doesn\'t exist' % audiofile)
+self.assertFalse(os.path.exists(filename), f'{filename} exists')
+self.assertTrue(os.path.exists(audiofile), f'{audiofile} doesn\'t exist')
 os.unlink(audiofile)
 class ModifierPP(PostProcessor):
@@ -908,7 +907,7 @@ def run(self, info):
 return [], info
 run_pp({'keepvideo': False}, ModifierPP)
-self.assertTrue(os.path.exists(filename), '%s doesn\'t exist' % filename)
+self.assertTrue(os.path.exists(filename), f'{filename} doesn\'t exist')
 os.unlink(filename)
 def test_match_filter(self):
@@ -920,7 +919,7 @@ def test_match_filter(self):
 'duration': 30,
 'filesize': 10 * 1024,
 'playlist_id': '42',
-'uploader': "變態妍字幕版 太妍 тест",
+'uploader': '變態妍字幕版 太妍 тест',
 'creator': "тест ' 123 ' тест--",
 'webpage_url': 'http://example.com/watch?v=shenanigans',
 }
@@ -933,7 +932,7 @@ def test_match_filter(self):
 'description': 'foo',
 'filesize': 5 * 1024,
 'playlist_id': '43',
-'uploader': "тест 123",
+'uploader': 'тест 123',
 'webpage_url': 'http://example.com/watch?v=SHENANIGANS',
 }
 videos = [first, second]
@@ -1180,7 +1179,7 @@ def _real_extract(self, url):
 })
 return {
 'id': video_id,
-'title': 'Video %s' % video_id,
+'title': f'Video {video_id}',
 'formats': formats,
 }
@@ -1194,8 +1193,8 @@ def _entries(self):
 '_type': 'url_transparent',
 'ie_key': VideoIE.ie_key(),
 'id': video_id,
-'url': 'video:%s' % video_id,
-'title': 'Video Transparent %s' % video_id,
+'url': f'video:{video_id}',
+'title': f'Video Transparent {video_id}',
 }
 def _real_extract(self, url):


@@ -87,7 +87,7 @@ def test_decrypt_text(self):
 password = intlist_to_bytes(self.key).decode()
 encrypted = base64.b64encode(
 intlist_to_bytes(self.iv[:8])
-+ b'\x17\x15\x93\xab\x8d\x80V\xcdV\xe0\t\xcdo\xc2\xa5\xd8ksM\r\xe27N\xae'
++ b'\x17\x15\x93\xab\x8d\x80V\xcdV\xe0\t\xcdo\xc2\xa5\xd8ksM\r\xe27N\xae',
 ).decode()
 decrypted = (aes_decrypt_text(encrypted, password, 16))
 self.assertEqual(decrypted, self.secret_msg)
@@ -95,7 +95,7 @@ def test_decrypt_text(self):
 password = intlist_to_bytes(self.key).decode()
 encrypted = base64.b64encode(
 intlist_to_bytes(self.iv[:8])
-+ b'\x0b\xe6\xa4\xd9z\x0e\xb8\xb9\xd0\xd4i_\x85\x1d\x99\x98_\xe5\x80\xe7.\xbf\xa5\x83'
++ b'\x0b\xe6\xa4\xd9z\x0e\xb8\xb9\xd0\xd4i_\x85\x1d\x99\x98_\xe5\x80\xe7.\xbf\xa5\x83',
 ).decode()
 decrypted = (aes_decrypt_text(encrypted, password, 32))
 self.assertEqual(decrypted, self.secret_msg)
@@ -132,16 +132,16 @@ def test_pad_block(self):
 block = [0x21, 0xA0, 0x43, 0xFF]
 self.assertEqual(pad_block(block, 'pkcs7'),
-block + [0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C])
+[*block, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C])
 self.assertEqual(pad_block(block, 'iso7816'),
-block + [0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])
+[*block, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])
 self.assertEqual(pad_block(block, 'whitespace'),
-block + [0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20])
+[*block, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20])
 self.assertEqual(pad_block(block, 'zero'),
-block + [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])
+[*block, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])
 block = list(range(16))
 for mode in ('pkcs7', 'iso7816', 'whitespace', 'zero'):


@@ -15,8 +15,8 @@
 from yt_dlp.compat import (
 compat_etree_fromstring,
 compat_expanduser,
-compat_urllib_parse_unquote,
-compat_urllib_parse_urlencode,
+compat_urllib_parse_unquote, # noqa: TID251
+compat_urllib_parse_urlencode, # noqa: TID251
 )
 from yt_dlp.compat.urllib.request import getproxies
@@ -24,15 +24,15 @@
 class TestCompat(unittest.TestCase):
 def test_compat_passthrough(self):
 with self.assertWarns(DeprecationWarning):
-compat.compat_basestring
+_ = compat.compat_basestring
 with self.assertWarns(DeprecationWarning):
-compat.WINDOWS_VT_MODE
+_ = compat.WINDOWS_VT_MODE
 self.assertEqual(urllib.request.getproxies, getproxies)
 with self.assertWarns(DeprecationWarning):
-compat.compat_pycrypto_AES # Must not raise error
+_ = compat.compat_pycrypto_AES # Must not raise error
 def test_compat_expanduser(self):
 old_home = os.environ.get('HOME')


@@ -71,7 +71,7 @@ def _generate_expected_groups():
 Path('/etc/yt-dlp.conf'),
 Path('/etc/yt-dlp/config'),
 Path('/etc/yt-dlp/config.txt'),
-]
+],
 }


@ -106,7 +106,7 @@ def test_chrome_cookie_decryptor_linux_v11(self):
def test_chrome_cookie_decryptor_windows_v10(self): def test_chrome_cookie_decryptor_windows_v10(self):
with MonkeyPatch(cookies, { with MonkeyPatch(cookies, {
'_get_windows_v10_key': lambda *args, **kwargs: b'Y\xef\xad\xad\xeerp\xf0Y\xe6\x9b\x12\xc2<z\x16]\n\xbb\xb8\xcb\xd7\x9bA\xc3\x14e\x99{\xd6\xf4&' '_get_windows_v10_key': lambda *args, **kwargs: b'Y\xef\xad\xad\xeerp\xf0Y\xe6\x9b\x12\xc2<z\x16]\n\xbb\xb8\xcb\xd7\x9bA\xc3\x14e\x99{\xd6\xf4&',
}): }):
encrypted_value = b'v10T\xb8\xf3\xb8\x01\xa7TtcV\xfc\x88\xb8\xb8\xef\x05\xb5\xfd\x18\xc90\x009\xab\xb1\x893\x85)\x87\xe1\xa9-\xa3\xad=' encrypted_value = b'v10T\xb8\xf3\xb8\x01\xa7TtcV\xfc\x88\xb8\xb8\xef\x05\xb5\xfd\x18\xc90\x009\xab\xb1\x893\x85)\x87\xe1\xa9-\xa3\xad='
value = '32101439' value = '32101439'
@ -121,17 +121,17 @@ def test_chrome_cookie_decryptor_mac_v10(self):
self.assertEqual(decryptor.decrypt(encrypted_value), value) self.assertEqual(decryptor.decrypt(encrypted_value), value)
def test_safari_cookie_parsing(self): def test_safari_cookie_parsing(self):
cookies = \ cookies = (
b'cook\x00\x00\x00\x01\x00\x00\x00i\x00\x00\x01\x00\x01\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00Y' \ b'cook\x00\x00\x00\x01\x00\x00\x00i\x00\x00\x01\x00\x01\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00Y'
b'\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x008\x00\x00\x00B\x00\x00\x00F\x00\x00\x00H' \ b'\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x008\x00\x00\x00B\x00\x00\x00F\x00\x00\x00H'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x03\xa5>\xc3A\x00\x00\x80\xc3\x07:\xc3A' \ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x03\xa5>\xc3A\x00\x00\x80\xc3\x07:\xc3A'
b'localhost\x00foo\x00/\x00test%20%3Bcookie\x00\x00\x00\x054\x07\x17 \x05\x00\x00\x00Kbplist00\xd1\x01' \ b'localhost\x00foo\x00/\x00test%20%3Bcookie\x00\x00\x00\x054\x07\x17 \x05\x00\x00\x00Kbplist00\xd1\x01'
b'\x02_\x10\x18NSHTTPCookieAcceptPolicy\x10\x02\x08\x0b&\x00\x00\x00\x00\x00\x00\x01\x01\x00\x00\x00' \ b'\x02_\x10\x18NSHTTPCookieAcceptPolicy\x10\x02\x08\x0b&\x00\x00\x00\x00\x00\x00\x01\x01\x00\x00\x00'
b'\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(' b'\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(')
jar = parse_safari_cookies(cookies) jar = parse_safari_cookies(cookies)
self.assertEqual(len(jar), 1) self.assertEqual(len(jar), 1)
cookie = list(jar)[0] cookie = next(iter(jar))
self.assertEqual(cookie.domain, 'localhost') self.assertEqual(cookie.domain, 'localhost')
self.assertEqual(cookie.port, None) self.assertEqual(cookie.port, None)
self.assertEqual(cookie.path, '/') self.assertEqual(cookie.path, '/')
@@ -164,7 +164,7 @@ def _run_tests(self, *cases):
             attributes = {
                 key: value
                 for key, value in dict(morsel).items()
-                if value != ""
+                if value != ''
             }
             self.assertEqual(attributes, expected_attributes, message)
@@ -174,133 +174,133 @@ def test_parsing(self):
         self._run_tests(
             # Copied from https://github.com/python/cpython/blob/v3.10.7/Lib/test/test_http_cookies.py
             (
-                "Test basic cookie",
-                "chips=ahoy; vienna=finger",
-                {"chips": "ahoy", "vienna": "finger"},
+                'Test basic cookie',
+                'chips=ahoy; vienna=finger',
+                {'chips': 'ahoy', 'vienna': 'finger'},
             ),
             (
-                "Test quoted cookie",
+                'Test quoted cookie',
                 'keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"',
-                {"keebler": 'E=mc2; L="Loves"; fudge=\012;'},
+                {'keebler': 'E=mc2; L="Loves"; fudge=\012;'},
             ),
             (
                 "Allow '=' in an unquoted value",
-                "keebler=E=mc2",
-                {"keebler": "E=mc2"},
+                'keebler=E=mc2',
+                {'keebler': 'E=mc2'},
             ),
             (
                 "Allow cookies with ':' in their name",
-                "key:term=value:term",
-                {"key:term": "value:term"},
+                'key:term=value:term',
+                {'key:term': 'value:term'},
             ),
             (
                 "Allow '[' and ']' in cookie values",
-                "a=b; c=[; d=r; f=h",
-                {"a": "b", "c": "[", "d": "r", "f": "h"},
+                'a=b; c=[; d=r; f=h',
+                {'a': 'b', 'c': '[', 'd': 'r', 'f': 'h'},
             ),
             (
-                "Test basic cookie attributes",
+                'Test basic cookie attributes',
                 'Customer="WILE_E_COYOTE"; Version=1; Path=/acme',
-                {"Customer": ("WILE_E_COYOTE", {"version": "1", "path": "/acme"})},
+                {'Customer': ('WILE_E_COYOTE', {'version': '1', 'path': '/acme'})},
             ),
             (
-                "Test flag only cookie attributes",
+                'Test flag only cookie attributes',
                 'Customer="WILE_E_COYOTE"; HttpOnly; Secure',
-                {"Customer": ("WILE_E_COYOTE", {"httponly": True, "secure": True})},
+                {'Customer': ('WILE_E_COYOTE', {'httponly': True, 'secure': True})},
             ),
             (
-                "Test flag only attribute with values",
-                "eggs=scrambled; httponly=foo; secure=bar; Path=/bacon",
-                {"eggs": ("scrambled", {"httponly": "foo", "secure": "bar", "path": "/bacon"})},
+                'Test flag only attribute with values',
+                'eggs=scrambled; httponly=foo; secure=bar; Path=/bacon',
+                {'eggs': ('scrambled', {'httponly': 'foo', 'secure': 'bar', 'path': '/bacon'})},
             ),
             (
                 "Test special case for 'expires' attribute, 4 digit year",
                 'Customer="W"; expires=Wed, 01 Jan 2010 00:00:00 GMT',
-                {"Customer": ("W", {"expires": "Wed, 01 Jan 2010 00:00:00 GMT"})},
+                {'Customer': ('W', {'expires': 'Wed, 01 Jan 2010 00:00:00 GMT'})},
             ),
             (
                 "Test special case for 'expires' attribute, 2 digit year",
                 'Customer="W"; expires=Wed, 01 Jan 98 00:00:00 GMT',
-                {"Customer": ("W", {"expires": "Wed, 01 Jan 98 00:00:00 GMT"})},
+                {'Customer': ('W', {'expires': 'Wed, 01 Jan 98 00:00:00 GMT'})},
             ),
             (
-                "Test extra spaces in keys and values",
-                "eggs = scrambled ; secure ; path = bar ; foo=foo ",
-                {"eggs": ("scrambled", {"secure": True, "path": "bar"}), "foo": "foo"},
+                'Test extra spaces in keys and values',
+                'eggs = scrambled ; secure ; path = bar ; foo=foo ',
+                {'eggs': ('scrambled', {'secure': True, 'path': 'bar'}), 'foo': 'foo'},
             ),
             (
-                "Test quoted attributes",
+                'Test quoted attributes',
                 'Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"',
-                {"Customer": ("WILE_E_COYOTE", {"version": "1", "path": "/acme"})}
+                {'Customer': ('WILE_E_COYOTE', {'version': '1', 'path': '/acme'})},
             ),
             # Our own tests that CPython passes
             (
                 "Allow ';' in quoted value",
                 'chips="a;hoy"; vienna=finger',
-                {"chips": "a;hoy", "vienna": "finger"},
+                {'chips': 'a;hoy', 'vienna': 'finger'},
             ),
             (
-                "Keep only the last set value",
-                "a=c; a=b",
-                {"a": "b"},
+                'Keep only the last set value',
+                'a=c; a=b',
+                {'a': 'b'},
             ),
         )
 
     def test_lenient_parsing(self):
         self._run_tests(
             (
-                "Ignore and try to skip invalid cookies",
+                'Ignore and try to skip invalid cookies',
                 'chips={"ahoy;": 1}; vienna="finger;"',
-                {"vienna": "finger;"},
+                {'vienna': 'finger;'},
             ),
             (
-                "Ignore cookies without a name",
-                "a=b; unnamed; c=d",
-                {"a": "b", "c": "d"},
+                'Ignore cookies without a name',
+                'a=b; unnamed; c=d',
+                {'a': 'b', 'c': 'd'},
             ),
             (
                 "Ignore '\"' cookie without name",
                 'a=b; "; c=d',
-                {"a": "b", "c": "d"},
+                {'a': 'b', 'c': 'd'},
             ),
             (
-                "Skip all space separated values",
-                "x a=b c=d x; e=f",
-                {"a": "b", "c": "d", "e": "f"},
+                'Skip all space separated values',
+                'x a=b c=d x; e=f',
+                {'a': 'b', 'c': 'd', 'e': 'f'},
             ),
             (
-                "Skip all space separated values",
+                'Skip all space separated values',
                 'x a=b; data={"complex": "json", "with": "key=value"}; x c=d x',
-                {"a": "b", "c": "d"},
+                {'a': 'b', 'c': 'd'},
             ),
             (
-                "Expect quote mending",
+                'Expect quote mending',
                 'a=b; invalid="; c=d',
-                {"a": "b", "c": "d"},
+                {'a': 'b', 'c': 'd'},
             ),
             (
-                "Reset morsel after invalid to not capture attributes",
-                "a=b; invalid; Version=1; c=d",
-                {"a": "b", "c": "d"},
+                'Reset morsel after invalid to not capture attributes',
+                'a=b; invalid; Version=1; c=d',
+                {'a': 'b', 'c': 'd'},
             ),
             (
-                "Reset morsel after invalid to not capture attributes",
-                "a=b; $invalid; $Version=1; c=d",
-                {"a": "b", "c": "d"},
+                'Reset morsel after invalid to not capture attributes',
+                'a=b; $invalid; $Version=1; c=d',
+                {'a': 'b', 'c': 'd'},
             ),
             (
-                "Continue after non-flag attribute without value",
-                "a=b; path; Version=1; c=d",
-                {"a": "b", "c": "d"},
+                'Continue after non-flag attribute without value',
+                'a=b; path; Version=1; c=d',
+                {'a': 'b', 'c': 'd'},
             ),
             (
-                "Allow cookie attributes with `$` prefix",
+                'Allow cookie attributes with `$` prefix',
                 'Customer="WILE_E_COYOTE"; $Version=1; $Secure; $Path=/acme',
-                {"Customer": ("WILE_E_COYOTE", {"version": "1", "secure": True, "path": "/acme"})},
+                {'Customer': ('WILE_E_COYOTE', {'version': '1', 'secure': True, 'path': '/acme'})},
             ),
             (
-                "Invalid Morsel keys should not result in an error",
-                "Key=Value; [Invalid]=Value; Another=Value",
-                {"Key": "Value", "Another": "Value"},
+                'Invalid Morsel keys should not result in an error',
+                'Key=Value; [Invalid]=Value; Another=Value',
+                {'Key': 'Value', 'Another': 'Value'},
             ),
         )
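Nearly every hunk above rewrites double-quoted strings to single quotes; double quotes survive only where the value itself contains a single quote (e.g. "Allow '=' in an unquoted value"). This matches the quote style the formatter enforces (presumably ruff's Q rules), and the runtime values are unchanged:

```
# Purely stylistic: both spellings produce identical objects
assert "chips=ahoy" == 'chips=ahoy'
assert {"a": "b"} == {'a': 'b'}
# Double quotes remain preferred when the string contains a single quote:
s = "Allow '=' in an unquoted value"
assert "'" in s
```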


@@ -94,7 +94,7 @@ def test_template(self):
             'playlist', [] if is_playlist else [test_case])
 
         def print_skipping(reason):
-            print('Skipping %s: %s' % (test_case['name'], reason))
+            print('Skipping {}: {}'.format(test_case['name'], reason))
             self.skipTest(reason)
 
         if not ie.working():
@@ -117,7 +117,7 @@ def print_skipping(reason):
             for other_ie in other_ies:
                 if not other_ie.working():
-                    print_skipping('test depends on %sIE, marked as not WORKING' % other_ie.ie_key())
+                    print_skipping(f'test depends on {other_ie.ie_key()}IE, marked as not WORKING')
 
         params = get_params(test_case.get('params', {}))
         params['outtmpl'] = tname + '_' + params['outtmpl']
@@ -148,10 +148,7 @@ def match_exception(err):
                 return False
             if err.__class__.__name__ == expected_exception:
                 return True
-            for exc in err.exc_info:
-                if exc.__class__.__name__ == expected_exception:
-                    return True
-            return False
+            return any(exc.__class__.__name__ == expected_exception for exc in err.exc_info)
 
         def try_rm_tcs_files(tcs=None):
             if tcs is None:
@@ -181,7 +178,7 @@ def try_rm_tcs_files(tcs=None):
                         raise
 
                     if try_num == RETRIES:
-                        report_warning('%s failed due to network errors, skipping...' % tname)
+                        report_warning(f'{tname} failed due to network errors, skipping...')
                         return
 
                     print(f'Retrying: {try_num} failed tries\n\n##########\n\n')
@@ -244,9 +241,8 @@ def try_rm_tcs_files(tcs=None):
                     got_fsize = os.path.getsize(tc_filename)
                     assertGreaterEqual(
                         self, got_fsize, expected_minsize,
-                        'Expected %s to be at least %s, but it\'s only %s ' %
-                        (tc_filename, format_bytes(expected_minsize),
-                         format_bytes(got_fsize)))
+                        f'Expected {tc_filename} to be at least {format_bytes(expected_minsize)}, '
+                        f'but it\'s only {format_bytes(got_fsize)} ')
                 if 'md5' in tc:
                     md5_for_file = _file_md5(tc_filename)
                     self.assertEqual(tc['md5'], md5_for_file)
@@ -255,7 +251,7 @@ def try_rm_tcs_files(tcs=None):
                 info_json_fn = os.path.splitext(tc_filename)[0] + '.info.json'
                 self.assertTrue(
                     os.path.exists(info_json_fn),
-                    'Missing info file %s' % info_json_fn)
+                    f'Missing info file {info_json_fn}')
                 with open(info_json_fn, encoding='utf-8') as infof:
                     info_dict = json.load(infof)
                 expect_info_dict(self, info_dict, tc.get('info_dict', {}))
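The `match_exception` rewrite above collapses a loop that returns `True`/`False` into a single `any()` call (ruff's SIM110, assuming that rule code); the remaining hunks convert `%`-formatting to f-strings or `str.format`. A sketch of the loop-to-`any()` equivalence:

```
def matches_loop(exc_info, expected_exception):
    # the old shape: explicit loop with early return
    for exc in exc_info:
        if exc.__class__.__name__ == expected_exception:
            return True
    return False

def matches_any(exc_info, expected_exception):
    # the new shape: same semantics, one expression
    return any(exc.__class__.__name__ == expected_exception for exc in exc_info)

errors = [ValueError(), KeyError()]
assert matches_loop(errors, 'KeyError') and matches_any(errors, 'KeyError')
```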


@@ -38,9 +38,9 @@ def send_content_range(self, total=None):
             end = int(mobj.group(2))
             valid_range = start is not None and end is not None
             if valid_range:
-                content_range = 'bytes %d-%d' % (start, end)
+                content_range = f'bytes {start}-{end}'
                 if total:
-                    content_range += '/%d' % total
+                    content_range += f'/{total}'
             self.send_header('Content-Range', content_range)
         return (end - start + 1) if valid_range else total
@@ -84,7 +84,7 @@ def download(self, params, ep):
         filename = 'testfile.mp4'
         try_rm(encodeFilename(filename))
         self.assertTrue(downloader.real_download(filename, {
-            'url': 'http://127.0.0.1:%d/%s' % (self.port, ep),
+            'url': f'http://127.0.0.1:{self.port}/{ep}',
         }), ep)
         self.assertEqual(os.path.getsize(encodeFilename(filename)), TEST_SIZE, ep)
         try_rm(encodeFilename(filename))
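These hunks replace printf-style `%` formatting with f-strings, the pattern behind the UP031 rule mentioned in the contributing notes this commit also updates. For instance:

```
start, end, total = 0, 499, 1000
old_style = 'bytes %d-%d' % (start, end) + '/%d' % total  # flagged by UP031
new_style = f'bytes {start}-{end}' + f'/{total}'
assert old_style == new_style == 'bytes 0-499/1000'
```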


@@ -105,7 +105,7 @@ def __init__(self, socket, ssl_context, server_hostname=None, suppress_ragged_eo
             self.incoming,
             self.outgoing,
             server_hostname=server_hostname,
-            server_side=server_side
+            server_side=server_side,
         )
         self._ssl_io_loop(self.sslobj.do_handshake)
@@ -333,7 +333,7 @@ def test_http_connect_auth(self, handler, ctx):
     @pytest.mark.skip_handler(
         'Requests',
-        'bug in urllib3 causes unclosed socket: https://github.com/urllib3/urllib3/issues/3374'
+        'bug in urllib3 causes unclosed socket: https://github.com/urllib3/urllib3/issues/3374',
     )
     def test_http_connect_bad_auth(self, handler, ctx):
         with ctx.http_server(HTTPConnectProxyHandler, username='test', password='test') as server_address:


@@ -29,11 +29,11 @@ def error(self, msg):
 
 @is_download_test
 class TestIqiyiSDKInterpreter(unittest.TestCase):
     def test_iqiyi_sdk_interpreter(self):
-        '''
+        """
         Test the functionality of IqiyiSDKInterpreter by trying to log in
         If `sign` is incorrect, /validate call throws an HTTP 556 error
-        '''
+        """
         logger = WarningLogger()
         ie = IqiyiIE(FakeYDL({'logger': logger}))
         ie._perform_login('foo', 'bar')
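Only the docstring quotes change here: `'''` becomes `"""`, the PEP 257 convention (pydocstyle's triple-double-quotes check; the exact rule code is an assumption). The docstring text itself is untouched:

```
def login_stub():
    """Docstrings use triple double quotes, even in a codebase that prefers single-quoted strings."""
    return 'ok'

assert login_stub.__doc__.startswith('Docstrings')
```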


@@ -21,7 +21,7 @@ def test_netrc_present(self):
                 continue
             self.assertTrue(
                 ie._NETRC_MACHINE,
-                'Extractor %s supports login, but is missing a _NETRC_MACHINE property' % ie.IE_NAME)
+                f'Extractor {ie.IE_NAME} supports login, but is missing a _NETRC_MACHINE property')
 
 
 if __name__ == '__main__':


@@ -375,10 +375,10 @@ def test_raise_http_error(self, handler):
         with handler() as rh:
             for bad_status in (400, 500, 599, 302):
                 with pytest.raises(HTTPError):
-                    validate_and_send(rh, Request('http://127.0.0.1:%d/gen_%d' % (self.http_port, bad_status)))
+                    validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/gen_{bad_status}'))
 
             # Should not raise an error
-            validate_and_send(rh, Request('http://127.0.0.1:%d/gen_200' % self.http_port)).close()
+            validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/gen_200')).close()
 
     def test_response_url(self, handler):
         with handler() as rh:
@@ -472,7 +472,7 @@ def test_redirect_loop(self, handler):
     def test_incompleteread(self, handler):
         with handler(timeout=2) as rh:
             with pytest.raises(IncompleteRead, match='13 bytes read, 234221 more expected'):
-                validate_and_send(rh, Request('http://127.0.0.1:%d/incompleteread' % self.http_port)).read()
+                validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/incompleteread')).read()
 
     def test_cookies(self, handler):
         cookiejar = YoutubeDLCookieJar()
@@ -740,7 +740,7 @@ class TestRequestHandlerMisc:
     @pytest.mark.parametrize('handler,logger_name', [
         ('Requests', 'urllib3'),
         ('Websockets', 'websockets.client'),
-        ('Websockets', 'websockets.server')
+        ('Websockets', 'websockets.server'),
     ], indirect=['handler'])
     def test_remove_logging_handler(self, handler, logger_name):
         # Ensure any logging handlers, which may contain a YoutubeDL instance,
@@ -794,7 +794,7 @@ def test_verify_cert_error_text(self, handler):
         with handler() as rh:
             with pytest.raises(
                 CertificateVerifyError,
-                match=r'\[SSL: CERTIFICATE_VERIFY_FAILED\] certificate verify failed: self.signed certificate'
+                match=r'\[SSL: CERTIFICATE_VERIFY_FAILED\] certificate verify failed: self.signed certificate',
             ):
                 validate_and_send(rh, Request(f'https://127.0.0.1:{self.https_port}/headers'))
@@ -804,14 +804,14 @@ def test_verify_cert_error_text(self, handler):
         (
             Request('http://127.0.0.1', method='GET\n'),
             'method can\'t contain control characters',
-            lambda v: v < (3, 7, 9) or (3, 8, 0) <= v < (3, 8, 5)
+            lambda v: v < (3, 7, 9) or (3, 8, 0) <= v < (3, 8, 5),
         ),
         # https://github.com/python/cpython/blob/987b712b4aeeece336eed24fcc87a950a756c3e2/Lib/http/client.py#L1265
         # bpo-38576: Check implemented in 3.7.8+, 3.8.3+
         (
             Request('http://127.0.0. 1', method='GET'),
             'URL can\'t contain control characters',
-            lambda v: v < (3, 7, 8) or (3, 8, 0) <= v < (3, 8, 3)
+            lambda v: v < (3, 7, 8) or (3, 8, 0) <= v < (3, 8, 3),
         ),
         # https://github.com/python/cpython/blob/987b712b4aeeece336eed24fcc87a950a756c3e2/Lib/http/client.py#L1288C31-L1288C50
         (Request('http://127.0.0.1', headers={'foo\n': 'bar'}), 'Invalid header name', None),
@@ -840,7 +840,7 @@ class TestRequestsRequestHandler(TestRequestHandlerBase):
         (lambda: requests.exceptions.InvalidHeader(), RequestError),
         # catch-all: https://github.com/psf/requests/blob/main/src/requests/adapters.py#L535
         (lambda: urllib3.exceptions.HTTPError(), TransportError),
-        (lambda: requests.exceptions.RequestException(), RequestError)
+        (lambda: requests.exceptions.RequestException(), RequestError),
         # (lambda: requests.exceptions.TooManyRedirects(), HTTPError) - Needs a response object
     ])
     def test_request_error_mapping(self, handler, monkeypatch, raised, expected):
@@ -868,12 +868,12 @@ def request(self, *args, **kwargs):
         (
             lambda: urllib3.exceptions.ProtocolError('error', http.client.IncompleteRead(partial=b'abc', expected=4)),
             IncompleteRead,
-            '3 bytes read, 4 more expected'
+            '3 bytes read, 4 more expected',
         ),
         (
             lambda: urllib3.exceptions.ProtocolError('error', urllib3.exceptions.IncompleteRead(partial=3, expected=5)),
             IncompleteRead,
-            '3 bytes read, 5 more expected'
+            '3 bytes read, 5 more expected',
         ),
     ])
     def test_response_error_mapping(self, handler, monkeypatch, raised, expected, match):
@@ -1125,7 +1125,7 @@ class HTTPSupportedRH(ValidationRH):
             ('https', False, {}),
         ]),
         (NoCheckRH, [('http', False, {})]),
-        (ValidationRH, [('http', UnsupportedRequest, {})])
+        (ValidationRH, [('http', UnsupportedRequest, {})]),
     ]
 
     PROXY_SCHEME_TESTS = [
@@ -1219,7 +1219,7 @@ class HTTPSupportedRH(ValidationRH):
             ({'impersonate': ImpersonateTarget('chrome', None, None, None)}, False),
             ({'impersonate': ImpersonateTarget(None, None, None, None)}, False),
             ({'impersonate': ImpersonateTarget()}, False),
-            ({'impersonate': 'chrome'}, AssertionError)
+            ({'impersonate': 'chrome'}, AssertionError),
         ]),
         (NoCheckRH, 'http', [
             ({'cookiejar': 'notacookiejar'}, False),
@@ -1235,7 +1235,7 @@ class HTTPSupportedRH(ValidationRH):
         ('Urllib', False, 'http'),
         ('Requests', False, 'http'),
         ('CurlCFFI', False, 'http'),
-        ('Websockets', False, 'ws')
+        ('Websockets', False, 'ws'),
     ], indirect=['handler'])
     def test_no_proxy(self, handler, fail, scheme):
         run_validation(handler, fail, Request(f'{scheme}://', proxies={'no': '127.0.0.1,github.com'}))
@@ -1246,7 +1246,7 @@ def test_no_proxy(self, handler, fail, scheme):
         (HTTPSupportedRH, 'http'),
         ('Requests', 'http'),
         ('CurlCFFI', 'http'),
-        ('Websockets', 'ws')
+        ('Websockets', 'ws'),
     ], indirect=['handler'])
     def test_empty_proxy(self, handler, scheme):
         run_validation(handler, False, Request(f'{scheme}://', proxies={scheme: None}))
@@ -1258,7 +1258,7 @@ def test_empty_proxy(self, handler, scheme):
         (HTTPSupportedRH, 'http'),
         ('Requests', 'http'),
         ('CurlCFFI', 'http'),
-        ('Websockets', 'ws')
+        ('Websockets', 'ws'),
     ], indirect=['handler'])
     def test_invalid_proxy_url(self, handler, scheme, proxy_url):
         run_validation(handler, UnsupportedRequest, Request(f'{scheme}://', proxies={scheme: proxy_url}))
@@ -1474,7 +1474,7 @@ def test_compat_opener(self):
     @pytest.mark.parametrize('proxy,expected', [
         ('http://127.0.0.1:8080', {'all': 'http://127.0.0.1:8080'}),
         ('', {'all': '__noproxy__'}),
-        (None, {'http': 'http://127.0.0.1:8081', 'https': 'http://127.0.0.1:8081'})  # env, set https
+        (None, {'http': 'http://127.0.0.1:8081', 'https': 'http://127.0.0.1:8081'}),  # env, set https
     ])
     def test_proxy(self, proxy, expected, monkeypatch):
         monkeypatch.setenv('HTTP_PROXY', 'http://127.0.0.1:8081')
@@ -1546,7 +1546,7 @@ def _send(self, request: Request):
         with FakeImpersonationRHYDL() as ydl:
             with pytest.raises(
                 RequestError,
-                match=r'Impersonate target "test" is not available'
+                match=r'Impersonate target "test" is not available',
             ):
                 ydl.urlopen(Request('http://', extensions={'impersonate': ImpersonateTarget('test', None, None, None)}))
@@ -1558,7 +1558,7 @@ def _send(self, request: Request):
                 pass
 
             _SUPPORTED_URL_SCHEMES = ('http',)
-            _SUPPORTED_IMPERSONATE_TARGET_MAP = {ImpersonateTarget('abc',): 'test'}
+            _SUPPORTED_IMPERSONATE_TARGET_MAP = {ImpersonateTarget('abc'): 'test'}
             _SUPPORTED_PROXY_SCHEMES = None
 
             super().__init__(*args, **kwargs)
@@ -1567,14 +1567,14 @@ def _send(self, request: Request):
         with FakeHTTPRHYDL() as ydl:
             with pytest.raises(
                 RequestError,
-                match=r'Impersonate target "test" is not available'
+                match=r'Impersonate target "test" is not available',
             ):
                 ydl.urlopen(Request('http://', extensions={'impersonate': ImpersonateTarget('test', None, None, None)}))
 
     def test_raise_impersonate_error(self):
         with pytest.raises(
             YoutubeDLError,
-            match=r'Impersonate target "test" is not available'
+            match=r'Impersonate target "test" is not available',
         ):
             FakeYDL({'impersonate': ImpersonateTarget('test', None, None, None)})
@@ -1592,7 +1592,7 @@ def _send(self, request: Request):
         monkeypatch.setattr(FakeYDL, 'build_request_director', lambda cls, handlers, preferences=None: brh(cls, handlers=[IRH]))
 
         with FakeYDL({
-            'impersonate': ImpersonateTarget('abc', None, None, None)
+            'impersonate': ImpersonateTarget('abc', None, None, None),
         }) as ydl:
             rh = self.build_handler(ydl, IRH)
             assert rh.impersonate == ImpersonateTarget('abc', None, None, None)
@@ -1604,7 +1604,7 @@ class TestRH(ImpersonateRequestHandler):
             def _send(self, request: Request):
                 pass
 
             _SUPPORTED_URL_SCHEMES = ('http',)
-            _SUPPORTED_IMPERSONATE_TARGET_MAP = {ImpersonateTarget(target_client,): 'test'}
+            _SUPPORTED_IMPERSONATE_TARGET_MAP = {ImpersonateTarget(target_client): 'test'}
             RH_KEY = target_client
             RH_NAME = target_client
 
         handlers.append(TestRH)
@@ -1614,7 +1614,7 @@ def _send(self, request: Request):
             assert set(ydl._get_available_impersonate_targets()) == {
                 (ImpersonateTarget('xyz'), 'xyz'),
                 (ImpersonateTarget('abc'), 'abc'),
-                (ImpersonateTarget('asd'), 'asd')
+                (ImpersonateTarget('asd'), 'asd'),
             }
             assert ydl._impersonate_target_available(ImpersonateTarget('abc'))
             assert ydl._impersonate_target_available(ImpersonateTarget())
@@ -1837,7 +1837,7 @@ def test_copy(self):
             extensions={'cookiejar': CookieJar()},
             headers={'Accept-Encoding': 'br'},
             proxies={'http': 'http://127.0.0.1'},
-            data=[b'123']
+            data=[b'123'],
         )
         req_copy = req.copy()
         assert req_copy is not req
@@ -1863,7 +1863,7 @@ class AnotherRequest(Request):
         assert isinstance(req.copy(), AnotherRequest)
 
     def test_url(self):
-        req = Request(url='https://фtest.example.com/ some spaceв?ä=c',)
+        req = Request(url='https://фtest.example.com/ some spaceв?ä=c')
         assert req.url == 'https://xn--test-z6d.example.com/%20some%20space%D0%B2?%C3%A4=c'
         assert Request(url='//example.com').url == 'http://example.com'
@@ -1878,7 +1878,7 @@ class TestResponse:
         ('custom', 200, 'custom'),
         (None, 404, 'Not Found'),  # fallback status
         ('', 403, 'Forbidden'),
-        (None, 999, None)
+        (None, 999, None),
     ])
     def test_reason(self, reason, status, expected):
         res = Response(io.BytesIO(b''), url='test://', headers={}, status=status, reason=reason)
@@ -1933,7 +1933,7 @@ def test_target_from_str(self, target_str, expected):
     @pytest.mark.parametrize('target_str', [
         '-120', ':-12.0', '-12:-12', '-:-',
-        '::', 'a-c-d:', 'a-c-d:e-f-g', 'a:b:'
+        '::', 'a-c-d:', 'a-c-d:e-f-g', 'a:b:',
    ])
     def test_target_from_invalid_str(self, target_str):
         with pytest.raises(ValueError):
@@ -1949,7 +1949,7 @@ def test_target_from_invalid_str(self, target_str):
         (ImpersonateTarget('abc', '120', 'xyz', None), 'abc-120:xyz'),
         (ImpersonateTarget('abc', None, 'xyz'), 'abc:xyz'),
         (ImpersonateTarget(None, None, 'xyz', '6.5'), ':xyz-6.5'),
-        (ImpersonateTarget('abc', ), 'abc'),
+        (ImpersonateTarget('abc'), 'abc'),
         (ImpersonateTarget(None, None, None, None), ''),
     ])
     def test_str(self, target, expected):
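Almost every change in this file adds a trailing comma to the last element of a multi-line call or literal, or drops a stray one inside a single-line call such as `ImpersonateTarget('abc',)` (these look like ruff's trailing-comma rules, COM812/COM819; the codes are assumptions). Neither change affects runtime values:

```
multi_line = [
    'urllib3',
    'websockets.client',
    'websockets.server',  # trailing comma keeps future additions to one-line diffs
]

def target(*clients):
    return clients

assert target('abc',) == target('abc')  # in a call, the extra comma is pure noise
```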


@@ -39,7 +39,7 @@ def test_select_proxy(self):
         proxies = {
             'all': 'socks5://example.com',
             'http': 'http://example.com:1080',
-            'no': 'bypass.example.com,yt-dl.org'
+            'no': 'bypass.example.com,yt-dl.org',
         }
 
         assert select_proxy('https://example.com', proxies) == proxies['all']
@@ -54,7 +54,7 @@ def test_select_proxy(self):
             'port': 1080,
             'rdns': True,
             'username': None,
-            'password': None
+            'password': None,
         }),
         ('socks5://user:@example.com:5555', {
             'proxytype': ProxyType.SOCKS5,
@@ -62,7 +62,7 @@ def test_select_proxy(self):
             'port': 5555,
             'rdns': False,
             'username': 'user',
-            'password': ''
+            'password': '',
         }),
         ('socks4://u%40ser:pa%20ss@127.0.0.1:1080', {
             'proxytype': ProxyType.SOCKS4,
@@ -70,7 +70,7 @@ def test_select_proxy(self):
             'port': 1080,
             'rdns': False,
             'username': 'u@ser',
-            'password': 'pa ss'
+            'password': 'pa ss',
         }),
         ('socks4a://:pa%20ss@127.0.0.1', {
             'proxytype': ProxyType.SOCKS4A,
@@ -78,8 +78,8 @@ def test_select_proxy(self):
             'port': 1080,
             'rdns': True,
             'username': '',
-            'password': 'pa ss'
-        })
+            'password': 'pa ss',
+        }),
     ])
     def test_make_socks_proxy_opts(self, socks_proxy, expected):
         assert make_socks_proxy_opts(socks_proxy) == expected


@@ -27,7 +27,7 @@ def test_default_overwrites(self):
             [
                 sys.executable, 'yt_dlp/__main__.py',
                 '-o', 'test.webm',
-                'https://www.youtube.com/watch?v=jNQXAC9IVRw'
+                'https://www.youtube.com/watch?v=jNQXAC9IVRw',
             ], cwd=root_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         sout, serr = outp.communicate()
         self.assertTrue(b'has already been downloaded' in sout)
@@ -39,7 +39,7 @@ def test_yes_overwrites(self):
             [
                 sys.executable, 'yt_dlp/__main__.py', '--yes-overwrites',
                 '-o', 'test.webm',
-                'https://www.youtube.com/watch?v=jNQXAC9IVRw'
+                'https://www.youtube.com/watch?v=jNQXAC9IVRw',
             ], cwd=root_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         sout, serr = outp.communicate()
         self.assertTrue(b'has already been downloaded' not in sout)


@@ -31,7 +31,7 @@ def test_extractor_classes(self):
         # don't load modules with underscore prefix
         self.assertFalse(
-            f'{PACKAGE_NAME}.extractor._ignore' in sys.modules.keys(),
+            f'{PACKAGE_NAME}.extractor._ignore' in sys.modules,
             'loaded module beginning with underscore')
         self.assertNotIn('IgnorePluginIE', plugins_ie.keys())
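Membership tests against a mapping no longer go through `.keys()`; `in` on the mapping itself is equivalent and skips the extra view object (ruff's SIM118, assuming that code). The `plugins_ie.keys()` on the next line survives because it is passed as an argument, not used in an `in` expression:

```
import sys

# `in` consults the mapping directly; `.keys()` adds nothing
assert ('sys' in sys.modules.keys()) == ('sys' in sys.modules)
```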


@@ -59,7 +59,7 @@ def hook_two(self, filename):
 
     def hook_three(self, filename):
         self.files.append(filename)
-        raise Exception('Test exception for \'%s\'' % filename)
+        raise Exception(f'Test exception for \'{filename}\'')
 
     def tearDown(self):
         for f in self.files:


@@ -9,7 +9,7 @@
 
 from yt_dlp import YoutubeDL
-from yt_dlp.compat import compat_shlex_quote
+from yt_dlp.utils import shell_quote
 from yt_dlp.postprocessor import (
     ExecPP,
     FFmpegThumbnailsConvertorPP,
@@ -65,7 +65,7 @@ class TestExec(unittest.TestCase):
     def test_parse_cmd(self):
         pp = ExecPP(YoutubeDL(), '')
         info = {'filepath': 'file name'}
-        cmd = 'echo %s' % compat_shlex_quote(info['filepath'])
+        cmd = 'echo {}'.format(shell_quote(info['filepath']))
         self.assertEqual(pp.parse_cmd('echo', info), cmd)
         self.assertEqual(pp.parse_cmd('echo {}', info), cmd)
@ -125,7 +125,8 @@ def test_remove_marked_arrange_sponsors_CanGetThroughUnaltered(self):
self._remove_marked_arrange_sponsors_test_impl(chapters, chapters, []) self._remove_marked_arrange_sponsors_test_impl(chapters, chapters, [])
def test_remove_marked_arrange_sponsors_ChapterWithSponsors(self): def test_remove_marked_arrange_sponsors_ChapterWithSponsors(self):
chapters = self._chapters([70], ['c']) + [ chapters = [
*self._chapters([70], ['c']),
self._sponsor_chapter(10, 20, 'sponsor'), self._sponsor_chapter(10, 20, 'sponsor'),
self._sponsor_chapter(30, 40, 'preview'), self._sponsor_chapter(30, 40, 'preview'),
self._sponsor_chapter(50, 60, 'filler')] self._sponsor_chapter(50, 60, 'filler')]
@ -136,7 +137,8 @@ def test_remove_marked_arrange_sponsors_ChapterWithSponsors(self):
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, []) self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
def test_remove_marked_arrange_sponsors_SponsorBlockChapters(self): def test_remove_marked_arrange_sponsors_SponsorBlockChapters(self):
chapters = self._chapters([70], ['c']) + [ chapters = [
*self._chapters([70], ['c']),
self._sponsor_chapter(10, 20, 'chapter', title='sb c1'), self._sponsor_chapter(10, 20, 'chapter', title='sb c1'),
self._sponsor_chapter(15, 16, 'chapter', title='sb c2'), self._sponsor_chapter(15, 16, 'chapter', title='sb c2'),
self._sponsor_chapter(30, 40, 'preview'), self._sponsor_chapter(30, 40, 'preview'),
@ -149,10 +151,14 @@ def test_remove_marked_arrange_sponsors_SponsorBlockChapters(self):
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, []) self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
def test_remove_marked_arrange_sponsors_UniqueNamesForOverlappingSponsors(self): def test_remove_marked_arrange_sponsors_UniqueNamesForOverlappingSponsors(self):
chapters = self._chapters([120], ['c']) + [ chapters = [
self._sponsor_chapter(10, 45, 'sponsor'), self._sponsor_chapter(20, 40, 'selfpromo'), *self._chapters([120], ['c']),
self._sponsor_chapter(50, 70, 'sponsor'), self._sponsor_chapter(60, 85, 'selfpromo'), self._sponsor_chapter(10, 45, 'sponsor'),
self._sponsor_chapter(90, 120, 'selfpromo'), self._sponsor_chapter(100, 110, 'sponsor')] self._sponsor_chapter(20, 40, 'selfpromo'),
self._sponsor_chapter(50, 70, 'sponsor'),
self._sponsor_chapter(60, 85, 'selfpromo'),
self._sponsor_chapter(90, 120, 'selfpromo'),
self._sponsor_chapter(100, 110, 'sponsor')]
expected = self._chapters( expected = self._chapters(
[10, 20, 40, 45, 50, 60, 70, 85, 90, 100, 110, 120], [10, 20, 40, 45, 50, 60, 70, 85, 90, 100, 110, 120],
['c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Sponsor, Unpaid/Self Promotion', ['c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Sponsor, Unpaid/Self Promotion',
@ -172,7 +178,8 @@ def test_remove_marked_arrange_sponsors_ChapterWithCuts(self):
chapters, self._chapters([40], ['c']), cuts) chapters, self._chapters([40], ['c']), cuts)
def test_remove_marked_arrange_sponsors_ChapterWithSponsorsAndCuts(self): def test_remove_marked_arrange_sponsors_ChapterWithSponsorsAndCuts(self):
chapters = self._chapters([70], ['c']) + [ chapters = [
*self._chapters([70], ['c']),
self._sponsor_chapter(10, 20, 'sponsor'), self._sponsor_chapter(10, 20, 'sponsor'),
self._sponsor_chapter(30, 40, 'selfpromo', remove=True), self._sponsor_chapter(30, 40, 'selfpromo', remove=True),
self._sponsor_chapter(50, 60, 'interaction')] self._sponsor_chapter(50, 60, 'interaction')]
@ -185,24 +192,29 @@ def test_remove_marked_arrange_sponsors_ChapterWithSponsorsAndCuts(self):
def test_remove_marked_arrange_sponsors_ChapterWithSponsorCutInTheMiddle(self): def test_remove_marked_arrange_sponsors_ChapterWithSponsorCutInTheMiddle(self):
cuts = [self._sponsor_chapter(20, 30, 'selfpromo', remove=True), cuts = [self._sponsor_chapter(20, 30, 'selfpromo', remove=True),
self._chapter(40, 50, remove=True)] self._chapter(40, 50, remove=True)]
chapters = self._chapters([70], ['c']) + [self._sponsor_chapter(10, 60, 'sponsor')] + cuts chapters = [
*self._chapters([70], ['c']),
self._sponsor_chapter(10, 60, 'sponsor'),
*cuts]
expected = self._chapters( expected = self._chapters(
[10, 40, 50], ['c', '[SponsorBlock]: Sponsor', 'c']) [10, 40, 50], ['c', '[SponsorBlock]: Sponsor', 'c'])
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts) self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
def test_remove_marked_arrange_sponsors_ChapterWithCutHidingSponsor(self): def test_remove_marked_arrange_sponsors_ChapterWithCutHidingSponsor(self):
cuts = [self._sponsor_chapter(20, 50, 'selfpromo', remove=True)] cuts = [self._sponsor_chapter(20, 50, 'selfpromo', remove=True)]
chapters = self._chapters([60], ['c']) + [ chapters = [
*self._chapters([60], ['c']),
self._sponsor_chapter(10, 20, 'intro'), self._sponsor_chapter(10, 20, 'intro'),
self._sponsor_chapter(30, 40, 'sponsor'), self._sponsor_chapter(30, 40, 'sponsor'),
self._sponsor_chapter(50, 60, 'outro'), self._sponsor_chapter(50, 60, 'outro'),
] + cuts *cuts]
expected = self._chapters( expected = self._chapters(
[10, 20, 30], ['c', '[SponsorBlock]: Intermission/Intro Animation', '[SponsorBlock]: Endcards/Credits']) [10, 20, 30], ['c', '[SponsorBlock]: Intermission/Intro Animation', '[SponsorBlock]: Endcards/Credits'])
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts) self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
def test_remove_marked_arrange_sponsors_ChapterWithAdjacentSponsors(self): def test_remove_marked_arrange_sponsors_ChapterWithAdjacentSponsors(self):
chapters = self._chapters([70], ['c']) + [ chapters = [
*self._chapters([70], ['c']),
self._sponsor_chapter(10, 20, 'sponsor'), self._sponsor_chapter(10, 20, 'sponsor'),
self._sponsor_chapter(20, 30, 'selfpromo'), self._sponsor_chapter(20, 30, 'selfpromo'),
self._sponsor_chapter(30, 40, 'interaction')] self._sponsor_chapter(30, 40, 'interaction')]
@ -213,7 +225,8 @@ def test_remove_marked_arrange_sponsors_ChapterWithAdjacentSponsors(self):
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, []) self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
def test_remove_marked_arrange_sponsors_ChapterWithAdjacentCuts(self): def test_remove_marked_arrange_sponsors_ChapterWithAdjacentCuts(self):
chapters = self._chapters([70], ['c']) + [ chapters = [
*self._chapters([70], ['c']),
self._sponsor_chapter(10, 20, 'sponsor'), self._sponsor_chapter(10, 20, 'sponsor'),
self._sponsor_chapter(20, 30, 'interaction', remove=True), self._sponsor_chapter(20, 30, 'interaction', remove=True),
self._chapter(30, 40, remove=True), self._chapter(30, 40, remove=True),
@ -226,7 +239,8 @@ def test_remove_marked_arrange_sponsors_ChapterWithAdjacentCuts(self):
chapters, expected, [self._chapter(20, 50, remove=True)]) chapters, expected, [self._chapter(20, 50, remove=True)])
def test_remove_marked_arrange_sponsors_ChapterWithOverlappingSponsors(self): def test_remove_marked_arrange_sponsors_ChapterWithOverlappingSponsors(self):
chapters = self._chapters([70], ['c']) + [ chapters = [
*self._chapters([70], ['c']),
self._sponsor_chapter(10, 30, 'sponsor'), self._sponsor_chapter(10, 30, 'sponsor'),
self._sponsor_chapter(20, 50, 'selfpromo'), self._sponsor_chapter(20, 50, 'selfpromo'),
self._sponsor_chapter(40, 60, 'interaction')] self._sponsor_chapter(40, 60, 'interaction')]
@ -238,7 +252,8 @@ def test_remove_marked_arrange_sponsors_ChapterWithOverlappingSponsors(self):
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, []) self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
def test_remove_marked_arrange_sponsors_ChapterWithOverlappingCuts(self): def test_remove_marked_arrange_sponsors_ChapterWithOverlappingCuts(self):
chapters = self._chapters([70], ['c']) + [ chapters = [
*self._chapters([70], ['c']),
self._sponsor_chapter(10, 30, 'sponsor', remove=True), self._sponsor_chapter(10, 30, 'sponsor', remove=True),
self._sponsor_chapter(20, 50, 'selfpromo', remove=True), self._sponsor_chapter(20, 50, 'selfpromo', remove=True),
self._sponsor_chapter(40, 60, 'interaction', remove=True)] self._sponsor_chapter(40, 60, 'interaction', remove=True)]
@ -246,7 +261,8 @@ def test_remove_marked_arrange_sponsors_ChapterWithOverlappingCuts(self):
chapters, self._chapters([20], ['c']), [self._chapter(10, 60, remove=True)]) chapters, self._chapters([20], ['c']), [self._chapter(10, 60, remove=True)])
def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingSponsors(self): def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingSponsors(self):
chapters = self._chapters([170], ['c']) + [ chapters = [
*self._chapters([170], ['c']),
self._sponsor_chapter(0, 30, 'intro'), self._sponsor_chapter(0, 30, 'intro'),
self._sponsor_chapter(20, 50, 'sponsor'), self._sponsor_chapter(20, 50, 'sponsor'),
self._sponsor_chapter(40, 60, 'selfpromo'), self._sponsor_chapter(40, 60, 'selfpromo'),
@ -267,7 +283,8 @@ def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingSponsors(sel
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, []) self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingCuts(self): def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingCuts(self):
chapters = self._chapters([170], ['c']) + [ chapters = [
*self._chapters([170], ['c']),
self._chapter(0, 30, remove=True), self._chapter(0, 30, remove=True),
self._sponsor_chapter(20, 50, 'sponsor', remove=True), self._sponsor_chapter(20, 50, 'sponsor', remove=True),
self._chapter(40, 60, remove=True), self._chapter(40, 60, remove=True),
@ -284,7 +301,8 @@ def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingCuts(self):
chapters, self._chapters([20], ['c']), expected_cuts) chapters, self._chapters([20], ['c']), expected_cuts)
def test_remove_marked_arrange_sponsors_OverlappingSponsorsDifferentTitlesAfterCut(self): def test_remove_marked_arrange_sponsors_OverlappingSponsorsDifferentTitlesAfterCut(self):
chapters = self._chapters([60], ['c']) + [ chapters = [
*self._chapters([60], ['c']),
self._sponsor_chapter(10, 60, 'sponsor'), self._sponsor_chapter(10, 60, 'sponsor'),
self._sponsor_chapter(10, 40, 'intro'), self._sponsor_chapter(10, 40, 'intro'),
self._sponsor_chapter(30, 50, 'interaction'), self._sponsor_chapter(30, 50, 'interaction'),
@ -297,7 +315,8 @@ def test_remove_marked_arrange_sponsors_OverlappingSponsorsDifferentTitlesAfterC
chapters, expected, [self._chapter(30, 50, remove=True)]) chapters, expected, [self._chapter(30, 50, remove=True)])
def test_remove_marked_arrange_sponsors_SponsorsNoLongerOverlapAfterCut(self): def test_remove_marked_arrange_sponsors_SponsorsNoLongerOverlapAfterCut(self):
chapters = self._chapters([70], ['c']) + [ chapters = [
*self._chapters([70], ['c']),
self._sponsor_chapter(10, 30, 'sponsor'), self._sponsor_chapter(10, 30, 'sponsor'),
self._sponsor_chapter(20, 50, 'interaction'), self._sponsor_chapter(20, 50, 'interaction'),
self._sponsor_chapter(30, 50, 'selfpromo', remove=True), self._sponsor_chapter(30, 50, 'selfpromo', remove=True),
@ -310,7 +329,8 @@ def test_remove_marked_arrange_sponsors_SponsorsNoLongerOverlapAfterCut(self):
chapters, expected, [self._chapter(30, 50, remove=True)]) chapters, expected, [self._chapter(30, 50, remove=True)])
def test_remove_marked_arrange_sponsors_SponsorsStillOverlapAfterCut(self): def test_remove_marked_arrange_sponsors_SponsorsStillOverlapAfterCut(self):
chapters = self._chapters([70], ['c']) + [ chapters = [
*self._chapters([70], ['c']),
self._sponsor_chapter(10, 60, 'sponsor'), self._sponsor_chapter(10, 60, 'sponsor'),
self._sponsor_chapter(20, 60, 'interaction'), self._sponsor_chapter(20, 60, 'interaction'),
self._sponsor_chapter(30, 50, 'selfpromo', remove=True)] self._sponsor_chapter(30, 50, 'selfpromo', remove=True)]
@ -321,7 +341,8 @@ def test_remove_marked_arrange_sponsors_SponsorsStillOverlapAfterCut(self):
chapters, expected, [self._chapter(30, 50, remove=True)]) chapters, expected, [self._chapter(30, 50, remove=True)])
def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingSponsorsAndCuts(self): def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingSponsorsAndCuts(self):
chapters = self._chapters([200], ['c']) + [ chapters = [
*self._chapters([200], ['c']),
self._sponsor_chapter(10, 40, 'sponsor'), self._sponsor_chapter(10, 40, 'sponsor'),
self._sponsor_chapter(10, 30, 'intro'), self._sponsor_chapter(10, 30, 'intro'),
self._chapter(20, 30, remove=True), self._chapter(20, 30, remove=True),
@ -347,8 +368,9 @@ def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingSponsorsAndC
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, expected_cuts) self._remove_marked_arrange_sponsors_test_impl(chapters, expected, expected_cuts)
def test_remove_marked_arrange_sponsors_SponsorOverlapsMultipleChapters(self): def test_remove_marked_arrange_sponsors_SponsorOverlapsMultipleChapters(self):
chapters = (self._chapters([20, 40, 60, 80, 100], ['c1', 'c2', 'c3', 'c4', 'c5']) chapters = [
+ [self._sponsor_chapter(10, 90, 'sponsor')]) *self._chapters([20, 40, 60, 80, 100], ['c1', 'c2', 'c3', 'c4', 'c5']),
self._sponsor_chapter(10, 90, 'sponsor')]
expected = self._chapters([10, 90, 100], ['c1', '[SponsorBlock]: Sponsor', 'c5']) expected = self._chapters([10, 90, 100], ['c1', '[SponsorBlock]: Sponsor', 'c5'])
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, []) self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
@ -359,9 +381,10 @@ def test_remove_marked_arrange_sponsors_CutOverlapsMultipleChapters(self):
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts) self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
def test_remove_marked_arrange_sponsors_SponsorsWithinSomeChaptersAndOverlappingOthers(self): def test_remove_marked_arrange_sponsors_SponsorsWithinSomeChaptersAndOverlappingOthers(self):
chapters = (self._chapters([10, 40, 60, 80], ['c1', 'c2', 'c3', 'c4']) chapters = [
+ [self._sponsor_chapter(20, 30, 'sponsor'), *self._chapters([10, 40, 60, 80], ['c1', 'c2', 'c3', 'c4']),
self._sponsor_chapter(50, 70, 'selfpromo')]) self._sponsor_chapter(20, 30, 'sponsor'),
self._sponsor_chapter(50, 70, 'selfpromo')]
expected = self._chapters([10, 20, 30, 40, 50, 70, 80], expected = self._chapters([10, 20, 30, 40, 50, 70, 80],
['c1', 'c2', '[SponsorBlock]: Sponsor', 'c2', 'c3', ['c1', 'c2', '[SponsorBlock]: Sponsor', 'c2', 'c3',
'[SponsorBlock]: Unpaid/Self Promotion', 'c4']) '[SponsorBlock]: Unpaid/Self Promotion', 'c4'])
@ -374,8 +397,9 @@ def test_remove_marked_arrange_sponsors_CutsWithinSomeChaptersAndOverlappingOthe
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts) self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
def test_remove_marked_arrange_sponsors_ChaptersAfterLastSponsor(self): def test_remove_marked_arrange_sponsors_ChaptersAfterLastSponsor(self):
chapters = (self._chapters([20, 40, 50, 60], ['c1', 'c2', 'c3', 'c4']) chapters = [
+ [self._sponsor_chapter(10, 30, 'music_offtopic')]) *self._chapters([20, 40, 50, 60], ['c1', 'c2', 'c3', 'c4']),
self._sponsor_chapter(10, 30, 'music_offtopic')]
expected = self._chapters( expected = self._chapters(
[10, 30, 40, 50, 60], [10, 30, 40, 50, 60],
['c1', '[SponsorBlock]: Non-Music Section', 'c2', 'c3', 'c4']) ['c1', '[SponsorBlock]: Non-Music Section', 'c2', 'c3', 'c4'])
@ -388,8 +412,9 @@ def test_remove_marked_arrange_sponsors_ChaptersAfterLastCut(self):
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts) self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
def test_remove_marked_arrange_sponsors_SponsorStartsAtChapterStart(self): def test_remove_marked_arrange_sponsors_SponsorStartsAtChapterStart(self):
chapters = (self._chapters([10, 20, 40], ['c1', 'c2', 'c3']) chapters = [
+ [self._sponsor_chapter(20, 30, 'sponsor')]) *self._chapters([10, 20, 40], ['c1', 'c2', 'c3']),
self._sponsor_chapter(20, 30, 'sponsor')]
expected = self._chapters([10, 20, 30, 40], ['c1', 'c2', '[SponsorBlock]: Sponsor', 'c3']) expected = self._chapters([10, 20, 30, 40], ['c1', 'c2', '[SponsorBlock]: Sponsor', 'c3'])
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, []) self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
@ -400,8 +425,9 @@ def test_remove_marked_arrange_sponsors_CutStartsAtChapterStart(self):
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts) self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
def test_remove_marked_arrange_sponsors_SponsorEndsAtChapterEnd(self): def test_remove_marked_arrange_sponsors_SponsorEndsAtChapterEnd(self):
chapters = (self._chapters([10, 30, 40], ['c1', 'c2', 'c3']) chapters = [
+ [self._sponsor_chapter(20, 30, 'sponsor')]) *self._chapters([10, 30, 40], ['c1', 'c2', 'c3']),
self._sponsor_chapter(20, 30, 'sponsor')]
expected = self._chapters([10, 20, 30, 40], ['c1', 'c2', '[SponsorBlock]: Sponsor', 'c3']) expected = self._chapters([10, 20, 30, 40], ['c1', 'c2', '[SponsorBlock]: Sponsor', 'c3'])
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, []) self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
@ -412,8 +438,9 @@ def test_remove_marked_arrange_sponsors_CutEndsAtChapterEnd(self):
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts) self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
def test_remove_marked_arrange_sponsors_SponsorCoincidesWithChapters(self): def test_remove_marked_arrange_sponsors_SponsorCoincidesWithChapters(self):
chapters = (self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4']) chapters = [
+ [self._sponsor_chapter(10, 30, 'sponsor')]) *self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4']),
self._sponsor_chapter(10, 30, 'sponsor')]
expected = self._chapters([10, 30, 40], ['c1', '[SponsorBlock]: Sponsor', 'c4']) expected = self._chapters([10, 30, 40], ['c1', '[SponsorBlock]: Sponsor', 'c4'])
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, []) self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
@ -424,8 +451,9 @@ def test_remove_marked_arrange_sponsors_CutCoincidesWithChapters(self):
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts) self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
def test_remove_marked_arrange_sponsors_SponsorsAtVideoBoundaries(self): def test_remove_marked_arrange_sponsors_SponsorsAtVideoBoundaries(self):
chapters = (self._chapters([20, 40, 60], ['c1', 'c2', 'c3']) chapters = [
+ [self._sponsor_chapter(0, 10, 'intro'), self._sponsor_chapter(50, 60, 'outro')]) *self._chapters([20, 40, 60], ['c1', 'c2', 'c3']),
self._sponsor_chapter(0, 10, 'intro'), self._sponsor_chapter(50, 60, 'outro')]
expected = self._chapters( expected = self._chapters(
[10, 20, 40, 50, 60], ['[SponsorBlock]: Intermission/Intro Animation', 'c1', 'c2', 'c3', '[SponsorBlock]: Endcards/Credits']) [10, 20, 40, 50, 60], ['[SponsorBlock]: Intermission/Intro Animation', 'c1', 'c2', 'c3', '[SponsorBlock]: Endcards/Credits'])
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, []) self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
@ -437,8 +465,10 @@ def test_remove_marked_arrange_sponsors_CutsAtVideoBoundaries(self):
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts) self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
def test_remove_marked_arrange_sponsors_SponsorsOverlapChaptersAtVideoBoundaries(self): def test_remove_marked_arrange_sponsors_SponsorsOverlapChaptersAtVideoBoundaries(self):
chapters = (self._chapters([10, 40, 50], ['c1', 'c2', 'c3']) chapters = [
+ [self._sponsor_chapter(0, 20, 'intro'), self._sponsor_chapter(30, 50, 'outro')]) *self._chapters([10, 40, 50], ['c1', 'c2', 'c3']),
self._sponsor_chapter(0, 20, 'intro'),
self._sponsor_chapter(30, 50, 'outro')]
expected = self._chapters( expected = self._chapters(
[20, 30, 50], ['[SponsorBlock]: Intermission/Intro Animation', 'c2', '[SponsorBlock]: Endcards/Credits']) [20, 30, 50], ['[SponsorBlock]: Intermission/Intro Animation', 'c2', '[SponsorBlock]: Endcards/Credits'])
         self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

@@ -450,8 +480,10 @@ def test_remove_marked_arrange_sponsors_CutsOverlapChaptersAtVideoBoundaries(sel
         self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

     def test_remove_marked_arrange_sponsors_EverythingSponsored(self):
-        chapters = (self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4'])
-                    + [self._sponsor_chapter(0, 20, 'intro'), self._sponsor_chapter(20, 40, 'outro')])
+        chapters = [
+            *self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4']),
+            self._sponsor_chapter(0, 20, 'intro'),
+            self._sponsor_chapter(20, 40, 'outro')]
         expected = self._chapters([20, 40], ['[SponsorBlock]: Intermission/Intro Animation', '[SponsorBlock]: Endcards/Credits'])
         self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

@@ -491,38 +523,39 @@ def test_remove_marked_arrange_sponsors_TinyChapterAtTheStartPrependedToTheNext(
             chapters, self._chapters([2.5], ['c2']), cuts)

     def test_remove_marked_arrange_sponsors_TinyChaptersResultingFromSponsorOverlapAreIgnored(self):
-        chapters = self._chapters([1, 3, 4], ['c1', 'c2', 'c3']) + [
+        chapters = [
+            *self._chapters([1, 3, 4], ['c1', 'c2', 'c3']),
             self._sponsor_chapter(1.5, 2.5, 'sponsor')]
         self._remove_marked_arrange_sponsors_test_impl(
             chapters, self._chapters([1.5, 2.5, 4], ['c1', '[SponsorBlock]: Sponsor', 'c3']), [])

     def test_remove_marked_arrange_sponsors_TinySponsorsOverlapsAreIgnored(self):
-        chapters = self._chapters([2, 3, 5], ['c1', 'c2', 'c3']) + [
+        chapters = [
+            *self._chapters([2, 3, 5], ['c1', 'c2', 'c3']),
             self._sponsor_chapter(1, 3, 'sponsor'),
-            self._sponsor_chapter(2.5, 4, 'selfpromo')
-        ]
+            self._sponsor_chapter(2.5, 4, 'selfpromo')]
         self._remove_marked_arrange_sponsors_test_impl(
             chapters, self._chapters([1, 3, 4, 5], [
                 'c1', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Unpaid/Self Promotion', 'c3']), [])

     def test_remove_marked_arrange_sponsors_TinySponsorsPrependedToTheNextSponsor(self):
-        chapters = self._chapters([4], ['c']) + [
+        chapters = [
+            *self._chapters([4], ['c']),
             self._sponsor_chapter(1.5, 2, 'sponsor'),
-            self._sponsor_chapter(2, 4, 'selfpromo')
-        ]
+            self._sponsor_chapter(2, 4, 'selfpromo')]
         self._remove_marked_arrange_sponsors_test_impl(
             chapters, self._chapters([1.5, 4], ['c', '[SponsorBlock]: Unpaid/Self Promotion']), [])

     def test_remove_marked_arrange_sponsors_SmallestSponsorInTheOverlapGetsNamed(self):
         self._pp._sponsorblock_chapter_title = '[SponsorBlock]: %(name)s'
-        chapters = self._chapters([10], ['c']) + [
+        chapters = [
+            *self._chapters([10], ['c']),
             self._sponsor_chapter(2, 8, 'sponsor'),
-            self._sponsor_chapter(4, 6, 'selfpromo')
-        ]
+            self._sponsor_chapter(4, 6, 'selfpromo')]
         self._remove_marked_arrange_sponsors_test_impl(
             chapters, self._chapters([2, 4, 6, 8, 10], [
                 'c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Unpaid/Self Promotion',
-                '[SponsorBlock]: Sponsor', 'c'
+                '[SponsorBlock]: Sponsor', 'c',
             ]), [])

     def test_make_concat_opts_CommonCase(self):
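Every chapter-list rewrite in the hunks above is the same mechanical change: ruff's `RUF005` (collection-literal-concatenation) prefers a single list display with iterable unpacking over `list + list` concatenation. A minimal, self-contained sketch of the pattern, with invented names:

```
# RUF005: build one list literal with unpacking instead of concatenating lists.
base_chapters = ['c1', 'c2', 'c3']

chapters_old = base_chapters + ['intro', 'outro']   # before
chapters_new = [*base_chapters, 'intro', 'outro']   # after

assert chapters_old == chapters_new
```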
diff --git a/test/test_socks.py b/test/test_socks.py
@@ -95,7 +95,7 @@ def handle(self):
                 return
             elif Socks5Auth.AUTH_USER_PASS in methods:
-                self.connection.sendall(struct.pack("!BB", SOCKS5_VERSION, Socks5Auth.AUTH_USER_PASS))
+                self.connection.sendall(struct.pack('!BB', SOCKS5_VERSION, Socks5Auth.AUTH_USER_PASS))

                 _, user_len = struct.unpack('!BB', self.connection.recv(2))
                 username = self.connection.recv(user_len).decode()
@@ -174,7 +174,7 @@ def handle(self):
             if 0x0 < dest_ip <= 0xFF:
                 use_remote_dns = True
             else:
-                socks_info['ipv4_address'] = socket.inet_ntoa(struct.pack("!I", dest_ip))
+                socks_info['ipv4_address'] = socket.inet_ntoa(struct.pack('!I', dest_ip))

         user_id = self._read_until_null().decode()
         if user_id != (self.socks_kwargs.get('user_id') or ''):
@@ -291,7 +291,7 @@ def ctx(request):
     ('Urllib', 'http'),
     ('Requests', 'http'),
     ('Websockets', 'ws'),
-    ('CurlCFFI', 'http')
+    ('CurlCFFI', 'http'),
 ], indirect=True)
 class TestSocks4Proxy:
     def test_socks4_no_auth(self, handler, ctx):
@@ -366,7 +366,7 @@ def test_timeout(self, handler, ctx):
     ('Urllib', 'http'),
     ('Requests', 'http'),
     ('Websockets', 'ws'),
-    ('CurlCFFI', 'http')
+    ('CurlCFFI', 'http'),
 ], indirect=True)
 class TestSocks5Proxy:
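Two smaller rules account for the rest of this file: single-quoted strings (`Q000`) and mandatory trailing commas on multi-line literals (`COM812`), which keep later additions to tables like the `parametrize` list down to one-line diffs. A hedged illustration, with made-up values:

```
import struct

# Q000: prefer single quotes.
packed = struct.pack('!BB', 5, 2)  # was: struct.pack("!BB", 5, 2)
assert packed == b'\x05\x02'

# COM812: the last element of a multi-line literal ends with a comma.
handlers = [
    ('Urllib', 'http'),
    ('Requests', 'http'),
    ('CurlCFFI', 'http'),  # <- trailing comma
]
```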
diff --git a/test/test_subtitles.py b/test/test_subtitles.py
@@ -40,12 +40,11 @@ def setUp(self):
         self.ie = self.IE()
         self.DL.add_info_extractor(self.ie)
         if not self.IE.working():
-            print('Skipping: %s marked as not _WORKING' % self.IE.ie_key())
+            print(f'Skipping: {self.IE.ie_key()} marked as not _WORKING')
             self.skipTest('IE marked as not _WORKING')

     def getInfoDict(self):
-        info_dict = self.DL.extract_info(self.url, download=False)
-        return info_dict
+        return self.DL.extract_info(self.url, download=False)

     def getSubtitles(self):
         info_dict = self.getInfoDict()
@@ -87,7 +86,7 @@ def test_youtube_allsubtitles(self):
         self.assertEqual(md5(subtitles['en']), 'ae1bd34126571a77aabd4d276b28044d')
         self.assertEqual(md5(subtitles['it']), '0e0b667ba68411d88fd1c5f4f4eab2f9')
         for lang in ['fr', 'de']:
-            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
+            self.assertTrue(subtitles.get(lang) is not None, f'Subtitles for \'{lang}\' not extracted')

     def _test_subtitles_format(self, fmt, md5_hash, lang='en'):
         self.DL.params['writesubtitles'] = True
@@ -157,7 +156,7 @@ def test_allsubtitles(self):
         self.assertEqual(md5(subtitles['en']), '976553874490cba125086bbfea3ff76f')
         self.assertEqual(md5(subtitles['fr']), '594564ec7d588942e384e920e5341792')
         for lang in ['es', 'fr', 'de']:
-            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
+            self.assertTrue(subtitles.get(lang) is not None, f'Subtitles for \'{lang}\' not extracted')

     def test_nosubtitles(self):
         self.DL.expect_warning('video doesn\'t have subtitles')
@@ -182,7 +181,7 @@ def test_allsubtitles(self):
         self.assertEqual(md5(subtitles['en']), '4262c1665ff928a2dada178f62cb8d14')
         self.assertEqual(md5(subtitles['fr']), '66a63f7f42c97a50f8c0e90bc7797bb5')
         for lang in ['es', 'fr', 'de']:
-            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
+            self.assertTrue(subtitles.get(lang) is not None, f'Subtitles for \'{lang}\' not extracted')

 @is_download_test
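The message rewrites above are pyupgrade's printf-style cleanup (`UP031`/`UP032`): `%`-interpolation becomes an f-string. A standalone re-creation of the transformation:

```
lang = 'fr'

msg_old = 'Subtitles for \'%s\' not extracted' % lang   # before
msg_new = f'Subtitles for \'{lang}\' not extracted'     # after

assert msg_old == msg_new
```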
diff --git a/test/test_traversal.py b/test/test_traversal.py
@@ -31,7 +31,7 @@ def test_traversal_base(self):
             'allow tuple path'
         assert traverse_obj(_TEST_DATA, ['str']) == 'str', \
             'allow list path'
-        assert traverse_obj(_TEST_DATA, (value for value in ("str",))) == 'str', \
+        assert traverse_obj(_TEST_DATA, (value for value in ('str',))) == 'str', \
             'allow iterable path'
         assert traverse_obj(_TEST_DATA, 'str') == 'str', \
             'single items should be treated as a path'
@@ -70,7 +70,7 @@ def test_traversal_function(self):

     def test_traversal_set(self):
         # transformation/type, like `expected_type`
-        assert traverse_obj(_TEST_DATA, (..., {str.upper}, )) == ['STR'], \
+        assert traverse_obj(_TEST_DATA, (..., {str.upper})) == ['STR'], \
             'Function in set should be a transformation'
         assert traverse_obj(_TEST_DATA, (..., {str})) == ['str'], \
             'Type in set should be a type filter'
@@ -276,7 +276,7 @@ def test_traversal_traverse_string(self):
             '`...` should result in string (same value) if `traverse_string`'
         assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', slice(0, None, 2)), traverse_string=True) == 'sr', \
             '`slice` should result in string if `traverse_string`'
-        assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', lambda i, v: i or v == "s"), traverse_string=True) == 'str', \
+        assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', lambda i, v: i or v == 's'), traverse_string=True) == 'str', \
             'function should result in string if `traverse_string`'
         assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', (0, 2)), traverse_string=True) == ['s', 'r'], \
             'branching should result in list if `traverse_string`'
diff --git a/test/test_update.py b/test/test_update.py
@@ -78,11 +78,11 @@
 TEST_LOCKFILE_COMMENT = '# This file is used for regulating self-update'

-TEST_LOCKFILE_V1 = r'''%s
+TEST_LOCKFILE_V1 = rf'''{TEST_LOCKFILE_COMMENT}
 lock 2022.08.18.36 .+ Python 3\.6
 lock 2023.11.16 (?!win_x86_exe).+ Python 3\.7
 lock 2023.11.16 win_x86_exe .+ Windows-(?:Vista|2008Server)
-''' % TEST_LOCKFILE_COMMENT
+'''

 TEST_LOCKFILE_V2_TMPL = r'''%s
 lockV2 yt-dlp/yt-dlp 2022.08.18.36 .+ Python 3\.6
@@ -98,12 +98,12 @@
 TEST_LOCKFILE_ACTUAL = TEST_LOCKFILE_V2_TMPL % TEST_LOCKFILE_V1.rstrip('\n')

-TEST_LOCKFILE_FORK = r'''%s# Test if a fork blocks updates to non-numeric tags
+TEST_LOCKFILE_FORK = rf'''{TEST_LOCKFILE_ACTUAL}# Test if a fork blocks updates to non-numeric tags
 lockV2 fork/yt-dlp pr0000 .+ Python 3.6
 lockV2 fork/yt-dlp pr1234 (?!win_x86_exe).+ Python 3\.7
 lockV2 fork/yt-dlp pr1234 win_x86_exe .+ Windows-(?:Vista|2008Server)
 lockV2 fork/yt-dlp pr9999 .+ Python 3.11
-''' % TEST_LOCKFILE_ACTUAL
+'''


 class FakeUpdater(Updater):
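The lockfile constants keep their raw-string bodies (the `\.` regex escapes must survive verbatim), but the `%s` prefix interpolation moves into an `rf`-string so the substitution happens in the literal itself. The two spellings are equivalent:

```
TEST_LOCKFILE_COMMENT = '# This file is used for regulating self-update'

OLD = r'''%s
lock 2022.08.18.36 .+ Python 3\.6
''' % TEST_LOCKFILE_COMMENT

NEW = rf'''{TEST_LOCKFILE_COMMENT}
lock 2022.08.18.36 .+ Python 3\.6
'''

assert OLD == NEW
```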
diff --git a/test/test_utils.py b/test/test_utils.py
@@ -276,8 +276,8 @@ def env(var):
             self.assertEqual(expand_path(env('HOME')), os.getenv('HOME'))
             self.assertEqual(expand_path('~'), os.getenv('HOME'))
             self.assertEqual(
-                expand_path('~/%s' % env('yt_dlp_EXPATH_PATH')),
-                '%s/expanded' % os.getenv('HOME'))
+                expand_path('~/{}'.format(env('yt_dlp_EXPATH_PATH'))),
+                '{}/expanded'.format(os.getenv('HOME')))
         finally:
             os.environ['HOME'] = old_home or ''
@@ -356,12 +356,12 @@ def test_datetime_from_str(self):
         self.assertEqual(datetime_from_str('now+23hours', precision='hour'), datetime_from_str('now+23hours', precision='auto'))

     def test_daterange(self):
-        _20century = DateRange("19000101", "20000101")
-        self.assertFalse("17890714" in _20century)
-        _ac = DateRange("00010101")
-        self.assertTrue("19690721" in _ac)
-        _firstmilenium = DateRange(end="10000101")
-        self.assertTrue("07110427" in _firstmilenium)
+        _20century = DateRange('19000101', '20000101')
+        self.assertFalse('17890714' in _20century)
+        _ac = DateRange('00010101')
+        self.assertTrue('19690721' in _ac)
+        _firstmilenium = DateRange(end='10000101')
+        self.assertTrue('07110427' in _firstmilenium)

     def test_unified_dates(self):
         self.assertEqual(unified_strdate('December 21, 2010'), '20101221')
@@ -506,7 +506,7 @@ def test_xpath_attr(self):
         self.assertRaises(ExtractorError, xpath_attr, doc, 'div/p', 'y', fatal=True)

     def test_smuggle_url(self):
-        data = {"ö": "ö", "abc": [3]}
+        data = {'ö': 'ö', 'abc': [3]}
         url = 'https://foo.bar/baz?x=y#a'
         smug_url = smuggle_url(url, data)
         unsmug_url, unsmug_data = unsmuggle_url(smug_url)
@@ -784,7 +784,7 @@ def test_parse_iso8601(self):
     def test_strip_jsonp(self):
         stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);')
         d = json.loads(stripped)
-        self.assertEqual(d, [{"id": "532cb", "x": 3}])
+        self.assertEqual(d, [{'id': '532cb', 'x': 3}])

         stripped = strip_jsonp('parseMetadata({"STATUS":"OK"})\n\n\n//epc')
         d = json.loads(stripped)
@@ -922,19 +922,19 @@ def test_escape_rfc3986(self):
     def test_normalize_url(self):
         self.assertEqual(
             normalize_url('http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavré_FD.mp4'),
-            'http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavre%CC%81_FD.mp4'
+            'http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavre%CC%81_FD.mp4',
         )
         self.assertEqual(
             normalize_url('http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erklärt/Das-Erste/Video?documentId=22673108&bcastId=5290'),
-            'http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erkl%C3%A4rt/Das-Erste/Video?documentId=22673108&bcastId=5290'
+            'http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erkl%C3%A4rt/Das-Erste/Video?documentId=22673108&bcastId=5290',
         )
         self.assertEqual(
             normalize_url('http://тест.рф/фрагмент'),
-            'http://xn--e1aybc.xn--p1ai/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82'
+            'http://xn--e1aybc.xn--p1ai/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82',
         )
         self.assertEqual(
             normalize_url('http://тест.рф/абв?абв=абв#абв'),
-            'http://xn--e1aybc.xn--p1ai/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2'
+            'http://xn--e1aybc.xn--p1ai/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2',
         )
         self.assertEqual(normalize_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
@@ -979,7 +979,7 @@ def test_js_to_json_vars_strings(self):
                     'e': 'false',
                     'f': '"false"',
                     'g': 'var',
-                }
+                },
             )),
             {
                 'null': None,
@@ -988,8 +988,8 @@ def test_js_to_json_vars_strings(self):
                 'trueStr': 'true',
                 'false': False,
                 'falseStr': 'false',
-                'unresolvedVar': 'var'
-            }
+                'unresolvedVar': 'var',
+            },
         )

         self.assertDictEqual(
@@ -1005,14 +1005,14 @@ def test_js_to_json_vars_strings(self):
                     'b': '"123"',
                     'c': '1.23',
                     'd': '"1.23"',
-                }
+                },
             )),
             {
                 'int': 123,
                 'intStr': '123',
                 'float': 1.23,
                 'floatStr': '1.23',
-            }
+            },
         )

         self.assertDictEqual(
@@ -1028,14 +1028,14 @@ def test_js_to_json_vars_strings(self):
                     'b': '"{}"',
                     'c': '[]',
                     'd': '"[]"',
-                }
+                },
             )),
             {
                 'object': {},
                 'objectStr': '{}',
                 'array': [],
                 'arrayStr': '[]',
-            }
+            },
         )

     def test_js_to_json_realworld(self):
@@ -1081,7 +1081,7 @@ def test_js_to_json_realworld(self):

     def test_js_to_json_edgecases(self):
         on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
-        self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
+        self.assertEqual(json.loads(on), {'abc_def': "1'\\2\\'3\"4"})

         on = js_to_json('{"abc": true}')
         self.assertEqual(json.loads(on), {'abc': True})
@@ -1113,9 +1113,9 @@ def test_js_to_json_edgecases(self):
             'c': 0,
             'd': 42.42,
             'e': [],
-            'f': "abc",
-            'g': "",
-            '42': 42
+            'f': 'abc',
+            'g': '',
+            '42': 42,
         })

         on = js_to_json('["abc", "def",]')
@@ -1209,8 +1209,8 @@ def test_js_to_json_common_constructors(self):
         self.assertEqual(json.loads(js_to_json('Array(5, 10)')), [5, 10])
         self.assertEqual(json.loads(js_to_json('new Array(15,5)')), [15, 5])
         self.assertEqual(json.loads(js_to_json('new Map([Array(5, 10),new Array(15,5)])')), {'5': 10, '15': 5})
-        self.assertEqual(json.loads(js_to_json('new Date("123")')), "123")
-        self.assertEqual(json.loads(js_to_json('new Date(\'2023-10-19\')')), "2023-10-19")
+        self.assertEqual(json.loads(js_to_json('new Date("123")')), '123')
+        self.assertEqual(json.loads(js_to_json('new Date(\'2023-10-19\')')), '2023-10-19')

     def test_extract_attributes(self):
         self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
@@ -1265,7 +1265,7 @@ def test_intlist_to_bytes(self):
     def test_args_to_str(self):
         self.assertEqual(
             args_to_str(['foo', 'ba/r', '-baz', '2 be', '']),
-            'foo ba/r -baz \'2 be\' \'\'' if compat_os_name != 'nt' else 'foo ba/r -baz "2 be" ""'
+            'foo ba/r -baz \'2 be\' \'\'' if compat_os_name != 'nt' else 'foo ba/r -baz "2 be" ""',
         )

     def test_parse_filesize(self):
@@ -1348,10 +1348,10 @@ def test_is_html(self):
         self.assertTrue(is_html(  # UTF-8 with BOM
             b'\xef\xbb\xbf<!DOCTYPE foo>\xaaa'))
         self.assertTrue(is_html(  # UTF-16-LE
-            b'\xff\xfe<\x00h\x00t\x00m\x00l\x00>\x00\xe4\x00'
+            b'\xff\xfe<\x00h\x00t\x00m\x00l\x00>\x00\xe4\x00',
         ))
         self.assertTrue(is_html(  # UTF-16-BE
-            b'\xfe\xff\x00<\x00h\x00t\x00m\x00l\x00>\x00\xe4'
+            b'\xfe\xff\x00<\x00h\x00t\x00m\x00l\x00>\x00\xe4',
         ))
         self.assertTrue(is_html(  # UTF-32-BE
             b'\x00\x00\xFE\xFF\x00\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4'))
@@ -1935,7 +1935,7 @@ def test_locked_file(self):
                     with locked_file(FILE, test_mode, False):
                         pass
                 except (BlockingIOError, PermissionError):
-                    if not testing_write:  # FIXME
+                    if not testing_write:  # FIXME: blocked read access
                         print(f'Known issue: Exclusive lock ({lock_mode}) blocks read access ({test_mode})')
                         continue
                 self.assertTrue(testing_write, f'{test_mode} is blocked by {lock_mode}')
@@ -2003,7 +2003,7 @@ def total(*x, **kwargs):
                          msg='int fn with expected_type int should give int')
         self.assertEqual(try_call(lambda: 1, expected_type=dict), None,
                          msg='int fn with wrong expected_type should give None')
-        self.assertEqual(try_call(total, args=(0, 1, 0, ), expected_type=int), 1,
+        self.assertEqual(try_call(total, args=(0, 1, 0), expected_type=int), 1,
                          msg='fn should accept arglist')
         self.assertEqual(try_call(total, kwargs={'a': 0, 'b': 1, 'c': 0}, expected_type=int), 1,
                          msg='fn should accept kwargs')
diff --git a/test/test_websockets.py b/test/test_websockets.py
@@ -297,14 +297,14 @@ def test_request_headers(self, handler):
             'client_certificate': os.path.join(MTLS_CERT_DIR, 'client.crt'),
             'client_certificate_key': os.path.join(MTLS_CERT_DIR, 'clientencrypted.key'),
             'client_certificate_password': 'foobar',
-        }
+        },
     ))
     def test_mtls(self, handler, client_cert):
         with handler(
             # Disable client-side validation of unacceptable self-signed testcert.pem
             # The test is of a check on the server side, so unaffected
             verify=False,
-            client_cert=client_cert
+            client_cert=client_cert,
         ) as rh:
             ws_validate_and_send(rh, Request(self.mtls_wss_base_url)).close()
diff --git a/test/test_youtube_misc.py b/test/test_youtube_misc.py
@@ -13,7 +13,7 @@
 class TestYoutubeMisc(unittest.TestCase):
     def test_youtube_extract(self):
-        assertExtractId = lambda url, id: self.assertEqual(YoutubeIE.extract_id(url), id)
+        assertExtractId = lambda url, video_id: self.assertEqual(YoutubeIE.extract_id(url), video_id)
         assertExtractId('http://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
         assertExtractId('https://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
         assertExtractId('https://www.youtube.com/watch?feature=player_embedded&v=BaW_jenozKc', 'BaW_jenozKc')
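The lambda's `id` parameter shadows the builtin of the same name, which flake8-builtins flags (`A002`), hence the rename to `video_id`. The same fix in isolation, with a hypothetical helper:

```
# A002: don't name a parameter after the builtin `id`.
def check_extracted_id(url, video_id):  # was: (url, id)
    assert url.endswith(video_id), f'{video_id} not found in {url}'

check_extracted_id('http://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
```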
diff --git a/test/test_youtube_signature.py b/test/test_youtube_signature.py
@@ -46,17 +46,17 @@
     (
         'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflBb0OQx.js',
         84,
-        '123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ0STUVWXYZ!"#$%&\'()*+,@./:;<=>'
+        '123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ0STUVWXYZ!"#$%&\'()*+,@./:;<=>',
     ),
     (
         'https://s.ytimg.com/yts/jsbin/html5player-en_US-vfl9FYC6l.js',
         83,
-        '123456789abcdefghijklmnopqr0tuvwxyzABCDETGHIJKLMNOPQRS>UVWXYZ!"#$%&\'()*+,-./:;<=F'
+        '123456789abcdefghijklmnopqr0tuvwxyzABCDETGHIJKLMNOPQRS>UVWXYZ!"#$%&\'()*+,-./:;<=F',
     ),
     (
         'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflCGk6yw/html5player.js',
         '4646B5181C6C3020DF1D9C7FCFEA.AD80ABF70C39BD369CCCAE780AFBB98FA6B6CB42766249D9488C288',
-        '82C8849D94266724DC6B6AF89BBFA087EACCD963.B93C07FBA084ACAEFCF7C9D1FD0203C6C1815B6B'
+        '82C8849D94266724DC6B6AF89BBFA087EACCD963.B93C07FBA084ACAEFCF7C9D1FD0203C6C1815B6B',
     ),
     (
         'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflKjOTVq/html5player.js',
@@ -207,7 +207,7 @@ def tearDown(self):
 def t_factory(name, sig_func, url_pattern):
     def make_tfunc(url, sig_input, expected_sig):
         m = url_pattern.match(url)
-        assert m, '%r should follow URL format' % url
+        assert m, f'{url!r} should follow URL format'
         test_id = m.group('id')

         def test_func(self):
diff --git a/yt_dlp/YoutubeDL.py b/yt_dlp/YoutubeDL.py
@@ -109,7 +109,6 @@
     determine_protocol,
     encode_compat_str,
     encodeFilename,
-    error_to_compat_str,
     escapeHTML,
     expand_path,
     extract_basic_auth,
@@ -583,7 +582,7 @@ class YoutubeDL:
         'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start', 'is_dash_periods', 'request_data',
         'preference', 'language', 'language_preference', 'quality', 'source_preference', 'cookies',
         'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'extra_param_to_segment_url', 'hls_aes', 'downloader_options',
-        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
+        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time',
     }
     _deprecated_multivalue_fields = {
         'album_artist': 'album_artists',
@@ -594,7 +593,7 @@ class YoutubeDL:
     }
     _format_selection_exts = {
         'audio': set(MEDIA_EXTENSIONS.common_audio),
-        'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
+        'video': {*MEDIA_EXTENSIONS.common_video, '3gp'},
         'storyboards': set(MEDIA_EXTENSIONS.storyboards),
     }
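`set(tuple + tuple)` gets the same unpacking treatment as the list literals earlier, only with a set display, which also skips building the intermediate tuple. Illustrated with stand-in extensions:

```
common_video = ('mp4', 'mov', 'webm')

exts_old = set(common_video + ('3gp',))   # before
exts_new = {*common_video, '3gp'}         # after

assert exts_old == exts_new
```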
@@ -628,7 +627,7 @@ def __init__(self, params=None, auto_init=True):
             error=sys.stderr,
             screen=sys.stderr if self.params.get('quiet') else stdout,
             console=None if compat_os_name == 'nt' else next(
-                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)
+                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None),
         )

         try:
@@ -679,9 +678,9 @@ def process_color_policy(stream):
                 width_args = [] if width is None else ['-w', str(width)]
                 sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
                 try:
-                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
+                    self._output_process = Popen(['bidiv', *width_args], **sp_kwargs)
                 except OSError:
-                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
+                    self._output_process = Popen(['fribidi', '-c', 'UTF-8', *width_args], **sp_kwargs)
                 self._output_channel = os.fdopen(master, 'rb')
             except OSError as ose:
                 if ose.errno == errno.ENOENT:
@@ -822,8 +821,7 @@ def warn_if_short_id(self, argv):
         )
         self.report_warning(
             'Long argument string detected. '
-            'Use -- to separate parameters and URLs, like this:\n%s' %
-            shell_quote(correct_argv))
+            f'Use -- to separate parameters and URLs, like this:\n{shell_quote(correct_argv)}')

     def add_info_extractor(self, ie):
         """Add an InfoExtractor object to the end of the list."""
@@ -922,7 +920,7 @@ def to_screen(self, message, skip_eol=False, quiet=None, only_once=False):
         if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
             return
         self._write_string(
-            '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
+            '{}{}'.format(self._bidi_workaround(message), ('' if skip_eol else '\n')),
             self._out_files.screen, only_once=only_once)

     def to_stderr(self, message, only_once=False):
@@ -1045,10 +1043,10 @@ def _format_err(self, *args, **kwargs):
         return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)

     def report_warning(self, message, only_once=False):
-        '''
+        """
         Print the message to stderr, it will be prefixed with 'WARNING:'
         If stderr is a tty file the 'WARNING:' will be colored
-        '''
+        """
         if self.params.get('logger') is not None:
             self.params['logger'].warning(message)
         else:
@@ -1066,14 +1064,14 @@ def deprecated_feature(self, message):
         self.to_stderr(f'{self._format_err("Deprecated Feature:", self.Styles.ERROR)} {message}', True)

     def report_error(self, message, *args, **kwargs):
-        '''
+        """
         Do the same as trouble, but prefixes the message with 'ERROR:', colored
         in red if stderr is a tty file.
-        '''
+        """
         self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)

     def write_debug(self, message, only_once=False):
-        '''Log debug message or Print message to stderr'''
+        """Log debug message or Print message to stderr"""
         if not self.params.get('verbose', False):
             return
         message = f'[debug] {message}'
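The docstring churn in this stretch is purely cosmetic: `'''` becomes `"""` per the docstring-quote rules (ruff's `Q002`/`D300`); runtime behavior is unchanged. A trimmed-down sketch:

```
def write_debug(message):
    """Log debug message or Print message to stderr"""  # was: '''...'''
    return f'[debug] {message}'
```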
@@ -1085,14 +1083,14 @@ def write_debug(self, message, only_once=False):
     def report_file_already_downloaded(self, file_name):
         """Report file has already been fully downloaded."""
         try:
-            self.to_screen('[download] %s has already been downloaded' % file_name)
+            self.to_screen(f'[download] {file_name} has already been downloaded')
         except UnicodeEncodeError:
             self.to_screen('[download] The file has already been downloaded')

     def report_file_delete(self, file_name):
         """Report that existing file will be deleted."""
         try:
-            self.to_screen('Deleting existing file %s' % file_name)
+            self.to_screen(f'Deleting existing file {file_name}')
         except UnicodeEncodeError:
             self.to_screen('Deleting existing file')
@@ -1147,7 +1145,7 @@ def _outtmpl_expandpath(outtmpl):
     @staticmethod
     def escape_outtmpl(outtmpl):
-        ''' Escape any remaining strings like %s, %abc% etc. '''
+        """ Escape any remaining strings like %s, %abc% etc. """
         return re.sub(
             STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
             lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
@@ -1155,7 +1153,7 @@ def escape_outtmpl(outtmpl):
     @classmethod
     def validate_outtmpl(cls, outtmpl):
-        ''' @return None or Exception object '''
+        """ @return None or Exception object """
         outtmpl = re.sub(
             STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
             lambda mobj: f'{mobj.group(0)[:-1]}s',
@@ -1208,13 +1206,13 @@ def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
         }
         # Field is of the form key1.key2...
         # where keys (except first) can be string, int, slice or "{field, ...}"
-        FIELD_INNER_RE = r'(?:\w+|%(num)s|%(num)s?(?::%(num)s?){1,2})' % {'num': r'(?:-?\d+)'}
-        FIELD_RE = r'\w*(?:\.(?:%(inner)s|{%(field)s(?:,%(field)s)*}))*' % {
+        FIELD_INNER_RE = r'(?:\w+|%(num)s|%(num)s?(?::%(num)s?){1,2})' % {'num': r'(?:-?\d+)'}  # noqa: UP031
+        FIELD_RE = r'\w*(?:\.(?:%(inner)s|{%(field)s(?:,%(field)s)*}))*' % {  # noqa: UP031
             'inner': FIELD_INNER_RE,
-            'field': rf'\w*(?:\.{FIELD_INNER_RE})*'
+            'field': rf'\w*(?:\.{FIELD_INNER_RE})*',
         }
         MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
-        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
+        MATH_OPERATORS_RE = r'(?:{})'.format('|'.join(map(re.escape, MATH_FUNCTIONS.keys())))
         INTERNAL_FORMAT_RE = re.compile(rf'''(?xs)
             (?P<negate>-)?
             (?P<fields>{FIELD_RE})
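Note the two `# noqa: UP031` markers just added: the `%(num)s` templating there is deliberate (the regex is assembled from named fragments), so the commit keeps it and suppresses the printf-style rule on exactly those lines rather than rewriting them. Schematically:

```
NUM_RE = r'(?:-?\d+)'

# Intentional printf-style templating, silenced per line:
FIELD_INNER_RE = r'(?:\w+|%(num)s|%(num)s?(?::%(num)s?){1,2})' % {'num': NUM_RE}  # noqa: UP031

assert r'(?:-?\d+)' in FIELD_INNER_RE
```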
@@ -1337,7 +1335,7 @@ def create_key(outer_mobj):
                 value, default = None, na

             fmt = outer_mobj.group('format')
-            if fmt == 's' and last_field in field_size_compat_map.keys() and isinstance(value, int):
+            if fmt == 's' and last_field in field_size_compat_map and isinstance(value, int):
                 fmt = f'0{field_size_compat_map[last_field]:d}d'

             flags = outer_mobj.group('conversion') or ''
@@ -1362,7 +1360,7 @@ def create_key(outer_mobj):
             elif fmt[-1] == 'U':  # unicode normalized
                 value, fmt = unicodedata.normalize(
                     # "+" = compatibility equivalence, "#" = NFD
-                    'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
+                    'NF{}{}'.format('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
                     value), str_fmt
             elif fmt[-1] == 'D':  # decimal suffix
                 num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
@@ -1390,7 +1388,7 @@ def create_key(outer_mobj):
             if fmt[-1] in 'csra':
                 value = sanitizer(last_field, value)

-            key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
+            key = '{}\0{}'.format(key.replace('%', '%\0'), outer_mobj.group('format'))
             TMPL_DICT[key] = value
             return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))
@@ -1479,9 +1477,9 @@ def check_filter():
             date = info_dict.get('upload_date')
             if date is not None:
-                dateRange = self.params.get('daterange', DateRange())
-                if date not in dateRange:
-                    return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
+                date_range = self.params.get('daterange', DateRange())
+                if date not in date_range:
+                    return f'{date_from_str(date).isoformat()} upload date is not in range {date_range}'
             view_count = info_dict.get('view_count')
             if view_count is not None:
                 min_views = self.params.get('min_views')
@@ -1491,7 +1489,7 @@ def check_filter():
                 if max_views is not None and view_count > max_views:
                     return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
             if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
-                return 'Skipping "%s" because it is age restricted' % video_title
+                return f'Skipping "{video_title}" because it is age restricted'

         match_filter = self.params.get('match_filter')
         if match_filter is None:
@@ -1544,7 +1542,7 @@ def check_filter():
     @staticmethod
     def add_extra_info(info_dict, extra_info):
-        '''Set the keys from extra_info in info dict if they are missing'''
+        """Set the keys from extra_info in info dict if they are missing"""
         for key, value in extra_info.items():
             info_dict.setdefault(key, value)
@@ -1590,7 +1588,7 @@ def extract_info(self, url, download=True, ie_key=None, extra_info=None,
                 self.to_screen(f'[download] {self._format_screen(temp_id, self.Styles.ID)}: '
                                'has already been recorded in the archive')
                 if self.params.get('break_on_existing', False):
-                    raise ExistingVideoReached()
+                    raise ExistingVideoReached
                 break
             return self.__extract_info(url, self.get_info_extractor(key), download, extra_info, process)
         else:
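`raise ExistingVideoReached()` losing its parentheses is flake8-raise (`RSE102`): `raise` instantiates an exception class itself when no arguments are given, so the empty call adds nothing. Demonstrated with a stand-in class (the real one lives in `yt_dlp.utils`):

```
class ExistingVideoReached(Exception):
    """Stand-in for the real exception class."""

try:
    raise ExistingVideoReached  # equivalent to: raise ExistingVideoReached()
except ExistingVideoReached:
    print('caught')
```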
@@ -1616,8 +1614,8 @@ def wrapper(self, *args, **kwargs):
         except GeoRestrictedError as e:
             msg = e.msg
             if e.countries:
-                msg += '\nThis video is available in %s.' % ', '.join(
-                    map(ISO3166Utils.short2full, e.countries))
+                msg += '\nThis video is available in {}.'.format(', '.join(
+                    map(ISO3166Utils.short2full, e.countries)))
             msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
             self.report_error(msg)
         except ExtractorError as e:  # An error we somewhat expected
@@ -1826,8 +1824,8 @@ def process_ie_result(self, ie_result, download=True, extra_info=None):
             if isinstance(additional_urls, str):
                 additional_urls = [additional_urls]
             self.to_screen(
-                '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
-            self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
+                '[info] {}: {} additional URL(s) requested'.format(ie_result['id'], len(additional_urls)))
+            self.write_debug('Additional URLs: "{}"'.format('", "'.join(additional_urls)))
             ie_result['additional_entries'] = [
                 self.extract_info(
                     url, download, extra_info=extra_info,
@@ -1879,8 +1877,8 @@ def process_ie_result(self, ie_result, download=True, extra_info=None):
             webpage_url = ie_result.get('webpage_url')  # Playlists maynot have webpage_url
             if webpage_url and webpage_url in self._playlist_urls:
                 self.to_screen(
-                    '[download] Skipping already downloaded playlist: %s'
-                    % ie_result.get('title') or ie_result.get('id'))
+                    '[download] Skipping already downloaded playlist: {}'.format(
+                        ie_result.get('title')) or ie_result.get('id'))
                 return

             self._playlist_level += 1
@@ -1895,8 +1893,8 @@ def process_ie_result(self, ie_result, download=True, extra_info=None):
                 self._playlist_urls.clear()
         elif result_type == 'compat_list':
             self.report_warning(
-                'Extractor %s returned a compat_list result. '
-                'It needs to be updated.' % ie_result.get('extractor'))
+                'Extractor {} returned a compat_list result. '
+                'It needs to be updated.'.format(ie_result.get('extractor')))

             def _fixup(r):
                 self.add_extra_info(r, {
@@ -1913,7 +1911,7 @@ def _fixup(r):
             ]
             return ie_result
         else:
-            raise Exception('Invalid result type: %s' % result_type)
+            raise Exception(f'Invalid result type: {result_type}')

     def _ensure_dir_exists(self, path):
         return make_dir(path, self.report_error)
@@ -2029,8 +2027,9 @@ def __process_playlist(self, ie_result, download):
                 resolved_entries[i] = (playlist_index, NO_DEFAULT)
                 continue

-            self.to_screen('[download] Downloading item %s of %s' % (
-                self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))
+            self.to_screen(
+                f'[download] Downloading item {self._format_screen(i + 1, self.Styles.ID)} '
+                f'of {self._format_screen(n_entries, self.Styles.EMPHASIS)}')

             entry_result = self.__process_iterable_entry(entry, download, collections.ChainMap({
                 'playlist_index': playlist_index,
@@ -2080,9 +2079,9 @@ def _build_format_filter(self, filter_spec):
         }
         operator_rex = re.compile(r'''(?x)\s*
             (?P<key>[\w.-]+)\s*
-            (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
+            (?P<op>{})(?P<none_inclusive>\s*\?)?\s*
             (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
-            ''' % '|'.join(map(re.escape, OPERATORS.keys())))
+            '''.format('|'.join(map(re.escape, OPERATORS.keys()))))
         m = operator_rex.fullmatch(filter_spec)
         if m:
             try:
@@ -2093,7 +2092,7 @@ def _build_format_filter(self, filter_spec):
                 comparison_value = parse_filesize(m.group('value') + 'B')
             if comparison_value is None:
                 raise ValueError(
-                    'Invalid value %r in format specification %r' % (
+                    'Invalid value {!r} in format specification {!r}'.format(
                         m.group('value'), filter_spec))
             op = OPERATORS[m.group('op')]
@@ -2103,15 +2102,15 @@ def _build_format_filter(self, filter_spec):
             '^=': lambda attr, value: attr.startswith(value),
             '$=': lambda attr, value: attr.endswith(value),
             '*=': lambda attr, value: value in attr,
-            '~=': lambda attr, value: value.search(attr) is not None
+            '~=': lambda attr, value: value.search(attr) is not None,
         }
         str_operator_rex = re.compile(r'''(?x)\s*
             (?P<key>[a-zA-Z0-9._-]+)\s*
-            (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
+            (?P<negation>!\s*)?(?P<op>{})\s*(?P<none_inclusive>\?\s*)?
             (?P<quote>["'])?
             (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
             (?(quote)(?P=quote))\s*
-            ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
+            '''.format('|'.join(map(re.escape, STR_OPERATORS.keys()))))
         m = str_operator_rex.fullmatch(filter_spec)
         if m:
             if m.group('op') == '~=':
@@ -2125,7 +2124,7 @@ def _build_format_filter(self, filter_spec):
                 op = str_op

         if not m:
-            raise SyntaxError('Invalid filter specification %r' % filter_spec)
+            raise SyntaxError(f'Invalid filter specification {filter_spec!r}')

         def _filter(f):
             actual_value = f.get(m.group('key'))
@@ -2141,7 +2140,7 @@ def _check_formats(self, formats):
             if working:
                 yield f
                 continue
-            self.to_screen('[info] Testing format %s' % f['format_id'])
+            self.to_screen('[info] Testing format {}'.format(f['format_id']))
             path = self.get_output_path('temp')
             if not self._ensure_dir_exists(f'{path}/'):
                 continue
@@ -2149,19 +2148,19 @@ def _check_formats(self, formats):
             temp_file.close()
             try:
                 success, _ = self.dl(temp_file.name, f, test=True)
-            except (DownloadError, OSError, ValueError) + network_exceptions:
+            except (DownloadError, OSError, ValueError, *network_exceptions):
                 success = False
             finally:
                 if os.path.exists(temp_file.name):
                     try:
                         os.remove(temp_file.name)
                     except OSError:
-                        self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
+                        self.report_warning(f'Unable to delete temporary file "{temp_file.name}"')
             f['__working'] = success
             if success:
                 yield f
             else:
-                self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
+                self.to_screen('[info] Unable to download format {}. Skipping...'.format(f['format_id']))

     def _select_formats(self, formats, selector):
         return list(selector({
@@ -2214,8 +2213,8 @@ def syntax_error(note, start):
         def _parse_filter(tokens):
             filter_parts = []
-            for type, string_, start, _, _ in tokens:
-                if type == tokenize.OP and string_ == ']':
+            for type_, string_, _start, _, _ in tokens:
+                if type_ == tokenize.OP and string_ == ']':
                     return ''.join(filter_parts)
                 else:
                     filter_parts.append(string_)
@@ -2225,23 +2224,23 @@ def _remove_unused_ops(tokens):
             # E.g. 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
             ALLOWED_OPS = ('/', '+', ',', '(', ')')
             last_string, last_start, last_end, last_line = None, None, None, None
-            for type, string_, start, end, line in tokens:
-                if type == tokenize.OP and string_ == '[':
+            for type_, string_, start, end, line in tokens:
+                if type_ == tokenize.OP and string_ == '[':
                     if last_string:
                         yield tokenize.NAME, last_string, last_start, last_end, last_line
                         last_string = None
-                    yield type, string_, start, end, line
+                    yield type_, string_, start, end, line
                     # everything inside brackets will be handled by _parse_filter
-                    for type, string_, start, end, line in tokens:
-                        yield type, string_, start, end, line
-                        if type == tokenize.OP and string_ == ']':
+                    for type_, string_, start, end, line in tokens:
+                        yield type_, string_, start, end, line
+                        if type_ == tokenize.OP and string_ == ']':
                             break
-                elif type == tokenize.OP and string_ in ALLOWED_OPS:
+                elif type_ == tokenize.OP and string_ in ALLOWED_OPS:
                     if last_string:
                         yield tokenize.NAME, last_string, last_start, last_end, last_line
                         last_string = None
-                    yield type, string_, start, end, line
-                elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
+                    yield type_, string_, start, end, line
+                elif type_ in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
                     if not last_string:
                         last_string = string_
                         last_start = start
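The tokenizer loops rename `type` to `type_` so the builtin stays usable inside the loop body (flake8-builtins `A001`), and the unused `start` slot becomes `_start`. The renamed unpacking works against the real `tokenize` API:

```
import io
import tokenize

# A001: use `type_`, not `type`; underscore-prefix deliberately unused slots.
for type_, string_, _start, _end, _line in tokenize.generate_tokens(io.StringIO('best/worst').readline):
    print(tokenize.tok_name[type_], repr(string_))
```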
@@ -2254,13 +2253,13 @@ def _remove_unused_ops(tokens):
         def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
             selectors = []
             current_selector = None
-            for type, string_, start, _, _ in tokens:
+            for type_, string_, start, _, _ in tokens:
                 # ENCODING is only defined in Python 3.x
-                if type == getattr(tokenize, 'ENCODING', None):
+                if type_ == getattr(tokenize, 'ENCODING', None):
                     continue
-                elif type in [tokenize.NAME, tokenize.NUMBER]:
+                elif type_ in [tokenize.NAME, tokenize.NUMBER]:
                     current_selector = FormatSelector(SINGLE, string_, [])
-                elif type == tokenize.OP:
+                elif type_ == tokenize.OP:
                     if string_ == ')':
                         if not inside_group:
                             # ')' will be handled by the parentheses group
@@ -2303,7 +2302,7 @@ def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, ins
                         current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
                     else:
                         raise syntax_error(f'Operator not recognized: "{string_}"', start)
-                elif type == tokenize.ENDMARKER:
+                elif type_ == tokenize.ENDMARKER:
                     break
             if current_selector:
                 selectors.append(current_selector)
@@ -2378,7 +2377,7 @@ def _merge(formats_pair):
                 'acodec': the_only_audio.get('acodec'),
                 'abr': the_only_audio.get('abr'),
                 'asr': the_only_audio.get('asr'),
-                'audio_channels': the_only_audio.get('audio_channels')
+                'audio_channels': the_only_audio.get('audio_channels'),
             })

         return new_dict
@@ -2459,9 +2458,9 @@ def selector_function(ctx):
                 format_fallback = not format_type and not format_modified  # for b, w
                 _filter_f = (
-                    (lambda f: f.get('%scodec' % format_type) != 'none')
+                    (lambda f: f.get(f'{format_type}codec') != 'none')
                     if format_type and format_modified  # bv*, ba*, wv*, wa*
-                    else (lambda f: f.get('%scodec' % not_format_type) == 'none')
+                    else (lambda f: f.get(f'{not_format_type}codec') == 'none')
                     if format_type  # bv, ba, wv, wa
                     else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
                     if not format_modified  # b, w
@@ -2529,7 +2528,7 @@ def __iter__(self):
             def __next__(self):
                 if self.counter >= len(self.tokens):
-                    raise StopIteration()
+                    raise StopIteration
                 value = self.tokens[self.counter]
                 self.counter += 1
                 return value
@ -2673,8 +2672,8 @@ def _fill_common_fields(self, info_dict, final=True):
# Auto generate title fields corresponding to the *_number fields when missing # Auto generate title fields corresponding to the *_number fields when missing
# in order to always have clean titles. This is very common for TV series. # in order to always have clean titles. This is very common for TV series.
for field in ('chapter', 'season', 'episode'): for field in ('chapter', 'season', 'episode'):
if final and info_dict.get('%s_number' % field) is not None and not info_dict.get(field): if final and info_dict.get(f'{field}_number') is not None and not info_dict.get(field):
info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field]) info_dict[field] = '%s %d' % (field.capitalize(), info_dict[f'{field}_number'])
for old_key, new_key in self._deprecated_multivalue_fields.items(): for old_key, new_key in self._deprecated_multivalue_fields.items():
if new_key in info_dict and old_key in info_dict: if new_key in info_dict and old_key in info_dict:
@ -2706,8 +2705,8 @@ def process_video_result(self, info_dict, download=True):
def report_force_conversion(field, field_not, conversion): def report_force_conversion(field, field_not, conversion):
self.report_warning( self.report_warning(
'"%s" field is not %s - forcing %s conversion, there is an error in extractor' f'"{field}" field is not {field_not} - forcing {conversion} conversion, '
% (field, field_not, conversion)) 'there is an error in extractor')
def sanitize_string_field(info, string_field): def sanitize_string_field(info, string_field):
field = info.get(string_field) field = info.get(string_field)
@ -2824,28 +2823,28 @@ def is_wellformed(f):
if not formats: if not formats:
self.raise_no_formats(info_dict) self.raise_no_formats(info_dict)
for format in formats: for fmt in formats:
sanitize_string_field(format, 'format_id') sanitize_string_field(fmt, 'format_id')
sanitize_numeric_fields(format) sanitize_numeric_fields(fmt)
format['url'] = sanitize_url(format['url']) fmt['url'] = sanitize_url(fmt['url'])
if format.get('ext') is None: if fmt.get('ext') is None:
format['ext'] = determine_ext(format['url']).lower() fmt['ext'] = determine_ext(fmt['url']).lower()
if format['ext'] in ('aac', 'opus', 'mp3', 'flac', 'vorbis'): if fmt['ext'] in ('aac', 'opus', 'mp3', 'flac', 'vorbis'):
if format.get('acodec') is None: if fmt.get('acodec') is None:
format['acodec'] = format['ext'] fmt['acodec'] = fmt['ext']
if format.get('protocol') is None: if fmt.get('protocol') is None:
format['protocol'] = determine_protocol(format) fmt['protocol'] = determine_protocol(fmt)
if format.get('resolution') is None: if fmt.get('resolution') is None:
format['resolution'] = self.format_resolution(format, default=None) fmt['resolution'] = self.format_resolution(fmt, default=None)
if format.get('dynamic_range') is None and format.get('vcodec') != 'none': if fmt.get('dynamic_range') is None and fmt.get('vcodec') != 'none':
format['dynamic_range'] = 'SDR' fmt['dynamic_range'] = 'SDR'
if format.get('aspect_ratio') is None: if fmt.get('aspect_ratio') is None:
format['aspect_ratio'] = try_call(lambda: round(format['width'] / format['height'], 2)) fmt['aspect_ratio'] = try_call(lambda: round(fmt['width'] / fmt['height'], 2))
# For fragmented formats, "tbr" is often max bitrate and not average # For fragmented formats, "tbr" is often max bitrate and not average
if (('manifest-filesize-approx' in self.params['compat_opts'] or not format.get('manifest_url')) if (('manifest-filesize-approx' in self.params['compat_opts'] or not fmt.get('manifest_url'))
and not format.get('filesize') and not format.get('filesize_approx')): and not fmt.get('filesize') and not fmt.get('filesize_approx')):
format['filesize_approx'] = filesize_from_tbr(format.get('tbr'), info_dict.get('duration')) fmt['filesize_approx'] = filesize_from_tbr(fmt.get('tbr'), info_dict.get('duration'))
format['http_headers'] = self._calc_headers(collections.ChainMap(format, info_dict), load_cookies=True) fmt['http_headers'] = self._calc_headers(collections.ChainMap(fmt, info_dict), load_cookies=True)
# Safeguard against old/insecure infojson when using --load-info-json # Safeguard against old/insecure infojson when using --load-info-json
if info_dict.get('http_headers'): if info_dict.get('http_headers'):
@@ -2858,36 +2857,36 @@ def is_wellformed(f):
         self.sort_formats({
             'formats': formats,
-            '_format_sort_fields': info_dict.get('_format_sort_fields')
+            '_format_sort_fields': info_dict.get('_format_sort_fields'),
         })

         # Sanitize and group by format_id
         formats_dict = {}
-        for i, format in enumerate(formats):
-            if not format.get('format_id'):
-                format['format_id'] = str(i)
+        for i, fmt in enumerate(formats):
+            if not fmt.get('format_id'):
+                fmt['format_id'] = str(i)
             else:
                 # Sanitize format_id from characters used in format selector expression
-                format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
-            formats_dict.setdefault(format['format_id'], []).append(format)
+                fmt['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', fmt['format_id'])
+            formats_dict.setdefault(fmt['format_id'], []).append(fmt)

         # Make sure all formats have unique format_id
         common_exts = set(itertools.chain(*self._format_selection_exts.values()))
         for format_id, ambiguous_formats in formats_dict.items():
             ambigious_id = len(ambiguous_formats) > 1
-            for i, format in enumerate(ambiguous_formats):
+            for i, fmt in enumerate(ambiguous_formats):
                 if ambigious_id:
-                    format['format_id'] = '%s-%d' % (format_id, i)
+                    fmt['format_id'] = f'{format_id}-{i}'
                 # Ensure there is no conflict between id and ext in format selection
                 # See https://github.com/yt-dlp/yt-dlp/issues/1282
-                if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
-                    format['format_id'] = 'f%s' % format['format_id']
+                if fmt['format_id'] != fmt['ext'] and fmt['format_id'] in common_exts:
+                    fmt['format_id'] = 'f{}'.format(fmt['format_id'])

-                if format.get('format') is None:
-                    format['format'] = '{id} - {res}{note}'.format(
-                        id=format['format_id'],
-                        res=self.format_resolution(format),
-                        note=format_field(format, 'format_note', ' (%s)'),
+                if fmt.get('format') is None:
+                    fmt['format'] = '{id} - {res}{note}'.format(
+                        id=fmt['format_id'],
+                        res=self.format_resolution(fmt),
+                        note=format_field(fmt, 'format_note', ' (%s)'),
                     )

         if self.params.get('check_formats') is True:
@ -3009,7 +3008,7 @@ def to_screen(*msg):
info_dict['requested_downloads'] = downloaded_formats info_dict['requested_downloads'] = downloaded_formats
info_dict = self.run_all_pps('after_video', info_dict) info_dict = self.run_all_pps('after_video', info_dict)
if max_downloads_reached: if max_downloads_reached:
raise MaxDownloadsReached() raise MaxDownloadsReached
# We update the info dict with the selected best quality format (backwards compatibility) # We update the info dict with the selected best quality format (backwards compatibility)
info_dict.update(best_format) info_dict.update(best_format)
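
`raise MaxDownloadsReached` with the parentheses dropped behaves identically to `raise MaxDownloadsReached()`: when a class rather than an instance is raised, Python instantiates it with no arguments. Ruff's RSE102 check enforces the shorter spelling. A self-contained demonstration with a stand-in class:

```
class MaxDownloadsReached(Exception):
    """Stand-in for yt-dlp's exception of the same name."""

try:
    raise MaxDownloadsReached  # the class itself; Python calls it for us
except MaxDownloadsReached as err:
    assert isinstance(err, MaxDownloadsReached) and err.args == ()
```
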
@ -3070,8 +3069,8 @@ def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
else: else:
f = formats[-1] f = formats[-1]
self.report_warning( self.report_warning(
'No subtitle format found matching "%s" for language %s, ' 'No subtitle format found matching "{}" for language {}, '
'using %s. Use --list-subs for a list of available subtitles' % (formats_query, lang, f['ext'])) 'using {}. Use --list-subs for a list of available subtitles'.format(formats_query, lang, f['ext']))
subs[lang] = f subs[lang] = f
return subs return subs
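
The subtitle warning above moves from printf-style `%` interpolation to `str.format`, the rewrite pyupgrade's UP031 check pushes for. Note that Python concatenates the adjacent string literals before applying either operator, so both spellings produce the same message:

```
formats_query, lang, ext = 'vtt/best', 'en', 'vtt'

old = ('No subtitle format found matching "%s" for language %s, '
       'using %s. Use --list-subs for a list of available subtitles' % (formats_query, lang, ext))
new = ('No subtitle format found matching "{}" for language {}, '
       'using {}. Use --list-subs for a list of available subtitles'.format(formats_query, lang, ext))
assert old == new
```
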
@ -3226,7 +3225,7 @@ def replace_info_dict(new_info):
def check_max_downloads(): def check_max_downloads():
if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'): if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
raise MaxDownloadsReached() raise MaxDownloadsReached
if self.params.get('simulate'): if self.params.get('simulate'):
info_dict['__write_download_archive'] = self.params.get('force_write_download_archive') info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
@ -3400,7 +3399,7 @@ def correct_ext(filename, ext=new_ext):
for f in info_dict['requested_formats'] if fd != FFmpegFD else []: for f in info_dict['requested_formats'] if fd != FFmpegFD else []:
f['filepath'] = fname = prepend_extension( f['filepath'] = fname = prepend_extension(
correct_ext(temp_filename, info_dict['ext']), correct_ext(temp_filename, info_dict['ext']),
'f%s' % f['format_id'], info_dict['ext']) 'f{}'.format(f['format_id']), info_dict['ext'])
downloaded.append(fname) downloaded.append(fname)
info_dict['url'] = '\n'.join(f['url'] for f in info_dict['requested_formats']) info_dict['url'] = '\n'.join(f['url'] for f in info_dict['requested_formats'])
success, real_download = self.dl(temp_filename, info_dict) success, real_download = self.dl(temp_filename, info_dict)
@ -3433,7 +3432,7 @@ def correct_ext(filename, ext=new_ext):
if temp_filename != '-': if temp_filename != '-':
fname = prepend_extension( fname = prepend_extension(
correct_ext(temp_filename, new_info['ext']), correct_ext(temp_filename, new_info['ext']),
'f%s' % f['format_id'], new_info['ext']) 'f{}'.format(f['format_id']), new_info['ext'])
if not self._ensure_dir_exists(fname): if not self._ensure_dir_exists(fname):
return return
f['filepath'] = fname f['filepath'] = fname
@ -3465,11 +3464,11 @@ def correct_ext(filename, ext=new_ext):
info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename))) info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
except network_exceptions as err: except network_exceptions as err:
self.report_error('unable to download video data: %s' % error_to_compat_str(err)) self.report_error(f'unable to download video data: {err}')
return return
except OSError as err: except OSError as err:
raise UnavailableVideoError(err) raise UnavailableVideoError(err)
except (ContentTooShortError, ) as err: except ContentTooShortError as err:
self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})') self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
return return
@ -3536,13 +3535,13 @@ def ffmpeg_fixup(cndn, msg, cls):
try: try:
replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move)) replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
except PostProcessingError as err: except PostProcessingError as err:
self.report_error('Postprocessing: %s' % str(err)) self.report_error(f'Postprocessing: {err}')
return return
try: try:
for ph in self._post_hooks: for ph in self._post_hooks:
ph(info_dict['filepath']) ph(info_dict['filepath'])
except Exception as err: except Exception as err:
self.report_error('post hooks: %s' % str(err)) self.report_error(f'post hooks: {err}')
return return
info_dict['__write_download_archive'] = True info_dict['__write_download_archive'] = True
@ -3609,7 +3608,7 @@ def download_with_info_file(self, info_filename):
@staticmethod @staticmethod
def sanitize_info(info_dict, remove_private_keys=False): def sanitize_info(info_dict, remove_private_keys=False):
''' Sanitize the infodict for converting to json ''' """ Sanitize the infodict for converting to json """
if info_dict is None: if info_dict is None:
return info_dict return info_dict
info_dict.setdefault('epoch', int(time.time())) info_dict.setdefault('epoch', int(time.time()))
@ -3644,7 +3643,7 @@ def filter_fn(obj):
@staticmethod @staticmethod
def filter_requested_info(info_dict, actually_filter=True): def filter_requested_info(info_dict, actually_filter=True):
''' Alias of sanitize_info for backward compatibility ''' """ Alias of sanitize_info for backward compatibility """
return YoutubeDL.sanitize_info(info_dict, actually_filter) return YoutubeDL.sanitize_info(info_dict, actually_filter)
def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None): def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
@ -3666,7 +3665,7 @@ def actual_post_extract(info_dict):
actual_post_extract(video_dict or {}) actual_post_extract(video_dict or {})
return return
post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {}) post_extractor = info_dict.pop('__post_extractor', None) or dict
info_dict.update(post_extractor()) info_dict.update(post_extractor())
actual_post_extract(info_dict or {}) actual_post_extract(info_dict or {})
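
Replacing `(lambda: {})` with plain `dict` works because `dict` called with no arguments already returns a fresh empty dict, so the lambda wrapper added nothing. A quick check of the equivalence:

```
info_dict = {'__post_extractor': None}

post_extractor = info_dict.pop('__post_extractor', None) or dict
assert post_extractor() == {}       # dict() -> {}
assert (lambda: {})() == dict()     # the lambda added nothing
```
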
@ -3771,7 +3770,7 @@ def format_resolution(format, default='unknown'):
if format.get('width') and format.get('height'): if format.get('width') and format.get('height'):
return '%dx%d' % (format['width'], format['height']) return '%dx%d' % (format['width'], format['height'])
elif format.get('height'): elif format.get('height'):
return '%sp' % format['height'] return '{}p'.format(format['height'])
elif format.get('width'): elif format.get('width'):
return '%dx?' % format['width'] return '%dx?' % format['width']
return default return default
@ -3788,7 +3787,7 @@ def _format_note(self, fdict):
if fdict.get('language'): if fdict.get('language'):
if res: if res:
res += ' ' res += ' '
res += '[%s]' % fdict['language'] res += '[{}]'.format(fdict['language'])
if fdict.get('format_note') is not None: if fdict.get('format_note') is not None:
if res: if res:
res += ' ' res += ' '
@ -3800,7 +3799,7 @@ def _format_note(self, fdict):
if fdict.get('container') is not None: if fdict.get('container') is not None:
if res: if res:
res += ', ' res += ', '
res += '%s container' % fdict['container'] res += '{} container'.format(fdict['container'])
if (fdict.get('vcodec') is not None if (fdict.get('vcodec') is not None
and fdict.get('vcodec') != 'none'): and fdict.get('vcodec') != 'none'):
if res: if res:
@ -3815,7 +3814,7 @@ def _format_note(self, fdict):
if fdict.get('fps') is not None: if fdict.get('fps') is not None:
if res: if res:
res += ', ' res += ', '
res += '%sfps' % fdict['fps'] res += '{}fps'.format(fdict['fps'])
if fdict.get('acodec') is not None: if fdict.get('acodec') is not None:
if res: if res:
res += ', ' res += ', '
@ -3858,7 +3857,7 @@ def render_formats_table(self, info_dict):
format_field(f, 'format_id'), format_field(f, 'format_id'),
format_field(f, 'ext'), format_field(f, 'ext'),
self.format_resolution(f), self.format_resolution(f),
self._format_note(f) self._format_note(f),
] for f in formats if (f.get('preference') or 0) >= -1000] ] for f in formats if (f.get('preference') or 0) >= -1000]
return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1) return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)
@ -3964,11 +3963,11 @@ def print_debug_header(self):
from .extractor.extractors import _LAZY_LOADER from .extractor.extractors import _LAZY_LOADER
from .extractor.extractors import ( from .extractor.extractors import (
_PLUGIN_CLASSES as plugin_ies, _PLUGIN_CLASSES as plugin_ies,
_PLUGIN_OVERRIDES as plugin_ie_overrides _PLUGIN_OVERRIDES as plugin_ie_overrides,
) )
def get_encoding(stream): def get_encoding(stream):
ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__)) ret = str(getattr(stream, 'encoding', f'missing ({type(stream).__name__})'))
additional_info = [] additional_info = []
if os.environ.get('TERM', '').lower() == 'dumb': if os.environ.get('TERM', '').lower() == 'dumb':
additional_info.append('dumb') additional_info.append('dumb')
@ -3979,13 +3978,13 @@ def get_encoding(stream):
ret = f'{ret} ({",".join(additional_info)})' ret = f'{ret} ({",".join(additional_info)})'
return ret return ret
encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % ( encoding_str = 'Encodings: locale {}, fs {}, pref {}, {}'.format(
locale.getpreferredencoding(), locale.getpreferredencoding(),
sys.getfilesystemencoding(), sys.getfilesystemencoding(),
self.get_encoding(), self.get_encoding(),
', '.join( ', '.join(
f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_ f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
if stream is not None and key != 'console') if stream is not None and key != 'console'),
) )
logger = self.params.get('logger') logger = self.params.get('logger')
@ -4017,7 +4016,7 @@ def get_encoding(stream):
else: else:
write_debug('Lazy loading extractors is disabled') write_debug('Lazy loading extractors is disabled')
if self.params['compat_opts']: if self.params['compat_opts']:
write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts'])) write_debug('Compatibility options: {}'.format(', '.join(self.params['compat_opts'])))
if current_git_head(): if current_git_head():
write_debug(f'Git HEAD: {current_git_head()}') write_debug(f'Git HEAD: {current_git_head()}')
@ -4026,14 +4025,14 @@ def get_encoding(stream):
exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self) exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
ffmpeg_features = {key for key, val in ffmpeg_features.items() if val} ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
if ffmpeg_features: if ffmpeg_features:
exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features)) exe_versions['ffmpeg'] += ' ({})'.format(','.join(sorted(ffmpeg_features)))
exe_versions['rtmpdump'] = rtmpdump_version() exe_versions['rtmpdump'] = rtmpdump_version()
exe_versions['phantomjs'] = PhantomJSwrapper._version() exe_versions['phantomjs'] = PhantomJSwrapper._version()
exe_str = ', '.join( exe_str = ', '.join(
f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
) or 'none' ) or 'none'
write_debug('exe versions: %s' % exe_str) write_debug(f'exe versions: {exe_str}')
from .compat.compat_utils import get_package_info from .compat.compat_utils import get_package_info
from .dependencies import available_dependencies from .dependencies import available_dependencies
@ -4045,7 +4044,7 @@ def get_encoding(stream):
write_debug(f'Proxy map: {self.proxies}') write_debug(f'Proxy map: {self.proxies}')
write_debug(f'Request Handlers: {", ".join(rh.RH_NAME for rh in self._request_director.handlers.values())}') write_debug(f'Request Handlers: {", ".join(rh.RH_NAME for rh in self._request_director.handlers.values())}')
for plugin_type, plugins in {'Extractor': plugin_ies, 'Post-Processor': plugin_pps}.items(): for plugin_type, plugins in {'Extractor': plugin_ies, 'Post-Processor': plugin_pps}.items():
display_list = ['%s%s' % ( display_list = ['{}{}'.format(
klass.__name__, '' if klass.__name__ == name else f' as {name}') klass.__name__, '' if klass.__name__ == name else f' as {name}')
for name, klass in plugins.items()] for name, klass in plugins.items()]
if plugin_type == 'Extractor': if plugin_type == 'Extractor':
@ -4062,14 +4061,13 @@ def get_encoding(stream):
# Not implemented # Not implemented
if False and self.params.get('call_home'): if False and self.params.get('call_home'):
ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode() ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
write_debug('Public IP address: %s' % ipaddr) write_debug(f'Public IP address: {ipaddr}')
latest_version = self.urlopen( latest_version = self.urlopen(
'https://yt-dl.org/latest/version').read().decode() 'https://yt-dl.org/latest/version').read().decode()
if version_tuple(latest_version) > version_tuple(__version__): if version_tuple(latest_version) > version_tuple(__version__):
self.report_warning( self.report_warning(
'You are using an outdated version (newest version: %s)! ' f'You are using an outdated version (newest version: {latest_version})! '
'See https://yt-dl.org/update if you need help updating.' % 'See https://yt-dl.org/update if you need help updating.')
latest_version)
@functools.cached_property @functools.cached_property
def proxies(self): def proxies(self):
@ -4103,7 +4101,7 @@ def _opener(self):
return handler._get_instance(cookiejar=self.cookiejar, proxies=self.proxies) return handler._get_instance(cookiejar=self.cookiejar, proxies=self.proxies)
def _get_available_impersonate_targets(self): def _get_available_impersonate_targets(self):
# todo(future): make available as public API # TODO(future): make available as public API
return [ return [
(target, rh.RH_NAME) (target, rh.RH_NAME)
for rh in self._request_director.handlers.values() for rh in self._request_director.handlers.values()
@ -4112,7 +4110,7 @@ def _get_available_impersonate_targets(self):
] ]
def _impersonate_target_available(self, target): def _impersonate_target_available(self, target):
# todo(future): make available as public API # TODO(future): make available as public API
return any( return any(
rh.is_supported_target(target) rh.is_supported_target(target)
for rh in self._request_director.handlers.values() for rh in self._request_director.handlers.values()
@ -4238,7 +4236,7 @@ def get_encoding(self):
return encoding return encoding
def _write_info_json(self, label, ie_result, infofn, overwrite=None): def _write_info_json(self, label, ie_result, infofn, overwrite=None):
''' Write infojson and returns True = written, 'exists' = Already exists, False = skip, None = error ''' """ Write infojson and returns True = written, 'exists' = Already exists, False = skip, None = error """
if overwrite is None: if overwrite is None:
overwrite = self.params.get('overwrites', True) overwrite = self.params.get('overwrites', True)
if not self.params.get('writeinfojson'): if not self.params.get('writeinfojson'):
@ -4261,7 +4259,7 @@ def _write_info_json(self, label, ie_result, infofn, overwrite=None):
return None return None
def _write_description(self, label, ie_result, descfn): def _write_description(self, label, ie_result, descfn):
''' Write description and returns True = written, False = skip, None = error ''' """ Write description and returns True = written, False = skip, None = error """
if not self.params.get('writedescription'): if not self.params.get('writedescription'):
return False return False
elif not descfn: elif not descfn:
@ -4285,7 +4283,7 @@ def _write_description(self, label, ie_result, descfn):
return True return True
def _write_subtitles(self, info_dict, filename): def _write_subtitles(self, info_dict, filename):
''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error''' """ Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error"""
ret = [] ret = []
subtitles = info_dict.get('requested_subtitles') subtitles = info_dict.get('requested_subtitles')
if not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')): if not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
@ -4331,7 +4329,7 @@ def _write_subtitles(self, info_dict, filename):
self.dl(sub_filename, sub_copy, subtitle=True) self.dl(sub_filename, sub_copy, subtitle=True)
sub_info['filepath'] = sub_filename sub_info['filepath'] = sub_filename
ret.append((sub_filename, sub_filename_final)) ret.append((sub_filename, sub_filename_final))
except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err: except (DownloadError, ExtractorError, OSError, ValueError, *network_exceptions) as err:
msg = f'Unable to download video subtitles for {sub_lang!r}: {err}' msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
if self.params.get('ignoreerrors') is not True: # False or 'only_download' if self.params.get('ignoreerrors') is not True: # False or 'only_download'
if not self.params.get('ignoreerrors'): if not self.params.get('ignoreerrors'):
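
Two cleanups meet in the rewritten `except` clause above: `IOError` has been an alias of `OSError` since Python 3.3, so listing both was redundant, and the exception tuple is now assembled with `*` unpacking rather than tuple concatenation (RUF005 territory). In this sketch, `network_exceptions` is a stand-in for yt-dlp's real tuple:

```
network_exceptions = (ConnectionError, TimeoutError)  # stand-in

assert IOError is OSError  # alias since Python 3.3

old_clause = (ValueError, IOError, OSError) + network_exceptions
new_clause = (ValueError, OSError, *network_exceptions)
# The same exceptions are caught; only the duplicate alias is gone:
assert set(old_clause) == set(new_clause)
```
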
@ -4341,7 +4339,7 @@ def _write_subtitles(self, info_dict, filename):
return ret return ret
def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None): def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename); or None if error ''' """ Write thumbnails to file and return list of (thumb_filename, final_thumb_filename); or None if error """
write_all = self.params.get('write_all_thumbnails', False) write_all = self.params.get('write_all_thumbnails', False)
thumbnails, ret = [], [] thumbnails, ret = [], []
if write_all or self.params.get('writethumbnail', False): if write_all or self.params.get('writethumbnail', False):
@ -4368,8 +4366,8 @@ def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
existing_thumb = self.existing_file((thumb_filename_final, thumb_filename)) existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
if existing_thumb: if existing_thumb:
self.to_screen('[info] %s is already present' % ( self.to_screen('[info] {} is already present'.format((
thumb_display_id if multiple else f'{label} thumbnail').capitalize()) thumb_display_id if multiple else f'{label} thumbnail').capitalize()))
t['filepath'] = existing_thumb t['filepath'] = existing_thumb
ret.append((existing_thumb, thumb_filename_final)) ret.append((existing_thumb, thumb_filename_final))
else: else:


@ -14,7 +14,7 @@
import re import re
import traceback import traceback
from .compat import compat_os_name, compat_shlex_quote from .compat import compat_os_name
from .cookies import SUPPORTED_BROWSERS, SUPPORTED_KEYRINGS from .cookies import SUPPORTED_BROWSERS, SUPPORTED_KEYRINGS
from .downloader.external import get_external_downloader from .downloader.external import get_external_downloader
from .extractor import list_extractor_classes from .extractor import list_extractor_classes
@ -58,6 +58,7 @@
read_stdin, read_stdin,
render_table, render_table,
setproctitle, setproctitle,
shell_quote,
traverse_obj, traverse_obj,
variadic, variadic,
write_string, write_string,
@ -115,9 +116,9 @@ def print_extractor_information(opts, urls):
ie.description(markdown=False, search_examples=_SEARCHES) ie.description(markdown=False, search_examples=_SEARCHES)
for ie in list_extractor_classes(opts.age_limit) if ie.working() and ie.IE_DESC is not False) for ie in list_extractor_classes(opts.age_limit) if ie.working() and ie.IE_DESC is not False)
elif opts.ap_list_mso: elif opts.ap_list_mso:
out = 'Supported TV Providers:\n%s\n' % render_table( out = 'Supported TV Providers:\n{}\n'.format(render_table(
['mso', 'mso name'], ['mso', 'mso name'],
[[mso_id, mso_info['name']] for mso_id, mso_info in MSO_INFO.items()]) [[mso_id, mso_info['name']] for mso_id, mso_info in MSO_INFO.items()]))
else: else:
return False return False
write_string(out, out=sys.stdout) write_string(out, out=sys.stdout)
@ -129,7 +130,7 @@ def _unused_compat_opt(name):
if name not in opts.compat_opts: if name not in opts.compat_opts:
return False return False
opts.compat_opts.discard(name) opts.compat_opts.discard(name)
opts.compat_opts.update(['*%s' % name]) opts.compat_opts.update([f'*{name}'])
return True return True
def set_default_compat(compat_name, opt_name, default=True, remove_compat=True): def set_default_compat(compat_name, opt_name, default=True, remove_compat=True):
@ -222,7 +223,7 @@ def validate_minmax(min_val, max_val, min_name, max_name=None):
validate_minmax(opts.sleep_interval, opts.max_sleep_interval, 'sleep interval') validate_minmax(opts.sleep_interval, opts.max_sleep_interval, 'sleep interval')
if opts.wait_for_video is not None: if opts.wait_for_video is not None:
min_wait, max_wait, *_ = map(parse_duration, opts.wait_for_video.split('-', 1) + [None]) min_wait, max_wait, *_ = map(parse_duration, [*opts.wait_for_video.split('-', 1), None])
validate(min_wait is not None and not (max_wait is None and '-' in opts.wait_for_video), validate(min_wait is not None and not (max_wait is None and '-' in opts.wait_for_video),
'time range to wait for video', opts.wait_for_video) 'time range to wait for video', opts.wait_for_video)
validate_minmax(min_wait, max_wait, 'time range to wait for video') validate_minmax(min_wait, max_wait, 'time range to wait for video')
@ -264,9 +265,9 @@ def parse_retries(name, value):
# Retry sleep function # Retry sleep function
def parse_sleep_func(expr): def parse_sleep_func(expr):
NUMBER_RE = r'\d+(?:\.\d+)?' NUMBER_RE = r'\d+(?:\.\d+)?'
op, start, limit, step, *_ = tuple(re.fullmatch( op, start, limit, step, *_ = (*tuple(re.fullmatch(
rf'(?:(linear|exp)=)?({NUMBER_RE})(?::({NUMBER_RE})?)?(?::({NUMBER_RE}))?', rf'(?:(linear|exp)=)?({NUMBER_RE})(?::({NUMBER_RE})?)?(?::({NUMBER_RE}))?',
expr.strip()).groups()) + (None, None) expr.strip()).groups()), None, None)
if op == 'exp': if op == 'exp':
return lambda n: min(float(start) * (float(step or 2) ** n), float(limit or 'inf')) return lambda n: min(float(start) * (float(step or 2) ** n), float(limit or 'inf'))
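
Both changes in this hunk trade list/tuple concatenation for iterable unpacking, the rewrite ruff's RUF005 suggests: `[*xs, None]` avoids building a throwaway `[None]` just to discard it. The padding itself guarantees enough values to unpack whether or not a range was supplied. A runnable sketch with a simplified stand-in for `parse_duration`:

```
def parse_duration(s):
    # stand-in for yt_dlp.utils.parse_duration
    return float(s) if s is not None else None

for value in ('30', '30-60'):
    # [*xs, None] pads to at least two entries, so unpacking always succeeds:
    min_wait, max_wait, *_ = map(parse_duration, [*value.split('-', 1), None])
    print(value, '->', min_wait, max_wait)
# 30 -> 30.0 None
# 30-60 -> 30.0 60.0
```
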
@ -396,13 +397,13 @@ def parse_chapters(name, value, advanced=False):
# MetadataParser # MetadataParser
def metadataparser_actions(f): def metadataparser_actions(f):
if isinstance(f, str): if isinstance(f, str):
cmd = '--parse-metadata %s' % compat_shlex_quote(f) cmd = f'--parse-metadata {shell_quote(f)}'
try: try:
actions = [MetadataFromFieldPP.to_action(f)] actions = [MetadataFromFieldPP.to_action(f)]
except Exception as err: except Exception as err:
raise ValueError(f'{cmd} is invalid; {err}') raise ValueError(f'{cmd} is invalid; {err}')
else: else:
cmd = '--replace-in-metadata %s' % ' '.join(map(compat_shlex_quote, f)) cmd = f'--replace-in-metadata {shell_quote(f)}'
actions = ((MetadataParserPP.Actions.REPLACE, x, *f[1:]) for x in f[0].split(',')) actions = ((MetadataParserPP.Actions.REPLACE, x, *f[1:]) for x in f[0].split(','))
for action in actions: for action in actions:
@ -413,7 +414,7 @@ def metadataparser_actions(f):
yield action yield action
if opts.metafromtitle is not None: if opts.metafromtitle is not None:
opts.parse_metadata.setdefault('pre_process', []).append('title:%s' % opts.metafromtitle) opts.parse_metadata.setdefault('pre_process', []).append(f'title:{opts.metafromtitle}')
opts.parse_metadata = { opts.parse_metadata = {
k: list(itertools.chain(*map(metadataparser_actions, v))) k: list(itertools.chain(*map(metadataparser_actions, v)))
for k, v in opts.parse_metadata.items() for k, v in opts.parse_metadata.items()
@ -602,7 +603,7 @@ def get_postprocessors(opts):
yield { yield {
'key': 'MetadataParser', 'key': 'MetadataParser',
'actions': actions, 'actions': actions,
'when': when 'when': when,
} }
sponsorblock_query = opts.sponsorblock_mark | opts.sponsorblock_remove sponsorblock_query = opts.sponsorblock_mark | opts.sponsorblock_remove
if sponsorblock_query: if sponsorblock_query:
@ -610,19 +611,19 @@ def get_postprocessors(opts):
'key': 'SponsorBlock', 'key': 'SponsorBlock',
'categories': sponsorblock_query, 'categories': sponsorblock_query,
'api': opts.sponsorblock_api, 'api': opts.sponsorblock_api,
'when': 'after_filter' 'when': 'after_filter',
} }
if opts.convertsubtitles: if opts.convertsubtitles:
yield { yield {
'key': 'FFmpegSubtitlesConvertor', 'key': 'FFmpegSubtitlesConvertor',
'format': opts.convertsubtitles, 'format': opts.convertsubtitles,
'when': 'before_dl' 'when': 'before_dl',
} }
if opts.convertthumbnails: if opts.convertthumbnails:
yield { yield {
'key': 'FFmpegThumbnailsConvertor', 'key': 'FFmpegThumbnailsConvertor',
'format': opts.convertthumbnails, 'format': opts.convertthumbnails,
'when': 'before_dl' 'when': 'before_dl',
} }
if opts.extractaudio: if opts.extractaudio:
yield { yield {
@ -647,7 +648,7 @@ def get_postprocessors(opts):
yield { yield {
'key': 'FFmpegEmbedSubtitle', 'key': 'FFmpegEmbedSubtitle',
# already_have_subtitle = True prevents the file from being deleted after embedding # already_have_subtitle = True prevents the file from being deleted after embedding
'already_have_subtitle': opts.writesubtitles and keep_subs 'already_have_subtitle': opts.writesubtitles and keep_subs,
} }
if not opts.writeautomaticsub and keep_subs: if not opts.writeautomaticsub and keep_subs:
opts.writesubtitles = True opts.writesubtitles = True
@ -660,7 +661,7 @@ def get_postprocessors(opts):
'remove_sponsor_segments': opts.sponsorblock_remove, 'remove_sponsor_segments': opts.sponsorblock_remove,
'remove_ranges': opts.remove_ranges, 'remove_ranges': opts.remove_ranges,
'sponsorblock_chapter_title': opts.sponsorblock_chapter_title, 'sponsorblock_chapter_title': opts.sponsorblock_chapter_title,
'force_keyframes': opts.force_keyframes_at_cuts 'force_keyframes': opts.force_keyframes_at_cuts,
} }
# FFmpegMetadataPP should be run after FFmpegVideoConvertorPP and # FFmpegMetadataPP should be run after FFmpegVideoConvertorPP and
# FFmpegExtractAudioPP as containers before conversion may not support # FFmpegExtractAudioPP as containers before conversion may not support
@ -694,7 +695,7 @@ def get_postprocessors(opts):
yield { yield {
'key': 'EmbedThumbnail', 'key': 'EmbedThumbnail',
# already_have_thumbnail = True prevents the file from being deleted after embedding # already_have_thumbnail = True prevents the file from being deleted after embedding
'already_have_thumbnail': opts.writethumbnail 'already_have_thumbnail': opts.writethumbnail,
} }
if not opts.writethumbnail: if not opts.writethumbnail:
opts.writethumbnail = True opts.writethumbnail = True
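
Many of the one-line changes in this file merely add a trailing comma to the last entry of a multi-line literal, consistent with flake8-commas' COM812 check. Besides keeping future additions to single-line diffs, the habit guards against Python's implicit string-literal concatenation, a pitfall the related ISC checks target:

```
pp = {
    'key': 'EmbedThumbnail',
    'already_have_thumbnail': True,  # trailing comma: adding a key later edits one line
}
assert pp['already_have_thumbnail']

names = [
    'FFmpegMetadata'   # comma forgotten here...
    'EmbedThumbnail',
]
assert names == ['FFmpegMetadataEmbedThumbnail']  # ...so the literals silently fused
```
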
@ -741,7 +742,7 @@ def parse_options(argv=None):
print_only = bool(opts.forceprint) and all(k not in opts.forceprint for k in POSTPROCESS_WHEN[3:]) print_only = bool(opts.forceprint) and all(k not in opts.forceprint for k in POSTPROCESS_WHEN[3:])
any_getting = any(getattr(opts, k) for k in ( any_getting = any(getattr(opts, k) for k in (
'dumpjson', 'dump_single_json', 'getdescription', 'getduration', 'getfilename', 'dumpjson', 'dump_single_json', 'getdescription', 'getduration', 'getfilename',
'getformat', 'getid', 'getthumbnail', 'gettitle', 'geturl' 'getformat', 'getid', 'getthumbnail', 'gettitle', 'geturl',
)) ))
if opts.quiet is None: if opts.quiet is None:
opts.quiet = any_getting or opts.print_json or bool(opts.forceprint) opts.quiet = any_getting or opts.print_json or bool(opts.forceprint)
@ -1002,7 +1003,7 @@ def _real_main(argv=None):
def make_row(target, handler): def make_row(target, handler):
return [ return [
join_nonempty(target.client.title(), target.version, delim='-') or '-', join_nonempty(target.client.title(), target.version, delim='-') or '-',
join_nonempty((target.os or "").title(), target.os_version, delim='-') or '-', join_nonempty((target.os or '').title(), target.os_version, delim='-') or '-',
handler, handler,
] ]


@ -68,7 +68,7 @@ def pad_block(block, padding_mode):
raise NotImplementedError(f'Padding mode {padding_mode} is not implemented') raise NotImplementedError(f'Padding mode {padding_mode} is not implemented')
if padding_mode == 'iso7816' and padding_size: if padding_mode == 'iso7816' and padding_size:
block = block + [0x80] # NB: += mutates list block = [*block, 0x80] # NB: += mutates list
padding_size -= 1 padding_size -= 1
return block + [PADDING_BYTE[padding_mode]] * padding_size return block + [PADDING_BYTE[padding_mode]] * padding_size
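
The `NB: += mutates list` comment is the heart of this change: `[*block, 0x80]` (RUF005's preferred spelling of `block + [0x80]`) rebinds the local name to a fresh list, whereas `block += [0x80]` would go through `list.__iadd__` and mutate the caller's list in place. Demonstrated on a toy padding helper:

```
def pad_copy(block):
    block = [*block, 0x80]   # new list; the caller's data is untouched
    return block

def pad_inplace(block):
    block += [0x80]          # __iadd__ mutates the shared list
    return block

data = [1, 2, 3]
pad_copy(data)
assert data == [1, 2, 3]
pad_inplace(data)
assert data == [1, 2, 3, 0x80]
```
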
@ -110,9 +110,7 @@ def aes_ecb_decrypt(data, key, iv=None):
for i in range(block_count): for i in range(block_count):
block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES] block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
encrypted_data += aes_decrypt(block, expanded_key) encrypted_data += aes_decrypt(block, expanded_key)
encrypted_data = encrypted_data[:len(data)] return encrypted_data[:len(data)]
return encrypted_data
def aes_ctr_decrypt(data, key, iv): def aes_ctr_decrypt(data, key, iv):
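
Several hunks in this AES module collapse a value that was assigned and immediately returned into a direct `return`, the rewrite flake8-return's RET504 check asks for. Behavior is unchanged:

```
# Before: assigned, then immediately returned (flagged by RET504)
def truncate_before(data, length):
    result = data[:length]
    return result

# After: return the expression directly
def truncate_after(data, length):
    return data[:length]

assert truncate_before(b'abcdef', 3) == truncate_after(b'abcdef', 3) == b'abc'
```
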
@ -148,9 +146,7 @@ def aes_ctr_encrypt(data, key, iv):
cipher_counter_block = aes_encrypt(counter_block, expanded_key) cipher_counter_block = aes_encrypt(counter_block, expanded_key)
encrypted_data += xor(block, cipher_counter_block) encrypted_data += xor(block, cipher_counter_block)
encrypted_data = encrypted_data[:len(data)] return encrypted_data[:len(data)]
return encrypted_data
def aes_cbc_decrypt(data, key, iv): def aes_cbc_decrypt(data, key, iv):
@ -174,9 +170,7 @@ def aes_cbc_decrypt(data, key, iv):
decrypted_block = aes_decrypt(block, expanded_key) decrypted_block = aes_decrypt(block, expanded_key)
decrypted_data += xor(decrypted_block, previous_cipher_block) decrypted_data += xor(decrypted_block, previous_cipher_block)
previous_cipher_block = block previous_cipher_block = block
decrypted_data = decrypted_data[:len(data)] return decrypted_data[:len(data)]
return decrypted_data
def aes_cbc_encrypt(data, key, iv, *, padding_mode='pkcs7'): def aes_cbc_encrypt(data, key, iv, *, padding_mode='pkcs7'):
@ -224,7 +218,7 @@ def aes_gcm_decrypt_and_verify(data, key, tag, nonce):
hash_subkey = aes_encrypt([0] * BLOCK_SIZE_BYTES, key_expansion(key)) hash_subkey = aes_encrypt([0] * BLOCK_SIZE_BYTES, key_expansion(key))
if len(nonce) == 12: if len(nonce) == 12:
j0 = nonce + [0, 0, 0, 1] j0 = [*nonce, 0, 0, 0, 1]
else: else:
fill = (BLOCK_SIZE_BYTES - (len(nonce) % BLOCK_SIZE_BYTES)) % BLOCK_SIZE_BYTES + 8 fill = (BLOCK_SIZE_BYTES - (len(nonce) % BLOCK_SIZE_BYTES)) % BLOCK_SIZE_BYTES + 8
ghash_in = nonce + [0] * fill + bytes_to_intlist((8 * len(nonce)).to_bytes(8, 'big')) ghash_in = nonce + [0] * fill + bytes_to_intlist((8 * len(nonce)).to_bytes(8, 'big'))
@ -242,11 +236,11 @@ def aes_gcm_decrypt_and_verify(data, key, tag, nonce):
data data
+ [0] * (BLOCK_SIZE_BYTES - len(data) + pad_len) # pad + [0] * (BLOCK_SIZE_BYTES - len(data) + pad_len) # pad
+ bytes_to_intlist((0 * 8).to_bytes(8, 'big') # length of associated data + bytes_to_intlist((0 * 8).to_bytes(8, 'big') # length of associated data
+ ((len(data) * 8).to_bytes(8, 'big'))) # length of data + ((len(data) * 8).to_bytes(8, 'big'))), # length of data
) )
if tag != aes_ctr_encrypt(s_tag, key, j0): if tag != aes_ctr_encrypt(s_tag, key, j0):
raise ValueError("Mismatching authentication tag") raise ValueError('Mismatching authentication tag')
return decrypted_data return decrypted_data
@ -288,9 +282,7 @@ def aes_decrypt(data, expanded_key):
data = list(iter_mix_columns(data, MIX_COLUMN_MATRIX_INV)) data = list(iter_mix_columns(data, MIX_COLUMN_MATRIX_INV))
data = shift_rows_inv(data) data = shift_rows_inv(data)
data = sub_bytes_inv(data) data = sub_bytes_inv(data)
data = xor(data, expanded_key[:BLOCK_SIZE_BYTES]) return xor(data, expanded_key[:BLOCK_SIZE_BYTES])
return data
def aes_decrypt_text(data, password, key_size_bytes): def aes_decrypt_text(data, password, key_size_bytes):
@ -318,9 +310,7 @@ def aes_decrypt_text(data, password, key_size_bytes):
cipher = data[NONCE_LENGTH_BYTES:] cipher = data[NONCE_LENGTH_BYTES:]
decrypted_data = aes_ctr_decrypt(cipher, key, nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)) decrypted_data = aes_ctr_decrypt(cipher, key, nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES))
plaintext = intlist_to_bytes(decrypted_data) return intlist_to_bytes(decrypted_data)
return plaintext
RCON = (0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36) RCON = (0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36)
@ -428,9 +418,7 @@ def key_expansion(data):
for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0): for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
temp = data[-4:] temp = data[-4:]
data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes]) data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
data = data[:expanded_key_size_bytes] return data[:expanded_key_size_bytes]
return data
def iter_vector(iv): def iter_vector(iv):
@ -511,7 +499,7 @@ def block_product(block_x, block_y):
# NIST SP 800-38D, Algorithm 1 # NIST SP 800-38D, Algorithm 1
if len(block_x) != BLOCK_SIZE_BYTES or len(block_y) != BLOCK_SIZE_BYTES: if len(block_x) != BLOCK_SIZE_BYTES or len(block_y) != BLOCK_SIZE_BYTES:
raise ValueError("Length of blocks need to be %d bytes" % BLOCK_SIZE_BYTES) raise ValueError(f'Length of blocks need to be {BLOCK_SIZE_BYTES} bytes')
block_r = [0xE1] + [0] * (BLOCK_SIZE_BYTES - 1) block_r = [0xE1] + [0] * (BLOCK_SIZE_BYTES - 1)
block_v = block_y[:] block_v = block_y[:]
@ -534,7 +522,7 @@ def ghash(subkey, data):
# NIST SP 800-38D, Algorithm 2 # NIST SP 800-38D, Algorithm 2
if len(data) % BLOCK_SIZE_BYTES: if len(data) % BLOCK_SIZE_BYTES:
raise ValueError("Length of data should be %d bytes" % BLOCK_SIZE_BYTES) raise ValueError(f'Length of data should be {BLOCK_SIZE_BYTES} bytes')
last_y = [0] * BLOCK_SIZE_BYTES last_y = [0] * BLOCK_SIZE_BYTES
for i in range(0, len(data), BLOCK_SIZE_BYTES): for i in range(0, len(data), BLOCK_SIZE_BYTES):


@ -81,10 +81,10 @@ def remove(self):
cachedir = self._get_root_dir() cachedir = self._get_root_dir()
if not any((term in cachedir) for term in ('cache', 'tmp')): if not any((term in cachedir) for term in ('cache', 'tmp')):
raise Exception('Not removing directory %s - this does not look like a cache dir' % cachedir) raise Exception(f'Not removing directory {cachedir} - this does not look like a cache dir')
self._ydl.to_screen( self._ydl.to_screen(
'Removing cache dir %s .' % cachedir, skip_eol=True) f'Removing cache dir {cachedir} .', skip_eol=True)
if os.path.exists(cachedir): if os.path.exists(cachedir):
self._ydl.to_screen('.', skip_eol=True) self._ydl.to_screen('.', skip_eol=True)
shutil.rmtree(cachedir) shutil.rmtree(cachedir)


@ -35,7 +35,7 @@
from ..dependencies import brotli as compat_brotli # noqa: F401 from ..dependencies import brotli as compat_brotli # noqa: F401
from ..dependencies import websockets as compat_websockets # noqa: F401 from ..dependencies import websockets as compat_websockets # noqa: F401
from ..dependencies.Cryptodome import AES as compat_pycrypto_AES # noqa: F401 from ..dependencies.Cryptodome import AES as compat_pycrypto_AES # noqa: F401
from ..networking.exceptions import HTTPError as compat_HTTPError # noqa: F401 from ..networking.exceptions import HTTPError as compat_HTTPError
passthrough_module(__name__, '...utils', ('WINDOWS_VT_MODE', 'windows_enable_vt_mode')) passthrough_module(__name__, '...utils', ('WINDOWS_VT_MODE', 'windows_enable_vt_mode'))


@ -7,6 +7,6 @@
del passthrough_module del passthrough_module
try: try:
cache # >= 3.9 _ = cache # >= 3.9
except NameError: except NameError:
cache = lru_cache(maxsize=None) cache = lru_cache(maxsize=None)
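
The bare `cache` expression in this compat shim was a deliberate probe: it raises `NameError` on Python < 3.9, where `functools.cache` does not exist. Since a bare name is flagged as a useless expression (flake8-bugbear's B018), binding it to `_` keeps the probe while satisfying the linter. A standalone version of the same pattern:

```
from functools import *  # provides `cache` on Python >= 3.9

try:
    _ = cache  # NameError on < 3.9; a bare `cache` here would trip B018
except NameError:
    cache = lru_cache(maxsize=None)  # functionally equivalent backfill

@cache
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

assert fib(30) == 832040
```
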


@ -146,7 +146,7 @@ def _extract_firefox_cookies(profile, container, logger):
identities = json.load(containers).get('identities', []) identities = json.load(containers).get('identities', [])
container_id = next((context.get('userContextId') for context in identities if container in ( container_id = next((context.get('userContextId') for context in identities if container in (
context.get('name'), context.get('name'),
try_call(lambda: re.fullmatch(r'userContext([^\.]+)\.label', context['l10nID']).group()) try_call(lambda: re.fullmatch(r'userContext([^\.]+)\.label', context['l10nID']).group()),
)), None) )), None)
if not isinstance(container_id, int): if not isinstance(container_id, int):
raise ValueError(f'could not find firefox container "{container}" in containers.json') raise ValueError(f'could not find firefox container "{container}" in containers.json')
@ -263,7 +263,7 @@ def _get_chromium_based_browser_settings(browser_name):
return { return {
'browser_dir': browser_dir, 'browser_dir': browser_dir,
'keyring_name': keyring_name, 'keyring_name': keyring_name,
'supports_profiles': browser_name not in browsers_without_profiles 'supports_profiles': browser_name not in browsers_without_profiles,
} }
@ -826,7 +826,7 @@ def _choose_linux_keyring(logger):
elif desktop_environment == _LinuxDesktopEnvironment.KDE6: elif desktop_environment == _LinuxDesktopEnvironment.KDE6:
linux_keyring = _LinuxKeyring.KWALLET6 linux_keyring = _LinuxKeyring.KWALLET6
elif desktop_environment in ( elif desktop_environment in (
_LinuxDesktopEnvironment.KDE3, _LinuxDesktopEnvironment.LXQT, _LinuxDesktopEnvironment.OTHER _LinuxDesktopEnvironment.KDE3, _LinuxDesktopEnvironment.LXQT, _LinuxDesktopEnvironment.OTHER,
): ):
linux_keyring = _LinuxKeyring.BASICTEXT linux_keyring = _LinuxKeyring.BASICTEXT
else: else:
@ -861,7 +861,7 @@ def _get_kwallet_network_wallet(keyring, logger):
'dbus-send', '--session', '--print-reply=literal', 'dbus-send', '--session', '--print-reply=literal',
f'--dest={service_name}', f'--dest={service_name}',
wallet_path, wallet_path,
'org.kde.KWallet.networkWallet' 'org.kde.KWallet.networkWallet',
], text=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) ], text=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
if returncode: if returncode:
@ -891,7 +891,7 @@ def _get_kwallet_password(browser_keyring_name, keyring, logger):
'kwallet-query', 'kwallet-query',
'--read-password', f'{browser_keyring_name} Safe Storage', '--read-password', f'{browser_keyring_name} Safe Storage',
'--folder', f'{browser_keyring_name} Keys', '--folder', f'{browser_keyring_name} Keys',
network_wallet network_wallet,
], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) ], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
if returncode: if returncode:
@ -931,9 +931,8 @@ def _get_gnome_keyring_password(browser_keyring_name, logger):
for item in col.get_all_items(): for item in col.get_all_items():
if item.get_label() == f'{browser_keyring_name} Safe Storage': if item.get_label() == f'{browser_keyring_name} Safe Storage':
return item.get_secret() return item.get_secret()
else: logger.error('failed to read from keyring')
logger.error('failed to read from keyring') return b''
return b''
def _get_linux_keyring_password(browser_keyring_name, keyring, logger): def _get_linux_keyring_password(browser_keyring_name, keyring, logger):
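
The keyring lookup above sheds an `else:` hanging off a `for` loop. Loop-`else` is only meaningful together with `break` (it runs when the loop was not broken out of); with a `return` inside the loop and no `break`, the `else` body runs on every fall-through anyway and is pure indentation noise (pylint's W0120, PLW0120 in ruff). A contrast sketch with hypothetical data:

```
def get_secret_plain(items, label):
    for item in items:
        if item[0] == label:
            return item[1]
    # No `break` above, so this line runs on every fall-through anyway:
    return b''

def get_secret_with_break(items, label):
    for item in items:
        if item[0] == label:
            break
    else:                  # runs only if the loop never hit `break`
        return b''
    return item[1]

pairs = [('Chromium Safe Storage', b'peanuts')]
assert get_secret_plain(pairs, 'Chromium Safe Storage') == b'peanuts'
assert get_secret_with_break(pairs, 'nope') == b''
```
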
@ -1053,7 +1052,7 @@ class DATA_BLOB(ctypes.Structure):
None, # pvReserved: must be NULL None, # pvReserved: must be NULL
None, # pPromptStruct: information about prompts to display None, # pPromptStruct: information about prompts to display
0, # dwFlags 0, # dwFlags
ctypes.byref(blob_out) # pDataOut ctypes.byref(blob_out), # pDataOut
) )
if not ret: if not ret:
logger.warning('failed to decrypt with DPAPI', only_once=True) logger.warning('failed to decrypt with DPAPI', only_once=True)
@ -1129,24 +1128,24 @@ class LenientSimpleCookie(http.cookies.SimpleCookie):
_LEGAL_VALUE_CHARS = _LEGAL_KEY_CHARS + re.escape('(),/<=>?@[]{}') _LEGAL_VALUE_CHARS = _LEGAL_KEY_CHARS + re.escape('(),/<=>?@[]{}')
_RESERVED = { _RESERVED = {
"expires", 'expires',
"path", 'path',
"comment", 'comment',
"domain", 'domain',
"max-age", 'max-age',
"secure", 'secure',
"httponly", 'httponly',
"version", 'version',
"samesite", 'samesite',
} }
_FLAGS = {"secure", "httponly"} _FLAGS = {'secure', 'httponly'}
# Added 'bad' group to catch the remaining value # Added 'bad' group to catch the remaining value
_COOKIE_PATTERN = re.compile(r""" _COOKIE_PATTERN = re.compile(r'''
\s* # Optional whitespace at start of cookie \s* # Optional whitespace at start of cookie
(?P<key> # Start of group 'key' (?P<key> # Start of group 'key'
[""" + _LEGAL_KEY_CHARS + r"""]+?# Any word of at least one letter [''' + _LEGAL_KEY_CHARS + r''']+?# Any word of at least one letter
) # End of group 'key' ) # End of group 'key'
( # Optional group: there may not be a value. ( # Optional group: there may not be a value.
\s*=\s* # Equal Sign \s*=\s* # Equal Sign
@ -1156,7 +1155,7 @@ class LenientSimpleCookie(http.cookies.SimpleCookie):
| # or | # or
\w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT # Special case for "expires" attr \w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT # Special case for "expires" attr
| # or | # or
[""" + _LEGAL_VALUE_CHARS + r"""]* # Any word or empty string [''' + _LEGAL_VALUE_CHARS + r''']* # Any word or empty string
) # End of group 'val' ) # End of group 'val'
| # or | # or
(?P<bad>(?:\\;|[^;])*?) # 'bad' group fallback for invalid values (?P<bad>(?:\\;|[^;])*?) # 'bad' group fallback for invalid values
@ -1164,7 +1163,7 @@ class LenientSimpleCookie(http.cookies.SimpleCookie):
)? # End of optional value group )? # End of optional value group
\s* # Any number of spaces. \s* # Any number of spaces.
(\s+|;|$) # Ending either at space, semicolon, or EOS. (\s+|;|$) # Ending either at space, semicolon, or EOS.
""", re.ASCII | re.VERBOSE) ''', re.ASCII | re.VERBOSE)
def load(self, data): def load(self, data):
# Workaround for https://github.com/yt-dlp/yt-dlp/issues/4776 # Workaround for https://github.com/yt-dlp/yt-dlp/issues/4776
@ -1260,14 +1259,14 @@ def _really_save(self, f, ignore_discard, ignore_expires):
# with no name, whereas http.cookiejar regards it as a # with no name, whereas http.cookiejar regards it as a
# cookie with no value. # cookie with no value.
name, value = '', name name, value = '', name
f.write('%s\n' % '\t'.join(( f.write('{}\n'.format('\t'.join((
cookie.domain, cookie.domain,
self._true_or_false(cookie.domain.startswith('.')), self._true_or_false(cookie.domain.startswith('.')),
cookie.path, cookie.path,
self._true_or_false(cookie.secure), self._true_or_false(cookie.secure),
str_or_none(cookie.expires, default=''), str_or_none(cookie.expires, default=''),
name, value name, value,
))) ))))
def save(self, filename=None, ignore_discard=True, ignore_expires=True): def save(self, filename=None, ignore_discard=True, ignore_expires=True):
""" """
@ -1306,10 +1305,10 @@ def prepare_line(line):
return line return line
cookie_list = line.split('\t') cookie_list = line.split('\t')
if len(cookie_list) != self._ENTRY_LEN: if len(cookie_list) != self._ENTRY_LEN:
raise http.cookiejar.LoadError('invalid length %d' % len(cookie_list)) raise http.cookiejar.LoadError(f'invalid length {len(cookie_list)}')
cookie = self._CookieFileEntry(*cookie_list) cookie = self._CookieFileEntry(*cookie_list)
if cookie.expires_at and not cookie.expires_at.isdigit(): if cookie.expires_at and not cookie.expires_at.isdigit():
raise http.cookiejar.LoadError('invalid expires at %s' % cookie.expires_at) raise http.cookiejar.LoadError(f'invalid expires at {cookie.expires_at}')
return line return line
cf = io.StringIO() cf = io.StringIO()


@ -404,7 +404,7 @@ def with_fields(*tups, default=''):
def report_resuming_byte(self, resume_len): def report_resuming_byte(self, resume_len):
"""Report attempt to resume at given byte.""" """Report attempt to resume at given byte."""
self.to_screen('[download] Resuming download at byte %s' % resume_len) self.to_screen(f'[download] Resuming download at byte {resume_len}')
def report_retry(self, err, count, retries, frag_index=NO_DEFAULT, fatal=True): def report_retry(self, err, count, retries, frag_index=NO_DEFAULT, fatal=True):
"""Report retry""" """Report retry"""


@ -55,7 +55,7 @@ def real_download(self, filename, info_dict):
# correct and expected termination thus all postprocessing # correct and expected termination thus all postprocessing
# should take place # should take place
retval = 0 retval = 0
self.to_screen('[%s] Interrupted by user' % self.get_basename()) self.to_screen(f'[{self.get_basename()}] Interrupted by user')
finally: finally:
if self._cookies_tempfile: if self._cookies_tempfile:
self.try_remove(self._cookies_tempfile) self.try_remove(self._cookies_tempfile)
@ -172,7 +172,7 @@ def _call_downloader(self, tmpfilename, info_dict):
decrypt_fragment = self.decrypter(info_dict) decrypt_fragment = self.decrypter(info_dict)
dest, _ = self.sanitize_open(tmpfilename, 'wb') dest, _ = self.sanitize_open(tmpfilename, 'wb')
for frag_index, fragment in enumerate(info_dict['fragments']): for frag_index, fragment in enumerate(info_dict['fragments']):
fragment_filename = '%s-Frag%d' % (tmpfilename, frag_index) fragment_filename = f'{tmpfilename}-Frag{frag_index}'
try: try:
src, _ = self.sanitize_open(fragment_filename, 'rb') src, _ = self.sanitize_open(fragment_filename, 'rb')
except OSError as err: except OSError as err:
@ -186,7 +186,7 @@ def _call_downloader(self, tmpfilename, info_dict):
if not self.params.get('keep_fragments', False): if not self.params.get('keep_fragments', False):
self.try_remove(encodeFilename(fragment_filename)) self.try_remove(encodeFilename(fragment_filename))
dest.close() dest.close()
self.try_remove(encodeFilename('%s.frag.urls' % tmpfilename)) self.try_remove(encodeFilename(f'{tmpfilename}.frag.urls'))
return 0 return 0
def _call_process(self, cmd, info_dict): def _call_process(self, cmd, info_dict):
@ -336,11 +336,11 @@ def _make_cmd(self, tmpfilename, info_dict):
if 'fragments' in info_dict: if 'fragments' in info_dict:
cmd += ['--uri-selector=inorder'] cmd += ['--uri-selector=inorder']
url_list_file = '%s.frag.urls' % tmpfilename url_list_file = f'{tmpfilename}.frag.urls'
url_list = [] url_list = []
for frag_index, fragment in enumerate(info_dict['fragments']): for frag_index, fragment in enumerate(info_dict['fragments']):
fragment_filename = '%s-Frag%d' % (os.path.basename(tmpfilename), frag_index) fragment_filename = f'{os.path.basename(tmpfilename)}-Frag{frag_index}'
url_list.append('%s\n\tout=%s' % (fragment['url'], self._aria2c_filename(fragment_filename))) url_list.append('{}\n\tout={}'.format(fragment['url'], self._aria2c_filename(fragment_filename)))
stream, _ = self.sanitize_open(url_list_file, 'wb') stream, _ = self.sanitize_open(url_list_file, 'wb')
stream.write('\n'.join(url_list).encode()) stream.write('\n'.join(url_list).encode())
stream.close() stream.close()
@ -357,7 +357,7 @@ def aria2c_rpc(self, rpc_port, rpc_secret, method, params=()):
'id': sanitycheck, 'id': sanitycheck,
'method': method, 'method': method,
'params': [f'token:{rpc_secret}', *params], 'params': [f'token:{rpc_secret}', *params],
}).encode('utf-8') }).encode()
request = Request( request = Request(
f'http://localhost:{rpc_port}/jsonrpc', f'http://localhost:{rpc_port}/jsonrpc',
data=d, headers={ data=d, headers={
@ -416,7 +416,7 @@ def get_stat(key, *obj, average=False):
'total_bytes_estimate': total, 'total_bytes_estimate': total,
'eta': (total - downloaded) / (speed or 1), 'eta': (total - downloaded) / (speed or 1),
'fragment_index': min(frag_count, len(completed) + 1) if fragmented else None, 'fragment_index': min(frag_count, len(completed) + 1) if fragmented else None,
'elapsed': time.time() - started 'elapsed': time.time() - started,
}) })
self._hook_progress(status, info_dict) self._hook_progress(status, info_dict)
@ -509,12 +509,12 @@ def _call_downloader(self, tmpfilename, info_dict):
proxy = self.params.get('proxy') proxy = self.params.get('proxy')
if proxy: if proxy:
if not re.match(r'^[\da-zA-Z]+://', proxy): if not re.match(r'^[\da-zA-Z]+://', proxy):
proxy = 'http://%s' % proxy proxy = f'http://{proxy}'
if proxy.startswith('socks'): if proxy.startswith('socks'):
self.report_warning( self.report_warning(
'%s does not support SOCKS proxies. Downloading is likely to fail. ' f'{self.get_basename()} does not support SOCKS proxies. Downloading is likely to fail. '
'Consider adding --hls-prefer-native to your command.' % self.get_basename()) 'Consider adding --hls-prefer-native to your command.')
# Since December 2015 ffmpeg supports -http_proxy option (see # Since December 2015 ffmpeg supports -http_proxy option (see
# http://git.videolan.org/?p=ffmpeg.git;a=commit;h=b4eb1f29ebddd60c41a2eb39f5af701e38e0d3fd) # http://git.videolan.org/?p=ffmpeg.git;a=commit;h=b4eb1f29ebddd60c41a2eb39f5af701e38e0d3fd)
@ -575,7 +575,7 @@ def _call_downloader(self, tmpfilename, info_dict):
if end_time: if end_time:
args += ['-t', str(end_time - start_time)] args += ['-t', str(end_time - start_time)]
args += self._configuration_args((f'_i{i + 1}', '_i')) + ['-i', fmt['url']] args += [*self._configuration_args((f'_i{i + 1}', '_i')), '-i', fmt['url']]
if not (start_time or end_time) or not self.params.get('force_keyframes_at_cuts'): if not (start_time or end_time) or not self.params.get('force_keyframes_at_cuts'):
args += ['-c', 'copy'] args += ['-c', 'copy']


@ -67,12 +67,12 @@ def read_asrt(self):
self.read_bytes(3) self.read_bytes(3)
quality_entry_count = self.read_unsigned_char() quality_entry_count = self.read_unsigned_char()
# QualityEntryCount # QualityEntryCount
for i in range(quality_entry_count): for _ in range(quality_entry_count):
self.read_string() self.read_string()
segment_run_count = self.read_unsigned_int() segment_run_count = self.read_unsigned_int()
segments = [] segments = []
for i in range(segment_run_count): for _ in range(segment_run_count):
first_segment = self.read_unsigned_int() first_segment = self.read_unsigned_int()
fragments_per_segment = self.read_unsigned_int() fragments_per_segment = self.read_unsigned_int()
segments.append((first_segment, fragments_per_segment)) segments.append((first_segment, fragments_per_segment))
@ -91,12 +91,12 @@ def read_afrt(self):
quality_entry_count = self.read_unsigned_char() quality_entry_count = self.read_unsigned_char()
# QualitySegmentUrlModifiers # QualitySegmentUrlModifiers
for i in range(quality_entry_count): for _ in range(quality_entry_count):
self.read_string() self.read_string()
fragments_count = self.read_unsigned_int() fragments_count = self.read_unsigned_int()
fragments = [] fragments = []
for i in range(fragments_count): for _ in range(fragments_count):
first = self.read_unsigned_int() first = self.read_unsigned_int()
first_ts = self.read_unsigned_long_long() first_ts = self.read_unsigned_long_long()
duration = self.read_unsigned_int() duration = self.read_unsigned_int()
@ -135,11 +135,11 @@ def read_abst(self):
self.read_string() # MovieIdentifier self.read_string() # MovieIdentifier
server_count = self.read_unsigned_char() server_count = self.read_unsigned_char()
# ServerEntryTable # ServerEntryTable
for i in range(server_count): for _ in range(server_count):
self.read_string() self.read_string()
quality_count = self.read_unsigned_char() quality_count = self.read_unsigned_char()
# QualityEntryTable # QualityEntryTable
for i in range(quality_count): for _ in range(quality_count):
self.read_string() self.read_string()
# DrmData # DrmData
self.read_string() self.read_string()
@ -148,14 +148,14 @@ def read_abst(self):
segments_count = self.read_unsigned_char() segments_count = self.read_unsigned_char()
segments = [] segments = []
for i in range(segments_count): for _ in range(segments_count):
box_size, box_type, box_data = self.read_box_info() box_size, box_type, box_data = self.read_box_info()
assert box_type == b'asrt' assert box_type == b'asrt'
segment = FlvReader(box_data).read_asrt() segment = FlvReader(box_data).read_asrt()
segments.append(segment) segments.append(segment)
fragments_run_count = self.read_unsigned_char() fragments_run_count = self.read_unsigned_char()
fragments = [] fragments = []
for i in range(fragments_run_count): for _ in range(fragments_run_count):
box_size, box_type, box_data = self.read_box_info() box_size, box_type, box_data = self.read_box_info()
assert box_type == b'afrt' assert box_type == b'afrt'
fragments.append(FlvReader(box_data).read_afrt()) fragments.append(FlvReader(box_data).read_afrt())
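
The F4M box parser's counting loops never use their index, so `for i in range(n)` becomes `for _ in range(n)`, the conventional signal (flake8-bugbear's B007) that only each iteration's side effect matters. A small reader in the same spirit, over a made-up byte layout rather than the real FLV one:

```
import io

def skip_strings(stream, count):
    # Only the side effect (consuming bytes) matters, hence `_`:
    for _ in range(count):
        while stream.read(1) not in (b'\x00', b''):  # NUL-terminated string
            pass

buf = io.BytesIO(b'server-a\x00server-b\x00payload')
skip_strings(buf, 2)
assert buf.read() == b'payload'
```
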
@ -309,7 +309,7 @@ def _parse_bootstrap_node(self, node, base_url):
def real_download(self, filename, info_dict): def real_download(self, filename, info_dict):
man_url = info_dict['url'] man_url = info_dict['url']
requested_bitrate = info_dict.get('tbr') requested_bitrate = info_dict.get('tbr')
self.to_screen('[%s] Downloading f4m manifest' % self.FD_NAME) self.to_screen(f'[{self.FD_NAME}] Downloading f4m manifest')
urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url)) urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url))
man_url = urlh.url man_url = urlh.url
@ -326,8 +326,8 @@ def real_download(self, filename, info_dict):
formats = sorted(formats, key=lambda f: f[0]) formats = sorted(formats, key=lambda f: f[0])
rate, media = formats[-1] rate, media = formats[-1]
else: else:
rate, media = list(filter( rate, media = next(filter(
lambda f: int(f[0]) == requested_bitrate, formats))[0] lambda f: int(f[0]) == requested_bitrate, formats))
# Prefer baseURL for relative URLs as per 11.2 of F4M 3.0 spec. # Prefer baseURL for relative URLs as per 11.2 of F4M 3.0 spec.
man_base_url = get_base_url(doc) or man_url man_base_url = get_base_url(doc) or man_url
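
`list(filter(...))[0]` materialized every matching format only to keep the first; `next(filter(...))` (ruff's RUF015) stops at the first hit. One behavioral nuance: an empty match now raises `StopIteration` where the old spelling raised `IndexError`:

```
formats = [(500, 'audio-only'), (1000, 'sd'), (2500, 'hd')]
requested_bitrate = 1000

rate, media = next(filter(lambda f: int(f[0]) == requested_bitrate, formats))
assert (rate, media) == (1000, 'sd')

try:
    next(filter(lambda f: int(f[0]) == 9999, formats))
except StopIteration:
    print('no matching bitrate')  # was IndexError with list(...)[0]
```
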


@ -199,7 +199,7 @@ def _prepare_frag_download(self, ctx):
'.ytdl file is corrupt' if is_corrupt else '.ytdl file is corrupt' if is_corrupt else
'Inconsistent state of incomplete fragment download') 'Inconsistent state of incomplete fragment download')
self.report_warning( self.report_warning(
'%s. Restarting from the beginning ...' % message) f'{message}. Restarting from the beginning ...')
ctx['fragment_index'] = resume_len = 0 ctx['fragment_index'] = resume_len = 0
if 'ytdl_corrupt' in ctx: if 'ytdl_corrupt' in ctx:
del ctx['ytdl_corrupt'] del ctx['ytdl_corrupt']
@ -366,10 +366,10 @@ def decrypt_fragment(fragment, frag_content):
return decrypt_fragment return decrypt_fragment
def download_and_append_fragments_multiple(self, *args, **kwargs): def download_and_append_fragments_multiple(self, *args, **kwargs):
''' """
@params (ctx1, fragments1, info_dict1), (ctx2, fragments2, info_dict2), ... @params (ctx1, fragments1, info_dict1), (ctx2, fragments2, info_dict2), ...
all args must be either tuple or list all args must be either tuple or list
''' """
interrupt_trigger = [True] interrupt_trigger = [True]
max_progress = len(args) max_progress = len(args)
if max_progress == 1: if max_progress == 1:
@ -424,7 +424,7 @@ def interrupt_trigger_iter(fg):
finally: finally:
tpe.shutdown(wait=True) tpe.shutdown(wait=True)
if not interrupt_trigger[0] and not is_live: if not interrupt_trigger[0] and not is_live:
raise KeyboardInterrupt() raise KeyboardInterrupt
# we expect the user wants to stop and DO WANT the preceding postprocessors to run; # we expect the user wants to stop and DO WANT the preceding postprocessors to run;
# so returning a intermediate result here instead of KeyboardInterrupt on live # so returning a intermediate result here instead of KeyboardInterrupt on live
return result return result
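
The `raise KeyboardInterrupt()` to `raise KeyboardInterrupt` edit above (and the matching `SucceedDownload`, `ThrottledDownload` and `NextFragment` edits further down) leans on the fact that `raise` accepts an exception class and instantiates it itself. A tiny sketch:

```python
# Raising a class with no constructor arguments needs no parentheses;
# 'raise KeyboardInterrupt' and 'raise KeyboardInterrupt()' are equivalent.
try:
    raise KeyboardInterrupt
except KeyboardInterrupt as exc:
    assert isinstance(exc, KeyboardInterrupt)
```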

View file

@ -72,7 +72,7 @@ def check_results():
def real_download(self, filename, info_dict): def real_download(self, filename, info_dict):
man_url = info_dict['url'] man_url = info_dict['url']
self.to_screen('[%s] Downloading m3u8 manifest' % self.FD_NAME) self.to_screen(f'[{self.FD_NAME}] Downloading m3u8 manifest')
urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url)) urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url))
man_url = urlh.url man_url = urlh.url
@ -228,7 +228,7 @@ def is_ad_fragment_end(s):
'url': frag_url, 'url': frag_url,
'decrypt_info': decrypt_info, 'decrypt_info': decrypt_info,
'byte_range': byte_range, 'byte_range': byte_range,
'media_sequence': media_sequence 'media_sequence': media_sequence,
}) })
media_sequence += 1 media_sequence += 1
@ -350,9 +350,8 @@ def pack_fragment(frag_content, frag_index):
# XXX: this should probably be silent as well # XXX: this should probably be silent as well
# or verify that all segments contain the same data # or verify that all segments contain the same data
self.report_warning(bug_reports_message( self.report_warning(bug_reports_message(
'Discarding a %s block found in the middle of the stream; ' f'Discarding a {type(block).__name__} block found in the middle of the stream; '
'if the subtitles display incorrectly,' 'if the subtitles display incorrectly,'))
% (type(block).__name__)))
continue continue
block.write_into(output) block.write_into(output)
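
The warning rewrite above shows the commit's most common transformation: printf-style `%` formatting folded into an f-string. A sketch with a stand-in class in place of the real WebVTT block types:

```python
# Stand-in for a WebVTT block type; only its class name matters here.
class Style:
    pass

block = Style()

# Old and new spellings produce identical text; the f-string just inlines
# the expression where the '%s' placeholder used to be.
old = 'Discarding a %s block found in the middle of the stream; ' % (type(block).__name__)
new = f'Discarding a {type(block).__name__} block found in the middle of the stream; '
assert old == new
```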

View file

@ -176,7 +176,7 @@ def establish_connection():
'downloaded_bytes': ctx.resume_len, 'downloaded_bytes': ctx.resume_len,
'total_bytes': ctx.resume_len, 'total_bytes': ctx.resume_len,
}, info_dict) }, info_dict)
raise SucceedDownload() raise SucceedDownload
else: else:
# The length does not match, we start the download over # The length does not match, we start the download over
self.report_unable_to_resume() self.report_unable_to_resume()
@ -194,7 +194,7 @@ def establish_connection():
def close_stream(): def close_stream():
if ctx.stream is not None: if ctx.stream is not None:
if not ctx.tmpfilename == '-': if ctx.tmpfilename != '-':
ctx.stream.close() ctx.stream.close()
ctx.stream = None ctx.stream = None
@ -268,20 +268,20 @@ def retry(e):
ctx.filename = self.undo_temp_name(ctx.tmpfilename) ctx.filename = self.undo_temp_name(ctx.tmpfilename)
self.report_destination(ctx.filename) self.report_destination(ctx.filename)
except OSError as err: except OSError as err:
self.report_error('unable to open for writing: %s' % str(err)) self.report_error(f'unable to open for writing: {err}')
return False return False
if self.params.get('xattr_set_filesize', False) and data_len is not None: if self.params.get('xattr_set_filesize', False) and data_len is not None:
try: try:
write_xattr(ctx.tmpfilename, 'user.ytdl.filesize', str(data_len).encode()) write_xattr(ctx.tmpfilename, 'user.ytdl.filesize', str(data_len).encode())
except (XAttrUnavailableError, XAttrMetadataError) as err: except (XAttrUnavailableError, XAttrMetadataError) as err:
self.report_error('unable to set filesize xattr: %s' % str(err)) self.report_error(f'unable to set filesize xattr: {err}')
try: try:
ctx.stream.write(data_block) ctx.stream.write(data_block)
except OSError as err: except OSError as err:
self.to_stderr('\n') self.to_stderr('\n')
self.report_error('unable to write data: %s' % str(err)) self.report_error(f'unable to write data: {err}')
return False return False
# Apply rate limit # Apply rate limit
@ -327,7 +327,7 @@ def retry(e):
elif now - ctx.throttle_start > 3: elif now - ctx.throttle_start > 3:
if ctx.stream is not None and ctx.tmpfilename != '-': if ctx.stream is not None and ctx.tmpfilename != '-':
ctx.stream.close() ctx.stream.close()
raise ThrottledDownload() raise ThrottledDownload
elif speed: elif speed:
ctx.throttle_start = None ctx.throttle_start = None
@ -338,7 +338,7 @@ def retry(e):
if not is_test and ctx.chunk_size and ctx.content_len is not None and byte_counter < ctx.content_len: if not is_test and ctx.chunk_size and ctx.content_len is not None and byte_counter < ctx.content_len:
ctx.resume_len = byte_counter ctx.resume_len = byte_counter
raise NextFragment() raise NextFragment
if ctx.tmpfilename != '-': if ctx.tmpfilename != '-':
ctx.stream.close() ctx.stream.close()
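
Two details from the http.py hunks above, sketched with throwaway values: f-strings call `str()` on interpolated objects, which is why the explicit `str(err)` conversions could be dropped, and `not a == b` simplifies to `a != b`.

```python
# 1) Interpolating an exception into an f-string stringifies it implicitly.
err = OSError(13, 'Permission denied')
assert f'unable to write data: {err}' == 'unable to write data: %s' % str(err)

# 2) 'not ctx.tmpfilename == "-"' reads more directly as an inequality test.
tmpfilename = 'video.part'  # illustrative name
assert (not tmpfilename == '-') == (tmpfilename != '-')
```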

View file

@ -251,7 +251,7 @@ def real_download(self, filename, info_dict):
skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True) skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)
frag_index = 0 frag_index = 0
for i, segment in enumerate(segments): for segment in segments:
frag_index += 1 frag_index += 1
if frag_index <= ctx['fragment_index']: if frag_index <= ctx['fragment_index']:
continue continue
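
The loop rewrite above drops an index variable that was never read: `enumerate()` is removed and the sequence is iterated directly, while the separate `frag_index` counter keeps the 1-based fragment numbering. A sketch with a made-up segment list:

```python
segments = ['seg0', 'seg1', 'seg2']  # hypothetical fragment descriptors

frag_index = 0
for segment in segments:      # was: for i, segment in enumerate(segments)
    frag_index += 1           # 1-based counter, independent of enumerate()
    # ... download fragment here ...

assert frag_index == len(segments)
```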

View file

@ -10,7 +10,7 @@
class MhtmlFD(FragmentFD): class MhtmlFD(FragmentFD):
_STYLESHEET = """\ _STYLESHEET = '''\
html, body { html, body {
margin: 0; margin: 0;
padding: 0; padding: 0;
@ -45,7 +45,7 @@ class MhtmlFD(FragmentFD):
max-width: 100%; max-width: 100%;
max-height: calc(100vh - 5em); max-height: calc(100vh - 5em);
} }
""" '''
_STYLESHEET = re.sub(r'\s+', ' ', _STYLESHEET) _STYLESHEET = re.sub(r'\s+', ' ', _STYLESHEET)
_STYLESHEET = re.sub(r'\B \B|(?<=[\w\-]) (?=[^\w\-])|(?<=[^\w\-]) (?=[\w\-])', '', _STYLESHEET) _STYLESHEET = re.sub(r'\B \B|(?<=[\w\-]) (?=[^\w\-])|(?<=[^\w\-]) (?=[\w\-])', '', _STYLESHEET)
@ -57,24 +57,19 @@ def _escape_mime(s):
)).decode('us-ascii') + '?=' )).decode('us-ascii') + '?='
def _gen_cid(self, i, fragment, frag_boundary): def _gen_cid(self, i, fragment, frag_boundary):
return '%u.%s@yt-dlp.github.io.invalid' % (i, frag_boundary) return f'{i}.{frag_boundary}@yt-dlp.github.io.invalid'
def _gen_stub(self, *, fragments, frag_boundary, title): def _gen_stub(self, *, fragments, frag_boundary, title):
output = io.StringIO() output = io.StringIO()
output.write(( output.write(
'<!DOCTYPE html>' '<!DOCTYPE html>'
'<html>' '<html>'
'<head>' '<head>'
'' '<meta name="generator" content="yt-dlp {version}">' f'<meta name="generator" content="yt-dlp {escapeHTML(YT_DLP_VERSION)}">'
'' '<title>{title}</title>' f'<title>{escapeHTML(title)}</title>'
'' '<style>{styles}</style>' f'<style>{self._STYLESHEET}</style>'
'<body>' '<body>')
).format(
version=escapeHTML(YT_DLP_VERSION),
styles=self._STYLESHEET,
title=escapeHTML(title)
))
t0 = 0 t0 = 0
for i, frag in enumerate(fragments): for i, frag in enumerate(fragments):
@ -87,15 +82,12 @@ def _gen_stub(self, *, fragments, frag_boundary, title):
num=i + 1, num=i + 1,
t0=srt_subtitles_timecode(t0), t0=srt_subtitles_timecode(t0),
t1=srt_subtitles_timecode(t1), t1=srt_subtitles_timecode(t1),
duration=formatSeconds(frag['duration'], msec=True) duration=formatSeconds(frag['duration'], msec=True),
)) ))
except (KeyError, ValueError, TypeError): except (KeyError, ValueError, TypeError):
t1 = None t1 = None
output.write(( output.write(f'<figcaption>Slide #{i + 1}</figcaption>')
'<figcaption>Slide #{num}</figcaption>' output.write(f'<img src="cid:{self._gen_cid(i, frag, frag_boundary)}">')
).format(num=i + 1))
output.write('<img src="cid:{cid}">'.format(
cid=self._gen_cid(i, frag, frag_boundary)))
output.write('</figure>') output.write('</figure>')
t0 = t1 t0 = t1
@ -126,31 +118,24 @@ def real_download(self, filename, info_dict):
stub = self._gen_stub( stub = self._gen_stub(
fragments=fragments, fragments=fragments,
frag_boundary=frag_boundary, frag_boundary=frag_boundary,
title=title title=title,
) )
ctx['dest_stream'].write(( ctx['dest_stream'].write((
'MIME-Version: 1.0\r\n' 'MIME-Version: 1.0\r\n'
'From: <nowhere@yt-dlp.github.io.invalid>\r\n' 'From: <nowhere@yt-dlp.github.io.invalid>\r\n'
'To: <nowhere@yt-dlp.github.io.invalid>\r\n' 'To: <nowhere@yt-dlp.github.io.invalid>\r\n'
'Subject: {title}\r\n' f'Subject: {self._escape_mime(title)}\r\n'
'Content-type: multipart/related; ' 'Content-type: multipart/related; '
'' 'boundary="{boundary}"; ' f'boundary="{frag_boundary}"; '
'' 'type="text/html"\r\n' 'type="text/html"\r\n'
'X.yt-dlp.Origin: {origin}\r\n' f'X.yt-dlp.Origin: {origin}\r\n'
'\r\n' '\r\n'
'--{boundary}\r\n' f'--{frag_boundary}\r\n'
'Content-Type: text/html; charset=utf-8\r\n' 'Content-Type: text/html; charset=utf-8\r\n'
'Content-Length: {length}\r\n' f'Content-Length: {len(stub)}\r\n'
'\r\n' '\r\n'
'{stub}\r\n' f'{stub}\r\n').encode())
).format(
origin=origin,
boundary=frag_boundary,
length=len(stub),
title=self._escape_mime(title),
stub=stub
).encode())
extra_state['header_written'] = True extra_state['header_written'] = True
for i, fragment in enumerate(fragments): for i, fragment in enumerate(fragments):
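
The MIME header rewrite above works because adjacent string literals are concatenated at compile time, and plain and f-string pieces may be mixed freely, so each line can carry its own interpolation instead of routing everything through one trailing `.format()` call. A sketch with placeholder values:

```python
# All values below are placeholders, not real downloader state.
title = 'Example deck'
frag_boundary = 'boundary-0001'
stub = '<!DOCTYPE html><html>...</html>'

header = (
    'MIME-Version: 1.0\r\n'
    f'Subject: {title}\r\n'              # f-string piece
    'Content-type: multipart/related; '  # plain piece
    f'boundary="{frag_boundary}"; '
    'type="text/html"\r\n'
    '\r\n'
    f'Content-Length: {len(stub)}\r\n'   # expressions work inline too
)
assert 'boundary="boundary-0001"' in header
```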

View file

@ -15,7 +15,7 @@ class NiconicoDmcFD(FileDownloader):
def real_download(self, filename, info_dict): def real_download(self, filename, info_dict):
from ..extractor.niconico import NiconicoIE from ..extractor.niconico import NiconicoIE
self.to_screen('[%s] Downloading from DMC' % self.FD_NAME) self.to_screen(f'[{self.FD_NAME}] Downloading from DMC')
ie = NiconicoIE(self.ydl) ie = NiconicoIE(self.ydl)
info_dict, heartbeat_info_dict = ie._get_heartbeat_info(info_dict) info_dict, heartbeat_info_dict = ie._get_heartbeat_info(info_dict)
@ -34,7 +34,7 @@ def heartbeat():
try: try:
self.ydl.urlopen(request).read() self.ydl.urlopen(request).read()
except Exception: except Exception:
self.to_screen('[%s] Heartbeat failed' % self.FD_NAME) self.to_screen(f'[{self.FD_NAME}] Heartbeat failed')
with heartbeat_lock: with heartbeat_lock:
if not download_complete: if not download_complete:
@ -85,14 +85,14 @@ def communicate_ws(reconnect):
'quality': live_quality, 'quality': live_quality,
'protocol': 'hls+fmp4', 'protocol': 'hls+fmp4',
'latency': live_latency, 'latency': live_latency,
'chasePlay': False 'chasePlay': False,
}, },
'room': { 'room': {
'protocol': 'webSocket', 'protocol': 'webSocket',
'commentable': True 'commentable': True,
}, },
'reconnect': True, 'reconnect': True,
} },
})) }))
else: else:
ws = ws_extractor ws = ws_extractor
@ -118,7 +118,7 @@ def communicate_ws(reconnect):
elif self.ydl.params.get('verbose', False): elif self.ydl.params.get('verbose', False):
if len(recv) > 100: if len(recv) > 100:
recv = recv[:100] + '...' recv = recv[:100] + '...'
self.to_screen('[debug] Server said: %s' % recv) self.to_screen(f'[debug] Server said: {recv}')
def ws_main(): def ws_main():
reconnect = False reconnect = False
@ -128,7 +128,7 @@ def ws_main():
if ret is True: if ret is True:
return return
except BaseException as e: except BaseException as e:
self.to_screen('[%s] %s: Connection error occurred, reconnecting after 10 seconds: %s' % ('niconico:live', video_id, str_or_none(e))) self.to_screen('[{}] {}: Connection error occurred, reconnecting after 10 seconds: {}'.format('niconico:live', video_id, str_or_none(e)))
time.sleep(10) time.sleep(10)
continue continue
finally: finally:
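
Much of the churn above is just added trailing commas in multi-line literals. The payoff is mechanical: with every element line ending in a comma, appending or reordering keys later touches exactly one line in a diff. A sketch mirroring the WebSocket payload shape:

```python
# Shape borrowed from the hunk above; the values are illustrative.
payload = {
    'stream': {
        'quality': 'high',
        'protocol': 'hls+fmp4',
        'chasePlay': False,  # trailing comma: adding a key is a one-line diff
    },
    'room': {
        'protocol': 'webSocket',
        'commentable': True,
    },
    'reconnect': True,
}
assert payload['room']['commentable'] is True
```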

View file

@ -180,9 +180,9 @@ def run_rtmpdump(args):
while retval in (RD_INCOMPLETE, RD_FAILED) and not test and not live: while retval in (RD_INCOMPLETE, RD_FAILED) and not test and not live:
prevsize = os.path.getsize(encodeFilename(tmpfilename)) prevsize = os.path.getsize(encodeFilename(tmpfilename))
self.to_screen('[rtmpdump] Downloaded %s bytes' % prevsize) self.to_screen(f'[rtmpdump] Downloaded {prevsize} bytes')
time.sleep(5.0) # This seems to be needed time.sleep(5.0) # This seems to be needed
args = basic_args + ['--resume'] args = [*basic_args, '--resume']
if retval == RD_FAILED: if retval == RD_FAILED:
args += ['--skip', '1'] args += ['--skip', '1']
args = [encodeArgument(a) for a in args] args = [encodeArgument(a) for a in args]
@ -197,7 +197,7 @@ def run_rtmpdump(args):
break break
if retval == RD_SUCCESS or (test and retval == RD_INCOMPLETE): if retval == RD_SUCCESS or (test and retval == RD_INCOMPLETE):
fsize = os.path.getsize(encodeFilename(tmpfilename)) fsize = os.path.getsize(encodeFilename(tmpfilename))
self.to_screen('[rtmpdump] Downloaded %s bytes' % fsize) self.to_screen(f'[rtmpdump] Downloaded {fsize} bytes')
self.try_rename(tmpfilename, filename) self.try_rename(tmpfilename, filename)
self._hook_progress({ self._hook_progress({
'downloaded_bytes': fsize, 'downloaded_bytes': fsize,
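
The `basic_args + ['--resume']` to `[*basic_args, '--resume']` change above swaps list concatenation for iterable unpacking; both build the same new list. A sketch with a hypothetical command line:

```python
basic_args = ['rtmpdump', '--flv', 'out.flv']  # illustrative, not the real args

args = [*basic_args, '--resume']          # unpacking spelling
assert args == basic_args + ['--resume']  # concatenation spelling, same result
assert args is not basic_args             # a fresh list; the original is untouched
```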

View file

@ -18,7 +18,7 @@ class YoutubeLiveChatFD(FragmentFD):
def real_download(self, filename, info_dict): def real_download(self, filename, info_dict):
video_id = info_dict['video_id'] video_id = info_dict['video_id']
self.to_screen('[%s] Downloading live chat' % self.FD_NAME) self.to_screen(f'[{self.FD_NAME}] Downloading live chat')
if not self.params.get('skip_download') and info_dict['protocol'] == 'youtube_live_chat': if not self.params.get('skip_download') and info_dict['protocol'] == 'youtube_live_chat':
self.report_warning('Live chat download runs until the livestream ends. ' self.report_warning('Live chat download runs until the livestream ends. '
'If you wish to download the video simultaneously, run a separate yt-dlp instance') 'If you wish to download the video simultaneously, run a separate yt-dlp instance')

View file

@ -4,7 +4,6 @@
import time import time
from .common import InfoExtractor from .common import InfoExtractor
from ..compat import compat_str
from ..utils import ( from ..utils import (
ExtractorError, ExtractorError,
dict_get, dict_get,
@ -67,7 +66,7 @@ class ABCIE(InfoExtractor):
'ext': 'mp4', 'ext': 'mp4',
'title': 'WWI Centenary', 'title': 'WWI Centenary',
'description': 'md5:c2379ec0ca84072e86b446e536954546', 'description': 'md5:c2379ec0ca84072e86b446e536954546',
} },
}, { }, {
'url': 'https://www.abc.net.au/news/programs/the-world/2020-06-10/black-lives-matter-protests-spawn-support-for/12342074', 'url': 'https://www.abc.net.au/news/programs/the-world/2020-06-10/black-lives-matter-protests-spawn-support-for/12342074',
'info_dict': { 'info_dict': {
@ -75,7 +74,7 @@ class ABCIE(InfoExtractor):
'ext': 'mp4', 'ext': 'mp4',
'title': 'Black Lives Matter protests spawn support for Papuans in Indonesia', 'title': 'Black Lives Matter protests spawn support for Papuans in Indonesia',
'description': 'md5:2961a17dc53abc558589ccd0fb8edd6f', 'description': 'md5:2961a17dc53abc558589ccd0fb8edd6f',
} },
}, { }, {
'url': 'https://www.abc.net.au/btn/newsbreak/btn-newsbreak-20200814/12560476', 'url': 'https://www.abc.net.au/btn/newsbreak/btn-newsbreak-20200814/12560476',
'info_dict': { 'info_dict': {
@ -86,7 +85,7 @@ class ABCIE(InfoExtractor):
'upload_date': '20200813', 'upload_date': '20200813',
'uploader': 'Behind the News', 'uploader': 'Behind the News',
'uploader_id': 'behindthenews', 'uploader_id': 'behindthenews',
} },
}, { }, {
'url': 'https://www.abc.net.au/news/2023-06-25/wagner-boss-orders-troops-back-to-bases-to-avoid-bloodshed/102520540', 'url': 'https://www.abc.net.au/news/2023-06-25/wagner-boss-orders-troops-back-to-bases-to-avoid-bloodshed/102520540',
'info_dict': { 'info_dict': {
@ -95,7 +94,7 @@ class ABCIE(InfoExtractor):
'ext': 'mp4', 'ext': 'mp4',
'description': 'Wagner troops leave Rostov-on-Don and\xa0Yevgeny Prigozhin will move to Belarus under a deal brokered by Belarusian President Alexander Lukashenko to end the mutiny.', 'description': 'Wagner troops leave Rostov-on-Don and\xa0Yevgeny Prigozhin will move to Belarus under a deal brokered by Belarusian President Alexander Lukashenko to end the mutiny.',
'thumbnail': 'https://live-production.wcms.abc-cdn.net.au/0c170f5b57f0105c432f366c0e8e267b?impolicy=wcms_crop_resize&cropH=2813&cropW=5000&xPos=0&yPos=249&width=862&height=485', 'thumbnail': 'https://live-production.wcms.abc-cdn.net.au/0c170f5b57f0105c432f366c0e8e267b?impolicy=wcms_crop_resize&cropH=2813&cropW=5000&xPos=0&yPos=249&width=862&height=485',
} },
}] }]
def _real_extract(self, url): def _real_extract(self, url):
@ -126,7 +125,7 @@ def _real_extract(self, url):
if mobj is None: if mobj is None:
expired = self._html_search_regex(r'(?s)class="expired-(?:video|audio)".+?<span>(.+?)</span>', webpage, 'expired', None) expired = self._html_search_regex(r'(?s)class="expired-(?:video|audio)".+?<span>(.+?)</span>', webpage, 'expired', None)
if expired: if expired:
raise ExtractorError('%s said: %s' % (self.IE_NAME, expired), expected=True) raise ExtractorError(f'{self.IE_NAME} said: {expired}', expected=True)
raise ExtractorError('Unable to extract video urls') raise ExtractorError('Unable to extract video urls')
urls_info = self._parse_json( urls_info = self._parse_json(
@ -164,7 +163,7 @@ def _real_extract(self, url):
'height': height, 'height': height,
'tbr': bitrate, 'tbr': bitrate,
'filesize': int_or_none(url_info.get('filesize')), 'filesize': int_or_none(url_info.get('filesize')),
'format_id': format_id 'format_id': format_id,
}) })
return { return {
@ -288,13 +287,12 @@ def _real_extract(self, url):
stream = next(s for s in video_params['playlist'] if s.get('type') in ('program', 'livestream')) stream = next(s for s in video_params['playlist'] if s.get('type') in ('program', 'livestream'))
house_number = video_params.get('episodeHouseNumber') or video_id house_number = video_params.get('episodeHouseNumber') or video_id
path = '/auth/hls/sign?ts={0}&hn={1}&d=android-tablet'.format( path = f'/auth/hls/sign?ts={int(time.time())}&hn={house_number}&d=android-tablet'
int(time.time()), house_number)
sig = hmac.new( sig = hmac.new(
b'android.content.res.Resources', b'android.content.res.Resources',
path.encode('utf-8'), hashlib.sha256).hexdigest() path.encode(), hashlib.sha256).hexdigest()
token = self._download_webpage( token = self._download_webpage(
'http://iview.abc.net.au{0}&sig={1}'.format(path, sig), video_id) f'http://iview.abc.net.au{path}&sig={sig}', video_id)
def tokenize_url(url, token): def tokenize_url(url, token):
return update_url_query(url, { return update_url_query(url, {
@ -303,7 +301,7 @@ def tokenize_url(url, token):
for sd in ('1080', '720', 'sd', 'sd-low'): for sd in ('1080', '720', 'sd', 'sd-low'):
sd_url = try_get( sd_url = try_get(
stream, lambda x: x['streams']['hls'][sd], compat_str) stream, lambda x: x['streams']['hls'][sd], str)
if not sd_url: if not sd_url:
continue continue
formats = self._extract_m3u8_formats( formats = self._extract_m3u8_formats(
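
For context on what the reflowed lines above compute, here is a self-contained sketch of the iview path-signing step as it appears in the hunk. The HMAC key string is copied from the visible code; the timestamp and house number are made up:

```python
import hashlib
import hmac

ts = 1700000000                 # illustrative; the code uses int(time.time())
house_number = 'XX0000A000S00'  # hypothetical episodeHouseNumber
path = f'/auth/hls/sign?ts={ts}&hn={house_number}&d=android-tablet'

sig = hmac.new(
    b'android.content.res.Resources',  # static key from the extractor
    path.encode(),                     # str.encode() defaults to UTF-8
    hashlib.sha256).hexdigest()

token_url = f'http://iview.abc.net.au{path}&sig={sig}'
assert len(sig) == 64  # hex digest length of SHA-256
```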
@ -358,7 +356,7 @@ class ABCIViewShowSeriesIE(InfoExtractor):
'description': 'md5:93119346c24a7c322d446d8eece430ff', 'description': 'md5:93119346c24a7c322d446d8eece430ff',
'series': 'Upper Middle Bogan', 'series': 'Upper Middle Bogan',
'season': 'Series 1', 'season': 'Series 1',
'thumbnail': r're:^https?://cdn\.iview\.abc\.net\.au/thumbs/.*\.jpg$' 'thumbnail': r're:^https?://cdn\.iview\.abc\.net\.au/thumbs/.*\.jpg$',
}, },
'playlist_count': 8, 'playlist_count': 8,
}, { }, {
@ -386,7 +384,7 @@ class ABCIViewShowSeriesIE(InfoExtractor):
'description': 'Satirist Mark Humphries brings his unique perspective on current political events for 7.30.', 'description': 'Satirist Mark Humphries brings his unique perspective on current political events for 7.30.',
'series': '7.30 Mark Humphries Satire', 'series': '7.30 Mark Humphries Satire',
'season': 'Episodes', 'season': 'Episodes',
'thumbnail': r're:^https?://cdn\.iview\.abc\.net\.au/thumbs/.*\.jpg$' 'thumbnail': r're:^https?://cdn\.iview\.abc\.net\.au/thumbs/.*\.jpg$',
}, },
'playlist_count': 15, 'playlist_count': 15,
}] }]
@ -398,7 +396,7 @@ def _real_extract(self, url):
r'window\.__INITIAL_STATE__\s*=\s*[\'"](.+?)[\'"]\s*;', r'window\.__INITIAL_STATE__\s*=\s*[\'"](.+?)[\'"]\s*;',
webpage, 'initial state') webpage, 'initial state')
video_data = self._parse_json( video_data = self._parse_json(
unescapeHTML(webpage_data).encode('utf-8').decode('unicode_escape'), show_id) unescapeHTML(webpage_data).encode().decode('unicode_escape'), show_id)
video_data = video_data['route']['pageData']['_embedded'] video_data = video_data['route']['pageData']['_embedded']
highlight = try_get(video_data, lambda x: x['highlightVideo']['shareUrl']) highlight = try_get(video_data, lambda x: x['highlightVideo']['shareUrl'])

View file

@ -58,7 +58,7 @@ def _real_extract(self, url):
display_id = mobj.group('display_id') display_id = mobj.group('display_id')
video_id = mobj.group('id') video_id = mobj.group('id')
info_dict = self._extract_feed_info( info_dict = self._extract_feed_info(
'http://abcnews.go.com/video/itemfeed?id=%s' % video_id) f'http://abcnews.go.com/video/itemfeed?id={video_id}')
info_dict.update({ info_dict.update({
'id': video_id, 'id': video_id,
'display_id': display_id, 'display_id': display_id,

View file

@ -1,5 +1,4 @@
from .common import InfoExtractor from .common import InfoExtractor
from ..compat import compat_str
from ..utils import ( from ..utils import (
dict_get, dict_get,
int_or_none, int_or_none,
@ -57,11 +56,11 @@ def _real_extract(self, url):
data = self._download_json( data = self._download_json(
'https://api.abcotvs.com/v2/content', display_id, query={ 'https://api.abcotvs.com/v2/content', display_id, query={
'id': video_id, 'id': video_id,
'key': 'otv.web.%s.story' % station, 'key': f'otv.web.{station}.story',
'station': station, 'station': station,
})['data'] })['data']
video = try_get(data, lambda x: x['featuredMedia']['video'], dict) or data video = try_get(data, lambda x: x['featuredMedia']['video'], dict) or data
video_id = compat_str(dict_get(video, ('id', 'publishedKey'), video_id)) video_id = str(dict_get(video, ('id', 'publishedKey'), video_id))
title = video.get('title') or video['linkText'] title = video.get('title') or video['linkText']
formats = [] formats = []
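
`compat_str` was a Python 2 era alias that has long resolved to the builtin `str`, so replacing it changes nothing at runtime. A sketch of the converted call site, with a simplified stand-in for yt-dlp's `dict_get` helper:

```python
def dict_get(d, keys, default=None):
    # simplified stand-in: first non-None value among the given keys
    return next((d[k] for k in keys if d.get(k) is not None), default)

video = {'publishedKey': 12345}  # hypothetical API payload
video_id = str(dict_get(video, ('id', 'publishedKey'), 'fallback'))
assert video_id == '12345'
```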

View file

@ -66,8 +66,8 @@ def _get_videokey_from_ticket(self, ticket):
query={'t': media_token}, query={'t': media_token},
data=json.dumps({ data=json.dumps({
'kv': 'a', 'kv': 'a',
'lt': ticket 'lt': ticket,
}).encode('utf-8'), }).encode(),
headers={ headers={
'Content-Type': 'application/json', 'Content-Type': 'application/json',
}) })
@ -77,7 +77,7 @@ def _get_videokey_from_ticket(self, ticket):
h = hmac.new( h = hmac.new(
binascii.unhexlify(self.HKEY), binascii.unhexlify(self.HKEY),
(license_response['cid'] + self.ie._DEVICE_ID).encode('utf-8'), (license_response['cid'] + self.ie._DEVICE_ID).encode(),
digestmod=hashlib.sha256) digestmod=hashlib.sha256)
enckey = bytes_to_intlist(h.digest()) enckey = bytes_to_intlist(h.digest())
@ -103,11 +103,11 @@ class AbemaTVBaseIE(InfoExtractor):
@classmethod @classmethod
def _generate_aks(cls, deviceid): def _generate_aks(cls, deviceid):
deviceid = deviceid.encode('utf-8') deviceid = deviceid.encode()
# add 1 hour and then drop minute and secs # add 1 hour and then drop minute and secs
ts_1hour = int((time_seconds() // 3600 + 1) * 3600) ts_1hour = int((time_seconds() // 3600 + 1) * 3600)
time_struct = time.gmtime(ts_1hour) time_struct = time.gmtime(ts_1hour)
ts_1hour_str = str(ts_1hour).encode('utf-8') ts_1hour_str = str(ts_1hour).encode()
tmp = None tmp = None
@ -119,7 +119,7 @@ def mix_once(nonce):
def mix_tmp(count): def mix_tmp(count):
nonlocal tmp nonlocal tmp
for i in range(count): for _ in range(count):
mix_once(tmp) mix_once(tmp)
def mix_twist(nonce): def mix_twist(nonce):
@ -160,7 +160,7 @@ def _get_device_token(self):
data=json.dumps({ data=json.dumps({
'deviceId': self._DEVICE_ID, 'deviceId': self._DEVICE_ID,
'applicationKeySecret': aks, 'applicationKeySecret': aks,
}).encode('utf-8'), }).encode(),
headers={ headers={
'Content-Type': 'application/json', 'Content-Type': 'application/json',
}) })
@ -180,7 +180,7 @@ def _get_media_token(self, invalidate=False, to_show=True):
'osLang': 'ja_JP', 'osLang': 'ja_JP',
'osTimezone': 'Asia/Tokyo', 'osTimezone': 'Asia/Tokyo',
'appId': 'tv.abema', 'appId': 'tv.abema',
'appVersion': '3.27.1' 'appVersion': '3.27.1',
}, headers={ }, headers={
'Authorization': f'bearer {self._get_device_token()}', 'Authorization': f'bearer {self._get_device_token()}',
})['token'] })['token']
@ -202,8 +202,8 @@ def _perform_login(self, username, password):
f'https://api.abema.io/v1/auth/{ep}', None, note='Logging in', f'https://api.abema.io/v1/auth/{ep}', None, note='Logging in',
data=json.dumps({ data=json.dumps({
method: username, method: username,
'password': password 'password': password,
}).encode('utf-8'), headers={ }).encode(), headers={
'Authorization': f'bearer {self._get_device_token()}', 'Authorization': f'bearer {self._get_device_token()}',
'Origin': 'https://abema.tv', 'Origin': 'https://abema.tv',
'Referer': 'https://abema.tv/', 'Referer': 'https://abema.tv/',
@ -344,7 +344,7 @@ def _real_extract(self, url):
description = self._html_search_regex( description = self._html_search_regex(
(r'<p\s+class="com-video-EpisodeDetailsBlock__content"><span\s+class=".+?">(.+?)</span></p><div', (r'<p\s+class="com-video-EpisodeDetailsBlock__content"><span\s+class=".+?">(.+?)</span></p><div',
r'<span\s+class=".+?SlotSummary.+?">(.+?)</span></div><div',), r'<span\s+class=".+?SlotSummary.+?">(.+?)</span></div><div'),
webpage, 'description', default=None, group=1) webpage, 'description', default=None, group=1)
if not description: if not description:
og_desc = self._html_search_meta( og_desc = self._html_search_meta(
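
The many `encode('utf-8')` to `encode()` edits above rely on `str.encode()` defaulting to UTF-8, so the argument was redundant. A sketch:

```python
import json

payload = json.dumps({'deviceId': 'dummy-device-id'})  # illustrative body
assert payload.encode() == payload.encode('utf-8')     # identical bytes
```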

View file

@ -67,7 +67,7 @@ class ACastIE(ACastBaseIE):
'display_id': '2.raggarmordet-rosterurdetforflutna', 'display_id': '2.raggarmordet-rosterurdetforflutna',
'season_number': 4, 'season_number': 4,
'season': 'Season 4', 'season': 'Season 4',
} },
}, { }, {
'url': 'http://embed.acast.com/adambuxton/ep.12-adam-joeschristmaspodcast2015', 'url': 'http://embed.acast.com/adambuxton/ep.12-adam-joeschristmaspodcast2015',
'only_matching': True, 'only_matching': True,
@ -93,13 +93,13 @@ class ACastIE(ACastBaseIE):
'series': 'Democracy Sausage with Mark Kenny', 'series': 'Democracy Sausage with Mark Kenny',
'timestamp': 1684826362, 'timestamp': 1684826362,
'description': 'md5:feabe1fc5004c78ee59c84a46bf4ba16', 'description': 'md5:feabe1fc5004c78ee59c84a46bf4ba16',
} },
}] }]
def _real_extract(self, url): def _real_extract(self, url):
channel, display_id = self._match_valid_url(url).groups() channel, display_id = self._match_valid_url(url).groups()
episode = self._call_api( episode = self._call_api(
'%s/episodes/%s' % (channel, display_id), f'{channel}/episodes/{display_id}',
display_id, {'showInfo': 'true'}) display_id, {'showInfo': 'true'})
return self._extract_episode( return self._extract_episode(
episode, self._extract_show_info(episode.get('show') or {})) episode, self._extract_show_info(episode.get('show') or {}))
@ -130,7 +130,7 @@ class ACastChannelIE(ACastBaseIE):
@classmethod @classmethod
def suitable(cls, url): def suitable(cls, url):
return False if ACastIE.suitable(url) else super(ACastChannelIE, cls).suitable(url) return False if ACastIE.suitable(url) else super().suitable(url)
def _real_extract(self, url): def _real_extract(self, url):
show_slug = self._match_id(url) show_slug = self._match_id(url)
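
The `super(ACastChannelIE, cls).suitable(url)` to `super().suitable(url)` edit above uses the zero-argument form, which resolves to the same class/receiver pair inside a method body. A self-contained sketch with hypothetical classes:

```python
class Base:
    @classmethod
    def suitable(cls, url):
        return url.startswith('http')

class Channel(Base):
    @classmethod
    def suitable(cls, url):
        # modern spelling of: super(Channel, cls).suitable(url)
        return False if url.endswith('/episode') else super().suitable(url)

assert Channel.suitable('https://example.com/show') is True
assert Channel.suitable('https://example.com/show/episode') is False
```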

View file

@ -25,7 +25,7 @@ def _extract_metadata(self, video_id, video_info):
'width': int_or_none(video.get('width')), 'width': int_or_none(video.get('width')),
'height': int_or_none(video.get('height')), 'height': int_or_none(video.get('height')),
'tbr': float_or_none(video.get('avgBitrate')), 'tbr': float_or_none(video.get('avgBitrate')),
**parse_codecs(video.get('codecs', '')) **parse_codecs(video.get('codecs', '')),
}) })
return { return {
@ -77,7 +77,7 @@ class AcFunVideoIE(AcFunVideoBaseIE):
'comment_count': int, 'comment_count': int,
'thumbnail': r're:^https?://.*\.(jpg|jpeg)', 'thumbnail': r're:^https?://.*\.(jpg|jpeg)',
'description': 'md5:67583aaf3a0f933bd606bc8a2d3ebb17', 'description': 'md5:67583aaf3a0f933bd606bc8a2d3ebb17',
} },
}] }]
def _real_extract(self, url): def _real_extract(self, url):

View file

@ -7,7 +7,6 @@
from .common import InfoExtractor from .common import InfoExtractor
from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7 from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
from ..compat import compat_b64decode
from ..networking.exceptions import HTTPError from ..networking.exceptions import HTTPError
from ..utils import ( from ..utils import (
ExtractorError, ExtractorError,
@ -111,9 +110,9 @@ def _get_subtitles(self, sub_url, video_id):
# http://animationdigitalnetwork.fr/components/com_vodvideo/videojs/adn-vjs.min.js # http://animationdigitalnetwork.fr/components/com_vodvideo/videojs/adn-vjs.min.js
dec_subtitles = unpad_pkcs7(aes_cbc_decrypt_bytes( dec_subtitles = unpad_pkcs7(aes_cbc_decrypt_bytes(
compat_b64decode(enc_subtitles[24:]), base64.b64decode(enc_subtitles[24:]),
binascii.unhexlify(self._K + '7fac1178830cfe0c'), binascii.unhexlify(self._K + '7fac1178830cfe0c'),
compat_b64decode(enc_subtitles[:24]))) base64.b64decode(enc_subtitles[:24])))
subtitles_json = self._parse_json(dec_subtitles.decode(), None, fatal=False) subtitles_json = self._parse_json(dec_subtitles.decode(), None, fatal=False)
if not subtitles_json: if not subtitles_json:
return None return None
@ -136,7 +135,7 @@ def _get_subtitles(self, sub_url, video_id):
if start is None or end is None or text is None: if start is None or end is None or text is None:
continue continue
alignment = self._POS_ALIGN_MAP.get(position_align, 2) + self._LINE_ALIGN_MAP.get(line_align, 0) alignment = self._POS_ALIGN_MAP.get(position_align, 2) + self._LINE_ALIGN_MAP.get(line_align, 0)
ssa += os.linesep + 'Dialogue: Marked=0,%s,%s,Default,,0,0,0,,%s%s' % ( ssa += os.linesep + 'Dialogue: Marked=0,{},{},Default,,0,0,0,,{}{}'.format(
ass_subtitles_timecode(start), ass_subtitles_timecode(start),
ass_subtitles_timecode(end), ass_subtitles_timecode(end),
'{\\a%d}' % alignment if alignment != 2 else '', '{\\a%d}' % alignment if alignment != 2 else '',
@ -178,7 +177,7 @@ def _perform_login(self, username, password):
def _real_extract(self, url): def _real_extract(self, url):
lang, video_id = self._match_valid_url(url).group('lang', 'id') lang, video_id = self._match_valid_url(url).group('lang', 'id')
video_base_url = self._PLAYER_BASE_URL + 'video/%s/' % video_id video_base_url = self._PLAYER_BASE_URL + f'video/{video_id}/'
player = self._download_json( player = self._download_json(
video_base_url + 'configuration', video_id, video_base_url + 'configuration', video_id,
'Downloading player config JSON metadata', 'Downloading player config JSON metadata',
@ -219,12 +218,12 @@ def _real_extract(self, url):
links_url, video_id, 'Downloading links JSON metadata', headers={ links_url, video_id, 'Downloading links JSON metadata', headers={
'X-Player-Token': authorization, 'X-Player-Token': authorization,
'X-Target-Distribution': lang, 'X-Target-Distribution': lang,
**self._HEADERS **self._HEADERS,
}, query={ }, query={
'freeWithAds': 'true', 'freeWithAds': 'true',
'adaptive': 'false', 'adaptive': 'false',
'withMetadata': 'true', 'withMetadata': 'true',
'source': 'Web' 'source': 'Web',
}) })
break break
except ExtractorError as e: except ExtractorError as e:
@ -256,7 +255,7 @@ def _real_extract(self, url):
for quality, load_balancer_url in qualities.items(): for quality, load_balancer_url in qualities.items():
load_balancer_data = self._download_json( load_balancer_data = self._download_json(
load_balancer_url, video_id, load_balancer_url, video_id,
'Downloading %s %s JSON metadata' % (format_id, quality), f'Downloading {format_id} {quality} JSON metadata',
fatal=False) or {} fatal=False) or {}
m3u8_url = load_balancer_data.get('location') m3u8_url = load_balancer_data.get('location')
if not m3u8_url: if not m3u8_url:
@ -276,7 +275,7 @@ def _real_extract(self, url):
self.raise_login_required('This video requires a subscription', method='password') self.raise_login_required('This video requires a subscription', method='password')
video = (self._download_json( video = (self._download_json(
self._API_BASE_URL + 'video/%s' % video_id, video_id, self._API_BASE_URL + f'video/{video_id}', video_id,
'Downloading additional video metadata', fatal=False) or {}).get('video') or {} 'Downloading additional video metadata', fatal=False) or {}).get('video') or {}
show = video.get('show') or {} show = video.get('show') or {}
@ -320,7 +319,7 @@ def _real_extract(self, url):
f'{self._API_BASE_URL}video/show/{show_id}', video_show_slug, f'{self._API_BASE_URL}video/show/{show_id}', video_show_slug,
'Downloading episode list', headers={ 'Downloading episode list', headers={
'X-Target-Distribution': lang, 'X-Target-Distribution': lang,
**self._HEADERS **self._HEADERS,
}, query={ }, query={
'order': 'asc', 'order': 'asc',
'limit': '-1', 'limit': '-1',

View file

@ -1,8 +1,6 @@
import urllib.parse
from .common import InfoExtractor from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urlparse,
)
class AdobeConnectIE(InfoExtractor): class AdobeConnectIE(InfoExtractor):
@ -12,13 +10,13 @@ def _real_extract(self, url):
video_id = self._match_id(url) video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id) webpage = self._download_webpage(url, video_id)
title = self._html_extract_title(webpage) title = self._html_extract_title(webpage)
qs = compat_parse_qs(self._search_regex(r"swfUrl\s*=\s*'([^']+)'", webpage, 'swf url').split('?')[1]) qs = urllib.parse.parse_qs(self._search_regex(r"swfUrl\s*=\s*'([^']+)'", webpage, 'swf url').split('?')[1])
is_live = qs.get('isLive', ['false'])[0] == 'true' is_live = qs.get('isLive', ['false'])[0] == 'true'
formats = [] formats = []
for con_string in qs['conStrings'][0].split(','): for con_string in qs['conStrings'][0].split(','):
formats.append({ formats.append({
'format_id': con_string.split('://')[0], 'format_id': con_string.split('://')[0],
'app': compat_urlparse.quote('?' + con_string.split('?')[1] + 'flvplayerapp/' + qs['appInstance'][0]), 'app': urllib.parse.quote('?' + con_string.split('?')[1] + 'flvplayerapp/' + qs['appInstance'][0]),
'ext': 'flv', 'ext': 'flv',
'play_path': 'mp4:' + qs['streamName'][0], 'play_path': 'mp4:' + qs['streamName'][0],
'rtmp_conn': 'S:' + qs['ticket'][0], 'rtmp_conn': 'S:' + qs['ticket'][0],
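
The `compat_parse_qs`/`compat_urlparse` shims removed above simply forwarded to the stdlib, so call sites translate one-for-one to `urllib.parse`. A sketch:

```python
import urllib.parse

# compat_parse_qs -> urllib.parse.parse_qs
qs = urllib.parse.parse_qs('isLive=true&streamName=demo')
assert qs['isLive'] == ['true']

# compat_urlparse.quote -> urllib.parse.quote
assert urllib.parse.quote('?a=b c') == '%3Fa%3Db%20c'
```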

File diff suppressed because it is too large

View file

@ -2,7 +2,6 @@
import re import re
from .common import InfoExtractor from .common import InfoExtractor
from ..compat import compat_str
from ..utils import ( from ..utils import (
ISO639Utils, ISO639Utils,
OnDemandPagedList, OnDemandPagedList,
@ -36,7 +35,7 @@ def _parse_subtitles(self, video_data, url_key):
return subtitles return subtitles
def _parse_video_data(self, video_data): def _parse_video_data(self, video_data):
video_id = compat_str(video_data['id']) video_id = str(video_data['id'])
title = video_data['title'] title = video_data['title']
s3_extracted = False s3_extracted = False
@ -151,7 +150,7 @@ def _fetch_page(self, display_id, query, page):
page += 1 page += 1
query['page'] = page query['page'] = page
for element_data in self._call_api( for element_data in self._call_api(
self._RESOURCE, display_id, query, 'Download Page %d' % page): self._RESOURCE, display_id, query, f'Download Page {page}'):
yield self._process_data(element_data) yield self._process_data(element_data)
def _extract_playlist_entries(self, display_id, query): def _extract_playlist_entries(self, display_id, query):

View file

@ -91,7 +91,7 @@ def _real_extract(self, url):
getShowBySlug(slug:"%s") { getShowBySlug(slug:"%s") {
%%s %%s
} }
}''' % show_path }''' % show_path # noqa: UP031
if episode_path: if episode_path:
query = query % '''title query = query % '''title
getVideoBySlug(slug:"%s") { getVideoBySlug(slug:"%s") {
@ -128,7 +128,7 @@ def _real_extract(self, url):
episode_title = title = video_data['title'] episode_title = title = video_data['title']
series = show_data.get('title') series = show_data.get('title')
if series: if series:
title = '%s - %s' % (series, title) title = f'{series} - {title}'
info = { info = {
'id': video_id, 'id': video_id,
'title': title, 'title': title,
@ -191,7 +191,7 @@ def _real_extract(self, url):
if not slug: if not slug:
continue continue
entries.append(self.url_result( entries.append(self.url_result(
'http://adultswim.com/videos/%s/%s' % (show_path, slug), f'http://adultswim.com/videos/{show_path}/{slug}',
'AdultSwim', video.get('_id'))) 'AdultSwim', video.get('_id')))
return self.playlist_result( return self.playlist_result(
entries, show_path, show_data.get('title'), entries, show_path, show_data.get('title'),
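
The `# noqa: UP031` markers above are the one sanctioned exception to the f-string migration: GraphQL query bodies are full of literal braces, which `str.format()` and f-strings would parse as replacement fields, so printf-style `%` formatting stays. A sketch:

```python
show_path = 'rick-and-morty'  # hypothetical slug

query = '''query {
  getShowBySlug(slug:"%s") {
    title
  }
}''' % show_path  # noqa: UP031

assert 'slug:"rick-and-morty"' in query
# The format() equivalent would need every literal brace doubled, e.g.
# 'query {{ getShowBySlug(slug:"{0}") {{ title }} }}'.format(show_path)
```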

View file

@ -73,8 +73,8 @@ def _extract_aen_smil(self, smil_url, video_id, auth=None):
def _extract_aetn_info(self, domain, filter_key, filter_value, url): def _extract_aetn_info(self, domain, filter_key, filter_value, url):
requestor_id, brand = self._DOMAIN_MAP[domain] requestor_id, brand = self._DOMAIN_MAP[domain]
result = self._download_json( result = self._download_json(
'https://feeds.video.aetnd.com/api/v2/%s/videos' % brand, f'https://feeds.video.aetnd.com/api/v2/{brand}/videos',
filter_value, query={'filter[%s]' % filter_key: filter_value}) filter_value, query={f'filter[{filter_key}]': filter_value})
result = traverse_obj( result = traverse_obj(
result, ('results', result, ('results',
lambda k, v: k == 0 and v[filter_key] == filter_value), lambda k, v: k == 0 and v[filter_key] == filter_value),
@ -142,7 +142,7 @@ class AENetworksIE(AENetworksBaseIE):
'skip_download': True, 'skip_download': True,
}, },
'add_ie': ['ThePlatform'], 'add_ie': ['ThePlatform'],
'skip': 'Geo-restricted - This content is not available in your location.' 'skip': 'Geo-restricted - This content is not available in your location.',
}, { }, {
'url': 'http://www.aetv.com/shows/duck-dynasty/season-9/episode-1', 'url': 'http://www.aetv.com/shows/duck-dynasty/season-9/episode-1',
'info_dict': { 'info_dict': {
@ -171,28 +171,28 @@ class AENetworksIE(AENetworksBaseIE):
'skip': 'This video is only available for users of participating TV providers.', 'skip': 'This video is only available for users of participating TV providers.',
}, { }, {
'url': 'http://www.fyi.tv/shows/tiny-house-nation/season-1/episode-8', 'url': 'http://www.fyi.tv/shows/tiny-house-nation/season-1/episode-8',
'only_matching': True 'only_matching': True,
}, { }, {
'url': 'http://www.mylifetime.com/shows/project-runway-junior/season-1/episode-6', 'url': 'http://www.mylifetime.com/shows/project-runway-junior/season-1/episode-6',
'only_matching': True 'only_matching': True,
}, { }, {
'url': 'http://www.mylifetime.com/movies/center-stage-on-pointe/full-movie', 'url': 'http://www.mylifetime.com/movies/center-stage-on-pointe/full-movie',
'only_matching': True 'only_matching': True,
}, { }, {
'url': 'https://watch.lifetimemovieclub.com/movies/10-year-reunion/full-movie', 'url': 'https://watch.lifetimemovieclub.com/movies/10-year-reunion/full-movie',
'only_matching': True 'only_matching': True,
}, { }, {
'url': 'http://www.history.com/specials/sniper-into-the-kill-zone/full-special', 'url': 'http://www.history.com/specials/sniper-into-the-kill-zone/full-special',
'only_matching': True 'only_matching': True,
}, { }, {
'url': 'https://www.aetv.com/specials/hunting-jonbenets-killer-the-untold-story/preview-hunting-jonbenets-killer-the-untold-story', 'url': 'https://www.aetv.com/specials/hunting-jonbenets-killer-the-untold-story/preview-hunting-jonbenets-killer-the-untold-story',
'only_matching': True 'only_matching': True,
}, { }, {
'url': 'http://www.history.com/videos/history-of-valentines-day', 'url': 'http://www.history.com/videos/history-of-valentines-day',
'only_matching': True 'only_matching': True,
}, { }, {
'url': 'https://play.aetv.com/shows/duck-dynasty/videos/best-of-duck-dynasty-getting-quack-in-shape', 'url': 'https://play.aetv.com/shows/duck-dynasty/videos/best-of-duck-dynasty-getting-quack-in-shape',
'only_matching': True 'only_matching': True,
}] }]
def _real_extract(self, url): def _real_extract(self, url):
@ -209,14 +209,14 @@ def _call_api(self, resource, slug, brand, fields):
%s(slug: "%s") { %s(slug: "%s") {
%s %s
} }
}''' % (resource, slug, fields), }''' % (resource, slug, fields), # noqa: UP031
}))['data'][resource] }))['data'][resource]
def _real_extract(self, url): def _real_extract(self, url):
domain, slug = self._match_valid_url(url).groups() domain, slug = self._match_valid_url(url).groups()
_, brand = self._DOMAIN_MAP[domain] _, brand = self._DOMAIN_MAP[domain]
playlist = self._call_api(self._RESOURCE, slug, brand, self._FIELDS) playlist = self._call_api(self._RESOURCE, slug, brand, self._FIELDS)
base_url = 'http://watch.%s' % domain base_url = f'http://watch.{domain}'
entries = [] entries = []
for item in (playlist.get(self._ITEMS_KEY) or []): for item in (playlist.get(self._ITEMS_KEY) or []):
@ -248,10 +248,10 @@ class AENetworksCollectionIE(AENetworksListBaseIE):
'playlist_mincount': 12, 'playlist_mincount': 12,
}, { }, {
'url': 'https://watch.historyvault.com/shows/america-the-story-of-us-2/season-1/list/america-the-story-of-us', 'url': 'https://watch.historyvault.com/shows/america-the-story-of-us-2/season-1/list/america-the-story-of-us',
'only_matching': True 'only_matching': True,
}, { }, {
'url': 'https://www.historyvault.com/collections/mysteryquest', 'url': 'https://www.historyvault.com/collections/mysteryquest',
'only_matching': True 'only_matching': True,
}] }]
_RESOURCE = 'list' _RESOURCE = 'list'
_ITEMS_KEY = 'items' _ITEMS_KEY = 'items'
@ -309,7 +309,7 @@ class HistoryTopicIE(AENetworksBaseIE):
'info_dict': { 'info_dict': {
'id': '40700995724', 'id': '40700995724',
'ext': 'mp4', 'ext': 'mp4',
'title': "History of Valentines Day", 'title': 'History of Valentines Day',
'description': 'md5:7b57ea4829b391995b405fa60bd7b5f7', 'description': 'md5:7b57ea4829b391995b405fa60bd7b5f7',
'timestamp': 1375819729, 'timestamp': 1375819729,
'upload_date': '20130806', 'upload_date': '20130806',
@ -364,6 +364,6 @@ def _real_extract(self, url):
display_id = self._match_id(url) display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id) webpage = self._download_webpage(url, display_id)
player_url = self._search_regex( player_url = self._search_regex(
r'<phoenix-iframe[^>]+src="(%s)' % HistoryPlayerIE._VALID_URL, rf'<phoenix-iframe[^>]+src="({HistoryPlayerIE._VALID_URL})',
webpage, 'player URL') webpage, 'player URL')
return self.url_result(player_url, HistoryPlayerIE.ie_key()) return self.url_result(player_url, HistoryPlayerIE.ie_key())

View file

@ -16,8 +16,8 @@ class AeonCoIE(InfoExtractor):
'uploader': 'Semiconductor', 'uploader': 'Semiconductor',
'uploader_id': 'semiconductor', 'uploader_id': 'semiconductor',
'uploader_url': 'https://vimeo.com/semiconductor', 'uploader_url': 'https://vimeo.com/semiconductor',
'duration': 348 'duration': 348,
} },
}, { }, {
'url': 'https://aeon.co/videos/dazzling-timelapse-shows-how-microbes-spoil-our-food-and-sometimes-enrich-it', 'url': 'https://aeon.co/videos/dazzling-timelapse-shows-how-microbes-spoil-our-food-and-sometimes-enrich-it',
'md5': '03582d795382e49f2fd0b427b55de409', 'md5': '03582d795382e49f2fd0b427b55de409',
@ -29,8 +29,8 @@ class AeonCoIE(InfoExtractor):
'uploader': 'Aeon Video', 'uploader': 'Aeon Video',
'uploader_id': 'aeonvideo', 'uploader_id': 'aeonvideo',
'uploader_url': 'https://vimeo.com/aeonvideo', 'uploader_url': 'https://vimeo.com/aeonvideo',
'duration': 1344 'duration': 1344,
} },
}, { }, {
'url': 'https://aeon.co/videos/chew-over-the-prisoners-dilemma-and-see-if-you-can-find-the-rational-path-out', 'url': 'https://aeon.co/videos/chew-over-the-prisoners-dilemma-and-see-if-you-can-find-the-rational-path-out',
'md5': '1cfda0bf3ae24df17d00f2c0cb6cc21b', 'md5': '1cfda0bf3ae24df17d00f2c0cb6cc21b',

View file

@ -55,7 +55,7 @@ def _perform_login(self, username, password):
if result != 1: if result != 1:
error = _ERRORS.get(result, 'You have failed to log in.') error = _ERRORS.get(result, 'You have failed to log in.')
raise ExtractorError( raise ExtractorError(
'Unable to login: %s said: %s' % (self.IE_NAME, error), f'Unable to login: {self.IE_NAME} said: {error}',
expected=True) expected=True)
@ -227,7 +227,7 @@ def _real_extract(self, url):
**traverse_obj(file_element, { **traverse_obj(file_element, {
'duration': ('duration', {functools.partial(int_or_none, scale=1000)}), 'duration': ('duration', {functools.partial(int_or_none, scale=1000)}),
'timestamp': ('file_start', {unified_timestamp}), 'timestamp': ('file_start', {unified_timestamp}),
}) }),
}) })
if traverse_obj(data, ('adult_status', {str})) == 'notLogin': if traverse_obj(data, ('adult_status', {str})) == 'notLogin':

View file

@ -168,7 +168,7 @@ def _real_extract(self, url):
for ext in ('aac', 'mp3'): for ext in ('aac', 'mp3'):
url_data = self._download_json( url_data = self._download_json(
f'https://api.podcast.radioagora.pl/api4/getSongUrl?podcast_id={media_id}&device_id={uuid.uuid4()}&ppre=false&audio={ext}', f'https://api.podcast.radioagora.pl/api4/getSongUrl?podcast_id={media_id}&device_id={uuid.uuid4()}&ppre=false&audio={ext}',
media_id, 'Downloading podcast %s URL' % ext) media_id, f'Downloading podcast {ext} URL')
# prevents inserting the mp3 (default) multiple times # prevents inserting the mp3 (default) multiple times
if 'link_ssl' in url_data and f'.{ext}' in url_data['link_ssl']: if 'link_ssl' in url_data and f'.{ext}' in url_data['link_ssl']:
formats.append({ formats.append({
@ -206,8 +206,8 @@ class TokFMAuditionIE(InfoExtractor):
} }
@staticmethod @staticmethod
def _create_url(id): def _create_url(video_id):
return f'https://audycje.tokfm.pl/audycja/{id}' return f'https://audycje.tokfm.pl/audycja/{video_id}'
def _real_extract(self, url): def _real_extract(self, url):
audition_id = self._match_id(url) audition_id = self._match_id(url)

View file

@ -26,7 +26,7 @@ class AirTVIE(InfoExtractor):
'view_count': int, 'view_count': int,
'thumbnail': 'https://cdn-sp-gcs.air.tv/videos/W/8/W87jcWleSn2hXZN47zJZsQ/b13fc56464f47d9d62a36d110b9b5a72-4096x2160_9.jpg', 'thumbnail': 'https://cdn-sp-gcs.air.tv/videos/W/8/W87jcWleSn2hXZN47zJZsQ/b13fc56464f47d9d62a36d110b9b5a72-4096x2160_9.jpg',
'timestamp': 1664792603, 'timestamp': 1664792603,
} },
}, { }, {
# with youtube_id # with youtube_id
'url': 'https://www.air.tv/watch?v=sv57EC8tRXG6h8dNXFUU1Q', 'url': 'https://www.air.tv/watch?v=sv57EC8tRXG6h8dNXFUU1Q',
@ -54,7 +54,7 @@ class AirTVIE(InfoExtractor):
'channel': 'Newsflare', 'channel': 'Newsflare',
'duration': 37, 'duration': 37,
'upload_date': '20180511', 'upload_date': '20180511',
} },
}] }]
def _get_formats_and_subtitle(self, json_data, video_id): def _get_formats_and_subtitle(self, json_data, video_id):

View file

@ -22,7 +22,7 @@ class AitubeKZVideoIE(InfoExtractor):
'timestamp': 1667370519, 'timestamp': 1667370519,
'title': 'Ангел хранитель 1 серия', 'title': 'Ангел хранитель 1 серия',
'channel_follower_count': int, 'channel_follower_count': int,
} },
}, { }, {
# embed url # embed url
'url': 'https://aitube.kz/embed/?id=9291d29b-c038-49a1-ad42-3da2051d353c', 'url': 'https://aitube.kz/embed/?id=9291d29b-c038-49a1-ad42-3da2051d353c',

View file

@ -1,5 +1,4 @@
from .common import InfoExtractor from .common import InfoExtractor
from ..compat import compat_str
from ..utils import ( from ..utils import (
float_or_none, float_or_none,
try_get, try_get,
@ -44,7 +43,7 @@ def _real_extract(self, url):
'title': title, 'title': title,
'thumbnail': data.get('coverUrl'), 'thumbnail': data.get('coverUrl'),
'uploader': try_get( 'uploader': try_get(
data, lambda x: x['followBar']['name'], compat_str), data, lambda x: x['followBar']['name'], str),
'timestamp': float_or_none(data.get('startTimeLong'), scale=1000), 'timestamp': float_or_none(data.get('startTimeLong'), scale=1000),
'formats': formats, 'formats': formats,
} }

View file

@ -18,7 +18,7 @@ class AlJazeeraIE(InfoExtractor):
'timestamp': 1636219149, 'timestamp': 1636219149,
'description': 'U sarajevskim naseljima Rajlovac i Reljevo stambeni objekti, ali i industrijska postrojenja i dalje su pod vodom.', 'description': 'U sarajevskim naseljima Rajlovac i Reljevo stambeni objekti, ali i industrijska postrojenja i dalje su pod vodom.',
'upload_date': '20211106', 'upload_date': '20211106',
} },
}, { }, {
'url': 'https://balkans.aljazeera.net/videos/2021/11/6/djokovic-usao-u-finale-mastersa-u-parizu', 'url': 'https://balkans.aljazeera.net/videos/2021/11/6/djokovic-usao-u-finale-mastersa-u-parizu',
'info_dict': { 'info_dict': {
@ -33,7 +33,7 @@ class AlJazeeraIE(InfoExtractor):
BRIGHTCOVE_URL_RE = r'https?://players.brightcove.net/(?P<account>\d+)/(?P<player_id>[a-zA-Z0-9]+)_(?P<embed>[^/]+)/index.html\?videoId=(?P<id>\d+)' BRIGHTCOVE_URL_RE = r'https?://players.brightcove.net/(?P<account>\d+)/(?P<player_id>[a-zA-Z0-9]+)_(?P<embed>[^/]+)/index.html\?videoId=(?P<id>\d+)'
def _real_extract(self, url): def _real_extract(self, url):
base, post_type, id = self._match_valid_url(url).groups() base, post_type, display_id = self._match_valid_url(url).groups()
wp = { wp = {
'balkans.aljazeera.net': 'ajb', 'balkans.aljazeera.net': 'ajb',
'chinese.aljazeera.net': 'chinese', 'chinese.aljazeera.net': 'chinese',
@ -47,11 +47,11 @@ def _real_extract(self, url):
'news': 'news', 'news': 'news',
}[post_type.split('/')[0]] }[post_type.split('/')[0]]
video = self._download_json( video = self._download_json(
f'https://{base}/graphql', id, query={ f'https://{base}/graphql', display_id, query={
'wp-site': wp, 'wp-site': wp,
'operationName': 'ArchipelagoSingleArticleQuery', 'operationName': 'ArchipelagoSingleArticleQuery',
'variables': json.dumps({ 'variables': json.dumps({
'name': id, 'name': display_id,
'postType': post_type, 'postType': post_type,
}), }),
}, headers={ }, headers={
@ -64,7 +64,7 @@ def _real_extract(self, url):
embed = 'default' embed = 'default'
if video_id is None: if video_id is None:
webpage = self._download_webpage(url, id) webpage = self._download_webpage(url, display_id)
account, player_id, embed, video_id = self._search_regex(self.BRIGHTCOVE_URL_RE, webpage, 'video id', account, player_id, embed, video_id = self._search_regex(self.BRIGHTCOVE_URL_RE, webpage, 'video id',
group=(1, 2, 3, 4), default=(None, None, None, None)) group=(1, 2, 3, 4), default=(None, None, None, None))
@ -73,11 +73,11 @@ def _real_extract(self, url):
return { return {
'_type': 'url_transparent', '_type': 'url_transparent',
'url': url, 'url': url,
'ie_key': 'Generic' 'ie_key': 'Generic',
} }
return { return {
'_type': 'url_transparent', '_type': 'url_transparent',
'url': f'https://players.brightcove.net/{account}/{player_id}_{embed}/index.html?videoId={video_id}', 'url': f'https://players.brightcove.net/{account}/{player_id}_{embed}/index.html?videoId={video_id}',
'ie_key': 'BrightcoveNew' 'ie_key': 'BrightcoveNew',
} }
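
The renaming of the local `id` to `display_id` above avoids shadowing the builtin `id()` for the rest of the function. A sketch of why the rename matters:

```python
def lookup(url):
    display_id = url.rsplit('/', 1)[-1]  # was: id = ..., hiding the builtin
    return display_id, id(url)           # builtin id() remains reachable

slug, obj_id = lookup('https://balkans.aljazeera.net/videos/2021/11/6/example')
assert slug == 'example' and isinstance(obj_id, int)
```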

View file

@ -1,5 +1,4 @@
from .common import InfoExtractor from .common import InfoExtractor
from ..compat import compat_str
from ..utils import ( from ..utils import (
int_or_none, int_or_none,
qualities, qualities,
@ -95,11 +94,11 @@ def _real_extract(self, url):
duration = int_or_none(video.get('duration')) duration = int_or_none(video.get('duration'))
view_count = int_or_none(video.get('view_count')) view_count = int_or_none(video.get('view_count'))
timestamp = unified_timestamp(try_get( timestamp = unified_timestamp(try_get(
video, lambda x: x['added_at']['date'], compat_str)) video, lambda x: x['added_at']['date'], str))
else: else:
video_id = display_id video_id = display_id
media_data = self._download_json( media_data = self._download_json(
'http://www.allocine.fr/ws/AcVisiondataV5.ashx?media=%s' % video_id, display_id) f'http://www.allocine.fr/ws/AcVisiondataV5.ashx?media={video_id}', display_id)
title = remove_end(strip_or_none(self._html_extract_title(webpage), ' - AlloCiné')) title = remove_end(strip_or_none(self._html_extract_title(webpage), ' - AlloCiné'))
for key, value in media_data['video'].items(): for key, value in media_data['video'].items():
if not key.endswith('Path'): if not key.endswith('Path'):

View file

@ -33,27 +33,27 @@
video: getClip(clipIdentifier: $id) { video: getClip(clipIdentifier: $id) {
%s %s %s %s
} }
}''' % (_FIELDS, _EXTRA_FIELDS), }''' % (_FIELDS, _EXTRA_FIELDS), # noqa: UP031
'montage': '''query ($id: String!) { 'montage': '''query ($id: String!) {
video: getMontage(clipIdentifier: $id) { video: getMontage(clipIdentifier: $id) {
%s %s
} }
}''' % _FIELDS, }''' % _FIELDS, # noqa: UP031
'Clips': '''query ($page: Int!, $user: String!, $game: Int) { 'Clips': '''query ($page: Int!, $user: String!, $game: Int) {
videos: clips(search: createdDate, page: $page, user: $user, mobile: false, game: $game) { videos: clips(search: createdDate, page: $page, user: $user, mobile: false, game: $game) {
data { %s %s } data { %s %s }
} }
}''' % (_FIELDS, _EXTRA_FIELDS), }''' % (_FIELDS, _EXTRA_FIELDS), # noqa: UP031
'Montages': '''query ($page: Int!, $user: String!) { 'Montages': '''query ($page: Int!, $user: String!) {
videos: montages(search: createdDate, page: $page, user: $user) { videos: montages(search: createdDate, page: $page, user: $user) {
data { %s } data { %s }
} }
}''' % _FIELDS, }''' % _FIELDS, # noqa: UP031
'Mobile Clips': '''query ($page: Int!, $user: String!) { 'Mobile Clips': '''query ($page: Int!, $user: String!) {
videos: clips(search: createdDate, page: $page, user: $user, mobile: true) { videos: clips(search: createdDate, page: $page, user: $user, mobile: true) {
data { %s %s } data { %s %s }
} }
}''' % (_FIELDS, _EXTRA_FIELDS), }''' % (_FIELDS, _EXTRA_FIELDS), # noqa: UP031
} }
@ -121,7 +121,7 @@ class AllstarIE(AllstarBaseIE):
'uploader_url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d', 'uploader_url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d',
'upload_date': '20230425', 'upload_date': '20230425',
'view_count': int, 'view_count': int,
} },
}, { }, {
'url': 'https://allstar.gg/clip?clip=8LJLY4JKB', 'url': 'https://allstar.gg/clip?clip=8LJLY4JKB',
'info_dict': { 'info_dict': {
@ -139,7 +139,7 @@ class AllstarIE(AllstarBaseIE):
'uploader_url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d', 'uploader_url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d',
'upload_date': '20230702', 'upload_date': '20230702',
'view_count': int, 'view_count': int,
} },
}, { }, {
'url': 'https://allstar.gg/montage?montage=643e64089da7e9363e1fa66c', 'url': 'https://allstar.gg/montage?montage=643e64089da7e9363e1fa66c',
'info_dict': { 'info_dict': {
@ -155,7 +155,7 @@ class AllstarIE(AllstarBaseIE):
'uploader_url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d', 'uploader_url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d',
'upload_date': '20230418', 'upload_date': '20230418',
'view_count': int, 'view_count': int,
} },
}, { }, {
'url': 'https://allstar.gg/montage?montage=RILJMH6QOS', 'url': 'https://allstar.gg/montage?montage=RILJMH6QOS',
'info_dict': { 'info_dict': {
@ -171,7 +171,7 @@ class AllstarIE(AllstarBaseIE):
'uploader_url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d', 'uploader_url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d',
'upload_date': '20230703', 'upload_date': '20230703',
'view_count': int, 'view_count': int,
} },
}] }]
def _real_extract(self, url): def _real_extract(self, url):
@ -191,28 +191,28 @@ class AllstarProfileIE(AllstarBaseIE):
'id': '62b8bdfc9021052f7905882d-clips', 'id': '62b8bdfc9021052f7905882d-clips',
'title': 'cherokee - Clips', 'title': 'cherokee - Clips',
}, },
'playlist_mincount': 15 'playlist_mincount': 15,
}, { }, {
'url': 'https://allstar.gg/u/cherokee?game=730&view=Clips', 'url': 'https://allstar.gg/u/cherokee?game=730&view=Clips',
'info_dict': { 'info_dict': {
'id': '62b8bdfc9021052f7905882d-clips-730', 'id': '62b8bdfc9021052f7905882d-clips-730',
'title': 'cherokee - Clips - 730', 'title': 'cherokee - Clips - 730',
}, },
'playlist_mincount': 15 'playlist_mincount': 15,
}, { }, {
'url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d?view=Montages', 'url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d?view=Montages',
'info_dict': { 'info_dict': {
'id': '62b8bdfc9021052f7905882d-montages', 'id': '62b8bdfc9021052f7905882d-montages',
'title': 'cherokee - Montages', 'title': 'cherokee - Montages',
}, },
'playlist_mincount': 4 'playlist_mincount': 4,
}, { }, {
'url': 'https://allstar.gg/profile?user=cherokee&view=Mobile Clips', 'url': 'https://allstar.gg/profile?user=cherokee&view=Mobile Clips',
'info_dict': { 'info_dict': {
'id': '62b8bdfc9021052f7905882d-mobile', 'id': '62b8bdfc9021052f7905882d-mobile',
'title': 'cherokee - Mobile Clips', 'title': 'cherokee - Mobile Clips',
}, },
'playlist_mincount': 1 'playlist_mincount': 1,
}] }]
_PAGE_SIZE = 10 _PAGE_SIZE = 10

View file

@ -25,7 +25,7 @@ class AlphaPornoIE(InfoExtractor):
'tbr': 1145, 'tbr': 1145,
'categories': list, 'categories': list,
'age_limit': 18, 'age_limit': 18,
} },
} }
def _real_extract(self, url): def _real_extract(self, url):

View file

@ -12,7 +12,7 @@
class Alsace20TVBaseIE(InfoExtractor): class Alsace20TVBaseIE(InfoExtractor):
def _extract_video(self, video_id, url=None): def _extract_video(self, video_id, url=None):
info = self._download_json( info = self._download_json(
'https://www.alsace20.tv/visionneuse/visio_v9_js.php?key=%s&habillage=0&mode=html' % (video_id, ), f'https://www.alsace20.tv/visionneuse/visio_v9_js.php?key={video_id}&habillage=0&mode=html',
video_id) or {} video_id) or {}
title = info.get('titre') title = info.get('titre')
@ -24,9 +24,9 @@ def _extract_video(self, video_id, url=None):
else self._extract_mpd_formats(fmt_url, video_id, mpd_id=res, fatal=False)) else self._extract_mpd_formats(fmt_url, video_id, mpd_id=res, fatal=False))
webpage = (url and self._download_webpage(url, video_id, fatal=False)) or '' webpage = (url and self._download_webpage(url, video_id, fatal=False)) or ''
thumbnail = url_or_none(dict_get(info, ('image', 'preview', )) or self._og_search_thumbnail(webpage)) thumbnail = url_or_none(dict_get(info, ('image', 'preview')) or self._og_search_thumbnail(webpage))
upload_date = self._search_regex(r'/(\d{6})_', thumbnail, 'upload_date', default=None) upload_date = self._search_regex(r'/(\d{6})_', thumbnail, 'upload_date', default=None)
upload_date = unified_strdate('20%s-%s-%s' % (upload_date[:2], upload_date[2:4], upload_date[4:])) if upload_date else None upload_date = unified_strdate(f'20{upload_date[:2]}-{upload_date[2:4]}-{upload_date[4:]}') if upload_date else None
return { return {
'id': video_id, 'id': video_id,
'title': title, 'title': title,


@ -34,7 +34,7 @@ class AltCensoredIE(InfoExtractor):
'thumbnail': 'https://archive.org/download/youtube-k0srjLSkga8/youtube-k0srjLSkga8.thumbs/k0srjLSkga8_000925.jpg', 'thumbnail': 'https://archive.org/download/youtube-k0srjLSkga8/youtube-k0srjLSkga8.thumbs/k0srjLSkga8_000925.jpg',
'view_count': int, 'view_count': int,
'categories': ['News & Politics'], 'categories': ['News & Politics'],
} },
}] }]
def _real_extract(self, url): def _real_extract(self, url):


@ -1,7 +1,7 @@
import re import re
import urllib.parse
from .common import InfoExtractor from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import ( from ..utils import (
ExtractorError, ExtractorError,
clean_html, clean_html,
@ -21,7 +21,7 @@ class AluraIE(InfoExtractor):
'info_dict': { 'info_dict': {
'id': '60095', 'id': '60095',
'ext': 'mp4', 'ext': 'mp4',
'title': 'Referências, ref-set e alter' 'title': 'Referências, ref-set e alter',
}, },
'skip': 'Requires alura account credentials'}, 'skip': 'Requires alura account credentials'},
{ {
@ -30,7 +30,7 @@ class AluraIE(InfoExtractor):
'only_matching': True}, 'only_matching': True},
{ {
'url': 'https://cursos.alura.com.br/course/fundamentos-market-digital/task/55219', 'url': 'https://cursos.alura.com.br/course/fundamentos-market-digital/task/55219',
'only_matching': True} 'only_matching': True},
] ]
def _real_extract(self, url): def _real_extract(self, url):
@ -62,7 +62,7 @@ def _real_extract(self, url):
return { return {
'id': video_id, 'id': video_id,
'title': video_title, 'title': video_title,
"formats": formats 'formats': formats,
} }
def _perform_login(self, username, password): def _perform_login(self, username, password):
@ -91,7 +91,7 @@ def is_logged(webpage):
'post url', default=self._LOGIN_URL, group='url') 'post url', default=self._LOGIN_URL, group='url')
if not post_url.startswith('http'): if not post_url.startswith('http'):
post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url) post_url = urllib.parse.urljoin(self._LOGIN_URL, post_url)
response = self._download_webpage( response = self._download_webpage(
post_url, None, 'Logging in', post_url, None, 'Logging in',
@ -103,7 +103,7 @@ def is_logged(webpage):
r'(?s)<p[^>]+class="alert-message[^"]*">(.+?)</p>', r'(?s)<p[^>]+class="alert-message[^"]*">(.+?)</p>',
response, 'error message', default=None) response, 'error message', default=None)
if error: if error:
raise ExtractorError('Unable to login: %s' % error, expected=True) raise ExtractorError(f'Unable to login: {error}', expected=True)
raise ExtractorError('Unable to log in') raise ExtractorError('Unable to log in')
@ -119,7 +119,7 @@ class AluraCourseIE(AluraIE): # XXX: Do not subclass from concrete IE
@classmethod @classmethod
def suitable(cls, url): def suitable(cls, url):
return False if AluraIE.suitable(url) else super(AluraCourseIE, cls).suitable(url) return False if AluraIE.suitable(url) else super().suitable(url)
def _real_extract(self, url): def _real_extract(self, url):
@ -157,7 +157,7 @@ def _real_extract(self, url):
'url': video_url, 'url': video_url,
'id_key': self.ie_key(), 'id_key': self.ie_key(),
'chapter': chapter, 'chapter': chapter,
'chapter_number': chapter_number 'chapter_number': chapter_number,
} }
entries.append(entry) entries.append(entry)
return self.playlist_result(entries, course_path, course_title) return self.playlist_result(entries, course_path, course_title)
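
The alura hunks swap the project-internal `compat_urlparse` alias for the stdlib `urllib.parse` it wrapped. Assuming the alias was a plain re-export, which is what the mechanical rewrite suggests, the login-form URL resolution behaves identically; the URLs below are illustrative only:

```python
import urllib.parse

# Relative form-post targets resolve against the login page URL,
# exactly as with the old compat alias.
base = 'https://cursos.alura.com.br/loginForm'
print(urllib.parse.urljoin(base, '/signin'))        # absolute path
print(urllib.parse.urljoin(base, 'mobile/signin'))  # relative path
```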


@ -24,7 +24,7 @@ class AmadeusTVIE(InfoExtractor):
'display_id': '65091a87ff85af59d9fc54c3', 'display_id': '65091a87ff85af59d9fc54c3',
'view_count': int, 'view_count': int,
'description': 'md5:a0357b9c215489e2067cbae0b777bb95', 'description': 'md5:a0357b9c215489e2067cbae0b777bb95',
} },
}] }]
def _real_extract(self, url): def _real_extract(self, url):


@ -25,7 +25,7 @@ class AmaraIE(InfoExtractor):
'uploader': 'PBS NewsHour', 'uploader': 'PBS NewsHour',
'uploader_id': 'PBSNewsHour', 'uploader_id': 'PBSNewsHour',
'timestamp': 1549639570, 'timestamp': 1549639570,
} },
}, { }, {
# Vimeo # Vimeo
'url': 'https://amara.org/en/videos/kYkK1VUTWW5I/info/vimeo-at-ces-2011', 'url': 'https://amara.org/en/videos/kYkK1VUTWW5I/info/vimeo-at-ces-2011',
@ -40,8 +40,8 @@ class AmaraIE(InfoExtractor):
'timestamp': 1294763658, 'timestamp': 1294763658,
'upload_date': '20110111', 'upload_date': '20110111',
'uploader': 'Sam Morrill', 'uploader': 'Sam Morrill',
'uploader_id': 'sammorrill' 'uploader_id': 'sammorrill',
} },
}, { }, {
# Direct Link # Direct Link
'url': 'https://amara.org/en/videos/s8KL7I3jLmh6/info/the-danger-of-a-single-story/', 'url': 'https://amara.org/en/videos/s8KL7I3jLmh6/info/the-danger-of-a-single-story/',
@ -55,13 +55,13 @@ class AmaraIE(InfoExtractor):
'subtitles': dict, 'subtitles': dict,
'upload_date': '20091007', 'upload_date': '20091007',
'timestamp': 1254942511, 'timestamp': 1254942511,
} },
}] }]
def _real_extract(self, url): def _real_extract(self, url):
video_id = self._match_id(url) video_id = self._match_id(url)
meta = self._download_json( meta = self._download_json(
'https://amara.org/api/videos/%s/' % video_id, f'https://amara.org/api/videos/{video_id}/',
video_id, query={'format': 'json'}) video_id, query={'format': 'json'})
title = meta['title'] title = meta['title']
video_url = meta['all_urls'][0] video_url = meta['all_urls'][0]


@ -61,13 +61,13 @@ class AmazonStoreIE(InfoExtractor):
}] }]
def _real_extract(self, url): def _real_extract(self, url):
id = self._match_id(url) playlist_id = self._match_id(url)
for retry in self.RetryManager(): for retry in self.RetryManager():
webpage = self._download_webpage(url, id) webpage = self._download_webpage(url, playlist_id)
try: try:
data_json = self._search_json( data_json = self._search_json(
r'var\s?obj\s?=\s?jQuery\.parseJSON\(\'', webpage, 'data', id, r'var\s?obj\s?=\s?jQuery\.parseJSON\(\'', webpage, 'data', playlist_id,
transform_source=js_to_json) transform_source=js_to_json)
except ExtractorError as e: except ExtractorError as e:
retry.error = e retry.error = e
@ -81,7 +81,7 @@ def _real_extract(self, url):
'height': int_or_none(video.get('videoHeight')), 'height': int_or_none(video.get('videoHeight')),
'width': int_or_none(video.get('videoWidth')), 'width': int_or_none(video.get('videoWidth')),
} for video in (data_json.get('videos') or []) if video.get('isVideo') and video.get('url')] } for video in (data_json.get('videos') or []) if video.get('isVideo') and video.get('url')]
return self.playlist_result(entries, playlist_id=id, playlist_title=data_json.get('title')) return self.playlist_result(entries, playlist_id=playlist_id, playlist_title=data_json.get('title'))
class AmazonReviewsIE(InfoExtractor): class AmazonReviewsIE(InfoExtractor):
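
Renaming `id` to `playlist_id` above (and, later in this diff, `format` to `fmt`) removes builtin shadowing, which the flake8-builtins rules in ruff flag. A sketch of the failure mode the rename prevents; the URL is made up:

```python
def shadowed(url):
    id = url.rsplit('/', 1)[-1]  # shadows the builtin id() in this scope
    return id(url)               # TypeError: 'str' object is not callable

def renamed(url):
    playlist_id = url.rsplit('/', 1)[-1]
    return playlist_id, id(url)  # the builtin is still reachable

try:
    shadowed('https://www.amazon.in/vdp/0f10aff0')
except TypeError as exc:
    print(exc)
print(renamed('https://www.amazon.in/vdp/0f10aff0'))
```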


@ -25,7 +25,7 @@ def _call_api(self, asin, data=None, note=None):
asin, note=note, headers={ asin, note=note, headers={
'Content-Type': 'application/json', 'Content-Type': 'application/json',
'currentpageurl': '/', 'currentpageurl': '/',
'currentplatform': 'dWeb' 'currentplatform': 'dWeb',
}, data=json.dumps(data).encode() if data else None, }, data=json.dumps(data).encode() if data else None,
query=None if data else { query=None if data else {
'deviceType': 'A1WMMUXPCUJL4N', 'deviceType': 'A1WMMUXPCUJL4N',


@ -64,8 +64,8 @@ def _real_extract(self, url):
site, display_id = self._match_valid_url(url).groups() site, display_id = self._match_valid_url(url).groups()
requestor_id = self._REQUESTOR_ID_MAP[site] requestor_id = self._REQUESTOR_ID_MAP[site]
page_data = self._download_json( page_data = self._download_json(
'https://content-delivery-gw.svc.ds.amcn.com/api/v2/content/amcn/%s/url/%s' f'https://content-delivery-gw.svc.ds.amcn.com/api/v2/content/amcn/{requestor_id.lower()}/url/{display_id}',
% (requestor_id.lower(), display_id), display_id)['data'] display_id)['data']
properties = page_data.get('properties') or {} properties = page_data.get('properties') or {}
query = { query = {
'mbr': 'true', 'mbr': 'true',
@ -76,15 +76,15 @@ def _real_extract(self, url):
try: try:
for v in page_data['children']: for v in page_data['children']:
if v.get('type') == 'video-player': if v.get('type') == 'video-player':
releasePid = v['properties']['currentVideo']['meta']['releasePid'] release_pid = v['properties']['currentVideo']['meta']['releasePid']
tp_path = 'M_UwQC/' + releasePid tp_path = 'M_UwQC/' + release_pid
media_url = 'https://link.theplatform.com/s/' + tp_path media_url = 'https://link.theplatform.com/s/' + tp_path
video_player_count += 1 video_player_count += 1
except KeyError: except KeyError:
pass pass
if video_player_count > 1: if video_player_count > 1:
self.report_warning( self.report_warning(
'The JSON data has %d video players. Only one will be extracted' % video_player_count) f'The JSON data has {video_player_count} video players. Only one will be extracted')
# Fall back to videoPid if releasePid not found. # Fall back to videoPid if releasePid not found.
# TODO: Fall back to videoPid if releasePid manifest uses DRM. # TODO: Fall back to videoPid if releasePid manifest uses DRM.
@ -131,7 +131,7 @@ def _real_extract(self, url):
}) })
ns_keys = theplatform_metadata.get('$xmlns', {}).keys() ns_keys = theplatform_metadata.get('$xmlns', {}).keys()
if ns_keys: if ns_keys:
ns = list(ns_keys)[0] ns = next(iter(ns_keys))
episode = theplatform_metadata.get(ns + '$episodeTitle') or None episode = theplatform_metadata.get(ns + '$episodeTitle') or None
episode_number = int_or_none( episode_number = int_or_none(
theplatform_metadata.get(ns + '$episode')) theplatform_metadata.get(ns + '$episode'))
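
The `list(ns_keys)[0]` to `next(iter(ns_keys))` change above avoids materializing a whole list just to read one element (ruff's RUF015, presumably). A sketch with a stand-in namespace mapping:

```python
ns_keys = {'pl1$': 'http://access.auditude.com/data'}.keys()

first_old = list(ns_keys)[0]      # builds a throwaway list
first_new = next(iter(ns_keys))   # pulls a single element lazily
assert first_old == first_new == 'pl1$'

# Unlike indexing, next() accepts a default for empty iterables:
print(next(iter([]), None))  # -> None
```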


@ -87,13 +87,13 @@ def _real_extract(self, url):
resource_type = 'episodes' resource_type = 'episodes'
resource = self._download_json( resource = self._download_json(
'https://www.americastestkitchen.com/api/v6/%s/%s' % (resource_type, video_id), video_id) f'https://www.americastestkitchen.com/api/v6/{resource_type}/{video_id}', video_id)
video = resource['video'] if is_episode else resource video = resource['video'] if is_episode else resource
episode = resource if is_episode else resource.get('episode') or {} episode = resource if is_episode else resource.get('episode') or {}
return { return {
'_type': 'url_transparent', '_type': 'url_transparent',
'url': 'https://player.zype.com/embed/%s.js?api_key=jZ9GUhRmxcPvX7M3SlfejB6Hle9jyHTdk2jVxG7wOHPLODgncEKVdPYBhuz9iWXQ' % video['zypeId'], 'url': 'https://player.zype.com/embed/{}.js?api_key=jZ9GUhRmxcPvX7M3SlfejB6Hle9jyHTdk2jVxG7wOHPLODgncEKVdPYBhuz9iWXQ'.format(video['zypeId']),
'ie_key': 'Zype', 'ie_key': 'Zype',
'description': clean_html(video.get('description')), 'description': clean_html(video.get('description')),
'timestamp': unified_timestamp(video.get('publishDate')), 'timestamp': unified_timestamp(video.get('publishDate')),
@ -174,22 +174,22 @@ def _real_extract(self, url):
] ]
if season_number: if season_number:
playlist_id = 'season_%d' % season_number playlist_id = f'season_{season_number}'
playlist_title = 'Season %d' % season_number playlist_title = f'Season {season_number}'
facet_filters.append('search_season_list:' + playlist_title) facet_filters.append('search_season_list:' + playlist_title)
else: else:
playlist_id = show playlist_id = show
playlist_title = title playlist_title = title
season_search = self._download_json( season_search = self._download_json(
'https://y1fnzxui30-dsn.algolia.net/1/indexes/everest_search_%s_season_desc_production' % slug, f'https://y1fnzxui30-dsn.algolia.net/1/indexes/everest_search_{slug}_season_desc_production',
playlist_id, headers={ playlist_id, headers={
'Origin': 'https://www.americastestkitchen.com', 'Origin': 'https://www.americastestkitchen.com',
'X-Algolia-API-Key': '8d504d0099ed27c1b73708d22871d805', 'X-Algolia-API-Key': '8d504d0099ed27c1b73708d22871d805',
'X-Algolia-Application-Id': 'Y1FNZXUI30', 'X-Algolia-Application-Id': 'Y1FNZXUI30',
}, query={ }, query={
'facetFilters': json.dumps(facet_filters), 'facetFilters': json.dumps(facet_filters),
'attributesToRetrieve': 'description,search_%s_episode_number,search_document_date,search_url,title,search_atk_episode_season' % slug, 'attributesToRetrieve': f'description,search_{slug}_episode_number,search_document_date,search_url,title,search_atk_episode_season',
'attributesToHighlight': '', 'attributesToHighlight': '',
'hitsPerPage': 1000, 'hitsPerPage': 1000,
}) })
@ -207,7 +207,7 @@ def entries():
'description': episode.get('description'), 'description': episode.get('description'),
'timestamp': unified_timestamp(episode.get('search_document_date')), 'timestamp': unified_timestamp(episode.get('search_document_date')),
'season_number': season_number, 'season_number': season_number,
'episode_number': int_or_none(episode.get('search_%s_episode_number' % slug)), 'episode_number': int_or_none(episode.get(f'search_{slug}_episode_number')),
'ie_key': AmericasTestKitchenIE.ie_key(), 'ie_key': AmericasTestKitchenIE.ie_key(),
} }
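
Most `'%s' % ...` interpolations in this diff become f-strings, as in the API URLs above. The two forms are equivalent for simple substitutions; the identifiers here are stand-ins:

```python
resource_type, video_id = 'episodes', '5b400b9ee338f922cb06450c'

old = 'https://www.americastestkitchen.com/api/v6/%s/%s' % (resource_type, video_id)
new = f'https://www.americastestkitchen.com/api/v6/{resource_type}/{video_id}'
assert old == new
```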


@ -19,12 +19,12 @@ def _extract_feed_info(self, url):
'Unable to download Akamai AMP feed', transform_source=strip_jsonp) 'Unable to download Akamai AMP feed', transform_source=strip_jsonp)
item = feed.get('channel', {}).get('item') item = feed.get('channel', {}).get('item')
if not item: if not item:
raise ExtractorError('%s said: %s' % (self.IE_NAME, feed['error'])) raise ExtractorError('{} said: {}'.format(self.IE_NAME, feed['error']))
video_id = item['guid'] video_id = item['guid']
def get_media_node(name, default=None): def get_media_node(name, default=None):
media_name = 'media-%s' % name media_name = f'media-{name}'
media_group = item.get('media-group') or item media_group = item.get('media-group') or item
return media_group.get(media_name) or item.get(media_name) or item.get(name, default) return media_group.get(media_name) or item.get(media_name) or item.get(name, default)


@ -29,7 +29,7 @@ class AnchorFMEpisodeIE(InfoExtractor):
'release_date': '20230121', 'release_date': '20230121',
'release_timestamp': 1674285179, 'release_timestamp': 1674285179,
'episode_id': 'e1tpt3d', 'episode_id': 'e1tpt3d',
} },
}, { }, {
# embed url # embed url
'url': 'https://anchor.fm/apakatatempo/embed/episodes/S2E75-Perang-Bintang-di-Balik-Kasus-Ferdy-Sambo-dan-Ismail-Bolong-e1shjqd', 'url': 'https://anchor.fm/apakatatempo/embed/episodes/S2E75-Perang-Bintang-di-Balik-Kasus-Ferdy-Sambo-dan-Ismail-Bolong-e1shjqd',
@ -50,7 +50,7 @@ class AnchorFMEpisodeIE(InfoExtractor):
'season': 'Season 2', 'season': 'Season 2',
'season_number': 2, 'season_number': 2,
'episode_id': 'e1shjqd', 'episode_id': 'e1shjqd',
} },
}] }]
_WEBPAGE_TESTS = [{ _WEBPAGE_TESTS = [{
@ -72,7 +72,7 @@ class AnchorFMEpisodeIE(InfoExtractor):
'thumbnail': 'https://s3-us-west-2.amazonaws.com/anchor-generated-image-bank/production/podcast_uploaded_episode400/2627805/2627805-1671590688729-4db3882ac9e4b.jpg', 'thumbnail': 'https://s3-us-west-2.amazonaws.com/anchor-generated-image-bank/production/podcast_uploaded_episode400/2627805/2627805-1671590688729-4db3882ac9e4b.jpg',
'uploader': 'Podcast Tempo', 'uploader': 'Podcast Tempo',
'channel': 'apakatatempo', 'channel': 'apakatatempo',
} },
}] }]
def _real_extract(self, url): def _real_extract(self, url):


@ -15,8 +15,8 @@ class AngelIE(InfoExtractor):
'title': 'Tuttle Twins Season 1, Episode 1: When Laws Give You Lemons', 'title': 'Tuttle Twins Season 1, Episode 1: When Laws Give You Lemons',
'description': 'md5:73b704897c20ab59c433a9c0a8202d5e', 'description': 'md5:73b704897c20ab59c433a9c0a8202d5e',
'thumbnail': r're:^https?://images.angelstudios.com/image/upload/angel-app/.*$', 'thumbnail': r're:^https?://images.angelstudios.com/image/upload/angel-app/.*$',
'duration': 1359.0 'duration': 1359.0,
} },
}, { }, {
'url': 'https://www.angel.com/watch/the-chosen/episode/8dfb714d-bca5-4812-8125-24fb9514cd10/season-1/episode-1/i-have-called-you-by-name', 'url': 'https://www.angel.com/watch/the-chosen/episode/8dfb714d-bca5-4812-8125-24fb9514cd10/season-1/episode-1/i-have-called-you-by-name',
'md5': 'e4774bad0a5f0ad2e90d175cafdb797d', 'md5': 'e4774bad0a5f0ad2e90d175cafdb797d',
@ -26,8 +26,8 @@ class AngelIE(InfoExtractor):
'title': 'The Chosen Season 1, Episode 1: I Have Called You By Name', 'title': 'The Chosen Season 1, Episode 1: I Have Called You By Name',
'description': 'md5:aadfb4827a94415de5ff6426e6dee3be', 'description': 'md5:aadfb4827a94415de5ff6426e6dee3be',
'thumbnail': r're:^https?://images.angelstudios.com/image/upload/angel-app/.*$', 'thumbnail': r're:^https?://images.angelstudios.com/image/upload/angel-app/.*$',
'duration': 3276.0 'duration': 3276.0,
} },
}] }]
def _real_extract(self, url): def _real_extract(self, url):
@ -44,7 +44,7 @@ def _real_extract(self, url):
'title': self._og_search_title(webpage), 'title': self._og_search_title(webpage),
'description': self._og_search_description(webpage), 'description': self._og_search_description(webpage),
'formats': formats, 'formats': formats,
'subtitles': subtitles 'subtitles': subtitles,
} }
# Angel uses cloudinary in the background and supports image transformations. # Angel uses cloudinary in the background and supports image transformations.


@ -105,7 +105,7 @@ def _real_extract(self, url):
info = self._search_json_ld(webpage, video_id, expected_type='NewsArticle') info = self._search_json_ld(webpage, video_id, expected_type='NewsArticle')
embed_urls = list(Ant1NewsGrEmbedIE._extract_embed_urls(url, webpage)) embed_urls = list(Ant1NewsGrEmbedIE._extract_embed_urls(url, webpage))
if not embed_urls: if not embed_urls:
raise ExtractorError('no videos found for %s' % video_id, expected=True) raise ExtractorError(f'no videos found for {video_id}', expected=True)
return self.playlist_from_matches( return self.playlist_from_matches(
embed_urls, video_id, info.get('title'), ie=Ant1NewsGrEmbedIE.ie_key(), embed_urls, video_id, info.get('title'), ie=Ant1NewsGrEmbedIE.ie_key(),
video_kwargs={'url_transparent': True, 'timestamp': info.get('timestamp')}) video_kwargs={'url_transparent': True, 'timestamp': info.get('timestamp')})


@ -238,7 +238,7 @@ class AnvatoIE(InfoExtractor):
'gray': 'anvato_mcp_gray_web_prod_4c10f067c393ed8fc453d3930f8ab2b159973900', 'gray': 'anvato_mcp_gray_web_prod_4c10f067c393ed8fc453d3930f8ab2b159973900',
'hearst': 'anvato_mcp_hearst_web_prod_5356c3de0fc7c90a3727b4863ca7fec3a4524a99', 'hearst': 'anvato_mcp_hearst_web_prod_5356c3de0fc7c90a3727b4863ca7fec3a4524a99',
'cbs': 'anvato_mcp_cbs_web_prod_02f26581ff80e5bda7aad28226a8d369037f2cbe', 'cbs': 'anvato_mcp_cbs_web_prod_02f26581ff80e5bda7aad28226a8d369037f2cbe',
'telemundo': 'anvato_mcp_telemundo_web_prod_c5278d51ad46fda4b6ca3d0ea44a7846a054f582' 'telemundo': 'anvato_mcp_telemundo_web_prod_c5278d51ad46fda4b6ca3d0ea44a7846a054f582',
} }
def _generate_nfl_token(self, anvack, mcp_id): def _generate_nfl_token(self, anvack, mcp_id):
@ -255,7 +255,7 @@ def _generate_nfl_token(self, anvack, mcp_id):
token token
} }
} }
}''' % (anvack, mcp_id), }''' % (anvack, mcp_id), # noqa: UP031
}).encode(), headers={ }).encode(), headers={
'Authorization': auth_token, 'Authorization': auth_token,
'Content-Type': 'application/json', 'Content-Type': 'application/json',
@ -299,7 +299,7 @@ def _get_video_json(self, access_key, video_id, extracted_token):
return self._download_json( return self._download_json(
video_data_url, video_id, transform_source=strip_jsonp, query=query, video_data_url, video_id, transform_source=strip_jsonp, query=query,
data=json.dumps({'api': api}, separators=(',', ':')).encode('utf-8')) data=json.dumps({'api': api}, separators=(',', ':')).encode())
def _get_anvato_videos(self, access_key, video_id, token): def _get_anvato_videos(self, access_key, video_id, token):
video_data = self._get_video_json(access_key, video_id, token) video_data = self._get_video_json(access_key, video_id, token)
@ -358,7 +358,7 @@ def _get_anvato_videos(self, access_key, video_id, token):
for caption in video_data.get('captions', []): for caption in video_data.get('captions', []):
a_caption = { a_caption = {
'url': caption['url'], 'url': caption['url'],
'ext': 'tt' if caption.get('format') == 'SMPTE-TT' else None 'ext': 'tt' if caption.get('format') == 'SMPTE-TT' else None,
} }
subtitles.setdefault(caption['language'], []).append(a_caption) subtitles.setdefault(caption['language'], []).append(a_caption)
subtitles = self._merge_subtitles(subtitles, hls_subs, vtt_subs) subtitles = self._merge_subtitles(subtitles, hls_subs, vtt_subs)
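
The Anvato hunk above keeps printf-style formatting with an explicit `# noqa: UP031`: a GraphQL query body is full of literal braces, so rewriting its `%s` placeholders into `str.format` or f-string syntax would misparse. A contrived template, not Anvato's real query, showing the clash:

```python
template = 'query { video(anvack: "%s", id: "%s") { token } }'

# printf-style substitution leaves the braces untouched:
print(template % ('some-anvack', 'some-mcp-id'))

try:
    # '{ video...' is parsed as a replacement field and blows up.
    template.replace('%s', '{}').format('some-anvack', 'some-mcp-id')
except ValueError as exc:
    print('str.format fails:', exc)
```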


@ -30,7 +30,7 @@ class AolIE(YahooIE): # XXX: Do not subclass from concrete IE
'params': { 'params': {
# m3u8 download # m3u8 download
'skip_download': True, 'skip_download': True,
} },
}, { }, {
# video with vidible ID # video with vidible ID
'url': 'https://www.aol.com/video/view/netflix-is-raising-rates/5707d6b8e4b090497b04f706/', 'url': 'https://www.aol.com/video/view/netflix-is-raising-rates/5707d6b8e4b090497b04f706/',
@ -46,7 +46,7 @@ class AolIE(YahooIE): # XXX: Do not subclass from concrete IE
'params': { 'params': {
# m3u8 download # m3u8 download
'skip_download': True, 'skip_download': True,
} },
}, { }, {
'url': 'https://www.aol.com/video/view/park-bench-season-2-trailer/559a1b9be4b0c3bfad3357a7/', 'url': 'https://www.aol.com/video/view/park-bench-season-2-trailer/559a1b9be4b0c3bfad3357a7/',
'only_matching': True, 'only_matching': True,
@ -83,10 +83,10 @@ def _real_extract(self, url):
return self._extract_yahoo_video(video_id, 'us') return self._extract_yahoo_video(video_id, 'us')
response = self._download_json( response = self._download_json(
'https://feedapi.b2c.on.aol.com/v1.0/app/videos/aolon/%s/details' % video_id, f'https://feedapi.b2c.on.aol.com/v1.0/app/videos/aolon/{video_id}/details',
video_id)['response'] video_id)['response']
if response['statusText'] != 'Ok': if response['statusText'] != 'Ok':
raise ExtractorError('%s said: %s' % (self.IE_NAME, response['statusText']), expected=True) raise ExtractorError('{} said: {}'.format(self.IE_NAME, response['statusText']), expected=True)
video_data = response['data'] video_data = response['data']
formats = [] formats = []


@ -34,7 +34,7 @@ def _real_extract(self, url):
video_id, base_url = mobj.group('id', 'base_url') video_id, base_url = mobj.group('id', 'base_url')
webpage = self._download_webpage( webpage = self._download_webpage(
'%s/player/%s' % (base_url, video_id), video_id) f'{base_url}/player/{video_id}', video_id)
jwplatform_id = self._search_regex( jwplatform_id = self._search_regex(
r'media[iI]d\s*:\s*["\'](?P<id>[a-zA-Z0-9]{8})', webpage, r'media[iI]d\s*:\s*["\'](?P<id>[a-zA-Z0-9]{8})', webpage,
@ -47,7 +47,7 @@ def _real_extract(self, url):
def extract(field, name=None): def extract(field, name=None):
return self._search_regex( return self._search_regex(
r'\b%s["\']\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1' % field, rf'\b{field}["\']\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1',
webpage, name or field, default=None, group='value') webpage, name or field, default=None, group='value')
title = extract('title') or video_id title = extract('title') or video_id


@ -24,7 +24,7 @@ class ApplePodcastsIE(InfoExtractor):
'duration': 6454, 'duration': 6454,
'series': 'The Tim Dillon Show', 'series': 'The Tim Dillon Show',
'thumbnail': 're:.+[.](png|jpe?g|webp)', 'thumbnail': 're:.+[.](png|jpe?g|webp)',
} },
}, { }, {
'url': 'https://podcasts.apple.com/podcast/207-whitney-webb-returns/id1135137367?i=1000482637777', 'url': 'https://podcasts.apple.com/podcast/207-whitney-webb-returns/id1135137367?i=1000482637777',
'only_matching': True, 'only_matching': True,


@ -1,8 +1,8 @@
import json import json
import re import re
import urllib.parse
from .common import InfoExtractor from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import ( from ..utils import (
int_or_none, int_or_none,
parse_duration, parse_duration,
@ -64,7 +64,7 @@ class AppleTrailersIE(InfoExtractor):
'uploader_id': 'wb', 'uploader_id': 'wb',
}, },
}, },
] ],
}, { }, {
'url': 'http://trailers.apple.com/trailers/magnolia/blackthorn/', 'url': 'http://trailers.apple.com/trailers/magnolia/blackthorn/',
'info_dict': { 'info_dict': {
@ -99,7 +99,7 @@ def _real_extract(self, url):
webpage = self._download_webpage(url, movie) webpage = self._download_webpage(url, movie)
film_id = self._search_regex(r"FilmId\s*=\s*'(\d+)'", webpage, 'film id') film_id = self._search_regex(r"FilmId\s*=\s*'(\d+)'", webpage, 'film id')
film_data = self._download_json( film_data = self._download_json(
'http://trailers.apple.com/trailers/feeds/data/%s.json' % film_id, f'http://trailers.apple.com/trailers/feeds/data/{film_id}.json',
film_id, fatal=False) film_id, fatal=False)
if film_data: if film_data:
@ -114,7 +114,7 @@ def _real_extract(self, url):
if not src: if not src:
continue continue
formats.append({ formats.append({
'format_id': '%s-%s' % (version, size), 'format_id': f'{version}-{size}',
'url': re.sub(r'_(\d+p\.mov)', r'_h\1', src), 'url': re.sub(r'_(\d+p\.mov)', r'_h\1', src),
'width': int_or_none(size_data.get('width')), 'width': int_or_none(size_data.get('width')),
'height': int_or_none(size_data.get('height')), 'height': int_or_none(size_data.get('height')),
@ -134,7 +134,7 @@ def _real_extract(self, url):
page_data = film_data.get('page', {}) page_data = film_data.get('page', {})
return self.playlist_result(entries, film_id, page_data.get('movie_title')) return self.playlist_result(entries, film_id, page_data.get('movie_title'))
playlist_url = compat_urlparse.urljoin(url, 'includes/playlists/itunes.inc') playlist_url = urllib.parse.urljoin(url, 'includes/playlists/itunes.inc')
def fix_html(s): def fix_html(s):
s = re.sub(r'(?s)<script[^<]*?>.*?</script>', '', s) s = re.sub(r'(?s)<script[^<]*?>.*?</script>', '', s)
@ -143,10 +143,9 @@ def fix_html(s):
# like: http://trailers.apple.com/trailers/wb/gravity/ # like: http://trailers.apple.com/trailers/wb/gravity/
def _clean_json(m): def _clean_json(m):
return 'iTunes.playURL(%s);' % m.group(1).replace('\'', '&#39;') return 'iTunes.playURL({});'.format(m.group(1).replace('\'', '&#39;'))
s = re.sub(self._JSON_RE, _clean_json, s) s = re.sub(self._JSON_RE, _clean_json, s)
s = '<html>%s</html>' % s return f'<html>{s}</html>'
return s
doc = self._download_xml(playlist_url, movie, transform_source=fix_html) doc = self._download_xml(playlist_url, movie, transform_source=fix_html)
playlist = [] playlist = []
@ -170,18 +169,18 @@ def _clean_json(m):
duration = 60 * int(m.group('minutes')) + int(m.group('seconds')) duration = 60 * int(m.group('minutes')) + int(m.group('seconds'))
trailer_id = first_url.split('/')[-1].rpartition('_')[0].lower() trailer_id = first_url.split('/')[-1].rpartition('_')[0].lower()
settings_json_url = compat_urlparse.urljoin(url, 'includes/settings/%s.json' % trailer_id) settings_json_url = urllib.parse.urljoin(url, f'includes/settings/{trailer_id}.json')
settings = self._download_json(settings_json_url, trailer_id, 'Downloading settings json') settings = self._download_json(settings_json_url, trailer_id, 'Downloading settings json')
formats = [] formats = []
for format in settings['metadata']['sizes']: for fmt in settings['metadata']['sizes']:
# The src is a file pointing to the real video file # The src is a file pointing to the real video file
format_url = re.sub(r'_(\d*p\.mov)', r'_h\1', format['src']) format_url = re.sub(r'_(\d*p\.mov)', r'_h\1', fmt['src'])
formats.append({ formats.append({
'url': format_url, 'url': format_url,
'format': format['type'], 'format': fmt['type'],
'width': int_or_none(format['width']), 'width': int_or_none(fmt['width']),
'height': int_or_none(format['height']), 'height': int_or_none(fmt['height']),
}) })
playlist.append({ playlist.append({
@ -229,7 +228,7 @@ class AppleTrailersSectionIE(InfoExtractor):
'title': 'Movie Studios', 'title': 'Movie Studios',
}, },
} }
_VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/#section=(?P<id>%s)' % '|'.join(_SECTIONS) _VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/#section=(?P<id>{})'.format('|'.join(_SECTIONS))
_TESTS = [{ _TESTS = [{
'url': 'http://trailers.apple.com/#section=justadded', 'url': 'http://trailers.apple.com/#section=justadded',
'info_dict': { 'info_dict': {
@ -270,7 +269,7 @@ class AppleTrailersSectionIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
section = self._match_id(url) section = self._match_id(url)
section_data = self._download_json( section_data = self._download_json(
'http://trailers.apple.com/trailers/home/feeds/%s.json' % self._SECTIONS[section]['feed_path'], 'http://trailers.apple.com/trailers/home/feeds/{}.json'.format(self._SECTIONS[section]['feed_path']),
section) section)
entries = [ entries = [
self.url_result('http://trailers.apple.com' + e['location']) self.url_result('http://trailers.apple.com' + e['location'])


@ -1,10 +1,11 @@
from __future__ import annotations
import json import json
import re import re
import urllib.parse import urllib.parse
from .common import InfoExtractor from .common import InfoExtractor
from .youtube import YoutubeBaseInfoExtractor, YoutubeIE from .youtube import YoutubeBaseInfoExtractor, YoutubeIE
from ..compat import compat_urllib_parse_unquote
from ..networking import HEADRequest from ..networking import HEADRequest
from ..networking.exceptions import HTTPError from ..networking.exceptions import HTTPError
from ..utils import ( from ..utils import (
@ -145,7 +146,7 @@ class ArchiveOrgIE(InfoExtractor):
'title': 'Bells Of Rostov', 'title': 'Bells Of Rostov',
'ext': 'mp3', 'ext': 'mp3',
}, },
'skip': 'restricted' 'skip': 'restricted',
}, { }, {
'url': 'https://archive.org/details/lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik/disc1/02.02.+Song+And+Chorus+In+The+Polovetsian+Camp+From+%22Prince+Igor%22+(Act+2%2C+Scene+1).mp3', 'url': 'https://archive.org/details/lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik/disc1/02.02.+Song+And+Chorus+In+The+Polovetsian+Camp+From+%22Prince+Igor%22+(Act+2%2C+Scene+1).mp3',
'md5': '1d0aabe03edca83ca58d9ed3b493a3c3', 'md5': '1d0aabe03edca83ca58d9ed3b493a3c3',
@ -158,7 +159,7 @@ class ArchiveOrgIE(InfoExtractor):
'description': 'md5:012b2d668ae753be36896f343d12a236', 'description': 'md5:012b2d668ae753be36896f343d12a236',
'upload_date': '20190928', 'upload_date': '20190928',
}, },
'skip': 'restricted' 'skip': 'restricted',
}, { }, {
# Original formats are private # Original formats are private
'url': 'https://archive.org/details/irelandthemakingofarepublic', 'url': 'https://archive.org/details/irelandthemakingofarepublic',
@ -202,8 +203,8 @@ class ArchiveOrgIE(InfoExtractor):
'thumbnail': 'https://archive.org/download/irelandthemakingofarepublic/irelandthemakingofarepublic.thumbs/irelandthemakingofarepublicreel2_001554.jpg', 'thumbnail': 'https://archive.org/download/irelandthemakingofarepublic/irelandthemakingofarepublic.thumbs/irelandthemakingofarepublicreel2_001554.jpg',
'display_id': 'irelandthemakingofarepublicreel2.mov', 'display_id': 'irelandthemakingofarepublicreel2.mov',
}, },
} },
] ],
}] }]
@staticmethod @staticmethod
@ -220,7 +221,7 @@ def _playlist_data(webpage):
def _real_extract(self, url): def _real_extract(self, url):
video_id = urllib.parse.unquote_plus(self._match_id(url)) video_id = urllib.parse.unquote_plus(self._match_id(url))
identifier, entry_id = (video_id.split('/', 1) + [None])[:2] identifier, _, entry_id = video_id.partition('/')
# Archive.org metadata API doesn't clearly demarcate playlist entries # Archive.org metadata API doesn't clearly demarcate playlist entries
# or subtitle tracks, so we get them from the embeddable player. # or subtitle tracks, so we get them from the embeddable player.
@ -246,7 +247,7 @@ def _real_extract(self, url):
if track['kind'] != 'subtitles': if track['kind'] != 'subtitles':
continue continue
entries[p['orig']][track['label']] = { entries[p['orig']][track['label']] = {
'url': 'https://archive.org/' + track['file'].lstrip('/') 'url': 'https://archive.org/' + track['file'].lstrip('/'),
} }
metadata = self._download_json('http://archive.org/metadata/' + identifier, identifier) metadata = self._download_json('http://archive.org/metadata/' + identifier, identifier)
@ -293,7 +294,9 @@ def _real_extract(self, url):
'height': int_or_none(f.get('width')), 'height': int_or_none(f.get('width')),
'filesize': int_or_none(f.get('size'))}) 'filesize': int_or_none(f.get('size'))})
extension = (f['name'].rsplit('.', 1) + [None])[1] _, has_ext, extension = f['name'].rpartition('.')
if not has_ext:
extension = None
# We don't want to skip private formats if the user has access to them, # We don't want to skip private formats if the user has access to them,
# however without access to an account with such privileges we can't implement/test this. # however without access to an account with such privileges we can't implement/test this.
@ -308,7 +311,7 @@ def _real_extract(self, url):
'filesize': int_or_none(f.get('size')), 'filesize': int_or_none(f.get('size')),
'protocol': 'https', 'protocol': 'https',
'source_preference': 0 if f.get('source') == 'original' else -1, 'source_preference': 0 if f.get('source') == 'original' else -1,
'format_note': f.get('source') 'format_note': f.get('source'),
}) })
for entry in entries.values(): for entry in entries.values():
@ -371,7 +374,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'uploader_url': 'https://www.youtube.com/user/Zeurel', 'uploader_url': 'https://www.youtube.com/user/Zeurel',
'thumbnail': r're:https?://.*\.(jpg|webp)', 'thumbnail': r're:https?://.*\.(jpg|webp)',
'channel_url': 'https://www.youtube.com/channel/UCukCyHaD-bK3in_pKpfH9Eg', 'channel_url': 'https://www.youtube.com/channel/UCukCyHaD-bK3in_pKpfH9Eg',
} },
}, { }, {
# Internal link # Internal link
'url': 'https://web.archive.org/web/2oe/http://wayback-fakeurl.archive.org/yt/97t7Xj_iBv0', 'url': 'https://web.archive.org/web/2oe/http://wayback-fakeurl.archive.org/yt/97t7Xj_iBv0',
@ -388,7 +391,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'uploader_url': 'https://www.youtube.com/user/1veritasium', 'uploader_url': 'https://www.youtube.com/user/1veritasium',
'thumbnail': r're:https?://.*\.(jpg|webp)', 'thumbnail': r're:https?://.*\.(jpg|webp)',
'channel_url': 'https://www.youtube.com/channel/UCHnyfMqiRRG1u-2MsSQLbXA', 'channel_url': 'https://www.youtube.com/channel/UCHnyfMqiRRG1u-2MsSQLbXA',
} },
}, { }, {
# Video from 2012, webm format itag 45. Newest capture is deleted video, with an invalid description. # Video from 2012, webm format itag 45. Newest capture is deleted video, with an invalid description.
# Should use the date in the link. Title ends with '- Youtube'. Capture has description in eow-description # Should use the date in the link. Title ends with '- Youtube'. Capture has description in eow-description
@ -403,8 +406,8 @@ class YoutubeWebArchiveIE(InfoExtractor):
'uploader_id': 'machinima', 'uploader_id': 'machinima',
'uploader_url': 'https://www.youtube.com/user/machinima', 'uploader_url': 'https://www.youtube.com/user/machinima',
'thumbnail': r're:https?://.*\.(jpg|webp)', 'thumbnail': r're:https?://.*\.(jpg|webp)',
'uploader': 'machinima' 'uploader': 'machinima',
} },
}, { }, {
# FLV video. Video file URL does not provide itag information # FLV video. Video file URL does not provide itag information
'url': 'https://web.archive.org/web/20081211103536/http://www.youtube.com/watch?v=jNQXAC9IVRw', 'url': 'https://web.archive.org/web/20081211103536/http://www.youtube.com/watch?v=jNQXAC9IVRw',
@ -421,7 +424,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'channel_url': 'https://www.youtube.com/channel/UC4QobU6STFB0P71PMvOGN5A', 'channel_url': 'https://www.youtube.com/channel/UC4QobU6STFB0P71PMvOGN5A',
'thumbnail': r're:https?://.*\.(jpg|webp)', 'thumbnail': r're:https?://.*\.(jpg|webp)',
'uploader': 'jawed', 'uploader': 'jawed',
} },
}, { }, {
'url': 'https://web.archive.org/web/20110712231407/http://www.youtube.com/watch?v=lTx3G6h2xyA', 'url': 'https://web.archive.org/web/20110712231407/http://www.youtube.com/watch?v=lTx3G6h2xyA',
'info_dict': { 'info_dict': {
@ -437,7 +440,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'uploader_url': 'https://www.youtube.com/user/itsmadeon', 'uploader_url': 'https://www.youtube.com/user/itsmadeon',
'channel_url': 'https://www.youtube.com/channel/UCqMDNf3Pn5L7pcNkuSEeO3w', 'channel_url': 'https://www.youtube.com/channel/UCqMDNf3Pn5L7pcNkuSEeO3w',
'thumbnail': r're:https?://.*\.(jpg|webp)', 'thumbnail': r're:https?://.*\.(jpg|webp)',
} },
}, { }, {
# First capture is of dead video, second is the oldest from CDX response. # First capture is of dead video, second is the oldest from CDX response.
'url': 'https://web.archive.org/https://www.youtube.com/watch?v=1JYutPM8O6E', 'url': 'https://web.archive.org/https://www.youtube.com/watch?v=1JYutPM8O6E',
@ -454,7 +457,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'channel_url': 'https://www.youtube.com/channel/UCdIaNUarhzLSXGoItz7BHVA', 'channel_url': 'https://www.youtube.com/channel/UCdIaNUarhzLSXGoItz7BHVA',
'thumbnail': r're:https?://.*\.(jpg|webp)', 'thumbnail': r're:https?://.*\.(jpg|webp)',
'uploader': 'ETC News', 'uploader': 'ETC News',
} },
}, { }, {
# First capture of dead video, capture date in link links to dead capture. # First capture of dead video, capture date in link links to dead capture.
'url': 'https://web.archive.org/web/20180803221945/https://www.youtube.com/watch?v=6FPhZJGvf4E', 'url': 'https://web.archive.org/web/20180803221945/https://www.youtube.com/watch?v=6FPhZJGvf4E',
@ -473,15 +476,15 @@ class YoutubeWebArchiveIE(InfoExtractor):
'uploader': 'ETC News', 'uploader': 'ETC News',
}, },
'expected_warnings': [ 'expected_warnings': [
r'unable to download capture webpage \(it may not be archived\)' r'unable to download capture webpage \(it may not be archived\)',
] ],
}, { # Very old YouTube page, has - YouTube in title. }, { # Very old YouTube page, has - YouTube in title.
'url': 'http://web.archive.org/web/20070302011044/http://youtube.com/watch?v=-06-KB9XTzg', 'url': 'http://web.archive.org/web/20070302011044/http://youtube.com/watch?v=-06-KB9XTzg',
'info_dict': { 'info_dict': {
'id': '-06-KB9XTzg', 'id': '-06-KB9XTzg',
'ext': 'flv', 'ext': 'flv',
'title': 'New Coin Hack!! 100% Safe!!' 'title': 'New Coin Hack!! 100% Safe!!',
} },
}, { }, {
'url': 'web.archive.org/https://www.youtube.com/watch?v=dWW7qP423y8', 'url': 'web.archive.org/https://www.youtube.com/watch?v=dWW7qP423y8',
'info_dict': { 'info_dict': {
@ -495,7 +498,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'description': 'md5:7b567f898d8237b256f36c1a07d6d7bc', 'description': 'md5:7b567f898d8237b256f36c1a07d6d7bc',
'thumbnail': r're:https?://.*\.(jpg|webp)', 'thumbnail': r're:https?://.*\.(jpg|webp)',
'uploader': 'DankPods', 'uploader': 'DankPods',
} },
}, { }, {
# player response contains '};' See: https://github.com/ytdl-org/youtube-dl/issues/27093 # player response contains '};' See: https://github.com/ytdl-org/youtube-dl/issues/27093
'url': 'https://web.archive.org/web/20200827003909if_/http://www.youtube.com/watch?v=6Dh-RL__uN4', 'url': 'https://web.archive.org/web/20200827003909if_/http://www.youtube.com/watch?v=6Dh-RL__uN4',
@ -512,7 +515,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'uploader_id': 'PewDiePie', 'uploader_id': 'PewDiePie',
'uploader_url': 'https://www.youtube.com/user/PewDiePie', 'uploader_url': 'https://www.youtube.com/user/PewDiePie',
'thumbnail': r're:https?://.*\.(jpg|webp)', 'thumbnail': r're:https?://.*\.(jpg|webp)',
} },
}, { }, {
# ~June 2010 Capture. swfconfig # ~June 2010 Capture. swfconfig
'url': 'https://web.archive.org/web/0/https://www.youtube.com/watch?v=8XeW5ilk-9Y', 'url': 'https://web.archive.org/web/0/https://www.youtube.com/watch?v=8XeW5ilk-9Y',
@ -527,7 +530,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'thumbnail': r're:https?://.*\.(jpg|webp)', 'thumbnail': r're:https?://.*\.(jpg|webp)',
'uploader_url': 'https://www.youtube.com/user/HowTheWorldWorks', 'uploader_url': 'https://www.youtube.com/user/HowTheWorldWorks',
'upload_date': '20090520', 'upload_date': '20090520',
} },
}, { }, {
# Jan 2011: watch-video-date/eow-date surrounded by whitespace # Jan 2011: watch-video-date/eow-date surrounded by whitespace
'url': 'https://web.archive.org/web/20110126141719/http://www.youtube.com/watch?v=Q_yjX80U7Yc', 'url': 'https://web.archive.org/web/20110126141719/http://www.youtube.com/watch?v=Q_yjX80U7Yc',
@ -542,7 +545,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'thumbnail': r're:https?://.*\.(jpg|webp)', 'thumbnail': r're:https?://.*\.(jpg|webp)',
'duration': 132, 'duration': 132,
'uploader_url': 'https://www.youtube.com/user/claybutlermusic', 'uploader_url': 'https://www.youtube.com/user/claybutlermusic',
} },
}, { }, {
# ~May 2009 swfArgs. ytcfg is spread out over various vars # ~May 2009 swfArgs. ytcfg is spread out over various vars
'url': 'https://web.archive.org/web/0/https://www.youtube.com/watch?v=c5uJgG05xUY', 'url': 'https://web.archive.org/web/0/https://www.youtube.com/watch?v=c5uJgG05xUY',
@ -557,7 +560,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'description': 'md5:4ca77d79538064e41e4cc464e93f44f0', 'description': 'md5:4ca77d79538064e41e4cc464e93f44f0',
'thumbnail': r're:https?://.*\.(jpg|webp)', 'thumbnail': r're:https?://.*\.(jpg|webp)',
'duration': 754, 'duration': 754,
} },
}, { }, {
# ~June 2012. Upload date is in another lang so cannot extract. # ~June 2012. Upload date is in another lang so cannot extract.
'url': 'https://web.archive.org/web/20120607174520/http://www.youtube.com/watch?v=xWTLLl-dQaA', 'url': 'https://web.archive.org/web/20120607174520/http://www.youtube.com/watch?v=xWTLLl-dQaA',
@ -571,7 +574,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'uploader': 'BlackNerdComedy', 'uploader': 'BlackNerdComedy',
'duration': 182, 'duration': 182,
'thumbnail': r're:https?://.*\.(jpg|webp)', 'thumbnail': r're:https?://.*\.(jpg|webp)',
} },
}, { }, {
# ~July 2013 # ~July 2013
'url': 'https://web.archive.org/web/*/https://www.youtube.com/watch?v=9eO1aasHyTM', 'url': 'https://web.archive.org/web/*/https://www.youtube.com/watch?v=9eO1aasHyTM',
@ -587,7 +590,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'channel_url': 'https://www.youtube.com/channel/UC62R2cBezNBOqxSerfb1nMQ', 'channel_url': 'https://www.youtube.com/channel/UC62R2cBezNBOqxSerfb1nMQ',
'upload_date': '20060428', 'upload_date': '20060428',
'uploader': 'punkybird', 'uploader': 'punkybird',
} },
}, { }, {
# April 2020: Player response in player config # April 2020: Player response in player config
'url': 'https://web.archive.org/web/20200416034815/https://www.youtube.com/watch?v=Cf7vS8jc7dY&gl=US&hl=en', 'url': 'https://web.archive.org/web/20200416034815/https://www.youtube.com/watch?v=Cf7vS8jc7dY&gl=US&hl=en',
@ -604,7 +607,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'thumbnail': r're:https?://.*\.(jpg|webp)', 'thumbnail': r're:https?://.*\.(jpg|webp)',
'description': 'md5:c625bb3c02c4f5fb4205971e468fa341', 'description': 'md5:c625bb3c02c4f5fb4205971e468fa341',
'uploader_url': 'https://www.youtube.com/user/GameGrumps', 'uploader_url': 'https://www.youtube.com/user/GameGrumps',
} },
}, { }, {
# watch7-user-header with yt-user-info # watch7-user-header with yt-user-info
'url': 'ytarchive:kbh4T_b4Ixw:20160307085057', 'url': 'ytarchive:kbh4T_b4Ixw:20160307085057',
@ -619,7 +622,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
'thumbnail': r're:https?://.*\.(jpg|webp)', 'thumbnail': r're:https?://.*\.(jpg|webp)',
'upload_date': '20150503', 'upload_date': '20150503',
'channel_id': 'UCnTaGvsHmMy792DWeT6HbGA', 'channel_id': 'UCnTaGvsHmMy792DWeT6HbGA',
} },
}, { }, {
# April 2012 # April 2012
'url': 'https://web.archive.org/web/0/https://www.youtube.com/watch?v=SOm7mPoPskU', 'url': 'https://web.archive.org/web/0/https://www.youtube.com/watch?v=SOm7mPoPskU',
@ -634,35 +637,35 @@ class YoutubeWebArchiveIE(InfoExtractor):
'duration': 200, 'duration': 200,
'upload_date': '20120407', 'upload_date': '20120407',
'uploader_id': 'thecomputernerd01', 'uploader_id': 'thecomputernerd01',
} },
}, { }, {
'url': 'https://web.archive.org/web/http://www.youtube.com/watch?v=kH-G_aIBlFw', 'url': 'https://web.archive.org/web/http://www.youtube.com/watch?v=kH-G_aIBlFw',
'only_matching': True 'only_matching': True,
}, { }, {
'url': 'https://web.archive.org/web/20050214000000_if/http://www.youtube.com/watch?v=0altSZ96U4M', 'url': 'https://web.archive.org/web/20050214000000_if/http://www.youtube.com/watch?v=0altSZ96U4M',
'only_matching': True 'only_matching': True,
}, { }, {
# Video not archived, only capture is unavailable video page # Video not archived, only capture is unavailable video page
'url': 'https://web.archive.org/web/20210530071008/https://www.youtube.com/watch?v=lHJTf93HL1s&spfreload=10', 'url': 'https://web.archive.org/web/20210530071008/https://www.youtube.com/watch?v=lHJTf93HL1s&spfreload=10',
'only_matching': True 'only_matching': True,
}, { # Encoded url }, { # Encoded url
'url': 'https://web.archive.org/web/20120712231619/http%3A//www.youtube.com/watch%3Fgl%3DUS%26v%3DAkhihxRKcrs%26hl%3Den', 'url': 'https://web.archive.org/web/20120712231619/http%3A//www.youtube.com/watch%3Fgl%3DUS%26v%3DAkhihxRKcrs%26hl%3Den',
'only_matching': True 'only_matching': True,
}, { }, {
'url': 'https://web.archive.org/web/20120712231619/http%3A//www.youtube.com/watch%3Fv%3DAkhihxRKcrs%26gl%3DUS%26hl%3Den', 'url': 'https://web.archive.org/web/20120712231619/http%3A//www.youtube.com/watch%3Fv%3DAkhihxRKcrs%26gl%3DUS%26hl%3Den',
'only_matching': True 'only_matching': True,
}, { }, {
'url': 'https://web.archive.org/web/20060527081937/http://www.youtube.com:80/watch.php?v=ELTFsLT73fA&amp;search=soccer', 'url': 'https://web.archive.org/web/20060527081937/http://www.youtube.com:80/watch.php?v=ELTFsLT73fA&amp;search=soccer',
'only_matching': True 'only_matching': True,
}, { }, {
'url': 'https://web.archive.org/http://www.youtube.com:80/watch?v=-05VVye-ffg', 'url': 'https://web.archive.org/http://www.youtube.com:80/watch?v=-05VVye-ffg',
'only_matching': True 'only_matching': True,
}, { }, {
'url': 'ytarchive:BaW_jenozKc:20050214000000', 'url': 'ytarchive:BaW_jenozKc:20050214000000',
'only_matching': True 'only_matching': True,
}, { }, {
'url': 'ytarchive:BaW_jenozKc', 'url': 'ytarchive:BaW_jenozKc',
'only_matching': True 'only_matching': True,
}, },
] ]
_YT_INITIAL_DATA_RE = YoutubeBaseInfoExtractor._YT_INITIAL_DATA_RE _YT_INITIAL_DATA_RE = YoutubeBaseInfoExtractor._YT_INITIAL_DATA_RE
@ -673,13 +676,13 @@ class YoutubeWebArchiveIE(InfoExtractor):
_YT_DEFAULT_THUMB_SERVERS = ['i.ytimg.com'] # thumbnails most likely archived on these servers _YT_DEFAULT_THUMB_SERVERS = ['i.ytimg.com'] # thumbnails most likely archived on these servers
_YT_ALL_THUMB_SERVERS = orderedSet( _YT_ALL_THUMB_SERVERS = orderedSet(
_YT_DEFAULT_THUMB_SERVERS + ['img.youtube.com', *[f'{c}{n or ""}.ytimg.com' for c in ('i', 's') for n in (*range(0, 5), 9)]]) [*_YT_DEFAULT_THUMB_SERVERS, 'img.youtube.com', *[f'{c}{n or ""}.ytimg.com' for c in ('i', 's') for n in (*range(5), 9)]])
_WAYBACK_BASE_URL = 'https://web.archive.org/web/%sif_/' _WAYBACK_BASE_URL = 'https://web.archive.org/web/%sif_/'
_OLDEST_CAPTURE_DATE = 20050214000000 _OLDEST_CAPTURE_DATE = 20050214000000
_NEWEST_CAPTURE_DATE = 20500101000000 _NEWEST_CAPTURE_DATE = 20500101000000
def _call_cdx_api(self, item_id, url, filters: list = None, collapse: list = None, query: dict = None, note=None, fatal=False): def _call_cdx_api(self, item_id, url, filters: list | None = None, collapse: list | None = None, query: dict | None = None, note=None, fatal=False):
# CDX docs: https://github.com/internetarchive/wayback/blob/master/wayback-cdx-server/README.md # CDX docs: https://github.com/internetarchive/wayback/blob/master/wayback-cdx-server/README.md
query = { query = {
'url': url, 'url': url,
@ -688,14 +691,14 @@ def _call_cdx_api(self, item_id, url, filters: list = None, collapse: list = Non
'limit': 500, 'limit': 500,
'filter': ['statuscode:200'] + (filters or []), 'filter': ['statuscode:200'] + (filters or []),
'collapse': collapse or [], 'collapse': collapse or [],
**(query or {}) **(query or {}),
} }
res = self._download_json( res = self._download_json(
'https://web.archive.org/cdx/search/cdx', item_id, 'https://web.archive.org/cdx/search/cdx', item_id,
note or 'Downloading CDX API JSON', query=query, fatal=fatal) note or 'Downloading CDX API JSON', query=query, fatal=fatal)
if isinstance(res, list) and len(res) >= 2: if isinstance(res, list) and len(res) >= 2:
# format response to make it easier to use # format response to make it easier to use
return list(dict(zip(res[0], v)) for v in res[1:]) return [dict(zip(res[0], v)) for v in res[1:]]
elif not isinstance(res, list) or len(res) != 0: elif not isinstance(res, list) or len(res) != 0:
self.report_warning('Error while parsing CDX API response' + bug_reports_message()) self.report_warning('Error while parsing CDX API response' + bug_reports_message())
@ -852,7 +855,7 @@ def _extract_thumbnails(self, video_id):
{ {
'url': (self._WAYBACK_BASE_URL % (int_or_none(thumbnail_dict.get('timestamp')) or self._OLDEST_CAPTURE_DATE)) + thumbnail_dict.get('original'), 'url': (self._WAYBACK_BASE_URL % (int_or_none(thumbnail_dict.get('timestamp')) or self._OLDEST_CAPTURE_DATE)) + thumbnail_dict.get('original'),
'filesize': int_or_none(thumbnail_dict.get('length')), 'filesize': int_or_none(thumbnail_dict.get('length')),
'preference': int_or_none(thumbnail_dict.get('length')) 'preference': int_or_none(thumbnail_dict.get('length')),
} for thumbnail_dict in response) } for thumbnail_dict in response)
if not try_all: if not try_all:
break break
@ -893,7 +896,7 @@ def _real_extract(self, url):
for retry in retry_manager: for retry in retry_manager:
try: try:
urlh = self._request_webpage( urlh = self._request_webpage(
HEADRequest('https://web.archive.org/web/2oe_/http://wayback-fakeurl.archive.org/yt/%s' % video_id), HEADRequest(f'https://web.archive.org/web/2oe_/http://wayback-fakeurl.archive.org/yt/{video_id}'),
video_id, note='Fetching archived video file url', expected_status=True) video_id, note='Fetching archived video file url', expected_status=True)
except ExtractorError as e: except ExtractorError as e:
# HTTP Error 404 is expected if the video is not saved. # HTTP Error 404 is expected if the video is not saved.
@ -924,21 +927,21 @@ def _real_extract(self, url):
info['thumbnails'] = self._extract_thumbnails(video_id) info['thumbnails'] = self._extract_thumbnails(video_id)
if urlh: if urlh:
url = compat_urllib_parse_unquote(urlh.url) url = urllib.parse.unquote(urlh.url)
video_file_url_qs = parse_qs(url) video_file_url_qs = parse_qs(url)
# Attempt to recover any ext & format info from playback url & response headers # Attempt to recover any ext & format info from playback url & response headers
format = {'url': url, 'filesize': int_or_none(urlh.headers.get('x-archive-orig-content-length'))} fmt = {'url': url, 'filesize': int_or_none(urlh.headers.get('x-archive-orig-content-length'))}
itag = try_get(video_file_url_qs, lambda x: x['itag'][0]) itag = try_get(video_file_url_qs, lambda x: x['itag'][0])
if itag and itag in YoutubeIE._formats: if itag and itag in YoutubeIE._formats:
format.update(YoutubeIE._formats[itag]) fmt.update(YoutubeIE._formats[itag])
format.update({'format_id': itag}) fmt.update({'format_id': itag})
else: else:
mime = try_get(video_file_url_qs, lambda x: x['mime'][0]) mime = try_get(video_file_url_qs, lambda x: x['mime'][0])
ext = (mimetype2ext(mime) ext = (mimetype2ext(mime)
or urlhandle_detect_ext(urlh) or urlhandle_detect_ext(urlh)
or mimetype2ext(urlh.headers.get('x-archive-guessed-content-type'))) or mimetype2ext(urlh.headers.get('x-archive-guessed-content-type')))
format.update({'ext': ext}) fmt.update({'ext': ext})
info['formats'] = [format] info['formats'] = [fmt]
if not info.get('duration'): if not info.get('duration'):
info['duration'] = str_to_int(try_get(video_file_url_qs, lambda x: x['dur'][0])) info['duration'] = str_to_int(try_get(video_file_url_qs, lambda x: x['dur'][0]))
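
The new `from __future__ import annotations` at the top of the archive.org extractor is what lets `_call_cdx_api` spell its optional parameters as `list | None`: annotations stay unevaluated strings, so the PEP 604 union syntax parses even on interpreters that predate runtime `|` support for types. A stripped-down stand-in for that signature:

```python
from __future__ import annotations


# With lazy annotations, `list | None` is never evaluated at
# definition time, so this imports cleanly on older interpreters too.
def call_cdx_api(item_id, url, filters: list | None = None,
                 collapse: list | None = None):
    return {'url': url, 'filter': ['statuscode:200', *(filters or [])]}


print(call_cdx_api('demo', 'https://web.archive.org/cdx/search/cdx'))
print(call_cdx_api.__annotations__['filters'])  # the string 'list | None'
```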


@ -11,7 +11,7 @@
class ArcPublishingIE(InfoExtractor): class ArcPublishingIE(InfoExtractor):
_UUID_REGEX = r'[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12}' _UUID_REGEX = r'[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12}'
_VALID_URL = r'arcpublishing:(?P<org>[a-z]+):(?P<id>%s)' % _UUID_REGEX _VALID_URL = rf'arcpublishing:(?P<org>[a-z]+):(?P<id>{_UUID_REGEX})'
_TESTS = [{ _TESTS = [{
# https://www.adn.com/politics/2020/11/02/video-senate-candidates-campaign-in-anchorage-on-eve-of-election-day/ # https://www.adn.com/politics/2020/11/02/video-senate-candidates-campaign-in-anchorage-on-eve-of-election-day/
'url': 'arcpublishing:adn:8c99cb6e-b29c-4bc9-9173-7bf9979225ab', 'url': 'arcpublishing:adn:8c99cb6e-b29c-4bc9-9173-7bf9979225ab',
@ -74,12 +74,12 @@ class ArcPublishingIE(InfoExtractor):
def _extract_embed_urls(cls, url, webpage): def _extract_embed_urls(cls, url, webpage):
entries = [] entries = []
# https://arcpublishing.atlassian.net/wiki/spaces/POWA/overview # https://arcpublishing.atlassian.net/wiki/spaces/POWA/overview
for powa_el in re.findall(r'(<div[^>]+class="[^"]*\bpowa\b[^"]*"[^>]+data-uuid="%s"[^>]*>)' % ArcPublishingIE._UUID_REGEX, webpage): for powa_el in re.findall(rf'(<div[^>]+class="[^"]*\bpowa\b[^"]*"[^>]+data-uuid="{ArcPublishingIE._UUID_REGEX}"[^>]*>)', webpage):
powa = extract_attributes(powa_el) or {} powa = extract_attributes(powa_el) or {}
org = powa.get('data-org') org = powa.get('data-org')
uuid = powa.get('data-uuid') uuid = powa.get('data-uuid')
if org and uuid: if org and uuid:
entries.append('arcpublishing:%s:%s' % (org, uuid)) entries.append(f'arcpublishing:{org}:{uuid}')
return entries return entries
def _real_extract(self, url): def _real_extract(self, url):
@ -122,7 +122,7 @@ def _real_extract(self, url):
elif stream_type in ('ts', 'hls'): elif stream_type in ('ts', 'hls'):
m3u8_formats = self._extract_m3u8_formats( m3u8_formats = self._extract_m3u8_formats(
s_url, uuid, 'mp4', live=is_live, m3u8_id='hls', fatal=False) s_url, uuid, 'mp4', live=is_live, m3u8_id='hls', fatal=False)
if all([f.get('acodec') == 'none' for f in m3u8_formats]): if all(f.get('acodec') == 'none' for f in m3u8_formats):
continue continue
for f in m3u8_formats: for f in m3u8_formats:
height = f.get('height') height = f.get('height')
@ -136,7 +136,7 @@ def _real_extract(self, url):
else: else:
vbr = int_or_none(s.get('bitrate')) vbr = int_or_none(s.get('bitrate'))
formats.append({ formats.append({
'format_id': '%s-%d' % (stream_type, vbr) if vbr else stream_type, 'format_id': f'{stream_type}-{vbr}' if vbr else stream_type,
'vbr': vbr, 'vbr': vbr,
'width': int_or_none(s.get('width')), 'width': int_or_none(s.get('width')),
'height': int_or_none(s.get('height')), 'height': int_or_none(s.get('height')),
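
Dropping the square brackets in the `all(...)` call above turns a list comprehension into a generator expression (ruff's C419, presumably), letting `all()` short-circuit on the first non-matching format instead of evaluating every element first. Sketched with a noisy predicate:

```python
m3u8_formats = [{'acodec': 'mp4a.40.2'}, {'acodec': 'none'}, {'acodec': 'none'}]

def is_audioless(f):
    print('checking', f)
    return f.get('acodec') == 'none'

print(all([is_audioless(f) for f in m3u8_formats]))  # checks all three
print(all(is_audioless(f) for f in m3u8_formats))    # stops after the first False
```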


@ -85,7 +85,7 @@ def _extract_formats(self, media_info, video_id):
formats.extend(self._extract_f4m_formats( formats.extend(self._extract_f4m_formats(
update_url_query(stream_url, { update_url_query(stream_url, {
'hdcore': '3.1.1', 'hdcore': '3.1.1',
'plugin': 'aasp-3.1.1.69.124' 'plugin': 'aasp-3.1.1.69.124',
}), video_id, f4m_id='hds', fatal=False)) }), video_id, f4m_id='hds', fatal=False))
elif ext == 'm3u8': elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats( formats.extend(self._extract_m3u8_formats(
@ -96,12 +96,12 @@ def _extract_formats(self, media_info, video_id):
f = { f = {
'url': server, 'url': server,
'play_path': stream_url, 'play_path': stream_url,
'format_id': 'a%s-rtmp-%s' % (num, quality), 'format_id': f'a{num}-rtmp-{quality}',
} }
else: else:
f = { f = {
'url': stream_url, 'url': stream_url,
'format_id': 'a%s-%s-%s' % (num, ext, quality) 'format_id': f'a{num}-{ext}-{quality}',
} }
m = re.search( m = re.search(
r'_(?P<width>\d+)x(?P<height>\d+)\.mp4$', r'_(?P<width>\d+)x(?P<height>\d+)\.mp4$',


@ -64,7 +64,7 @@ def _real_extract(self, url):
raise ExtractorError('Invalid URL', expected=True) raise ExtractorError('Invalid URL', expected=True)
media = self._download_json( media = self._download_json(
'https://video.qbrick.com/api/v1/public/accounts/%s/medias/%s' % (account_id, video_id), f'https://video.qbrick.com/api/v1/public/accounts/{account_id}/medias/{video_id}',
video_id, query={ video_id, query={
# https://video.qbrick.com/docs/api/examples/library-api.html # https://video.qbrick.com/docs/api/examples/library-api.html
'fields': 'asset/resources/*/renditions/*(height,id,language,links/*(href,mimeType),type,size,videos/*(audios/*(codec,sampleRate),bitrate,codec,duration,height,width),width),created,metadata/*(title,description),tags', 'fields': 'asset/resources/*/renditions/*(height,id,language,links/*(href,mimeType),type,size,videos/*(audios/*(codec,sampleRate),bitrate,codec,duration,height,width),width),created,metadata/*(title,description),tags',


@ -1,8 +1,6 @@
import urllib.parse
from .common import InfoExtractor from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urllib_parse_urlparse,
)
from ..utils import ( from ..utils import (
float_or_none, float_or_none,
format_field, format_field,
@ -35,7 +33,7 @@ class ArnesIE(InfoExtractor):
'view_count': int, 'view_count': int,
'tags': ['linearna_algebra'], 'tags': ['linearna_algebra'],
'start_time': 10, 'start_time': 10,
} },
}, { }, {
'url': 'https://video.arnes.si/api/asset/s1YjnV7hadlC/play.mp4', 'url': 'https://video.arnes.si/api/asset/s1YjnV7hadlC/play.mp4',
'only_matching': True, 'only_matching': True,
@ -93,6 +91,6 @@ def _real_extract(self, url):
'duration': float_or_none(video.get('duration'), 1000), 'duration': float_or_none(video.get('duration'), 1000),
'view_count': int_or_none(video.get('views')), 'view_count': int_or_none(video.get('views')),
'tags': video.get('hashtags'), 'tags': video.get('hashtags'),
'start_time': int_or_none(compat_parse_qs( 'start_time': int_or_none(urllib.parse.parse_qs(
compat_urllib_parse_urlparse(url).query).get('t', [None])[0]), urllib.parse.urlparse(url).query).get('t', [None])[0]),
} }

Some files were not shown because too many files have changed in this diff.