Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2024-12-18 05:19:59 +00:00)

[cleanup] Bump ruff to 0.8.x (#11608)

Authored by: seproDev
sepro committed via GitHub on 2024-12-02 16:29:30 +01:00
parent 2bea793632
commit d8fb349086
15 changed files with 42 additions and 46 deletions

pyproject.toml

@@ -76,7 +76,7 @@ dev = [
 ]
 static-analysis = [
     "autopep8~=2.0",
-    "ruff~=0.7.0",
+    "ruff~=0.8.0",
 ]
 test = [
     "pytest~=8.1",
@@ -186,6 +186,7 @@ ignore = [
     "E501",   # line-too-long
     "E731",   # lambda-assignment
     "E741",   # ambiguous-variable-name
+    "UP031",  # printf-string-formatting
     "UP036",  # outdated-version-block
     "B006",   # mutable-argument-default
     "B008",   # function-call-in-default-argument
@@ -258,9 +259,6 @@ select = [
     "A002",   # builtin-argument-shadowing
     "C408",   # unnecessary-collection-call
 ]
-"yt_dlp/jsinterp.py" = [
-    "UP031",  # printf-string-formatting
-]
 
 [tool.ruff.lint.isort]
 known-first-party = [
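
The net effect of these two hunks is that UP031 (printf-string-formatting) is now ignored project-wide rather than only in yt_dlp/jsinterp.py. As a rough sketch (the variables are hypothetical, not from this diff), the rule flags %-interpolation and offers an f-string instead:

# Hypothetical snippet showing what UP031 would flag: %-style interpolation...
fmt, codec = 'mp4', 'h264'
label = '%s (%s)' % (fmt, codec)

# ...which ruff would offer to rewrite as an f-string
label = f'{fmt} ({codec})'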

yt_dlp/YoutubeDL.py

@@ -1116,7 +1116,7 @@ def report_file_delete(self, file_name):
     def raise_no_formats(self, info, forced=False, *, msg=None):
         has_drm = info.get('_has_drm')
         ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
-        msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
+        msg = msg or (has_drm and 'This video is DRM protected') or 'No video formats found!'
         if forced or not ignored:
             raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                  expected=has_drm or ignored or expected)
@@ -2196,7 +2196,7 @@ def _select_formats(self, formats, selector):
     def _default_format_spec(self, info_dict):
         prefer_best = (
             self.params['outtmpl']['default'] == '-'
-            or info_dict.get('is_live') and not self.params.get('live_from_start'))
+            or (info_dict.get('is_live') and not self.params.get('live_from_start')))
 
         def can_merge():
             merger = FFmpegMergerPP(self)
@@ -2365,7 +2365,7 @@ def _merge(formats_pair):
             vexts=[f['ext'] for f in video_fmts],
             aexts=[f['ext'] for f in audio_fmts],
             preferences=(try_call(lambda: self.params['merge_output_format'].split('/'))
-                         or self.params.get('prefer_free_formats') and ('webm', 'mkv')))
+                         or (self.params.get('prefer_free_formats') and ('webm', 'mkv'))))
 
         filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))
@@ -3541,8 +3541,8 @@ def ffmpeg_fixup(cndn, msg, cls):
                      and info_dict.get('container') == 'm4a_dash',
                      'writing DASH m4a. Only some players support this container',
                      FFmpegFixupM4aPP)
-        ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
-                     or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
+        ffmpeg_fixup((downloader == 'hlsnative' and not self.params.get('hls_use_mpegts'))
+                     or (info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None),
                      'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
                      FFmpegFixupM3u8PP)
         ffmpeg_fixup(downloader == 'dashsegments'
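
Every change in this file follows the same pattern, apparently ruff 0.8's newly stabilized parenthesize-chained-operators check (RUF021): conditions mixing `and` and `or` get explicit parentheses. Since `and` already binds tighter than `or` in Python, behavior is unchanged. A minimal sketch with stand-in values, reusing the expression from the first hunk:

# `and` binds tighter than `or`, so both spellings evaluate identically
msg, has_drm = None, True
implicit = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
explicit = msg or (has_drm and 'This video is DRM protected') or 'No video formats found!'
assert implicit == explicit == 'This video is DRM protected'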

yt_dlp/__init__.py

@@ -1062,7 +1062,7 @@ def make_row(target, handler):
         # If we only have a single process attached, then the executable was double clicked
         # When using `pyinstaller` with `--onefile`, two processes get attached
         is_onefile = hasattr(sys, '_MEIPASS') and os.path.basename(sys._MEIPASS).startswith('_MEI')
-        if attached_processes == 1 or is_onefile and attached_processes == 2:
+        if attached_processes == 1 or (is_onefile and attached_processes == 2):
             print(parser._generate_error_message(
                 'Do not double-click the executable, instead call it from a command line.\n'
                 'Please read the README for further information on how to use yt-dlp: '
@@ -1109,9 +1109,9 @@ def main(argv=None):
 from .extractor import gen_extractors, list_extractors
 
 __all__ = [
-    'main',
     'YoutubeDL',
-    'parse_options',
     'gen_extractors',
     'list_extractors',
+    'main',
+    'parse_options',
 ]

yt_dlp/aes.py

@@ -534,19 +534,17 @@ def ghash(subkey, data):
 __all__ = [
     'aes_cbc_decrypt',
     'aes_cbc_decrypt_bytes',
-    'aes_ctr_decrypt',
-    'aes_decrypt_text',
-    'aes_decrypt',
-    'aes_ecb_decrypt',
-    'aes_gcm_decrypt_and_verify',
-    'aes_gcm_decrypt_and_verify_bytes',
-
     'aes_cbc_encrypt',
     'aes_cbc_encrypt_bytes',
+    'aes_ctr_decrypt',
     'aes_ctr_encrypt',
+    'aes_decrypt',
+    'aes_decrypt_text',
+    'aes_ecb_decrypt',
     'aes_ecb_encrypt',
     'aes_encrypt',
-
+    'aes_gcm_decrypt_and_verify',
+    'aes_gcm_decrypt_and_verify_bytes',
     'key_expansion',
     'pad_block',
     'pkcs7_padding',
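
This reshuffle, like the one-line `__all__` changes in yt_dlp/plugins.py and yt_dlp/postprocessor/__init__.py further down, is a plain case-sensitive sort of the export list, presumably driven by ruff's unsorted-dunder-all check (RUF022). Note that uppercase names sort before lowercase in ASCII, which explains the plugins.py ordering. Both results are reproducible with a bare sorted():

# Case-sensitive sorting interleaves the decrypt/encrypt families by full name
print(sorted(['aes_ctr_encrypt', 'aes_ctr_decrypt', 'aes_cbc_encrypt']))
# -> ['aes_cbc_encrypt', 'aes_ctr_decrypt', 'aes_ctr_encrypt']

# ...and puts uppercase constants first, as in the plugins.py hunk below
print(sorted(['directories', 'load_plugins', 'PACKAGE_NAME', 'COMPAT_PACKAGE_NAME']))
# -> ['COMPAT_PACKAGE_NAME', 'PACKAGE_NAME', 'directories', 'load_plugins']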

yt_dlp/cookies.py

@@ -1276,8 +1276,8 @@ def open(self, file, *, write=False):
     def _really_save(self, f, ignore_discard, ignore_expires):
         now = time.time()
         for cookie in self:
-            if (not ignore_discard and cookie.discard
-                    or not ignore_expires and cookie.is_expired(now)):
+            if ((not ignore_discard and cookie.discard)
+                    or (not ignore_expires and cookie.is_expired(now))):
                 continue
             name, value = cookie.name, cookie.value
             if value is None:

yt_dlp/downloader/hls.py

@@ -119,12 +119,12 @@ def real_download(self, filename, info_dict):
             self.to_screen(f'[{self.FD_NAME}] Fragment downloads will be delegated to {real_downloader.get_basename()}')
 
         def is_ad_fragment_start(s):
-            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s
-                    or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',ad'))
+            return ((s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s)
+                    or (s.startswith('#UPLYNK-SEGMENT') and s.endswith(',ad')))
 
         def is_ad_fragment_end(s):
-            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=master' in s
-                    or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',segment'))
+            return ((s.startswith('#ANVATO-SEGMENT-INFO') and 'type=master' in s)
+                    or (s.startswith('#UPLYNK-SEGMENT') and s.endswith(',segment')))
 
         fragments = []

yt_dlp/downloader/youtube_live_chat.py

@@ -123,8 +123,8 @@ def download_and_parse_fragment(url, frag_index, request_data=None, headers=None
                     data,
                     lambda x: x['continuationContents']['liveChatContinuation'], dict) or {}
-                func = (info_dict['protocol'] == 'youtube_live_chat' and parse_actions_live
-                        or frag_index == 1 and try_refresh_replay_beginning
+                func = ((info_dict['protocol'] == 'youtube_live_chat' and parse_actions_live)
+                        or (frag_index == 1 and try_refresh_replay_beginning)
                         or parse_actions_replay)
                 return (True, *func(live_chat_continuation))
             except HTTPError as err:

yt_dlp/extractor/bilibili.py

@@ -662,12 +662,12 @@ def _real_extract(self, url):
         video_id, title = video_data['bvid'], video_data.get('title')
 
         # Bilibili anthologies are similar to playlists but all videos share the same video ID as the anthology itself.
-        page_list_json = not is_festival and traverse_obj(
+        page_list_json = (not is_festival and traverse_obj(
             self._download_json(
                 'https://api.bilibili.com/x/player/pagelist', video_id,
                 fatal=False, query={'bvid': video_id, 'jsonp': 'jsonp'},
                 note='Extracting videos in anthology', headers=headers),
-            'data', expected_type=list) or []
+            'data', expected_type=list)) or []
         is_anthology = len(page_list_json) > 1
 
         part_id = int_or_none(parse_qs(url).get('p', [None])[-1])

yt_dlp/extractor/common.py

@@ -3803,7 +3803,7 @@ def _cookies_passed(self):
     def mark_watched(self, *args, **kwargs):
         if not self.get_param('mark_watched', False):
             return
-        if self.supports_login() and self._get_login_info()[0] is not None or self._cookies_passed:
+        if (self.supports_login() and self._get_login_info()[0] is not None) or self._cookies_passed:
             self._mark_watched(*args, **kwargs)
 
     def _mark_watched(self, *args, **kwargs):

yt_dlp/extractor/funimation.py

@@ -193,9 +193,9 @@ def _real_extract(self, url):
         for lang, version, fmt in self._get_experiences(episode):
             experience_id = str(fmt['experienceId'])
-            if (only_initial_experience and experience_id != initial_experience_id
-                    or requested_languages and lang.lower() not in requested_languages
-                    or requested_versions and version.lower() not in requested_versions):
+            if ((only_initial_experience and experience_id != initial_experience_id)
+                    or (requested_languages and lang.lower() not in requested_languages)
+                    or (requested_versions and version.lower() not in requested_versions)):
                 continue
             thumbnails.append({'url': fmt.get('poster')})
             duration = max(duration, fmt.get('duration', 0))

yt_dlp/extractor/youtube.py

@@ -2925,7 +2925,7 @@ def _extract_sequence_from_mpd(refresh_sequence, immediate):
                 # Obtain from MPD's maximum seq value
                 old_mpd_url = mpd_url
                 last_error = ctx.pop('last_error', None)
-                expire_fast = immediate or last_error and isinstance(last_error, HTTPError) and last_error.status == 403
+                expire_fast = immediate or (last_error and isinstance(last_error, HTTPError) and last_error.status == 403)
                 mpd_url, stream_number, is_live = (mpd_feed(format_id, 5 if expire_fast else 18000)
                                                    or (mpd_url, stream_number, False))
                 if not refresh_sequence:
@@ -3995,8 +3995,8 @@ def append_client(*client_names):
         return prs, player_url
 
     def _needs_live_processing(self, live_status, duration):
-        if (live_status == 'is_live' and self.get_param('live_from_start')
-                or live_status == 'post_live' and (duration or 0) > 2 * 3600):
+        if ((live_status == 'is_live' and self.get_param('live_from_start'))
+                or (live_status == 'post_live' and (duration or 0) > 2 * 3600)):
             return live_status
 
     def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, live_status, duration):
@@ -4192,7 +4192,7 @@ def build_fragments(f):
         skip_manifests = set(self._configuration_arg('skip'))
         if (not self.get_param('youtube_include_hls_manifest', True)
                 or needs_live_processing == 'is_live'  # These will be filtered out by YoutubeDL anyway
-                or needs_live_processing and skip_bad_formats):
+                or (needs_live_processing and skip_bad_formats)):
             skip_manifests.add('hls')
 
         if not self.get_param('youtube_include_dash_manifest', True):
@@ -4390,14 +4390,14 @@ def _real_extract(self, url):
             expected_type=dict)
 
         translated_title = self._get_text(microformats, (..., 'title'))
-        video_title = (self._preferred_lang and translated_title
+        video_title = ((self._preferred_lang and translated_title)
                        or get_first(video_details, 'title')  # primary
                        or translated_title
                        or search_meta(['og:title', 'twitter:title', 'title']))
         translated_description = self._get_text(microformats, (..., 'description'))
         original_description = get_first(video_details, 'shortDescription')
         video_description = (
-            self._preferred_lang and translated_description
+            (self._preferred_lang and translated_description)
             # If original description is blank, it will be an empty string.
             # Do not prefer translated description in this case.
             or original_description if original_description is not None else translated_description)
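
One subtlety the new parentheses do not touch: the trailing conditional expression binds looser than `or`, so `video_description` still reads as `((self._preferred_lang and translated_description) or original_description) if original_description is not None else translated_description`. A stand-alone check of that parse, with throwaway values:

# `A or B if C else D` parses as `(A or B) if C else D`, not `A or (B if C else D)`
assert ('x' or 'y' if False else 'z') == 'z'
assert ('x' or 'y' if True else 'z') == 'x'
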
@@ -6837,7 +6837,7 @@ def _extract_tab_id_and_name(self, tab, base_url='https://www.youtube.com'):
         tab_url = urljoin(base_url, traverse_obj(
             tab, ('endpoint', 'commandMetadata', 'webCommandMetadata', 'url')))
 
-        tab_id = (tab_url and self._get_url_mobj(tab_url)['tab'][1:]
+        tab_id = ((tab_url and self._get_url_mobj(tab_url)['tab'][1:])
                   or traverse_obj(tab, 'tabIdentifier', expected_type=str))
         if tab_id:
             return {

yt_dlp/plugins.py

@@ -183,4 +183,4 @@ def load_plugins(name, suffix):
 sys.meta_path.insert(0, PluginFinder(f'{PACKAGE_NAME}.extractor', f'{PACKAGE_NAME}.postprocessor'))
 
-__all__ = ['directories', 'load_plugins', 'PACKAGE_NAME', 'COMPAT_PACKAGE_NAME']
+__all__ = ['COMPAT_PACKAGE_NAME', 'PACKAGE_NAME', 'directories', 'load_plugins']

yt_dlp/postprocessor/__init__.py

@@ -44,4 +44,4 @@ def get_postprocessor(key):
 globals().update(_PLUGIN_CLASSES)
 
 __all__ = [name for name in globals() if name.endswith('PP')]
-__all__.extend(('PostProcessor', 'FFmpegPostProcessor'))
+__all__.extend(('FFmpegPostProcessor', 'PostProcessor'))

yt_dlp/postprocessor/ffmpeg.py

@@ -626,7 +626,7 @@ def run(self, info):
             sub_ext = sub_info['ext']
             if sub_ext == 'json':
                 self.report_warning('JSON subtitles cannot be embedded')
-            elif ext != 'webm' or ext == 'webm' and sub_ext == 'vtt':
+            elif ext != 'webm' or (ext == 'webm' and sub_ext == 'vtt'):
                 sub_langs.append(lang)
                 sub_names.append(sub_info.get('name'))
                 sub_filenames.append(sub_info['filepath'])

yt_dlp/utils/_utils.py

@@ -2683,8 +2683,8 @@ def merge_dicts(*dicts):
     merged = {}
     for a_dict in dicts:
         for k, v in a_dict.items():
-            if (v is not None and k not in merged
-                    or isinstance(v, str) and merged[k] == ''):
+            if ((v is not None and k not in merged)
+                    or (isinstance(v, str) and merged[k] == '')):
                 merged[k] = v
     return merged
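
The added parentheses make the merge rule easier to read: the first non-None value for a key wins, except that an empty string counts as still missing and may be overwritten by a later dict. A small usage sketch (merge_dicts is importable from yt_dlp.utils):

from yt_dlp.utils import merge_dicts

# An empty string does not block later values for the same key...
assert merge_dicts({'title': ''}, {'title': 'fallback'}) == {'title': 'fallback'}
# ...but any other non-None value does
assert merge_dicts({'title': 'kept'}, {'title': 'later'}) == {'title': 'kept'}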