diff --git a/yt_dlp/extractor/afreecatv.py b/yt_dlp/extractor/afreecatv.py
index b56abb1e6..af0587ae6 100644
--- a/yt_dlp/extractor/afreecatv.py
+++ b/yt_dlp/extractor/afreecatv.py
@@ -257,7 +257,7 @@ def _real_extract(self, url):
             if flag and flag == 'SUCCEED':
                 break
             if flag == 'PARTIAL_ADULT':
-                self._downloader.report_warning(
+                self.report_warning(
                     'In accordance with local laws and regulations, underage users are restricted from watching adult content. '
                     'Only content suitable for all ages will be downloaded. '
                     'Provide account credentials if you wish to download restricted content.')
diff --git a/yt_dlp/extractor/br.py b/yt_dlp/extractor/br.py
index 9bde7f2d8..a3bc3a10a 100644
--- a/yt_dlp/extractor/br.py
+++ b/yt_dlp/extractor/br.py
@@ -114,7 +114,7 @@ def _real_extract(self, url):
             medias.append(media)
 
         if len(medias) > 1:
-            self._downloader.report_warning(
+            self.report_warning(
                 'found multiple medias; please '
                 'report this with the video URL to http://yt-dl.org/bug')
         if not medias:
diff --git a/yt_dlp/extractor/common.py b/yt_dlp/extractor/common.py
index 4d77dba49..40ea9339f 100644
--- a/yt_dlp/extractor/common.py
+++ b/yt_dlp/extractor/common.py
@@ -683,7 +683,7 @@ def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fa
             if fatal:
                 raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
             else:
-                self._downloader.report_warning(errmsg)
+                self.report_warning(errmsg)
                 return False
 
     def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
@@ -1044,7 +1044,7 @@ def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, f
         elif fatal:
             raise RegexNotFoundError('Unable to extract %s' % _name)
         else:
-            self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
+            self.report_warning('unable to extract %s' % _name + bug_reports_message())
             return None
 
     def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
@@ -1072,7 +1072,7 @@ def _get_netrc_login_info(self, netrc_machine=None):
                     raise netrc.NetrcParseError(
                         'No authenticators for %s' % netrc_machine)
             except (IOError, netrc.NetrcParseError) as err:
-                self._downloader.report_warning(
+                self.report_warning(
                     'parsing .netrc: %s' % error_to_compat_str(err))
 
         return username, password
@@ -1247,7 +1247,7 @@ def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
         elif fatal:
             raise RegexNotFoundError('Unable to extract JSON-LD')
         else:
-            self._downloader.report_warning('unable to extract JSON-LD %s' % bug_reports_message())
+            self.report_warning('unable to extract JSON-LD %s' % bug_reports_message())
             return {}
 
     def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
@@ -3203,7 +3203,7 @@ def _int(self, v, name, fatal=False, **kwargs):
             if fatal:
                 raise ExtractorError(msg)
             else:
-                self._downloader.report_warning(msg)
+                self.report_warning(msg)
         return res
 
     def _float(self, v, name, fatal=False, **kwargs):
@@ -3213,7 +3213,7 @@ def _float(self, v, name, fatal=False, **kwargs):
             if fatal:
                 raise ExtractorError(msg)
             else:
-                self._downloader.report_warning(msg)
+                self.report_warning(msg)
         return res
 
     def _set_cookie(self, domain, name, value, expire_time=None, port=None,
@@ -3389,7 +3389,7 @@ def _real_extract(self, query):
             if n <= 0:
                 raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
             elif n > self._MAX_RESULTS:
-                self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
+                self.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
                 n = self._MAX_RESULTS
             return self._get_n_results(query, n)
 
diff --git a/yt_dlp/extractor/deezer.py b/yt_dlp/extractor/deezer.py
index 3031671c1..3b1833c8d 100644
--- a/yt_dlp/extractor/deezer.py
+++ b/yt_dlp/extractor/deezer.py
@@ -14,7 +14,7 @@ class DeezerBaseInfoExtractor(InfoExtractor):
     def get_data(self, url):
         if not self._downloader.params.get('test'):
-            self._downloader.report_warning('For now, this extractor only supports the 30 second previews. Patches welcome!')
+            self.report_warning('For now, this extractor only supports the 30 second previews. Patches welcome!')
 
         mobj = re.match(self._VALID_URL, url)
         data_id = mobj.group('id')
 
diff --git a/yt_dlp/extractor/elonet.py b/yt_dlp/extractor/elonet.py
index c64ab5de3..3647c0a9c 100644
--- a/yt_dlp/extractor/elonet.py
+++ b/yt_dlp/extractor/elonet.py
@@ -79,7 +79,7 @@ def _get_subtitles(self, fmt, doc, url):
         elif fmt == 'mpd':
            subs = self._parse_mpd_subtitles(doc)
         else:
-            self._downloader.report_warning(
+            self.report_warning(
                "Cannot download subtitles from '%s' streams." % (fmt))
            subs = {}
         return subs
diff --git a/yt_dlp/extractor/facebook.py b/yt_dlp/extractor/facebook.py
index 7906e813c..7a76dbb22 100644
--- a/yt_dlp/extractor/facebook.py
+++ b/yt_dlp/extractor/facebook.py
@@ -348,7 +348,7 @@ def _login(self):
                 login_results, 'login error', default=None, group='error')
             if error:
                 raise ExtractorError('Unable to login: %s' % error, expected=True)
-            self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
+            self.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
             return
 
         fb_dtsg = self._search_regex(
@@ -369,9 +369,9 @@ def _login(self):
             check_response = self._download_webpage(check_req, None,
                                                     note='Confirming login')
             if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
-                self._downloader.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
+                self.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err))
+            self.report_warning('unable to log in: %s' % error_to_compat_str(err))
             return
 
     def _real_initialize(self):
diff --git a/yt_dlp/extractor/generic.py b/yt_dlp/extractor/generic.py
index 54cba2f6b..4250d1093 100644
--- a/yt_dlp/extractor/generic.py
+++ b/yt_dlp/extractor/generic.py
@@ -2376,7 +2376,7 @@ def _real_extract(self, url):
 
         if default_search in ('auto', 'auto_warning', 'fixup_error'):
             if re.match(r'^[^\s/]+\.[^\s/]+/', url):
-                self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http')
+                self.report_warning('The url doesn\'t specify the protocol, trying with http')
                 return self.url_result('http://' + url)
             elif default_search != 'fixup_error':
                 if default_search == 'auto_warning':
@@ -2385,7 +2385,7 @@ def _real_extract(self, url):
                             'Invalid URL: %r . Call yt-dlp like this: yt-dlp -v "https://www.youtube.com/watch?v=BaW_jenozKc" ' % url,
                             expected=True)
                     else:
-                        self._downloader.report_warning(
+                        self.report_warning(
                             'Falling back to youtube search for %s . Set --default-search "auto" to suppress this warning.' % url)
                 return self.url_result('ytsearch:' + url)
 
@@ -2461,7 +2461,7 @@ def _real_extract(self, url):
 
         if not self._downloader.params.get('test', False) and not is_intentional:
             force = self._downloader.params.get('force_generic_extractor', False)
-            self._downloader.report_warning(
+            self.report_warning(
                 '%s on generic information extractor.' % ('Forcing' if force else 'Falling back'))
 
         if not full_response:
@@ -2488,7 +2488,7 @@ def _real_extract(self, url):
         # Maybe it's a direct link to a video?
         # Be careful not to download the whole thing!
         if not is_html(first_bytes):
-            self._downloader.report_warning(
+            self.report_warning(
                 'URL could be a direct video link, returning it as such.')
             info_dict.update({
                 'direct': True,
diff --git a/yt_dlp/extractor/iqiyi.py b/yt_dlp/extractor/iqiyi.py
index 0d7cc65d7..e33e23f08 100644
--- a/yt_dlp/extractor/iqiyi.py
+++ b/yt_dlp/extractor/iqiyi.py
@@ -280,7 +280,7 @@ def _login(self):
             msg = 'error %s' % code
             if validation_result.get('msg'):
                 msg += ': ' + validation_result['msg']
-            self._downloader.report_warning('unable to log in: ' + msg)
+            self.report_warning('unable to log in: ' + msg)
             return False
 
         return True
diff --git a/yt_dlp/extractor/lynda.py b/yt_dlp/extractor/lynda.py
index b3d8653d0..06662be1a 100644
--- a/yt_dlp/extractor/lynda.py
+++ b/yt_dlp/extractor/lynda.py
@@ -331,7 +331,7 @@ def _real_extract(self, url):
                 })
 
         if unaccessible_videos > 0:
-            self._downloader.report_warning(
+            self.report_warning(
                 '%s videos are only available for members (or paid members) and will not be downloaded. '
                 % unaccessible_videos + self._ACCOUNT_CREDENTIALS_HINT)
 
diff --git a/yt_dlp/extractor/mildom.py b/yt_dlp/extractor/mildom.py
index 50daaf583..3606f09b4 100644
--- a/yt_dlp/extractor/mildom.py
+++ b/yt_dlp/extractor/mildom.py
@@ -262,7 +262,7 @@ class MildomUserVodIE(MildomBaseIE):
 
     def _real_extract(self, url):
         user_id = self._match_id(url)
-        self._downloader.report_warning('To download ongoing live, please use "https://www.mildom.com/%s" instead. This will list up VODs belonging to user.' % user_id)
+        self.report_warning('To download ongoing live, please use "https://www.mildom.com/%s" instead. This will list up VODs belonging to user.' % user_id)
 
         profile = self._call_api(
             'https://cloudac.mildom.com/nonolive/gappserv/user/profileV2', user_id,
diff --git a/yt_dlp/extractor/niconico.py b/yt_dlp/extractor/niconico.py
index e7aee65c3..84437e450 100644
--- a/yt_dlp/extractor/niconico.py
+++ b/yt_dlp/extractor/niconico.py
@@ -190,7 +190,7 @@ def _login(self):
             if compat_parse_qs(parts.query).get('message', [None])[0] == 'cant_login':
                 login_ok = False
         if not login_ok:
-            self._downloader.report_warning('unable to log in: bad username or password')
+            self.report_warning('unable to log in: bad username or password')
         return login_ok
 
     def _get_heartbeat_info(self, info_dict):
diff --git a/yt_dlp/extractor/soundcloud.py b/yt_dlp/extractor/soundcloud.py
index d72a02dca..103b23bf7 100644
--- a/yt_dlp/extractor/soundcloud.py
+++ b/yt_dlp/extractor/soundcloud.py
@@ -312,7 +312,7 @@ def _download_json(self, *args, **kwargs):
                     self._update_client_id()
                     continue
                 elif non_fatal:
-                    self._downloader.report_warning(error_to_compat_str(e))
+                    self.report_warning(error_to_compat_str(e))
                     return False
                 raise
 
diff --git a/yt_dlp/extractor/vimeo.py b/yt_dlp/extractor/vimeo.py
index 36e92fd1b..61a1d9b68 100644
--- a/yt_dlp/extractor/vimeo.py
+++ b/yt_dlp/extractor/vimeo.py
@@ -804,7 +804,7 @@ def is_rented():
         video_description = self._html_search_meta(
             'description', orig_webpage, default=None)
         if not video_description and not is_player:
-            self._downloader.report_warning('Cannot find video description')
+            self.report_warning('Cannot find video description')
 
         # Extract upload date
         if not timestamp:
diff --git a/yt_dlp/extractor/youtube.py b/yt_dlp/extractor/youtube.py
index 547f5b171..6d5ef0193 100644
--- a/yt_dlp/extractor/youtube.py
+++ b/yt_dlp/extractor/youtube.py
@@ -128,7 +128,7 @@ def req(url, f_req, note, errnote):
                 })
 
         def warn(message):
-            self._downloader.report_warning(message)
+            self.report_warning(message)
 
         lookup_req = [
             username,
@@ -1739,7 +1739,7 @@ def extract_thread(parent_renderer):
                    # See: https://github.com/ytdl-org/youtube-dl/issues/28194
                    last_error = 'Incomplete data received'
                    if count >= retries:
-                        self._downloader.report_error(last_error)
+                        raise ExtractorError(last_error)
 
            if not response:
                break
@@ -3303,7 +3303,7 @@ def _real_extract_alerts():
                        warnings.append([alert_type, alert_message])
 
            for alert_type, alert_message in (warnings + errors[:-1]):
-                self._downloader.report_warning('YouTube said: %s - %s' % (alert_type, alert_message))
+                self.report_warning('YouTube said: %s - %s' % (alert_type, alert_message))
            if errors:
                raise ExtractorError('YouTube said: %s' % errors[-1][1], expected=expected)
 
@@ -3414,7 +3414,7 @@ def _extract_webpage(self, url, item_id):
            if data.get('contents') or data.get('currentVideoEndpoint'):
                break
            if count >= retries:
-                self._downloader.report_error(last_error)
+                raise ExtractorError(last_error)
         return webpage, data
 
     def _real_extract(self, url):
@@ -3426,7 +3426,7 @@ def _real_extract(self, url):
         mobj = re.match(r'(?P<pre>%s)(?P<post>/?(?![^#?]).*$)' % self._VALID_URL, url)
         mobj = mobj.groupdict() if mobj else {}
         if mobj and not mobj.get('not_channel'):
-            self._downloader.report_warning(
+            self.report_warning(
                 'A channel/user page was given. All the channel\'s videos will be downloaded. '
                 'To download only the videos in the home page, add a "/featured" to the URL')
             url = '%s/videos%s' % (mobj.get('pre'), mobj.get('post') or '')
@@ -3441,7 +3441,7 @@ def _real_extract(self, url):
                 # If there is neither video or playlist ids,
                 # youtube redirects to home page, which is undesirable
                 raise ExtractorError('Unable to recognize tab page')
-            self._downloader.report_warning('A video URL was given without video ID. Trying to download playlist %s' % playlist_id)
+            self.report_warning('A video URL was given without video ID. Trying to download playlist %s' % playlist_id)
             url = 'https://www.youtube.com/playlist?list=%s' % playlist_id
 
         if video_id and playlist_id:
@@ -3469,7 +3469,7 @@ def _real_extract(self, url):
             data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
             compat_str) or video_id
         if video_id:
-            self._downloader.report_warning('Unable to recognize playlist. Downloading just video %s' % video_id)
+            self.report_warning('Unable to recognize playlist. Downloading just video %s' % video_id)
             return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
 
         raise ExtractorError('Unable to recognize tab page')