diff --git a/youtube_dl/FileDownloader.py b/youtube_dl/FileDownloader.py
index d7d5b1521..777a52545 100644
--- a/youtube_dl/FileDownloader.py
+++ b/youtube_dl/FileDownloader.py
@@ -330,7 +330,7 @@ def prepare_filename(self, info_dict):
             template_dict['epoch'] = int(time.time())
             template_dict['autonumber'] = u'%05d' % self._num_downloads
 
-            template_dict = dict((k, sanitize_filename(compat_str(v), self.params.get('restrictfilenames'))) for k,v in template_dict.items())
+            template_dict = dict((k, sanitize_filename(u(v), self.params.get('restrictfilenames'))) for k,v in template_dict.items())
             filename = self.params['outtmpl'] % template_dict
             return filename
         except (ValueError, KeyError), err:
@@ -397,7 +397,7 @@ def process_info(self, info_dict):
             if dn != '' and not os.path.exists(dn): # dn is already encoded
                 os.makedirs(dn)
         except (OSError, IOError), err:
-            self.trouble(u'ERROR: unable to create directory ' + unicode(err))
+            self.trouble(u'ERROR: unable to create directory ' + u(err))
             return
 
         if self.params.get('writedescription', False):
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index cea30dad8..0b0885cad 100644
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -238,7 +238,7 @@ def _real_initialize(self):
             else:
                 raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
         except (IOError, netrc.NetrcParseError), err:
-            self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err))
+            self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % u(err))
             return
 
         # Set language
@@ -247,7 +247,7 @@ def _real_initialize(self):
             self.report_lang()
             urllib2.urlopen(request).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.to_stderr(u'WARNING: unable to set language: %s' % compat_str(err))
+            self._downloader.to_stderr(u'WARNING: unable to set language: %s' % u(err))
             return
 
         # No authentication to be performed
@@ -270,7 +270,7 @@ def _real_initialize(self):
                 self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password')
                 return
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err))
+            self._downloader.to_stderr(u'WARNING: unable to log in: %s' % u(err))
             return
 
         # Confirm age
@@ -283,7 +283,7 @@ def _real_initialize(self):
             self.report_age_confirmation()
             age_results = urllib2.urlopen(request).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
+            self._downloader.trouble(u'ERROR: unable to confirm age: %s' % u(err))
             return
 
     def _real_extract(self, url):
@@ -305,7 +305,7 @@ def _real_extract(self, url):
         try:
             video_webpage = urllib2.urlopen(request).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
+            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % u(err))
             return
 
         # Attempt to extract SWF player URL
@@ -327,7 +327,7 @@ def _real_extract(self, url):
                 if 'token' in video_info:
                     break
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
+            self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % u(err))
             return
         if 'token' not in video_info:
             if 'reason' in video_info:
@@ -390,7 +390,7 @@ def _real_extract(self, url):
                 try:
                     srt_list = urllib2.urlopen(request).read()
                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-                    raise Trouble(u'WARNING: unable to download video subtitles: %s' % compat_str(err))
+                    raise Trouble(u'WARNING: unable to download video subtitles: %s' % u(err))
                 srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list)
                 srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list)
                 if not srt_lang_list:
@@ -407,7 +407,7 @@ def _real_extract(self, url):
                 try:
                     srt_xml = urllib2.urlopen(request).read()
                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-                    raise Trouble(u'WARNING: unable to download video subtitles: %s' % compat_str(err))
+                    raise Trouble(u'WARNING: unable to download video subtitles: %s' % u(err))
                 if not srt_xml:
                     raise Trouble(u'WARNING: unable to download video subtitles')
                 video_subtitles = self._closed_captions_xml_to_srt(srt_xml.decode('utf-8'))
@@ -526,7 +526,7 @@ def _real_initialize(self):
             self.report_disclaimer()
             disclaimer = urllib2.urlopen(request).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % compat_str(err))
+            self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % u(err))
             return
 
         # Confirm age
@@ -539,7 +539,7 @@ def _real_initialize(self):
             self.report_age_confirmation()
             disclaimer = urllib2.urlopen(request).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
+            self._downloader.trouble(u'ERROR: unable to confirm age: %s' % u(err))
             return
 
     def _real_extract(self, url):
@@ -563,7 +563,7 @@ def _real_extract(self, url):
             self.report_download_webpage(video_id)
             webpage = urllib2.urlopen(request).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % compat_str(err))
+            self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % u(err))
             return
 
         # Extract URL, uploader and title from webpage
@@ -656,7 +656,7 @@ def _real_extract(self, url):
             self.report_download_webpage(video_id)
             webpage = urllib2.urlopen(request).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % compat_str(err))
+            self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % u(err))
             return
 
         # Extract URL, uploader and title from webpage
@@ -754,7 +754,7 @@ def _real_extract(self, url):
             self.report_download_webpage(video_id)
             webpage = urllib2.urlopen(request).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % u(err))
             return
 
         # Extract URL, uploader, and title from webpage
@@ -793,7 +793,7 @@ def _real_extract(self, url):
         try:
             webpage = urllib2.urlopen(request).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % u(err))
             return
         mobj = re.search(r'', webpage)
         if mobj is None:
@@ -849,7 +849,7 @@ def _real_extract(self, url):
             self.report_download_webpage(video_id)
             webpage = urllib2.urlopen(request).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % u(err))
             return
 
         # Extract URL, uploader, and title from webpage
@@ -919,7 +919,7 @@ def _real_extract(self, url, new_video=True):
         try:
             webpage = urllib2.urlopen(request).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % u(err))
             return
 
         mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
@@ -943,7 +943,7 @@ def _real_extract(self, url, new_video=True):
             self.report_download_webpage(video_id)
             webpage = urllib2.urlopen(request).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % u(err))
             return
 
         # Extract uploader and title from webpage
@@ -1001,7 +1001,7 @@ def _real_extract(self, url, new_video=True):
             self.report_download_webpage(video_id)
             webpage = urllib2.urlopen(request).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % u(err))
             return
 
         # Extract media URL from playlist XML
@@ -1059,7 +1059,7 @@ def _real_extract(self, url, new_video=True):
             self.report_download_webpage(video_id)
             webpage = urllib2.urlopen(request).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % u(err))
             return
 
         # Now we begin extracting as much information as we can from what we
@@ -1362,7 +1362,7 @@ def _real_extract(self, url):
             self.report_download_webpage(video_id)
             webpage = urllib2.urlopen(request).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % u(err))
             return
         except ValueError, err:
             # since this is the last-resort InfoExtractor, if
@@ -1483,7 +1483,7 @@ def _download_n_results(self, query, n):
             try:
                 data = urllib2.urlopen(request).read()
             except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-                self._downloader.trouble(u'ERROR: unable to download API page: %s' % compat_str(err))
+                self._downloader.trouble(u'ERROR: unable to download API page: %s' % u(err))
                 return
             api_response = json.loads(data)['data']
 
@@ -1560,7 +1560,7 @@ def _download_n_results(self, query, n):
             try:
                 page = urllib2.urlopen(request).read()
             except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % u(err))
                 return
 
             # Extract video identifiers
@@ -1643,7 +1643,7 @@ def _download_n_results(self, query, n):
             try:
                 page = urllib2.urlopen(request).read()
             except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % u(err))
                 return
 
             # Extract video identifiers
@@ -1713,7 +1713,7 @@ def _real_extract(self, url):
             try:
                 page = urllib2.urlopen(request).read()
             except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % u(err))
                 return
 
             # Extract video identifiers
@@ -1770,7 +1770,7 @@ def _real_extract(self, url):
             try:
                 page = urllib2.urlopen(request).read()
             except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % u(err))
                 return
 
             # Extract video identifiers
@@ -1833,7 +1833,7 @@ def _real_extract(self, url):
             try:
                 page = urllib2.urlopen(request).read()
             except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % u(err))
                 return
 
             # Extract video identifiers
@@ -1905,7 +1905,7 @@ def _real_extract(self, url):
             mobj = re.search(r'data-users-id="([^"]+)"', page)
             page_base = page_base % mobj.group(1)
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+            self._downloader.trouble(u'ERROR: unable to download webpage: %s' % u(err))
             return
 
@@ -1993,7 +1993,7 @@ def _real_extract(self, url):
             self.report_download_webpage(file_id)
             webpage = urllib2.urlopen(request).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % compat_str(err))
+            self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % u(err))
             return
 
         # Search for the real file URL
@@ -2110,7 +2110,7 @@ def _real_initialize(self):
             else:
                 raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
         except (IOError, netrc.NetrcParseError), err:
-            self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err))
+            self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % u(err))
            return
 
         if useremail is None:
@@ -2130,7 +2130,7 @@ def _real_initialize(self):
                 self._downloader.to_stderr(u'WARNING: unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.')
                 return
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err))
+            self._downloader.to_stderr(u'WARNING: unable to log in: %s' % u(err))
             return
 
     def _real_extract(self, url):
@@ -2147,7 +2147,7 @@ def _real_extract(self, url):
             page = urllib2.urlopen(request)
             video_webpage = page.read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
+            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % u(err))
             return
 
         # Start extracting information
@@ -2281,13 +2281,13 @@ def _real_extract(self, url):
                     'urlhandle': urlh
                 }
             except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-                self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
+                self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % u(err))
                 return
         if info is None: # Regular URL
             try:
                 json_code = urlh.read()
             except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-                self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % compat_str(err))
+                self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % u(err))
                 return
 
             try:
@@ -2355,7 +2355,7 @@ def _real_extract(self,url):
             self.report_download_webpage(video_id)
             webpage = urllib2.urlopen(request).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % u(err))
             return
 
         self.report_extraction(video_id)
@@ -2454,7 +2454,7 @@ def _real_extract(self, url):
             htmlHandle = urllib2.urlopen(req)
             html = htmlHandle.read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable to download webpage: %s' % unicode(err))
+            self._downloader.trouble(u'ERROR: unable to download webpage: %s' % u(err))
             return
         if dlNewest:
             url = htmlHandle.geturl()
@@ -2487,7 +2487,7 @@ def _real_extract(self, url):
                 urlHandle = urllib2.urlopen(playerUrl_raw)
                 playerUrl = urlHandle.geturl()
             except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-                self._downloader.trouble(u'ERROR: unable to find out player URL: ' + unicode(err))
+                self._downloader.trouble(u'ERROR: unable to find out player URL: ' + u(err))
                 return
 
         uri = mMovieParams[0][1]
@@ -2496,7 +2496,7 @@ def _real_extract(self, url):
         try:
             indexXml = urllib2.urlopen(indexUrl).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable to download episode index: ' + unicode(err))
+            self._downloader.trouble(u'ERROR: unable to download episode index: ' + u(err))
             return
 
         results = []
@@ -2517,7 +2517,7 @@ def _real_extract(self, url):
             try:
                 configXml = urllib2.urlopen(configReq).read()
             except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % unicode(err))
+                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % u(err))
                 return
 
             cdoc = xml.etree.ElementTree.fromstring(configXml)
@@ -2600,7 +2600,7 @@ def _real_extract(self, url):
             m = re.match(r'text/html; charset="?([^"]+)"?', webPage.headers['Content-Type'])
             webPage = webPageBytes.decode(m.group(1) if m else 'utf-8')
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable to download webpage: ' + unicode(err))
+            self._downloader.trouble(u'ERROR: unable to download webpage: ' + u(err))
             return
 
         descMatch = re.search('[0-9]+)"', webpage)
@@ -2692,7 +2692,7 @@ def _real_extract(self, url):
         try:
             metaXml = urllib2.urlopen(xmlUrl).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
+            self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % u(err))
             return
 
         mdoc = xml.etree.ElementTree.fromstring(metaXml)
@@ -2738,7 +2738,7 @@ def _real_extract(self, url):
         try:
             webpage = urllib2.urlopen(request).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
+            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % u(err))
             return
 
         self.report_extraction(video_id)
@@ -2824,7 +2824,7 @@ def _real_extract(self, url):
         try:
             webpage = urllib2.urlopen(request).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
+            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % u(err))
             return
 
         self.report_extraction('%s/%s' % (uploader, slug_title))
@@ -2859,7 +2859,7 @@ def _real_extract(self, url):
         try:
             upload_date = datetime.datetime.strptime(mobj.group(1), '%B %d, %Y %H:%M').strftime('%Y%m%d')
         except Exception, e:
-            self._downloader.to_stderr(compat_str(e))
+            self._downloader.to_stderr(u(e))
 
         # for soundcloud, a request to a cross domain is required for cookies
         request = urllib2.Request('http://media.soundcloud.com/crossdomain.xml', std_headers)
@@ -2903,7 +2903,7 @@ def _real_extract(self, url):
         try:
             webpage = urllib2.urlopen(request).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
+            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % u(err))
             return
 
         self.report_extraction(url)
@@ -3017,7 +3017,7 @@ def _real_extract(self, url):
             self.report_download_json(file_url)
             jsonData = urllib2.urlopen(request).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: Unable to retrieve file: %s' % compat_str(err))
+            self._downloader.trouble(u'ERROR: Unable to retrieve file: %s' % u(err))
             return
 
         # parse JSON
@@ -3094,7 +3094,7 @@ def _real_extract(self, url):
         try:
             metaXml = urllib2.urlopen(xmlUrl).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % unicode(err))
+            self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % u(err))
             return
         mdoc = xml.etree.ElementTree.fromstring(metaXml)
         try:
@@ -3117,7 +3117,7 @@ def _real_extract(self, url):
         try:
             coursepage = urllib2.urlopen(url).read()
         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable to download course info page: ' + unicode(err))
+            self._downloader.trouble(u'ERROR: unable to download course info page: ' + u(err))
             return
 
         m = re.search('
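
Note: every hunk above swaps compat_str(err) or unicode(err) for u(err), so the patched modules presumably have a u() text-coercion helper in scope; its definition is not part of these hunks. As a rough, hypothetical sketch only (the helper's real name, location, and body are assumptions, not shown in this diff), such a shim for the Python-2-era codebase could look like:

    import sys

    if sys.version_info < (3, 0):
        def u(value):
            # Hypothetical sketch: coerce any object (e.g. a caught exception)
            # to unicode text on Python 2 so it can be mixed into u'' messages.
            if isinstance(value, unicode):
                return value
            if isinstance(value, str):
                return value.decode('utf-8', 'replace')
            return unicode(value)
    else:
        # On Python 3 every str is already text.
        u = str

With a helper along those lines, u(err) yields a printable text form of an exception on either Python major version, which matches how the WARNING/ERROR messages in the error paths above are formatted.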