Use except .. as everywhere (#180)

Philipp Hagemeister 2012-11-27 23:31:55 +01:00
parent 96731798db
commit e08bee320e
4 changed files with 84 additions and 84 deletions
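
The change is purely mechanical: every handler that bound the caught exception with the Python 2-only comma form ("except SomeError, err:") now uses the "as" form, which is accepted by Python 2.6+ and by Python 3. A minimal sketch of the before/after pattern follows; the value and message are illustrative only and are not taken from the diff.

    # Old spelling, removed by this commit (a SyntaxError on Python 3):
    #     except (TypeError, ValueError), err:
    # New spelling, valid on Python 2.6+ and Python 3:
    try:
        retries = int('not a number')  # illustrative value, not from the diff
    except (TypeError, ValueError) as err:
        print('invalid retry count specified: %s' % err)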

View File

@@ -247,7 +247,7 @@ class FileDownloader(object):
 if old_filename == new_filename:
 return
 os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
-except (IOError, OSError), err:
+except (IOError, OSError) as err:
 self.trouble(u'ERROR: unable to rename file')
 def try_utime(self, filename, last_modified_hdr):
@@ -305,7 +305,7 @@ class FileDownloader(object):
 """Report file has already been fully downloaded."""
 try:
 self.to_screen(u'[download] %s has already been downloaded' % file_name)
-except (UnicodeEncodeError), err:
+except (UnicodeEncodeError) as err:
 self.to_screen(u'[download] The file has already been downloaded')
 def report_unable_to_resume(self):
@@ -336,7 +336,7 @@ class FileDownloader(object):
 filename = self.params['outtmpl'] % template_dict
 return filename
-except (ValueError, KeyError), err:
+except (ValueError, KeyError) as err:
 self.trouble(u'ERROR: invalid system charset or erroneous output template')
 return None
@@ -402,7 +402,7 @@ class FileDownloader(object):
 dn = os.path.dirname(encodeFilename(filename))
 if dn != '' and not os.path.exists(dn): # dn is already encoded
 os.makedirs(dn)
-except (OSError, IOError), err:
+except (OSError, IOError) as err:
 self.trouble(u'ERROR: unable to create directory ' + compat_str(err))
 return
@@ -459,19 +459,19 @@ class FileDownloader(object):
 else:
 try:
 success = self._do_download(filename, info_dict)
-except (OSError, IOError), err:
+except (OSError, IOError) as err:
 raise UnavailableVideoError
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self.trouble(u'ERROR: unable to download video data: %s' % str(err))
 return
-except (ContentTooShortError, ), err:
+except (ContentTooShortError, ) as err:
 self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
 return
 if success:
 try:
 self.post_process(filename, info_dict)
-except (PostProcessingError), err:
+except (PostProcessingError) as err:
 self.trouble(u'ERROR: postprocessing: %s' % str(err))
 return
@@ -612,7 +612,7 @@ class FileDownloader(object):
 data = info_dict['urlhandle']
 data = urllib2.urlopen(request)
 break
-except (urllib2.HTTPError, ), err:
+except (urllib2.HTTPError, ) as err:
 if (err.code < 500 or err.code >= 600) and err.code != 416:
 # Unexpected HTTP error
 raise
@@ -622,7 +622,7 @@ class FileDownloader(object):
 # Open the connection again without the range header
 data = urllib2.urlopen(basic_request)
 content_length = data.info()['Content-Length']
-except (urllib2.HTTPError, ), err:
+except (urllib2.HTTPError, ) as err:
 if err.code < 500 or err.code >= 600:
 raise
 else:
@@ -676,12 +676,12 @@ class FileDownloader(object):
 assert stream is not None
 filename = self.undo_temp_name(tmpfilename)
 self.report_destination(filename)
-except (OSError, IOError), err:
+except (OSError, IOError) as err:
 self.trouble(u'ERROR: unable to open for writing: %s' % str(err))
 return False
 try:
 stream.write(data_block)
-except (IOError, OSError), err:
+except (IOError, OSError) as err:
 self.trouble(u'\nERROR: unable to write data: %s' % str(err))
 return False
 if not self.params.get('noresizebuffer', False):

View File

@@ -252,7 +252,7 @@ class YoutubeIE(InfoExtractor):
 password = info[2]
 else:
 raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
-except (IOError, netrc.NetrcParseError), err:
+except (IOError, netrc.NetrcParseError) as err:
 self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err))
 return
@@ -261,7 +261,7 @@ class YoutubeIE(InfoExtractor):
 try:
 self.report_lang()
 urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.to_stderr(u'WARNING: unable to set language: %s' % compat_str(err))
 return
@@ -284,7 +284,7 @@ class YoutubeIE(InfoExtractor):
 if re.search(r'(?i)<form[^>]* name="loginForm"', login_results) is not None:
 self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password')
 return
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err))
 return
@@ -297,7 +297,7 @@ class YoutubeIE(InfoExtractor):
 try:
 self.report_age_confirmation()
 age_results = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
 return
@@ -319,7 +319,7 @@ class YoutubeIE(InfoExtractor):
 request = urllib2.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id)
 try:
 video_webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
 return
@@ -341,7 +341,7 @@ class YoutubeIE(InfoExtractor):
 video_info = parse_qs(video_info_webpage)
 if 'token' in video_info:
 break
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
 return
 if 'token' not in video_info:
@@ -404,7 +404,7 @@ class YoutubeIE(InfoExtractor):
 request = urllib2.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
 try:
 srt_list = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 raise Trouble(u'WARNING: unable to download video subtitles: %s' % compat_str(err))
 srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list)
 srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list)
@@ -421,7 +421,7 @@ class YoutubeIE(InfoExtractor):
 request = urllib2.Request('http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % (srt_lang, srt_lang_list[srt_lang], video_id))
 try:
 srt_xml = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 raise Trouble(u'WARNING: unable to download video subtitles: %s' % compat_str(err))
 if not srt_xml:
 raise Trouble(u'WARNING: unable to download video subtitles')
@@ -543,7 +543,7 @@ class MetacafeIE(InfoExtractor):
 try:
 self.report_disclaimer()
 disclaimer = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % compat_str(err))
 return
@@ -556,7 +556,7 @@ class MetacafeIE(InfoExtractor):
 try:
 self.report_age_confirmation()
 disclaimer = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
 return
@@ -580,7 +580,7 @@ class MetacafeIE(InfoExtractor):
 try:
 self.report_download_webpage(video_id)
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % compat_str(err))
 return
@@ -671,7 +671,7 @@ class DailymotionIE(InfoExtractor):
 try:
 self.report_download_webpage(video_id)
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % compat_str(err))
 return
@@ -767,7 +767,7 @@ class GoogleIE(InfoExtractor):
 try:
 self.report_download_webpage(video_id)
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 return
@@ -806,7 +806,7 @@ class GoogleIE(InfoExtractor):
 request = urllib2.Request('http://video.google.com/videosearch?q=%s+site:video.google.com&hl=en' % abs(int(video_id)))
 try:
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 return
 mobj = re.search(r'<img class=thumbnail-img (?:.* )?src=(http.*)>', webpage)
@@ -860,7 +860,7 @@ class PhotobucketIE(InfoExtractor):
 try:
 self.report_download_webpage(video_id)
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 return
@@ -928,7 +928,7 @@ class YahooIE(InfoExtractor):
 request = urllib2.Request(url)
 try:
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 return
@@ -952,7 +952,7 @@ class YahooIE(InfoExtractor):
 try:
 self.report_download_webpage(video_id)
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 return
@@ -1010,7 +1010,7 @@ class YahooIE(InfoExtractor):
 try:
 self.report_download_webpage(video_id)
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 return
@@ -1066,7 +1066,7 @@ class VimeoIE(InfoExtractor):
 try:
 self.report_download_webpage(video_id)
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 return
@@ -1172,10 +1172,10 @@ class ArteTvIE(InfoExtractor):
 try:
 self.report_download_webpage(url)
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 return
-except ValueError, err:
+except ValueError as err:
 self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
 return
 return webpage
@@ -1368,10 +1368,10 @@ class GenericIE(InfoExtractor):
 try:
 self.report_download_webpage(video_id)
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 return
-except ValueError, err:
+except ValueError as err:
 # since this is the last-resort InfoExtractor, if
 # this error is thrown, it'll be thrown here
 self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
@@ -1487,7 +1487,7 @@ class YoutubeSearchIE(InfoExtractor):
 request = urllib2.Request(result_url)
 try:
 data = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download API page: %s' % compat_str(err))
 return
 api_response = json.loads(data)['data']
@@ -1564,7 +1564,7 @@ class GoogleSearchIE(InfoExtractor):
 request = urllib2.Request(result_url)
 try:
 page = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
 return
@@ -1647,7 +1647,7 @@ class YahooSearchIE(InfoExtractor):
 request = urllib2.Request(result_url)
 try:
 page = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
 return
@@ -1717,7 +1717,7 @@ class YoutubePlaylistIE(InfoExtractor):
 request = urllib2.Request(url)
 try:
 page = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
 return
@@ -1774,7 +1774,7 @@ class YoutubeChannelIE(InfoExtractor):
 request = urllib2.Request(url)
 try:
 page = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
 return
@@ -1837,7 +1837,7 @@ class YoutubeUserIE(InfoExtractor):
 try:
 page = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
 return
@@ -1909,7 +1909,7 @@ class BlipTVUserIE(InfoExtractor):
 page = urllib2.urlopen(request).read().decode('utf-8')
 mobj = re.search(r'data-users-id="([^"]+)"', page)
 page_base = page_base % mobj.group(1)
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
 return
@@ -1929,7 +1929,7 @@ class BlipTVUserIE(InfoExtractor):
 try:
 page = urllib2.urlopen(request).read().decode('utf-8')
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
 return
@@ -1997,7 +1997,7 @@ class DepositFilesIE(InfoExtractor):
 try:
 self.report_download_webpage(file_id)
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % compat_str(err))
 return
@@ -2113,7 +2113,7 @@ class FacebookIE(InfoExtractor):
 password = info[2]
 else:
 raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
-except (IOError, netrc.NetrcParseError), err:
+except (IOError, netrc.NetrcParseError) as err:
 self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err))
 return
@@ -2133,7 +2133,7 @@ class FacebookIE(InfoExtractor):
 if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
 self._downloader.to_stderr(u'WARNING: unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.')
 return
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err))
 return
@@ -2150,7 +2150,7 @@ class FacebookIE(InfoExtractor):
 try:
 page = urllib2.urlopen(request)
 video_webpage = page.read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
 return
@@ -2285,13 +2285,13 @@ class BlipTVIE(InfoExtractor):
 'ext': ext,
 'urlhandle': urlh
 }
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
 return
 if info is None: # Regular URL
 try:
 json_code = urlh.read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % compat_str(err))
 return
@@ -2321,7 +2321,7 @@ class BlipTVIE(InfoExtractor):
 'description': data['description'],
 'player_url': data['embedUrl']
 }
-except (ValueError,KeyError), err:
+except (ValueError,KeyError) as err:
 self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err))
 return
@@ -2359,7 +2359,7 @@ class MyVideoIE(InfoExtractor):
 try:
 self.report_download_webpage(video_id)
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 return
@@ -2456,7 +2456,7 @@ class ComedyCentralIE(InfoExtractor):
 try:
 htmlHandle = urllib2.urlopen(req)
 html = htmlHandle.read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
 return
 if dlNewest:
@@ -2489,7 +2489,7 @@ class ComedyCentralIE(InfoExtractor):
 try:
 urlHandle = urllib2.urlopen(playerUrl_raw)
 playerUrl = urlHandle.geturl()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to find out player URL: ' + compat_str(err))
 return
@@ -2498,7 +2498,7 @@ class ComedyCentralIE(InfoExtractor):
 self.report_index_download(epTitle)
 try:
 indexXml = urllib2.urlopen(indexUrl).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download episode index: ' + compat_str(err))
 return
@@ -2519,7 +2519,7 @@ class ComedyCentralIE(InfoExtractor):
 self.report_config_download(epTitle)
 try:
 configXml = urllib2.urlopen(configReq).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
 return
@@ -2602,7 +2602,7 @@ class EscapistIE(InfoExtractor):
 webPageBytes = webPage.read()
 m = re.match(r'text/html; charset="?([^"]+)"?', webPage.headers['Content-Type'])
 webPage = webPageBytes.decode(m.group(1) if m else 'utf-8')
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download webpage: ' + compat_str(err))
 return
@@ -2618,7 +2618,7 @@ class EscapistIE(InfoExtractor):
 self.report_config_download(showName)
 try:
 configJSON = urllib2.urlopen(configUrl).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download configuration: ' + compat_str(err))
 return
@@ -2627,7 +2627,7 @@ class EscapistIE(InfoExtractor):
 try:
 config = json.loads(configJSON)
-except (ValueError,), err:
+except (ValueError,) as err:
 self._downloader.trouble(u'ERROR: Invalid JSON in configuration file: ' + compat_str(err))
 return
@@ -2674,7 +2674,7 @@ class CollegeHumorIE(InfoExtractor):
 request = urllib2.Request(url)
 try:
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
 return
@@ -2695,7 +2695,7 @@ class CollegeHumorIE(InfoExtractor):
 xmlUrl = 'http://www.collegehumor.com/moogaloop/video:' + internal_video_id
 try:
 metaXml = urllib2.urlopen(xmlUrl).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
 return
@@ -2740,7 +2740,7 @@ class XVideosIE(InfoExtractor):
 request = urllib2.Request(r'http://www.xvideos.com/video' + video_id)
 try:
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
 return
@@ -2824,7 +2824,7 @@ class SoundcloudIE(InfoExtractor):
 request = urllib2.Request('http://soundcloud.com/%s/%s' % (uploader, slug_title))
 try:
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
 return
@@ -2901,7 +2901,7 @@ class InfoQIE(InfoExtractor):
 request = urllib2.Request(url)
 try:
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
 return
@@ -2980,7 +2980,7 @@ class MixcloudIE(InfoExtractor):
 try:
 urllib2.urlopen(url)
 return url
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 url = None
 return None
@@ -3013,7 +3013,7 @@ class MixcloudIE(InfoExtractor):
 try:
 self.report_download_json(file_url)
 jsonData = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve file: %s' % compat_str(err))
 return
@@ -3092,7 +3092,7 @@ class StanfordOpenClassroomIE(InfoExtractor):
 xmlUrl = baseUrl + video + '.xml'
 try:
 metaXml = urllib2.urlopen(xmlUrl).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
 return
 mdoc = xml.etree.ElementTree.fromstring(metaXml)
@@ -3116,7 +3116,7 @@ class StanfordOpenClassroomIE(InfoExtractor):
 self.report_download_webpage(info['id'])
 try:
 coursepage = urllib2.urlopen(url).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download course info page: ' + compat_str(err))
 return
@@ -3155,7 +3155,7 @@ class StanfordOpenClassroomIE(InfoExtractor):
 rootURL = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php'
 try:
 rootpage = urllib2.urlopen(rootURL).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download course info page: ' + compat_str(err))
 return
@@ -3202,7 +3202,7 @@ class MTVIE(InfoExtractor):
 request = urllib2.Request(url)
 try:
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
 return
@@ -3235,7 +3235,7 @@ class MTVIE(InfoExtractor):
 request = urllib2.Request(videogen_url)
 try:
 metadataXml = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video metadata: %s' % compat_str(err))
 return
@@ -3417,7 +3417,7 @@ class XNXXIE(InfoExtractor):
 # Get webpage content
 try:
 webpage = urllib2.urlopen(url).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err)
 return
@@ -3497,7 +3497,7 @@ class GooglePlusIE(InfoExtractor):
 request = urllib2.Request(post_url)
 try:
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve entry webpage: %s' % compat_str(err))
 return
@@ -3539,7 +3539,7 @@ class GooglePlusIE(InfoExtractor):
 request = urllib2.Request(video_page)
 try:
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 return
 self.report_extract_vid_page(video_page)

View File

@@ -74,7 +74,7 @@ def updateSelf(downloader, filename):
 urlh.close()
 with open(exe + '.new', 'wb') as outf:
 outf.write(newcontent)
-except (IOError, OSError), err:
+except (IOError, OSError) as err:
 sys.exit('ERROR: unable to download latest version')
 try:
@@ -89,7 +89,7 @@ del "%s"
 b.close()
 os.startfile(bat)
-except (IOError, OSError), err:
+except (IOError, OSError) as err:
 sys.exit('ERROR: unable to overwrite current version')
 else:
@@ -97,13 +97,13 @@ del "%s"
 urlh = urllib2.urlopen(UPDATE_URL)
 newcontent = urlh.read()
 urlh.close()
-except (IOError, OSError), err:
+except (IOError, OSError) as err:
 sys.exit('ERROR: unable to download latest version')
 try:
 with open(filename, 'wb') as outf:
 outf.write(newcontent)
-except (IOError, OSError), err:
+except (IOError, OSError) as err:
 sys.exit('ERROR: unable to overwrite current version')
 downloader.to_screen(u'Updated youtube-dl. Restart youtube-dl to use the new version.')
@@ -386,7 +386,7 @@ def _real_main():
 jar = cookielib.MozillaCookieJar(opts.cookiefile)
 if os.path.isfile(opts.cookiefile) and os.access(opts.cookiefile, os.R_OK):
 jar.load()
-except (IOError, OSError), err:
+except (IOError, OSError) as err:
 sys.exit(u'ERROR: unable to open cookie file')
 # Set user agent
 if opts.user_agent is not None:
@@ -394,7 +394,7 @@ def _real_main():
 # Dump user agent
 if opts.dump_user_agent:
-print std_headers['User-Agent']
+print(std_headers['User-Agent'])
 sys.exit(0)
 # Batch file verification
@@ -450,7 +450,7 @@ def _real_main():
 if opts.retries is not None:
 try:
 opts.retries = int(opts.retries)
-except (TypeError, ValueError), err:
+except (TypeError, ValueError) as err:
 parser.error(u'invalid retry count specified')
 if opts.buffersize is not None:
 numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
@@ -461,13 +461,13 @@ def _real_main():
 opts.playliststart = int(opts.playliststart)
 if opts.playliststart <= 0:
 raise ValueError(u'Playlist start must be positive')
-except (TypeError, ValueError), err:
+except (TypeError, ValueError) as err:
 parser.error(u'invalid playlist start number specified')
 try:
 opts.playlistend = int(opts.playlistend)
 if opts.playlistend != -1 and (opts.playlistend <= 0 or opts.playlistend < opts.playliststart):
 raise ValueError(u'Playlist end must be greater than playlist start')
-except (TypeError, ValueError), err:
+except (TypeError, ValueError) as err:
 parser.error(u'invalid playlist end number specified')
 if opts.extractaudio:
 if opts.audioformat not in ['best', 'aac', 'mp3', 'vorbis', 'm4a', 'wav']:
@@ -559,7 +559,7 @@ def _real_main():
 if opts.cookiefile is not None:
 try:
 jar.save()
-except (IOError, OSError), err:
+except (IOError, OSError) as err:
 sys.exit(u'ERROR: unable to save cookie jar')
 sys.exit(retcode)

View File

@@ -177,7 +177,7 @@ def sanitize_open(filename, open_mode):
 return (sys.stdout, filename)
 stream = open(encodeFilename(filename), open_mode)
 return (stream, filename)
-except (IOError, OSError), err:
+except (IOError, OSError) as err:
 # In case of error, try to remove win32 forbidden chars
 filename = re.sub(ur'[/<>:"\|\?\*]', u'#', filename)