raise exceptions on warnings during tests - and solve a couple of them
parent 476203d025
commit be95cac157
2 changed files with 23 additions and 28 deletions
test/test_download.py
@@ -40,18 +40,12 @@ def _try_rm(filename):
 class FileDownloader(youtube_dl.FileDownloader):
     def __init__(self, *args, **kwargs):
-        self._to_stderr = self.to_stderr
         self.to_stderr = self.to_screen
         self.processed_info_dicts = []
         return youtube_dl.FileDownloader.__init__(self, *args, **kwargs)
     def report_warning(self, message):
-        # let warnings pass to output
-        if sys.stderr.isatty() and os.name != 'nt':
-            _msg_header=u'\033[0;33mWARNING:\033[0m'
-        else:
-            _msg_header=u'WARNING:'
-        warning_message=u'%s %s' % (_msg_header,message)
-        self._to_stderr(warning_message)
+        # Don't accept warnings during tests
+        raise ExtractorError(message)
     def process_info(self, info_dict):
         self.processed_info_dicts.append(info_dict)
         return youtube_dl.FileDownloader.process_info(self, info_dict)
 
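A minimal sketch of what the new behaviour means for a test run (the warning text and the params dict are placeholders, and imports are omitted; FileDownloader and ExtractorError are the ones used in the diff above): a warning emitted during a test download is now raised instead of printed, so the test fails.

fd = FileDownloader(params)  # params: whatever options dict the test suite normally passes
try:
    fd.report_warning(u'unable to extract description')  # made-up warning text
except ExtractorError as err:
    print(u'test would now fail with: %s' % err)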
youtube_dl/InfoExtractors.py
@@ -3161,7 +3161,7 @@ class GooglePlusIE(InfoExtractor):
     }]
 
 class NBAIE(InfoExtractor):
-    _VALID_URL = r'^(?:https?://)?(?:watch\.|www\.)?nba\.com/(?:nba/)?video(/[^?]*)(\?.*)?$'
+    _VALID_URL = r'^(?:https?://)?(?:watch\.|www\.)?nba\.com/(?:nba/)?video(/[^?]*?)(?:/index\.html)?(?:\?.*)?$'
     IE_NAME = u'nba'
 
     def _real_extract(self, url):
@@ -3170,8 +3170,6 @@ class NBAIE(InfoExtractor):
             raise ExtractorError(u'Invalid URL: %s' % url)
 
         video_id = mobj.group(1)
-        if video_id.endswith('/index.html'):
-            video_id = video_id[:-len('/index.html')]
 
         webpage = self._download_webpage(url, video_id)
 
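Why the endswith('/index.html') strip is no longer needed: in the new _VALID_URL, group 1 is non-greedy and a trailing /index.html is matched outside the group, so both URL forms yield the same video_id. A quick check (the URL paths below are made up for illustration):

import re

_VALID_URL = r'^(?:https?://)?(?:watch\.|www\.)?nba\.com/(?:nba/)?video(/[^?]*?)(?:/index\.html)?(?:\?.*)?$'

for url in (u'http://www.nba.com/video/games/example/recap.nba/index.html',
            u'http://www.nba.com/video/games/example/recap.nba'):
    print(re.match(_VALID_URL, url).group(1))  # '/games/example/recap.nba' in both cases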
@@ -3181,7 +3179,8 @@ class NBAIE(InfoExtractor):
         title = self._search_regex(r'<meta property="og:title" content="(.*?)"',
             webpage, 'title', default=shortened_video_id).replace('NBA.com: ', '')
 
-        uploader_date = self._search_regex(r'<b>Date:</b> (.*?)</div>', webpage, 'upload_date', fatal=False)
+        # It isn't there in the HTML it returns to us
+        # uploader_date = self._search_regex(r'<b>Date:</b> (.*?)</div>', webpage, 'upload_date', fatal=False)
 
         description = self._search_regex(r'<meta name="description" (?:content|value)="(.*?)" />', webpage, 'description', fatal=False)
 
@@ -3190,7 +3189,7 @@ class NBAIE(InfoExtractor):
             'url': video_url,
             'ext': 'mp4',
             'title': title,
-            'uploader_date': uploader_date,
+            # 'uploader_date': uploader_date,
             'description': description,
         }
         return [info]
@@ -3541,19 +3540,22 @@ class YouPornIE(InfoExtractor):
         req.add_header('Cookie', 'age_verified=1')
         webpage = self._download_webpage(req, video_id)
 
-        # Get the video title
-        video_title = self._search_regex(r'<h1.*?>(?P<title>.*)</h1>',
-            webpage, u'title').strip()
+        # Get JSON parameters
+        json_params = self._search_regex(r'var currentVideo = new Video\((.*)\);', webpage, u'JSON parameters')
+        try:
+            params = json.loads(json_params)
+        except:
+            raise ExtractorError(u'Invalid JSON')
 
-        # Get the video date
-        upload_date = self._search_regex(r'Date:</label>(?P<date>.*) </li>',
-            webpage, u'upload date', fatal=False)
-        if upload_date: upload_date = unified_strdate(upload_date.strip())
-
-        # Get the video uploader
-        video_uploader = self._search_regex(r'Submitted:</label>(?P<uploader>.*)</li>',
-            webpage, u'uploader', fatal=False)
-        if video_uploader: video_uploader = clean_html(video_uploader.strip())
+        self.report_extraction(video_id)
+        try:
+            video_title = params['title']
+            upload_date = unified_strdate(params['release_date_f'])
+            video_description = params['description']
+            video_uploader = params['submitted_by']
+            thumbnail = params['thumbnails'][0]['image']
+        except KeyError:
+            raise ExtractorError('Missing JSON parameter: ' + sys.exc_info()[1])
 
         # Get all of the formats available
         DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>'
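For reference, a sketch of the new extraction path in isolation (the currentVideo snippet below is invented and far smaller than the object the real page embeds):

import json
import re

webpage = u'var currentVideo = new Video({"title": "Example clip", "submitted_by": "someone"});'
json_params = re.search(r'var currentVideo = new Video\((.*)\);', webpage).group(1)
params = json.loads(json_params)
print(params['title'])         # Example clip
print(params['submitted_by'])  # someone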
@@ -3592,9 +3594,8 @@ class YouPornIE(InfoExtractor):
                 'title': title,
                 'ext': extension,
                 'format': format,
-                'thumbnail': None,
-                'description': None,
-                'player_url': None
+                'thumbnail': thumbnail,
+                'description': video_description
             })
 
         if self._downloader.params.get('listformats', None):