Move Metacafe and Statigram into their own files, and remove absolute import
parent 93d3a642a9
commit 38cbc40a64

4 changed files with 145 additions and 128 deletions
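For orientation, here is a minimal sketch (not part of the commit) of how the two relocated extractors can be exercised once they live in their own modules. The import paths are taken from the diff below; suitable() is assumed to be the usual InfoExtractor classmethod that matches a URL against the class's _VALID_URL.

# Hypothetical check script, not part of this commit.
from youtube_dl.extractor.metacafe import MetacafeIE
from youtube_dl.extractor.statigram import StatigramIE

for url in (
    'http://www.metacafe.com/watch/some-id/some-title/',
    'http://statigr.am/p/some-id',
):
    for ie in (MetacafeIE, StatigramIE):
        if ie.suitable(url):  # regex match against the class's _VALID_URL
            print('%s -> %s' % (url, ie.__name__))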
@@ -24,106 +24,12 @@ from .utils import *

 from .extractor.common import InfoExtractor, SearchInfoExtractor
+from .extractor.metacafe import MetacafeIE
+from .extractor.statigram import StatigramIE
 from .extractor.youtube import YoutubeIE, YoutubePlaylistIE, YoutubeUserIE, YoutubeChannelIE


-class MetacafeIE(InfoExtractor):
-    """Information Extractor for metacafe.com."""
-
-    _VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
-    _DISCLAIMER = 'http://www.metacafe.com/family_filter/'
-    _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
-    IE_NAME = u'metacafe'
-
-    def report_disclaimer(self):
-        """Report disclaimer retrieval."""
-        self.to_screen(u'Retrieving disclaimer')
-
-    def _real_initialize(self):
-        # Retrieve disclaimer
-        request = compat_urllib_request.Request(self._DISCLAIMER)
-        try:
-            self.report_disclaimer()
-            disclaimer = compat_urllib_request.urlopen(request).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            raise ExtractorError(u'Unable to retrieve disclaimer: %s' % compat_str(err))
-
-        # Confirm age
-        disclaimer_form = {
-            'filters': '0',
-            'submit': "Continue - I'm over 18",
-        }
-        request = compat_urllib_request.Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form))
-        try:
-            self.report_age_confirmation()
-            disclaimer = compat_urllib_request.urlopen(request).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            raise ExtractorError(u'Unable to confirm age: %s' % compat_str(err))
-
-    def _real_extract(self, url):
-        # Extract id and simplified title from URL
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        video_id = mobj.group(1)
-
-        # Check if video comes from YouTube
-        mobj2 = re.match(r'^yt-(.*)$', video_id)
-        if mobj2 is not None:
-            return [self.url_result('http://www.youtube.com/watch?v=%s' % mobj2.group(1), 'Youtube')]
-
-        # Retrieve video webpage to extract further information
-        webpage = self._download_webpage('http://www.metacafe.com/watch/%s/' % video_id, video_id)
-
-        # Extract URL, uploader and title from webpage
-        self.report_extraction(video_id)
-        mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
-        if mobj is not None:
-            mediaURL = compat_urllib_parse.unquote(mobj.group(1))
-            video_extension = mediaURL[-3:]
-
-            # Extract gdaKey if available
-            mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
-            if mobj is None:
-                video_url = mediaURL
-            else:
-                gdaKey = mobj.group(1)
-                video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
-        else:
-            mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
-            if mobj is None:
-                raise ExtractorError(u'Unable to extract media URL')
-            vardict = compat_parse_qs(mobj.group(1))
-            if 'mediaData' not in vardict:
-                raise ExtractorError(u'Unable to extract media URL')
-            mobj = re.search(r'"mediaURL":"(?P<mediaURL>http.*?)",(.*?)"key":"(?P<key>.*?)"', vardict['mediaData'][0])
-            if mobj is None:
-                raise ExtractorError(u'Unable to extract media URL')
-            mediaURL = mobj.group('mediaURL').replace('\\/', '/')
-            video_extension = mediaURL[-3:]
-            video_url = '%s?__gda__=%s' % (mediaURL, mobj.group('key'))
-
-        mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
-        if mobj is None:
-            raise ExtractorError(u'Unable to extract title')
-        video_title = mobj.group(1).decode('utf-8')
-
-        mobj = re.search(r'submitter=(.*?);', webpage)
-        if mobj is None:
-            raise ExtractorError(u'Unable to extract uploader nickname')
-        video_uploader = mobj.group(1)
-
-        return [{
-            'id': video_id.decode('utf-8'),
-            'url': video_url.decode('utf-8'),
-            'uploader': video_uploader.decode('utf-8'),
-            'upload_date': None,
-            'title': video_title,
-            'ext': video_extension.decode('utf-8'),
-        }]
-
-
 class DailymotionIE(InfoExtractor):
     """Information Extractor for Dailymotion"""
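The fallback branch of _real_extract above digs the media URL out of the player's flashvars: parse the query string, pull the mediaData blob, then regex out mediaURL and key. Below is a self-contained illustration of that step, run on a fabricated flashvars payload; modern urllib.parse names stand in for the compat_* wrappers.

import re
from urllib.parse import parse_qs

# Fabricated flashvars payload shaped like the one the regexes above expect.
flashvars = ('mediaData=%7B%22mediaURL%22%3A%22http%3A%5C%2F%5C%2Fexample.invalid'
             '%5C%2Fclip.flv%22%2C%22key%22%3A%22abc123%22%7D&otherParam=1')

vardict = parse_qs(flashvars)
media_data = vardict['mediaData'][0]  # '{"mediaURL":"http:\/\/example.invalid\/clip.flv","key":"abc123"}'
mobj = re.search(r'"mediaURL":"(?P<mediaURL>http.*?)",(.*?)"key":"(?P<key>.*?)"', media_data)
media_url = mobj.group('mediaURL').replace('\\/', '/')      # undo the JSON-escaped slashes
video_url = '%s?__gda__=%s' % (media_url, mobj.group('key'))
print(video_url)  # http://example.invalid/clip.flv?__gda__=abc123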
@@ -3621,37 +3527,6 @@ class GametrailersIE(InfoExtractor):
             'description': video_description,
         }

-class StatigramIE(InfoExtractor):
-    _VALID_URL = r'(?:http://)?(?:www\.)?statigr\.am/p/([^/]+)'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-
-        video_id = mobj.group(1)
-        webpage = self._download_webpage(url, video_id)
-        video_url = self._html_search_regex(
-            r'<meta property="og:video:secure_url" content="(.+?)">',
-            webpage, u'video URL')
-        thumbnail_url = self._html_search_regex(
-            r'<meta property="og:image" content="(.+?)" />',
-            webpage, u'thumbnail URL', fatal=False)
-        html_title = self._html_search_regex(
-            r'<title>(.+?)</title>',
-            webpage, u'title')
-        title = html_title.rpartition(u' | Statigram')[0]
-        uploader_id = self._html_search_regex(
-            r'@([^ ]+)', title, u'uploader name', fatal=False)
-        ext = 'mp4'
-
-        return [{
-            'id': video_id,
-            'url': video_url,
-            'ext': ext,
-            'title': title,
-            'thumbnail': thumbnail_url,
-            'uploader_id' : uploader_id
-        }]
-
 def gen_extractors():
     """ Return a list of an instance of every supported extractor.
     The order does matter; the first extractor matched is the one handling the URL.
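The gen_extractors() context above is the registry that ties all of these classes together, and its docstring stresses that order matters. A hypothetical helper showing that first-match-wins lookup; it assumes gen_extractors() returns instantiated extractors exposing suitable(), as the docstring and the common base class suggest.

# Hypothetical helper, not part of this commit.
def find_ie(url, extractors):
    """Return the first extractor whose pattern claims the URL.

    Order matters: the first match wins, per the gen_extractors() docstring.
    """
    for ie in extractors:
        if ie.suitable(url):
            return ie
    return None

# e.g. find_ie('http://statigr.am/p/some-id', gen_extractors())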
youtube_dl/extractor/metacafe.py  (new file, 110 lines)
@@ -0,0 +1,110 @@
+import re
+import socket
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_http_client,
+    compat_parse_qs,
+    compat_urllib_error,
+    compat_urllib_parse,
+    compat_urllib_request,
+    compat_str,
+
+    ExtractorError,
+)
+
+class MetacafeIE(InfoExtractor):
+    """Information Extractor for metacafe.com."""
+
+    _VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
+    _DISCLAIMER = 'http://www.metacafe.com/family_filter/'
+    _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
+    IE_NAME = u'metacafe'
+
+    def report_disclaimer(self):
+        """Report disclaimer retrieval."""
+        self.to_screen(u'Retrieving disclaimer')
+
+    def _real_initialize(self):
+        # Retrieve disclaimer
+        request = compat_urllib_request.Request(self._DISCLAIMER)
+        try:
+            self.report_disclaimer()
+            compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            raise ExtractorError(u'Unable to retrieve disclaimer: %s' % compat_str(err))
+
+        # Confirm age
+        disclaimer_form = {
+            'filters': '0',
+            'submit': "Continue - I'm over 18",
+        }
+        request = compat_urllib_request.Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form))
+        try:
+            self.report_age_confirmation()
+            compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            raise ExtractorError(u'Unable to confirm age: %s' % compat_str(err))
+
+    def _real_extract(self, url):
+        # Extract id and simplified title from URL
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+
+        video_id = mobj.group(1)
+
+        # Check if video comes from YouTube
+        mobj2 = re.match(r'^yt-(.*)$', video_id)
+        if mobj2 is not None:
+            return [self.url_result('http://www.youtube.com/watch?v=%s' % mobj2.group(1), 'Youtube')]
+
+        # Retrieve video webpage to extract further information
+        webpage = self._download_webpage('http://www.metacafe.com/watch/%s/' % video_id, video_id)
+
+        # Extract URL, uploader and title from webpage
+        self.report_extraction(video_id)
+        mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
+        if mobj is not None:
+            mediaURL = compat_urllib_parse.unquote(mobj.group(1))
+            video_extension = mediaURL[-3:]
+
+            # Extract gdaKey if available
+            mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
+            if mobj is None:
+                video_url = mediaURL
+            else:
+                gdaKey = mobj.group(1)
+                video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
+        else:
+            mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
+            if mobj is None:
+                raise ExtractorError(u'Unable to extract media URL')
+            vardict = compat_parse_qs(mobj.group(1))
+            if 'mediaData' not in vardict:
+                raise ExtractorError(u'Unable to extract media URL')
+            mobj = re.search(r'"mediaURL":"(?P<mediaURL>http.*?)",(.*?)"key":"(?P<key>.*?)"', vardict['mediaData'][0])
+            if mobj is None:
+                raise ExtractorError(u'Unable to extract media URL')
+            mediaURL = mobj.group('mediaURL').replace('\\/', '/')
+            video_extension = mediaURL[-3:]
+            video_url = '%s?__gda__=%s' % (mediaURL, mobj.group('key'))
+
+        mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
+        if mobj is None:
+            raise ExtractorError(u'Unable to extract title')
+        video_title = mobj.group(1).decode('utf-8')
+
+        mobj = re.search(r'submitter=(.*?);', webpage)
+        if mobj is None:
+            raise ExtractorError(u'Unable to extract uploader nickname')
+        video_uploader = mobj.group(1)
+
+        return [{
+            'id': video_id.decode('utf-8'),
+            'url': video_url.decode('utf-8'),
+            'uploader': video_uploader.decode('utf-8'),
+            'upload_date': None,
+            'title': video_title,
+            'ext': video_extension.decode('utf-8'),
+        }]
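The _real_initialize method above amounts to two plain HTTP round-trips: fetch the family-filter disclaimer page, then POST the confirmation form so later page requests are unfiltered. Below is a standalone sketch of that handshake using only the modern standard library; the endpoints and form fields are copied from the code above, the cookie-based persistence of the consent is an assumption, and the site may no longer serve these URLs.

import urllib.parse
import urllib.request

# URLs and form fields copied from MetacafeIE above; the cookie jar is an
# assumption about how the age-gate consent is carried between requests.
DISCLAIMER = 'http://www.metacafe.com/family_filter/'
FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'

def confirm_age():
    """Replay the extractor's age-gate handshake: GET the disclaimer page,
    then POST the "over 18" form so later requests see unfiltered pages."""
    opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor())
    opener.open(DISCLAIMER).read()              # step 1: retrieve disclaimer
    form = urllib.parse.urlencode({
        'filters': '0',
        'submit': "Continue - I'm over 18",
    }).encode('utf-8')
    opener.open(FILTER_POST, data=form).read()  # step 2: confirm age
    return opener                               # reuse for subsequent page fetches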
youtube_dl/extractor/statigram.py  (new file, 33 lines)
@@ -0,0 +1,33 @@
+import re
+
+from .common import InfoExtractor
+
+class StatigramIE(InfoExtractor):
+    _VALID_URL = r'(?:http://)?(?:www\.)?statigr\.am/p/([^/]+)'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group(1)
+        webpage = self._download_webpage(url, video_id)
+        video_url = self._html_search_regex(
+            r'<meta property="og:video:secure_url" content="(.+?)">',
+            webpage, u'video URL')
+        thumbnail_url = self._html_search_regex(
+            r'<meta property="og:image" content="(.+?)" />',
+            webpage, u'thumbnail URL', fatal=False)
+        html_title = self._html_search_regex(
+            r'<title>(.+?)</title>',
+            webpage, u'title')
+        title = html_title.rpartition(u' | Statigram')[0]
+        uploader_id = self._html_search_regex(
+            r'@([^ ]+)', title, u'uploader name', fatal=False)
+        ext = 'mp4'
+
+        return [{
+            'id': video_id,
+            'url': video_url,
+            'ext': ext,
+            'title': title,
+            'thumbnail': thumbnail_url,
+            'uploader_id' : uploader_id
+        }]
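StatigramIE relies entirely on the page's og: meta tags and the "... | Statigram" title suffix. The snippet below replays that parsing on a made-up HTML fragment shaped like what the regexes above expect; the values are invented.

import re

SAMPLE_PAGE = '''<html><head>
<title>Photo by @someuser | Statigram</title>
<meta property="og:image" content="http://example.invalid/thumb.jpg" />
<meta property="og:video:secure_url" content="https://example.invalid/clip.mp4">
</head><body></body></html>'''

video_url = re.search(
    r'<meta property="og:video:secure_url" content="(.+?)">', SAMPLE_PAGE).group(1)
thumbnail = re.search(
    r'<meta property="og:image" content="(.+?)" />', SAMPLE_PAGE).group(1)
html_title = re.search(r'<title>(.+?)</title>', SAMPLE_PAGE).group(1)
title = html_title.rpartition(' | Statigram')[0]      # strip the site suffix
uploader_id = re.search(r'@([^ ]+)', title).group(1)  # "@user" embedded in the title
print(video_url, thumbnail, title, uploader_id)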
@@ -1,5 +1,4 @@
 # coding: utf-8
-from __future__ import absolute_import

 import json
 import netrc