Merge branch 'master' of github.com:rg3/youtube-dl
commit 05358deeca

1 changed file with 216 additions and 120 deletions
@@ -1,27 +1,29 @@
 from __future__ import unicode_literals

 import re
-import json
 import itertools

 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urllib_parse_urlparse,
     compat_urlparse,
 )
 from ..utils import (
-    ExtractorError,
     find_xpath_attr,
-    int_or_none,
-    orderedSet,
+    xpath_attr,
     xpath_with_ns,
+    xpath_text,
+    orderedSet,
+    int_or_none,
+    float_or_none,
+    parse_iso8601,
+    determine_ext,
 )


 class LivestreamIE(InfoExtractor):
     IE_NAME = 'livestream'
-    _VALID_URL = r'https?://(?:new\.)?livestream\.com/.*?/(?P<event_name>.*?)(/videos/(?P<id>[0-9]+)(?:/player)?)?/?(?:$|[?#])'
+    _VALID_URL = r'https?://(?:new\.)?livestream\.com/(?:accounts/(?P<account_id>\d+)|(?P<account_name>[^/]+))/(?:events/(?P<event_id>\d+)|(?P<event_name>[^/]+))(?:/videos/(?P<id>\d+))?'
     _TESTS = [{
         'url': 'http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370',
         'md5': '53274c76ba7754fb0e8d072716f2292b',
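The reworked _VALID_URL is the core of this hunk: it now recognizes the numeric accounts/<id>/events/<id> API-style paths as well as the older name-based paths, with an optional /videos/<id> suffix. A quick standalone sketch of how the named groups come out (pattern copied from the diff; the first URL is the test URL above, the second is a hypothetical API-style example):

import re

# Pattern copied from the new LivestreamIE._VALID_URL above.
_VALID_URL = r'https?://(?:new\.)?livestream\.com/(?:accounts/(?P<account_id>\d+)|(?P<account_name>[^/]+))/(?:events/(?P<event_id>\d+)|(?P<event_name>[^/]+))(?:/videos/(?P<id>\d+))?'

for url in (
    'http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370',  # from _TESTS
    'http://livestream.com/accounts/12345/events/67890',                      # hypothetical API-style URL
):
    mobj = re.match(_VALID_URL, url)
    # unmatched alternatives come back as None, so keep only the groups that hit
    print({k: v for k, v in mobj.groupdict().items() if v is not None})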
@@ -29,7 +31,9 @@ class LivestreamIE(InfoExtractor):
             'id': '4719370',
             'ext': 'mp4',
             'title': 'Live from Webster Hall NYC',
+            'timestamp': 1350008072,
             'upload_date': '20121012',
+            'duration': 5968.0,
             'like_count': int,
             'view_count': int,
             'thumbnail': 're:^http://.*\.jpg$'
@@ -55,39 +59,20 @@ class LivestreamIE(InfoExtractor):
         'url': 'http://livestream.com/bsww/concacafbeachsoccercampeonato2015',
         'only_matching': True,
     }]
+    _API_URL_TEMPLATE = 'http://livestream.com/api/accounts/%s/events/%s'

-    def _parse_smil(self, video_id, smil_url):
+    def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
+        base_ele = find_xpath_attr(
+            smil, self._xpath_ns('.//meta', namespace), 'name', 'httpBase')
+        base = base_ele.get('content') if base_ele else 'http://livestreamvod-f.akamaihd.net/'
+
         formats = []
-        _SWITCH_XPATH = (
-            './/{http://www.w3.org/2001/SMIL20/Language}body/'
-            '{http://www.w3.org/2001/SMIL20/Language}switch')
-        smil_doc = self._download_xml(
-            smil_url, video_id,
-            note='Downloading SMIL information',
-            errnote='Unable to download SMIL information',
-            fatal=False)
-        if smil_doc is False:  # Download failed
-            return formats
-        title_node = find_xpath_attr(
-            smil_doc, './/{http://www.w3.org/2001/SMIL20/Language}meta',
-            'name', 'title')
-        if title_node is None:
-            self.report_warning('Cannot find SMIL id')
-            switch_node = smil_doc.find(_SWITCH_XPATH)
-        else:
-            title_id = title_node.attrib['content']
-            switch_node = find_xpath_attr(
-                smil_doc, _SWITCH_XPATH, 'id', title_id)
-        if switch_node is None:
-            raise ExtractorError('Cannot find switch node')
-        video_nodes = switch_node.findall(
-            '{http://www.w3.org/2001/SMIL20/Language}video')
+        video_nodes = smil.findall(self._xpath_ns('.//video', namespace))

         for vn in video_nodes:
-            tbr = int_or_none(vn.attrib.get('system-bitrate'))
+            tbr = int_or_none(vn.attrib.get('system-bitrate'), 1000)
             furl = (
-                'http://livestream-f.akamaihd.net/%s?v=3.0.3&fp=WIN%%2014,0,0,145' %
-                (vn.attrib['src']))
+                '%s%s?v=3.0.3&fp=WIN%%2014,0,0,145' % (base, vn.attrib['src']))
             if 'clipBegin' in vn.attrib:
                 furl += '&ssek=' + vn.attrib['clipBegin']
             formats.append({
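The _parse_smil_formats override added here maps each SMIL <video> node to an Akamai progressive URL (httpBase + src + a fixed query string) and scales system-bitrate down to kbit/s. A rough standalone sketch of that transformation, using a made-up SMIL snippet under the SMIL 2.0 namespace that the removed code referenced, without the InfoExtractor plumbing:

import xml.etree.ElementTree as ET

# Made-up SMIL document; the namespace is the SMIL 2.0 one the removed code referenced.
SMIL = '''<smil xmlns="http://www.w3.org/2001/SMIL20/Language">
  <head><meta name="httpBase" content="http://livestreamvod-f.akamaihd.net/"/></head>
  <body><switch>
    <video src="events/0000001/sample_1500.mp4" system-bitrate="1500000"/>
    <video src="events/0000001/sample_500.mp4" system-bitrate="500000"/>
  </switch></body>
</smil>'''

NS = '{http://www.w3.org/2001/SMIL20/Language}'
smil = ET.fromstring(SMIL)
base_ele = next((m for m in smil.iter(NS + 'meta') if m.get('name') == 'httpBase'), None)
base = base_ele.get('content') if base_ele is not None else 'http://livestreamvod-f.akamaihd.net/'

for vn in smil.iter(NS + 'video'):
    tbr = int(vn.get('system-bitrate')) // 1000   # same scaling as int_or_none(..., 1000)
    furl = '%s%s?v=3.0.3&fp=WIN%%2014,0,0,145' % (base, vn.get('src'))
    print(tbr, furl)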
@@ -106,97 +91,149 @@ class LivestreamIE(InfoExtractor):
             ('sd', 'progressive_url'),
             ('hd', 'progressive_url_hd'),
         )
-        formats = [{
-            'format_id': format_id,
-            'url': video_data[key],
-            'quality': i + 1,
-        } for i, (format_id, key) in enumerate(FORMAT_KEYS)
-            if video_data.get(key)]
+        formats = []
+        for format_id, key in FORMAT_KEYS:
+            video_url = video_data.get(key)
+            if video_url:
+                ext = determine_ext(video_url)
+                bitrate = int_or_none(self._search_regex(
+                    r'(\d+)\.%s' % ext, video_url, 'bitrate', default=None))
+                formats.append({
+                    'url': video_url,
+                    'format_id': format_id,
+                    'tbr': bitrate,
+                    'ext': ext,
+                })

         smil_url = video_data.get('smil_url')
         if smil_url:
-            formats.extend(self._parse_smil(video_id, smil_url))
+            smil_formats = self._extract_smil_formats(smil_url, video_id)
+            if smil_formats:
+                formats.extend(smil_formats)
+
+        m3u8_url = video_data.get('m3u8_url')
+        if m3u8_url:
+            m3u8_formats = self._extract_m3u8_formats(
+                m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)
+            if m3u8_formats:
+                formats.extend(m3u8_formats)
+
+        f4m_url = video_data.get('f4m_url')
+        if f4m_url:
+            f4m_formats = self._extract_f4m_formats(
+                f4m_url, video_id, f4m_id='hds', fatal=False)
+            if f4m_formats:
+                formats.extend(f4m_formats)
         self._sort_formats(formats)

+        comments = [{
+            'author_id': comment.get('author_id'),
+            'author': comment.get('author', {}).get('full_name'),
+            'id': comment.get('id'),
+            'text': comment['text'],
+            'timestamp': parse_iso8601(comment.get('created_at')),
+        } for comment in video_data.get('comments', {}).get('data', [])]
+
         return {
             'id': video_id,
             'formats': formats,
             'title': video_data['caption'],
+            'description': video_data.get('description'),
             'thumbnail': video_data.get('thumbnail_url'),
-            'upload_date': video_data['updated_at'].replace('-', '')[:8],
+            'duration': float_or_none(video_data.get('duration'), 1000),
+            'timestamp': parse_iso8601(video_data.get('publish_at')),
             'like_count': video_data.get('likes', {}).get('total'),
+            'comment_count': video_data.get('comments', {}).get('total'),
             'view_count': video_data.get('views'),
+            'comments': comments,
         }

-    def _extract_event(self, info):
-        event_id = compat_str(info['id'])
-        account = compat_str(info['owner_account_id'])
-        root_url = (
-            'https://new.livestream.com/api/accounts/{account}/events/{event}/'
-            'feed.json'.format(account=account, event=event_id))
+    def _extract_stream_info(self, stream_info):
+        broadcast_id = stream_info['broadcast_id']
+        is_live = stream_info.get('is_live')

-        def _extract_videos():
-            last_video = None
-            for i in itertools.count(1):
-                if last_video is None:
-                    info_url = root_url
-                else:
-                    info_url = '{root}?&id={id}&newer=-1&type=video'.format(
-                        root=root_url, id=last_video)
-                videos_info = self._download_json(info_url, event_id, 'Downloading page {0}'.format(i))['data']
-                videos_info = [v['data'] for v in videos_info if v['type'] == 'video']
-                if not videos_info:
-                    break
-                for v in videos_info:
-                    yield self._extract_video_info(v)
-                last_video = videos_info[-1]['id']
-
-        return self.playlist_result(_extract_videos(), event_id, info['full_name'])
+        formats = []
+        smil_url = stream_info.get('play_url')
+        if smil_url:
+            smil_formats = self._extract_smil_formats(smil_url, broadcast_id)
+            if smil_formats:
+                formats.extend(smil_formats)
+
+        entry_protocol = 'm3u8' if is_live else 'm3u8_native'
+        m3u8_url = stream_info.get('m3u8_url')
+        if m3u8_url:
+            m3u8_formats = self._extract_m3u8_formats(
+                m3u8_url, broadcast_id, 'mp4', entry_protocol, m3u8_id='hls', fatal=False)
+            if m3u8_formats:
+                formats.extend(m3u8_formats)
+
+        rtsp_url = stream_info.get('rtsp_url')
+        if rtsp_url:
+            formats.append({
+                'url': rtsp_url,
+                'format_id': 'rtsp',
+            })
+        self._sort_formats(formats)
+
+        return {
+            'id': broadcast_id,
+            'formats': formats,
+            'title': self._live_title(stream_info['stream_title']) if is_live else stream_info['stream_title'],
+            'thumbnail': stream_info.get('thumbnail_url'),
+            'is_live': is_live,
+        }
+
+    def _extract_event(self, event_data):
+        event_id = compat_str(event_data['id'])
+        account_id = compat_str(event_data['owner_account_id'])
+        feed_root_url = self._API_URL_TEMPLATE % (account_id, event_id) + '/feed.json'
+
+        stream_info = event_data.get('stream_info')
+        if stream_info:
+            return self._extract_stream_info(stream_info)
+
+        last_video = None
+        entries = []
+        for i in itertools.count(1):
+            if last_video is None:
+                info_url = feed_root_url
+            else:
+                info_url = '{root}?&id={id}&newer=-1&type=video'.format(
+                    root=feed_root_url, id=last_video)
+            videos_info = self._download_json(
+                info_url, event_id, 'Downloading page {0}'.format(i))['data']
+            videos_info = [v['data'] for v in videos_info if v['type'] == 'video']
+            if not videos_info:
+                break
+            for v in videos_info:
+                entries.append(self.url_result(
+                    'http://livestream.com/accounts/%s/events/%s/videos/%s' % (account_id, event_id, v['id']),
+                    'Livestream', v['id'], v['caption']))
+            last_video = videos_info[-1]['id']
+
+        return self.playlist_result(entries, event_id, event_data['full_name'])

     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
-        event_name = mobj.group('event_name')
-        webpage = self._download_webpage(url, video_id or event_name)
-        og_video = self._og_search_video_url(
-            webpage, 'player url', fatal=False, default=None)
-        if og_video is not None:
-            query_str = compat_urllib_parse_urlparse(og_video).query
-            query = compat_urlparse.parse_qs(query_str)
-            if 'play_url' in query:
-                api_url = query['play_url'][0].replace('.smil', '')
-                info = json.loads(self._download_webpage(
-                    api_url, video_id, 'Downloading video info'))
-                return self._extract_video_info(info)
-
-        config_json = self._search_regex(
-            r'window.config = ({.*?});', webpage, 'window config')
-        info = json.loads(config_json)['event']
-
-        def is_relevant(vdata, vid):
-            result = vdata['type'] == 'video'
-            if video_id is not None:
-                result = result and compat_str(vdata['data']['id']) == vid
-            return result
-
-        if video_id is None:
-            # This is an event page:
-            return self._extract_event(info)
+        event = mobj.group('event_id') or mobj.group('event_name')
+        account = mobj.group('account_id') or mobj.group('account_name')
+        api_url = self._API_URL_TEMPLATE % (account, event)
+        if video_id:
+            video_data = self._download_json(
+                api_url + '/videos/%s' % video_id, video_id)
+            return self._extract_video_info(video_data)
         else:
-            videos = [self._extract_video_info(video_data['data'])
-                      for video_data in info['feed']['data']
-                      if is_relevant(video_data, video_id)]
-            if not videos:
-                raise ExtractorError('Cannot find video %s' % video_id)
-            return videos[0]
+            event_data = self._download_json(api_url, video_id)
+            return self._extract_event(event_data)


 # The original version of Livestream uses a different system
 class LivestreamOriginalIE(InfoExtractor):
     IE_NAME = 'livestream:original'
     _VALID_URL = r'''(?x)https?://original\.livestream\.com/
-        (?P<user>[^/]+)/(?P<type>video|folder)
-        (?:\?.*?Id=|/)(?P<id>.*?)(&|$)
+        (?P<user>[^/\?#]+)(?:/(?P<type>video|folder)
+        (?:(?:\?.*?Id=|/)(?P<id>.*?)(&|$))?)?
         '''
     _TESTS = [{
         'url': 'http://original.livestream.com/dealbook/video?clipId=pla_8aa4a3f1-ba15-46a4-893b-902210e138fb',
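With _API_URL_TEMPLATE in place, _real_extract no longer scrapes window.config from the event page; it queries the JSON API directly, appending /videos/<id> for a single video and /feed.json (with id/newer parameters for later pages) when walking an event feed. A minimal sketch of the URL construction only, no HTTP, using hypothetical ids:

# Hypothetical account/event/video ids; the template is the one added in this commit.
_API_URL_TEMPLATE = 'http://livestream.com/api/accounts/%s/events/%s'

account, event, video_id, last_video = '12345', '67890', '4719370', '4719371'

api_url = _API_URL_TEMPLATE % (account, event)
video_url = api_url + '/videos/%s' % video_id       # metadata for one video
feed_url = api_url + '/feed.json'                    # first page of the event feed
next_page = '{root}?&id={id}&newer=-1&type=video'.format(root=feed_url, id=last_video)

print(video_url)
print(feed_url)
print(next_page)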
@@ -204,6 +241,8 @@ class LivestreamOriginalIE(InfoExtractor):
             'id': 'pla_8aa4a3f1-ba15-46a4-893b-902210e138fb',
             'ext': 'mp4',
             'title': 'Spark 1 (BitCoin) with Cameron Winklevoss & Tyler Winklevoss of Winklevoss Capital',
+            'duration': 771.301,
+            'view_count': int,
         },
     }, {
         'url': 'https://original.livestream.com/newplay/folder?dirId=a07bf706-d0e4-4e75-a747-b021d84f2fd3',
@@ -211,26 +250,62 @@ class LivestreamOriginalIE(InfoExtractor):
             'id': 'a07bf706-d0e4-4e75-a747-b021d84f2fd3',
         },
         'playlist_mincount': 4,
+    }, {
+        # live stream
+        'url': 'http://www.livestream.com/znsbahamas',
+        'only_matching': True,
     }]

-    def _extract_video(self, user, video_id):
-        api_url = 'http://x{0}x.api.channel.livestream.com/2.0/clipdetails?extendedInfo=true&id={1}'.format(user, video_id)
+    def _extract_video_info(self, user, video_id):
+        api_url = 'http://x%sx.api.channel.livestream.com/2.0/clipdetails?extendedInfo=true&id=%s' % (user, video_id)

         info = self._download_xml(api_url, video_id)
-        # this url is used on mobile devices
-        stream_url = 'http://x{0}x.api.channel.livestream.com/3.0/getstream.json?id={1}'.format(user, video_id)
-        stream_info = self._download_json(stream_url, video_id)
         item = info.find('channel').find('item')
-        ns = {'media': 'http://search.yahoo.com/mrss'}
-        thumbnail_url = item.find(xpath_with_ns('media:thumbnail', ns)).attrib['url']
+        title = xpath_text(item, 'title')
+        media_ns = {'media': 'http://search.yahoo.com/mrss'}
+        thumbnail_url = xpath_attr(
+            item, xpath_with_ns('media:thumbnail', media_ns), 'url')
+        duration = float_or_none(xpath_attr(
+            item, xpath_with_ns('media:content', media_ns), 'duration'))
+        ls_ns = {'ls': 'http://api.channel.livestream.com/2.0'}
+        view_count = int_or_none(xpath_text(
+            item, xpath_with_ns('ls:viewsCount', ls_ns)))

         return {
             'id': video_id,
-            'title': item.find('title').text,
-            'url': stream_info['progressiveUrl'],
+            'title': title,
             'thumbnail': thumbnail_url,
+            'duration': duration,
+            'view_count': view_count,
         }

+    def _extract_video_formats(self, video_data, video_id, entry_protocol):
+        formats = []
+
+        progressive_url = video_data.get('progressiveUrl')
+        if progressive_url:
+            formats.append({
+                'url': progressive_url,
+                'format_id': 'http',
+            })
+
+        m3u8_url = video_data.get('httpUrl')
+        if m3u8_url:
+            m3u8_formats = self._extract_m3u8_formats(
+                m3u8_url, video_id, 'mp4', entry_protocol, m3u8_id='hls', fatal=False)
+            if m3u8_formats:
+                formats.extend(m3u8_formats)
+
+        rtsp_url = video_data.get('rtspUrl')
+        if rtsp_url:
+            formats.append({
+                'url': rtsp_url,
+                'format_id': 'rtsp',
+            })
+
+        self._sort_formats(formats)
+        return formats
+
     def _extract_folder(self, url, folder_id):
         webpage = self._download_webpage(url, folder_id)
         paths = orderedSet(re.findall(
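The rewritten _extract_video_info pulls title, thumbnail, duration and view count out of namespaced RSS elements via xpath_text/xpath_attr/xpath_with_ns. Roughly the same lookups with plain ElementTree against a made-up clipdetails <item> (the two namespace URIs are the ones hard-coded in the diff):

import xml.etree.ElementTree as ET

# Made-up clipdetails <item>; the namespace URIs are the ones hard-coded in the diff.
ITEM = '''<item xmlns:media="http://search.yahoo.com/mrss"
               xmlns:ls="http://api.channel.livestream.com/2.0">
  <title>Sample clip</title>
  <media:thumbnail url="http://example.com/thumb.jpg"/>
  <media:content url="http://example.com/clip.mp4" duration="771.301"/>
  <ls:viewsCount>42</ls:viewsCount>
</item>'''

MEDIA = '{http://search.yahoo.com/mrss}'
LS = '{http://api.channel.livestream.com/2.0}'

item = ET.fromstring(ITEM)
title = item.findtext('title')
thumbnail_url = item.find(MEDIA + 'thumbnail').get('url')
duration = float(item.find(MEDIA + 'content').get('duration'))
view_count = int(item.findtext(LS + 'viewsCount'))
print(title, thumbnail_url, duration, view_count)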
@@ -239,24 +314,45 @@ class LivestreamOriginalIE(InfoExtractor):
                 <a\s+href="(?=https?://livestre\.am/)
             )([^"]+)"''', webpage))

-        return {
-            '_type': 'playlist',
-            'id': folder_id,
-            'entries': [{
-                '_type': 'url',
-                'url': compat_urlparse.urljoin(url, p),
-            } for p in paths],
-        }
+        entries = [{
+            '_type': 'url',
+            'url': compat_urlparse.urljoin(url, p),
+        } for p in paths]
+
+        return self.playlist_result(entries, folder_id)

     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        id = mobj.group('id')
         user = mobj.group('user')
         url_type = mobj.group('type')
+        content_id = mobj.group('id')
         if url_type == 'folder':
-            return self._extract_folder(url, id)
+            return self._extract_folder(url, content_id)
         else:
-            return self._extract_video(user, id)
+            # this url is used on mobile devices
+            stream_url = 'http://x%sx.api.channel.livestream.com/3.0/getstream.json' % user
+            info = {}
+            if content_id:
+                stream_url += '?id=%s' % content_id
+                info = self._extract_video_info(user, content_id)
+            else:
+                content_id = user
+                webpage = self._download_webpage(url, content_id)
+                info = {
+                    'title': self._og_search_title(webpage),
+                    'description': self._og_search_description(webpage),
+                    'thumbnail': self._search_regex(r'channelLogo.src\s*=\s*"([^"]+)"', webpage, 'thumbnail', None),
+                }
+            video_data = self._download_json(stream_url, content_id)
+            is_live = video_data.get('isLive')
+            entry_protocol = 'm3u8' if is_live else 'm3u8_native'
+            info.update({
+                'id': content_id,
+                'title': self._live_title(info['title']) if is_live else info['title'],
+                'formats': self._extract_video_formats(video_data, content_id, entry_protocol),
+                'is_live': is_live,
+            })
+            return info


 # The server doesn't support HEAD request, the generic extractor can't detect
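For the original-style URLs, the new _real_extract distinguishes folders, single clips (an id is present) and live channels (no id), and for the latter two it queries the mobile getstream.json endpoint, whose isLive flag selects the m3u8 entry protocol. A sketch of just that branching against a stubbed response; pick_stream_url is a hypothetical helper, and the user names and clip id come from the test URLs in the diff:

def pick_stream_url(user, content_id=None):
    # Hypothetical helper mirroring the URL handling added in this hunk.
    stream_url = 'http://x%sx.api.channel.livestream.com/3.0/getstream.json' % user
    if content_id:
        stream_url += '?id=%s' % content_id
    return stream_url

# Stubbed getstream.json payload; field names follow the diff (isLive, httpUrl).
video_data = {'isLive': True, 'httpUrl': 'http://example.com/playlist.m3u8'}
is_live = video_data.get('isLive')
entry_protocol = 'm3u8' if is_live else 'm3u8_native'

print(pick_stream_url('znsbahamas'))  # live channel from the new test: no id appended
print(pick_stream_url('dealbook', 'pla_8aa4a3f1-ba15-46a4-893b-902210e138fb'))  # single clip
print(entry_protocol)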