[srgssr] handle all play urls only in SRGSSRIE and keep RTSIE for articles

remitamine 2015-12-05 15:57:10 +01:00
parent e09f58b3bc
commit 7db2897ded
2 changed files with 79 additions and 67 deletions
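In short: rts.ch "play" URLs are no longer matched by RTSIE; they are claimed by SRGSSRPlayIE, which resolves them to an srgssr: URL handled by SRGSSRIE, and SRGSSRIE hands RTS media back to RTSIE through an internal rts: URL. A rough sketch of which extractor now claims which URL (illustrative only; it assumes a youtube-dl checkout containing this commit and reuses the test URLs from the diff below):

# Illustrative sketch, not part of the commit: which extractor claims which URL
# after this change (assumes a youtube-dl checkout that contains it).
from youtube_dl.extractor.rts import RTSIE
from youtube_dl.extractor.srgssr import SRGSSRIE, SRGSSRPlayIE

article_url = 'http://www.rts.ch/archives/tv/divers/3449373-les-enfants-terribles.html'
play_url = 'http://www.rts.ch/play/tv/-/video/le-19h30?id=6348260'

print(RTSIE.suitable(article_url))      # True  - article pages stay with RTSIE
print(RTSIE.suitable(play_url))         # False - play URLs no longer match RTSIE
print(SRGSSRPlayIE.suitable(play_url))  # True  - resolved to 'srgssr:rts:video:6348260'
print(SRGSSRIE.suitable('srgssr:rts:video:6348260'))  # True - then delegated back as 'rts:6348260'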

youtube_dl/extractor/rts.py

@@ -3,7 +3,7 @@ from __future__ import unicode_literals
 
 import re
 
-from .common import InfoExtractor
+from .srgssr import SRGSSRIE
 from ..compat import (
     compat_str,
     compat_urllib_parse_urlparse,
@@ -17,27 +17,18 @@ from ..utils import (
 )
 
 
-class RTSIE(InfoExtractor):
+class RTSIE(SRGSSRIE):
     IE_DESC = 'RTS.ch'
-    _VALID_URL = r'''(?x)
-                    (?:
-                        rts:(?P<rts_id>\d+)|
-                        https?://
-                            (?:www\.)?rts\.ch/
-                            (?:
-                                (?:[^/]+/){2,}(?P<id>[0-9]+)-(?P<display_id>.+?)\.html|
-                                play/tv/[^/]+/video/(?P<display_id_new>.+?)\?id=(?P<id_new>[0-9]+)
-                            )
-                    )'''
+    _VALID_URL = r'rts:(?P<rts_id>\d+)|https?://(?:www\.)?rts\.ch/(?:[^/]+/){2,}(?P<id>[0-9]+)-(?P<display_id>.+?)\.html'
 
     _TESTS = [
         {
             'url': 'http://www.rts.ch/archives/tv/divers/3449373-les-enfants-terribles.html',
-            'md5': '753b877968ad8afaeddccc374d4256a5',
+            'md5': 'f254c4b26fb1d3c183793d52bc40d3e7',
             'info_dict': {
                 'id': '3449373',
                 'display_id': 'les-enfants-terribles',
-                'ext': 'mp4',
+                'ext': 'flv',
                 'duration': 1488,
                 'title': 'Les Enfants Terribles',
                 'description': 'France Pommier et sa soeur Luce Feral, les deux filles de ce groupe de 5.',
@@ -50,11 +41,11 @@ class RTSIE(InfoExtractor):
         },
         {
             'url': 'http://www.rts.ch/emissions/passe-moi-les-jumelles/5624067-entre-ciel-et-mer.html',
-            'md5': 'c148457a27bdc9e5b1ffe081a7a8337b',
+            'md5': 'f1077ac5af686c76528dc8d7c5df29ba',
             'info_dict': {
-                'id': '5624067',
-                'display_id': 'entre-ciel-et-mer',
-                'ext': 'mp4',
+                'id': '5742494',
+                'display_id': '5742494',
+                'ext': 'flv',
                 'duration': 3720,
                 'title': 'Les yeux dans les cieux - Mon homard au Canada',
                 'description': 'md5:d22ee46f5cc5bac0912e5a0c6d44a9f7',
@@ -85,11 +76,11 @@ class RTSIE(InfoExtractor):
         },
         {
             'url': 'http://www.rts.ch/video/info/journal-continu/5745356-londres-cachee-par-un-epais-smog.html',
-            'md5': '9bb06503773c07ce83d3cbd793cebb91',
+            'md5': '9f713382f15322181bb366cc8c3a4ff0',
             'info_dict': {
                 'id': '5745356',
                 'display_id': 'londres-cachee-par-un-epais-smog',
-                'ext': 'mp4',
+                'ext': 'flv',
                 'duration': 33,
                 'title': 'Londres cachée par un épais smog',
                 'description': 'Un important voile de smog recouvre Londres depuis mercredi, provoqué par la pollution et du sable du Sahara.',
@@ -114,23 +105,6 @@ class RTSIE(InfoExtractor):
                 'timestamp': 1396551600,
             },
         },
-        {
-            'url': 'http://www.rts.ch/play/tv/-/video/le-19h30?id=6348260',
-            'md5': '968777c8779e5aa2434be96c54e19743',
-            'info_dict': {
-                'id': '6348260',
-                'display_id': 'le-19h30',
-                'ext': 'mp4',
-                'duration': 1796,
-                'title': 'Le 19h30',
-                'description': '',
-                'uploader': 'Le 19h30',
-                'upload_date': '20141201',
-                'timestamp': 1417458600,
-                'thumbnail': 're:^https?://.*\.image',
-                'view_count': int,
-            },
-        },
         {
             # article with videos on rhs
             'url': 'http://www.rts.ch/sport/hockey/6693917-hockey-davos-decroche-son-31e-titre-de-champion-de-suisse.html',
@@ -139,42 +113,47 @@ class RTSIE(InfoExtractor):
                 'title': 'Hockey: Davos décroche son 31e titre de champion de Suisse',
             },
             'playlist_mincount': 5,
+        },
+        {
+            'url': 'http://www.rts.ch/play/tv/le-19h30/video/le-chantier-du-nouveau-parlement-vaudois-a-permis-une-trouvaille-historique?id=6348280',
+            'only_matching': True,
         }
     ]
 
     def _real_extract(self, url):
         m = re.match(self._VALID_URL, url)
-        video_id = m.group('rts_id') or m.group('id') or m.group('id_new')
-        display_id = m.group('display_id') or m.group('display_id_new')
+        media_id = m.group('rts_id') or m.group('id')
+        display_id = m.group('display_id') or media_id
 
         def download_json(internal_id):
             return self._download_json(
                 'http://www.rts.ch/a/%s.html?f=json/article' % internal_id,
                 display_id)
 
-        all_info = download_json(video_id)
+        all_info = download_json(media_id)
 
-        # video_id extracted out of URL is not always a real id
+        # media_id extracted out of URL is not always a real id
         if 'video' not in all_info and 'audio' not in all_info:
             page = self._download_webpage(url, display_id)
 
             # article with videos on rhs
             videos = re.findall(
-                r'<article[^>]+class="content-item"[^>]*>\s*<a[^>]+data-video-urn="urn:rts:video:(\d+)"',
+                r'<article[^>]+class="content-item"[^>]*>\s*<a[^>]+data-video-urn="urn:([^"]+)"',
                 page)
+            if not videos:
+                videos = re.findall(
+                    r'(?s)<iframe[^>]+class="srg-player"[^>]+src="[^"]+urn:([^"]+)"',
+                    page)
             if videos:
-                entries = [self.url_result('rts:%s' % video_urn, 'RTS') for video_urn in videos]
-                return self.playlist_result(entries, video_id, self._og_search_title(page))
+                entries = [self.url_result('srgssr:%s' % video_urn, 'SRGSSR') for video_urn in videos]
+                return self.playlist_result(entries, media_id, self._og_search_title(page))
 
             internal_id = self._html_search_regex(
                 r'<(?:video|audio) data-id="([0-9]+)"', page,
                 'internal video id')
             all_info = download_json(internal_id)
 
+        media_type = 'video' if 'video' in all_info else 'audio'
+
+        # check for errors
+        self.get_media_data('rts', media_type, media_id)
+
         info = all_info['video']['JSONinfo'] if 'video' in all_info else all_info['audio']
 
         upload_timestamp = parse_iso8601(info.get('broadcast_date'))
@@ -190,19 +169,27 @@ class RTSIE(InfoExtractor):
 
         formats = []
         for format_id, format_url in info['streams'].items():
+            if format_id == 'hds_sd' and 'hds' in info['streams']:
+                continue
+            if format_id == 'hls_sd' and 'hls' in info['streams']:
+                continue
             if format_url.endswith('.f4m'):
                 token = self._download_xml(
                     'http://tp.srgssr.ch/token/akahd.xml?stream=%s/*' % compat_urllib_parse_urlparse(format_url).path,
-                    video_id, 'Downloading %s token' % format_id)
+                    media_id, 'Downloading %s token' % format_id)
                 auth_params = xpath_text(token, './/authparams', 'auth params')
                 if not auth_params:
                     continue
-                formats.extend(self._extract_f4m_formats(
+                f4m_formats = self._extract_f4m_formats(
                     '%s?%s&hdcore=3.4.0&plugin=aasp-3.4.0.132.66' % (format_url, auth_params),
-                    video_id, f4m_id=format_id))
+                    media_id, f4m_id=format_id, fatal=False)
+                if f4m_formats:
+                    formats.extend(f4m_formats)
             elif format_url.endswith('.m3u8'):
-                formats.extend(self._extract_m3u8_formats(
-                    format_url, video_id, 'mp4', m3u8_id=format_id))
+                m3u8_formats = self._extract_m3u8_formats(
+                    format_url, media_id, 'mp4', m3u8_id=format_id, fatal=False)
+                if m3u8_formats:
+                    formats.extend(m3u8_formats)
             else:
                 formats.append({
                     'format_id': format_id,
@@ -217,11 +204,11 @@ class RTSIE(InfoExtractor):
             'tbr': media['rate'] or extract_bitrate(media['url']),
         } for media in info['media'] if media.get('rate')])
 
-        self._check_formats(formats, video_id)
+        self._check_formats(formats, media_id)
         self._sort_formats(formats)
 
         return {
-            'id': video_id,
+            'id': media_id,
             'display_id': display_id,
             'formats': formats,
             'title': info['title'],
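The article handling above now forwards the full embedded URN (not just urn:rts:video:<id>) to SRGSSRIE and gains an srg-player iframe fallback. A standalone sketch of those two lookups; the regexes are copied from the diff, the HTML snippet is made up:

# Standalone sketch of the two URN fallbacks added to RTSIE above; the HTML
# snippet is invented for illustration, the regexes come from the diff.
import re

page = '''
<article class="content-item">
  <a data-video-urn="urn:rts:video:6693917">Davos - Fribourg</a>
</article>
'''

videos = re.findall(
    r'<article[^>]+class="content-item"[^>]*>\s*<a[^>]+data-video-urn="urn:([^"]+)"',
    page)
if not videos:
    # new fallback for pages that only embed an srg-player iframe
    videos = re.findall(
        r'(?s)<iframe[^>]+class="srg-player"[^>]+src="[^"]+urn:([^"]+)"',
        page)

print(['srgssr:%s' % urn for urn in videos])  # ['srgssr:rts:video:6693917']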

youtube_dl/extractor/srgssr.py

@@ -23,19 +23,23 @@ class SRGSSRIE(InfoExtractor):
         'STARTDATE': 'This video is not yet available. Please try again later.',
     }
 
-    def _real_extract(self, url):
-        bu, media_type, media_id = re.match(self._VALID_URL, url).groups()
-
+    def get_media_data(self, bu, media_type, media_id):
         media_data = self._download_json(
             'http://il.srgssr.ch/integrationlayer/1.0/ue/%s/%s/play/%s.json' % (bu, media_type, media_id),
             media_id)[media_type.capitalize()]
 
         if media_data.get('block') and media_data['block'] in self._ERRORS:
-            raise ExtractorError(
-                '%s said: %s' % (
-                    self.IE_NAME,
-                    self._ERRORS[media_data['block']]),
-                expected=True)
+            raise ExtractorError('%s said: %s' % (self.IE_NAME, self._ERRORS[media_data['block']]), expected=True)
+
+        return media_data
+
+    def _real_extract(self, url):
+        bu, media_type, media_id = re.match(self._VALID_URL, url).groups()
+        if bu == 'rts':
+            return self.url_result('rts:%s' % media_id, 'RTS')
+
+        media_data = self.get_media_data(bu, media_type, media_id)
 
         metadata = media_data['AssetMetadatas']['AssetMetadata'][0]
         title = metadata['title']
@@ -61,9 +65,13 @@ class SRGSSRIE(InfoExtractor):
                     assets[quality['@quality']] = quality['text']
                 asset_url = assets.get('HD') or assets.get('HQ') or assets.get('SD') or assets.get('MQ') or assets.get('LQ')
                 if '.f4m' in asset_url:
-                    formats.extend(self._extract_f4m_formats(asset_url + '?hdcore=3.4.0', media_id, f4m_id='hds'))
+                    f4m_formats = self._extract_f4m_formats(asset_url + '?hdcore=3.4.0', media_id, f4m_id='hds', fatal=False)
+                    if f4m_formats:
+                        formats.extend(f4m_formats)
                 elif '.m3u8' in asset_url:
-                    formats.extend(self._extract_m3u8_formats(asset_url, media_id, m3u8_id='hls'))
+                    m3u8_formats = self._extract_m3u8_formats(asset_url, media_id, m3u8_id='hls', fatal=False)
+                    if m3u8_formats:
+                        formats.extend(m3u8_formats)
                 else:
                     for asset in source['url']:
                         asset_url = asset['text']
@@ -135,8 +143,25 @@ class SRGSSRPlayIE(InfoExtractor):
             # rtmp download
             'skip_download': True,
         },
+    }, {
+        'url': 'http://www.rts.ch/play/tv/-/video/le-19h30?id=6348260',
+        'md5': '67a2a9ae4e8e62a68d0e9820cc9782df',
+        'info_dict': {
+            'id': '6348260',
+            'display_id': '6348260',
+            'ext': 'flv',
+            'duration': 1796,
+            'title': 'Le 19h30',
+            'description': '',
+            'uploader': '19h30',
+            'upload_date': '20141201',
+            'timestamp': 1417458600,
+            'thumbnail': 're:^https?://.*\.image',
+            'view_count': int,
+        },
     }]
 
     def _real_extract(self, url):
         bu, media_type, media_id = re.match(self._VALID_URL, url).groups()
+        # other info can be extracted from url + '&layout=json'
         return self.url_result('srgssr:%s:%s:%s' % (bu[:3], media_type, media_id), 'SRGSSR')
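For reference, the blocked-media check that RTSIE now borrows through SRGSSRIE.get_media_data() amounts to a single integration-layer request. A standalone sketch of that lookup (my own stdlib-only code, not from the commit; it assumes the il.srgssr.ch endpoint is reachable):

# Standalone sketch (not from the commit) of what get_media_data() checks: it
# fetches the same integration-layer JSON and inspects the same 'block' field.
import json
from urllib.request import urlopen  # Python 3 stdlib; youtube-dl itself uses its compat wrappers

def media_block_reason(bu, media_type, media_id):
    url = ('http://il.srgssr.ch/integrationlayer/1.0/ue/%s/%s/play/%s.json'
           % (bu, media_type, media_id))
    data = json.loads(urlopen(url).read().decode('utf-8'))[media_type.capitalize()]
    # None means playable; otherwise a key of _ERRORS such as 'STARTDATE'
    return data.get('block')

print(media_block_reason('rts', 'video', '6348260'))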