import re
import json
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import (
    # This is used by the not implemented extractLiveStream method
    compat_urllib_parse,

    ExtractorError,
    unified_strdate,
)
|
|
|
|
|
|
|
|
class ArteTvIE(InfoExtractor):
    """
    There are two sources of video in arte.tv: videos.arte.tv and
    www.arte.tv/guide, the extraction process is different for each one.
    The videos expire in 7 days, so we can't add tests.
    """
    # Dots in the host names are escaped so they only match literal dots.
    _EMISSION_URL = r'(?:http://)?www\.arte\.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?'
    _VIDEOS_URL = r'(?:http://)?videos\.arte\.tv/(?P<lang>fr|de)/.*-(?P<id>.*?)\.html'
    _LIVE_URL = r'index-[0-9]+\.html$'

    IE_NAME = u'arte.tv'

    @classmethod
    def suitable(cls, url):
        """Accept both the www.arte.tv/guide and the videos.arte.tv URL forms."""
        return any(re.match(regex, url) for regex in (cls._EMISSION_URL, cls._VIDEOS_URL))

    # TODO implement Live Stream
    # def extractLiveStream(self, url):
    #     video_lang = url.split('/')[-4]
    #     info = self.grep_webpage(
    #         url,
    #         r'src="(.*?/videothek_js.*?\.js)',
    #         0,
    #         [
    #             (1, 'url', u'Invalid URL: %s' % url)
    #         ]
    #     )
    #     http_host = url.split('/')[2]
    #     next_url = 'http://%s%s' % (http_host, compat_urllib_parse.unquote(info.get('url')))
    #     info = self.grep_webpage(
    #         next_url,
    #         r'(s_artestras_scst_geoFRDE_' + video_lang + '.*?)\'.*?' +
    #             '(http://.*?\.swf).*?' +
    #             '(rtmp://.*?)\'',
    #         re.DOTALL,
    #         [
    #             (1, 'path', u'could not extract video path: %s' % url),
    #             (2, 'player', u'could not extract video player: %s' % url),
    #             (3, 'url', u'could not extract video url: %s' % url)
    #         ]
    #     )
    #     video_url = u'%s/%s' % (info.get('url'), info.get('path'))

    def _real_extract(self, url):
        """Dispatch to the right extraction method based on the URL form."""
        mobj = re.match(self._EMISSION_URL, url)
        if mobj is not None:
            name = mobj.group('name')
            lang = mobj.group('lang')
            # This is not a real id, it can be for example AJT for the news
            # http://www.arte.tv/guide/fr/emissions/AJT/arte-journal
            video_id = mobj.group('id')
            return self._extract_emission(url, video_id, lang)

        mobj = re.match(self._VIDEOS_URL, url)
        if mobj is not None:
            # Renamed from ``id`` to avoid shadowing the builtin.
            video_id = mobj.group('id')
            lang = mobj.group('lang')
            return self._extract_video(url, video_id, lang)

        # Fixed: this used to search ``video_id``, which is never bound on
        # this path (both branches above return), so it raised NameError
        # instead of the intended error message.
        if re.search(self._LIVE_URL, url) is not None:
            raise ExtractorError(u'Arte live streams are not yet supported, sorry')
            # self.extractLiveStream(url)
            # return

    def _extract_emission(self, url, video_id, lang):
        """Extract from www.arte.tv/guide"""
        if video_id.replace('-', '').isdigit():
            json_url = 'http://org-www.arte.tv/papi/tvguide/videos/stream/player/F/%s_PLUS7-F/ALL/ALL.json' % video_id
        else:
            # We don't know the real id of the video, we have to search in the webpage
            webpage = self._download_webpage(url, video_id)
            json_url = self._html_search_regex(r'arte_vp_url="(.*?)"', webpage, 'json url')

        json_info = self._download_webpage(json_url, video_id, 'Downloading info json')
        self.report_extraction(video_id)
        info = json.loads(json_info)
        player_info = info['videoJsonPlayer']

        info_dict = {'id': player_info['VID'],
                     'title': player_info['VTI'],
                     'description': player_info['VDE'],
                     # VDA looks like 'DD/MM/YYYY HH:MM:SS'; only the date part is used
                     'upload_date': unified_strdate(player_info['VDA'].split(' ')[0]),
                     'thumbnail': player_info['programImage'],
                     'ext': 'flv',
                     }

        formats = player_info['VSR'].values()
        def _match_lang(f):
            # Return true if that format is in the language of the url
            # (lang is guaranteed to be 'fr' or 'de' by the URL regexes)
            if lang == 'fr':
                l = 'F'
            elif lang == 'de':
                l = 'A'
            regexes = [r'VO?%s' % l, r'V%s-ST.' % l]
            return any(re.match(r, f['versionCode']) for r in regexes)
        # Some formats may not be in the same language as the url
        # (list() so the emptiness check and sort below work on Python 3 too)
        formats = list(filter(_match_lang, formats))
        if not formats:
            # Fixed: previously this fell through to formats[-1] and crashed
            # with a bare IndexError when no format matched the language.
            raise ExtractorError(u'No video formats found for language %s' % lang)
        # We order the formats by quality
        formats = sorted(formats, key=lambda f: int(f['height']))
        # Pick the best quality
        format_info = formats[-1]
        if format_info['mediaType'] == u'rtmp':
            info_dict['url'] = format_info['streamer']
            info_dict['play_path'] = 'mp4:' + format_info['url']
        else:
            info_dict['url'] = format_info['url']

        return info_dict

    def _extract_video(self, url, video_id, lang):
        """Extract from videos.arte.tv"""
        ref_xml_url = url.replace('/videos/', '/do_delegate/videos/')
        ref_xml_url = ref_xml_url.replace('.html', ',view,asPlayerXml.xml')
        ref_xml = self._download_webpage(ref_xml_url, video_id, note=u'Downloading metadata')
        ref_xml_doc = xml.etree.ElementTree.fromstring(ref_xml)
        config_node = ref_xml_doc.find('.//video[@lang="%s"]' % lang)
        config_xml_url = config_node.attrib['ref']
        config_xml = self._download_webpage(config_xml_url, video_id, note=u'Downloading configuration')

        video_urls = list(re.finditer(r'<url quality="(?P<quality>.*?)">(?P<url>.*?)</url>', config_xml))
        def _key(m):
            # 'hd' sorts after every other quality label
            quality = m.group('quality')
            if quality == 'hd':
                return 2
            else:
                return 1
        # We pick the best quality
        video_urls = sorted(video_urls, key=_key)
        video_url = list(video_urls)[-1].group('url')

        title = self._html_search_regex(r'<name>(.*?)</name>', config_xml, 'title')
        thumbnail = self._html_search_regex(r'<firstThumbnailUrl>(.*?)</firstThumbnailUrl>',
                                            config_xml, 'thumbnail')
        return {'id': video_id,
                'title': title,
                'thumbnail': thumbnail,
                'url': video_url,
                'ext': 'flv',
                }
|