import json
import re
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    find_xpath_attr,
    unified_strdate,
)
|
|
|
|
|
|
|
|
class ArteTvIE(InfoExtractor):
    """
    There are two sources of video in arte.tv: videos.arte.tv and
    www.arte.tv/guide, the extraction process is different for each one.
    The videos expire in 7 days, so we can't add tests.
    """
    _EMISSION_URL = r'(?:http://)?www\.arte.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?'
    _VIDEOS_URL = r'(?:http://)?videos.arte.tv/(?P<lang>fr|de)/.*-(?P<id>.*?).html'
    _LIVEWEB_URL = r'(?:http://)?liveweb.arte.tv/(?P<lang>fr|de)/(?P<subpage>.+?)/(?P<name>.+)'
    _LIVE_URL = r'index-[0-9]+\.html$'

    IE_NAME = u'arte.tv'

    @classmethod
    def suitable(cls, url):
        """Return True if any of the three supported site patterns match."""
        return any(re.match(regex, url) for regex in (cls._EMISSION_URL, cls._VIDEOS_URL, cls._LIVEWEB_URL))

    # TODO implement Live Stream extraction (the old commented-out scraping
    # sketch was removed as dead code; see VCS history if needed).

    def _real_extract(self, url):
        """Dispatch to the right extraction routine based on the URL shape."""
        mobj = re.match(self._EMISSION_URL, url)
        if mobj is not None:
            lang = mobj.group('lang')
            # This is not a real id, it can be for example AJT for the news
            # http://www.arte.tv/guide/fr/emissions/AJT/arte-journal
            video_id = mobj.group('id')
            return self._extract_emission(url, video_id, lang)

        mobj = re.match(self._VIDEOS_URL, url)
        if mobj is not None:
            # renamed from `id` to avoid shadowing the builtin
            video_id = mobj.group('id')
            lang = mobj.group('lang')
            return self._extract_video(url, video_id, lang)

        mobj = re.match(self._LIVEWEB_URL, url)
        if mobj is not None:
            name = mobj.group('name')
            lang = mobj.group('lang')
            return self._extract_liveweb(url, name, lang)

        # BUG FIX: this previously tested the undefined name `video_id`
        # (only bound inside the first branch above), which raised a
        # NameError instead of the intended ExtractorError. Test the URL.
        if re.search(self._LIVE_URL, url) is not None:
            raise ExtractorError(u'Arte live streams are not yet supported, sorry')
            # self.extractLiveStream(url)
            # return

    def _extract_emission(self, url, video_id, lang):
        """Extract from www.arte.tv/guide"""
        webpage = self._download_webpage(url, video_id)
        json_url = self._html_search_regex(r'arte_vp_url="(.*?)"', webpage, 'json url')

        json_info = self._download_webpage(json_url, video_id, 'Downloading info json')
        self.report_extraction(video_id)
        info = json.loads(json_info)
        player_info = info['videoJsonPlayer']

        info_dict = {'id': player_info['VID'],
                     'title': player_info['VTI'],
                     'description': player_info.get('VDE'),
                     # VDA looks like "<date> <time>"; keep only the date part
                     'upload_date': unified_strdate(player_info['VDA'].split(' ')[0]),
                     'thumbnail': player_info['programImage'],
                     'ext': 'flv',
                     }

        formats = player_info['VSR'].values()
        def _match_lang(f):
            # Return true if that format is in the language of the url.
            # `lang` is guaranteed to be 'fr' or 'de' by the URL regexes.
            if lang == 'fr':
                l = 'F'
            elif lang == 'de':
                l = 'A'
            regexes = [r'VO?%s' % l, r'VO?.-ST%s' % l]
            return any(re.match(r, f['versionCode']) for r in regexes)
        # Some formats may not be in the same language as the url
        formats = filter(_match_lang, formats)
        # We order the formats by quality
        formats = sorted(formats, key=lambda f: int(f['height']))
        # Prefer videos without subtitles in the same language
        formats = sorted(formats, key=lambda f: re.match(r'VO(F|A)-STM\1', f['versionCode']) is None)
        # Pick the best quality
        format_info = formats[-1]
        if format_info['mediaType'] == u'rtmp':
            info_dict['url'] = format_info['streamer']
            info_dict['play_path'] = 'mp4:' + format_info['url']
        else:
            info_dict['url'] = format_info['url']

        return info_dict

    def _extract_video(self, url, video_id, lang):
        """Extract from videos.arte.tv"""
        # The player XML is reachable through a fixed URL rewrite
        ref_xml_url = url.replace('/videos/', '/do_delegate/videos/')
        ref_xml_url = ref_xml_url.replace('.html', ',view,asPlayerXml.xml')
        ref_xml = self._download_webpage(ref_xml_url, video_id, note=u'Downloading metadata')
        ref_xml_doc = xml.etree.ElementTree.fromstring(ref_xml)
        config_node = find_xpath_attr(ref_xml_doc, './/video', 'lang', lang)
        config_xml_url = config_node.attrib['ref']
        config_xml = self._download_webpage(config_xml_url, video_id, note=u'Downloading configuration')

        video_urls = list(re.finditer(r'<url quality="(?P<quality>.*?)">(?P<url>.*?)</url>', config_xml))
        def _key(m):
            # 'hd' sorts after everything else, so formats[-1] is best
            quality = m.group('quality')
            if quality == 'hd':
                return 2
            else:
                return 1
        # We pick the best quality
        video_urls = sorted(video_urls, key=_key)
        video_url = list(video_urls)[-1].group('url')

        title = self._html_search_regex(r'<name>(.*?)</name>', config_xml, 'title')
        thumbnail = self._html_search_regex(r'<firstThumbnailUrl>(.*?)</firstThumbnailUrl>',
                                            config_xml, 'thumbnail')
        return {'id': video_id,
                'title': title,
                'thumbnail': thumbnail,
                'url': video_url,
                'ext': 'flv',
                }

    def _extract_liveweb(self, url, name, lang):
        """Extract from http://liveweb.arte.tv/"""
        webpage = self._download_webpage(url, name)
        video_id = self._search_regex(r'eventId=(\d+?)("|&)', webpage, u'event id')
        config_xml = self._download_webpage('http://download.liveweb.arte.tv/o21/liveweb/events/event-%s.xml' % video_id,
                                            video_id, u'Downloading information')
        config_doc = xml.etree.ElementTree.fromstring(config_xml.encode('utf-8'))
        event_doc = config_doc.find('event')
        video_doc = event_doc.find('video')
        url_node = video_doc.find('urlHd')
        if url_node is None:
            # BUG FIX: the fallback previously referenced `video_doc` without
            # ever defining it, raising a NameError whenever <urlHd> was
            # absent. Bind the <video> node once and reuse it for <urlSd>.
            url_node = video_doc.find('urlSd')

        return {'id': video_id,
                'title': event_doc.find('name%s' % lang.capitalize()).text,
                'url': url_node.text.replace('MP4', 'mp4'),
                'ext': 'flv',
                'thumbnail': self._og_search_thumbnail(webpage),
                }
|