[vevo] remove request to old api and catch apiv2 errors
parent cf0cabbe50
commit 9bccdc7004
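For orientation, the flow the extractor follows after this commit boils down to three HTTP calls against Vevo's apiv2: an empty POST to the auth endpoint to get an access token, then token-authenticated requests for the video metadata and its stream versions. The sketch below reproduces that flow outside youtube-dl using the `requests` library; the endpoint paths and the `token` query parameter are taken from the diff, while the standalone script itself (and the example video id) is only an illustration, not part of the commit.

    # Standalone sketch of the apiv2 flow the updated extractor relies on
    # (illustrative only; the real code goes through youtube-dl's helpers).
    import requests

    ISRC = 'GB1101300280'  # example ISRC-style video id (hypothetical value)

    # 1. POST an empty body to the auth endpoint to obtain an access token.
    auth_info = requests.post('http://www.vevo.com/auth', data=b'').json()
    token = auth_info['access_token']

    # 2. Fetch video metadata and stream versions from apiv2 with the token.
    api_url_template = 'https://apiv2.vevo.com/%s?token=' + token
    video_info = requests.get(api_url_template % ('video/%s' % ISRC)).json()
    video_versions = requests.get(api_url_template % ('video/%s/streams' % ISRC)).json()

    print(video_info.get('title'), len(video_versions))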
youtube_dl/extractor/vevo.py
@@ -4,9 +4,9 @@ import re
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_etree_fromstring,
     compat_str,
     compat_urlparse,
+    compat_HTTPError,
 )
 from ..utils import (
     ExtractorError,
@@ -140,21 +140,6 @@ class VevoIE(VevoBaseIE):
         'url': 'http://www.vevo.com/watch/INS171400764',
         'only_matching': True,
     }]
-    _SMIL_BASE_URL = 'http://smil.lvl3.vevo.com'
-    _SOURCE_TYPES = {
-        0: 'youtube',
-        1: 'brightcove',
-        2: 'http',
-        3: 'hls_ios',
-        4: 'hls',
-        5: 'smil',  # http
-        7: 'f4m_cc',
-        8: 'f4m_ak',
-        9: 'f4m_l3',
-        10: 'ism',
-        13: 'smil',  # rtmp
-        18: 'dash',
-    }
     _VERSIONS = {
         0: 'youtube',  # only in AuthenticateVideo videoVersions
         1: 'level3',
@@ -163,41 +148,6 @@ class VevoIE(VevoBaseIE):
         4: 'amazon',
     }
 
-    def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
-        formats = []
-        els = smil.findall('.//{http://www.w3.org/2001/SMIL20/Language}video')
-        for el in els:
-            src = el.attrib['src']
-            m = re.match(r'''(?xi)
-                (?P<ext>[a-z0-9]+):
-                (?P<path>
-                    [/a-z0-9]+          # The directory and main part of the URL
-                    _(?P<tbr>[0-9]+)k
-                    _(?P<width>[0-9]+)x(?P<height>[0-9]+)
-                    _(?P<vcodec>[a-z0-9]+)
-                    _(?P<vbr>[0-9]+)
-                    _(?P<acodec>[a-z0-9]+)
-                    _(?P<abr>[0-9]+)
-                    \.[a-z0-9]+         # File extension
-                )''', src)
-            if not m:
-                continue
-
-            format_url = self._SMIL_BASE_URL + m.group('path')
-            formats.append({
-                'url': format_url,
-                'format_id': 'smil_' + m.group('tbr'),
-                'vcodec': m.group('vcodec'),
-                'acodec': m.group('acodec'),
-                'tbr': int(m.group('tbr')),
-                'vbr': int(m.group('vbr')),
-                'abr': int(m.group('abr')),
-                'ext': m.group('ext'),
-                'width': int(m.group('width')),
-                'height': int(m.group('height')),
-            })
-        return formats
-
     def _initialize_api(self, video_id):
         req = sanitized_Request(
             'http://www.vevo.com/auth', data=b'')
@@ -214,148 +164,91 @@ class VevoIE(VevoBaseIE):
         self._api_url_template = self.http_scheme() + '//apiv2.vevo.com/%s?token=' + auth_info['access_token']
 
     def _call_api(self, path, *args, **kwargs):
-        return self._download_json(self._api_url_template % path, *args, **kwargs)
+        try:
+            data = self._download_json(self._api_url_template % path, *args, **kwargs)
+        except ExtractorError as e:
+            if isinstance(e.cause, compat_HTTPError):
+                errors = self._parse_json(e.cause.read().decode(), None)['errors']
+                error_message = ', '.join([error['message'] for error in errors])
+                raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message), expected=True)
+            raise
+        return data
 
     def _real_extract(self, url):
        video_id = self._match_id(url)
 
-        json_url = 'http://api.vevo.com/VideoService/AuthenticateVideo?isrc=%s' % video_id
-        response = self._download_json(
-            json_url, video_id, 'Downloading video info',
-            'Unable to download info', fatal=False) or {}
-        video_info = response.get('video') or {}
-        artist = None
-        featured_artist = None
-        uploader = None
-        view_count = None
-        formats = []
-
-        if not video_info:
-            try:
-                self._initialize_api(video_id)
-            except ExtractorError:
-                ytid = response.get('errorInfo', {}).get('ytid')
-                if ytid:
-                    self.report_warning(
-                        'Video is geoblocked, trying with the YouTube video %s' % ytid)
-                    return self.url_result(ytid, 'Youtube', ytid)
-
-                raise
-
-            video_info = self._call_api(
-                'video/%s' % video_id, video_id, 'Downloading api video info',
-                'Failed to download video info')
-
-            video_versions = self._call_api(
-                'video/%s/streams' % video_id, video_id,
-                'Downloading video versions info',
-                'Failed to download video versions info',
-                fatal=False)
-
-            # Some videos are only available via webpage (e.g.
-            # https://github.com/rg3/youtube-dl/issues/9366)
-            if not video_versions:
-                webpage = self._download_webpage(url, video_id)
-                video_versions = self._extract_json(webpage, video_id, 'streams')[video_id][0]
-
-            timestamp = parse_iso8601(video_info.get('releaseDate'))
-            artists = video_info.get('artists')
-            for curr_artist in artists:
-                if curr_artist.get('role') == 'Featured':
-                    featured_artist = curr_artist['name']
-                else:
-                    artist = uploader = curr_artist['name']
-            view_count = int_or_none(video_info.get('views', {}).get('total'))
-
-            for video_version in video_versions:
-                version = self._VERSIONS.get(video_version['version'])
-                version_url = video_version.get('url')
-                if not version_url:
-                    continue
-
-                if '.ism' in version_url:
-                    continue
-                elif '.mpd' in version_url:
-                    formats.extend(self._extract_mpd_formats(
-                        version_url, video_id, mpd_id='dash-%s' % version,
-                        note='Downloading %s MPD information' % version,
-                        errnote='Failed to download %s MPD information' % version,
-                        fatal=False))
-                elif '.m3u8' in version_url:
-                    formats.extend(self._extract_m3u8_formats(
-                        version_url, video_id, 'mp4', 'm3u8_native',
-                        m3u8_id='hls-%s' % version,
-                        note='Downloading %s m3u8 information' % version,
-                        errnote='Failed to download %s m3u8 information' % version,
-                        fatal=False))
-                else:
-                    m = re.search(r'''(?xi)
-                        _(?P<width>[0-9]+)x(?P<height>[0-9]+)
-                        _(?P<vcodec>[a-z0-9]+)
-                        _(?P<vbr>[0-9]+)
-                        _(?P<acodec>[a-z0-9]+)
-                        _(?P<abr>[0-9]+)
-                        \.(?P<ext>[a-z0-9]+)''', version_url)
-                    if not m:
-                        continue
-
-                    formats.append({
-                        'url': version_url,
-                        'format_id': 'http-%s-%s' % (version, video_version['quality']),
-                        'vcodec': m.group('vcodec'),
-                        'acodec': m.group('acodec'),
-                        'vbr': int(m.group('vbr')),
-                        'abr': int(m.group('abr')),
-                        'ext': m.group('ext'),
-                        'width': int(m.group('width')),
-                        'height': int(m.group('height')),
-                    })
-        else:
-            timestamp = int_or_none(self._search_regex(
-                r'/Date\((\d+)\)/',
-                video_info['releaseDate'], 'release date', fatal=False),
-                scale=1000)
-            artists = video_info.get('mainArtists')
-            if artists:
-                artist = uploader = artists[0]['artistName']
-
-            featured_artists = video_info.get('featuredArtists')
-            if featured_artists:
-                featured_artist = featured_artists[0]['artistName']
-
-            smil_parsed = False
-            for video_version in video_info['videoVersions']:
-                version = self._VERSIONS.get(video_version['version'])
-                if version == 'youtube':
-                    continue
-                else:
-                    source_type = self._SOURCE_TYPES.get(video_version['sourceType'])
-                    renditions = compat_etree_fromstring(video_version['data'])
-                    if source_type == 'http':
-                        for rend in renditions.findall('rendition'):
-                            attr = rend.attrib
-                            formats.append({
-                                'url': attr['url'],
-                                'format_id': 'http-%s-%s' % (version, attr['name']),
-                                'height': int_or_none(attr.get('frameheight')),
-                                'width': int_or_none(attr.get('frameWidth')),
-                                'tbr': int_or_none(attr.get('totalBitrate')),
-                                'vbr': int_or_none(attr.get('videoBitrate')),
-                                'abr': int_or_none(attr.get('audioBitrate')),
-                                'vcodec': attr.get('videoCodec'),
-                                'acodec': attr.get('audioCodec'),
-                            })
-                    elif source_type == 'hls':
-                        formats.extend(self._extract_m3u8_formats(
-                            renditions.find('rendition').attrib['url'], video_id,
-                            'mp4', 'm3u8_native', m3u8_id='hls-%s' % version,
-                            note='Downloading %s m3u8 information' % version,
-                            errnote='Failed to download %s m3u8 information' % version,
-                            fatal=False))
-                    elif source_type == 'smil' and version == 'level3' and not smil_parsed:
-                        formats.extend(self._extract_smil_formats(
-                            renditions.find('rendition').attrib['url'], video_id, False))
-                        smil_parsed = True
+        self._initialize_api(video_id)
+
+        video_info = self._call_api(
+            'video/%s' % video_id, video_id, 'Downloading api video info',
+            'Failed to download video info')
+
+        video_versions = self._call_api(
+            'video/%s/streams' % video_id, video_id,
+            'Downloading video versions info',
+            'Failed to download video versions info',
+            fatal=False)
+
+        # Some videos are only available via webpage (e.g.
+        # https://github.com/rg3/youtube-dl/issues/9366)
+        if not video_versions:
+            webpage = self._download_webpage(url, video_id)
+            video_versions = self._extract_json(webpage, video_id, 'streams')[video_id][0]
+
+        uploader = None
+        artist = None
+        featured_artist = None
+        artists = video_info.get('artists')
+        for curr_artist in artists:
+            if curr_artist.get('role') == 'Featured':
+                featured_artist = curr_artist['name']
+            else:
+                artist = uploader = curr_artist['name']
+
+        formats = []
+        for video_version in video_versions:
+            version = self._VERSIONS.get(video_version['version'])
+            version_url = video_version.get('url')
+            if not version_url:
+                continue
+
+            if '.ism' in version_url:
+                continue
+            elif '.mpd' in version_url:
+                formats.extend(self._extract_mpd_formats(
+                    version_url, video_id, mpd_id='dash-%s' % version,
+                    note='Downloading %s MPD information' % version,
+                    errnote='Failed to download %s MPD information' % version,
+                    fatal=False))
+            elif '.m3u8' in version_url:
+                formats.extend(self._extract_m3u8_formats(
+                    version_url, video_id, 'mp4', 'm3u8_native',
+                    m3u8_id='hls-%s' % version,
+                    note='Downloading %s m3u8 information' % version,
+                    errnote='Failed to download %s m3u8 information' % version,
+                    fatal=False))
+            else:
+                m = re.search(r'''(?xi)
+                    _(?P<width>[0-9]+)x(?P<height>[0-9]+)
+                    _(?P<vcodec>[a-z0-9]+)
+                    _(?P<vbr>[0-9]+)
+                    _(?P<acodec>[a-z0-9]+)
+                    _(?P<abr>[0-9]+)
+                    \.(?P<ext>[a-z0-9]+)''', version_url)
+                if not m:
+                    continue
+
+                formats.append({
+                    'url': version_url,
+                    'format_id': 'http-%s-%s' % (version, video_version['quality']),
+                    'vcodec': m.group('vcodec'),
+                    'acodec': m.group('acodec'),
+                    'vbr': int(m.group('vbr')),
+                    'abr': int(m.group('abr')),
+                    'ext': m.group('ext'),
+                    'width': int(m.group('width')),
+                    'height': int(m.group('height')),
+                })
         self._sort_formats(formats)
 
         track = video_info['title']
@@ -376,17 +269,15 @@ class VevoIE(VevoBaseIE):
         else:
             age_limit = None
 
-        duration = video_info.get('duration')
-
         return {
             'id': video_id,
             'title': title,
             'formats': formats,
             'thumbnail': video_info.get('imageUrl') or video_info.get('thumbnailUrl'),
-            'timestamp': timestamp,
+            'timestamp': parse_iso8601(video_info.get('releaseDate')),
             'uploader': uploader,
-            'duration': duration,
-            'view_count': view_count,
+            'duration': int_or_none(video_info.get('duration')),
+            'view_count': int_or_none(video_info.get('views', {}).get('total')),
             'age_limit': age_limit,
             'track': track,
             'artist': uploader,
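The "catch apiv2 errors" half of the commit assumes the API answers failed requests with a JSON body containing an `errors` list whose items carry a `message` field; the new `_call_api` joins those messages and re-raises them as an expected `ExtractorError` prefixed with the extractor name. A minimal sketch of that message-building step, using a made-up payload:

    # Sketch of the error handling added to _call_api (illustrative only).
    # The payload below is an invented example of the shape the code expects:
    # a JSON object with an "errors" list whose items carry a "message".
    import json

    IE_NAME = 'vevo'  # prefix used by the extractor in its error messages

    raw_body = '{"errors": [{"message": "Unavailable in your territory."}]}'

    errors = json.loads(raw_body)['errors']
    error_message = ', '.join(error['message'] for error in errors)
    # The extractor raises this as ExtractorError(..., expected=True);
    # here we only show the resulting message.
    print('%s said: %s' % (IE_NAME, error_message))
    # -> vevo said: Unavailable in your territory.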