
Merge branch 'master' of git://github.com/rg3/youtube-dl into closed-captions

Filippo Valsorda committed on 2012-03-15 14:05:34 +01:00
commit cfcf32d038
2 changed files with 104 additions and 118 deletions

View file

@@ -2314,9 +2314,7 @@ class GenericIE(InfoExtractor):
 class YoutubeSearchIE(InfoExtractor):
 	"""Information Extractor for YouTube search queries."""
 	_VALID_URL = r'ytsearch(\d+|all)?:[\s\S]+'
-	_TEMPLATE_URL = 'http://www.youtube.com/results?search_query=%s&page=%s&gl=US&hl=en'
-	_VIDEO_INDICATOR = r'href="/watch\?v=.+?"'
-	_MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
+	_API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
 	_youtube_ie = None
 	_max_youtube_results = 1000
 	IE_NAME = u'youtube:search'
@@ -2367,37 +2365,31 @@ class YoutubeSearchIE(InfoExtractor):
 		"""Downloads a specified number of results for a query"""
 		video_ids = []
-		already_seen = set()
-		pagenum = 1
-		while True:
-			self.report_download_page(query, pagenum)
-			result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
+		pagenum = 0
+		limit = n
+		while (50 * pagenum) < limit:
+			self.report_download_page(query, pagenum+1)
+			result_url = self._API_URL % (urllib.quote_plus(query), (50*pagenum)+1)
 			request = urllib2.Request(result_url)
 			try:
-				page = urllib2.urlopen(request).read()
+				data = urllib2.urlopen(request).read()
 			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+				self._downloader.trouble(u'ERROR: unable to download API page: %s' % str(err))
 				return
-			# Extract video identifiers
-			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
-				video_id = page[mobj.span()[0]:mobj.span()[1]].split('=')[2][:-1]
-				if video_id not in already_seen:
-					video_ids.append(video_id)
-					already_seen.add(video_id)
-					if len(video_ids) == n:
-						# Specified n videos reached
-						for id in video_ids:
-							self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
-						return
-			if re.search(self._MORE_PAGES_INDICATOR, page) is None:
-				for id in video_ids:
-					self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
-				return
-			pagenum = pagenum + 1
+			api_response = json.loads(data)['data']
+			new_ids = list(video['id'] for video in api_response['items'])
+			video_ids += new_ids
+			limit = min(n, api_response['totalItems'])
+			pagenum += 1
+		if len(video_ids) > n:
+			video_ids = video_ids[:n]
+		for id in video_ids:
+			self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
+		return
 class GoogleSearchIE(InfoExtractor):
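
The rewritten loop above pages through the YouTube gdata API 50 results at a time: start-index is 1-based, so the request for page p begins at item 50*p+1, and the loop stops once 50*pagenum reaches min(n, totalItems). A minimal sketch of that pagination arithmetic, with a stubbed fetch standing in for the (long since retired) gdata endpoint:

    def paged_search(fetch_page, n):
        # fetch_page(start_index) is a stand-in for the API request; it must
        # return a dict shaped like the gdata 'data' object:
        #   {'items': [{'id': ...}, ...], 'totalItems': <int>}
        video_ids = []
        pagenum = 0
        limit = n
        while (50 * pagenum) < limit:
            response = fetch_page((50 * pagenum) + 1)      # start-index is 1-based
            video_ids += [video['id'] for video in response['items']]
            limit = min(n, response['totalItems'])         # never page past the result set
            pagenum += 1
        return video_ids[:n]                               # trim the last page's overshoot

    def fake_fetch(start):                                 # 120 fake results, 50 per page
        items = [{'id': i} for i in range(start, min(start + 50, 121))]
        return {'items': items, 'totalItems': 120}

    assert len(paged_search(fake_fetch, 70)) == 70         # two requests, 70 ids kept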
@@ -2581,7 +2573,7 @@ class YoutubePlaylistIE(InfoExtractor):
 	_VALID_URL = r'(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL)?([0-9A-Za-z-_]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'
 	_TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
-	_VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
+	_VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&amp;list=PL%s&'
 	_MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
 	_youtube_ie = None
 	IE_NAME = u'youtube:playlist'
@@ -2633,7 +2625,7 @@ class YoutubePlaylistIE(InfoExtractor):
 			# Extract video identifiers
 			ids_in_page = []
-			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
+			for mobj in re.finditer(self._VIDEO_INDICATOR_TEMPLATE % playlist_id, page):
 				if mobj.group(1) not in ids_in_page:
 					ids_in_page.append(mobj.group(1))
 			video_ids.extend(ids_in_page)
@@ -2644,7 +2636,10 @@ class YoutubePlaylistIE(InfoExtractor):
 		playliststart = self._downloader.params.get('playliststart', 1) - 1
 		playlistend = self._downloader.params.get('playlistend', -1)
-		video_ids = video_ids[playliststart:playlistend]
+		if playlistend == -1:
+			video_ids = video_ids[playliststart:]
+		else:
+			video_ids = video_ids[playliststart:playlistend]
 		for id in video_ids:
 			self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
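
The three-line branch added above fixes an off-by-one: --playlist-end defaults to -1, and slicing with video_ids[playliststart:-1] silently drops the last video. A quick illustration of the behaviour the new code avoids (plain Python, toy data):

    video_ids = ['a', 'b', 'c', 'd']
    playliststart, playlistend = 0, -1      # the defaults: start at the beginning, no explicit end

    # old behaviour: -1 is used as a slice bound and cuts off the final entry
    assert video_ids[playliststart:playlistend] == ['a', 'b', 'c']

    # new behaviour: -1 means "no limit", so slice to the end instead
    kept = video_ids[playliststart:] if playlistend == -1 else video_ids[playliststart:playlistend]
    assert kept == ['a', 'b', 'c', 'd']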

View file

@@ -2058,7 +2058,7 @@ class VimeoIE(InfoExtractor):
 		video_id = mobj.group(1)
 		# Retrieve video webpage to extract further information
-		request = urllib2.Request("http://vimeo.com/moogaloop/load/clip:%s" % video_id, None, std_headers)
+		request = urllib2.Request(url, None, std_headers)
 		try:
 			self.report_download_webpage(video_id)
 			webpage = urllib2.urlopen(request).read()
@@ -2071,77 +2071,75 @@ class VimeoIE(InfoExtractor):
 		# and latter we extract those that are Vimeo specific.
 		self.report_extraction(video_id)
-		# Extract title
-		mobj = re.search(r'<caption>(.*?)</caption>', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract video title')
+		# Extract the config JSON
+		config = webpage.split(' = {config:')[1].split(',assets:')[0]
+		try:
+			config = json.loads(config)
+		except:
+			self._downloader.trouble(u'ERROR: unable to extract info section')
 			return
-		video_title = mobj.group(1).decode('utf-8')
+		# Extract title
+		video_title = config["video"]["title"]
 		simple_title = _simplify_title(video_title)
 		# Extract uploader
-		mobj = re.search(r'<uploader_url>http://vimeo.com/(.*?)</uploader_url>', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract video uploader')
-			return
-		video_uploader = mobj.group(1).decode('utf-8')
+		video_uploader = config["video"]["owner"]["name"]
 		# Extract video thumbnail
-		mobj = re.search(r'<thumbnail>(.*?)</thumbnail>', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
-			return
-		video_thumbnail = mobj.group(1).decode('utf-8')
-		# # Extract video description
-		# mobj = re.search(r'<meta property="og:description" content="(.*)" />', webpage)
-		# if mobj is None:
-		# 	self._downloader.trouble(u'ERROR: unable to extract video description')
-		# 	return
-		# video_description = mobj.group(1).decode('utf-8')
-		# if not video_description: video_description = 'No description available.'
-		video_description = 'Foo.'
-		# Vimeo specific: extract request signature
-		mobj = re.search(r'<request_signature>(.*?)</request_signature>', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract request signature')
-			return
-		sig = mobj.group(1).decode('utf-8')
-		# Vimeo specific: extract video quality information
-		mobj = re.search(r'<isHD>(\d+)</isHD>', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract video quality information')
-			return
-		quality = mobj.group(1).decode('utf-8')
-		if int(quality) == 1:
-			quality = 'hd'
+		video_thumbnail = config["video"]["thumbnail"]
+		# Extract video description
+		try:
+			lxml.etree
+		except NameError:
+			video_description = u'No description available.'
+			mobj = re.search(r'<meta name="description" content="(.*?)" />', webpage, re.MULTILINE)
+			if mobj is not None:
+				video_description = mobj.group(1)
 		else:
-			quality = 'sd'
-		# Vimeo specific: Extract request signature expiration
-		mobj = re.search(r'<request_signature_expires>(.*?)</request_signature_expires>', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract request signature expiration')
+			html_parser = lxml.etree.HTMLParser()
+			vwebpage_doc = lxml.etree.parse(StringIO.StringIO(webpage), html_parser)
+			video_description = u''.join(vwebpage_doc.xpath('id("description")//text()')).strip()
+			# TODO use another parser
+		# Extract upload date
+		video_upload_date = u'NA'
+		mobj = re.search(r'<span id="clip-date" style="display:none">[^:]*: (.*?)( \([^\(]*\))?</span>', webpage)
+		if mobj is not None:
+			video_upload_date = mobj.group(1)
+		# Vimeo specific: extract request signature and timestamp
+		sig = config['request']['signature']
+		timestamp = config['request']['timestamp']
+		# Vimeo specific: extract video codec and quality information
+		# TODO bind to format param
+		codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')]
+		for codec in codecs:
+			if codec[0] in config["video"]["files"]:
+				video_codec = codec[0]
+				video_extension = codec[1]
+				if 'hd' in config["video"]["files"][codec[0]]: quality = 'hd'
+				else: quality = 'sd'
+				break
+		else:
+			self._downloader.trouble(u'ERROR: no known codec found')
 			return
-		sig_exp = mobj.group(1).decode('utf-8')
-		video_url = "http://vimeo.com/moogaloop/play/clip:%s/%s/%s/?q=%s" % (video_id, sig, sig_exp, quality)
+		video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
+			%(video_id, sig, timestamp, quality, video_codec.upper())
 		try:
 			# Process video information
 			self._downloader.process_info({
-				'id': video_id.decode('utf-8'),
+				'id': video_id,
 				'url': video_url,
 				'uploader': video_uploader,
-				'upload_date': u'NA',
+				'upload_date': video_upload_date,
 				'title': video_title,
 				'stitle': simple_title,
-				'ext': u'mp4',
-				'thumbnail': video_thumbnail.decode('utf-8'),
-				'description': video_description,
+				'ext': video_extension,
 				'thumbnail': video_thumbnail,
 				'description': video_description,
 				'player_url': None,
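
The codec selection in this hunk leans on Python's for/else: the loop walks a fixed preference list and breaks at the first codec present in the player config's files mapping; the else branch only runs if the loop never broke. A self-contained sketch of the pattern, using an invented files dict in place of config["video"]["files"] (the real keys come from Vimeo's player JSON and are not guaranteed here):

    files = {'vp8': ['sd'], 'h264': ['hd', 'sd']}               # hypothetical stand-in for the config

    codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')]  # preference order from the diff
    for codec, extension in codecs:
        if codec in files:
            video_codec, video_extension = codec, extension
            quality = 'hd' if 'hd' in files[codec] else 'sd'
            break
    else:
        raise ValueError('no known codec found')                # the extractor reports this via trouble()

    print(video_codec, video_extension, quality)                # -> h264 mp4 hd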
@@ -2250,9 +2248,7 @@ class GenericIE(InfoExtractor):
 class YoutubeSearchIE(InfoExtractor):
 	"""Information Extractor for YouTube search queries."""
 	_VALID_URL = r'ytsearch(\d+|all)?:[\s\S]+'
-	_TEMPLATE_URL = 'http://www.youtube.com/results?search_query=%s&page=%s&gl=US&hl=en'
-	_VIDEO_INDICATOR = r'href="/watch\?v=.+?"'
-	_MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
+	_API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
 	_youtube_ie = None
 	_max_youtube_results = 1000
 	IE_NAME = u'youtube:search'
@@ -2303,45 +2299,39 @@ class YoutubeSearchIE(InfoExtractor):
 		"""Downloads a specified number of results for a query"""
 		video_ids = []
-		already_seen = set()
-		pagenum = 1
-		while True:
-			self.report_download_page(query, pagenum)
-			result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
+		pagenum = 0
+		limit = n
+		while (50 * pagenum) < limit:
+			self.report_download_page(query, pagenum+1)
+			result_url = self._API_URL % (urllib.quote_plus(query), (50*pagenum)+1)
 			request = urllib2.Request(result_url)
 			try:
-				page = urllib2.urlopen(request).read()
+				data = urllib2.urlopen(request).read()
 			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+				self._downloader.trouble(u'ERROR: unable to download API page: %s' % str(err))
 				return
-			# Extract video identifiers
-			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
-				video_id = page[mobj.span()[0]:mobj.span()[1]].split('=')[2][:-1]
-				if video_id not in already_seen:
-					video_ids.append(video_id)
-					already_seen.add(video_id)
-					if len(video_ids) == n:
-						# Specified n videos reached
-						for id in video_ids:
-							self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
-						return
-			if re.search(self._MORE_PAGES_INDICATOR, page) is None:
-				for id in video_ids:
-					self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
-				return
-			pagenum = pagenum + 1
+			api_response = json.loads(data)['data']
+			new_ids = list(video['id'] for video in api_response['items'])
+			video_ids += new_ids
+			limit = min(n, api_response['totalItems'])
+			pagenum += 1
+		if len(video_ids) > n:
+			video_ids = video_ids[:n]
+		for id in video_ids:
+			self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
+		return
 class GoogleSearchIE(InfoExtractor):
 	"""Information Extractor for Google Video search queries."""
 	_VALID_URL = r'gvsearch(\d+|all)?:[\s\S]+'
 	_TEMPLATE_URL = 'http://video.google.com/videosearch?q=%s+site:video.google.com&start=%s&hl=en'
-	_VIDEO_INDICATOR = r'videoplay\?docid=([^\&>]+)\&'
-	_MORE_PAGES_INDICATOR = r'<span>Next</span>'
+	_VIDEO_INDICATOR = r'<a href="http://video\.google\.com/videoplay\?docid=([^"\&]+)'
+	_MORE_PAGES_INDICATOR = r'class="pn" id="pnnext"'
 	_google_ie = None
 	_max_google_results = 1000
 	IE_NAME = u'video.google:search'
@@ -2392,12 +2382,11 @@ class GoogleSearchIE(InfoExtractor):
 		"""Downloads a specified number of results for a query"""
 		video_ids = []
-		already_seen = set()
-		pagenum = 1
+		pagenum = 0
 		while True:
 			self.report_download_page(query, pagenum)
-			result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
+			result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum*10)
 			request = urllib2.Request(result_url)
 			try:
 				page = urllib2.urlopen(request).read()
@@ -2408,9 +2397,8 @@ class GoogleSearchIE(InfoExtractor):
 			# Extract video identifiers
 			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
 				video_id = mobj.group(1)
-				if video_id not in already_seen:
+				if video_id not in video_ids:
 					video_ids.append(video_id)
-					already_seen.add(video_id)
 					if len(video_ids) == n:
 						# Specified n videos reached
 						for id in video_ids:
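
Replacing the already_seen set with "if video_id not in video_ids" keeps first-seen order using one structure instead of two; list membership is O(n), which is harmless under the _max_google_results cap of 1000. For much larger result sets, an insertion-ordered dict gives the same order-preserving deduplication with O(1) lookups, roughly:

    def dedupe_keep_order(ids):
        # dict keys are insertion-ordered (Python 3.7+) and unique, so one pass suffices
        return list(dict.fromkeys(ids))

    assert dedupe_keep_order(['a', 'b', 'a', 'c', 'b']) == ['a', 'b', 'c']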
@@ -2519,7 +2507,7 @@ class YoutubePlaylistIE(InfoExtractor):
 	_VALID_URL = r'(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL)?([0-9A-Za-z-_]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'
 	_TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
-	_VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
+	_VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&amp;list=PL%s&'
 	_MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
 	_youtube_ie = None
 	IE_NAME = u'youtube:playlist'
@@ -2571,7 +2559,7 @@ class YoutubePlaylistIE(InfoExtractor):
 			# Extract video identifiers
 			ids_in_page = []
-			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
+			for mobj in re.finditer(self._VIDEO_INDICATOR_TEMPLATE % playlist_id, page):
 				if mobj.group(1) not in ids_in_page:
 					ids_in_page.append(mobj.group(1))
 			video_ids.extend(ids_in_page)
@@ -2582,7 +2570,10 @@ class YoutubePlaylistIE(InfoExtractor):
 		playliststart = self._downloader.params.get('playliststart', 1) - 1
 		playlistend = self._downloader.params.get('playlistend', -1)
-		video_ids = video_ids[playliststart:playlistend]
+		if playlistend == -1:
+			video_ids = video_ids[playliststart:]
+		else:
+			video_ids = video_ids[playliststart:playlistend]
 		for id in video_ids:
 			self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)