commit 6324fd1d74
parent f03b88b3fb
@@ -8,7 +8,7 @@ import json
 import os
 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-from youtube_dl.InfoExtractors import YoutubeUserIE,YoutubePlaylistIE
+from youtube_dl.InfoExtractors import YoutubeUserIE, YoutubePlaylistIE, YoutubeIE
 from youtube_dl.utils import *
 
 PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
@@ -38,11 +38,8 @@ class TestYoutubeLists(unittest.TestCase):
         DL = FakeDownloader()
         IE = YoutubePlaylistIE(DL)
         IE.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
-        self.assertEqual(DL.result, [
-            ['http://www.youtube.com/watch?v=bV9L5Ht9LgY'],
-            ['http://www.youtube.com/watch?v=FXxLjLQi3Fg'],
-            ['http://www.youtube.com/watch?v=tU3Bgo5qJZE']
-        ])
+        self.assertEqual(map(lambda x: YoutubeIE()._extract_id(x[0]), DL.result),
+                         [ 'bV9L5Ht9LgY', 'FXxLjLQi3Fg', 'tU3Bgo5qJZE' ])
 
     def test_youtube_playlist_long(self):
         DL = FakeDownloader()
@@ -50,14 +47,21 @@ class TestYoutubeLists(unittest.TestCase):
         IE.extract('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
         self.assertTrue(len(DL.result) >= 799)
 
+    def test_youtube_playlist_with_deleted(self):
+        DL = FakeDownloader()
+        IE = YoutubePlaylistIE(DL)
+        IE.extract('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
+        self.assertFalse('pElCt5oNDuI' in map(lambda x: YoutubeIE()._extract_id(x[0]), DL.result))
+        self.assertFalse('KdPEApIVdWM' in map(lambda x: YoutubeIE()._extract_id(x[0]), DL.result))
+
     def test_youtube_course(self):
         DL = FakeDownloader()
         IE = YoutubePlaylistIE(DL)
         # TODO find a > 100 (paginating?) videos course
         IE.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
-        self.assertEqual(DL.result[0], ['http://www.youtube.com/watch?v=j9WZyLZCBzs'])
+        self.assertEqual(YoutubeIE()._extract_id(DL.result[0][0]), 'j9WZyLZCBzs')
         self.assertEqual(len(DL.result), 25)
-        self.assertEqual(DL.result[-1], ['http://www.youtube.com/watch?v=rYefUsYuEp0'])
+        self.assertEqual(YoutubeIE()._extract_id(DL.result[-1][0]), 'rYefUsYuEp0')
 
     def test_youtube_channel(self):
         # I give up, please find a channel that does paginate and test this like test_youtube_playlist_long
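Note on the test hunks above: the assertions now compare IDs obtained with YoutubeIE()._extract_id rather than complete watch URLs, presumably so the tests do not depend on the exact URL format returned by the new API-backed extractor. A minimal sketch of the idea, using a hypothetical extract_id() helper as a stand-in for YoutubeIE()._extract_id (whose implementation is not part of this diff):

    import re

    # Hypothetical stand-in for YoutubeIE()._extract_id: pull the 11-character
    # video ID out of a watch URL.
    def extract_id(url):
        mobj = re.search(r'[?&]v=([0-9A-Za-z_-]{11})', url)
        return mobj.group(1) if mobj else None

    # FakeDownloader collects each result as a single-element list holding a watch URL.
    result = [['http://www.youtube.com/watch?v=bV9L5Ht9LgY'],
              ['http://www.youtube.com/watch?v=FXxLjLQi3Fg&feature=plpp'],  # extra query params don't matter
              ['http://www.youtube.com/watch?v=tU3Bgo5qJZE']]

    assert [extract_id(r[0]) for r in result] == ['bV9L5Ht9LgY', 'FXxLjLQi3Fg', 'tU3Bgo5qJZE']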
@@ -15,6 +15,7 @@ import email.utils
 import xml.etree.ElementTree
 import random
 import math
+import operator
 
 from .utils import *
 
@@ -1662,22 +1663,40 @@ class YahooSearchIE(InfoExtractor):
 class YoutubePlaylistIE(InfoExtractor):
     """Information Extractor for YouTube playlists."""
 
-    _VALID_URL = r'(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL|EC)?|PL|EC)([0-9A-Za-z-_]{10,})(?:/.*?/([0-9A-Za-z_-]+))?.*'
-    _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
-    _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&([^&"]+&)*list=.*?%s'
-    _MORE_PAGES_INDICATOR = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"
+    _VALID_URL = r"""(?:
+                     (?:https?://)?
+                     (?:\w+\.)?
+                     youtube\.com/
+                     (?:
+                        (?:course|view_play_list|my_playlists|artist|playlist)
+                        \? .*? (p|a|list)=
+                     |  user/.*?/user/
+                     |  p/
+                     |  user/.*?#[pg]/c/
+                     )
+                     (?:PL|EC)?
+                  |PL|EC)
+                  ([0-9A-Za-z-_]{10,})
+                  (?:/.*?/([0-9A-Za-z_-]+))?
+                  .*"""
+    _TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/playlists/%s?max-results=%i&start-index=%i&v=2&alt=json'
+    _MAX_RESULTS = 50
     IE_NAME = u'youtube:playlist'
 
     def __init__(self, downloader=None):
         InfoExtractor.__init__(self, downloader)
 
+    def suitable(self, url):
+        """Receives a URL and returns True if suitable for this IE."""
+        return re.match(self._VALID_URL, url, re.VERBOSE) is not None
+
     def report_download_page(self, playlist_id, pagenum):
         """Report attempt to download playlist page with given number."""
         self._downloader.to_screen(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))
 
     def _real_extract(self, url):
         # Extract playlist id
-        mobj = re.match(self._VALID_URL, url)
+        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
         if mobj is None:
             self._downloader.trouble(u'ERROR: invalid url: %s' % url)
             return
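Note on the hunk above: the new _VALID_URL spans multiple lines and contains literal whitespace, so it only works when compiled with re.VERBOSE, which is why both suitable() and _real_extract() now pass that flag. A quick standalone check of the pattern (regex copied from the hunk; the sample URLs come from the tests, plus a bare ID to exercise the |PL|EC branch):

    import re

    _VALID_URL = r"""(?:
                     (?:https?://)?
                     (?:\w+\.)?
                     youtube\.com/
                     (?:
                        (?:course|view_play_list|my_playlists|artist|playlist)
                        \? .*? (p|a|list)=
                     |  user/.*?/user/
                     |  p/
                     |  user/.*?#[pg]/c/
                     )
                     (?:PL|EC)?
                  |PL|EC)
                  ([0-9A-Za-z-_]{10,})
                  (?:/.*?/([0-9A-Za-z_-]+))?
                  .*"""

    for url in ('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
                'https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8',
                'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC'):
        mobj = re.match(_VALID_URL, url, re.VERBOSE)
        # group(2) is the playlist id with any PL/EC prefix stripped off
        print('%s -> %s' % (url, mobj.group(2) if mobj else None))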
@@ -1687,55 +1706,51 @@ class YoutubePlaylistIE(InfoExtractor):
             self._downloader.download([mobj.group(3)])
             return
 
-        # Download playlist pages
-        # prefix is 'p' as default for playlists but there are other types that need extra care
-        playlist_prefix = mobj.group(1)
-        if playlist_prefix == 'a':
-            playlist_access = 'artist'
-        else:
-            playlist_prefix = 'p'
-            playlist_access = 'view_play_list'
+        # Download playlist videos from API
         playlist_id = mobj.group(2)
-        video_ids = []
-        pagenum = 1
+        page_num = 1
+        videos = []
 
         while True:
-            self.report_download_page(playlist_id, pagenum)
-            url = self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum)
-            request = compat_urllib_request.Request(url)
+            self.report_download_page(playlist_id, page_num)
+
+            url = self._TEMPLATE_URL % (playlist_id, self._MAX_RESULTS, self._MAX_RESULTS * (page_num - 1) + 1)
             try:
-                page = compat_urllib_request.urlopen(request).read().decode('utf-8')
+                page = compat_urllib_request.urlopen(url).read().decode('utf8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
                 return
 
-            # Extract video identifiers
-            ids_in_page = []
-            for mobj in re.finditer(self._VIDEO_INDICATOR_TEMPLATE % playlist_id, page):
-                if mobj.group(1) not in ids_in_page:
-                    ids_in_page.append(mobj.group(1))
-            video_ids.extend(ids_in_page)
+            try:
+                response = json.loads(page)
+            except ValueError as err:
+                self._downloader.trouble(u'ERROR: Invalid JSON in API response: ' + compat_str(err))
+                return
 
-            if self._MORE_PAGES_INDICATOR not in page:
+            videos += [(entry['yt$position']['$t'], entry['content']['src']) for entry in response['feed']['entry']]
+
+            if len(response['feed']['entry']) < self._MAX_RESULTS:
                 break
-            pagenum = pagenum + 1
+            page_num += 1
 
-        total = len(video_ids)
+        videos = map(operator.itemgetter(1), sorted(videos))
+
+        total = len(videos)
 
         playliststart = self._downloader.params.get('playliststart', 1) - 1
         playlistend = self._downloader.params.get('playlistend', -1)
         if playlistend == -1:
-            video_ids = video_ids[playliststart:]
+            videos = videos[playliststart:]
         else:
-            video_ids = video_ids[playliststart:playlistend]
+            videos = videos[playliststart:playlistend]
 
-        if len(video_ids) == total:
+        if len(videos) == total:
             self._downloader.to_screen(u'[youtube] PL %s: Found %i videos' % (playlist_id, total))
         else:
-            self._downloader.to_screen(u'[youtube] PL %s: Found %i videos, downloading %i' % (playlist_id, total, len(video_ids)))
+            self._downloader.to_screen(u'[youtube] PL %s: Found %i videos, downloading %i' % (playlist_id, total, len(videos)))
 
-        for id in video_ids:
-            self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
+        for video in videos:
+            self._downloader.download([video])
         return
 
 
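Note on the hunk above: the rewritten loop walks the (since-retired) YouTube GData v2 playlist feed in pages of _MAX_RESULTS = 50 entries. start-index is 1-based, so page N begins at 50 * (N - 1) + 1, and the loop stops as soon as a page returns fewer than 50 entries. Collecting (position, url) pairs and sorting before the final map(operator.itemgetter(1), ...) keeps playlist order even though entries arrive page by page. A minimal offline sketch of that flow, with a canned response standing in for the network call (field names are taken from the hunk; the fetch() helper and playlist id are purely illustrative):

    import json
    import operator

    MAX_RESULTS = 50
    TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/playlists/%s?max-results=%i&start-index=%i&v=2&alt=json'

    # One fake page shaped like the fields the extractor reads
    # (feed -> entry -> yt$position / content.src); fewer than 50 entries,
    # so the loop terminates after the first request.
    FAKE_PAGES = [json.dumps({'feed': {'entry': [
        {'yt$position': {'$t': 2}, 'content': {'src': 'http://www.youtube.com/watch?v=FXxLjLQi3Fg'}},
        {'yt$position': {'$t': 1}, 'content': {'src': 'http://www.youtube.com/watch?v=bV9L5Ht9LgY'}},
    ]}})]

    def fetch(url):
        # Stand-in for compat_urllib_request.urlopen(url).read().decode('utf8')
        return FAKE_PAGES.pop(0) if FAKE_PAGES else json.dumps({'feed': {'entry': []}})

    videos = []
    page_num = 1
    while True:
        url = TEMPLATE_URL % ('FAKE_PLAYLIST_ID', MAX_RESULTS, MAX_RESULTS * (page_num - 1) + 1)
        response = json.loads(fetch(url))
        videos += [(entry['yt$position']['$t'], entry['content']['src']) for entry in response['feed']['entry']]
        if len(response['feed']['entry']) < MAX_RESULTS:
            break
        page_num += 1

    # list(...) so the result behaves the same on Python 2 and 3
    videos = list(map(operator.itemgetter(1), sorted(videos)))
    print(videos)  # position 1 first, then position 2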