1
0
Fork 0
mirror of https://gitlab.com/dstftw/youtube-dl.git synced 2020-11-16 09:42:26 +00:00
youtube-dl/youtube_dl/extractor/moniker.py

80 lines
2.4 KiB
Python
Raw Normal View History

2014-09-16 18:48:53 +00:00
# coding: utf-8
from __future__ import unicode_literals
2014-09-16 20:56:31 +00:00
import os.path
2014-09-16 18:48:53 +00:00
import re
from .common import InfoExtractor
from ..compat import (
2014-09-16 18:48:53 +00:00
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import ExtractorError
2014-09-16 18:48:53 +00:00
2014-09-18 19:37:09 +00:00
class MonikerIE(InfoExtractor):
    """Extractor for allmyvideos.net and vidspot.net (Moniker Media hosts).

    Both sites serve a landing page containing a hidden HTML form; POSTing
    that form back to the same URL returns the page that embeds the actual
    video file URLs.
    """
    IE_DESC = 'allmyvideos.net and vidspot.net'
    _VALID_URL = r'https?://(?:www\.)?(?:allmyvideos|vidspot)\.net/(?P<id>[a-zA-Z0-9_-]+)'

    _TESTS = [{
        'url': 'http://allmyvideos.net/jih3nce3x6wn',
        'md5': '710883dee1bfc370ecf9fa6a89307c88',
        'info_dict': {
            'id': 'jih3nce3x6wn',
            'ext': 'mp4',
            'title': 'youtube-dl test video',
        },
    }, {
        'url': 'http://vidspot.net/l2ngsmhs8ci5',
        'md5': '710883dee1bfc370ecf9fa6a89307c88',
        'info_dict': {
            'id': 'l2ngsmhs8ci5',
            'ext': 'mp4',
            'title': 'youtube-dl test video',
        },
    }, {
        'url': 'https://www.vidspot.net/l2ngsmhs8ci5',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        orig_webpage = self._download_webpage(url, video_id)

        if '>File Not Found<' in orig_webpage:
            raise ExtractorError('Video %s does not exist' % video_id, expected=True)

        error = self._search_regex(
            r'class="err">([^<]+)<', orig_webpage, 'error', default=None)
        if error:
            raise ExtractorError(
                '%s returned error: %s' % (self.IE_NAME, error), expected=True)

        # Collect the hidden form fields and POST them back to the same URL
        # to obtain the page that embeds the video links.
        fields = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', orig_webpage)
        data = dict(fields)

        post = compat_urllib_parse.urlencode(data)
        headers = {
            b'Content-Type': b'application/x-www-form-urlencoded',
        }
        req = compat_urllib_request.Request(url, post, headers)
        webpage = self._download_webpage(
            req, video_id, note='Downloading video page ...')

        # The 'fname' hidden field carries the original filename; the title
        # is that name without its extension. Fail with a clean, catchable
        # error (not a bare KeyError) if the site layout has changed.
        fname = data.get('fname')
        if not fname:
            raise ExtractorError('Unable to extract filename')
        title = os.path.splitext(fname)[0]

        # Could be several links with different quality; assume they are
        # ordered from lowest to highest quality.
        links = re.findall(r'"file" : "?(.+?)",', webpage)
        formats = [{
            'url': link,
            'quality': quality,
        } for quality, link in enumerate(links)]
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
        }