Mirror of https://github.com/ytdl-org/youtube-dl.git, synced 2024-01-07 17:16:08 +00:00

Merge remote-tracking branch 'jtwaleson/master'
Commit 784b6d3a9b
@@ -9,16 +9,17 @@ import youtube_dl
 BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
 BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in"
 
+
 def build_completion(opt_parser):
     opts_flag = []
     for group in opt_parser.option_groups:
         for option in group.option_list:
-            #for every long flag
+            # for every long flag
             opts_flag.append(option.get_opt_string())
     with open(BASH_COMPLETION_TEMPLATE) as f:
         template = f.read()
     with open(BASH_COMPLETION_FILE, "w") as f:
-        #just using the special char
+        # just using the special char
         filled_template = template.replace("{{flags}}", " ".join(opts_flag))
         f.write(filled_template)
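For context on the hunk above: build_completion collects every long option string and splices the joined list into the template's {{flags}} placeholder. A minimal sketch of that substitution, using stand-in template text and flag names rather than the real devscripts files:

    # Stand-in values, not the real completion template or option list:
    template = 'complete --command youtube-dl --arguments "{{flags}}"'
    opts_flag = ['--help', '--version', '--update']
    filled_template = template.replace('{{flags}}', ' '.join(opts_flag))
    print(filled_template)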
@@ -233,6 +233,7 @@ def rmtree(path):
 
 #==============================================================================
 
+
 class BuildError(Exception):
     def __init__(self, output, code=500):
         self.output = output
@@ -369,7 +370,7 @@ class Builder(PythonBuilder, GITBuilder, YoutubeDLBuilder, DownloadBuilder, Clea
 
 
 class BuildHTTPRequestHandler(BaseHTTPRequestHandler):
-    actionDict = { 'build': Builder, 'download': Builder } # They're the same, no more caching.
+    actionDict = {'build': Builder, 'download': Builder}  # They're the same, no more caching.
 
     def do_GET(self):
         path = urlparse.urlparse(self.path)
@@ -23,6 +23,7 @@ EXTRA_ARGS = {
     'batch-file': ['--require-parameter'],
 }
 
+
 def build_completion(opt_parser):
     commands = []
 
@@ -1,8 +1,5 @@
 #!/usr/bin/env python3
 import hashlib
-import shutil
-import subprocess
-import tempfile
 import urllib.request
 import json
 
@@ -73,4 +73,3 @@ atom_template = atom_template.replace('@ENTRIES@', entries_str)
 
 with io.open('update/releases.atom', 'w', encoding='utf-8') as atom_file:
     atom_file.write(atom_template)
-
@@ -9,6 +9,7 @@ sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(
 
 import youtube_dl
 
+
 def main():
     with open('supportedsites.html.in', 'r', encoding='utf-8') as tmplf:
         template = tmplf.read()
@@ -9,4 +9,4 @@ py2exe_options = {
     "dll_excludes": ['w9xpopen.exe']
 }
 
-setup(console=['youtube-dl.py'], options={ "py2exe": py2exe_options }, zipfile=None)
+setup(console=['youtube-dl.py'], options={"py2exe": py2exe_options}, zipfile=None)
@@ -4,13 +4,17 @@ import sys, os
 import urllib2
 import json, hashlib
 
+
 def rsa_verify(message, signature, key):
     from struct import pack
     from hashlib import sha256
     from sys import version_info
 
     def b(x):
-        if version_info[0] == 2: return x
-        else: return x.encode('latin1')
+        if version_info[0] == 2:
+            return x
+        else:
+            return x.encode('latin1')
     assert(type(message) == type(b('')))
     block_size = 0
     n = key[0]
@@ -23,13 +27,17 @@ def rsa_verify(message, signature, key):
         raw_bytes.insert(0, pack("B", signature & 0xFF))
         signature >>= 8
     signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
-    if signature[0:2] != b('\x00\x01'): return False
+    if signature[0:2] != b('\x00\x01'):
+        return False
     signature = signature[2:]
-    if not b('\x00') in signature: return False
-    signature = signature[signature.index(b('\x00'))+1:]
-    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
+    if not b('\x00') in signature:
+        return False
+    signature = signature[signature.index(b('\x00')) + 1:]
+    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')):
+        return False
     signature = signature[19:]
-    if signature != sha256(message).digest(): return False
+    if signature != sha256(message).digest():
+        return False
     return True
 
 sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
@@ -92,7 +100,7 @@ echo Updating youtube-dl...
 ping 127.0.0.1 -n 5 -w 1000 > NUL
 move /Y "%s.new" "%s"
 del "%s"
-\n""" %(exe, exe, bat))
+\n""" % (exe, exe, bat))
 b.close()
 
 os.startfile(bat)
@@ -72,8 +72,10 @@ class FakeYDL(YoutubeDL):
     def expect_warning(self, regex):
         # Silence an expected warning matching a regex
         old_report_warning = self.report_warning
+
         def report_warning(self, message):
-            if re.match(regex, message): return
+            if re.match(regex, message):
+                return
             old_report_warning(message)
         self.report_warning = types.MethodType(report_warning, self)
 
@@ -266,6 +266,7 @@ class TestFormatSelection(unittest.TestCase):
             'ext': 'mp4',
             'width': None,
         }
+
         def fname(templ):
             ydl = YoutubeDL({'outtmpl': templ})
             return ydl.prepare_filename(info)
@@ -32,19 +32,19 @@ class TestAllURLsMatching(unittest.TestCase):
     def test_youtube_playlist_matching(self):
         assertPlaylist = lambda url: self.assertMatch(url, ['youtube:playlist'])
         assertPlaylist('ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
-        assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q') #585
+        assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q')  # 585
         assertPlaylist('PL63F0C78739B09958')
         assertPlaylist('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
         assertPlaylist('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
         assertPlaylist('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
-        assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') #668
+        assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')  # 668
         self.assertFalse('youtube:playlist' in self.matching_ies('PLtS2H6bU1M'))
         # Top tracks
         assertPlaylist('https://www.youtube.com/playlist?list=MCUS.20142101')
 
     def test_youtube_matching(self):
         self.assertTrue(YoutubeIE.suitable('PLtS2H6bU1M'))
-        self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668
+        self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012'))  # 668
         self.assertMatch('http://youtu.be/BaW_jenozKc', ['youtube'])
         self.assertMatch('http://www.youtube.com/v/BaW_jenozKc', ['youtube'])
         self.assertMatch('https://youtube.googleapis.com/v/BaW_jenozKc', ['youtube'])
@@ -40,18 +40,22 @@ from youtube_dl.extractor import get_info_extractor
 
 RETRIES = 3
 
+
 class YoutubeDL(youtube_dl.YoutubeDL):
     def __init__(self, *args, **kwargs):
         self.to_stderr = self.to_screen
         self.processed_info_dicts = []
         super(YoutubeDL, self).__init__(*args, **kwargs)
+
     def report_warning(self, message):
         # Don't accept warnings during tests
         raise ExtractorError(message)
+
     def process_info(self, info_dict):
         self.processed_info_dicts.append(info_dict)
         return super(YoutubeDL, self).process_info(info_dict)
 
+
 def _file_md5(fn):
     with open(fn, 'rb') as f:
         return hashlib.md5(f.read()).hexdigest()
@@ -61,10 +65,13 @@ defs = gettestcases()
 
 class TestDownload(unittest.TestCase):
     maxDiff = None
+
     def setUp(self):
         self.defs = defs
 
-### Dynamically generate tests
+# Dynamically generate tests
+
+
 def generator(test_case):
 
     def test_template(self):
@@ -101,6 +108,7 @@ def generator(test_case):
         ydl = YoutubeDL(params, auto_init=False)
         ydl.add_default_info_extractors()
         finished_hook_called = set()
+
         def _hook(status):
             if status['status'] == 'finished':
                 finished_hook_called.add(status['filename'])
@@ -111,6 +119,7 @@ def generator(test_case):
             return tc.get('file') or ydl.prepare_filename(tc.get('info_dict', {}))
 
         res_dict = None
+
         def try_rm_tcs_files(tcs=None):
             if tcs is None:
                 tcs = test_cases
@@ -206,7 +215,7 @@ def generator(test_case):
 
     return test_template
 
-### And add them to TestDownload
+# And add them to TestDownload
 for n, test_case in enumerate(defs):
     test_method = generator(test_case)
     tname = 'test_' + str(test_case['name'])
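For context on the test_download hunks above: unittest collects any test_* attribute on a TestCase subclass, so the module attaches one generated method per download test case at import time. A simplified, self-contained sketch of that pattern, using made-up case data rather than the real gettestcases() output:

    import unittest

    class TestDownload(unittest.TestCase):
        maxDiff = None

    def generator(test_case):
        def test_template(self):
            # A real test would run the download; this stand-in only checks shape.
            self.assertIn('name', test_case)
        return test_template

    # And add them to TestDownload, as the loop above does
    for n, test_case in enumerate([{'name': 'Youtube'}, {'name': 'Vimeo'}]):
        test_method = generator(test_case)
        setattr(TestDownload, 'test_' + str(test_case['name']), test_method)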
@@ -23,6 +23,7 @@ from youtube_dl.extractor import (
 class BaseTestSubtitles(unittest.TestCase):
     url = None
     IE = None
+
     def setUp(self):
         self.DL = FakeYDL()
         self.ie = self.IE(self.DL)
@@ -45,7 +45,6 @@ from youtube_dl.utils import (
     escape_rfc3986,
     escape_url,
     js_to_json,
-    get_filesystem_encoding,
     intlist_to_bytes,
     args_to_str,
 )
@@ -120,7 +119,7 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
         self.assertEqual(orderedSet([]), [])
         self.assertEqual(orderedSet([1]), [1])
-        #keep the list ordered
+        # keep the list ordered
         self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])
 
     def test_unescape_html(self):
@@ -129,7 +128,7 @@ class TestUtil(unittest.TestCase):
             unescapeHTML('&eacute;'), 'é')
 
     def test_daterange(self):
-        _20century = DateRange("19000101","20000101")
+        _20century = DateRange("19000101", "20000101")
         self.assertFalse("17890714" in _20century)
         _ac = DateRange("00010101")
         self.assertTrue("19690721" in _ac)
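For context on the test_daterange fix above: DateRange supports membership tests on yyyymmdd strings, which is all the assertions rely on. A small usage sketch, assuming youtube_dl is importable:

    from youtube_dl.utils import DateRange

    _20century = DateRange("19000101", "20000101")
    print("19690721" in _20century)  # True: inside the range
    print("17890714" in _20century)  # False: predates it
    _ac = DateRange("00010101")      # open-ended end date
    print("19690721" in _ac)         # True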
@@ -31,19 +31,18 @@ params = get_params({
 })
 
-
 
 TEST_ID = 'gr51aVj-mLg'
 ANNOTATIONS_FILE = TEST_ID + '.flv.annotations.xml'
 EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label']
 
 
 class TestAnnotations(unittest.TestCase):
     def setUp(self):
         # Clear old files
         self.tearDown()
 
     def test_info_json(self):
-        expected = list(EXPECTED_ANNOTATIONS) #Two annotations could have the same text.
+        expected = list(EXPECTED_ANNOTATIONS)  # Two annotations could have the same text.
         ie = youtube_dl.extractor.YoutubeIE()
         ydl = YoutubeDL(params)
         ydl.add_info_extractor(ie)
@@ -59,19 +58,18 @@ class TestAnnotations(unittest.TestCase):
         self.assertEqual(annotationsTag.tag, 'annotations')
         annotations = annotationsTag.findall('annotation')
 
-        #Not all the annotations have TEXT children and the annotations are returned unsorted.
+        # Not all the annotations have TEXT children and the annotations are returned unsorted.
         for a in annotations:
             self.assertEqual(a.tag, 'annotation')
             if a.get('type') == 'text':
                 textTag = a.find('TEXT')
                 text = textTag.text
-                self.assertTrue(text in expected) #assertIn only added in python 2.7
-                #remove the first occurance, there could be more than one annotation with the same text
+                self.assertTrue(text in expected)  # assertIn only added in python 2.7
+                # remove the first occurance, there could be more than one annotation with the same text
                 expected.remove(text)
-        #We should have seen (and removed) all the expected annotation texts.
+        # We should have seen (and removed) all the expected annotation texts.
         self.assertEqual(len(expected), 0, 'Not all expected annotations were found.')
-
 
     def tearDown(self):
         try_rm(ANNOTATIONS_FILE)
 
@@ -12,10 +12,6 @@ from test.helper import FakeYDL
 from youtube_dl.extractor import (
     YoutubePlaylistIE,
     YoutubeIE,
-    YoutubeChannelIE,
-    YoutubeShowIE,
-    YoutubeTopListIE,
-    YoutubeSearchURLIE,
 )
 
 
@@ -29,7 +29,6 @@ from .compat import (
     compat_str,
     compat_urllib_error,
     compat_urllib_request,
-    shlex_quote,
 )
 from .utils import (
     escape_url,
@@ -700,6 +699,7 @@ class YoutubeDL(object):
             self.report_warning(
                 'Extractor %s returned a compat_list result. '
                 'It needs to be updated.' % ie_result.get('extractor'))
+
             def _fixup(r):
                 self.add_extra_info(r,
                     {
@@ -1111,7 +1111,7 @@ class YoutubeDL(object):
 
         for url in url_list:
             try:
-                #It also downloads the videos
+                # It also downloads the videos
                 res = self.extract_info(url)
             except UnavailableVideoError:
                 self.report_error('unable to download video')
@@ -1428,4 +1428,3 @@ class YoutubeDL(object):
         if encoding is None:
             encoding = preferredencoding()
         return encoding
-
@@ -76,10 +76,10 @@ def _real_main(argv=None):
     if opts.headers is not None:
         for h in opts.headers:
             if h.find(':', 1) < 0:
-                parser.error('wrong header formatting, it should be key:value, not "%s"'%h)
+                parser.error('wrong header formatting, it should be key:value, not "%s"' % h)
             key, value = h.split(':', 2)
             if opts.verbose:
-                write_string('[debug] Adding header from command line option %s:%s\n'%(key, value))
+                write_string('[debug] Adding header from command line option %s:%s\n' % (key, value))
             std_headers[key] = value
 
     # Dump user agent
@@ -128,7 +128,6 @@ def _real_main(argv=None):
             compat_print(desc)
         sys.exit(0)
 
-
     # Conflicting, missing and erroneous options
     if opts.usenetrc and (opts.username is not None or opts.password is not None):
         parser.error('using .netrc conflicts with giving username/password')
@@ -197,7 +196,7 @@ def _real_main(argv=None):
         # In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems)
         if opts.outtmpl is not None:
             opts.outtmpl = opts.outtmpl.decode(preferredencoding())
-    outtmpl =((opts.outtmpl is not None and opts.outtmpl)
+    outtmpl = ((opts.outtmpl is not None and opts.outtmpl)
             or (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s')
            or (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s')
            or (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s')
@@ -317,7 +316,6 @@ def _real_main(argv=None):
             ydl.add_post_processor(FFmpegAudioFixPP())
             ydl.add_post_processor(AtomicParsleyPP())
 
-
         # Please keep ExecAfterDownload towards the bottom as it allows the user to modify the final file in any way.
         # So if the user is able to remove the file before your postprocessor runs it might cause a few problems.
         if opts.exec_cmd:
@@ -7,6 +7,7 @@ from .utils import bytes_to_intlist, intlist_to_bytes
 
 BLOCK_SIZE_BYTES = 16
 
+
 def aes_ctr_decrypt(data, key, counter):
     """
     Decrypt with aes in counter mode
@@ -20,11 +21,11 @@ def aes_ctr_decrypt(data, key, counter):
     expanded_key = key_expansion(key)
     block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
 
-    decrypted_data=[]
+    decrypted_data = []
     for i in range(block_count):
         counter_block = counter.next_value()
-        block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
-        block += [0]*(BLOCK_SIZE_BYTES - len(block))
+        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
+        block += [0] * (BLOCK_SIZE_BYTES - len(block))
 
         cipher_counter_block = aes_encrypt(counter_block, expanded_key)
         decrypted_data += xor(block, cipher_counter_block)
@@ -32,6 +33,7 @@ def aes_ctr_decrypt(data, key, counter):
 
     return decrypted_data
 
+
 def aes_cbc_decrypt(data, key, iv):
     """
     Decrypt with aes in CBC mode
@@ -44,11 +46,11 @@ def aes_cbc_decrypt(data, key, iv):
     expanded_key = key_expansion(key)
     block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
 
-    decrypted_data=[]
+    decrypted_data = []
     previous_cipher_block = iv
     for i in range(block_count):
-        block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
-        block += [0]*(BLOCK_SIZE_BYTES - len(block))
+        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
+        block += [0] * (BLOCK_SIZE_BYTES - len(block))
 
         decrypted_block = aes_decrypt(block, expanded_key)
         decrypted_data += xor(decrypted_block, previous_cipher_block)
@@ -57,6 +59,7 @@ def aes_cbc_decrypt(data, key, iv):
 
     return decrypted_data
 
+
 def key_expansion(data):
     """
     Generate key schedule
@@ -73,24 +76,25 @@ def key_expansion(data):
         temp = data[-4:]
         temp = key_schedule_core(temp, rcon_iteration)
         rcon_iteration += 1
-        data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
+        data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
 
         for _ in range(3):
             temp = data[-4:]
-            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
+            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
 
         if key_size_bytes == 32:
             temp = data[-4:]
             temp = sub_bytes(temp)
-            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
+            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
 
         for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
             temp = data[-4:]
-            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
+            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
     data = data[:expanded_key_size_bytes]
 
     return data
 
+
 def aes_encrypt(data, expanded_key):
     """
     Encrypt one block with aes
@@ -102,15 +106,16 @@ def aes_encrypt(data, expanded_key):
     rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
 
     data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
-    for i in range(1, rounds+1):
+    for i in range(1, rounds + 1):
         data = sub_bytes(data)
         data = shift_rows(data)
         if i != rounds:
             data = mix_columns(data)
-        data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
+        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
 
     return data
 
+
 def aes_decrypt(data, expanded_key):
     """
     Decrypt one block with aes
@@ -122,7 +127,7 @@ def aes_decrypt(data, expanded_key):
     rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
 
     for i in range(rounds, 0, -1):
-        data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
+        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
         if i != rounds:
             data = mix_columns_inv(data)
         data = shift_rows_inv(data)
@@ -131,6 +136,7 @@ def aes_decrypt(data, expanded_key):
 
     return data
 
+
 def aes_decrypt_text(data, password, key_size_bytes):
     """
     Decrypt text
@@ -149,14 +155,15 @@ def aes_decrypt_text(data, password, key_size_bytes):
     data = bytes_to_intlist(base64.b64decode(data))
     password = bytes_to_intlist(password.encode('utf-8'))
 
-    key = password[:key_size_bytes] + [0]*(key_size_bytes - len(password))
+    key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password))
     key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES)
 
     nonce = data[:NONCE_LENGTH_BYTES]
     cipher = data[NONCE_LENGTH_BYTES:]
 
     class Counter:
-        __value = nonce + [0]*(BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
+        __value = nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
+
         def next_value(self):
             temp = self.__value
             self.__value = inc(self.__value)
@@ -200,14 +207,14 @@ SBOX_INV = (0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x
             0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
             0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
             0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d)
-MIX_COLUMN_MATRIX = ((0x2,0x3,0x1,0x1),
-                     (0x1,0x2,0x3,0x1),
-                     (0x1,0x1,0x2,0x3),
-                     (0x3,0x1,0x1,0x2))
-MIX_COLUMN_MATRIX_INV = ((0xE,0xB,0xD,0x9),
-                         (0x9,0xE,0xB,0xD),
-                         (0xD,0x9,0xE,0xB),
-                         (0xB,0xD,0x9,0xE))
+MIX_COLUMN_MATRIX = ((0x2, 0x3, 0x1, 0x1),
+                     (0x1, 0x2, 0x3, 0x1),
+                     (0x1, 0x1, 0x2, 0x3),
+                     (0x3, 0x1, 0x1, 0x2))
+MIX_COLUMN_MATRIX_INV = ((0xE, 0xB, 0xD, 0x9),
+                         (0x9, 0xE, 0xB, 0xD),
+                         (0xD, 0x9, 0xE, 0xB),
+                         (0xB, 0xD, 0x9, 0xE))
 RIJNDAEL_EXP_TABLE = (0x01, 0x03, 0x05, 0x0F, 0x11, 0x33, 0x55, 0xFF, 0x1A, 0x2E, 0x72, 0x96, 0xA1, 0xF8, 0x13, 0x35,
                       0x5F, 0xE1, 0x38, 0x48, 0xD8, 0x73, 0x95, 0xA4, 0xF7, 0x02, 0x06, 0x0A, 0x1E, 0x22, 0x66, 0xAA,
                       0xE5, 0x34, 0x5C, 0xE4, 0x37, 0x59, 0xEB, 0x26, 0x6A, 0xBE, 0xD9, 0x70, 0x90, 0xAB, 0xE6, 0x31,
@@ -241,15 +248,19 @@ RIJNDAEL_LOG_TABLE = (0x00, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6, 0x4b, 0xc7
                       0x44, 0x11, 0x92, 0xd9, 0x23, 0x20, 0x2e, 0x89, 0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5,
                       0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07)
 
+
 def sub_bytes(data):
     return [SBOX[x] for x in data]
 
+
 def sub_bytes_inv(data):
     return [SBOX_INV[x] for x in data]
 
+
 def rotate(data):
     return data[1:] + [data[0]]
 
+
 def key_schedule_core(data, rcon_iteration):
     data = rotate(data)
     data = sub_bytes(data)
@@ -257,14 +268,17 @@ def key_schedule_core(data, rcon_iteration):
 
     return data
 
+
 def xor(data1, data2):
-    return [x^y for x, y in zip(data1, data2)]
+    return [x ^ y for x, y in zip(data1, data2)]
 
+
 def rijndael_mul(a, b):
-    if(a==0 or b==0):
+    if(a == 0 or b == 0):
         return 0
     return RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[a] + RIJNDAEL_LOG_TABLE[b]) % 0xFF]
 
+
 def mix_column(data, matrix):
     data_mixed = []
     for row in range(4):
@@ -275,33 +289,38 @@ def mix_column(data, matrix):
         data_mixed.append(mixed)
     return data_mixed
 
+
 def mix_columns(data, matrix=MIX_COLUMN_MATRIX):
     data_mixed = []
     for i in range(4):
-        column = data[i*4 : (i+1)*4]
+        column = data[i * 4: (i + 1) * 4]
         data_mixed += mix_column(column, matrix)
     return data_mixed
 
+
 def mix_columns_inv(data):
     return mix_columns(data, MIX_COLUMN_MATRIX_INV)
 
+
 def shift_rows(data):
     data_shifted = []
     for column in range(4):
         for row in range(4):
-            data_shifted.append( data[((column + row) & 0b11) * 4 + row] )
+            data_shifted.append(data[((column + row) & 0b11) * 4 + row])
     return data_shifted
 
+
 def shift_rows_inv(data):
     data_shifted = []
     for column in range(4):
        for row in range(4):
-            data_shifted.append( data[((column - row) & 0b11) * 4 + row] )
+            data_shifted.append(data[((column - row) & 0b11) * 4 + row])
    return data_shifted
 
+
 def inc(data):
     data = data[:]  # copy
-    for i in range(len(data)-1,-1,-1):
+    for i in range(len(data) - 1, -1, -1):
         if data[i] == 255:
             data[i] = 0
         else:
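All of the aes.py hunks above are whitespace-only, but they span the whole pure-Python AES implementation: key_expansion derives the round-key schedule that aes_encrypt and aes_decrypt consume one 16-byte block at a time, and the CTR/CBC helpers drive those primitives over multi-block data. A round-trip sketch over a single block, with arbitrary demonstration values, assuming youtube_dl is importable:

    from youtube_dl.aes import key_expansion, aes_encrypt, aes_decrypt

    key = list(range(16))      # arbitrary 128-bit key as a list of byte values
    block = [0x41] * 16        # one block of 'A' bytes
    expanded_key = key_expansion(key)
    cipher_block = aes_encrypt(block, expanded_key)
    assert aes_decrypt(cipher_block, expanded_key) == block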
@@ -182,8 +182,10 @@ except ImportError: # Python < 3.3
 
 
 def compat_ord(c):
-    if type(c) is int: return c
-    else: return ord(c)
+    if type(c) is int:
+        return c
+    else:
+        return ord(c)
 
 
 if sys.version_info >= (3, 0):
@@ -254,7 +256,7 @@ else:
                 drive = ''
             userhome = os.path.join(drive, compat_getenv('HOMEPATH'))
 
-        if i != 1: #~user
+        if i != 1:  # ~user
             userhome = os.path.join(os.path.dirname(userhome), path[1:i])
 
         return userhome + path[i:]
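The compat_ord reflow above touches a Python 2/3 shim: indexing or iterating a bytes object yields ints on Python 3 but 1-character strings on Python 2, and compat_ord normalizes both to an int. A quick sketch, assuming the compat_ord defined above is in scope:

    for c in b'\x00\x7f\xff':
        print(compat_ord(c))  # 0, 127, 255 on both Python 2 and 3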
@@ -55,7 +55,7 @@ class FlvReader(io.BytesIO):
         if size == 1:
             real_size = self.read_unsigned_long_long()
             header_end = 16
-        return real_size, box_type, self.read(real_size-header_end)
+        return real_size, box_type, self.read(real_size - header_end)
 
     def read_asrt(self):
         # version
@@ -180,7 +180,7 @@ def build_fragments_list(boot_info):
     n_frags = segment_run_entry[1]
     fragment_run_entry_table = boot_info['fragments'][0]['fragments']
     first_frag_number = fragment_run_entry_table[0]['first']
-    for (i, frag_number) in zip(range(1, n_frags+1), itertools.count(first_frag_number)):
+    for (i, frag_number) in zip(range(1, n_frags + 1), itertools.count(first_frag_number)):
         res.append((1, frag_number))
     return res
 
@@ -101,4 +101,3 @@ class NativeHlsFD(FileDownloader):
             })
         self.try_rename(tmpfilename, filename)
         return True
-
@@ -46,13 +46,13 @@ class RtmpFD(FileDownloader):
                     continue
                 mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
                 if mobj:
-                    downloaded_data_len = int(float(mobj.group(1))*1024)
+                    downloaded_data_len = int(float(mobj.group(1)) * 1024)
                     percent = float(mobj.group(2))
                     if not resume_percent:
                         resume_percent = percent
                         resume_downloaded_data_len = downloaded_data_len
-                    eta = self.calc_eta(start, time.time(), 100-resume_percent, percent-resume_percent)
-                    speed = self.calc_speed(start, time.time(), downloaded_data_len-resume_downloaded_data_len)
+                    eta = self.calc_eta(start, time.time(), 100 - resume_percent, percent - resume_percent)
+                    speed = self.calc_speed(start, time.time(), downloaded_data_len - resume_downloaded_data_len)
                     data_len = None
                     if percent > 0:
                         data_len = int(downloaded_data_len * 100 / percent)
@@ -72,7 +72,7 @@ class RtmpFD(FileDownloader):
                     # no percent for live streams
                     mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
                     if mobj:
-                        downloaded_data_len = int(float(mobj.group(1))*1024)
+                        downloaded_data_len = int(float(mobj.group(1)) * 1024)
                         time_now = time.time()
                         speed = self.calc_speed(start, time_now, downloaded_data_len)
                         self.report_progress_live_stream(downloaded_data_len, speed, time_now - start)
@@ -88,7 +88,7 @@ class RtmpFD(FileDownloader):
             if not cursor_in_new_line:
                 self.to_screen('')
                 cursor_in_new_line = True
-            self.to_screen('[rtmpdump] '+line)
+            self.to_screen('[rtmpdump] ' + line)
         proc.wait()
         if not cursor_in_new_line:
             self.to_screen('')
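The RtmpFD hunks above only respace the code that scrapes rtmpdump's progress output. A standalone sketch of that parsing step, fed a made-up progress line:

    import re

    line = '123.456 kB / 12.34 sec (5.6%)'  # made-up rtmpdump progress line
    mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
    downloaded_data_len = int(float(mobj.group(1)) * 1024)  # 126418 bytes
    percent = float(mobj.group(2))                          # 5.6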
@@ -529,4 +529,4 @@ def gen_extractors():
 
 def get_info_extractor(ie_name):
     """Returns the info extractor class with the given ie_name"""
-    return globals()[ie_name+'IE']
+    return globals()[ie_name + 'IE']
@@ -5,6 +5,7 @@ import re
 
 from .common import InfoExtractor
 
+
 class AdultSwimIE(InfoExtractor):
     _VALID_URL = r'https?://video\.adultswim\.com/(?P<path>.+?)(?:\.html)?(?:\?.*)?(?:#.*)?$'
     _TEST = {
@@ -1,4 +1,4 @@
-#coding: utf-8
+# coding: utf-8
 
 from __future__ import unicode_literals
 
@@ -70,11 +70,13 @@ class AppleTrailersIE(InfoExtractor):
         uploader_id = mobj.group('company')
 
         playlist_url = compat_urlparse.urljoin(url, 'includes/playlists/itunes.inc')
+
         def fix_html(s):
             s = re.sub(r'(?s)<script[^<]*?>.*?</script>', '', s)
             s = re.sub(r'<img ([^<]*?)>', r'<img \1/>', s)
             # The ' in the onClick attributes are not escaped, it couldn't be parsed
             # like: http://trailers.apple.com/trailers/wb/gravity/
+
             def _clean_json(m):
                 return 'iTunes.playURL(%s);' % m.group(1).replace('\'', '&#39;')
             s = re.sub(self._JSON_RE, _clean_json, s)
@@ -192,4 +192,3 @@ class ARDIE(InfoExtractor):
             'upload_date': upload_date,
             'thumbnail': thumbnail,
         }
-
@@ -12,17 +12,17 @@ class AudiomackIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?audiomack\.com/song/(?P<id>[\w/-]+)'
     IE_NAME = 'audiomack'
     _TESTS = [
-        #hosted on audiomack
+        # hosted on audiomack
         {
             'url': 'http://www.audiomack.com/song/roosh-williams/extraordinary',
             'info_dict':
             {
-                'id' : 'roosh-williams/extraordinary',
+                'id': 'roosh-williams/extraordinary',
                 'ext': 'mp3',
                 'title': 'Roosh Williams - Extraordinary'
             }
         },
-        #hosted on soundcloud via audiomack
+        # hosted on soundcloud via audiomack
         {
             'url': 'http://www.audiomack.com/song/xclusiveszone/take-kare',
             'file': '172419696.mp3',
@@ -49,7 +49,7 @@ class AudiomackIE(InfoExtractor):
             raise ExtractorError("Unable to deduce api url of song")
         realurl = api_response["url"]
 
-        #Audiomack wraps a lot of soundcloud tracks in their branded wrapper
+        # Audiomack wraps a lot of soundcloud tracks in their branded wrapper
         # - if so, pass the work off to the soundcloud extractor
         if SoundcloudIE.suitable(realurl):
             return {'_type': 'url', 'url': realurl, 'ie_key': 'Soundcloud'}
@@ -18,7 +18,7 @@ class BambuserIE(InfoExtractor):
     _TEST = {
         'url': 'http://bambuser.com/v/4050584',
         # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388
-        #u'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
+        # u'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
         'info_dict': {
             'id': '4050584',
             'ext': 'flv',
@@ -83,12 +83,12 @@ class BandcampIE(InfoExtractor):
             initial_url = mp3_info['url']
             re_url = r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$'
             m_url = re.match(re_url, initial_url)
-            #We build the url we will use to get the final track url
+            # We build the url we will use to get the final track url
             # This url is build in Bandcamp in the script download_bunde_*.js
             request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), video_id, m_url.group('ts'))
             final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url')
             # If we could correctly generate the .rand field the url would be
-            #in the "download_url" key
+            # in the "download_url" key
             final_url = re.search(r'"retry_url":"(.*?)"', final_url_webpage).group(1)
 
             return {
@@ -1,4 +1,4 @@
-#coding: utf-8
+# coding: utf-8
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
@@ -5,6 +5,7 @@ import re
 from .common import InfoExtractor
 from ..utils import ExtractorError
 
+
 class Channel9IE(InfoExtractor):
     '''
     Common extractor for channel9.msdn.com.
@@ -31,7 +32,7 @@ class Channel9IE(InfoExtractor):
             'session_code': 'KOS002',
             'session_day': 'Day 1',
             'session_room': 'Arena 1A',
-            'session_speakers': [ 'Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen' ],
+            'session_speakers': ['Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen'],
         },
     },
     {
@@ -44,7 +45,7 @@ class Channel9IE(InfoExtractor):
             'description': 'md5:d1e6ecaafa7fb52a2cacdf9599829f5b',
             'duration': 1540,
             'thumbnail': 'http://video.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
-            'authors': [ 'Mike Wilmot' ],
+            'authors': ['Mike Wilmot'],
         },
     }
    ]
@@ -202,17 +203,17 @@ class Channel9IE(InfoExtractor):
 
         if slides is not None:
             d = common.copy()
-            d.update({ 'title': title + '-Slides', 'url': slides })
+            d.update({'title': title + '-Slides', 'url': slides})
             result.append(d)
 
         if zip_ is not None:
             d = common.copy()
-            d.update({ 'title': title + '-Zip', 'url': zip_ })
+            d.update({'title': title + '-Zip', 'url': zip_})
             result.append(d)
 
         if len(formats) > 0:
             d = common.copy()
-            d.update({ 'title': title, 'formats': formats })
+            d.update({'title': title, 'formats': formats})
             result.append(d)
 
         return result
@@ -77,7 +77,7 @@ class CinemassacreIE(InfoExtractor):
         if videolist_url:
             videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML')
             formats = []
-            baseurl = vidurl[:vidurl.rfind('/')+1]
+            baseurl = vidurl[:vidurl.rfind('/') + 1]
             for video in videolist.findall('.//video'):
                 src = video.get('src')
                 if not src:
@@ -39,6 +39,7 @@ class ClipsyndicateIE(InfoExtractor):
             transform_source=fix_xml_ampersands)
 
         track_doc = pdoc.find('trackList/track')
+
         def find_param(name):
             node = find_xpath_attr(track_doc, './/param', 'name', name)
             if node is not None:
@@ -423,17 +423,18 @@ class InfoExtractor(object):
         """Report attempt to log in."""
         self.to_screen('Logging in')
 
-    #Methods for following #608
+    # Methods for following #608
     @staticmethod
     def url_result(url, ie=None, video_id=None):
         """Returns a url that points to a page that should be processed"""
-        #TODO: ie should be the class used for getting the info
+        # TODO: ie should be the class used for getting the info
         video_info = {'_type': 'url',
                       'url': url,
                       'ie_key': ie}
         if video_id is not None:
             video_info['id'] = video_id
         return video_info
+
     @staticmethod
     def playlist_result(entries, playlist_id=None, playlist_title=None):
         """Returns a playlist"""
@@ -54,7 +54,7 @@ class CrackedIE(InfoExtractor):
 
         return {
             'id': video_id,
-            'url':video_url,
+            'url': video_url,
             'title': title,
             'description': description,
             'timestamp': timestamp,
@ -69,11 +69,9 @@ class CrunchyrollIE(SubtitlesInfoExtractor):
|
||||||
login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
|
login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
|
||||||
self._download_webpage(login_request, None, False, 'Wrong login info')
|
self._download_webpage(login_request, None, False, 'Wrong login info')
|
||||||
|
|
||||||
|
|
||||||
def _real_initialize(self):
|
def _real_initialize(self):
|
||||||
self._login()
|
self._login()
|
||||||
|
|
||||||
|
|
||||||
def _decrypt_subtitles(self, data, iv, id):
|
def _decrypt_subtitles(self, data, iv, id):
|
||||||
data = bytes_to_intlist(data)
|
data = bytes_to_intlist(data)
|
||||||
iv = bytes_to_intlist(iv)
|
iv = bytes_to_intlist(iv)
|
||||||
|
@ -99,8 +97,10 @@ class CrunchyrollIE(SubtitlesInfoExtractor):
|
||||||
return shaHash + [0] * 12
|
return shaHash + [0] * 12
|
||||||
|
|
||||||
key = obfuscate_key(id)
|
key = obfuscate_key(id)
|
||||||
|
|
||||||
class Counter:
|
class Counter:
|
||||||
__value = iv
|
__value = iv
|
||||||
|
|
||||||
def next_value(self):
|
def next_value(self):
|
||||||
temp = self.__value
|
temp = self.__value
|
||||||
self.__value = inc(self.__value)
|
self.__value = inc(self.__value)
|
||||||
|
@ -183,7 +183,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
|
||||||
|
|
||||||
return output
|
return output
|
||||||
|
|
||||||
def _real_extract(self,url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
mobj = re.match(self._VALID_URL, url)
|
||||||
video_id = mobj.group('video_id')
|
video_id = mobj.group('video_id')
|
||||||
|
|
||||||
|
@@ -226,10 +226,10 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
         formats = []
         for fmt in re.findall(r'\?p([0-9]{3,4})=1', webpage):
             stream_quality, stream_format = self._FORMAT_IDS[fmt]
-            video_format = fmt+'p'
+            video_format = fmt + 'p'
             streamdata_req = compat_urllib_request.Request('http://www.crunchyroll.com/xml/')
             # urlencode doesn't work!
-            streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality='+stream_quality+'&media%5Fid='+stream_id+'&video%5Fformat='+stream_format
+            streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality=' + stream_quality + '&media%5Fid=' + stream_id + '&video%5Fformat=' + stream_format
             streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
             streamdata_req.add_header('Content-Length', str(len(streamdata_req.data)))
             streamdata = self._download_xml(
@@ -248,8 +248,8 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
         subtitles = {}
         sub_format = self._downloader.params.get('subtitlesformat', 'srt')
         for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
-            sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id='+sub_id,\
-                                              video_id, note='Downloading subtitles for '+sub_name)
+            sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,\
+                                              video_id, note='Downloading subtitles for ' + sub_name)
             id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False)
             iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False)
             data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)
@@ -1,4 +1,4 @@
-#coding: utf-8
+# coding: utf-8
 from __future__ import unicode_literals
 
 import re
@@ -18,6 +18,7 @@ from ..utils import (
     unescapeHTML,
 )
 
+
 class DailymotionBaseInfoExtractor(InfoExtractor):
     @staticmethod
     def _build_request(url):
@@ -27,6 +28,7 @@ class DailymotionBaseInfoExtractor(InfoExtractor):
         request.add_header('Cookie', 'ff=off')
         return request
 
+
 class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
     """Information Extractor for Dailymotion"""
 
@@ -27,7 +27,7 @@ class DotsubIE(InfoExtractor):
         video_id = mobj.group('id')
         info_url = "https://dotsub.com/api/media/%s/metadata" % video_id
         info = self._download_json(info_url, video_id)
-        date = time.gmtime(info['dateCreated']/1000)  # The timestamp is in miliseconds
+        date = time.gmtime(info['dateCreated'] / 1000)  # The timestamp is in miliseconds
 
         return {
             'id': video_id,
@@ -40,7 +40,7 @@ class FC2IE(InfoExtractor):
 
         info_url = (
             "http://video.fc2.com/ginfo.php?mimi={1:s}&href={2:s}&v={0:s}&fversion=WIN%2011%2C6%2C602%2C180&from=2&otag=0&upid={0:s}&tk=null&".
-            format(video_id, mimi, compat_urllib_request.quote(refer, safe='').replace('.','%2E')))
+            format(video_id, mimi, compat_urllib_request.quote(refer, safe='').replace('.', '%2E')))
 
         info_webpage = self._download_webpage(
             info_url, video_id, note='Downloading info page')
@@ -748,7 +748,7 @@ class GenericIE(InfoExtractor):
         # Look for embedded blip.tv player
         mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage)
         if mobj:
-            return self.url_result('http://blip.tv/a/a-'+mobj.group(1), 'BlipTV')
+            return self.url_result('http://blip.tv/a/a-' + mobj.group(1), 'BlipTV')
         mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*(https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)[a-zA-Z0-9_]+)', webpage)
         if mobj:
             return self.url_result(mobj.group(1), 'BlipTV')
@@ -1025,4 +1025,3 @@ class GenericIE(InfoExtractor):
             '_type': 'playlist',
             'entries': entries,
         }
-
@@ -32,7 +32,7 @@ class InternetVideoArchiveIE(InfoExtractor):
     def _clean_query(query):
         NEEDED_ARGS = ['publishedid', 'customerid']
         query_dic = compat_urlparse.parse_qs(query)
-        cleaned_dic = dict((k,v[0]) for (k,v) in query_dic.items() if k in NEEDED_ARGS)
+        cleaned_dic = dict((k, v[0]) for (k, v) in query_dic.items() if k in NEEDED_ARGS)
         # Other player ids return m3u8 urls
         cleaned_dic['playerid'] = '247'
         cleaned_dic['videokbrate'] = '100000'
@@ -54,7 +54,7 @@ class IPrimaIE(InfoExtractor):
 
         player_url = (
             'http://embed.livebox.cz/iprimaplay/player-embed-v2.js?__tok%s__=%s' %
-            (floor(random()*1073741824), floor(random()*1073741824))
+            (floor(random() * 1073741824), floor(random() * 1073741824))
         )
 
         req = compat_urllib_request.Request(player_url)
@@ -45,4 +45,3 @@ class JadoreCettePubIE(InfoExtractor):
             'title': title,
             'description': description,
         }
-
@@ -30,4 +30,3 @@ class Ku6IE(InfoExtractor):
             'title': title,
             'url': downloadUrl
         }
-
@@ -75,4 +75,3 @@ class Laola1TvIE(InfoExtractor):
             'categories': categories,
             'ext': 'mp4',
         }
-
@@ -52,7 +52,7 @@ class LifeNewsIE(InfoExtractor):
             r'<div class=\'comments\'>\s*<span class=\'counter\'>(\d+)</span>', webpage, 'comment count', fatal=False)
 
         upload_date = self._html_search_regex(
-            r'<time datetime=\'([^\']+)\'>', webpage, 'upload date',fatal=False)
+            r'<time datetime=\'([^\']+)\'>', webpage, 'upload date', fatal=False)
         if upload_date is not None:
             upload_date = unified_strdate(upload_date)
 
@@ -71,4 +71,4 @@ class LifeNewsIE(InfoExtractor):
         if len(videos) == 1:
             return make_entry(video_id, videos[0])
         else:
-            return [make_entry(video_id, media, video_number+1) for video_number, media in enumerate(videos)]
+            return [make_entry(video_id, media, video_number + 1) for video_number, media in enumerate(videos)]
@@ -19,8 +19,7 @@ class LiveLeakIE(InfoExtractor):
             'uploader': 'ljfriel2',
             'title': 'Most unlucky car accident'
         }
-    },
-    {
+    }, {
         'url': 'http://www.liveleak.com/view?i=f93_1390833151',
         'md5': 'd3f1367d14cc3c15bf24fbfbe04b9abf',
         'info_dict': {
@@ -30,8 +29,7 @@ class LiveLeakIE(InfoExtractor):
             'uploader': 'ARD_Stinkt',
             'title': 'German Television does first Edward Snowden Interview (ENGLISH)',
         }
-    },
-    {
+    }, {
         'url': 'http://www.liveleak.com/view?i=4f7_1392687779',
         'md5': '42c6d97d54f1db107958760788c5f48f',
         'info_dict': {
@@ -7,6 +7,7 @@ from ..utils import (
     compat_urllib_parse,
 )
 
+
 class MalemotionIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://)?malemotion\.com/video/(.+?)\.(?P<id>.+?)(#|$)'
     _TEST = {
@@ -54,7 +54,7 @@ class MonikerIE(InfoExtractor):
 
         title = os.path.splitext(data['fname'])[0]
 
-        #Could be several links with different quality
+        # Could be several links with different quality
         links = re.findall(r'"file" : "?(.+?)",', webpage)
         # Assume the links are ordered in quality
         formats = [{
@@ -27,7 +27,7 @@ class MoviezineIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
         jsplayer = self._download_webpage('http://www.moviezine.se/api/player.js?video=%s' % video_id, video_id, 'Downloading js api player')
 
-        formats =[{
+        formats = [{
             'format_id': 'sd',
             'url': self._html_search_regex(r'file: "(.+?)",', jsplayer, 'file'),
             'quality': 0,
@@ -60,7 +60,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
             url = response.geturl()
             # Transform the url to get the best quality:
             url = re.sub(r'.+pxE=mp4', 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=0+_pxK=18639+_pxE=mp4', url, 1)
-            return [{'url': url,'ext': 'mp4'}]
+            return [{'url': url, 'ext': 'mp4'}]
 
     def _extract_video_formats(self, mdoc, mtvn_id):
         if re.match(r'.*/(error_country_block\.swf|geoblock\.mp4)$', mdoc.find('.//src').text) is not None:
@@ -245,7 +245,7 @@ class MTVIE(MTVServicesInfoExtractor):
         m_vevo = re.search(r'isVevoVideo = true;.*?vevoVideoId = "(.*?)";',
                            webpage, re.DOTALL)
         if m_vevo:
-            vevo_id = m_vevo.group(1);
+            vevo_id = m_vevo.group(1)
             self.to_screen('Vevo video detected: %s' % vevo_id)
             return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
 
@@ -73,4 +73,3 @@ class MuenchenTVIE(InfoExtractor):
             'is_live': True,
             'thumbnail': thumbnail,
         }
-
@@ -37,7 +37,7 @@ class MuzuTVIE(InfoExtractor):
         player_info_page = self._download_webpage('http://player.muzu.tv/player/playerInit?ai=%s' % video_id,
                                                   video_id, u'Downloading player info')
         video_info = json.loads(player_info_page)['videos'][0]
-        for quality in ['1080' , '720', '480', '360']:
+        for quality in ['1080', '720', '480', '360']:
             if video_info.get('v%s' % quality):
                 break
 
@@ -33,7 +33,7 @@ class MyVideoIE(InfoExtractor):
     # Original Code from: https://github.com/dersphere/plugin.video.myvideo_de.git
     # Released into the Public Domain by Tristan Fischer on 2013-05-19
     # https://github.com/rg3/youtube-dl/pull/842
-    def __rc4crypt(self,data, key):
+    def __rc4crypt(self, data, key):
         x = 0
         box = list(range(256))
         for i in list(range(256)):
@@ -49,10 +49,10 @@ class MyVideoIE(InfoExtractor):
             out += chr(compat_ord(char) ^ box[(box[x] + box[y]) % 256])
         return out
 
-    def __md5(self,s):
+    def __md5(self, s):
         return hashlib.md5(s).hexdigest().encode()
 
-    def _real_extract(self,url):
+    def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
 
@@ -173,4 +173,3 @@ class MyVideoIE(InfoExtractor):
             'play_path': video_playpath,
             'player_url': video_swfobj,
         }
-
@@ -40,7 +40,7 @@ class NaverIE(InfoExtractor):
             raise ExtractorError('couldn\'t extract vid and key')
         vid = m_id.group(1)
         key = m_id.group(2)
-        query = compat_urllib_parse.urlencode({'vid': vid, 'inKey': key,})
+        query = compat_urllib_parse.urlencode({'vid': vid, 'inKey': key, })
         query_urls = compat_urllib_parse.urlencode({
             'masterVid': vid,
             'protocol': 'p2p',
@@ -39,7 +39,6 @@ class NBAIE(InfoExtractor):
         duration = parse_duration(
             self._html_search_meta('duration', webpage, 'duration', fatal=False))
 
-
         return {
             'id': shortened_video_id,
             'url': video_url,
@@ -97,4 +97,3 @@ class OoyalaIE(InfoExtractor):
             }
         else:
             return self._extract_result(videos_info[0], videos_more_info)
-
@@ -6,6 +6,7 @@ import re
 from .common import InfoExtractor
 from ..utils import int_or_none
 
+
 class PodomaticIE(InfoExtractor):
     IE_NAME = 'podomatic'
     _VALID_URL = r'^(?P<proto>https?)://(?P<channel>[^.]+)\.podomatic\.com/entry/(?P<id>[^?]+)'
@@ -56,7 +56,7 @@ class PornHubIE(InfoExtractor):
         comment_count = self._extract_count(
             r'All comments \(<var class="videoCommentCount">([\d,\.]+)</var>', webpage, 'comment')
 
-        video_urls = list(map(compat_urllib_parse.unquote , re.findall(r'"quality_[0-9]{3}p":"([^"]+)', webpage)))
+        video_urls = list(map(compat_urllib_parse.unquote, re.findall(r'"quality_[0-9]{3}p":"([^"]+)', webpage)))
         if webpage.find('"encrypted":true') != -1:
             password = compat_urllib_parse.unquote_plus(self._html_search_regex(r'"video_title":"([^"]+)', webpage, 'password'))
             video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls))
@@ -38,7 +38,7 @@ class PornotubeIE(InfoExtractor):
         video_url = self._search_regex(VIDEO_URL_RE, webpage, 'video url')
         video_url = compat_urllib_parse.unquote(video_url)
 
-        #Get the uploaded date
+        # Get the uploaded date
         VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
         upload_date = self._html_search_regex(VIDEO_UPLOADED_RE, webpage, 'upload date', fatal=False)
         if upload_date:
@@ -41,4 +41,3 @@ class RingTVIE(InfoExtractor):
             'thumbnail': thumbnail_url,
             'description': description,
         }
-
@@ -44,7 +44,7 @@ class RtlXlIE(InfoExtractor):
 
         formats = self._extract_m3u8_formats(m3u8_url, uuid, ext='mp4')
 
-        video_urlpart = videopath.split('/flash/')[1][:-4]
+        video_urlpart = videopath.split('/flash/')[1][:-5]
         PG_URL_TEMPLATE = 'http://pg.us.rtl.nl/rtlxl/network/%s/progressive/%s.mp4'
 
         formats.extend([
@@ -54,7 +54,6 @@ def _decrypt_url(png):
     return url
 
 
-
 class RTVEALaCartaIE(InfoExtractor):
     IE_NAME = 'rtve.es:alacarta'
     IE_DESC = 'RTVE a la carta'
@@ -67,5 +67,3 @@ class ServingSysIE(InfoExtractor):
             'title': title,
             'entries': entries,
         }
-
-
@@ -1,7 +1,6 @@
 # encoding: utf-8
 from __future__ import unicode_literals
 
-import os.path
 import re
 import json
 import hashlib
@@ -12,15 +11,15 @@ from ..utils import (
     compat_urllib_parse,
     compat_urllib_request,
     ExtractorError,
-    url_basename,
     int_or_none,
+    unified_strdate,
 )
 
 
 class SmotriIE(InfoExtractor):
     IE_DESC = 'Smotri.com'
     IE_NAME = 'smotri'
-    _VALID_URL = r'^https?://(?:www\.)?(?:smotri\.com/video/view/\?id=|pics\.smotri\.com/(?:player|scrubber_custom8)\.swf\?file=)(?P<videoid>v(?P<realvideoid>[0-9]+)[a-z0-9]{4})'
+    _VALID_URL = r'^https?://(?:www\.)?(?:smotri\.com/video/view/\?id=|pics\.smotri\.com/(?:player|scrubber_custom8)\.swf\?file=)(?P<id>v(?P<realvideoid>[0-9]+)[a-z0-9]{4})'
     _NETRC_MACHINE = 'smotri'
 
     _TESTS = [
@@ -35,7 +34,6 @@ class SmotriIE(InfoExtractor):
                 'uploader': 'rbc2008',
                 'uploader_id': 'rbc08',
                 'upload_date': '20131118',
-                'description': 'катастрофа с камер видеонаблюдения, видео катастрофа с камер видеонаблюдения',
                 'thumbnail': 'http://frame6.loadup.ru/8b/a9/2610366.3.3.jpg',
             },
         },
@@ -50,7 +48,6 @@ class SmotriIE(InfoExtractor):
                 'uploader': 'Support Photofile@photofile',
                 'uploader_id': 'support-photofile',
                 'upload_date': '20070704',
-                'description': 'test, видео test',
                 'thumbnail': 'http://frame4.loadup.ru/03/ed/57591.2.3.jpg',
             },
         },
@@ -66,7 +63,6 @@ class SmotriIE(InfoExtractor):
                 'uploader_id': 'timoxa40',
                 'upload_date': '20100404',
                 'thumbnail': 'http://frame7.loadup.ru/af/3f/1390466.3.3.jpg',
-                'description': 'TOCCA_A_NOI_-_LE_COSE_NON_VANNO_CAMBIAMOLE_ORA-1, видео TOCCA_A_NOI_-_LE_COSE_NON_VANNO_CAMBIAMOLE_ORA-1',
             },
             'params': {
                 'videopassword': 'qwerty',
@@ -85,7 +81,6 @@ class SmotriIE(InfoExtractor):
                 'upload_date': '20101001',
                 'thumbnail': 'http://frame3.loadup.ru/75/75/1540889.1.3.jpg',
                 'age_limit': 18,
-                'description': 'этот ролик не покажут по ТВ, видео этот ролик не покажут по ТВ',
             },
             'params': {
                 'videopassword': '333'
@@ -102,17 +97,11 @@ class SmotriIE(InfoExtractor):
                 'uploader': 'HannahL',
                 'uploader_id': 'lisaha95',
                 'upload_date': '20090331',
-                'description': 'Shakira - Don\'t Bother, видео Shakira - Don\'t Bother',
                 'thumbnail': 'http://frame8.loadup.ru/44/0b/918809.7.3.jpg',
             },
         },
     ]
 
-    _SUCCESS = 0
-    _PASSWORD_NOT_VERIFIED = 1
-    _PASSWORD_DETECTED = 2
-    _VIDEO_NOT_FOUND = 3
-
     @classmethod
     def _extract_url(cls, webpage):
         mobj = re.search(
@@ -137,44 +126,44 @@ class SmotriIE(InfoExtractor):
         return self._html_search_meta(name, html, display_name)
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('videoid')
-        real_video_id = mobj.group('realvideoid')
+        video_id = self._match_id(url)
 
-        # Download video JSON data
-        video_json_url = 'http://smotri.com/vt.php?id=%s' % real_video_id
-        video_json_page = self._download_webpage(video_json_url, video_id, 'Downloading video JSON')
-        video_json = json.loads(video_json_page)
+        video_form = {
+            'ticket': video_id,
+            'video_url': '1',
+            'frame_url': '1',
+            'devid': 'LoadupFlashPlayer',
+            'getvideoinfo': '1',
+        }
 
-        status = video_json['status']
-        if status == self._VIDEO_NOT_FOUND:
+        request = compat_urllib_request.Request(
+            'http://smotri.com/video/view/url/bot/', compat_urllib_parse.urlencode(video_form))
+        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
+
+        video = self._download_json(request, video_id, 'Downloading video JSON')
+
+        if video.get('_moderate_no') or not video.get('moderated'):
+            raise ExtractorError('Video %s has not been approved by moderator' % video_id, expected=True)
+
+        if video.get('error'):
             raise ExtractorError('Video %s does not exist' % video_id, expected=True)
-        elif status == self._PASSWORD_DETECTED:  # The video is protected by a password, retry with
-                                                 # video-password set
-            video_password = self._downloader.params.get('videopassword', None)
-            if not video_password:
-                raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
-            video_json_url += '&md5pass=%s' % hashlib.md5(video_password.encode('utf-8')).hexdigest()
-            video_json_page = self._download_webpage(video_json_url, video_id, 'Downloading video JSON (video-password set)')
-            video_json = json.loads(video_json_page)
-            status = video_json['status']
-            if status == self._PASSWORD_NOT_VERIFIED:
-                raise ExtractorError('Video password is invalid', expected=True)
 
-        if status != self._SUCCESS:
-            raise ExtractorError('Unexpected status value %s' % status)
-
-        # Extract the URL of the video
-        video_url = video_json['file_data']
+        video_url = video.get('_vidURL') or video.get('_vidURL_mp4')
+        title = video['title']
+        thumbnail = video['_imgURL']
+        upload_date = unified_strdate(video['added'])
+        uploader = video['userNick']
+        uploader_id = video['userLogin']
+        duration = int_or_none(video['duration'])
 
         # Video JSON does not provide enough meta data
         # We will extract some from the video web page instead
-        video_page_url = 'http://smotri.com/video/view/?id=%s' % video_id
-        video_page = self._download_webpage(video_page_url, video_id, 'Downloading video page')
+        webpage_url = 'http://smotri.com/video/view/?id=%s' % video_id
+        webpage = self._download_webpage(webpage_url, video_id, 'Downloading video page')
 
         # Warning if video is unavailable
         warning = self._html_search_regex(
-            r'<div class="videoUnModer">(.*?)</div>', video_page,
+            r'<div class="videoUnModer">(.*?)</div>', webpage,
             'warning message', default=None)
         if warning is not None:
             self._downloader.report_warning(
@@ -182,84 +171,32 @@ class SmotriIE(InfoExtractor):
                 (video_id, warning))
 
         # Adult content
-        if re.search('EroConfirmText">', video_page) is not None:
+        if re.search('EroConfirmText">', webpage) is not None:
             self.report_age_confirmation()
             confirm_string = self._html_search_regex(
                 r'<a href="/video/view/\?id=%s&confirm=([^"]+)" title="[^"]+">' % video_id,
-                video_page, 'confirm string')
-            confirm_url = video_page_url + '&confirm=%s' % confirm_string
-            video_page = self._download_webpage(confirm_url, video_id, 'Downloading video page (age confirmed)')
+                webpage, 'confirm string')
+            confirm_url = webpage_url + '&confirm=%s' % confirm_string
+            webpage = self._download_webpage(confirm_url, video_id, 'Downloading video page (age confirmed)')
             adult_content = True
         else:
             adult_content = False
 
-        # Extract the rest of meta data
-        video_title = self._search_meta('name', video_page, 'title')
-        if not video_title:
-            video_title = os.path.splitext(url_basename(video_url))[0]
-
-        video_description = self._search_meta('description', video_page)
-        END_TEXT = ' на сайте Smotri.com'
-        if video_description and video_description.endswith(END_TEXT):
-            video_description = video_description[:-len(END_TEXT)]
-        START_TEXT = 'Смотреть онлайн ролик '
-        if video_description and video_description.startswith(START_TEXT):
-            video_description = video_description[len(START_TEXT):]
-        video_thumbnail = self._search_meta('thumbnail', video_page)
-
-        upload_date_str = self._search_meta('uploadDate', video_page, 'upload date')
-        if upload_date_str:
-            upload_date_m = re.search(r'(?P<year>\d{4})\.(?P<month>\d{2})\.(?P<day>\d{2})T', upload_date_str)
-            video_upload_date = (
-                (
-                    upload_date_m.group('year') +
-                    upload_date_m.group('month') +
-                    upload_date_m.group('day')
-                )
-                if upload_date_m else None
-            )
-        else:
-            video_upload_date = None
-
-        duration_str = self._search_meta('duration', video_page)
-        if duration_str:
-            duration_m = re.search(r'T(?P<hours>[0-9]{2})H(?P<minutes>[0-9]{2})M(?P<seconds>[0-9]{2})S', duration_str)
-            video_duration = (
-                (
-                    (int(duration_m.group('hours')) * 60 * 60) +
-                    (int(duration_m.group('minutes')) * 60) +
-                    int(duration_m.group('seconds'))
-                )
-                if duration_m else None
-            )
-        else:
-            video_duration = None
-
-        video_uploader = self._html_search_regex(
-            '<div class="DescrUser"><div>Автор.*?onmouseover="popup_user_info[^"]+">(.*?)</a>',
-            video_page, 'uploader', fatal=False, flags=re.MULTILINE|re.DOTALL)
-
-        video_uploader_id = self._html_search_regex(
-            '<div class="DescrUser"><div>Автор.*?onmouseover="popup_user_info\\(.*?\'([^\']+)\'\\);">',
-            video_page, 'uploader id', fatal=False, flags=re.MULTILINE|re.DOTALL)
-
-        video_view_count = self._html_search_regex(
+        view_count = self._html_search_regex(
             'Общее количество просмотров.*?<span class="Number">(\\d+)</span>',
-            video_page, 'view count', fatal=False, flags=re.MULTILINE|re.DOTALL)
+            webpage, 'view count', fatal=False, flags=re.MULTILINE | re.DOTALL)
 
         return {
             'id': video_id,
             'url': video_url,
-            'title': video_title,
-            'thumbnail': video_thumbnail,
-            'description': video_description,
-            'uploader': video_uploader,
-            'upload_date': video_upload_date,
-            'uploader_id': video_uploader_id,
-            'duration': video_duration,
-            'view_count': int_or_none(video_view_count),
+            'title': title,
+            'thumbnail': thumbnail,
+            'uploader': uploader,
+            'upload_date': upload_date,
+            'uploader_id': uploader_id,
+            'duration': duration,
+            'view_count': int_or_none(view_count),
             'age_limit': 18 if adult_content else 0,
-            'video_page_url': video_page_url
         }
@@ -69,7 +69,7 @@ class SohuIE(InfoExtractor):
                 (allot, prot, clipsURL[i], su[i]))
             part_str = self._download_webpage(
                 part_url, video_id,
-                note=u'Downloading part %d of %d' % (i+1, part_count))
+                note=u'Downloading part %d of %d' % (i + 1, part_count))
 
             part_info = part_str.split('|')
             video_url = '%s%s?key=%s' % (part_info[0], su[i], part_info[3])
@@ -93,4 +93,3 @@ class SportDeutschlandIE(InfoExtractor):
             'rtmp_live': asset.get('live'),
             'timestamp': parse_iso8601(asset.get('date')),
         }
-
@@ -50,7 +50,7 @@ class SubtitlesInfoExtractor(InfoExtractor):
 
         sub_lang_list = {}
         for sub_lang in requested_langs:
-            if not sub_lang in available_subs_list:
+            if sub_lang not in available_subs_list:
                 self._downloader.report_warning(u'no closed captions found in the specified language "%s"' % sub_lang)
                 continue
             sub_lang_list[sub_lang] = available_subs_list[sub_lang]
@@ -80,7 +80,7 @@ class SWRMediathekIE(InfoExtractor):
 
         if media_type == 'Video':
             fmt.update({
-                'format_note': ['144p', '288p', '544p', '720p'][quality-1],
+                'format_note': ['144p', '288p', '544p', '720p'][quality - 1],
                 'vcodec': codec,
             })
         elif media_type == 'Audio':
@@ -1,4 +1,4 @@
-#coding: utf-8
+# coding: utf-8
 from __future__ import unicode_literals
 
 from .mitele import MiTeleIE
@@ -35,11 +35,12 @@ class ThePlatformIE(InfoExtractor):
             'skip_download': True,
         },
     }
+
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
         if mobj.group('config'):
-            config_url = url+ '&form=json'
+            config_url = url + '&form=json'
             config_url = config_url.replace('swf/', 'config/')
             config_url = config_url.replace('onsite/', 'onsite/config/')
             config = self._download_json(config_url, video_id, 'Downloading config')
|
||||||
smil_url = ('http://link.theplatform.com/s/dJ5BDC/{0}/meta.smil?'
|
smil_url = ('http://link.theplatform.com/s/dJ5BDC/{0}/meta.smil?'
|
||||||
'format=smil&mbr=true'.format(video_id))
|
'format=smil&mbr=true'.format(video_id))
|
||||||
|
|
||||||
|
|
||||||
meta = self._download_xml(smil_url, video_id)
|
meta = self._download_xml(smil_url, video_id)
|
||||||
try:
|
try:
|
||||||
error_msg = next(
|
error_msg = next(
|
||||||
|
@ -118,5 +118,5 @@ class ThePlatformIE(InfoExtractor):
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'description': info['description'],
|
'description': info['description'],
|
||||||
'thumbnail': info['defaultThumbnailUrl'],
|
'thumbnail': info['defaultThumbnailUrl'],
|
||||||
'duration': info['duration']//1000,
|
'duration': info['duration'] // 1000,
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
#coding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
|
|
|
@@ -25,7 +25,7 @@ class TrailerAddictIE(InfoExtractor):
         webpage = self._download_webpage(url, name)
 
         title = self._search_regex(r'<title>(.+?)</title>',
-                                   webpage, 'video title').replace(' - Trailer Addict','')
+                                   webpage, 'video title').replace(' - Trailer Addict', '')
         view_count_str = self._search_regex(
             r'<span class="views_n">([0-9,.]+)</span>',
             webpage, 'view count', fatal=False)
@@ -43,10 +43,10 @@ class TrailerAddictIE(InfoExtractor):
             fvar = "fvar"
 
         info_url = "http://www.traileraddict.com/%s.php?tid=%s" % (fvar, str(video_id))
-        info_webpage = self._download_webpage(info_url, video_id , "Downloading the info webpage")
+        info_webpage = self._download_webpage(info_url, video_id, "Downloading the info webpage")
 
         final_url = self._search_regex(r'&fileurl=(.+)',
-                                       info_webpage, 'Download url').replace('%3F','?')
+                                       info_webpage, 'Download url').replace('%3F', '?')
         thumbnail_url = self._search_regex(r'&image=(.+?)&',
                                            info_webpage, 'thumbnail url')
 
@@ -63,4 +63,3 @@ class TriluliluIE(InfoExtractor):
             'description': description,
             'thumbnail': thumbnail,
         }
-
@@ -37,11 +37,11 @@ class TudouIE(InfoExtractor):
     }]
 
     def _url_for_id(self, id, quality = None):
-        info_url = "http://v2.tudou.com/f?id="+str(id)
+        info_url = "http://v2.tudou.com/f?id=" + str(id)
         if quality:
             info_url += '&hd' + quality
         webpage = self._download_webpage(info_url, id, "Opening the info webpage")
-        final_url = self._html_search_regex('>(.+?)</f>',webpage, 'video url')
+        final_url = self._html_search_regex('>(.+?)</f>', webpage, 'video url')
         return final_url
 
     def _real_extract(self, url):
@@ -35,4 +35,3 @@ class ViceIE(InfoExtractor):
         except ExtractorError:
             raise ExtractorError('The page doesn\'t contain a video', expected=True)
         return self.url_result(ooyala_url, ie='Ooyala')
-
@@ -6,6 +6,7 @@ from ..utils import (
     determine_ext,
 )
 
+
 class VideofyMeIE(InfoExtractor):
     _VALID_URL = r'https?://(www\.videofy\.me/.+?|p\.videofy\.me/v)/(?P<id>\d+)(&|#|$)'
     IE_NAME = u'videofy.me'
@@ -1,4 +1,4 @@
-#coding: utf-8
+# coding: utf-8
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
@@ -30,4 +30,3 @@ class VidziIE(InfoExtractor):
             'title': title,
             'url': video_url,
         }
-
@@ -51,4 +51,3 @@ class WorldStarHipHopIE(InfoExtractor):
             'title': video_title,
             'thumbnail': thumbnail,
         }
-
@@ -47,4 +47,3 @@ class XBefIE(InfoExtractor):
             'thumbnail': thumbnail,
             'age_limit': 18,
         }
-
@@ -42,7 +42,7 @@ class XHamsterIE(InfoExtractor):
         }
     ]
 
-    def _real_extract(self,url):
+    def _real_extract(self, url):
         def extract_video_url(webpage):
             mp4 = re.search(r'<video\s+.*?file="([^"]+)".*?>', webpage)
             if mp4 is None:
@@ -97,7 +97,7 @@ class XTubeUserIE(InfoExtractor):
             url, username, note='Retrieving profile page')
 
         video_count = int(self._search_regex(
-            r'<strong>%s\'s Videos \(([0-9]+)\)</strong>'%username, profile_page,
+            r'<strong>%s\'s Videos \(([0-9]+)\)</strong>' % username, profile_page,
             'video count'))
 
         PAGE_SIZE = 25
@@ -229,7 +229,7 @@ class YahooSearchIE(SearchInfoExtractor):
         for pagenum in itertools.count(0):
             result_url = 'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)
             info = self._download_json(result_url, query,
-                                       note='Downloading results page '+str(pagenum+1))
+                                       note='Downloading results page ' + str(pagenum + 1))
             m = info['m']
             results = info['results']
 
@@ -35,10 +35,10 @@ class YoukuIE(InfoExtractor):
 
     def _gen_sid(self):
         nowTime = int(time.time() * 1000)
-        random1 = random.randint(1000,1998)
-        random2 = random.randint(1000,9999)
+        random1 = random.randint(1000, 1998)
+        random2 = random.randint(1000, 9999)
 
-        return "%d%d%d" %(nowTime,random1,random2)
+        return "%d%d%d" % (nowTime, random1, random2)
 
     def _get_file_ID_mix_string(self, seed):
         mixed = []
@@ -49,7 +49,7 @@ class YoukuIE(InfoExtractor):
             index = math.floor(seed / 65536 * len(source))
             mixed.append(source[int(index)])
             source.remove(source[int(index)])
-        #return ''.join(mixed)
+        # return ''.join(mixed)
         return mixed
 
     def _get_file_id(self, fileId, seed):
@@ -100,12 +100,12 @@ class YoukuIE(InfoExtractor):
         keys = [s['k'] for s in config['data'][0]['segs'][format]]
         # segs is usually a dictionary, but an empty *list* if an error occured.
 
-        files_info=[]
+        files_info = []
         sid = self._gen_sid()
         fileid = self._get_file_id(fileid, seed)
 
-        #column 8,9 of fileid represent the segment number
-        #fileid[7:9] should be changed
+        # column 8,9 of fileid represent the segment number
+        # fileid[7:9] should be changed
         for index, key in enumerate(keys):
             temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
             download_url = 'http://k.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)
@@ -33,6 +33,7 @@ from ..utils import (
     uppercase_escape,
 )
 
+
 class YoutubeBaseInfoExtractor(InfoExtractor):
     """Provide base functions for Youtube extractors"""
     _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
@@ -99,7 +100,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
 
         # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
         # chokes on unicode
-        login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k,v in login_form_strs.items())
+        login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
         login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')
 
         req = compat_urllib_request.Request(self._LOGIN_URL, login_data)
@@ -149,7 +150,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
             'service': 'youtube',
             'hl': 'en_US',
         }
-        tfa_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k,v in tfa_form_strs.items())
+        tfa_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in tfa_form_strs.items())
         tfa_data = compat_urllib_parse.urlencode(tfa_form).encode('ascii')
 
         tfa_req = compat_urllib_request.Request(self._TWOFACTOR_URL, tfa_data)
@@ -180,8 +181,10 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
             'next_url': '/',
             'action_confirm': 'Confirm',
         }
-        req = compat_urllib_request.Request(self._AGE_URL,
-            compat_urllib_parse.urlencode(age_form).encode('ascii'))
+        req = compat_urllib_request.Request(
+            self._AGE_URL,
+            compat_urllib_parse.urlencode(age_form).encode('ascii')
+        )
 
         self._download_webpage(
             req, None,
@@ -491,7 +494,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         def gen_sig_code(idxs):
             def _genslice(start, end, step):
                 starts = '' if start == 0 else str(start)
-                ends = (':%d' % (end+step)) if end + step >= 0 else ':'
+                ends = (':%d' % (end + step)) if end + step >= 0 else ':'
                 steps = '' if step == 1 else (':%d' % step)
                 return 's[%s%s%s]' % (starts, ends, steps)
 
@@ -618,7 +621,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             list_url = caption_url + '&' + list_params
             caption_list = self._download_xml(list_url, video_id)
             original_lang_node = caption_list.find('track')
-            if original_lang_node is None or original_lang_node.attrib.get('kind') != 'asr' :
+            if original_lang_node is None or original_lang_node.attrib.get('kind') != 'asr':
                 self._downloader.report_warning('Video doesn\'t have automatic captions')
                 return {}
             original_lang = original_lang_node.attrib['lang_code']
@@ -651,6 +654,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
 
     def _extract_from_m3u8(self, manifest_url, video_id):
         url_map = {}
+
         def _get_urls(_manifest):
             lines = _manifest.split('\n')
             urls = filter(lambda l: l and not l.startswith('#'),
@@ -900,7 +904,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                 'player_url': player_url,
             }]
         elif len(video_info.get('url_encoded_fmt_stream_map', [])) >= 1 or len(video_info.get('adaptive_fmts', [])) >= 1:
-            encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts',[''])[0]
+            encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
             if 'rtmpe%3Dyes' in encoded_url_map:
                 raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
             url_map = {}
@@ -974,6 +978,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                     dash_manifest_url = video_info.get('dashmpd')[0]
                 else:
                     dash_manifest_url = ytplayer_config['args']['dashmpd']
+
                 def decrypt_sig(mobj):
                     s = mobj.group(1)
                     dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
@@ -1033,6 +1038,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             'formats': formats,
         }
 
+
 class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
     IE_DESC = 'YouTube.com playlists'
     _VALID_URL = r"""(?x)(?:
@@ -1333,8 +1339,10 @@ class YoutubeUserIE(InfoExtractor):
         # Don't return True if the url can be extracted with other youtube
         # extractor, the regex would is too permissive and it would match.
         other_ies = iter(klass for (name, klass) in globals().items() if name.endswith('IE') and klass is not cls)
-        if any(ie.suitable(url) for ie in other_ies): return False
-        else: return super(YoutubeUserIE, cls).suitable(url)
+        if any(ie.suitable(url) for ie in other_ies):
+            return False
+        else:
+            return super(YoutubeUserIE, cls).suitable(url)
 
     def _real_extract(self, url):
         # Extract username
@@ -1557,12 +1565,14 @@ class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
         paging = mobj.group('paging')
         return self.playlist_result(feed_entries, playlist_title=self._PLAYLIST_TITLE)
 
+
 class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
     IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
     _VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
     _FEED_NAME = 'recommended'
     _PLAYLIST_TITLE = 'Youtube Recommended videos'
 
+
 class YoutubeWatchLaterIE(YoutubeFeedsInfoExtractor):
     IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
     _VALID_URL = r'https?://www\.youtube\.com/feed/watch_later|:ytwatchlater'
@@ -1570,6 +1580,7 @@ class YoutubeWatchLaterIE(YoutubeFeedsInfoExtractor):
     _PLAYLIST_TITLE = 'Youtube Watch Later'
     _PERSONAL_FEED = True
 
+
 class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
     IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
     _VALID_URL = 'https?://www\.youtube\.com/feed/history|:ythistory'
@@ -1577,6 +1588,7 @@ class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
     _PERSONAL_FEED = True
     _PLAYLIST_TITLE = 'Youtube Watch History'
 
+
 class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
     IE_NAME = 'youtube:favorites'
     IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
@@ -621,7 +621,7 @@ def parseOpts(overrideArguments=None):
     postproc.add_option(
         '--exec',
         metavar='CMD', dest='exec_cmd',
-        help='Execute a command on the file after downloading, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'' )
+        help='Execute a command on the file after downloading, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'')
 
     parser.add_option_group(general)
     parser.add_option_group(selection)
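
For reference, a standalone optparse sketch of how a flag like --exec above is wired up (the parser here is a stub, not youtube-dl's real parseOpts):

import optparse

parser = optparse.OptionParser()
parser.add_option(
    '--exec',
    metavar='CMD', dest='exec_cmd',
    help="Execute a command on the file after downloading")
opts, args = parser.parse_args(['--exec', 'echo {}'])
print(opts.exec_cmd)  # -> 'echo {}'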
@@ -26,4 +26,3 @@ class ExecAfterDownloadPP(PostProcessor):
                 'Command returned error code %d' % retCode)
 
         return None, information  # by default, keep file and do nothing
-
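
The inline comment above hints at the postprocessor contract: run() returns a pair whose first element expresses what to do with the original file (None reading as "no preference, keep it"). A hedged sketch of that convention, with a simplified stand-in for the real PostProcessor base class:

class PostProcessorSketch(object):
    def __init__(self, downloader=None):
        self._downloader = downloader

    def run(self, information):
        # (keep-original-wish, info), matching the comment in the diff above
        return None, information


class MarkProcessedSketch(PostProcessorSketch):
    def run(self, information):
        information = dict(information, processed=True)
        return None, information


pp = MarkProcessedSketch()
print(pp.run({'filepath': 'video.mp4'}))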
@@ -216,7 +216,7 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
             self._downloader.to_screen(u'[' + self._executable + '] Destination: ' + new_path)
             self.run_ffmpeg(path, new_path, acodec, more_opts)
         except:
-            etype,e,tb = sys.exc_info()
+            etype, e, tb = sys.exc_info()
             if isinstance(e, AudioConversionError):
                 msg = u'audio conversion failed: ' + e.msg
             else:
@@ -231,13 +231,13 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
                 self._downloader.report_warning(u'Cannot update utime of audio file')
 
         information['filepath'] = new_path
-        return self._nopostoverwrites,information
+        return self._nopostoverwrites, information
 
 
 class FFmpegVideoConvertor(FFmpegPostProcessor):
-    def __init__(self, downloader=None,preferedformat=None):
+    def __init__(self, downloader=None, preferedformat=None):
         super(FFmpegVideoConvertor, self).__init__(downloader)
-        self._preferedformat=preferedformat
+        self._preferedformat = preferedformat
 
     def run(self, information):
         path = information['filepath']
@@ -245,13 +245,13 @@ class FFmpegVideoConvertor(FFmpegPostProcessor):
         outpath = prefix + sep + self._preferedformat
         if information['ext'] == self._preferedformat:
             self._downloader.to_screen(u'[ffmpeg] Not converting video file %s - already is in target format %s' % (path, self._preferedformat))
-            return True,information
-        self._downloader.to_screen(u'['+'ffmpeg'+'] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) +outpath)
+            return True, information
+        self._downloader.to_screen(u'[' + 'ffmpeg' + '] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) + outpath)
         self.run_ffmpeg(path, outpath, [])
         information['filepath'] = outpath
         information['format'] = self._preferedformat
         information['ext'] = self._preferedformat
-        return False,information
+        return False, information
 
 
 class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
@@ -466,7 +466,7 @@ class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
 
         opts = ['-map', '0:0', '-map', '0:1', '-c:v', 'copy', '-c:a', 'copy']
         for (i, lang) in enumerate(sub_langs):
-            opts.extend(['-map', '%d:0' % (i+1), '-c:s:%d' % i, 'mov_text'])
+            opts.extend(['-map', '%d:0' % (i + 1), '-c:s:%d' % i, 'mov_text'])
             lang_code = self._conver_lang_code(lang)
             if lang_code is not None:
                 opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code])
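
The loop above builds one -map/-c:s pair per subtitle track. As a worked example (omitting the lang_code lookup, which needs the extractor instance), two languages produce:

sub_langs = ['eng', 'dut']
opts = ['-map', '0:0', '-map', '0:1', '-c:v', 'copy', '-c:a', 'copy']
for (i, lang) in enumerate(sub_langs):
    opts.extend(['-map', '%d:0' % (i + 1), '-c:s:%d' % i, 'mov_text'])
print(opts)
# [..., '-map', '1:0', '-c:s:0', 'mov_text', '-map', '2:0', '-c:s:1', 'mov_text']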
@@ -108,4 +108,3 @@ class XAttrMetadataPP(PostProcessor):
         except (subprocess.CalledProcessError, OSError):
             self._downloader.report_error("This filesystem doesn't support extended attributes. (You may have to enable them in your /etc/fstab)")
             return False, info
-
@@ -827,4 +827,3 @@ class SWFInterpreter(object):
 
         avm_class.method_pyfunctions[func_name] = resfunc
         return resfunc
-
@@ -13,13 +13,17 @@ from .utils import (
 )
 from .version import __version__
 
+
 def rsa_verify(message, signature, key):
     from struct import pack
     from hashlib import sha256
     from sys import version_info
+
     def b(x):
-        if version_info[0] == 2: return x
-        else: return x.encode('latin1')
+        if version_info[0] == 2:
+            return x
+        else:
+            return x.encode('latin1')
     assert(type(message) == type(b('')))
     block_size = 0
     n = key[0]
@@ -32,13 +36,17 @@ def rsa_verify(message, signature, key):
         raw_bytes.insert(0, pack("B", signature & 0xFF))
         signature >>= 8
     signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
-    if signature[0:2] != b('\x00\x01'): return False
+    if signature[0:2] != b('\x00\x01'):
+        return False
     signature = signature[2:]
-    if not b('\x00') in signature: return False
-    signature = signature[signature.index(b('\x00'))+1:]
-    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
+    if not b('\x00') in signature:
+        return False
+    signature = signature[signature.index(b('\x00')) + 1:]
+    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')):
+        return False
     signature = signature[19:]
-    if signature != sha256(message).digest(): return False
+    if signature != sha256(message).digest():
+        return False
     return True
 
 
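
The magic bytes checked above are the PKCS#1 v1.5 layout for an RSA/SHA-256 signature: 00 01, a run of 0xFF padding, 00, the fixed 19-byte SHA-256 DigestInfo header, then the 32-byte digest (hence signature[19:]). A small illustration that builds such a block — not the updater's own code; em_len stands for the RSA modulus size in bytes:

from hashlib import sha256

# fixed ASN.1 DigestInfo prefix for SHA-256; this is the constant
# matched by signature.startswith(...) in rsa_verify() above
DIGESTINFO_SHA256 = b'\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01' \
                    b'\x65\x03\x04\x02\x01\x05\x00\x04\x20'


def build_em(message, em_len):
    digest = sha256(message).digest()
    padding = b'\xff' * (em_len - 3 - len(DIGESTINFO_SHA256) - len(digest))
    return b'\x00\x01' + padding + b'\x00' + DIGESTINFO_SHA256 + digest


em = build_em(b'new version payload', 256)  # 2048-bit key -> 256 bytes
print(em[:2], em[-32:] == sha256(b'new version payload').digest())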
@@ -58,7 +66,8 @@ def update_self(to_screen, verbose):
     try:
         newversion = compat_urllib_request.urlopen(VERSION_URL).read().decode('utf-8').strip()
     except:
-        if verbose: to_screen(compat_str(traceback.format_exc()))
+        if verbose:
+            to_screen(compat_str(traceback.format_exc()))
         to_screen(u'ERROR: can\'t find the current version. Please try again later.')
         return
     if newversion == __version__:
@@ -70,7 +79,8 @@ def update_self(to_screen, verbose):
         versions_info = compat_urllib_request.urlopen(JSON_URL).read().decode('utf-8')
         versions_info = json.loads(versions_info)
     except:
-        if verbose: to_screen(compat_str(traceback.format_exc()))
+        if verbose:
+            to_screen(compat_str(traceback.format_exc()))
         to_screen(u'ERROR: can\'t obtain versions info. Please try again later.')
         return
     if not 'signature' in versions_info:
@@ -118,7 +128,8 @@ def update_self(to_screen, verbose):
             newcontent = urlh.read()
             urlh.close()
         except (IOError, OSError):
-            if verbose: to_screen(compat_str(traceback.format_exc()))
+            if verbose:
+                to_screen(compat_str(traceback.format_exc()))
             to_screen(u'ERROR: unable to download latest version')
             return
 
@@ -131,7 +142,8 @@ def update_self(to_screen, verbose):
             with open(exe + '.new', 'wb') as outf:
                 outf.write(newcontent)
         except (IOError, OSError):
-            if verbose: to_screen(compat_str(traceback.format_exc()))
+            if verbose:
+                to_screen(compat_str(traceback.format_exc()))
             to_screen(u'ERROR: unable to write the new version')
             return
 
@@ -150,7 +162,8 @@ start /b "" cmd /c del "%%~f0"&exit /b"
             subprocess.Popen([bat])  # Continues to run in the background
             return  # Do not show premature success messages
         except (IOError, OSError):
-            if verbose: to_screen(compat_str(traceback.format_exc()))
+            if verbose:
+                to_screen(compat_str(traceback.format_exc()))
             to_screen(u'ERROR: unable to overwrite current version')
             return
 
@@ -161,7 +174,8 @@ start /b "" cmd /c del "%%~f0"&exit /b"
             newcontent = urlh.read()
             urlh.close()
         except (IOError, OSError):
-            if verbose: to_screen(compat_str(traceback.format_exc()))
+            if verbose:
+                to_screen(compat_str(traceback.format_exc()))
             to_screen(u'ERROR: unable to download latest version')
             return
 
@@ -174,19 +188,22 @@ start /b "" cmd /c del "%%~f0"&exit /b"
             with open(filename, 'wb') as outf:
                 outf.write(newcontent)
         except (IOError, OSError):
-            if verbose: to_screen(compat_str(traceback.format_exc()))
+            if verbose:
+                to_screen(compat_str(traceback.format_exc()))
             to_screen(u'ERROR: unable to overwrite current version')
             return
 
     to_screen(u'Updated youtube-dl. Restart youtube-dl to use the new version.')
 
+
 def get_notes(versions, fromVersion):
     notes = []
-    for v,vdata in sorted(versions.items()):
+    for v, vdata in sorted(versions.items()):
         if v > fromVersion:
             notes.extend(vdata.get('notes', []))
     return notes
 
+
 def print_notes(to_screen, versions, fromVersion=__version__):
     notes = get_notes(versions, fromVersion)
     if notes:
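
get_notes() above simply collects the release notes of every version newer than the running one. A quick usage example with made-up version data (the function body is copied from the diff):

def get_notes(versions, fromVersion):
    notes = []
    for v, vdata in sorted(versions.items()):
        if v > fromVersion:
            notes.extend(vdata.get('notes', []))
    return notes


versions = {
    '2014.10.01': {'notes': ['Fix A']},
    '2014.10.05': {'notes': ['Add B']},
}
print(get_notes(versions, '2014.10.01'))  # -> ['Add B']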
@@ -56,6 +56,7 @@ std_headers = {
     'Accept-Language': 'en-us,en;q=0.5',
 }
 
+
 def preferredencoding():
     """Get preferred encoding.
 
@@ -146,6 +147,8 @@ else:
 
     # On python2.6 the xml.etree.ElementTree.Element methods don't support
    # the namespace parameter
+
+
 def xpath_with_ns(path, ns_map):
     components = [c.split(':') for c in path.split('/')]
     replaced = []
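
The hunk above only shows the first lines of xpath_with_ns(). A self-contained sketch of the full idea — note the loop body is my completion based on the visible setup, not a quote of the real function:

def xpath_with_ns(path, ns_map):
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            # no namespace prefix, keep the tag as-is
            replaced.append(c[0])
        else:
            # expand 'prefix:tag' to '{uri}tag', ElementTree style
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)


print(xpath_with_ns('media:group/media:title',
                    {'media': 'http://search.yahoo.com/mrss/'}))
# {http://search.yahoo.com/mrss/}group/{http://search.yahoo.com/mrss/}title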
@@ -256,6 +259,7 @@ def timeconvert(timestr):
         timestamp = email.utils.mktime_tz(timetuple)
     return timestamp
 
+
 def sanitize_filename(s, restricted=False, is_id=False):
     """Sanitizes a string so it could be used as part of a filename.
     If restricted is set, use a stricter subset of allowed characters.
@@ -288,6 +292,7 @@ def sanitize_filename(s, restricted=False, is_id=False):
             result = '_'
     return result
 
+
 def orderedSet(iterable):
     """ Remove all duplicates from the input iterable """
     res = []
@@ -372,6 +377,7 @@ def decodeOption(optval):
     assert isinstance(optval, compat_str)
     return optval
 
+
 def formatSeconds(secs):
     if secs > 3600:
         return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
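
As a worked example of the hours branch shown above, 3723 seconds comes out as 1:02:03:

secs = 3723  # 1 hour, 2 minutes, 3 seconds
print('%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60))
# -> 1:02:03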
@@ -424,6 +430,7 @@ def make_HTTPS_handler(opts_no_check_certificate, **kwargs):
 
 class ExtractorError(Exception):
     """Error during info extraction."""
+
     def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
         """ tb, if given, is the original traceback (so that it can be printed out).
         If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
@@ -468,6 +475,7 @@ class DownloadError(Exception):
     configured to continue on errors. They will contain the appropriate
     error message.
     """
+
     def __init__(self, msg, exc_info=None):
         """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
         super(DownloadError, self).__init__(msg)
@@ -489,9 +497,11 @@ class PostProcessingError(Exception):
     This exception may be raised by PostProcessor's .run() method to
     indicate an error in the postprocessing task.
     """
+
     def __init__(self, msg):
         self.msg = msg
 
+
 class MaxDownloadsReached(Exception):
     """ --max-downloads limit has been reached. """
     pass
@@ -521,6 +531,7 @@ class ContentTooShortError(Exception):
         self.downloaded = downloaded
         self.expected = expected
 
+
 class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
     """Handler for HTTP requests and responses.
 
@@ -640,7 +651,7 @@ def unified_strdate(date_str):
         return None
 
     upload_date = None
-    #Replace commas
+    # Replace commas
     date_str = date_str.replace(',', ' ')
     # %z (UTC offset) is only supported in python>=3.2
     date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str)
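
The two normalisation steps above (comma removal, UTC-offset stripping) behave like this on a typical date string:

import re

date_str = 'December 4, 2012 10:00:00 +0200'
date_str = date_str.replace(',', ' ')
date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str)
print(date_str)  # -> 'December 4  2012 10:00:00'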
|
@ -681,6 +692,7 @@ def unified_strdate(date_str):
|
||||||
upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
|
upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
|
||||||
return upload_date
|
return upload_date
|
||||||
|
|
||||||
|
|
||||||
def determine_ext(url, default_ext='unknown_video'):
|
def determine_ext(url, default_ext='unknown_video'):
|
||||||
if url is None:
|
if url is None:
|
||||||
return default_ext
|
return default_ext
|
||||||
|
@ -690,9 +702,11 @@ def determine_ext(url, default_ext='unknown_video'):
|
||||||
else:
|
else:
|
||||||
return default_ext
|
return default_ext
|
||||||
|
|
||||||
|
|
||||||
def subtitles_filename(filename, sub_lang, sub_format):
|
def subtitles_filename(filename, sub_lang, sub_format):
|
||||||
return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format
|
return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format
|
||||||
|
|
||||||
|
|
||||||
def date_from_str(date_str):
|
def date_from_str(date_str):
|
||||||
"""
|
"""
|
||||||
Return a datetime object from a string in the format YYYYMMDD or
|
Return a datetime object from a string in the format YYYYMMDD or
|
||||||
|
@@ -707,7 +721,7 @@ def date_from_str(date_str):
         if sign == '-':
             time = -time
         unit = match.group('unit')
-        #A bad aproximation?
+        # A bad aproximation?
         if unit == 'month':
             unit = 'day'
             time *= 30
@@ -719,6 +733,7 @@ def date_from_str(date_str):
         return today + delta
     return datetime.datetime.strptime(date_str, "%Y%m%d").date()
 
+
 def hyphenate_date(date_str):
     """
     Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
@@ -728,8 +743,10 @@ def hyphenate_date(date_str):
     else:
         return date_str
 
+
 class DateRange(object):
     """Represents a time interval between two dates"""
+
     def __init__(self, start=None, end=None):
         """start and end must be strings in the format accepted by date"""
         if start is not None:
@@ -742,17 +759,20 @@ class DateRange(object):
             self.end = datetime.datetime.max.date()
         if self.start > self.end:
             raise ValueError('Date range: "%s" , the start date must be before the end date' % self)
+
     @classmethod
     def day(cls, day):
         """Returns a range that only contains the given day"""
-        return cls(day,day)
+        return cls(day, day)
+
     def __contains__(self, date):
         """Check if the date is in the range"""
         if not isinstance(date, datetime.date):
             date = date_from_str(date)
         return self.start <= date <= self.end
+
     def __str__(self):
-        return '%s - %s' % ( self.start.isoformat(), self.end.isoformat())
+        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
 
 
 def platform_name():
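
A short usage sketch for the DateRange API cleaned up above (assuming youtube_dl is importable; otherwise paste the class from the diff):

from youtube_dl.utils import DateRange

rng = DateRange('20140101', '20140131')  # YYYYMMDD strings, per date_from_str
print('20140115' in rng)          # True
print('20140201' in rng)          # False
print(DateRange.day('20140105'))  # one-day range: 2014-01-05 - 2014-01-05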