Mirror of https://github.com/ytdl-org/youtube-dl.git
Merge pull request #4110 from nemunaire/channel9-fix

[channel9] Fix extraction

Commit cde9b380e6
--- a/youtube_dl/extractor/channel9.py
+++ b/youtube_dl/extractor/channel9.py
@@ -27,7 +27,7 @@ class Channel9IE(InfoExtractor):
             'title': 'Developer Kick-Off Session: Stuff We Love',
             'description': 'md5:c08d72240b7c87fcecafe2692f80e35f',
             'duration': 4576,
-            'thumbnail': 'http://media.ch9.ms/ch9/9d51/03902f2d-fc97-4d3c-b195-0bfe15a19d51/KOS002_220.jpg',
+            'thumbnail': 'http://video.ch9.ms/ch9/9d51/03902f2d-fc97-4d3c-b195-0bfe15a19d51/KOS002_220.jpg',
             'session_code': 'KOS002',
             'session_day': 'Day 1',
             'session_room': 'Arena 1A',
@@ -43,7 +43,7 @@ class Channel9IE(InfoExtractor):
             'title': 'Self-service BI with Power BI - nuclear testing',
             'description': 'md5:d1e6ecaafa7fb52a2cacdf9599829f5b',
             'duration': 1540,
-            'thumbnail': 'http://media.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
+            'thumbnail': 'http://video.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
             'authors': [ 'Mike Wilmot' ],
         },
     }
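For context on the unchanged 'md5:…' description fields above: youtube-dl's download tests compare long expected strings by MD5 digest rather than verbatim when the expected value carries an 'md5:' prefix. A minimal sketch of that comparison, assuming that convention (the helper name is illustrative, not the test suite's actual function):

import hashlib

def field_matches(expected, actual):
    # Expected test values of the form 'md5:<hex digest>' are compared
    # against the MD5 of the actual field instead of the raw string.
    if isinstance(expected, str) and expected.startswith('md5:'):
        return expected == 'md5:' + hashlib.md5(actual.encode('utf-8')).hexdigest()
    return expected == actual

# e.g. the KOS002 test's description would be checked as
# field_matches('md5:c08d72240b7c87fcecafe2692f80e35f', scraped_description)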
@@ -115,7 +115,7 @@ class Channel9IE(InfoExtractor):
         return self._html_search_meta('description', html, 'description')

     def _extract_duration(self, html):
-        m = re.search(r'data-video_duration="(?P<hours>\d{2}):(?P<minutes>\d{2}):(?P<seconds>\d{2})"', html)
+        m = re.search(r'"length": *"(?P<hours>\d{2}):(?P<minutes>\d{2}):(?P<seconds>\d{2})"', html)
         return ((int(m.group('hours')) * 60 * 60) + (int(m.group('minutes')) * 60) + int(m.group('seconds'))) if m else None

     def _extract_slides(self, html):
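The duration fix swaps the old data-video_duration attribute for the "length" field that Channel 9 pages embed as JSON-like metadata. A quick, self-contained check of the new pattern (the sample fragment is illustrative, not captured from a live page):

import re

# Illustrative fragment of the embedded metadata; 01:16:16 -> 4576 seconds,
# matching the KOS002 test expectation above.
html = '"length": "01:16:16"'
m = re.search(r'"length": *"(?P<hours>\d{2}):(?P<minutes>\d{2}):(?P<seconds>\d{2})"', html)
if m:
    duration = int(m.group('hours')) * 3600 + int(m.group('minutes')) * 60 + int(m.group('seconds'))
    print(duration)  # 4576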
@@ -258,16 +258,17 @@ class Channel9IE(InfoExtractor):

         webpage = self._download_webpage(url, content_path, 'Downloading web page')

-        page_type_m = re.search(r'<meta name="Search.PageType" content="(?P<pagetype>[^"]+)"/>', webpage)
-        if page_type_m is None:
-            raise ExtractorError('Search.PageType not found, don\'t know how to process this page', expected=True)
-
-        page_type = page_type_m.group('pagetype')
-
-        if page_type == 'List': # List page, may contain list of 'item'-like objects
+        page_type_m = re.search(r'<meta name="WT.entryid" content="(?P<pagetype>[^:]+)[^"]+"/>', webpage)
+        if page_type_m is not None:
+            page_type = page_type_m.group('pagetype')
+            if page_type == 'Entry': # Any 'item'-like page, may contain downloadable content
+                return self._extract_entry_item(webpage, content_path)
+            elif page_type == 'Session': # Event session page, may contain downloadable content
+                return self._extract_session(webpage, content_path)
+            elif page_type == 'Event':
+                return self._extract_list(content_path)
+            else:
+                raise ExtractorError('Unexpected WT.entryid %s' % page_type, expected=True)
+
+        else: # Assuming list
             return self._extract_list(content_path)
-        elif page_type == 'Entry.Item': # Any 'item'-like page, may contain downloadable content
-            return self._extract_entry_item(webpage, content_path)
-        elif page_type == 'Session': # Event session page, may contain downloadable content
-            return self._extract_session(webpage, content_path)
-        else:
-            raise ExtractorError('Unexpected Search.PageType %s' % page_type, expected=True)
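The dispatch fix reads the page type from the WT.entryid meta tag instead of Search.PageType, keeping only the segment before the first colon, and falls back to list extraction when the tag is absent rather than raising. A small sketch of the new regex in isolation (the sample tag content is invented for illustration):

import re

# Invented sample tag; real pages append an identifier after the colon.
webpage = '<meta name="WT.entryid" content="Entry:RSS:20141027:KOS002"/>'
m = re.search(r'<meta name="WT.entryid" content="(?P<pagetype>[^:]+)[^"]+"/>', webpage)
print(m.group('pagetype') if m else None)  # 'Entry' -> _extract_entry_item();
                                           # no match at all -> _extract_list()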