0
0
Fork 0
mirror of https://github.com/yt-dlp/yt-dlp.git synced 2024-11-16 01:25:06 +00:00

[twitch] Adapt to new API (Fixes #3946, Fixes #3949, Fixes #3965)

Work in progress
This commit is contained in:
Sergey M. 2014-10-16 22:23:35 +07:00 committed by Sergey M
parent 09a42738fc
commit 355d074ff9

View file

@ -10,6 +10,7 @@
compat_str, compat_str,
ExtractorError, ExtractorError,
formatSeconds, formatSeconds,
parse_iso8601,
) )
@ -43,6 +44,8 @@ class JustinTVIE(InfoExtractor):
} }
} }
_API_BASE = 'https://api.twitch.tv'
# Return count of items, list of *valid* items # Return count of items, list of *valid* items
def _parse_page(self, url, video_id, counter): def _parse_page(self, url, video_id, counter):
info_json = self._download_webpage( info_json = self._download_webpage(
@ -74,18 +77,84 @@ def _parse_page(self, url, video_id, counter):
}) })
return (len(response), info) return (len(response), info)
def _handle_error(self, response):
if not isinstance(response, dict):
return
error = response.get('error')
if error:
raise ExtractorError(
'%s returned error: %s - %s' % (self.IE_NAME, error, response.get('message')),
expected=True)
def _download_json(self, url, video_id, note='Downloading JSON metadata', *args, **kwargs):
    """Download and parse JSON, surfacing Twitch API errors.

    Delegates to the base InfoExtractor._download_json, then checks the
    parsed response for an 'error' payload via _handle_error before
    returning it.  Extra positional/keyword arguments are forwarded so
    callers keep access to the base method's options (errnote, fatal, ...)
    instead of this override silently narrowing the signature.
    """
    response = super(JustinTVIE, self)._download_json(
        url, video_id, note, *args, **kwargs)
    self._handle_error(response)
    return response
def _extract_media(self, item, item_id):
    """Extract a Twitch archive ('a') or chapter ('c') as a list of entries.

    Fetches the kraken metadata and the api playlist for the item, then
    builds one entry per chunk, each carrying one format per available
    quality ('live' is ranked above named resolutions).
    """
    item_names = {
        'a': 'video',
        'c': 'chapter',
    }
    name = item_names[item]
    info = self._extract_info(self._download_json(
        '%s/kraken/videos/%s%s' % (self._API_BASE, item, item_id), item_id,
        'Downloading %s info JSON' % name))
    playlist = self._download_json(
        '%s/api/videos/%s%s' % (self._API_BASE, item, item_id), item_id,
        'Downloading %s playlist JSON' % name)
    chunks = playlist['chunks']
    qualities = list(chunks.keys())
    entries = []
    part = 0
    # Each position across the per-quality chunk lists is one "part" of
    # the broadcast; zip walks those positions in lockstep.
    for fragment in zip(*chunks.values()):
        part += 1
        formats = []
        for quality, fragment_fmt in zip(qualities, fragment):
            fmt = {
                'url': fragment_fmt['url'],
                'format_id': quality,
                'quality': 1 if quality == 'live' else 0,
            }
            height_m = re.search(r'^(?P<height>\d+)[Pp]', quality)
            if height_m:
                fmt['height'] = int(height_m.group('height'))
            formats.append(fmt)
        self._sort_formats(formats)
        entry = dict(info)
        entry['title'] = '%s part %d' % (entry['title'], part)
        entry['formats'] = formats
        entries.append(entry)
    return entries
def _extract_info(self, info):
    """Map a Twitch kraken video/chapter JSON object to an info dict.

    Only '_id' and 'title' are treated as required; every other metadata
    field uses .get() so a missing optional key (description, preview,
    channel, ...) degrades to None instead of raising KeyError and
    aborting the whole extraction.

    NOTE(review): assumes parse_iso8601 accepts None and returns None —
    true for youtube-dl's utils implementation; confirm if that helper
    changes.
    """
    channel = info.get('channel') or {}
    return {
        'id': info['_id'],
        'title': info['title'],
        'description': info.get('description'),
        'duration': info.get('length'),
        'thumbnail': info.get('preview'),
        'uploader': channel.get('display_name'),
        'uploader_id': channel.get('name'),
        'timestamp': parse_iso8601(info.get('recorded_at')),
        'view_count': info.get('views'),
    }
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
api_base = 'http://api.justin.tv' api_base = 'http://api.twitch.tv'
paged = False paged = False
if mobj.group('channelid'): if mobj.group('channelid'):
paged = True paged = True
video_id = mobj.group('channelid') video_id = mobj.group('channelid')
api = api_base + '/channel/archives/%s.json' % video_id api = api_base + '/channel/archives/%s.json' % video_id
elif mobj.group('chapterid'): elif mobj.group('chapterid'):
chapter_id = mobj.group('chapterid') return self._extract_media('c', mobj.group('chapterid'))
"""
webpage = self._download_webpage(url, chapter_id) webpage = self._download_webpage(url, chapter_id)
m = re.search(r'PP\.archive_id = "([0-9]+)";', webpage) m = re.search(r'PP\.archive_id = "([0-9]+)";', webpage)
if not m: if not m:
@ -133,9 +202,9 @@ def _real_extract(self, url):
'uploader_id': chapter_info['channel']['name'], 'uploader_id': chapter_info['channel']['name'],
} }
return info return info
"""
else: else:
video_id = mobj.group('videoid') return self._extract_media('a', mobj.group('videoid'))
api = api_base + '/broadcast/by_archive/%s.json' % video_id
entries = [] entries = []
offset = 0 offset = 0