
[litv] Fix extraction (#11006)

Author: Yen Chi Hsuan
Date:   2016-10-23 23:22:09 +08:00
Commit: 9dde0e04e6 (parent f16f8505b1)
GPG key ID: 3FDDD575826C5C30 (no known key found for this signature in the database)

2 changed files with 20 additions and 22 deletions

ChangeLog

@@ -3,6 +3,9 @@ version <unreleased>
 Core
 * Running youtube-dl in the background is fixed (#10996, #10706, #955)
 
+Extractors
+* [litv] Fix extraction
+
 
 version 2016.10.21.1

youtube_dl/extractor/litv.py

@@ -2,7 +2,6 @@
 from __future__ import unicode_literals
 
 import json
-import re
 
 from .common import InfoExtractor
 from ..utils import (
@@ -52,8 +51,8 @@ class LiTVIE(InfoExtractor):
         'skip': 'Georestricted to Taiwan',
     }]
 
-    def _extract_playlist(self, season_list, video_id, vod_data, view_data, prompt=True):
-        episode_title = view_data['title']
+    def _extract_playlist(self, season_list, video_id, program_info, prompt=True):
+        episode_title = program_info['title']
         content_id = season_list['contentId']
 
         if prompt:
@@ -61,7 +60,7 @@ def _extract_playlist(self, season_list, video_id, vod_data, view_data, prompt=T
 
         all_episodes = [
             self.url_result(smuggle_url(
-                self._URL_TEMPLATE % (view_data['contentType'], episode['contentId']),
+                self._URL_TEMPLATE % (program_info['contentType'], episode['contentId']),
                 {'force_noplaylist': True}))  # To prevent infinite recursion
             for episode in season_list['episode']]
 
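The {'force_noplaylist': True} flag is smuggled into each episode URL so that, when the extractor later handles that URL, it skips the playlist branch instead of recursing back into _extract_playlist. A minimal sketch of how the smuggled data round-trips, assuming youtube-dl is importable; the episode URL below is a placeholder, not real LiTV data:

from youtube_dl.utils import smuggle_url, unsmuggle_url

episode_url = 'https://www.litv.tv/vod/drama/content.do?id=VOD00000001'  # placeholder
smuggled = smuggle_url(episode_url, {'force_noplaylist': True})
# The payload travels in the URL fragment (roughly '#__youtubedl_smuggle=<json>').

url, data = unsmuggle_url(smuggled, {})
assert url == episode_url
assert data == {'force_noplaylist': True}  # read by the extractor to skip the playlist path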
@@ -80,19 +79,15 @@ def _real_extract(self, url):
 
         webpage = self._download_webpage(url, video_id)
 
-        view_data = dict(map(lambda t: (t[0], t[2]), re.findall(
-            r'viewData\.([a-zA-Z]+)\s*=\s*(["\'])([^"\']+)\2',
-            webpage)))
-
-        vod_data = self._parse_json(self._search_regex(
-            'var\s+vod\s*=\s*([^;]+)', webpage, 'VOD data', default='{}'),
+        program_info = self._parse_json(self._search_regex(
+            'var\s+programInfo\s*=\s*([^;]+)', webpage, 'VOD data', default='{}'),
             video_id)
 
-        season_list = list(vod_data.get('seasonList', {}).values())
+        season_list = list(program_info.get('seasonList', {}).values())
         if season_list:
             if not noplaylist:
                 return self._extract_playlist(
-                    season_list[0], video_id, vod_data, view_data,
+                    season_list[0], video_id, program_info,
                     prompt=noplaylist_prompt)
 
             if noplaylist_prompt:
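This is the heart of the fix: the page no longer exposes individual viewData.xxx = '...' assignments plus a separate var vod = {...} object; the metadata now sits in a single var programInfo = {...} object, which is captured with one regex and parsed as JSON. A standalone sketch of the same idea using plain re and json (the page fragment is invented for illustration; the real extractor uses the _search_regex/_parse_json helpers shown above):

import json
import re

# Invented page fragment; real LiTV markup differs.
webpage = '<script>var programInfo = {"assetId": "VOD12345", "contentType": "vod", "title": "Some Show", "seasonList": {}};</script>'

mobj = re.search(r'var\s+programInfo\s*=\s*([^;]+)', webpage)
program_info = json.loads(mobj.group(1)) if mobj else {}

season_list = list(program_info.get('seasonList', {}).values())
print(program_info['title'], season_list)  # -> Some Show []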
@@ -102,8 +97,8 @@ def _real_extract(self, url):
         # endpoint gives the same result as the data embedded in the webpage.
         # If georestricted, there are no embedded data, so an extra request is
         # necessary to get the error code
-        if 'assetId' not in view_data:
-            view_data = self._download_json(
+        if 'assetId' not in program_info:
+            program_info = self._download_json(
                 'https://www.litv.tv/vod/ajax/getProgramInfo', video_id,
                 query={'contentId': video_id},
                 headers={'Accept': 'application/json'})
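When no data is embedded in the page (the georestricted case mentioned in the comments), the extractor falls back to the getProgramInfo AJAX endpoint above. A rough standalone equivalent of that JSON GET, using only the query parameter and Accept header visible in the diff; whether the endpoint still responds this way is not guaranteed:

import json
from urllib.parse import urlencode
from urllib.request import Request, urlopen

video_id = 'VOD00000001'  # placeholder content id
url = 'https://www.litv.tv/vod/ajax/getProgramInfo?' + urlencode({'contentId': video_id})
req = Request(url, headers={'Accept': 'application/json'})
program_info = json.loads(urlopen(req).read().decode('utf-8'))  # may carry an error code if georestricted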
@@ -112,9 +107,9 @@ def _real_extract(self, url):
             webpage, 'video data', default='{}'), video_id)
         if not video_data:
             payload = {
-                'assetId': view_data['assetId'],
-                'watchDevices': view_data['watchDevices'],
-                'contentType': view_data['contentType'],
+                'assetId': program_info['assetId'],
+                'watchDevices': program_info['watchDevices'],
+                'contentType': program_info['contentType'],
             }
             video_data = self._download_json(
                 'https://www.litv.tv/vod/getMainUrl', video_id,
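The payload for the getMainUrl request is now built from program_info. The request itself lies outside this hunk; the sketch below assumes the endpoint takes a JSON POST body with a Content-Type header (both assumptions, not shown in this diff), and uses placeholder values:

import json
from urllib.request import Request, urlopen

program_info = {'assetId': 'VOD12345', 'watchDevices': '...', 'contentType': 'vod'}  # placeholders
payload = {
    'assetId': program_info['assetId'],
    'watchDevices': program_info['watchDevices'],
    'contentType': program_info['contentType'],
}
req = Request(
    'https://www.litv.tv/vod/getMainUrl',
    data=json.dumps(payload).encode('utf-8'),        # assumption: JSON body
    headers={'Content-Type': 'application/json'})    # assumption: header not visible in this hunk
video_data = json.loads(urlopen(req).read().decode('utf-8'))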
@@ -136,11 +131,11 @@ def _real_extract(self, url):
             # LiTV HLS segments doesn't like compressions
             a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = True
 
-        title = view_data['title'] + view_data.get('secondaryMark', '')
-        description = view_data.get('description')
-        thumbnail = view_data.get('imageFile')
-        categories = [item['name'] for item in vod_data.get('category', [])]
-        episode = int_or_none(view_data.get('episode'))
+        title = program_info['title'] + program_info.get('secondaryMark', '')
+        description = program_info.get('description')
+        thumbnail = program_info.get('imageFile')
+        categories = [item['name'] for item in program_info.get('category', [])]
+        episode = int_or_none(program_info.get('episode'))
 
         return {
             'id': video_id,
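Finally, the remaining metadata fields are read from program_info as well. A small worked example with an invented program_info in the shape the diff implies; int_or_none is simplified here, the real helper lives in youtube_dl.utils:

def int_or_none(v):
    # Simplified stand-in for youtube_dl.utils.int_or_none.
    try:
        return int(v)
    except (TypeError, ValueError):
        return None

program_info = {
    'title': 'Some Show',
    'secondaryMark': ' S01E03',
    'imageFile': 'https://example.com/poster.jpg',  # placeholder
    'category': [{'name': 'Drama'}],
    'episode': '3',
}

title = program_info['title'] + program_info.get('secondaryMark', '')
categories = [item['name'] for item in program_info.get('category', [])]
episode = int_or_none(program_info.get('episode'))
print(title, categories, episode)  # -> Some Show S01E03 ['Drama'] 3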