# Source: yt_dlp/extractor/mgtv.py (mirror of https://github.com/yt-dlp/yt-dlp.git)
# coding: utf-8
from __future__ import unicode_literals
2019-04-12 08:19:09 +00:00
import base64
import time
import uuid
from .common import InfoExtractor
2019-04-12 08:19:09 +00:00
from ..compat import (
compat_HTTPError,
compat_str,
)
from ..utils import (
ExtractorError,
int_or_none,
)
class MGTVIE(InfoExtractor):
    """InfoExtractor for Mango TV (mgtv.com) video pages.

    Flow: build an anti-leech token (tk2), fetch video metadata from the
    player API, then fetch the per-quality stream list and resolve each
    stream path into a final m3u8 URL.
    """
    _VALID_URL = r'https?://(?:w(?:ww)?\.)?mgtv\.com/(v|b)/(?:[^/]+/)*(?P<id>\d+)\.html'
    IE_DESC = '芒果TV'

    _TESTS = [{
        'url': 'http://www.mgtv.com/v/1/290525/f/3116640.html',
        'info_dict': {
            'id': '3116640',
            'ext': 'mp4',
            'title': '我是歌手 第四季',
            'description': '我是歌手第四季双年巅峰会',
            'duration': 7461,
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }, {
        'url': 'http://www.mgtv.com/b/301817/3826653.html',
        'only_matching': True,
    }, {
        'url': 'https://w.mgtv.com/b/301817/3826653.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # tk2 anti-leech token: urlsafe-base64 of device/client parameters,
        # then reversed.  bytes %d truncates the float timestamp to an int.
        tk2 = base64.urlsafe_b64encode(
            b'did=%s|pno=1030|ver=0.3.0301|clit=%d'
            % (compat_str(uuid.uuid4()).encode(), time.time()))[::-1]
        try:
            api_data = self._download_json(
                'https://pcweb.api.mgtv.com/player/video', video_id, query={
                    'tk2': tk2,
                    'video_id': video_id,
                }, headers=self.geo_verification_headers())['data']
        except ExtractorError as e:
            # The API answers HTTP 401 with a JSON error body; error code
            # 40005 indicates a geo restriction.
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
                error = self._parse_json(e.cause.read().decode(), None)
                if error.get('code') == 40005:
                    self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
                raise ExtractorError(error['msg'], expected=True)
            raise
        info = api_data['info']
        title = info['title'].strip()

        stream_data = self._download_json(
            'https://pcweb.api.mgtv.com/player/getSource', video_id, query={
                'pm2': api_data['atc']['pm2'],
                'tk2': tk2,
                'video_id': video_id,
            }, headers=self.geo_verification_headers())['data']
        stream_domain = stream_data['stream_domain'][0]

        formats = []
        for idx, stream in enumerate(stream_data['stream']):
            stream_path = stream.get('url')
            if not stream_path:
                continue
            # Each stream entry is only a path; a second request against the
            # stream domain yields the playable m3u8 URL in 'info'.
            format_data = self._download_json(
                stream_domain + stream_path, video_id,
                note='Download video info for format #%d' % idx)
            format_url = format_data.get('info')
            if not format_url:
                continue
            # Bitrate from the API field, or parsed out of the URL
            # (e.g. ..._1080_mp4/...) as a fallback.
            tbr = int_or_none(stream.get('filebitrate') or self._search_regex(
                r'_(\d+)_mp4/', format_url, 'tbr', default=None))
            formats.append({
                'format_id': compat_str(tbr or idx),
                'url': format_url,
                'ext': 'mp4',
                'tbr': tbr,
                'protocol': 'm3u8_native',
                'http_headers': {
                    # The CDN requires a Referer matching the page URL.
                    'Referer': url,
                },
                'format_note': stream.get('name'),
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'description': info.get('desc'),
            'duration': int_or_none(info.get('duration')),
            'thumbnail': info.get('thumb'),
        }