
[openload] Fix extraction (closes #10408)

Thanks @yokrysty for the algorithm
Author: Yen Chi Hsuan
Date:   2016-08-23 21:55:58 +08:00
Commit: 18b6216150 (parent fb009b7f53)
2 changed files with 24 additions and 63 deletions

ChangeLog

@@ -1,3 +1,9 @@
+version <unreleased>
+
+Extractors
+* [openload] Fix extraction (#10408)
+
+
 version 2016.08.22
 
 Core

youtube_dl/extractor/openload.py

@@ -1,12 +1,12 @@
 # coding: utf-8
 from __future__ import unicode_literals, division
 
-import math
-
 from .common import InfoExtractor
-from ..compat import compat_chr
+from ..compat import (
+    compat_chr,
+    compat_ord,
+)
 from ..utils import (
-    decode_png,
     determine_ext,
     ExtractorError,
 )
@@ -42,71 +42,26 @@ class OpenloadIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        webpage = self._download_webpage(url, video_id)
+        webpage = self._download_webpage('https://openload.co/embed/%s/' % video_id, video_id)
 
-        if 'File not found' in webpage:
+        if 'File not found' in webpage or 'deleted by the owner' in webpage:
             raise ExtractorError('File not found', expected=True)
 
-        # The following extraction logic is proposed by @Belderak and @gdkchan
-        # and declared to be used freely in youtube-dl
-        # See https://github.com/rg3/youtube-dl/issues/9706
-
-        numbers_js = self._download_webpage(
-            'https://openload.co/assets/js/obfuscator/n.js', video_id,
-            note='Downloading signature numbers')
-        signums = self._search_regex(
-            r'window\.signatureNumbers\s*=\s*[\'"](?P<data>[a-z]+)[\'"]',
-            numbers_js, 'signature numbers', group='data')
-
-        linkimg_uri = self._search_regex(
-            r'<img[^>]+id="linkimg"[^>]+src="([^"]+)"', webpage, 'link image')
-        linkimg = self._request_webpage(
-            linkimg_uri, video_id, note=False).read()
-
-        width, height, pixels = decode_png(linkimg)
-
-        output = ''
-        for y in range(height):
-            for x in range(width):
-                r, g, b = pixels[y][3 * x:3 * x + 3]
-                if r == 0 and g == 0 and b == 0:
-                    break
-                else:
-                    output += compat_chr(r)
-                    output += compat_chr(g)
-                    output += compat_chr(b)
-
-        img_str_length = len(output) // 200
-        img_str = [[0 for x in range(img_str_length)] for y in range(10)]
-
-        sig_str_length = len(signums) // 260
-        sig_str = [[0 for x in range(sig_str_length)] for y in range(10)]
-
-        for i in range(10):
-            for j in range(img_str_length):
-                begin = i * img_str_length * 20 + j * 20
-                img_str[i][j] = output[begin:begin + 20]
-            for j in range(sig_str_length):
-                begin = i * sig_str_length * 26 + j * 26
-                sig_str[i][j] = signums[begin:begin + 26]
-
-        parts = []
-        # TODO: find better names for str_, chr_ and sum_
-        str_ = ''
-        for i in [2, 3, 5, 7]:
-            str_ = ''
-            sum_ = float(99)
-
-            for j in range(len(sig_str[i])):
-                for chr_idx in range(len(img_str[i][j])):
-                    if sum_ > float(122):
-                        sum_ = float(98)
-                    chr_ = compat_chr(int(math.floor(sum_)))
-
-                    if sig_str[i][j][chr_idx] == chr_ and j >= len(str_):
-                        sum_ += float(2.5)
-                        str_ += img_str[i][j][chr_idx]
-            parts.append(str_.replace(',', ''))
-
-        video_url = 'https://openload.co/stream/%s~%s~%s~%s' % (parts[3], parts[1], parts[2], parts[0])
+        # The following decryption algorithm is written by @yokrysty and
+        # declared to be freely used in youtube-dl
+        # See https://github.com/rg3/youtube-dl/issues/10408
+        enc_data = self._html_search_regex(
+            r'<span[^>]+id="hiddenurl"[^>]*>([^<]+)</span>', webpage, 'encrypted data')
+
+        video_url_chars = []
+
+        for c in enc_data:
+            j = compat_ord(c)
+            if j >= 33 and j <= 126:
+                j = ((j + 14) % 94) + 33
+            video_url_chars += compat_chr(j)
+
+        video_url = 'https://openload.co/stream/%s?mime=true' % ''.join(video_url_chars)
 
         title = self._og_search_title(webpage, default=None) or self._search_regex(
             r'<span[^>]+class=["\']title["\'][^>]*>([^<]+)', webpage,
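
For reference, the character mapping in the added loop is equivalent to ROT47 over the printable ASCII range 33-126: ((j + 14) % 94) + 33 is the same as shifting by 47 positions within the 94-character cycle, so the transform is its own inverse. A minimal standalone sketch of just that mapping, outside the extractor (the sample string is purely illustrative, not real Openload data):

    def shift_printable(text):
        # Same mapping as the loop added above: shift each printable ASCII
        # character (codes 33-126) by 47 positions within the 94-char cycle;
        # anything outside that range passes through unchanged.
        out = []
        for c in text:
            j = ord(c)
            if 33 <= j <= 126:
                j = ((j + 14) % 94) + 33
            out.append(chr(j))
        return ''.join(out)

    # The mapping is an involution, so applying it twice restores the input.
    sample = 'AbCdEf123'  # illustrative placeholder, not real encrypted data
    encoded = shift_printable(sample)
    assert shift_printable(encoded) == sample

In the extractor itself the decoded string is then interpolated into the stream URL template shown above ('https://openload.co/stream/%s?mime=true').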