0
0
Fork 0
mirror of https://github.com/yt-dlp/yt-dlp.git synced 2024-12-01 03:43:02 +00:00

[extractor/netverse] Add NetverseSearch extractor (#5838)

Authored by: HobbyistDev
This commit is contained in:
HobbyistDev 2022-12-29 17:12:07 +09:00 committed by GitHub
parent 9fcd8ad1f2
commit 153e88a751
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
2 changed files with 30 additions and 1 deletions

View file

@ -1160,6 +1160,7 @@
from .netverse import (
    NetverseIE,
    NetversePlaylistIE,
    NetverseSearchIE,
)
from .newgrounds import (
    NewgroundsIE,

View file

@ -1,6 +1,6 @@
import itertools

from .common import InfoExtractor, SearchInfoExtractor
from .dailymotion import DailymotionIE
from ..utils import smuggle_url, traverse_obj
@ -251,3 +251,31 @@ def _real_extract(self, url):
            self.parse_playlist(playlist_data['response'], playlist_id),
            traverse_obj(playlist_data, ('response', 'webseries_info', 'slug')),
            traverse_obj(playlist_data, ('response', 'webseries_info', 'title')))
class NetverseSearchIE(SearchInfoExtractor):
    """Search extractor for netverse.id, invoked via the ``netsearch`` prefix."""
    _SEARCH_KEY = 'netsearch'

    _TESTS = [{
        'url': 'netsearch10:tetangga',
        'info_dict': {
            'id': 'tetangga',
            'title': 'tetangga',
        },
        'playlist_count': 10,
    }]

    def _search_results(self, query):
        # Total page count as reported by the API; cached from the first
        # response that carries a truthy 'lastpage' value.
        total_pages = None
        page = 0
        while True:
            page += 1
            results = self._download_json(
                'https://api.netverse.id/search/elastic/search', query,
                query={'q': query, 'page': page}, note=f'Downloading page {page}')

            entries = traverse_obj(results, ('response', 'data', ...))
            for entry in entries:
                # Each hit only carries a slug; hand the canonical video URL
                # to NetverseIE for full extraction.
                yield self.url_result(f'https://netverse.id/video/{entry["slug"]}', NetverseIE)

            total_pages = total_pages or traverse_obj(results, ('response', 'lastpage'))
            # Stop on an empty page or once the reported last page is reached.
            if not entries or page >= (total_pages or 0):
                break