from __future__ import unicode_literals

import itertools
import re

from .common import SearchInfoExtractor


class GoogleSearchIE(SearchInfoExtractor):
    """Search extractor for Google Video search (``gvsearchN:query`` URLs).

    Pages through Google's video-tab search results and yields a
    ``url_result`` for every result link found, stopping when Google no
    longer offers a "next page" link.
    """
    IE_DESC = 'Google Video search'
    IE_NAME = 'video.google:search'
    _SEARCH_KEY = 'gvsearch'
    _TESTS = [{
        'url': 'gvsearch15:python language',
        'info_dict': {
            'id': 'python language',
            'title': 'python language',
        },
        'playlist_count': 15,
    }]
    # Number of results requested per search page.
    _PAGE_SIZE = 100

    def _search_results(self, query):
        """Yield ``url_result`` dicts for each video found for *query*.

        Fetches result pages one at a time, extracting result links with a
        regex, until the page no longer contains a "next" pagination anchor.
        """
        for pagenum in itertools.count():
            webpage = self._download_webpage(
                'http://www.google.com/search', f'gvsearch:{query}',
                note=f'Downloading result page {pagenum + 1}',
                query={
                    'tbm': 'vid',  # restrict results to the video tab
                    'q': query,
                    'start': pagenum * self._PAGE_SIZE,
                    'num': self._PAGE_SIZE,
                    'hl': 'en',
                })

            # NOTE(review): "dXiKIc" is an obfuscated Google CSS class name;
            # this is inherently fragile and may break when Google changes
            # its result markup.
            for url in re.findall(r'<div[^>]* class="dXiKIc"[^>]*><a href="([^"]+)"', webpage):
                yield self.url_result(url)

            # The "pnnext" element is Google's next-page link; its absence
            # means the result set is exhausted.
            if not re.search(r'id="pnnext"', webpage):
                return