Mirror of https://github.com/yt-dlp/yt-dlp.git, synced 2024-10-31 23:02:40 +00:00

Merge branch 'yt-dlp:master' into teachable-fix-add-hotmart

This commit is contained in commit 8a0f2ffd80.
267 changed files with 6349 additions and 12163 deletions.
.github/ISSUE_TEMPLATE/1_broken_site.yml (17 changes, vendored)

@@ -18,7 +18,7 @@ body:
       options:
         - label: I'm reporting that yt-dlp is broken on a **supported** site
           required: true
-        - label: I've verified that I'm running yt-dlp version **2023.10.13** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
@@ -61,19 +61,18 @@ body:
       description: |
         It should start like this:
       placeholder: |
-        [debug] Command-line config: ['-vU', 'test:youtube']
-        [debug] Portable config "yt-dlp.conf": ['-i']
+        [debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
         [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
-        [debug] yt-dlp version 2023.10.13 [9d339c4] (win32_exe)
+        [debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
         [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
-        [debug] Checking exe version: ffmpeg -bsfs
-        [debug] Checking exe version: ffprobe -bsfs
         [debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
         [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
         [debug] Proxy map: {}
-        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
-        Latest version: 2023.10.13, Current version: 2023.10.13
-        yt-dlp is up to date (2023.10.13)
+        [debug] Request Handlers: urllib, requests
+        [debug] Loaded 1893 extractors
+        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
+        yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
+        [youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
         <more lines>
       render: shell
     validations:
.github/ISSUE_TEMPLATE/2_site_support_request.yml (17 changes, vendored)

@@ -18,7 +18,7 @@ body:
       options:
         - label: I'm reporting a new site support request
           required: true
-        - label: I've verified that I'm running yt-dlp version **2023.10.13** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
@@ -73,19 +73,18 @@ body:
       description: |
         It should start like this:
       placeholder: |
-        [debug] Command-line config: ['-vU', 'test:youtube']
-        [debug] Portable config "yt-dlp.conf": ['-i']
+        [debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
         [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
-        [debug] yt-dlp version 2023.10.13 [9d339c4] (win32_exe)
+        [debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
         [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
-        [debug] Checking exe version: ffmpeg -bsfs
-        [debug] Checking exe version: ffprobe -bsfs
         [debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
         [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
         [debug] Proxy map: {}
-        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
-        Latest version: 2023.10.13, Current version: 2023.10.13
-        yt-dlp is up to date (2023.10.13)
+        [debug] Request Handlers: urllib, requests
+        [debug] Loaded 1893 extractors
+        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
+        yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
+        [youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
         <more lines>
       render: shell
     validations:
.github/ISSUE_TEMPLATE/3_site_feature_request.yml (17 changes, vendored)

@@ -18,7 +18,7 @@ body:
       options:
         - label: I'm requesting a site-specific feature
           required: true
-        - label: I've verified that I'm running yt-dlp version **2023.10.13** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
@@ -69,19 +69,18 @@ body:
       description: |
         It should start like this:
       placeholder: |
-        [debug] Command-line config: ['-vU', 'test:youtube']
-        [debug] Portable config "yt-dlp.conf": ['-i']
+        [debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
         [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
-        [debug] yt-dlp version 2023.10.13 [9d339c4] (win32_exe)
+        [debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
         [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
-        [debug] Checking exe version: ffmpeg -bsfs
-        [debug] Checking exe version: ffprobe -bsfs
         [debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
         [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
         [debug] Proxy map: {}
-        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
-        Latest version: 2023.10.13, Current version: 2023.10.13
-        yt-dlp is up to date (2023.10.13)
+        [debug] Request Handlers: urllib, requests
+        [debug] Loaded 1893 extractors
+        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
+        yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
+        [youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
         <more lines>
       render: shell
     validations:
.github/ISSUE_TEMPLATE/4_bug_report.yml (17 changes, vendored)

@@ -18,7 +18,7 @@ body:
       options:
         - label: I'm reporting a bug unrelated to a specific site
           required: true
-        - label: I've verified that I'm running yt-dlp version **2023.10.13** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
@@ -54,19 +54,18 @@ body:
       description: |
         It should start like this:
       placeholder: |
-        [debug] Command-line config: ['-vU', 'test:youtube']
-        [debug] Portable config "yt-dlp.conf": ['-i']
+        [debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
         [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
-        [debug] yt-dlp version 2023.10.13 [9d339c4] (win32_exe)
+        [debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
         [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
-        [debug] Checking exe version: ffmpeg -bsfs
-        [debug] Checking exe version: ffprobe -bsfs
         [debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
         [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
         [debug] Proxy map: {}
-        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
-        Latest version: 2023.10.13, Current version: 2023.10.13
-        yt-dlp is up to date (2023.10.13)
+        [debug] Request Handlers: urllib, requests
+        [debug] Loaded 1893 extractors
+        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
+        yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
+        [youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
         <more lines>
       render: shell
     validations:
.github/ISSUE_TEMPLATE/5_feature_request.yml (17 changes, vendored)

@@ -20,7 +20,7 @@ body:
           required: true
         - label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
           required: true
-        - label: I've verified that I'm running yt-dlp version **2023.10.13** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
           required: true
         - label: I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
           required: true
@@ -50,18 +50,17 @@ body:
       description: |
         It should start like this:
       placeholder: |
-        [debug] Command-line config: ['-vU', 'test:youtube']
-        [debug] Portable config "yt-dlp.conf": ['-i']
+        [debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
         [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
-        [debug] yt-dlp version 2023.10.13 [9d339c4] (win32_exe)
+        [debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
         [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
-        [debug] Checking exe version: ffmpeg -bsfs
-        [debug] Checking exe version: ffprobe -bsfs
         [debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
         [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
         [debug] Proxy map: {}
-        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
-        Latest version: 2023.10.13, Current version: 2023.10.13
-        yt-dlp is up to date (2023.10.13)
+        [debug] Request Handlers: urllib, requests
+        [debug] Loaded 1893 extractors
+        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
+        yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
+        [youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
         <more lines>
       render: shell
.github/ISSUE_TEMPLATE/6_question.yml (17 changes, vendored)

@@ -26,7 +26,7 @@ body:
           required: true
         - label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
           required: true
-        - label: I've verified that I'm running yt-dlp version **2023.10.13** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
           required: true
         - label: I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar questions **including closed ones**. DO NOT post duplicates
           required: true
@@ -56,18 +56,17 @@ body:
       description: |
         It should start like this:
       placeholder: |
-        [debug] Command-line config: ['-vU', 'test:youtube']
-        [debug] Portable config "yt-dlp.conf": ['-i']
+        [debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
         [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
-        [debug] yt-dlp version 2023.10.13 [9d339c4] (win32_exe)
+        [debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
         [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
-        [debug] Checking exe version: ffmpeg -bsfs
-        [debug] Checking exe version: ffprobe -bsfs
         [debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
         [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
         [debug] Proxy map: {}
-        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
-        Latest version: 2023.10.13, Current version: 2023.10.13
-        yt-dlp is up to date (2023.10.13)
+        [debug] Request Handlers: urllib, requests
+        [debug] Loaded 1893 extractors
+        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
+        yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
+        [youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
         <more lines>
       render: shell
.github/ISSUE_TEMPLATE_tmpl/1_broken_site.yml (2 changes, vendored)

@@ -12,7 +12,7 @@ body:
       options:
         - label: I'm reporting that yt-dlp is broken on a **supported** site
           required: true
-        - label: I've verified that I'm running yt-dlp version **%(version)s** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
.github/ISSUE_TEMPLATE_tmpl/2_site_support_request.yml (2 changes, vendored)

@@ -12,7 +12,7 @@ body:
       options:
         - label: I'm reporting a new site support request
           required: true
-        - label: I've verified that I'm running yt-dlp version **%(version)s** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
.github/ISSUE_TEMPLATE_tmpl/3_site_feature_request.yml (2 changes, vendored)

@@ -12,7 +12,7 @@ body:
       options:
         - label: I'm requesting a site-specific feature
           required: true
-        - label: I've verified that I'm running yt-dlp version **%(version)s** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
.github/ISSUE_TEMPLATE_tmpl/4_bug_report.yml (2 changes, vendored)

@@ -12,7 +12,7 @@ body:
       options:
         - label: I'm reporting a bug unrelated to a specific site
           required: true
-        - label: I've verified that I'm running yt-dlp version **%(version)s** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
.github/ISSUE_TEMPLATE_tmpl/5_feature_request.yml (2 changes, vendored)

@@ -14,7 +14,7 @@ body:
           required: true
         - label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
           required: true
-        - label: I've verified that I'm running yt-dlp version **%(version)s** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
           required: true
         - label: I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
           required: true
.github/ISSUE_TEMPLATE_tmpl/6_question.yml (2 changes, vendored)

@@ -20,7 +20,7 @@ body:
           required: true
         - label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
           required: true
-        - label: I've verified that I'm running yt-dlp version **%(version)s** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
           required: true
         - label: I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar questions **including closed ones**. DO NOT post duplicates
           required: true
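The `_tmpl` files above are the sources for the vendored issue templates: the `%(version)s` field is substituted with the current release tag when the final templates are regenerated (the generator lives in devscripts/make_issue_template.py). A minimal sketch of that printf-style substitution, with illustrative paths and field values rather than the script's actual interface:

    # Hypothetical, minimal rendering of a %(...)s issue template; the real
    # generator (devscripts/make_issue_template.py) has more fields and checks.
    from pathlib import Path

    fields = {'version': '2023.11.16'}  # assumed example value

    template = Path('.github/ISSUE_TEMPLATE_tmpl/1_broken_site.yml').read_text()
    # Python's %-formatting with a mapping fills every %(name)s placeholder
    Path('.github/ISSUE_TEMPLATE/1_broken_site.yml').write_text(template % fields)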
.github/PULL_REQUEST_TEMPLATE.md (6 changes, vendored)

@@ -40,10 +40,4 @@ ### What is the purpose of your *pull request*?
 - [ ] Core bug fix/improvement
 - [ ] New feature (It is strongly [recommended to open an issue first](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#adding-new-feature-or-making-overarching-changes))
 
 <!-- Do NOT edit/remove anything below this! -->
-</details><details><summary>Copilot Summary</summary>
-
-copilot:all
-
-</details>
.github/workflows/build.yml (88 changes, vendored)

@@ -30,6 +30,10 @@ on:
       meta_files:
         default: true
         type: boolean
+      origin:
+        required: false
+        default: ''
+        type: string
     secrets:
       GPG_SIGNING_KEY:
         required: false
@@ -37,11 +41,13 @@ on:
   workflow_dispatch:
     inputs:
       version:
-        description: Version tag (YYYY.MM.DD[.REV])
+        description: |
+          VERSION: yyyy.mm.dd[.rev] or rev
         required: true
         type: string
       channel:
-        description: Update channel (stable/nightly/...)
+        description: |
+          SOURCE of this build's updates: stable/nightly/master/<repo>
         required: true
         default: stable
         type: string
@@ -73,16 +79,34 @@ on:
         description: SHA2-256SUMS, SHA2-512SUMS, _update_spec
         default: true
         type: boolean
+      origin:
+        description: .
+        required: false
+        default: ''
+        type: choice
+        options:
+        - ''
 
 permissions:
   contents: read
 
 jobs:
+  process:
+    runs-on: ubuntu-latest
+    outputs:
+      origin: ${{ steps.process_origin.outputs.origin }}
+    steps:
+      - name: Process origin
+        id: process_origin
+        run: |
+          echo "origin=${{ inputs.origin || github.repository }}" >> "$GITHUB_OUTPUT"
+
   unix:
+    needs: process
     if: inputs.unix
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: actions/setup-python@v4
         with:
           python-version: "3.10"
@@ -96,22 +120,21 @@ jobs:
           auto-activate-base: false
       - name: Install Requirements
         run: |
-          sudo apt-get -y install zip pandoc man sed
-          python -m pip install -U pip setuptools wheel
-          python -m pip install -U Pyinstaller -r requirements.txt
+          sudo apt -y install zip pandoc man sed
           reqs=$(mktemp)
-          cat > $reqs << EOF
+          cat > "$reqs" << EOF
           python=3.10.*
           pyinstaller
           cffi
           brotli-python
+          secretstorage
           EOF
-          sed '/^brotli.*/d' requirements.txt >> $reqs
-          mamba create -n build --file $reqs
+          sed -E '/^(brotli|secretstorage).*/d' requirements.txt >> "$reqs"
+          mamba create -n build --file "$reqs"
 
       - name: Prepare
         run: |
-          python devscripts/update-version.py -c ${{ inputs.channel }} ${{ inputs.version }}
+          python devscripts/update-version.py -c "${{ inputs.channel }}" -r "${{ needs.process.outputs.origin }}" "${{ inputs.version }}"
           python devscripts/make_lazy_extractors.py
       - name: Build Unix platform-independent binary
         run: |
@@ -150,6 +173,7 @@ jobs:
             yt-dlp_linux.zip
 
   linux_arm:
+    needs: process
     if: inputs.linux_arm
     permissions:
       contents: read
@@ -162,7 +186,7 @@ jobs:
           - aarch64
 
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
        with:
          path: ./repo
      - name: Virtualized Install, Prepare & Build
@@ -180,12 +204,12 @@ jobs:
           apt -y install zlib1g-dev python3.8 python3.8-dev python3.8-distutils python3-pip
           python3.8 -m pip install -U pip setuptools wheel
           # Cannot access requirements.txt from the repo directory at this stage
-          python3.8 -m pip install -U Pyinstaller mutagen pycryptodomex websockets brotli certifi
+          python3.8 -m pip install -U Pyinstaller mutagen pycryptodomex websockets brotli certifi secretstorage
 
         run: |
           cd repo
-          python3.8 -m pip install -U Pyinstaller -r requirements.txt  # Cached version may be out of date
-          python3.8 devscripts/update-version.py -c ${{ inputs.channel }} ${{ inputs.version }}
+          python3.8 -m pip install -U Pyinstaller secretstorage -r requirements.txt  # Cached version may be out of date
+          python3.8 devscripts/update-version.py -c "${{ inputs.channel }}" -r "${{ needs.process.outputs.origin }}" "${{ inputs.version }}"
           python3.8 devscripts/make_lazy_extractors.py
           python3.8 pyinst.py
 
@@ -206,11 +230,12 @@ jobs:
           repo/dist/yt-dlp_linux_${{ (matrix.architecture == 'armv7' && 'armv7l') || matrix.architecture }}
 
   macos:
+    needs: process
     if: inputs.macos
     runs-on: macos-11
 
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       # NB: Building universal2 does not work with python from actions/setup-python
       - name: Install Requirements
         run: |
@@ -221,7 +246,7 @@ jobs:
 
       - name: Prepare
         run: |
-          python3 devscripts/update-version.py -c ${{ inputs.channel }} ${{ inputs.version }}
+          python3 devscripts/update-version.py -c "${{ inputs.channel }}" -r "${{ needs.process.outputs.origin }}" "${{ inputs.version }}"
           python3 devscripts/make_lazy_extractors.py
       - name: Build
         run: |
@@ -247,11 +272,12 @@ jobs:
           dist/yt-dlp_macos.zip
 
   macos_legacy:
+    needs: process
     if: inputs.macos_legacy
     runs-on: macos-latest
 
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Install Python
         # We need the official Python, because the GA ones only support newer macOS versions
         env:
@@ -272,7 +298,7 @@ jobs:
 
       - name: Prepare
         run: |
-          python3 devscripts/update-version.py -c ${{ inputs.channel }} ${{ inputs.version }}
+          python3 devscripts/update-version.py -c "${{ inputs.channel }}" -r "${{ needs.process.outputs.origin }}" "${{ inputs.version }}"
           python3 devscripts/make_lazy_extractors.py
       - name: Build
         run: |
@@ -296,11 +322,12 @@ jobs:
           dist/yt-dlp_macos_legacy
 
   windows:
+    needs: process
     if: inputs.windows
     runs-on: windows-latest
 
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
      - uses: actions/setup-python@v4
        with: # 3.8 is used for Win7 support
          python-version: "3.8"
@@ -311,7 +338,7 @@ jobs:
 
      - name: Prepare
        run: |
-          python devscripts/update-version.py -c ${{ inputs.channel }} ${{ inputs.version }}
+          python devscripts/update-version.py -c "${{ inputs.channel }}" -r "${{ needs.process.outputs.origin }}" "${{ inputs.version }}"
          python devscripts/make_lazy_extractors.py
      - name: Build
        run: |
@@ -343,14 +370,15 @@ jobs:
           dist/yt-dlp_win.zip
 
   windows32:
+    needs: process
     if: inputs.windows32
     runs-on: windows-latest
 
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
      - uses: actions/setup-python@v4
-        with: # 3.7 is used for Vista support. See https://github.com/yt-dlp/yt-dlp/issues/390
-          python-version: "3.7"
+        with:
+          python-version: "3.8"
          architecture: "x86"
      - name: Install Requirements
        run: |
@@ -359,7 +387,7 @@ jobs:
 
      - name: Prepare
        run: |
-          python devscripts/update-version.py -c ${{ inputs.channel }} ${{ inputs.version }}
+          python devscripts/update-version.py -c "${{ inputs.channel }}" -r "${{ needs.process.outputs.origin }}" "${{ inputs.version }}"
          python devscripts/make_lazy_extractors.py
      - name: Build
        run: |
@@ -387,6 +415,7 @@ jobs:
   meta_files:
     if: inputs.meta_files && always() && !cancelled()
     needs:
+      - process
       - unix
       - linux_arm
       - macos
@@ -407,7 +436,16 @@ jobs:
       run: |
         cat >> _update_spec << EOF
         # This file is used for regulating self-update
-        lock 2022.08.18.36 .+ Python 3.6
+        lock 2022.08.18.36 .+ Python 3\.6
+        lock 2023.11.16 (?!win_x86_exe).+ Python 3\.7
+        lock 2023.11.16 win_x86_exe .+ Windows-(?:Vista|2008Server)
+        lockV2 yt-dlp/yt-dlp 2022.08.18.36 .+ Python 3\.6
+        lockV2 yt-dlp/yt-dlp 2023.11.16 (?!win_x86_exe).+ Python 3\.7
+        lockV2 yt-dlp/yt-dlp 2023.11.16 win_x86_exe .+ Windows-(?:Vista|2008Server)
+        lockV2 yt-dlp/yt-dlp-nightly-builds 2023.11.15.232826 (?!win_x86_exe).+ Python 3\.7
+        lockV2 yt-dlp/yt-dlp-nightly-builds 2023.11.15.232826 win_x86_exe .+ Windows-(?:Vista|2008Server)
+        lockV2 yt-dlp/yt-dlp-master-builds 2023.11.15.232812 (?!win_x86_exe).+ Python 3\.7
+        lockV2 yt-dlp/yt-dlp-master-builds 2023.11.15.232812 win_x86_exe .+ Windows-(?:Vista|2008Server)
         EOF
 
     - name: Sign checksum files
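The `_update_spec` lines written above drive yt-dlp's self-updater: a `lock` rule pins any client whose description string matches the trailing regular expression to the given maximum version, and `lockV2` scopes the same idea to a specific source repository, which is why the nightly and master build repos each get their own entries. A rough sketch of how such a rule can be evaluated; the function and the client-string format below are illustrative assumptions, not the updater's actual code (the real implementation lives in yt_dlp/update.py):

    # Illustrative matcher for _update_spec lock rules; yt-dlp's real updater
    # (yt_dlp/update.py) differs in detail. `client` is an assumed example of
    # a "<variant> <details> <system>" description string.
    import re
    from typing import Optional

    def locked_version(spec: str, repo: str, client: str) -> Optional[str]:
        for line in spec.splitlines():
            kind, _, rest = line.partition(' ')
            if kind == 'lockV2':
                lock_repo, _, rest = rest.partition(' ')
                if lock_repo != repo:  # rule is scoped to another source repo
                    continue
            elif kind != 'lock':
                continue  # skip comments and blank lines
            version, _, pattern = rest.partition(' ')
            if re.fullmatch(pattern, client):
                return version  # a matching client may not update past this
        return None

    spec = 'lockV2 yt-dlp/yt-dlp 2023.11.16 win_x86_exe .+ Windows-(?:Vista|2008Server)'
    print(locked_version(spec, 'yt-dlp/yt-dlp', 'win_x86_exe Python 3.8 Windows-Vista'))
    # -> 2023.11.16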
.github/workflows/codeql.yml (2 changes, vendored)

@@ -29,7 +29,7 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       # Initializes the CodeQL tools for scanning.
       - name: Initialize CodeQL
.github/workflows/core.yml (12 changes, vendored)

@@ -3,6 +3,10 @@ on: [push, pull_request]
 permissions:
   contents: read
 
+concurrency:
+  group: core-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: ${{ github.event_name == 'pull_request' }}
+
 jobs:
   tests:
     name: Core Tests
@@ -13,12 +17,12 @@ jobs:
       matrix:
         os: [ubuntu-latest]
         # CPython 3.11 is in quick-test
-        python-version: ['3.8', '3.9', '3.10', '3.12', pypy-3.7, pypy-3.8, pypy-3.10]
+        python-version: ['3.8', '3.9', '3.10', '3.12', pypy-3.8, pypy-3.10]
         run-tests-ext: [sh]
         include:
           # atleast one of each CPython/PyPy tests must be in windows
           - os: windows-latest
-            python-version: '3.7'
+            python-version: '3.8'
             run-tests-ext: bat
           - os: windows-latest
             python-version: '3.12'
@@ -27,12 +31,12 @@ jobs:
             python-version: pypy-3.9
             run-tests-ext: bat
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Set up Python ${{ matrix.python-version }}
         uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
-      - name: Install dependencies
+      - name: Install test requirements
         run: pip install pytest -r requirements.txt
       - name: Run tests
         continue-on-error: False
.github/workflows/download.yml (12 changes, vendored)

@@ -9,13 +9,13 @@ jobs:
     if: "contains(github.event.head_commit.message, 'ci run dl')"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Set up Python
         uses: actions/setup-python@v4
         with:
           python-version: 3.9
       - name: Install test requirements
-        run: pip install pytest
+        run: pip install pytest -r requirements.txt
       - name: Run tests
         continue-on-error: true
         run: ./devscripts/run_tests.sh download
@@ -28,7 +28,7 @@ jobs:
       fail-fast: true
       matrix:
         os: [ubuntu-latest]
-        python-version: ['3.7', '3.10', '3.12', pypy-3.7, pypy-3.8, pypy-3.10]
+        python-version: ['3.10', '3.11', '3.12', pypy-3.8, pypy-3.10]
         run-tests-ext: [sh]
         include:
           # atleast one of each CPython/PyPy tests must be in windows
@@ -39,13 +39,13 @@ jobs:
             python-version: pypy-3.9
             run-tests-ext: bat
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Set up Python ${{ matrix.python-version }}
         uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
-      - name: Install pytest
-        run: pip install pytest
+      - name: Install test requirements
+        run: pip install pytest -r requirements.txt
       - name: Run tests
         continue-on-error: true
         run: ./devscripts/run_tests.${{ matrix.run-tests-ext }} download
.github/workflows/publish.yml (97 changes, vendored, deleted)

@@ -1,97 +0,0 @@
-name: Publish
-on:
-  workflow_call:
-    inputs:
-      channel:
-        default: stable
-        required: true
-        type: string
-      version:
-        required: true
-        type: string
-      target_commitish:
-        required: true
-        type: string
-      prerelease:
-        default: false
-        required: true
-        type: boolean
-    secrets:
-      ARCHIVE_REPO_TOKEN:
-        required: false
-
-permissions:
-  contents: write
-
-jobs:
-  publish:
-    runs-on: ubuntu-latest
-
-    steps:
-      - uses: actions/checkout@v3
-        with:
-          fetch-depth: 0
-      - uses: actions/download-artifact@v3
-      - uses: actions/setup-python@v4
-        with:
-          python-version: "3.10"
-
-      - name: Generate release notes
-        run: |
-          printf '%s' \
-            '[![Installation](https://img.shields.io/badge/-Which%20file%20should%20I%20download%3F-white.svg?style=for-the-badge)]' \
-            '(https://github.com/yt-dlp/yt-dlp#installation "Installation instructions") ' \
-            '[![Documentation](https://img.shields.io/badge/-Docs-brightgreen.svg?style=for-the-badge&logo=GitBook&labelColor=555555)]' \
-            '(https://github.com/yt-dlp/yt-dlp/tree/2023.03.04#readme "Documentation") ' \
-            '[![Donate](https://img.shields.io/badge/_-Donate-red.svg?logo=githubsponsors&labelColor=555555&style=for-the-badge)]' \
-            '(https://github.com/yt-dlp/yt-dlp/blob/master/Collaborators.md#collaborators "Donate") ' \
-            '[![Discord](https://img.shields.io/discord/807245652072857610?color=blue&labelColor=555555&label=&logo=discord&style=for-the-badge)]' \
-            '(https://discord.gg/H5MNcFW63r "Discord") ' \
-            ${{ inputs.channel != 'nightly' && '"[![Nightly](https://img.shields.io/badge/Get%20nightly%20builds-purple.svg?style=for-the-badge)]" \
-            "(https://github.com/yt-dlp/yt-dlp-nightly-builds/releases/latest \"Nightly builds\")"' || '' }} \
-            > ./RELEASE_NOTES
-          printf '\n\n' >> ./RELEASE_NOTES
-          cat >> ./RELEASE_NOTES << EOF
-          #### A description of the various files are in the [README](https://github.com/yt-dlp/yt-dlp#release-files)
-          ---
-          $(python ./devscripts/make_changelog.py -vv --collapsible)
-          EOF
-          printf '%s\n\n' '**This is an automated nightly pre-release build**' >> ./NIGHTLY_NOTES
-          cat ./RELEASE_NOTES >> ./NIGHTLY_NOTES
-          printf '%s\n\n' 'Generated from: https://github.com/${{ github.repository }}/commit/${{ inputs.target_commitish }}' >> ./ARCHIVE_NOTES
-          cat ./RELEASE_NOTES >> ./ARCHIVE_NOTES
-
-      - name: Archive nightly release
-        env:
-          GH_TOKEN: ${{ secrets.ARCHIVE_REPO_TOKEN }}
-          GH_REPO: ${{ vars.ARCHIVE_REPO }}
-        if: |
-          inputs.channel == 'nightly' && env.GH_TOKEN != '' && env.GH_REPO != ''
-        run: |
-          gh release create \
-            --notes-file ARCHIVE_NOTES \
-            --title "yt-dlp nightly ${{ inputs.version }}" \
-            ${{ inputs.version }} \
-            artifact/*
-
-      - name: Prune old nightly release
-        if: inputs.channel == 'nightly' && !vars.ARCHIVE_REPO
-        env:
-          GH_TOKEN: ${{ github.token }}
-        run: |
-          gh release delete --yes --cleanup-tag "nightly" || true
-          git tag --delete "nightly" || true
-          sleep 5  # Enough time to cover deletion race condition
-
-      - name: Publish release${{ inputs.channel == 'nightly' && ' (nightly)' || '' }}
-        env:
-          GH_TOKEN: ${{ github.token }}
-        if: (inputs.channel == 'nightly' && !vars.ARCHIVE_REPO) || inputs.channel != 'nightly'
-        run: |
-          gh release create \
-            --notes-file ${{ inputs.channel == 'nightly' && 'NIGHTLY_NOTES' || 'RELEASE_NOTES' }} \
-            --target ${{ inputs.target_commitish }} \
-            --title "yt-dlp ${{ inputs.channel == 'nightly' && 'nightly ' || '' }}${{ inputs.version }}" \
-            ${{ inputs.prerelease && '--prerelease' || '' }} \
-            ${{ inputs.channel == 'nightly' && '"nightly"' || inputs.version }} \
-            artifact/*
.github/workflows/quick-test.yml (6 changes, vendored)

@@ -9,13 +9,13 @@ jobs:
     if: "!contains(github.event.head_commit.message, 'ci skip all')"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Set up Python 3.11
         uses: actions/setup-python@v4
         with:
           python-version: '3.11'
       - name: Install test requirements
-        run: pip install pytest pycryptodomex
+        run: pip install pytest -r requirements.txt
       - name: Run tests
         run: |
           python3 -m yt_dlp -v || true
@@ -25,7 +25,7 @@ jobs:
     if: "!contains(github.event.head_commit.message, 'ci skip all')"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: actions/setup-python@v4
       - name: Install flake8
         run: pip install flake8
.github/workflows/release-master.yml (27 changes, vendored, new file)

@@ -0,0 +1,27 @@
+name: Release (master)
+on:
+  push:
+    branches:
+      - master
+    paths:
+      - "yt_dlp/**.py"
+      - "!yt_dlp/version.py"
+      - "setup.py"
+      - "pyinst.py"
+concurrency:
+  group: release-master
+permissions:
+  contents: read
+
+jobs:
+  release:
+    if: vars.BUILD_MASTER != ''
+    uses: ./.github/workflows/release.yml
+    with:
+      prerelease: true
+      source: master
+    permissions:
+      contents: write
+      packages: write
+      id-token: write # mandatory for trusted publishing
+    secrets: inherit
.github/workflows/release-nightly.yml (57 changes, vendored)

@@ -1,52 +1,35 @@
 name: Release (nightly)
 on:
-  push:
-    branches:
-      - master
-    paths:
-      - "yt_dlp/**.py"
-      - "!yt_dlp/version.py"
-concurrency:
-  group: release-nightly
-  cancel-in-progress: true
+  schedule:
+    - cron: '23 23 * * *'
 permissions:
   contents: read
 
 jobs:
-  prepare:
+  check_nightly:
     if: vars.BUILD_NIGHTLY != ''
     runs-on: ubuntu-latest
     outputs:
-      version: ${{ steps.get_version.outputs.version }}
+      commit: ${{ steps.check_for_new_commits.outputs.commit }}
     steps:
-      - uses: actions/checkout@v3
-      - name: Get version
-        id: get_version
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - name: Check for new commits
+        id: check_for_new_commits
         run: |
-          python devscripts/update-version.py "$(date -u +"%H%M%S")" | grep -Po "version=\d+(\.\d+){3}" >> "$GITHUB_OUTPUT"
+          relevant_files=("yt_dlp/*.py" ':!yt_dlp/version.py' "setup.py" "pyinst.py")
+          echo "commit=$(git log --format=%H -1 --since="24 hours ago" -- "${relevant_files[@]}")" | tee "$GITHUB_OUTPUT"
 
-  build:
-    needs: prepare
-    uses: ./.github/workflows/build.yml
+  release:
+    needs: [check_nightly]
+    if: ${{ needs.check_nightly.outputs.commit }}
+    uses: ./.github/workflows/release.yml
     with:
-      version: ${{ needs.prepare.outputs.version }}
-      channel: nightly
+      prerelease: true
+      source: nightly
     permissions:
-      contents: read
-      packages: write # For package cache
-    secrets:
-      GPG_SIGNING_KEY: ${{ secrets.GPG_SIGNING_KEY }}
-
-  publish:
-    needs: [prepare, build]
-    uses: ./.github/workflows/publish.yml
-    secrets:
-      ARCHIVE_REPO_TOKEN: ${{ secrets.ARCHIVE_REPO_TOKEN }}
-    permissions:
-      contents: write
-    with:
-      channel: nightly
-      prerelease: true
-      version: ${{ needs.prepare.outputs.version }}
-      target_commitish: ${{ github.sha }}
+      contents: write
+      packages: write
+      id-token: write # mandatory for trusted publishing
+    secrets: inherit
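The rewritten nightly workflow replaces the old unconditional push trigger with a scheduled run that first asks git whether anything relevant changed in the last day, and the release job is gated on that output. A rough Python rendering of the "Check for new commits" step above, for illustration only (the workflow itself uses plain git and bash):

    # Approximate Python equivalent of the "Check for new commits" step.
    import subprocess

    relevant_files = ['yt_dlp/*.py', ':!yt_dlp/version.py', 'setup.py', 'pyinst.py']
    commit = subprocess.run(
        ['git', 'log', '--format=%H', '-1', '--since=24 hours ago', '--', *relevant_files],
        capture_output=True, text=True, check=True,
    ).stdout.strip()

    # Empty output means no relevant commit in 24 hours, so the release job
    # (gated by `if: ${{ needs.check_nightly.outputs.commit }}`) is skipped.
    print(f'commit={commit}')  # the workflow tees this line into $GITHUB_OUTPUT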
360
.github/workflows/release.yml
vendored
360
.github/workflows/release.yml
vendored
|
@ -1,14 +1,45 @@
|
|||
name: Release
|
||||
on:
|
||||
workflow_dispatch:
|
||||
workflow_call:
|
||||
inputs:
|
||||
version:
|
||||
description: Version tag (YYYY.MM.DD[.REV])
|
||||
prerelease:
|
||||
required: false
|
||||
default: true
|
||||
type: boolean
|
||||
source:
|
||||
required: false
|
||||
default: ''
|
||||
type: string
|
||||
channel:
|
||||
description: Update channel (stable/nightly/...)
|
||||
target:
|
||||
required: false
|
||||
default: ''
|
||||
type: string
|
||||
version:
|
||||
required: false
|
||||
default: ''
|
||||
type: string
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
source:
|
||||
description: |
|
||||
SOURCE of this release's updates:
|
||||
channel, repo, tag, or channel/repo@tag
|
||||
(default: <current_repo>)
|
||||
required: false
|
||||
default: ''
|
||||
type: string
|
||||
target:
|
||||
description: |
|
||||
TARGET to publish this release to:
|
||||
channel, tag, or channel@tag
|
||||
(default: <source> if writable else <current_repo>[@source_tag])
|
||||
required: false
|
||||
default: ''
|
||||
type: string
|
||||
version:
|
||||
description: |
|
||||
VERSION: yyyy.mm.dd[.rev] or rev
|
||||
(default: auto-generated)
|
||||
required: false
|
||||
default: ''
|
||||
type: string
|
||||
|
@ -26,12 +57,18 @@ jobs:
|
|||
contents: write
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
channel: ${{ steps.set_channel.outputs.channel }}
|
||||
version: ${{ steps.update_version.outputs.version }}
|
||||
channel: ${{ steps.setup_variables.outputs.channel }}
|
||||
version: ${{ steps.setup_variables.outputs.version }}
|
||||
target_repo: ${{ steps.setup_variables.outputs.target_repo }}
|
||||
target_repo_token: ${{ steps.setup_variables.outputs.target_repo_token }}
|
||||
target_tag: ${{ steps.setup_variables.outputs.target_tag }}
|
||||
pypi_project: ${{ steps.setup_variables.outputs.pypi_project }}
|
||||
pypi_suffix: ${{ steps.setup_variables.outputs.pypi_suffix }}
|
||||
pypi_token: ${{ steps.setup_variables.outputs.pypi_token }}
|
||||
head_sha: ${{ steps.get_target.outputs.head_sha }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
|
@ -39,25 +76,133 @@ jobs:
|
|||
with:
|
||||
python-version: "3.10"
|
||||
|
||||
- name: Set channel
|
||||
id: set_channel
|
||||
- name: Process inputs
|
||||
id: process_inputs
|
||||
run: |
|
||||
CHANNEL="${{ github.repository == 'yt-dlp/yt-dlp' && 'stable' || github.repository }}"
|
||||
echo "channel=${{ inputs.channel || '$CHANNEL' }}" > "$GITHUB_OUTPUT"
|
||||
cat << EOF
|
||||
::group::Inputs
|
||||
prerelease=${{ inputs.prerelease }}
|
||||
source=${{ inputs.source }}
|
||||
target=${{ inputs.target }}
|
||||
version=${{ inputs.version }}
|
||||
::endgroup::
|
||||
EOF
|
||||
IFS='@' read -r source_repo source_tag <<<"${{ inputs.source }}"
|
||||
IFS='@' read -r target_repo target_tag <<<"${{ inputs.target }}"
|
||||
cat << EOF >> "$GITHUB_OUTPUT"
|
||||
source_repo=${source_repo}
|
||||
source_tag=${source_tag}
|
||||
target_repo=${target_repo}
|
||||
target_tag=${target_tag}
|
||||
EOF
|
||||
|
||||
- name: Update version
|
||||
id: update_version
|
||||
- name: Setup variables
|
||||
id: setup_variables
|
||||
env:
|
||||
source_repo: ${{ steps.process_inputs.outputs.source_repo }}
|
||||
source_tag: ${{ steps.process_inputs.outputs.source_tag }}
|
||||
target_repo: ${{ steps.process_inputs.outputs.target_repo }}
|
||||
target_tag: ${{ steps.process_inputs.outputs.target_tag }}
|
||||
run: |
|
||||
REVISION="${{ vars.PUSH_VERSION_COMMIT == '' && '$(date -u +"%H%M%S")' || '' }}"
|
||||
REVISION="${{ inputs.prerelease && '$(date -u +"%H%M%S")' || '$REVISION' }}"
|
||||
python devscripts/update-version.py ${{ inputs.version || '$REVISION' }} | \
|
||||
grep -Po "version=\d+\.\d+\.\d+(\.\d+)?" >> "$GITHUB_OUTPUT"
|
||||
# unholy bash monstrosity (sincere apologies)
|
||||
fallback_token () {
|
||||
if ${{ !secrets.ARCHIVE_REPO_TOKEN }}; then
|
||||
echo "::error::Repository access secret ${target_repo_token^^} not found"
|
||||
exit 1
|
||||
fi
|
||||
target_repo_token=ARCHIVE_REPO_TOKEN
|
||||
return 0
|
||||
}
|
||||
|
||||
source_is_channel=0
|
||||
[[ "${source_repo}" == 'stable' ]] && source_repo='yt-dlp/yt-dlp'
|
||||
if [[ -z "${source_repo}" ]]; then
|
||||
source_repo='${{ github.repository }}'
|
||||
elif [[ '${{ vars[format('{0}_archive_repo', env.source_repo)] }}' ]]; then
|
||||
source_is_channel=1
|
||||
source_channel='${{ vars[format('{0}_archive_repo', env.source_repo)] }}'
|
||||
elif [[ -z "${source_tag}" && "${source_repo}" != */* ]]; then
|
||||
source_tag="${source_repo}"
|
||||
source_repo='${{ github.repository }}'
|
||||
fi
|
||||
resolved_source="${source_repo}"
|
||||
if [[ "${source_tag}" ]]; then
|
||||
resolved_source="${resolved_source}@${source_tag}"
|
||||
elif [[ "${source_repo}" == 'yt-dlp/yt-dlp' ]]; then
|
||||
resolved_source='stable'
|
||||
fi
|
||||
|
||||
revision="${{ (inputs.prerelease || !vars.PUSH_VERSION_COMMIT) && '$(date -u +"%H%M%S")' || '' }}"
|
||||
version="$(
|
||||
python devscripts/update-version.py \
|
||||
-c "${resolved_source}" -r "${{ github.repository }}" ${{ inputs.version || '$revision' }} | \
|
||||
grep -Po "version=\K\d+\.\d+\.\d+(\.\d+)?")"
|
||||
|
||||
if [[ "${target_repo}" ]]; then
|
||||
if [[ -z "${target_tag}" ]]; then
|
||||
if [[ '${{ vars[format('{0}_archive_repo', env.target_repo)] }}' ]]; then
|
||||
target_tag="${source_tag:-${version}}"
|
||||
else
|
||||
target_tag="${target_repo}"
|
||||
target_repo='${{ github.repository }}'
|
||||
fi
|
||||
fi
|
||||
if [[ "${target_repo}" != '${{ github.repository}}' ]]; then
|
||||
target_repo='${{ vars[format('{0}_archive_repo', env.target_repo)] }}'
|
||||
target_repo_token='${{ env.target_repo }}_archive_repo_token'
|
||||
${{ !!secrets[format('{0}_archive_repo_token', env.target_repo)] }} || fallback_token
|
||||
pypi_project='${{ vars[format('{0}_pypi_project', env.target_repo)] }}'
|
||||
pypi_suffix='${{ vars[format('{0}_pypi_suffix', env.target_repo)] }}'
|
||||
${{ !secrets[format('{0}_pypi_token', env.target_repo)] }} || pypi_token='${{ env.target_repo }}_pypi_token'
|
||||
fi
|
||||
else
|
||||
target_tag="${source_tag:-${version}}"
|
||||
if ((source_is_channel)); then
|
||||
target_repo="${source_channel}"
|
||||
target_repo_token='${{ env.source_repo }}_archive_repo_token'
|
||||
${{ !!secrets[format('{0}_archive_repo_token', env.source_repo)] }} || fallback_token
|
||||
pypi_project='${{ vars[format('{0}_pypi_project', env.source_repo)] }}'
|
||||
pypi_suffix='${{ vars[format('{0}_pypi_suffix', env.source_repo)] }}'
|
||||
${{ !secrets[format('{0}_pypi_token', env.source_repo)] }} || pypi_token='${{ env.source_repo }}_pypi_token'
|
||||
else
|
||||
target_repo='${{ github.repository }}'
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ "${target_repo}" == '${{ github.repository }}' ]] && ${{ !inputs.prerelease }}; then
|
||||
pypi_project='${{ vars.PYPI_PROJECT }}'
|
||||
fi
|
||||
if [[ -z "${pypi_token}" && "${pypi_project}" ]]; then
|
||||
if ${{ !secrets.PYPI_TOKEN }}; then
|
||||
pypi_token=OIDC
|
||||
else
|
||||
pypi_token=PYPI_TOKEN
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "::group::Output variables"
|
||||
cat << EOF | tee -a "$GITHUB_OUTPUT"
|
||||
channel=${resolved_source}
|
||||
version=${version}
|
||||
target_repo=${target_repo}
|
||||
target_repo_token=${target_repo_token}
|
||||
target_tag=${target_tag}
|
||||
pypi_project=${pypi_project}
|
||||
pypi_suffix=${pypi_suffix}
|
||||
pypi_token=${pypi_token}
|
||||
EOF
|
||||
echo "::endgroup::"
|
||||
|
||||
- name: Update documentation
|
||||
env:
|
||||
version: ${{ steps.setup_variables.outputs.version }}
|
||||
target_repo: ${{ steps.setup_variables.outputs.target_repo }}
|
||||
if: |
|
||||
!inputs.prerelease && env.target_repo == github.repository
|
||||
run: |
|
||||
make doc
|
||||
sed '/### /Q' Changelog.md >> ./CHANGELOG
|
||||
echo '### ${{ steps.update_version.outputs.version }}' >> ./CHANGELOG
|
||||
echo '### ${{ env.version }}' >> ./CHANGELOG
|
||||
python ./devscripts/make_changelog.py -vv -c >> ./CHANGELOG
|
||||
echo >> ./CHANGELOG
|
||||
grep -Poz '(?s)### \d+\.\d+\.\d+.+' 'Changelog.md' | head -n -1 >> ./CHANGELOG
|
||||
|
@ -65,12 +210,16 @@ jobs:
|
|||
|
||||
- name: Push to release
|
||||
id: push_release
|
||||
if: ${{ !inputs.prerelease }}
|
||||
env:
|
||||
version: ${{ steps.setup_variables.outputs.version }}
|
||||
target_repo: ${{ steps.setup_variables.outputs.target_repo }}
|
||||
if: |
|
||||
!inputs.prerelease && env.target_repo == github.repository
|
||||
run: |
|
||||
git config --global user.name github-actions
|
||||
git config --global user.email github-actions@example.com
|
||||
git config --global user.name "github-actions[bot]"
|
||||
git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com"
|
||||
git add -u
|
||||
git commit -m "Release ${{ steps.update_version.outputs.version }}" \
|
||||
git commit -m "Release ${{ env.version }}" \
|
||||
-m "Created by: ${{ github.event.sender.login }}" -m ":ci skip all :ci run dl"
|
||||
git push origin --force ${{ github.event.ref }}:release
|
||||
|
||||
|
@ -80,7 +229,10 @@ jobs:
|
|||
echo "head_sha=$(git rev-parse HEAD)" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Update master
|
||||
if: vars.PUSH_VERSION_COMMIT != '' && !inputs.prerelease
|
||||
env:
|
||||
target_repo: ${{ steps.setup_variables.outputs.target_repo }}
|
||||
if: |
|
||||
vars.PUSH_VERSION_COMMIT != '' && !inputs.prerelease && env.target_repo == github.repository
|
||||
run: git push origin ${{ github.event.ref }}
|
||||
|
||||
build:
|
||||
|
@ -89,75 +241,159 @@ jobs:
|
|||
with:
|
||||
version: ${{ needs.prepare.outputs.version }}
|
||||
channel: ${{ needs.prepare.outputs.channel }}
|
||||
origin: ${{ needs.prepare.outputs.target_repo }}
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write # For package cache
|
||||
secrets:
|
||||
GPG_SIGNING_KEY: ${{ secrets.GPG_SIGNING_KEY }}
|
||||
|
||||
publish_pypi_homebrew:
|
||||
publish_pypi:
|
||||
needs: [prepare, build]
|
||||
if: ${{ needs.prepare.outputs.pypi_project }}
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
id-token: write # mandatory for trusted publishing
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: "3.10"
|
||||
|
||||
- name: Install Requirements
|
||||
run: |
|
||||
sudo apt-get -y install pandoc man
|
||||
sudo apt -y install pandoc man
|
||||
python -m pip install -U pip setuptools wheel twine
|
||||
python -m pip install -U -r requirements.txt
|
||||
|
||||
- name: Prepare
|
||||
run: |
|
||||
python devscripts/update-version.py ${{ needs.prepare.outputs.version }}
|
||||
python devscripts/make_lazy_extractors.py
|
||||
|
||||
- name: Build and publish on PyPI
|
||||
env:
|
||||
TWINE_USERNAME: __token__
|
||||
TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}
|
||||
if: env.TWINE_PASSWORD != '' && !inputs.prerelease
|
||||
version: ${{ needs.prepare.outputs.version }}
|
||||
suffix: ${{ needs.prepare.outputs.pypi_suffix }}
|
||||
channel: ${{ needs.prepare.outputs.channel }}
|
||||
target_repo: ${{ needs.prepare.outputs.target_repo }}
|
||||
pypi_project: ${{ needs.prepare.outputs.pypi_project }}
|
||||
run: |
|
||||
python devscripts/update-version.py -c "${{ env.channel }}" -r "${{ env.target_repo }}" -s "${{ env.suffix }}" "${{ env.version }}"
|
||||
python devscripts/make_lazy_extractors.py
|
||||
sed -i -E "s/(name=')[^']+(', # package name)/\1${{ env.pypi_project }}\2/" setup.py
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
rm -rf dist/*
|
||||
make pypi-files
|
||||
python devscripts/set-variant.py pip -M "You installed yt-dlp with pip or using the wheel from PyPi; Use that to update"
|
||||
python setup.py sdist bdist_wheel
|
||||
|
||||
- name: Publish to PyPI via token
|
||||
env:
|
||||
TWINE_USERNAME: __token__
|
||||
TWINE_PASSWORD: ${{ secrets[needs.prepare.outputs.pypi_token] }}
|
||||
if: |
|
||||
needs.prepare.outputs.pypi_token != 'OIDC' && env.TWINE_PASSWORD
|
||||
run: |
|
||||
twine upload dist/*
|
||||
|
||||
- name: Checkout Homebrew repository
|
||||
env:
|
||||
BREW_TOKEN: ${{ secrets.BREW_TOKEN }}
|
||||
PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }}
|
||||
if: env.BREW_TOKEN != '' && env.PYPI_TOKEN != '' && !inputs.prerelease
|
||||
uses: actions/checkout@v3
|
||||
- name: Publish to PyPI via trusted publishing
|
||||
if: |
|
||||
needs.prepare.outputs.pypi_token == 'OIDC'
|
||||
uses: pypa/gh-action-pypi-publish@release/v1
|
||||
with:
|
||||
repository: yt-dlp/homebrew-taps
|
||||
path: taps
|
||||
ssh-key: ${{ secrets.BREW_TOKEN }}
|
||||
|
||||
- name: Update Homebrew Formulae
|
||||
env:
|
||||
BREW_TOKEN: ${{ secrets.BREW_TOKEN }}
|
||||
PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }}
|
||||
if: env.BREW_TOKEN != '' && env.PYPI_TOKEN != '' && !inputs.prerelease
|
||||
run: |
|
||||
python devscripts/update-formulae.py taps/Formula/yt-dlp.rb "${{ needs.prepare.outputs.version }}"
|
||||
git -C taps/ config user.name github-actions
|
||||
git -C taps/ config user.email github-actions@example.com
|
||||
git -C taps/ commit -am 'yt-dlp: ${{ needs.prepare.outputs.version }}'
|
||||
git -C taps/ push
|
||||
verbose: true
|
||||
|
||||
publish:
|
||||
needs: [prepare, build]
|
||||
uses: ./.github/workflows/publish.yml
|
||||
permissions:
|
||||
contents: write
|
||||
with:
|
||||
channel: ${{ needs.prepare.outputs.channel }}
|
||||
prerelease: ${{ inputs.prerelease }}
|
||||
version: ${{ needs.prepare.outputs.version }}
|
||||
          target_commitish: ${{ needs.prepare.outputs.head_sha }}

    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - uses: actions/download-artifact@v3
      - uses: actions/setup-python@v4
        with:
          python-version: "3.10"

      - name: Generate release notes
        env:
          head_sha: ${{ needs.prepare.outputs.head_sha }}
          target_repo: ${{ needs.prepare.outputs.target_repo }}
          target_tag: ${{ needs.prepare.outputs.target_tag }}
        run: |
          printf '%s' \
            '[![Installation](https://img.shields.io/badge/-Which%20file%20should%20I%20download%3F-white.svg?style=for-the-badge)]' \
            '(https://github.com/${{ github.repository }}#installation "Installation instructions") ' \
            '[![Documentation](https://img.shields.io/badge/-Docs-brightgreen.svg?style=for-the-badge&logo=GitBook&labelColor=555555)]' \
            '(https://github.com/${{ github.repository }}' \
            '${{ env.target_repo == github.repository && format('/tree/{0}', env.target_tag) || '' }}#readme "Documentation") ' \
            '[![Donate](https://img.shields.io/badge/_-Donate-red.svg?logo=githubsponsors&labelColor=555555&style=for-the-badge)]' \
            '(https://github.com/yt-dlp/yt-dlp/blob/master/Collaborators.md#collaborators "Donate") ' \
            '[![Discord](https://img.shields.io/discord/807245652072857610?color=blue&labelColor=555555&label=&logo=discord&style=for-the-badge)]' \
            '(https://discord.gg/H5MNcFW63r "Discord") ' \
            ${{ env.target_repo == 'yt-dlp/yt-dlp' && '\
            "[![Nightly](https://img.shields.io/badge/Get%20nightly%20builds-purple.svg?style=for-the-badge)]" \
            "(https://github.com/yt-dlp/yt-dlp-nightly-builds/releases/latest \"Nightly builds\") " \
            "[![Master](https://img.shields.io/badge/Get%20master%20builds-lightblue.svg?style=for-the-badge)]" \
            "(https://github.com/yt-dlp/yt-dlp-master-builds/releases/latest \"Master builds\")"' || '' }} > ./RELEASE_NOTES
          printf '\n\n' >> ./RELEASE_NOTES
          cat >> ./RELEASE_NOTES << EOF
          #### A description of the various files are in the [README](https://github.com/${{ github.repository }}#release-files)
          ---
          $(python ./devscripts/make_changelog.py -vv --collapsible)
          EOF
          printf '%s\n\n' '**This is a pre-release build**' >> ./PRERELEASE_NOTES
          cat ./RELEASE_NOTES >> ./PRERELEASE_NOTES
          printf '%s\n\n' 'Generated from: https://github.com/${{ github.repository }}/commit/${{ env.head_sha }}' >> ./ARCHIVE_NOTES
          cat ./RELEASE_NOTES >> ./ARCHIVE_NOTES

      - name: Publish to archive repo
        env:
          GH_TOKEN: ${{ secrets[needs.prepare.outputs.target_repo_token] }}
          GH_REPO: ${{ needs.prepare.outputs.target_repo }}
          version: ${{ needs.prepare.outputs.version }}
          channel: ${{ needs.prepare.outputs.channel }}
        if: |
          inputs.prerelease && env.GH_TOKEN != '' && env.GH_REPO != '' && env.GH_REPO != github.repository
        run: |
          title="${{ startswith(env.GH_REPO, 'yt-dlp/') && 'yt-dlp ' || '' }}${{ env.channel }}"
          gh release create \
            --notes-file ARCHIVE_NOTES \
            --title "${title} ${{ env.version }}" \
            ${{ env.version }} \
            artifact/*

      - name: Prune old release
        env:
          GH_TOKEN: ${{ github.token }}
          version: ${{ needs.prepare.outputs.version }}
          target_repo: ${{ needs.prepare.outputs.target_repo }}
          target_tag: ${{ needs.prepare.outputs.target_tag }}
        if: |
          env.target_repo == github.repository && env.target_tag != env.version
        run: |
          gh release delete --yes --cleanup-tag "${{ env.target_tag }}" || true
          git tag --delete "${{ env.target_tag }}" || true
          sleep 5  # Enough time to cover deletion race condition

      - name: Publish release
        env:
          GH_TOKEN: ${{ github.token }}
          version: ${{ needs.prepare.outputs.version }}
          target_repo: ${{ needs.prepare.outputs.target_repo }}
          target_tag: ${{ needs.prepare.outputs.target_tag }}
          head_sha: ${{ needs.prepare.outputs.head_sha }}
        if: |
          env.target_repo == github.repository
        run: |
          title="${{ github.repository == 'yt-dlp/yt-dlp' && 'yt-dlp ' || '' }}"
          title+="${{ env.target_tag != env.version && format('{0} ', env.target_tag) || '' }}"
          gh release create \
            --notes-file ${{ inputs.prerelease && 'PRERELEASE_NOTES' || 'RELEASE_NOTES' }} \
            --target ${{ env.head_sha }} \
            --title "${title}${{ env.version }}" \
            ${{ inputs.prerelease && '--prerelease' || '' }} \
            ${{ env.target_tag }} \
            artifact/*
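A note on the `${{ cond && a || b }}` expressions peppered through these steps: GitHub Actions has no ternary operator, so the workflow relies on `&&`/`||` short-circuiting, which behaves like a conditional expression as long as the middle operand is truthy. A rough Python equivalent of the title construction in the final step, with illustrative stand-in values (this is a sketch, not code from the repository):

```
# Rough Python equivalent of the "Publish release" title construction above.
def release_title(repository: str, target_tag: str, version: str) -> str:
    # ${{ github.repository == 'yt-dlp/yt-dlp' && 'yt-dlp ' || '' }}
    title = 'yt-dlp ' if repository == 'yt-dlp/yt-dlp' else ''
    # ${{ env.target_tag != env.version && format('{0} ', env.target_tag) || '' }}
    title += f'{target_tag} ' if target_tag != version else ''
    return f'{title}{version}'

assert release_title('yt-dlp/yt-dlp', '2023.11.16', '2023.11.16') == 'yt-dlp 2023.11.16'
```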
@@ -222,7 +222,7 @@ ## Adding support for a new site

        $ flake8 yt_dlp/extractor/yourextractor.py

1. Make sure your code works under all [Python](https://www.python.org/) versions supported by yt-dlp, namely CPython and PyPy for Python 3.7 and above. Backward compatibility is not required for even older versions of Python.
1. Make sure your code works under all [Python](https://www.python.org/) versions supported by yt-dlp, namely CPython and PyPy for Python 3.8 and above. Backward compatibility is not required for even older versions of Python.
1. When the tests pass, [add](https://git-scm.com/docs/git-add) the new files, [commit](https://git-scm.com/docs/git-commit) them and [push](https://git-scm.com/docs/git-push) the result, like this:

        $ git add yt_dlp/extractor/_extractors.py
15
CONTRIBUTORS
@@ -513,3 +513,18 @@ awalgarg
midnightveil
naginatana
Riteo
1100101
aniolpages
bartbroere
CrendKing
Esokrates
HitomaruKonpaku
LoserFox
peci1
saintliao
shubhexists
SirElderling
almx
elivinsky
starius
TravisDupes
85
Changelog.md
@@ -4,6 +4,91 @@ # Changelog
# To create a release, dispatch the https://github.com/yt-dlp/yt-dlp/actions/workflows/release.yml workflow on master
-->

### 2023.11.16

#### Extractor changes
- **abc.net.au**: iview, showseries: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/15cb3528cbda7b6198f49a6b5953c226d701696b) ([#8586](https://github.com/yt-dlp/yt-dlp/issues/8586)) by [bashonly](https://github.com/bashonly)
- **beatbump**: [Update `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/21dc069bea2d4d99345dd969e098f4535c751d45) ([#8576](https://github.com/yt-dlp/yt-dlp/issues/8576)) by [seproDev](https://github.com/seproDev)
- **dailymotion**: [Improve `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/a489f071508ec5caf5f32052d142afe86c28df7a) ([#7692](https://github.com/yt-dlp/yt-dlp/issues/7692)) by [TravisDupes](https://github.com/TravisDupes)
- **drtv**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/0783fd558ed0d3a8bc754beb75a406256f8b97b2) ([#8484](https://github.com/yt-dlp/yt-dlp/issues/8484)) by [almx](https://github.com/almx), [seproDev](https://github.com/seproDev)
- **eltrecetv**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/dcfad52812aa8ce007cefbfbe63f58b49f6b1046) ([#8216](https://github.com/yt-dlp/yt-dlp/issues/8216)) by [elivinsky](https://github.com/elivinsky)
- **jiosaavn**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/b530118e7f48232cacf8050d79a6b20bdfcf5468) ([#8307](https://github.com/yt-dlp/yt-dlp/issues/8307)) by [awalgarg](https://github.com/awalgarg)
- **njpwworld**: [Remove](https://github.com/yt-dlp/yt-dlp/commit/e569c2d1f4b665795a2b64f0aaf7f76930664233) ([#8570](https://github.com/yt-dlp/yt-dlp/issues/8570)) by [aarubui](https://github.com/aarubui)
- **tv5mondeplus**: [Extract subtitles](https://github.com/yt-dlp/yt-dlp/commit/0f634dba3afdc429ece8839b02f6d56c27b7973a) ([#4209](https://github.com/yt-dlp/yt-dlp/issues/4209)) by [FrankZ85](https://github.com/FrankZ85)
- **twitcasting**: [Fix livestream detection](https://github.com/yt-dlp/yt-dlp/commit/2325d03aa7bb80f56ba52cd6992258e44727b424) ([#8574](https://github.com/yt-dlp/yt-dlp/issues/8574)) by [JC-Chung](https://github.com/JC-Chung)
- **zenyandex**: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/5efe68b73cbf6e907c2e6a3aa338664385084184) ([#8454](https://github.com/yt-dlp/yt-dlp/issues/8454)) by [starius](https://github.com/starius)

#### Misc. changes
- **build**: [Make `secretstorage` an optional dependency](https://github.com/yt-dlp/yt-dlp/commit/24f827875c6ba513f12ed09a3aef2bbed223760d) ([#8585](https://github.com/yt-dlp/yt-dlp/issues/8585)) by [bashonly](https://github.com/bashonly)

### 2023.11.14

#### Important changes
- **The release channels have been adjusted!**
    * [`master`](https://github.com/yt-dlp/yt-dlp-master-builds) builds are made after each push, containing the latest fixes (but also possibly bugs). This was previously the `nightly` channel.
    * [`nightly`](https://github.com/yt-dlp/yt-dlp-nightly-builds) builds are now made once a day, if there were any changes.
- Security: [[CVE-2023-46121](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-46121)] Patch [Generic Extractor MITM Vulnerability via Arbitrary Proxy Injection](https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-3ch3-jhc6-5r8x)
    - Disallow smuggling of arbitrary `http_headers`; extractors now only use specific headers

#### Core changes
- [Add `--compat-option manifest-filesize-approx`](https://github.com/yt-dlp/yt-dlp/commit/10025b715ea01489557eb2c5a3cc04d361fcdb52) ([#8356](https://github.com/yt-dlp/yt-dlp/issues/8356)) by [bashonly](https://github.com/bashonly)
- [Fix format sorting with `--load-info-json`](https://github.com/yt-dlp/yt-dlp/commit/595ea4a99b726b8fe9463e7853b7053978d0544e) ([#8521](https://github.com/yt-dlp/yt-dlp/issues/8521)) by [bashonly](https://github.com/bashonly)
- [Include build origin in verbose output](https://github.com/yt-dlp/yt-dlp/commit/20314dd46f25e0e0a7e985a7804049aefa8b909f) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
- [Only ensure playlist thumbnail dir if writing thumbs](https://github.com/yt-dlp/yt-dlp/commit/a40e0b37dfc8c26916b0e01aa3f29f3bc42250b6) ([#8373](https://github.com/yt-dlp/yt-dlp/issues/8373)) by [bashonly](https://github.com/bashonly)
- **update**: [Overhaul self-updater](https://github.com/yt-dlp/yt-dlp/commit/0b6ad22e6a432006a75df968f0283e6c6b3cfae6) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)

#### Extractor changes
- [Do not smuggle `http_headers`](https://github.com/yt-dlp/yt-dlp/commit/f04b5bedad7b281bee9814686bba1762bae092eb) by [coletdjnz](https://github.com/coletdjnz)
- [Do not test truth value of `xml.etree.ElementTree.Element`](https://github.com/yt-dlp/yt-dlp/commit/d4f14a72dc1dd79396e0e80980268aee902b61e4) ([#8582](https://github.com/yt-dlp/yt-dlp/issues/8582)) by [bashonly](https://github.com/bashonly)
- **brilliantpala**: [Fix cookies support](https://github.com/yt-dlp/yt-dlp/commit/9b5bedf13a3323074daceb0ec6ebb3cc6e0b9684) ([#8352](https://github.com/yt-dlp/yt-dlp/issues/8352)) by [pzhlkj6612](https://github.com/pzhlkj6612)
- **generic**: [Improve direct video link ext detection](https://github.com/yt-dlp/yt-dlp/commit/4ce2f29a50fcfb9920e6f2ffe42192945a2bad7e) ([#8340](https://github.com/yt-dlp/yt-dlp/issues/8340)) by [bashonly](https://github.com/bashonly)
- **laxarxames**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/312a2d1e8bc247264f9d85c5ec764e33aa0133b5) ([#8412](https://github.com/yt-dlp/yt-dlp/issues/8412)) by [aniolpages](https://github.com/aniolpages)
- **n-tv.de**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/8afd9468b0c822843bc480d366d1c86698daabfb) ([#8414](https://github.com/yt-dlp/yt-dlp/issues/8414)) by [1100101](https://github.com/1100101)
- **neteasemusic**: [Improve metadata extraction](https://github.com/yt-dlp/yt-dlp/commit/46acc418a53470b7f32581b3309c3cb87aa8488d) ([#8531](https://github.com/yt-dlp/yt-dlp/issues/8531)) by [LoserFox](https://github.com/LoserFox)
- **nhk**: [Improve metadata extraction](https://github.com/yt-dlp/yt-dlp/commit/54579be4364e148277c32e20a5c3efc2c3f52f5b) ([#8388](https://github.com/yt-dlp/yt-dlp/issues/8388)) by [garret1317](https://github.com/garret1317)
- **novaembed**: [Improve `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/3ff494f6f41c27549420fa88be27555bd449ffdc) ([#8368](https://github.com/yt-dlp/yt-dlp/issues/8368)) by [peci1](https://github.com/peci1)
- **npo**: [Send `POST` request to streams API endpoint](https://github.com/yt-dlp/yt-dlp/commit/8e02a4dcc800f9444e9d461edc41edd7b662f435) ([#8413](https://github.com/yt-dlp/yt-dlp/issues/8413)) by [bartbroere](https://github.com/bartbroere)
- **ondemandkorea**: [Overhaul extractor](https://github.com/yt-dlp/yt-dlp/commit/05adfd883a4f2ecae0267e670a62a2e45c351aeb) ([#8386](https://github.com/yt-dlp/yt-dlp/issues/8386)) by [seproDev](https://github.com/seproDev)
- **orf**: podcast: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/6ba3085616652cbf05d1858efc321fdbfc4c6119) ([#8486](https://github.com/yt-dlp/yt-dlp/issues/8486)) by [Esokrates](https://github.com/Esokrates)
- **polskieradio**: audition: [Fix playlist extraction](https://github.com/yt-dlp/yt-dlp/commit/464327acdb353ceb91d2115163a5a9621b22fe0d) ([#8459](https://github.com/yt-dlp/yt-dlp/issues/8459)) by [shubhexists](https://github.com/shubhexists)
- **qdance**: [Update `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/177f0d963e4b9db749805c482e6f288354c8be84) ([#8426](https://github.com/yt-dlp/yt-dlp/issues/8426)) by [bashonly](https://github.com/bashonly)
- **radiocomercial**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/ef12dbdcd3e7264bd3d744c1e3107597bd23ad35) ([#8508](https://github.com/yt-dlp/yt-dlp/issues/8508)) by [SirElderling](https://github.com/SirElderling)
- **sbs.co.kr**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/25a4bd345a0dcfece6fef752d4537eb403da94d9) ([#8326](https://github.com/yt-dlp/yt-dlp/issues/8326)) by [seproDev](https://github.com/seproDev)
- **theatercomplextown**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/2863fcf2b6876d0c7965ff7d6d9242eea653dc6b) ([#8560](https://github.com/yt-dlp/yt-dlp/issues/8560)) by [bashonly](https://github.com/bashonly)
- **thisav**: [Remove](https://github.com/yt-dlp/yt-dlp/commit/cb480e390d85fb3a598c1b6d5eef3438ce729fc9) ([#8346](https://github.com/yt-dlp/yt-dlp/issues/8346)) by [bashonly](https://github.com/bashonly)
- **thisoldhouse**: [Add login support](https://github.com/yt-dlp/yt-dlp/commit/c76c96677ff6a056f5844a568ef05ee22c46d6f4) ([#8561](https://github.com/yt-dlp/yt-dlp/issues/8561)) by [bashonly](https://github.com/bashonly)
- **twitcasting**: [Fix livestream extraction](https://github.com/yt-dlp/yt-dlp/commit/7b8b1cf5eb8bf44ce70bc24e1f56f0dba2737e98) ([#8427](https://github.com/yt-dlp/yt-dlp/issues/8427)) by [JC-Chung](https://github.com/JC-Chung), [saintliao](https://github.com/saintliao)
- **twitter**
    - broadcast
        - [Improve metadata extraction](https://github.com/yt-dlp/yt-dlp/commit/7d337ca977d73a0a6c07ab481ed8faa8f6ff8726) ([#8383](https://github.com/yt-dlp/yt-dlp/issues/8383)) by [HitomaruKonpaku](https://github.com/HitomaruKonpaku)
        - [Support `--wait-for-video`](https://github.com/yt-dlp/yt-dlp/commit/f6e97090d2ed9e05441ab0f4bec3559b816d7a00) ([#8475](https://github.com/yt-dlp/yt-dlp/issues/8475)) by [bashonly](https://github.com/bashonly)
- **weibo**: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/15b252dfd2c6807fe57afc5a95e59abadb32ccd2) ([#8463](https://github.com/yt-dlp/yt-dlp/issues/8463)) by [c-basalt](https://github.com/c-basalt)
- **weverse**: [Fix login error handling](https://github.com/yt-dlp/yt-dlp/commit/4a601c9eff9fb42e24a4c8da3fa03628e035b35b) ([#8458](https://github.com/yt-dlp/yt-dlp/issues/8458)) by [seproDev](https://github.com/seproDev)
- **youtube**: [Check newly uploaded iOS HLS formats](https://github.com/yt-dlp/yt-dlp/commit/ef79d20dc9d27ac002a7196f073b37f2f2721aed) ([#8336](https://github.com/yt-dlp/yt-dlp/issues/8336)) by [bashonly](https://github.com/bashonly)
- **zoom**: [Extract combined view formats](https://github.com/yt-dlp/yt-dlp/commit/3906de07551fedb00b789345bf24cc27d6ddf128) ([#7847](https://github.com/yt-dlp/yt-dlp/issues/7847)) by [Mipsters](https://github.com/Mipsters)

#### Downloader changes
- **aria2c**: [Remove duplicate `--file-allocation=none`](https://github.com/yt-dlp/yt-dlp/commit/21b25281c51523620706b11bfc1c4a889858e1f2) ([#8332](https://github.com/yt-dlp/yt-dlp/issues/8332)) by [CrendKing](https://github.com/CrendKing)
- **dash**: [Force native downloader for `--live-from-start`](https://github.com/yt-dlp/yt-dlp/commit/2622c804d1a5accc3045db398e0fc52074f4bdb3) ([#8339](https://github.com/yt-dlp/yt-dlp/issues/8339)) by [bashonly](https://github.com/bashonly)

#### Networking changes
- **Request Handler**: requests: [Add handler for `requests` HTTP library (#3668)](https://github.com/yt-dlp/yt-dlp/commit/8a8b54523addf46dfd50ef599761a81bc22362e6) by [bashonly](https://github.com/bashonly), [coletdjnz](https://github.com/coletdjnz), [Grub4K](https://github.com/Grub4K) (With fixes in [4e38e2a](https://github.com/yt-dlp/yt-dlp/commit/4e38e2ae9d7380015349e6aee59c78bb3938befd))

    Adds support for HTTPS proxies and persistent connections (keep-alive)

#### Misc. changes
- **build**
    - [Include secretstorage in Linux builds](https://github.com/yt-dlp/yt-dlp/commit/9970d74c8383432c6c8779aa47d3253dcf412b14) by [bashonly](https://github.com/bashonly)
    - [Overhaul and unify release workflow](https://github.com/yt-dlp/yt-dlp/commit/1d03633c5a1621b9f3a756f0a4f9dc61fab3aeaa) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
- **ci**
    - [Bump `actions/checkout` to v4](https://github.com/yt-dlp/yt-dlp/commit/5438593a35b7b042fc48fe29cad0b9039f07c9bb) by [bashonly](https://github.com/bashonly)
    - [Run core tests with dependencies](https://github.com/yt-dlp/yt-dlp/commit/700444c23ddb65f618c2abd942acdc0c58c650b1) by [bashonly](https://github.com/bashonly), [coletdjnz](https://github.com/coletdjnz)
- **cleanup**
    - [Fix changelog typo](https://github.com/yt-dlp/yt-dlp/commit/a9d3f4b20a3533d2a40104c85bc2cc6c2564c800) by [bashonly](https://github.com/bashonly)
    - [Update documentation for master and nightly channels](https://github.com/yt-dlp/yt-dlp/commit/a00af29853b8c7350ce086f4cab8c2c9cf2fcf1d) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
    - Miscellaneous: [b012271](https://github.com/yt-dlp/yt-dlp/commit/b012271d01b59759e4eefeab0308698cd9e7224c) by [bashonly](https://github.com/bashonly), [coletdjnz](https://github.com/coletdjnz), [dirkf](https://github.com/dirkf), [gamer191](https://github.com/gamer191), [Grub4K](https://github.com/Grub4K), [seproDev](https://github.com/seproDev)
- **test**: update: [Implement simple updater unit tests](https://github.com/yt-dlp/yt-dlp/commit/87264d4fdadcddd91289b968dd0e4bf58d449267) by [bashonly](https://github.com/bashonly)

### 2023.10.13

#### Core changes
48
README.md
@@ -121,7 +121,7 @@ # NEW FEATURES

* **Self updater**: The releases can be updated using `yt-dlp -U`, and downgraded using `--update-to` if required

* **Nightly builds**: [Automated nightly builds](#update-channels) can be used with `--update-to nightly`
* **Automated builds**: [Nightly/master builds](#update-channels) can be used with `--update-to nightly` and `--update-to master`

See [changelog](Changelog.md) or [commits](https://github.com/yt-dlp/yt-dlp/commits) for the full list of changes

@@ -131,7 +131,7 @@ ### Differences in default behavior

Some of yt-dlp's default options are different from that of youtube-dl and youtube-dlc:

* yt-dlp supports only [Python 3.7+](## "Windows 7"), and *may* remove support for more versions as they [become EOL](https://devguide.python.org/versions/#python-release-cycle); while [youtube-dl still supports Python 2.6+ and 3.2+](https://github.com/ytdl-org/youtube-dl/issues/30568#issue-1118238743)
* yt-dlp supports only [Python 3.8+](## "Windows 7"), and *may* remove support for more versions as they [become EOL](https://devguide.python.org/versions/#python-release-cycle); while [youtube-dl still supports Python 2.6+ and 3.2+](https://github.com/ytdl-org/youtube-dl/issues/30568#issue-1118238743)
* The options `--auto-number` (`-A`), `--title` (`-t`) and `--literal` (`-l`), no longer work. See [removed options](#Removed) for details
* `avconv` is not supported as an alternative to `ffmpeg`
* yt-dlp stores config files in slightly different locations to youtube-dl. See [CONFIGURATION](#configuration) for a list of correct locations

@@ -157,15 +157,16 @@ ### Differences in default behavior
* yt-dlp's sanitization of invalid characters in filenames is different/smarter than in youtube-dl. You can use `--compat-options filename-sanitization` to revert to youtube-dl's behavior
* yt-dlp tries to parse the external downloader outputs into the standard progress output if possible (Currently implemented: [~~aria2c~~](https://github.com/yt-dlp/yt-dlp/issues/5931)). You can use `--compat-options no-external-downloader-progress` to get the downloader output as-is
* yt-dlp versions between 2021.09.01 and 2023.01.02 applies `--match-filter` to nested playlists. This was an unintentional side-effect of [8f18ac](https://github.com/yt-dlp/yt-dlp/commit/8f18aca8717bb0dd49054555af8d386e5eda3a88) and is fixed in [d7b460](https://github.com/yt-dlp/yt-dlp/commit/d7b460d0e5fc710950582baed2e3fc616ed98a80). Use `--compat-options playlist-match-filter` to revert this
* yt-dlp versions between 2021.11.10 and 2023.06.21 estimated `filesize_approx` values for fragmented/manifest formats. This was added for convenience in [f2fe69](https://github.com/yt-dlp/yt-dlp/commit/f2fe69c7b0d208bdb1f6292b4ae92bc1e1a7444a), but was reverted in [0dff8e](https://github.com/yt-dlp/yt-dlp/commit/0dff8e4d1e6e9fb938f4256ea9af7d81f42fd54f) due to the potentially extreme inaccuracy of the estimated values. Use `--compat-options manifest-filesize-approx` to keep extracting the estimated values
* yt-dlp uses modern http client backends such as `requests`. Use `--compat-options prefer-legacy-http-handler` to prefer the legacy http handler (`urllib`) to be used for standard http requests.

For ease of use, a few more compat options are available (a Python sketch of setting one programmatically follows this list):

* `--compat-options all`: Use all compat options (Do NOT use)
* `--compat-options youtube-dl`: Same as `--compat-options all,-multistreams,-playlist-match-filter`
* `--compat-options youtube-dlc`: Same as `--compat-options all,-no-live-chat,-no-youtube-channel-redirect,-playlist-match-filter`
* `--compat-options youtube-dl`: Same as `--compat-options all,-multistreams,-playlist-match-filter,-manifest-filesize-approx`
* `--compat-options youtube-dlc`: Same as `--compat-options all,-no-live-chat,-no-youtube-channel-redirect,-playlist-match-filter,-manifest-filesize-approx`
* `--compat-options 2021`: Same as `--compat-options 2022,no-certifi,filename-sanitization,no-youtube-prefer-utc-upload-date`
* `--compat-options 2022`: Same as `--compat-options playlist-match-filter,no-external-downloader-progress,prefer-legacy-http-handler`. Use this to enable all future compat options
* `--compat-options 2022`: Same as `--compat-options playlist-match-filter,no-external-downloader-progress,prefer-legacy-http-handler,manifest-filesize-approx`. Use this to enable all future compat options
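For users embedding yt-dlp, a minimal sketch of the equivalent setting (the `compat_opts` key is the embedding counterpart of `--compat-options`; the option name is taken from the changelog entry above):

```
# Minimal sketch: enabling a compat option through the Python embedding API
import yt_dlp

with yt_dlp.YoutubeDL({'compat_opts': ['manifest-filesize-approx']}) as ydl:
    # fragmented/manifest formats will again carry estimated filesize_approx
    print(ydl.params['compat_opts'])
```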
# INSTALLATION

@@ -192,9 +193,11 @@ ## UPDATE

<a id="update-channels"/>

There are currently two release channels for binaries, `stable` and `nightly`.
`stable` is the default channel, and many of its changes have been tested by users of the nightly channel.
The `nightly` channel has releases built after each push to the master branch, and will have the most recent fixes and additions, but also have more risk of regressions. They are available in [their own repo](https://github.com/yt-dlp/yt-dlp-nightly-builds/releases).
There are currently three release channels for binaries: `stable`, `nightly` and `master`.

* `stable` is the default channel, and many of its changes have been tested by users of the `nightly` and `master` channels.
* The `nightly` channel has releases scheduled to build every day around midnight UTC, for a snapshot of the project's new patches and changes. This is the **recommended channel for regular users** of yt-dlp. The `nightly` releases are available from [yt-dlp/yt-dlp-nightly-builds](https://github.com/yt-dlp/yt-dlp-nightly-builds/releases) or as development releases of the `yt-dlp` PyPI package (which can be installed with pip's `--pre` flag).
* The `master` channel features releases that are built after each push to the master branch, and these will have the very latest fixes and additions, but may also be more prone to regressions. They are available from [yt-dlp/yt-dlp-master-builds](https://github.com/yt-dlp/yt-dlp-master-builds/releases).

When using `--update`/`-U`, a release binary will only update to its current channel.
`--update-to CHANNEL` can be used to switch to a different channel when a newer version is available. `--update-to [CHANNEL@]TAG` can also be used to upgrade or downgrade to specific tags from a channel.

@@ -202,10 +205,19 @@ ## UPDATE
You may also use `--update-to <repository>` (`<owner>/<repository>`) to update to a channel on a completely different repository. Be careful with what repository you are updating to though, there is no verification done for binaries from different repositories.

Example usage:
* `yt-dlp --update-to nightly` change to `nightly` channel and update to its latest release
* `yt-dlp --update-to stable@2023.02.17` upgrade/downgrade to release to `stable` channel tag `2023.02.17`
* `yt-dlp --update-to 2023.01.06` upgrade/downgrade to tag `2023.01.06` if it exists on the current channel
* `yt-dlp --update-to example/yt-dlp@2023.03.01` upgrade/downgrade to the release from the `example/yt-dlp` repository, tag `2023.03.01`
* `yt-dlp --update-to master` switch to the `master` channel and update to its latest release
* `yt-dlp --update-to stable@2023.07.06` upgrade/downgrade to release to `stable` channel tag `2023.07.06`
* `yt-dlp --update-to 2023.10.07` upgrade/downgrade to tag `2023.10.07` if it exists on the current channel
* `yt-dlp --update-to example/yt-dlp@2023.09.24` upgrade/downgrade to the release from the `example/yt-dlp` repository, tag `2023.09.24`

**Important**: Any user experiencing an issue with the `stable` release should install or update to the `nightly` release before submitting a bug report:
```
# To update to nightly from stable executable/binary:
yt-dlp --update-to nightly

# To install nightly with pip:
python -m pip install -U --pre yt-dlp
```
<!-- MANPAGE: BEGIN EXCLUDED SECTION -->
## RELEASE FILES

@@ -254,7 +266,7 @@ #### Misc
**Note**: The manpages, shell completion (autocomplete) files etc. are available inside the [source tarball](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp.tar.gz)

## DEPENDENCIES
Python versions 3.7+ (CPython and PyPy) are supported. Other versions and implementations may or may not work correctly.
Python versions 3.8+ (CPython and PyPy) are supported. Other versions and implementations may or may not work correctly.

<!-- Python 3.5+ uses VC++14 and it is already embedded in the binary created
<!x-- https://www.microsoft.com/en-us/download/details.aspx?id=26999 --x>

@@ -322,7 +334,7 @@ ### Standalone PyInstaller Builds
**Important**: Running `pyinstaller` directly **without** using `pyinst.py` is **not** officially supported. This may or may not work correctly.

### Platform-independent Binary (UNIX)
You will need the build tools `python` (3.7+), `zip`, `make` (GNU), `pandoc`\* and `pytest`\*.
You will need the build tools `python` (3.8+), `zip`, `make` (GNU), `pandoc`\* and `pytest`\*.

After installing these, simply run `make`.

@@ -368,7 +380,8 @@ ## General Options:
                       CHANNEL can be a repository as well. CHANNEL
                       and TAG default to "stable" and "latest"
                       respectively if omitted; See "UPDATE" for
                       details. Supported channels: stable, nightly
                       details. Supported channels: stable,
                       nightly, master
    -i, --ignore-errors
                       Ignore download and postprocessing errors.
                       The download will be considered successful
                       even if the postprocessing fails

@@ -1255,7 +1268,7 @@ # OUTPUT TEMPLATE

1. **Object traversal**: The dictionaries and lists available in metadata can be traversed by using a dot `.` separator; e.g. `%(tags.0)s`, `%(subtitles.en.-1.ext)s`. You can do Python slicing with colon `:`; E.g. `%(id.3:7:-1)s`, `%(formats.:.format_id)s`. Curly braces `{}` can be used to build dictionaries with only specific keys; e.g. `%(formats.:.{format_id,height})#j`. An empty field name `%()s` refers to the entire infodict; e.g. `%(.{id,title})s`. Note that all the fields that become available using this method are not listed below. Use `-j` to see such fields

1. **Addition**: Addition and subtraction of numeric fields can be done using `+` and `-` respectively. E.g. `%(playlist_index+10)03d`, `%(n_entries+1-playlist_index)d`
1. **Arithmetic**: Simple arithmetic can be done on numeric fields using `+`, `-` and `*`. E.g. `%(playlist_index+10)03d`, `%(n_entries+1-playlist_index)d`

1. **Date/time Formatting**: Date/time fields can be formatted according to [strftime formatting](https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes) by specifying it separated from the field name using a `>`. E.g. `%(duration>%H-%M-%S)s`, `%(upload_date>%Y-%m-%d)s`, `%(epoch-3600>%H-%M-%S)s`
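The traversal, arithmetic and strftime rules above can be tried directly from Python; a minimal sketch using the `YoutubeDL.evaluate_outtmpl` helper that yt-dlp's own test suite exercises (the info-dict values below are made up for illustration):

```
# Minimal sketch: evaluating output templates against a fake info dict
import yt_dlp

ydl = yt_dlp.YoutubeDL()
info = {'id': 'abcdefgh', 'playlist_index': 3, 'n_entries': 12, 'duration': 3723}

print(ydl.evaluate_outtmpl('%(playlist_index+10)03d', info))  # arithmetic -> '013'
print(ydl.evaluate_outtmpl('%(id.3:7)s', info))               # slicing    -> 'defg'
print(ydl.evaluate_outtmpl('%(duration>%H-%M-%S)s', info))    # strftime   -> '01-02-03'
```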
@@ -1296,6 +1309,7 @@ # OUTPUT TEMPLATE
 - `upload_date` (string): Video upload date in UTC (YYYYMMDD)
 - `release_timestamp` (numeric): UNIX timestamp of the moment the video was released
 - `release_date` (string): The date (YYYYMMDD) when the video was released in UTC
 - `release_year` (numeric): Year (YYYY) when the video or album was released
 - `modified_timestamp` (numeric): UNIX timestamp of the moment the video was last modified
 - `modified_date` (string): The date (YYYYMMDD) when the video was last modified in UTC
 - `uploader_id` (string): Nickname or id of the video uploader

@@ -1319,6 +1333,7 @@ # OUTPUT TEMPLATE
 - `was_live` (boolean): Whether this video was originally a live stream
 - `playable_in_embed` (string): Whether this video is allowed to play in embedded players on other sites
 - `availability` (string): Whether the video is "private", "premium_only", "subscriber_only", "needs_auth", "unlisted" or "public"
 - `media_type` (string): The type of media as classified by the site, e.g. "episode", "clip", "trailer"
 - `start_time` (numeric): Time in seconds where the reproduction should start, as specified in the URL
 - `end_time` (numeric): Time in seconds where the reproduction should end, as specified in the URL
 - `extractor` (string): Name of the extractor

@@ -1369,7 +1384,6 @@ # OUTPUT TEMPLATE
 - `album_type` (string): Type of the album
 - `album_artist` (string): List of all artists appeared on the album
 - `disc_number` (numeric): Number of the disc or other physical medium the track belongs to
 - `release_year` (numeric): Year (YYYY) when the album was released

Available only when using `--download-sections` and for `chapter:` prefix when using `--split-chapters` for videos with internal chapters:
@@ -98,5 +98,21 @@
        "action": "add",
        "when": "61bdf15fc7400601c3da1aa7a43917310a5bf391",
        "short": "[priority] Security: [[CVE-2023-40581](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-40581)] [Prevent RCE when using `--exec` with `%q` on Windows](https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-42h4-v29r-42qg)\n - The shell escape function is now using `\"\"` instead of `\\\"`.\n - `utils.Popen` has been patched to properly quote commands."
    },
    {
        "action": "change",
        "when": "8a8b54523addf46dfd50ef599761a81bc22362e6",
        "short": "[rh:requests] Add handler for `requests` HTTP library (#3668)\n\n\tAdds support for HTTPS proxies and persistent connections (keep-alive)",
        "authors": ["bashonly", "coletdjnz", "Grub4K"]
    },
    {
        "action": "add",
        "when": "1d03633c5a1621b9f3a756f0a4f9dc61fab3aeaa",
        "short": "[priority] **The release channels have been adjusted!**\n\t* [`master`](https://github.com/yt-dlp/yt-dlp-master-builds) builds are made after each push, containing the latest fixes (but also possibly bugs). This was previously the `nightly` channel.\n\t* [`nightly`](https://github.com/yt-dlp/yt-dlp-nightly-builds) builds are now made once a day, if there were any changes."
    },
    {
        "action": "add",
        "when": "f04b5bedad7b281bee9814686bba1762bae092eb",
        "short": "[priority] Security: [[CVE-2023-46121](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-46121)] Patch [Generic Extractor MITM Vulnerability via Arbitrary Proxy Injection](https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-3ch3-jhc6-5r8x)\n\t- Disallow smuggling of arbitrary `http_headers`; extractors now only use specific headers"
    }
]
@@ -12,7 +12,6 @@
from devscripts.utils import (
    get_filename_args,
    read_file,
    read_version,
    write_file,
)
@@ -35,19 +34,18 @@
      description: |
        It should start like this:
      placeholder: |
        [debug] Command-line config: ['-vU', 'test:youtube']
        [debug] Portable config "yt-dlp.conf": ['-i']
        [debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
        [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
        [debug] yt-dlp version %(version)s [9d339c4] (win32_exe)
        [debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
        [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
        [debug] Checking exe version: ffmpeg -bsfs
        [debug] Checking exe version: ffprobe -bsfs
        [debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
        [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
        [debug] Proxy map: {}
        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
        Latest version: %(version)s, Current version: %(version)s
        yt-dlp is up to date (%(version)s)
        [debug] Request Handlers: urllib, requests
        [debug] Loaded 1893 extractors
        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
        yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
        [youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
        <more lines>
      render: shell
    validations:
@@ -66,7 +64,7 @@


def main():
    fields = {'version': read_version(), 'no_skip': NO_SKIP}
    fields = {'no_skip': NO_SKIP}
    fields['verbose'] = VERBOSE_TMPL % fields
    fields['verbose_optional'] = re.sub(r'(\n\s+validations:)?\n\s+required: true', '', fields['verbose'])
@@ -1,39 +0,0 @@
#!/usr/bin/env python3

"""
Usage: python3 ./devscripts/update-formulae.py <path-to-formulae-rb> <version>
version can be either 0-aligned (yt-dlp version) or normalized (PyPi version)
"""

# Allow direct execution
import os
import sys

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


import json
import re
import urllib.request

from devscripts.utils import read_file, write_file

filename, version = sys.argv[1:]

normalized_version = '.'.join(str(int(x)) for x in version.split('.'))

pypi_release = json.loads(urllib.request.urlopen(
    'https://pypi.org/pypi/yt-dlp/%s/json' % normalized_version
).read().decode())

tarball_file = next(x for x in pypi_release['urls'] if x['filename'].endswith('.tar.gz'))

sha256sum = tarball_file['digests']['sha256']
url = tarball_file['url']

formulae_text = read_file(filename)

formulae_text = re.sub(r'sha256 "[0-9a-f]*?"', 'sha256 "%s"' % sha256sum, formulae_text, count=1)
formulae_text = re.sub(r'url "[^"]*?"', 'url "%s"' % url, formulae_text, count=1)

write_file(filename, formulae_text)
@@ -20,7 +20,7 @@ def get_new_version(version, revision):
    version = datetime.now(timezone.utc).strftime('%Y.%m.%d')

    if revision:
        assert revision.isdigit(), 'Revision must be a number'
        assert revision.isdecimal(), 'Revision must be a number'
    else:
        old_version = read_version().split('.')
        if version.split('.') == old_version[:3]:
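The switch from `str.isdigit` to `str.isdecimal` tightens the check: `isdigit` also accepts characters such as superscripts that `int()` cannot parse, while `isdecimal` only accepts characters valid in a decimal integer. A quick illustration:

```
# Why isdecimal is the safer guard for a numeric revision string
assert '123'.isdigit() and '123'.isdecimal()
assert '²'.isdigit()           # superscript two passes isdigit...
assert not '²'.isdecimal()     # ...but is not a decimal digit
# int('²') raises ValueError, so isdigit alone would let a bad value through
```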
@@ -46,6 +46,10 @@ def get_git_head():
UPDATE_HINT = None

CHANNEL = {channel!r}

ORIGIN = {origin!r}

_pkg_version = {package_version!r}
'''

if __name__ == '__main__':
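For context, filling that template produces a module along these lines; the values below are illustrative, not taken from a real build:

```
# Hypothetical yt_dlp/version.py as generated by devscripts/update_version.py
__version__ = '2023.11.16'

CHANNEL = 'stable'

ORIGIN = 'yt-dlp/yt-dlp'

_pkg_version = '2023.11.16'
```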
@@ -53,6 +57,12 @@ def get_git_head():
    parser.add_argument(
        '-c', '--channel', default='stable',
        help='Select update channel (default: %(default)s)')
    parser.add_argument(
        '-r', '--origin', default='local',
        help='Select origin/repository (default: %(default)s)')
    parser.add_argument(
        '-s', '--suffix', default='',
        help='Add an alphanumeric suffix to the package version, e.g. "dev"')
    parser.add_argument(
        '-o', '--output', default='yt_dlp/version.py',
        help='The output file to write to (default: %(default)s)')

@@ -66,6 +76,7 @@ def get_git_head():
        args.version if args.version and '.' in args.version
        else get_new_version(None, args.version))
    write_file(args.output, VERSION_TEMPLATE.format(
        version=version, git_head=git_head, channel=args.channel))
        version=version, git_head=git_head, channel=args.channel, origin=args.origin,
        package_version=f'{version}{args.suffix}'))

    print(f'version={version} ({args.channel}), head={git_head}')
@@ -13,10 +13,11 @@ def write_file(fname, content, mode='w'):
        return f.write(content)


def read_version(fname='yt_dlp/version.py'):
def read_version(fname='yt_dlp/version.py', varname='__version__'):
    """Get the version without importing the package"""
    exec(compile(read_file(fname), fname, 'exec'))
    return locals()['__version__']
    items = {}
    exec(compile(read_file(fname), fname, 'exec'), items)
    return items[varname]


def get_filename_args(has_infile=False, default_outfile=None):
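The new `varname` parameter lets callers pull any top-level name out of `yt_dlp/version.py`, which is how `setup.py` below switches to the suffixed package version. A minimal sketch of both call styles, assuming it runs from the repository root:

```
# Minimal usage sketch for the updated helper
from devscripts.utils import read_version

print(read_version())                        # default '__version__', e.g. '2023.11.16'
print(read_version(varname='_pkg_version'))  # PyPI package version, may carry a suffix
print(read_version(varname='CHANNEL'))       # e.g. 'stable'
```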
@@ -1,8 +1,9 @@
mutagen
pycryptodomex
websockets
brotli; platform_python_implementation=='CPython'
brotlicffi; platform_python_implementation!='CPython'
brotli; implementation_name=='cpython'
brotlicffi; implementation_name!='cpython'
certifi
requests>=2.31.0,<3
urllib3>=1.26.17,<3
websockets>=12.0
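The updated markers use PEP 508's `implementation_name` (always lowercase, e.g. `cpython`) instead of `platform_python_implementation`. Their effect can be checked with the third-party `packaging` library; a small sketch:

```
# Minimal sketch: evaluating the PEP 508 markers from requirements.txt
# against the running interpreter (requires the 'packaging' package)
from packaging.markers import Marker

print(Marker("implementation_name == 'cpython'").evaluate())  # True on CPython
print(Marker("implementation_name != 'cpython'").evaluate())  # True on PyPy etc.
```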
@@ -26,7 +26,7 @@ markers =

[tox:tox]
skipsdist = true
envlist = py{36,37,38,39,310,311},pypy{36,37,38,39}
envlist = py{38,39,310,311,312},pypy{38,39,310}
skip_missing_interpreters = true

[testenv]  # tox

@@ -39,7 +39,7 @@ setenv =


[isort]
py_version = 37
py_version = 38
multi_line_output = VERTICAL_HANGING_INDENT
line_length = 80
reverse_relative = true
8
setup.py
@@ -18,7 +18,7 @@

from devscripts.utils import read_file, read_version

VERSION = read_version()
VERSION = read_version(varname='_pkg_version')

DESCRIPTION = 'A youtube-dl fork with additional features and patches'

@@ -142,7 +142,7 @@ def main():
    params = build_params()

    setup(
        name='yt-dlp',
        name='yt-dlp',  # package name (do not change/remove comment)
        version=VERSION,
        maintainer='pukkandan',
        maintainer_email='pukkandan.ytdlp@gmail.com',

@@ -152,7 +152,7 @@ def main():
        url='https://github.com/yt-dlp/yt-dlp',
        packages=packages(),
        install_requires=REQUIREMENTS,
        python_requires='>=3.7',
        python_requires='>=3.8',
        project_urls={
            'Documentation': 'https://github.com/yt-dlp/yt-dlp#readme',
            'Source': 'https://github.com/yt-dlp/yt-dlp',

@@ -164,11 +164,11 @@ def main():
            'Development Status :: 5 - Production/Stable',
            'Environment :: Console',
            'Programming Language :: Python',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
            'Programming Language :: Python :: 3.9',
            'Programming Language :: Python :: 3.10',
            'Programming Language :: Python :: 3.11',
            'Programming Language :: Python :: 3.12',
            'Programming Language :: Python :: Implementation',
            'Programming Language :: Python :: Implementation :: CPython',
            'Programming Language :: Python :: Implementation :: PyPy',
@@ -414,6 +414,7 @@ # Supported sites
 - **EllenTubeVideo**
 - **Elonet**
 - **ElPais**: El País
 - **ElTreceTV**: El Trece TV (Argentina)
 - **Embedly**
 - **EMPFlix**
 - **Engadget**

@@ -654,6 +655,8 @@ # Supported sites
 - **Jamendo**
 - **JamendoAlbum**
 - **JeuxVideo**
 - **JioSaavnAlbum**
 - **JioSaavnSong**
 - **Joj**
 - **Jove**
 - **JStream**

@@ -700,6 +703,7 @@ # Supported sites
 - **LastFM**
 - **LastFMPlaylist**
 - **LastFMUser**
 - **LaXarxaMes**: [*laxarxames*](## "netrc machine")
 - **lbry**
 - **lbry:channel**
 - **lbry:playlist**

@@ -975,7 +979,6 @@ # Supported sites
 - **Nitter**
 - **njoy**: N-JOY
 - **njoy:embed**
 - **NJPWWorld**: [*njpwworld*](## "netrc machine") 新日本プロレスワールド
 - **NobelPrize**
 - **NoicePodcast**
 - **NonkTube**

@@ -1026,6 +1029,7 @@ # Supported sites
 - **on24**: ON24
 - **OnDemandChinaEpisode**
 - **OnDemandKorea**
 - **OnDemandKoreaProgram**
 - **OneFootball**
 - **OnePlacePodcast**
 - **onet.pl**

@@ -1043,6 +1047,7 @@ # Supported sites
 - **OraTV**
 - **orf:fm4:story**: fm4.orf.at stories
 - **orf:iptv**: iptv.ORF.at
 - **orf:podcast**
 - **orf:radio**
 - **orf:tvthek**: ORF TVthek
 - **OsnatelTV**: [*osnateltv*](## "netrc machine")

@@ -1180,6 +1185,8 @@ # Supported sites
 - **radiobremen**
 - **radiocanada**
 - **radiocanada:audiovideo**
 - **RadioComercial**
 - **RadioComercialPlaylist**
 - **radiofrance**
 - **RadioFranceLive**
 - **RadioFrancePodcast**

@@ -1306,6 +1313,9 @@ # Supported sites
 - **Sapo**: SAPO Vídeos
 - **savefrom.net**
 - **SBS**: sbs.com.au
 - **sbs.co.kr**
 - **sbs.co.kr:allvod_program**
 - **sbs.co.kr:programs_vod**
 - **schooltv**
 - **ScienceChannel**
 - **screen.yahoo:search**: Yahoo screen search; "yvsearch:" prefix

@@ -1474,6 +1484,8 @@ # Supported sites
 - **TenPlaySeason**
 - **TF1**
 - **TFO**
 - **theatercomplextown:ppv**: [*theatercomplextown*](## "netrc machine")
 - **theatercomplextown:vod**: [*theatercomplextown*](## "netrc machine")
 - **TheHoleTv**
 - **TheIntercept**
 - **ThePlatform**

@@ -1482,8 +1494,7 @@ # Supported sites
 - **TheSun**
 - **TheWeatherChannel**
 - **ThisAmericanLife**
 - **ThisAV**
 - **ThisOldHouse**
 - **ThisOldHouse**: [*thisoldhouse*](## "netrc machine")
 - **ThisVid**
 - **ThisVidMember**
 - **ThisVidPlaylist**
@@ -19,3 +19,8 @@ def handler(request):
        pytest.skip(f'{RH_KEY} request handler is not available')

    return functools.partial(handler, logger=FakeLogger)


def validate_and_send(rh, req):
    rh.validate(req)
    return rh.send(req)
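Moving `validate_and_send` into `test/conftest.py` (it previously lived in `test/test_networking.py`, as the next diff shows) lets any networking test share it. A hypothetical usage sketch with the `handler` fixture; the URL assumes a local HTTP test server such as the ones these tests spin up:

```
# Hypothetical test sketch; names mirror the fixtures/helpers in this conftest
from yt_dlp.networking import Request

from test.conftest import validate_and_send


def test_simple_get(handler):  # 'handler' is parametrized indirectly elsewhere
    with handler() as rh:
        res = validate_and_send(rh, Request('http://127.0.0.1:8080/headers'))
        assert res.status == 200
```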
@@ -10,7 +10,7 @@
import yt_dlp.extractor
from yt_dlp import YoutubeDL
from yt_dlp.compat import compat_os_name
from yt_dlp.utils import preferredencoding, write_string
from yt_dlp.utils import preferredencoding, try_call, write_string

if 'pytest' in sys.modules:
    import pytest

@@ -214,14 +214,19 @@ def sanitize(key, value):

    test_info_dict = {
        key: sanitize(key, value) for key, value in got_dict.items()
        if value is not None and key not in IGNORED_FIELDS and not any(
            key.startswith(f'{prefix}_') for prefix in IGNORED_PREFIXES)
        if value is not None and key not in IGNORED_FIELDS and (
            not any(key.startswith(f'{prefix}_') for prefix in IGNORED_PREFIXES)
            or key == '_old_archive_ids')
    }

    # display_id may be generated from id
    if test_info_dict.get('display_id') == test_info_dict.get('id'):
        test_info_dict.pop('display_id')

    # release_year may be generated from release_date
    if try_call(lambda: test_info_dict['release_year'] == int(test_info_dict['release_date'][:4])):
        test_info_dict.pop('release_year')

    # Check url for flat entries
    if got_dict.get('_type', 'video') != 'video' and got_dict.get('url'):
        test_info_dict['url'] = got_dict['url']
@@ -797,6 +797,7 @@ def expect_same_infodict(out):
        test('%(title|%)s %(title|%%)s', '% %%')
        test('%(id+1-height+3)05d', '00158')
        test('%(width+100)05d', 'NA')
        test('%(filesize*8)d', '8192')
        test('%(formats.0) 15s', ('% 15s' % FORMATS[0], None))
        test('%(formats.0)r', (repr(FORMATS[0]), None))
        test('%(height.0)03d', '001')
@@ -52,6 +52,8 @@
from yt_dlp.utils._utils import _YDLLogger as FakeLogger
from yt_dlp.utils.networking import HTTPHeaderDict

from test.conftest import validate_and_send

TEST_DIR = os.path.dirname(os.path.abspath(__file__))


@@ -275,11 +277,6 @@ def send_header(self, keyword, value):
        self._headers_buffer.append(f'{keyword}: {value}\r\n'.encode())


def validate_and_send(rh, req):
    rh.validate(req)
    return rh.send(req)


class TestRequestHandlerBase:
    @classmethod
    def setup_class(cls):
@@ -872,8 +869,9 @@ def request(self, *args, **kwargs):
    ])
    @pytest.mark.parametrize('handler', ['Requests'], indirect=True)
    def test_response_error_mapping(self, handler, monkeypatch, raised, expected, match):
        from urllib3.response import HTTPResponse as Urllib3Response
        from requests.models import Response as RequestsResponse
        from urllib3.response import HTTPResponse as Urllib3Response

        from yt_dlp.networking._requests import RequestsResponseAdapter
        requests_res = RequestsResponse()
        requests_res.raw = Urllib3Response(body=b'', status=200)

@@ -929,13 +927,17 @@ class HTTPSupportedRH(ValidationRH):
        ('http', False, {}),
        ('https', False, {}),
    ]),
    ('Websockets', [
        ('ws', False, {}),
        ('wss', False, {}),
    ]),
    (NoCheckRH, [('http', False, {})]),
    (ValidationRH, [('http', UnsupportedRequest, {})])
]

PROXY_SCHEME_TESTS = [
    # scheme, expected to fail
    ('Urllib', [
    ('Urllib', 'http', [
        ('http', False),
        ('https', UnsupportedRequest),
        ('socks4', False),
@@ -944,7 +946,7 @@ class HTTPSupportedRH(ValidationRH):
        ('socks5h', False),
        ('socks', UnsupportedRequest),
    ]),
    ('Requests', [
    ('Requests', 'http', [
        ('http', False),
        ('https', False),
        ('socks4', False),

@@ -952,8 +954,11 @@ class HTTPSupportedRH(ValidationRH):
        ('socks5', False),
        ('socks5h', False),
    ]),
    (NoCheckRH, [('http', False)]),
    (HTTPSupportedRH, [('http', UnsupportedRequest)]),
    (NoCheckRH, 'http', [('http', False)]),
    (HTTPSupportedRH, 'http', [('http', UnsupportedRequest)]),
    ('Websockets', 'ws', [('http', UnsupportedRequest)]),
]

PROXY_KEY_TESTS = [
@@ -972,7 +977,7 @@ class HTTPSupportedRH(ValidationRH):
]

EXTENSION_TESTS = [
    ('Urllib', [
    ('Urllib', 'http', [
        ({'cookiejar': 'notacookiejar'}, AssertionError),
        ({'cookiejar': YoutubeDLCookieJar()}, False),
        ({'cookiejar': CookieJar()}, AssertionError),

@@ -980,17 +985,21 @@ class HTTPSupportedRH(ValidationRH):
        ({'timeout': 'notatimeout'}, AssertionError),
        ({'unsupported': 'value'}, UnsupportedRequest),
    ]),
    ('Requests', [
    ('Requests', 'http', [
        ({'cookiejar': 'notacookiejar'}, AssertionError),
        ({'cookiejar': YoutubeDLCookieJar()}, False),
        ({'timeout': 1}, False),
        ({'timeout': 'notatimeout'}, AssertionError),
        ({'unsupported': 'value'}, UnsupportedRequest),
    ]),
    (NoCheckRH, [
    (NoCheckRH, 'http', [
        ({'cookiejar': 'notacookiejar'}, False),
        ({'somerandom': 'test'}, False),  # but any extension is allowed through
    ]),
    ('Websockets', 'ws', [
        ({'cookiejar': YoutubeDLCookieJar()}, False),
        ({'timeout': 2}, False),
    ]),
]

@pytest.mark.parametrize('handler,scheme,fail,handler_kwargs', [
@@ -1016,14 +1025,14 @@ def test_proxy_key(self, handler, proxy_key, fail):
        run_validation(handler, fail, Request('http://', proxies={proxy_key: 'http://example.com'}))
        run_validation(handler, fail, Request('http://'), proxies={proxy_key: 'http://example.com'})

    @pytest.mark.parametrize('handler,scheme,fail', [
        (handler_tests[0], scheme, fail)
    @pytest.mark.parametrize('handler,req_scheme,scheme,fail', [
        (handler_tests[0], handler_tests[1], scheme, fail)
        for handler_tests in PROXY_SCHEME_TESTS
        for scheme, fail in handler_tests[1]
        for scheme, fail in handler_tests[2]
    ], indirect=['handler'])
    def test_proxy_scheme(self, handler, scheme, fail):
        run_validation(handler, fail, Request('http://', proxies={'http': f'{scheme}://example.com'}))
        run_validation(handler, fail, Request('http://'), proxies={'http': f'{scheme}://example.com'})
    def test_proxy_scheme(self, handler, req_scheme, scheme, fail):
        run_validation(handler, fail, Request(f'{req_scheme}://', proxies={req_scheme: f'{scheme}://example.com'}))
        run_validation(handler, fail, Request(f'{req_scheme}://'), proxies={req_scheme: f'{scheme}://example.com'})

    @pytest.mark.parametrize('handler', ['Urllib', HTTPSupportedRH, 'Requests'], indirect=True)
    def test_empty_proxy(self, handler):

@@ -1035,14 +1044,14 @@ def test_empty_proxy(self, handler):
    def test_invalid_proxy_url(self, handler, proxy_url):
        run_validation(handler, UnsupportedRequest, Request('http://', proxies={'http': proxy_url}))

    @pytest.mark.parametrize('handler,extensions,fail', [
        (handler_tests[0], extensions, fail)
    @pytest.mark.parametrize('handler,scheme,extensions,fail', [
        (handler_tests[0], handler_tests[1], extensions, fail)
        for handler_tests in EXTENSION_TESTS
        for extensions, fail in handler_tests[1]
        for extensions, fail in handler_tests[2]
    ], indirect=['handler'])
    def test_extension(self, handler, extensions, fail):
    def test_extension(self, handler, scheme, extensions, fail):
        run_validation(
            handler, fail, Request('http://', extensions=extensions))
            handler, fail, Request(f'{scheme}://', extensions=extensions))

    def test_invalid_request_type(self):
        rh = self.ValidationRH(logger=FakeLogger())
@@ -1075,6 +1084,22 @@ def __init__(self, *args, **kwargs):
        self._request_director = self.build_request_director([FakeRH])


class AllUnsupportedRHYDL(FakeYDL):

    def __init__(self, *args, **kwargs):

        class UnsupportedRH(RequestHandler):
            def _send(self, request: Request):
                pass

            _SUPPORTED_FEATURES = ()
            _SUPPORTED_PROXY_SCHEMES = ()
            _SUPPORTED_URL_SCHEMES = ()

        super().__init__(*args, **kwargs)
        self._request_director = self.build_request_director([UnsupportedRH])


class TestRequestDirector:

    def test_handler_operations(self):

@@ -1234,6 +1259,12 @@ def test_file_urls_error(self):
        with pytest.raises(RequestError, match=r'file:// URLs are disabled by default'):
            ydl.urlopen('file://')

    @pytest.mark.parametrize('scheme', (['ws', 'wss']))
    def test_websocket_unavailable_error(self, scheme):
        with AllUnsupportedRHYDL() as ydl:
            with pytest.raises(RequestError, match=r'This request requires WebSocket support'):
                ydl.urlopen(f'{scheme}://')

    def test_legacy_server_connect_error(self):
        with FakeRHYDL() as ydl:
            for error in ('UNSAFE_LEGACY_RENEGOTIATION_DISABLED', 'SSLV3_ALERT_HANDSHAKE_FAILURE'):
@@ -1293,6 +1324,10 @@ def test_clean_header(self):
            assert 'Youtubedl-no-compression' not in rh.headers
            assert rh.headers.get('Accept-Encoding') == 'identity'

        with FakeYDL({'http_headers': {'Ytdl-socks-proxy': 'socks://localhost:1080'}}) as ydl:
            rh = self.build_handler(ydl)
            assert 'Ytdl-socks-proxy' not in rh.headers

    def test_build_handler_params(self):
        with FakeYDL({
            'http_headers': {'test': 'testtest'},
@@ -210,6 +210,16 @@ def do_GET(self):
        self.wfile.write(payload.encode())


class SocksWebSocketTestRequestHandler(SocksTestRequestHandler):
    def handle(self):
        import websockets.sync.server
        protocol = websockets.ServerProtocol()
        connection = websockets.sync.server.ServerConnection(socket=self.request, protocol=protocol, close_timeout=0)
        connection.handshake()
        connection.send(json.dumps(self.socks_info))
        connection.close()


@contextlib.contextmanager
def socks_server(socks_server_class, request_handler, bind_ip=None, **socks_server_kwargs):
    server = server_thread = None

@@ -252,8 +262,22 @@ def socks_info_request(self, handler, target_domain=None, target_port=None, **re
        return json.loads(handler.send(request).read().decode())


class WebSocketSocksTestProxyContext(SocksProxyTestContext):
    REQUEST_HANDLER_CLASS = SocksWebSocketTestRequestHandler

    def socks_info_request(self, handler, target_domain=None, target_port=None, **req_kwargs):
        request = Request(f'ws://{target_domain or "127.0.0.1"}:{target_port or "40000"}', **req_kwargs)
        handler.validate(request)
        ws = handler.send(request)
        ws.send('socks_info')
        socks_info = ws.recv()
        ws.close()
        return json.loads(socks_info)


CTX_MAP = {
    'http': HTTPSocksTestProxyContext,
    'ws': WebSocketSocksTestProxyContext,
}
@@ -263,7 +287,7 @@ def ctx(request):


class TestSocks4Proxy:
    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
    def test_socks4_no_auth(self, handler, ctx):
        with handler() as rh:
            with ctx.socks_server(Socks4ProxyHandler) as server_address:

@@ -271,7 +295,7 @@ def test_socks4_no_auth(self, handler, ctx):
                    rh, proxies={'all': f'socks4://{server_address}'})
                assert response['version'] == 4

    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
    def test_socks4_auth(self, handler, ctx):
        with handler() as rh:
            with ctx.socks_server(Socks4ProxyHandler, user_id='user') as server_address:

@@ -281,7 +305,7 @@ def test_socks4_auth(self, handler, ctx):
                    rh, proxies={'all': f'socks4://user:@{server_address}'})
                assert response['version'] == 4

    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
    def test_socks4a_ipv4_target(self, handler, ctx):
        with ctx.socks_server(Socks4ProxyHandler) as server_address:
            with handler(proxies={'all': f'socks4a://{server_address}'}) as rh:

@@ -289,7 +313,7 @@ def test_socks4a_ipv4_target(self, handler, ctx):
                assert response['version'] == 4
                assert (response['ipv4_address'] == '127.0.0.1') != (response['domain_address'] == '127.0.0.1')

    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
    def test_socks4a_domain_target(self, handler, ctx):
        with ctx.socks_server(Socks4ProxyHandler) as server_address:
            with handler(proxies={'all': f'socks4a://{server_address}'}) as rh:

@@ -298,7 +322,7 @@ def test_socks4a_domain_target(self, handler, ctx):
                assert response['ipv4_address'] is None
                assert response['domain_address'] == 'localhost'

    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
    def test_ipv4_client_source_address(self, handler, ctx):
        with ctx.socks_server(Socks4ProxyHandler) as server_address:
            source_address = f'127.0.0.{random.randint(5, 255)}'

@@ -308,7 +332,7 @@ def test_ipv4_client_source_address(self, handler, ctx):
            assert response['client_address'][0] == source_address
            assert response['version'] == 4

    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
    @pytest.mark.parametrize('reply_code', [
        Socks4CD.REQUEST_REJECTED_OR_FAILED,
        Socks4CD.REQUEST_REJECTED_CANNOT_CONNECT_TO_IDENTD,

@@ -320,7 +344,7 @@ def test_socks4_errors(self, handler, ctx, reply_code):
            with pytest.raises(ProxyError):
                ctx.socks_info_request(rh)

    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
    def test_ipv6_socks4_proxy(self, handler, ctx):
        with ctx.socks_server(Socks4ProxyHandler, bind_ip='::1') as server_address:
            with handler(proxies={'all': f'socks4://{server_address}'}) as rh:

@@ -329,7 +353,7 @@ def test_ipv6_socks4_proxy(self, handler, ctx):
                assert response['ipv4_address'] == '127.0.0.1'
                assert response['version'] == 4

    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
    def test_timeout(self, handler, ctx):
        with ctx.socks_server(Socks4ProxyHandler, sleep=2) as server_address:
            with handler(proxies={'all': f'socks4://{server_address}'}, timeout=0.5) as rh:

@@ -339,7 +363,7 @@ def test_timeout(self, handler, ctx):

class TestSocks5Proxy:

    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
    def test_socks5_no_auth(self, handler, ctx):
        with ctx.socks_server(Socks5ProxyHandler) as server_address:
            with handler(proxies={'all': f'socks5://{server_address}'}) as rh:

@@ -347,7 +371,7 @@ def test_socks5_no_auth(self, handler, ctx):
                assert response['auth_methods'] == [0x0]
                assert response['version'] == 5

    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
    def test_socks5_user_pass(self, handler, ctx):
        with ctx.socks_server(Socks5ProxyHandler, auth=('test', 'testpass')) as server_address:
            with handler() as rh:

@@ -360,7 +384,7 @@ def test_socks5_user_pass(self, handler, ctx):
                assert response['auth_methods'] == [Socks5Auth.AUTH_NONE, Socks5Auth.AUTH_USER_PASS]
                assert response['version'] == 5

    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
    def test_socks5_ipv4_target(self, handler, ctx):
        with ctx.socks_server(Socks5ProxyHandler) as server_address:
            with handler(proxies={'all': f'socks5://{server_address}'}) as rh:
|
||||
|
@ -368,7 +392,7 @@ def test_socks5_ipv4_target(self, handler, ctx):
|
|||
assert response['ipv4_address'] == '127.0.0.1'
|
||||
assert response['version'] == 5
|
||||
|
||||
@pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
|
||||
@pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
|
||||
def test_socks5_domain_target(self, handler, ctx):
|
||||
with ctx.socks_server(Socks5ProxyHandler) as server_address:
|
||||
with handler(proxies={'all': f'socks5://{server_address}'}) as rh:
|
||||
|
@ -376,7 +400,7 @@ def test_socks5_domain_target(self, handler, ctx):
|
|||
assert (response['ipv4_address'] == '127.0.0.1') != (response['ipv6_address'] == '::1')
|
||||
assert response['version'] == 5
|
||||
|
||||
@pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
|
||||
@pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
|
||||
def test_socks5h_domain_target(self, handler, ctx):
|
||||
with ctx.socks_server(Socks5ProxyHandler) as server_address:
|
||||
with handler(proxies={'all': f'socks5h://{server_address}'}) as rh:
|
||||
|
@ -385,7 +409,7 @@ def test_socks5h_domain_target(self, handler, ctx):
|
|||
assert response['domain_address'] == 'localhost'
|
||||
assert response['version'] == 5
|
||||
|
||||
@pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
|
||||
@pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
|
||||
def test_socks5h_ip_target(self, handler, ctx):
|
||||
with ctx.socks_server(Socks5ProxyHandler) as server_address:
|
||||
with handler(proxies={'all': f'socks5h://{server_address}'}) as rh:
|
||||
|
@ -394,7 +418,7 @@ def test_socks5h_ip_target(self, handler, ctx):
|
|||
assert response['domain_address'] is None
|
||||
assert response['version'] == 5
|
||||
|
||||
@pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
|
||||
@pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
|
||||
def test_socks5_ipv6_destination(self, handler, ctx):
|
||||
with ctx.socks_server(Socks5ProxyHandler) as server_address:
|
||||
with handler(proxies={'all': f'socks5://{server_address}'}) as rh:
|
||||
|
@ -402,7 +426,7 @@ def test_socks5_ipv6_destination(self, handler, ctx):
|
|||
assert response['ipv6_address'] == '::1'
|
||||
assert response['version'] == 5
|
||||
|
||||
@pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
|
||||
@pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
|
||||
def test_ipv6_socks5_proxy(self, handler, ctx):
|
||||
with ctx.socks_server(Socks5ProxyHandler, bind_ip='::1') as server_address:
|
||||
with handler(proxies={'all': f'socks5://{server_address}'}) as rh:
|
||||
|
@ -413,7 +437,7 @@ def test_ipv6_socks5_proxy(self, handler, ctx):
|
|||
|
||||
# XXX: is there any feasible way of testing IPv6 source addresses?
|
||||
# Same would go for non-proxy source_address test...
|
||||
@pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
|
||||
@pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
|
||||
def test_ipv4_client_source_address(self, handler, ctx):
|
||||
with ctx.socks_server(Socks5ProxyHandler) as server_address:
|
||||
source_address = f'127.0.0.{random.randint(5, 255)}'
|
||||
|
@ -422,7 +446,7 @@ def test_ipv4_client_source_address(self, handler, ctx):
|
|||
assert response['client_address'][0] == source_address
|
||||
assert response['version'] == 5
|
||||
|
||||
@pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
|
||||
@pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
|
||||
@pytest.mark.parametrize('reply_code', [
|
||||
Socks5Reply.GENERAL_FAILURE,
|
||||
Socks5Reply.CONNECTION_NOT_ALLOWED,
|
||||
|
@ -439,7 +463,7 @@ def test_socks5_errors(self, handler, ctx, reply_code):
|
|||
with pytest.raises(ProxyError):
|
||||
ctx.socks_info_request(rh)
|
||||
|
||||
@pytest.mark.parametrize('handler,ctx', [('Urllib', 'http')], indirect=True)
|
||||
@pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Websockets', 'ws')], indirect=True)
|
||||
def test_timeout(self, handler, ctx):
|
||||
with ctx.socks_server(Socks5ProxyHandler, sleep=2) as server_address:
|
||||
with handler(proxies={'all': f'socks5://{server_address}'}, timeout=1) as rh:
|
||||
|
|
219 test/test_update.py Normal file
@@ -0,0 +1,219 @@
#!/usr/bin/env python3

# Allow direct execution
import os
import sys
import unittest

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


from test.helper import FakeYDL, report_warning
from yt_dlp.update import Updater, UpdateInfo

TEST_API_DATA = {
    'yt-dlp/yt-dlp/latest': {
        'tag_name': '2023.12.31',
        'target_commitish': 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb',
        'name': 'yt-dlp 2023.12.31',
        'body': 'BODY',
    },
    'yt-dlp/yt-dlp-nightly-builds/latest': {
        'tag_name': '2023.12.31.123456',
        'target_commitish': 'master',
        'name': 'yt-dlp nightly 2023.12.31.123456',
        'body': 'Generated from: https://github.com/yt-dlp/yt-dlp/commit/cccccccccccccccccccccccccccccccccccccccc',
    },
    'yt-dlp/yt-dlp-master-builds/latest': {
        'tag_name': '2023.12.31.987654',
        'target_commitish': 'master',
        'name': 'yt-dlp master 2023.12.31.987654',
        'body': 'Generated from: https://github.com/yt-dlp/yt-dlp/commit/dddddddddddddddddddddddddddddddddddddddd',
    },
    'yt-dlp/yt-dlp/tags/testing': {
        'tag_name': 'testing',
        'target_commitish': '9999999999999999999999999999999999999999',
        'name': 'testing',
        'body': 'BODY',
    },
    'fork/yt-dlp/latest': {
        'tag_name': '2050.12.31',
        'target_commitish': 'eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee',
        'name': '2050.12.31',
        'body': 'BODY',
    },
    'fork/yt-dlp/tags/pr0000': {
        'tag_name': 'pr0000',
        'target_commitish': 'ffffffffffffffffffffffffffffffffffffffff',
        'name': 'pr1234 2023.11.11.000000',
        'body': 'BODY',
    },
    'fork/yt-dlp/tags/pr1234': {
        'tag_name': 'pr1234',
        'target_commitish': '0000000000000000000000000000000000000000',
        'name': 'pr1234 2023.12.31.555555',
        'body': 'BODY',
    },
    'fork/yt-dlp/tags/pr9999': {
        'tag_name': 'pr9999',
        'target_commitish': '1111111111111111111111111111111111111111',
        'name': 'pr9999',
        'body': 'BODY',
    },
    'fork/yt-dlp-satellite/tags/pr987': {
        'tag_name': 'pr987',
        'target_commitish': 'master',
        'name': 'pr987',
        'body': 'Generated from: https://github.com/yt-dlp/yt-dlp/commit/2222222222222222222222222222222222222222',
    },
}

TEST_LOCKFILE_COMMENT = '# This file is used for regulating self-update'

TEST_LOCKFILE_V1 = r'''%s
lock 2022.08.18.36 .+ Python 3\.6
lock 2023.11.16 (?!win_x86_exe).+ Python 3\.7
lock 2023.11.16 win_x86_exe .+ Windows-(?:Vista|2008Server)
''' % TEST_LOCKFILE_COMMENT

TEST_LOCKFILE_V2_TMPL = r'''%s
lockV2 yt-dlp/yt-dlp 2022.08.18.36 .+ Python 3\.6
lockV2 yt-dlp/yt-dlp 2023.11.16 (?!win_x86_exe).+ Python 3\.7
lockV2 yt-dlp/yt-dlp 2023.11.16 win_x86_exe .+ Windows-(?:Vista|2008Server)
lockV2 yt-dlp/yt-dlp-nightly-builds 2023.11.15.232826 (?!win_x86_exe).+ Python 3\.7
lockV2 yt-dlp/yt-dlp-nightly-builds 2023.11.15.232826 win_x86_exe .+ Windows-(?:Vista|2008Server)
lockV2 yt-dlp/yt-dlp-master-builds 2023.11.15.232812 (?!win_x86_exe).+ Python 3\.7
lockV2 yt-dlp/yt-dlp-master-builds 2023.11.15.232812 win_x86_exe .+ Windows-(?:Vista|2008Server)
'''

TEST_LOCKFILE_V2 = TEST_LOCKFILE_V2_TMPL % TEST_LOCKFILE_COMMENT

TEST_LOCKFILE_ACTUAL = TEST_LOCKFILE_V2_TMPL % TEST_LOCKFILE_V1.rstrip('\n')

TEST_LOCKFILE_FORK = r'''%s# Test if a fork blocks updates to non-numeric tags
lockV2 fork/yt-dlp pr0000 .+ Python 3.6
lockV2 fork/yt-dlp pr1234 (?!win_x86_exe).+ Python 3\.7
lockV2 fork/yt-dlp pr1234 win_x86_exe .+ Windows-(?:Vista|2008Server)
lockV2 fork/yt-dlp pr9999 .+ Python 3.11
''' % TEST_LOCKFILE_ACTUAL
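To make the lock semantics concrete, here is a minimal sketch (an illustration, not code from this commit; the function name is hypothetical) of how a `lockV2` line could gate updates, assuming the updater prefix-matches the identifier with `re.match` as the test expectations below imply:

import re

def max_allowed_version(lockfile, repo, identifier):
    for line in lockfile.splitlines():
        parts = line.split(maxsplit=3)
        if len(parts) == 4 and parts[0] == 'lockV2' and parts[1] == repo:
            version, pattern = parts[2], parts[3]
            if re.match(pattern, identifier):
                return version  # cannot update past this tag
    return None  # no lock applies

# e.g. max_allowed_version(TEST_LOCKFILE_V2, 'yt-dlp/yt-dlp', 'zip Python 3.6.0')
# -> '2022.08.18.36', matching the "Python 3.6 --update should update only to
# its lock" case exercised in test_update_spec below.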

class FakeUpdater(Updater):
    current_version = '2022.01.01'
    current_commit = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'

    _channel = 'stable'
    _origin = 'yt-dlp/yt-dlp'

    def _download_update_spec(self, *args, **kwargs):
        return TEST_LOCKFILE_ACTUAL

    def _call_api(self, tag):
        tag = f'tags/{tag}' if tag != 'latest' else tag
        return TEST_API_DATA[f'{self.requested_repo}/{tag}']

    def _report_error(self, msg, *args, **kwargs):
        report_warning(msg)


class TestUpdate(unittest.TestCase):
    maxDiff = None

    def test_update_spec(self):
        ydl = FakeYDL()
        updater = FakeUpdater(ydl, 'stable')

        def test(lockfile, identifier, input_tag, expect_tag, exact=False, repo='yt-dlp/yt-dlp'):
            updater._identifier = identifier
            updater._exact = exact
            updater.requested_repo = repo
            result = updater._process_update_spec(lockfile, input_tag)
            self.assertEqual(
                result, expect_tag,
                f'{identifier!r} requesting {repo}@{input_tag} (exact={exact}) '
                f'returned {result!r} instead of {expect_tag!r}')

        for lockfile in (TEST_LOCKFILE_V1, TEST_LOCKFILE_V2, TEST_LOCKFILE_ACTUAL, TEST_LOCKFILE_FORK):
            # Normal operation
            test(lockfile, 'zip Python 3.12.0', '2023.12.31', '2023.12.31')
            test(lockfile, 'zip stable Python 3.12.0', '2023.12.31', '2023.12.31', exact=True)
            # Python 3.6 --update should update only to its lock
            test(lockfile, 'zip Python 3.6.0', '2023.11.16', '2022.08.18.36')
            # --update-to an exact version later than the lock should return None
            test(lockfile, 'zip stable Python 3.6.0', '2023.11.16', None, exact=True)
            # Python 3.7 should be able to update to its lock
            test(lockfile, 'zip Python 3.7.0', '2023.11.16', '2023.11.16')
            test(lockfile, 'zip stable Python 3.7.1', '2023.11.16', '2023.11.16', exact=True)
            # Non-win_x86_exe builds on py3.7 must be locked
            test(lockfile, 'zip Python 3.7.1', '2023.12.31', '2023.11.16')
            test(lockfile, 'zip stable Python 3.7.1', '2023.12.31', None, exact=True)
            test(  # Windows Vista w/ win_x86_exe must be locked
                lockfile, 'win_x86_exe stable Python 3.7.9 (CPython x86 32bit) - Windows-Vista-6.0.6003-SP2',
                '2023.12.31', '2023.11.16')
            test(  # Windows 2008Server w/ win_x86_exe must be locked
                lockfile, 'win_x86_exe Python 3.7.9 (CPython x86 32bit) - Windows-2008Server',
                '2023.12.31', None, exact=True)
            test(  # Windows 7 w/ win_x86_exe py3.7 build should be able to update beyond lock
                lockfile, 'win_x86_exe stable Python 3.7.9 (CPython x86 32bit) - Windows-7-6.1.7601-SP1',
                '2023.12.31', '2023.12.31')
            test(  # Windows 8.1 w/ '2008Server' in platform string should be able to update beyond lock
                lockfile, 'win_x86_exe Python 3.7.9 (CPython x86 32bit) - Windows-post2008Server-6.2.9200',
                '2023.12.31', '2023.12.31', exact=True)

        # Forks can block updates to non-numeric tags rather than lock
        test(TEST_LOCKFILE_FORK, 'zip Python 3.6.3', 'pr0000', None, repo='fork/yt-dlp')
        test(TEST_LOCKFILE_FORK, 'zip stable Python 3.7.4', 'pr0000', 'pr0000', repo='fork/yt-dlp')
        test(TEST_LOCKFILE_FORK, 'zip stable Python 3.7.4', 'pr1234', None, repo='fork/yt-dlp')
        test(TEST_LOCKFILE_FORK, 'zip Python 3.8.1', 'pr1234', 'pr1234', repo='fork/yt-dlp', exact=True)
        test(
            TEST_LOCKFILE_FORK, 'win_x86_exe stable Python 3.7.9 (CPython x86 32bit) - Windows-Vista-6.0.6003-SP2',
            'pr1234', None, repo='fork/yt-dlp')
        test(
            TEST_LOCKFILE_FORK, 'win_x86_exe stable Python 3.7.9 (CPython x86 32bit) - Windows-7-6.1.7601-SP1',
            '2023.12.31', '2023.12.31', repo='fork/yt-dlp')
        test(TEST_LOCKFILE_FORK, 'zip Python 3.11.2', 'pr9999', None, repo='fork/yt-dlp', exact=True)
        test(TEST_LOCKFILE_FORK, 'zip stable Python 3.12.0', 'pr9999', 'pr9999', repo='fork/yt-dlp')

    def test_query_update(self):
        ydl = FakeYDL()

        def test(target, expected, current_version=None, current_commit=None, identifier=None):
            updater = FakeUpdater(ydl, target)
            if current_version:
                updater.current_version = current_version
            if current_commit:
                updater.current_commit = current_commit
            updater._identifier = identifier or 'zip'
            update_info = updater.query_update(_output=True)
            self.assertDictEqual(
                update_info.__dict__ if update_info else {}, expected.__dict__ if expected else {})

        test('yt-dlp/yt-dlp@latest', UpdateInfo(
            '2023.12.31', version='2023.12.31', requested_version='2023.12.31', commit='b' * 40))
        test('yt-dlp/yt-dlp-nightly-builds@latest', UpdateInfo(
            '2023.12.31.123456', version='2023.12.31.123456', requested_version='2023.12.31.123456', commit='c' * 40))
        test('yt-dlp/yt-dlp-master-builds@latest', UpdateInfo(
            '2023.12.31.987654', version='2023.12.31.987654', requested_version='2023.12.31.987654', commit='d' * 40))
        test('fork/yt-dlp@latest', UpdateInfo(
            '2050.12.31', version='2050.12.31', requested_version='2050.12.31', commit='e' * 40))
        test('fork/yt-dlp@pr0000', UpdateInfo(
            'pr0000', version='2023.11.11.000000', requested_version='2023.11.11.000000', commit='f' * 40))
        test('fork/yt-dlp@pr1234', UpdateInfo(
            'pr1234', version='2023.12.31.555555', requested_version='2023.12.31.555555', commit='0' * 40))
        test('fork/yt-dlp@pr9999', UpdateInfo(
            'pr9999', version=None, requested_version=None, commit='1' * 40))
        test('fork/yt-dlp-satellite@pr987', UpdateInfo(
            'pr987', version=None, requested_version=None, commit='2' * 40))
        test('yt-dlp/yt-dlp', None, current_version='2024.01.01')
        test('stable', UpdateInfo(
            '2023.12.31', version='2023.12.31', requested_version='2023.12.31', commit='b' * 40))
        test('nightly', UpdateInfo(
            '2023.12.31.123456', version='2023.12.31.123456', requested_version='2023.12.31.123456', commit='c' * 40))
        test('master', UpdateInfo(
            '2023.12.31.987654', version='2023.12.31.987654', requested_version='2023.12.31.987654', commit='d' * 40))
        test('testing', None, current_commit='9' * 40)
        test('testing', UpdateInfo('testing', commit='9' * 40))


if __name__ == '__main__':
    unittest.main()
@@ -1,30 +0,0 @@
#!/usr/bin/env python3

# Allow direct execution
import os
import sys
import unittest

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


import json

from yt_dlp.update import rsa_verify


class TestUpdate(unittest.TestCase):
    def test_rsa_verify(self):
        UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
        with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'versions.json'), 'rb') as f:
            versions_info = f.read().decode()
        versions_info = json.loads(versions_info)
        signature = versions_info['signature']
        del versions_info['signature']
        self.assertTrue(rsa_verify(
            json.dumps(versions_info, sort_keys=True).encode(),
            signature, UPDATES_RSA_KEY))


if __name__ == '__main__':
    unittest.main()
@@ -2317,23 +2317,6 @@ def test_traverse_obj(self):
        self.assertEqual(traverse_obj({}, (0, slice(1)), traverse_string=True), [],
                         msg='branching should result in list if `traverse_string`')

        # Test is_user_input behavior
        _IS_USER_INPUT_DATA = {'range8': list(range(8))}
        self.assertEqual(traverse_obj(_IS_USER_INPUT_DATA, ('range8', '3'),
                                      is_user_input=True), 3,
                         msg='allow for string indexing if `is_user_input`')
        self.assertCountEqual(traverse_obj(_IS_USER_INPUT_DATA, ('range8', '3:'),
                                           is_user_input=True), tuple(range(8))[3:],
                              msg='allow for string slice if `is_user_input`')
        self.assertCountEqual(traverse_obj(_IS_USER_INPUT_DATA, ('range8', ':4:2'),
                                           is_user_input=True), tuple(range(8))[:4:2],
                              msg='allow step in string slice if `is_user_input`')
        self.assertCountEqual(traverse_obj(_IS_USER_INPUT_DATA, ('range8', ':'),
                                           is_user_input=True), range(8),
                              msg='`:` should be treated as `...` if `is_user_input`')
        with self.assertRaises(TypeError, msg='too many params should result in error'):
            traverse_obj(_IS_USER_INPUT_DATA, ('range8', ':::'), is_user_input=True)

        # Test re.Match as input obj
        mobj = re.fullmatch(r'0(12)(?P<group>3)(4)?', '0123')
        self.assertEqual(traverse_obj(mobj, ...), [x for x in mobj.groups() if x is not None],
380 test/test_websockets.py Normal file
@@ -0,0 +1,380 @@
#!/usr/bin/env python3

# Allow direct execution
import os
import sys

import pytest

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import http.client
import http.cookiejar
import http.server
import json
import random
import ssl
import threading

from yt_dlp import socks
from yt_dlp.cookies import YoutubeDLCookieJar
from yt_dlp.dependencies import websockets
from yt_dlp.networking import Request
from yt_dlp.networking.exceptions import (
    CertificateVerifyError,
    HTTPError,
    ProxyError,
    RequestError,
    SSLError,
    TransportError,
)
from yt_dlp.utils.networking import HTTPHeaderDict

from test.conftest import validate_and_send

TEST_DIR = os.path.dirname(os.path.abspath(__file__))


def websocket_handler(websocket):
    for message in websocket:
        if isinstance(message, bytes):
            if message == b'bytes':
                return websocket.send('2')
        elif isinstance(message, str):
            if message == 'headers':
                return websocket.send(json.dumps(dict(websocket.request.headers)))
            elif message == 'path':
                return websocket.send(websocket.request.path)
            elif message == 'source_address':
                return websocket.send(websocket.remote_address[0])
            elif message == 'str':
                return websocket.send('1')
        return websocket.send(message)


def process_request(self, request):
    if request.path.startswith('/gen_'):
        status = http.HTTPStatus(int(request.path[5:]))
        if 300 <= status.value <= 300:
            return websockets.http11.Response(
                status.value, status.phrase, websockets.datastructures.Headers([('Location', '/')]), b'')
        return self.protocol.reject(status.value, status.phrase)
    return self.protocol.accept(request)
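For orientation, a note on how the suite drives this hook (illustrative URL; the port is assigned at runtime):

# A path such as /gen_404 takes the reject branch, so the handshake fails with
# that HTTP status, which the handler under test must surface as HTTPError
# (see test_raise_http_error below), e.g.:
#   Request(f'ws://127.0.0.1:{port}/gen_404')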

def create_websocket_server(**ws_kwargs):
    import websockets.sync.server
    wsd = websockets.sync.server.serve(websocket_handler, '127.0.0.1', 0, process_request=process_request, **ws_kwargs)
    ws_port = wsd.socket.getsockname()[1]
    ws_server_thread = threading.Thread(target=wsd.serve_forever)
    ws_server_thread.daemon = True
    ws_server_thread.start()
    return ws_server_thread, ws_port


def create_ws_websocket_server():
    return create_websocket_server()


def create_wss_websocket_server():
    certfn = os.path.join(TEST_DIR, 'testcert.pem')
    sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    sslctx.load_cert_chain(certfn, None)
    return create_websocket_server(ssl_context=sslctx)


MTLS_CERT_DIR = os.path.join(TEST_DIR, 'testdata', 'certificate')


def create_mtls_wss_websocket_server():
    certfn = os.path.join(TEST_DIR, 'testcert.pem')
    cacertfn = os.path.join(MTLS_CERT_DIR, 'ca.crt')

    sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    sslctx.verify_mode = ssl.CERT_REQUIRED
    sslctx.load_verify_locations(cafile=cacertfn)
    sslctx.load_cert_chain(certfn, None)

    return create_websocket_server(ssl_context=sslctx)


@pytest.mark.skipif(not websockets, reason='websockets must be installed to test websocket request handlers')
class TestWebsSocketRequestHandlerConformance:
    @classmethod
    def setup_class(cls):
        cls.ws_thread, cls.ws_port = create_ws_websocket_server()
        cls.ws_base_url = f'ws://127.0.0.1:{cls.ws_port}'

        cls.wss_thread, cls.wss_port = create_wss_websocket_server()
        cls.wss_base_url = f'wss://127.0.0.1:{cls.wss_port}'

        cls.bad_wss_thread, cls.bad_wss_port = create_websocket_server(ssl_context=ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER))
        cls.bad_wss_host = f'wss://127.0.0.1:{cls.bad_wss_port}'

        cls.mtls_wss_thread, cls.mtls_wss_port = create_mtls_wss_websocket_server()
        cls.mtls_wss_base_url = f'wss://127.0.0.1:{cls.mtls_wss_port}'

    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
    def test_basic_websockets(self, handler):
        with handler() as rh:
            ws = validate_and_send(rh, Request(self.ws_base_url))
            assert 'upgrade' in ws.headers
            assert ws.status == 101
            ws.send('foo')
            assert ws.recv() == 'foo'
            ws.close()

    # https://www.rfc-editor.org/rfc/rfc6455.html#section-5.6
    @pytest.mark.parametrize('msg,opcode', [('str', 1), (b'bytes', 2)])
    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
    def test_send_types(self, handler, msg, opcode):
        with handler() as rh:
            ws = validate_and_send(rh, Request(self.ws_base_url))
            ws.send(msg)
            assert int(ws.recv()) == opcode
            ws.close()

    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
    def test_verify_cert(self, handler):
        with handler() as rh:
            with pytest.raises(CertificateVerifyError):
                validate_and_send(rh, Request(self.wss_base_url))

        with handler(verify=False) as rh:
            ws = validate_and_send(rh, Request(self.wss_base_url))
            assert ws.status == 101
            ws.close()

    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
    def test_ssl_error(self, handler):
        with handler(verify=False) as rh:
            with pytest.raises(SSLError, match='sslv3 alert handshake failure') as exc_info:
                validate_and_send(rh, Request(self.bad_wss_host))
            assert not issubclass(exc_info.type, CertificateVerifyError)

    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
    @pytest.mark.parametrize('path,expected', [
        # Unicode characters should be encoded with uppercase percent-encoding
        ('/中文', '/%E4%B8%AD%E6%96%87'),
        # don't normalize existing percent encodings
        ('/%c7%9f', '/%c7%9f'),
    ])
    def test_percent_encode(self, handler, path, expected):
        with handler() as rh:
            ws = validate_and_send(rh, Request(f'{self.ws_base_url}{path}'))
            ws.send('path')
            assert ws.recv() == expected
            assert ws.status == 101
            ws.close()
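The expected values can be cross-checked against the standard library, which also emits uppercase hex when percent-encoding UTF-8 (a standalone sanity check, not part of the test file):

import urllib.parse

assert urllib.parse.quote('/中文') == '/%E4%B8%AD%E6%96%87'  # '/' stays in the default safe set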

    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
    def test_remove_dot_segments(self, handler):
        with handler() as rh:
            # This isn't a comprehensive test,
            # but it should be enough to check whether the handler is removing dot segments
            ws = validate_and_send(rh, Request(f'{self.ws_base_url}/a/b/./../../test'))
            assert ws.status == 101
            ws.send('path')
            assert ws.recv() == '/test'
            ws.close()

    # We are restricted to known HTTP status codes in http.HTTPStatus
    # Redirects are not supported for websockets
    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
    @pytest.mark.parametrize('status', (200, 204, 301, 302, 303, 400, 500, 511))
    def test_raise_http_error(self, handler, status):
        with handler() as rh:
            with pytest.raises(HTTPError) as exc_info:
                validate_and_send(rh, Request(f'{self.ws_base_url}/gen_{status}'))
            assert exc_info.value.status == status

    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
    @pytest.mark.parametrize('params,extensions', [
        ({'timeout': 0.00001}, {}),
        ({}, {'timeout': 0.00001}),
    ])
    def test_timeout(self, handler, params, extensions):
        with handler(**params) as rh:
            with pytest.raises(TransportError):
                validate_and_send(rh, Request(self.ws_base_url, extensions=extensions))

    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
    def test_cookies(self, handler):
        cookiejar = YoutubeDLCookieJar()
        cookiejar.set_cookie(http.cookiejar.Cookie(
            version=0, name='test', value='ytdlp', port=None, port_specified=False,
            domain='127.0.0.1', domain_specified=True, domain_initial_dot=False, path='/',
            path_specified=True, secure=False, expires=None, discard=False, comment=None,
            comment_url=None, rest={}))

        with handler(cookiejar=cookiejar) as rh:
            ws = validate_and_send(rh, Request(self.ws_base_url))
            ws.send('headers')
            assert json.loads(ws.recv())['cookie'] == 'test=ytdlp'
            ws.close()

        with handler() as rh:
            ws = validate_and_send(rh, Request(self.ws_base_url))
            ws.send('headers')
            assert 'cookie' not in json.loads(ws.recv())
            ws.close()

            ws = validate_and_send(rh, Request(self.ws_base_url, extensions={'cookiejar': cookiejar}))
            ws.send('headers')
            assert json.loads(ws.recv())['cookie'] == 'test=ytdlp'
            ws.close()

    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
    def test_source_address(self, handler):
        source_address = f'127.0.0.{random.randint(5, 255)}'
        with handler(source_address=source_address) as rh:
            ws = validate_and_send(rh, Request(self.ws_base_url))
            ws.send('source_address')
            assert source_address == ws.recv()
            ws.close()

    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
    def test_response_url(self, handler):
        with handler() as rh:
            url = f'{self.ws_base_url}/something'
            ws = validate_and_send(rh, Request(url))
            assert ws.url == url
            ws.close()

    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
    def test_request_headers(self, handler):
        with handler(headers=HTTPHeaderDict({'test1': 'test', 'test2': 'test2'})) as rh:
            # Global Headers
            ws = validate_and_send(rh, Request(self.ws_base_url))
            ws.send('headers')
            headers = HTTPHeaderDict(json.loads(ws.recv()))
            assert headers['test1'] == 'test'
            ws.close()

            # Per request headers, merged with global
            ws = validate_and_send(rh, Request(
                self.ws_base_url, headers={'test2': 'changed', 'test3': 'test3'}))
            ws.send('headers')
            headers = HTTPHeaderDict(json.loads(ws.recv()))
            assert headers['test1'] == 'test'
            assert headers['test2'] == 'changed'
            assert headers['test3'] == 'test3'
            ws.close()

    @pytest.mark.parametrize('client_cert', (
        {'client_certificate': os.path.join(MTLS_CERT_DIR, 'clientwithkey.crt')},
        {
            'client_certificate': os.path.join(MTLS_CERT_DIR, 'client.crt'),
            'client_certificate_key': os.path.join(MTLS_CERT_DIR, 'client.key'),
        },
        {
            'client_certificate': os.path.join(MTLS_CERT_DIR, 'clientwithencryptedkey.crt'),
            'client_certificate_password': 'foobar',
        },
        {
            'client_certificate': os.path.join(MTLS_CERT_DIR, 'client.crt'),
            'client_certificate_key': os.path.join(MTLS_CERT_DIR, 'clientencrypted.key'),
            'client_certificate_password': 'foobar',
        }
    ))
    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
    def test_mtls(self, handler, client_cert):
        with handler(
            # Disable client-side validation of unacceptable self-signed testcert.pem
            # The test is of a check on the server side, so unaffected
            verify=False,
            client_cert=client_cert
        ) as rh:
            validate_and_send(rh, Request(self.mtls_wss_base_url)).close()


def create_fake_ws_connection(raised):
    import websockets.sync.client

    class FakeWsConnection(websockets.sync.client.ClientConnection):
        def __init__(self, *args, **kwargs):
            class FakeResponse:
                body = b''
                headers = {}
                status_code = 101
                reason_phrase = 'test'

            self.response = FakeResponse()

        def send(self, *args, **kwargs):
            raise raised()

        def recv(self, *args, **kwargs):
            raise raised()

        def close(self, *args, **kwargs):
            return

    return FakeWsConnection()


@pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
class TestWebsocketsRequestHandler:
    @pytest.mark.parametrize('raised,expected', [
        # https://websockets.readthedocs.io/en/stable/reference/exceptions.html
        (lambda: websockets.exceptions.InvalidURI(msg='test', uri='test://'), RequestError),
        # Requires a response object. Should be covered by HTTP error tests.
        # (lambda: websockets.exceptions.InvalidStatus(), TransportError),
        (lambda: websockets.exceptions.InvalidHandshake(), TransportError),
        # These are subclasses of InvalidHandshake
        (lambda: websockets.exceptions.InvalidHeader(name='test'), TransportError),
        (lambda: websockets.exceptions.NegotiationError(), TransportError),
        # Catch-all
        (lambda: websockets.exceptions.WebSocketException(), TransportError),
        (lambda: TimeoutError(), TransportError),
        # These may be raised by our create_connection implementation, which should also be caught
        (lambda: OSError(), TransportError),
        (lambda: ssl.SSLError(), SSLError),
        (lambda: ssl.SSLCertVerificationError(), CertificateVerifyError),
        (lambda: socks.ProxyError(), ProxyError),
    ])
    def test_request_error_mapping(self, handler, monkeypatch, raised, expected):
        import websockets.sync.client

        import yt_dlp.networking._websockets
        with handler() as rh:
            def fake_connect(*args, **kwargs):
                raise raised()
            monkeypatch.setattr(yt_dlp.networking._websockets, 'create_connection', lambda *args, **kwargs: None)
            monkeypatch.setattr(websockets.sync.client, 'connect', fake_connect)
            with pytest.raises(expected) as exc_info:
                rh.send(Request('ws://fake-url'))
            assert exc_info.type is expected

    @pytest.mark.parametrize('raised,expected,match', [
        # https://websockets.readthedocs.io/en/stable/reference/sync/client.html#websockets.sync.client.ClientConnection.send
        (lambda: websockets.exceptions.ConnectionClosed(None, None), TransportError, None),
        (lambda: RuntimeError(), TransportError, None),
        (lambda: TimeoutError(), TransportError, None),
        (lambda: TypeError(), RequestError, None),
        (lambda: socks.ProxyError(), ProxyError, None),
        # Catch-all
        (lambda: websockets.exceptions.WebSocketException(), TransportError, None),
    ])
    def test_ws_send_error_mapping(self, handler, monkeypatch, raised, expected, match):
        from yt_dlp.networking._websockets import WebsocketsResponseAdapter
        ws = WebsocketsResponseAdapter(create_fake_ws_connection(raised), url='ws://fake-url')
        with pytest.raises(expected, match=match) as exc_info:
            ws.send('test')
        assert exc_info.type is expected

    @pytest.mark.parametrize('raised,expected,match', [
        # https://websockets.readthedocs.io/en/stable/reference/sync/client.html#websockets.sync.client.ClientConnection.recv
        (lambda: websockets.exceptions.ConnectionClosed(None, None), TransportError, None),
        (lambda: RuntimeError(), TransportError, None),
        (lambda: TimeoutError(), TransportError, None),
        (lambda: socks.ProxyError(), ProxyError, None),
        # Catch-all
        (lambda: websockets.exceptions.WebSocketException(), TransportError, None),
    ])
    def test_ws_recv_error_mapping(self, handler, monkeypatch, raised, expected, match):
        from yt_dlp.networking._websockets import WebsocketsResponseAdapter
        ws = WebsocketsResponseAdapter(create_fake_ws_connection(raised), url='ws://fake-url')
        with pytest.raises(expected, match=match) as exc_info:
            ws.recv()
        assert exc_info.type is expected
@@ -1,34 +0,0 @@
{
    "latest": "2013.01.06",
    "signature": "72158cdba391628569ffdbea259afbcf279bbe3d8aeb7492690735dc1cfa6afa754f55c61196f3871d429599ab22f2667f1fec98865527b32632e7f4b3675a7ef0f0fbe084d359256ae4bba68f0d33854e531a70754712f244be71d4b92e664302aa99653ee4df19800d955b6c4149cd2b3f24288d6e4b40b16126e01f4c8ce6",
    "versions": {
        "2013.01.02": {
            "bin": [
                "http://youtube-dl.org/downloads/2013.01.02/youtube-dl",
                "f5b502f8aaa77675c4884938b1e4871ebca2611813a0c0e74f60c0fbd6dcca6b"
            ],
            "exe": [
                "http://youtube-dl.org/downloads/2013.01.02/youtube-dl.exe",
                "75fa89d2ce297d102ff27675aa9d92545bbc91013f52ec52868c069f4f9f0422"
            ],
            "tar": [
                "http://youtube-dl.org/downloads/2013.01.02/youtube-dl-2013.01.02.tar.gz",
                "6a66d022ac8e1c13da284036288a133ec8dba003b7bd3a5179d0c0daca8c8196"
            ]
        },
        "2013.01.06": {
            "bin": [
                "http://youtube-dl.org/downloads/2013.01.06/youtube-dl",
                "64b6ed8865735c6302e836d4d832577321b4519aa02640dc508580c1ee824049"
            ],
            "exe": [
                "http://youtube-dl.org/downloads/2013.01.06/youtube-dl.exe",
                "58609baf91e4389d36e3ba586e21dab882daaaee537e4448b1265392ae86ff84"
            ],
            "tar": [
                "http://youtube-dl.org/downloads/2013.01.06/youtube-dl-2013.01.06.tar.gz",
                "fe77ab20a95d980ed17a659aa67e371fdd4d656d19c4c7950e7b720b0c2f1a86"
            ]
        }
    }
}
@@ -60,7 +60,7 @@
    get_postprocessor,
)
from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
from .update import REPOSITORY, _get_system_deprecation, current_git_head, detect_variant
from .update import REPOSITORY, _get_system_deprecation, _make_label, current_git_head, detect_variant
from .utils import (
    DEFAULT_OUTTMPL,
    IDENTITY,

@@ -158,7 +158,7 @@
    clean_proxies,
    std_headers,
)
from .version import CHANNEL, RELEASE_GIT_HEAD, VARIANT, __version__
from .version import CHANNEL, ORIGIN, RELEASE_GIT_HEAD, VARIANT, __version__

if compat_os_name == 'nt':
    import ctypes

@@ -625,13 +625,16 @@ def __init__(self, params=None, auto_init=True):
                    'Overwriting params from "color" with "no_color"')
                self.params['color'] = 'no_color'

        term_allow_color = os.environ.get('TERM', '').lower() != 'dumb'
        term_allow_color = os.getenv('TERM', '').lower() != 'dumb'
        no_color = bool(os.getenv('NO_COLOR'))

        def process_color_policy(stream):
            stream_name = {sys.stdout: 'stdout', sys.stderr: 'stderr'}[stream]
            policy = traverse_obj(self.params, ('color', (stream_name, None), {str}), get_all=False)
            if policy in ('auto', None):
                return term_allow_color and supports_terminal_sequences(stream)
                if term_allow_color and supports_terminal_sequences(stream):
                    return 'no_color' if no_color else True
                return False
            assert policy in ('always', 'never', 'no_color'), policy
            return {'always': True, 'never': False}.get(policy, policy)
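Concretely, the rewritten 'auto' branch now resolves as follows (a summary of the code above, not additional diff content):

# capable non-dumb terminal, NO_COLOR unset -> True        (full color)
# capable non-dumb terminal, NO_COLOR set   -> 'no_color'  (sequences, no color)
# dumb terminal or non-terminal stream      -> False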

@@ -1176,6 +1179,7 @@ def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
        MATH_FUNCTIONS = {
            '+': float.__add__,
            '-': float.__sub__,
            '*': float.__mul__,
        }
        # Field is of the form key1.key2...
        # where keys (except first) can be string, int, slice or "{field, ...}"

@@ -1197,6 +1201,15 @@ def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
            (?:\|(?P<default>.*?))?
            )$''')

        def _from_user_input(field):
            if field == ':':
                return ...
            elif ':' in field:
                return slice(*map(int_or_none, field.split(':')))
            elif int_or_none(field) is not None:
                return int(field)
            return field
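For reference, the mapping this helper performs on the user-facing field syntax (examples derived directly from the branches above):

# _from_user_input(':')       -> Ellipsis (traverse all values)
# _from_user_input('1:10:2')  -> slice(1, 10, 2)
# _from_user_input('3')       -> 3
# _from_user_input('title')   -> 'title' (unchanged)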

        def _traverse_infodict(fields):
            fields = [f for x in re.split(r'\.({.+?})\.?', fields)
                      for f in ([x] if x.startswith('{') else x.split('.'))]

@@ -1206,11 +1219,12 @@ def _traverse_infodict(fields):

            for i, f in enumerate(fields):
                if not f.startswith('{'):
                    fields[i] = _from_user_input(f)
                    continue
                assert f.endswith('}'), f'No closing brace for {f} in {fields}'
                fields[i] = {k: k.split('.') for k in f[1:-1].split(',')}
                fields[i] = {k: list(map(_from_user_input, k.split('.'))) for k in f[1:-1].split(',')}

            return traverse_obj(info_dict, fields, is_user_input=True, traverse_string=True)
            return traverse_obj(info_dict, fields, traverse_string=True)

        def get_value(mdict):
            # Object traversal

@@ -2338,7 +2352,7 @@ def _check_formats(formats):
            return

        for f in formats:
            if f.get('has_drm'):
            if f.get('has_drm') or f.get('__needs_testing'):
                yield from self._check_formats([f])
            else:
                yield f

@@ -2586,6 +2600,9 @@ def _fill_common_fields(self, info_dict, final=True):
                upload_date = datetime.datetime.fromtimestamp(info_dict[ts_key], datetime.timezone.utc)
                info_dict[date_key] = upload_date.strftime('%Y%m%d')

        if not info_dict.get('release_year'):
            info_dict['release_year'] = traverse_obj(info_dict, ('release_date', {lambda x: int(x[:4])}))

        live_keys = ('is_live', 'was_live')
        live_status = info_dict.get('live_status')
        if live_status is None:

@@ -2764,7 +2781,8 @@ def is_wellformed(f):
                format['dynamic_range'] = 'SDR'
            if format.get('aspect_ratio') is None:
                format['aspect_ratio'] = try_call(lambda: round(format['width'] / format['height'], 2))
            if (not format.get('manifest_url')  # For fragmented formats, "tbr" is often max bitrate and not average
            # For fragmented formats, "tbr" is often max bitrate and not average
            if (('manifest-filesize-approx' in self.params['compat_opts'] or not format.get('manifest_url'))
                    and info_dict.get('duration') and format.get('tbr')
                    and not format.get('filesize') and not format.get('filesize_approx')):
                format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
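As a rough worked example of the approximation (illustrative numbers, not from the diff): a duration of 600 s at tbr = 1000 Kbit/s gives 600 * 1000 * (1024 / 8) = 76,800,000 bytes, about 73 MiB.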

@@ -3543,14 +3561,14 @@ def sanitize_info(info_dict, remove_private_keys=False):
            'version': __version__,
            'current_git_head': current_git_head(),
            'release_git_head': RELEASE_GIT_HEAD,
            'repository': REPOSITORY,
            'repository': ORIGIN,
        })

        if remove_private_keys:
            reject = lambda k, v: v is None or k.startswith('__') or k in {
                'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
                'entries', 'filepath', '_filename', 'filename', 'infojson_filename', 'original_url',
                'playlist_autonumber', '_format_sort_fields',
                'playlist_autonumber',
            }
        else:
            reject = lambda k, v: False

@@ -3926,8 +3944,8 @@ def get_encoding(stream):
                source += '*'
        klass = type(self)
        write_debug(join_nonempty(
            f'{"yt-dlp" if REPOSITORY == "yt-dlp/yt-dlp" else REPOSITORY} version',
            f'{CHANNEL}@{__version__}',
            f'{REPOSITORY.rpartition("/")[2]} version',
            _make_label(ORIGIN, CHANNEL.partition('@')[2] or __version__, __version__),
            f'[{RELEASE_GIT_HEAD[:9]}]' if RELEASE_GIT_HEAD else '',
            '' if source == 'unknown' else f'({source})',
            '' if _IN_CLI else 'API' if klass == YoutubeDL else f'API:{self.__module__}.{klass.__qualname__}',

@@ -4051,6 +4069,7 @@ def urlopen(self, req):
            return self._request_director.send(req)
        except NoSupportingHandlers as e:
            for ue in e.unsupported_errors:
                # FIXME: This depends on the order of errors.
                if not (ue.handler and ue.msg):
                    continue
                if ue.handler.RH_KEY == 'Urllib' and 'unsupported url scheme: "file"' in ue.msg.lower():

@@ -4060,6 +4079,15 @@ def urlopen(self, req):
                if 'unsupported proxy type: "https"' in ue.msg.lower():
                    raise RequestError(
                        'To use an HTTPS proxy for this request, one of the following dependencies needs to be installed: requests')

                elif (
                    re.match(r'unsupported url scheme: "wss?"', ue.msg.lower())
                    and 'websockets' not in self._request_director.handlers
                ):
                    raise RequestError(
                        'This request requires WebSocket support. '
                        'Ensure one of the following dependencies are installed: websockets',
                        cause=ue) from ue
            raise
        except SSLError as e:
            if 'UNSAFE_LEGACY_RENEGOTIATION_DISABLED' in str(e):
@@ -1,8 +1,8 @@
try:
    import contextvars  # noqa: F401
except Exception:
    raise Exception(
        f'You are using an unsupported version of Python. Only Python versions 3.7 and above are supported by yt-dlp')  # noqa: F541
import sys

if sys.version_info < (3, 8):
    raise ImportError(
        f'You are using an unsupported version of Python. Only Python versions 3.8 and above are supported by yt-dlp')  # noqa: F541

__license__ = 'Public Domain'

@@ -12,7 +12,6 @@
import optparse
import os
import re
import sys
import traceback

from .compat import compat_shlex_quote

@@ -74,14 +73,16 @@ def _exit(status=0, *args):


def get_urls(urls, batchfile, verbose):
    # Batch file verification
    """
    @param verbose  -1: quiet, 0: normal, 1: verbose
    """
    batch_urls = []
    if batchfile is not None:
        try:
            batch_urls = read_batch_urls(
                read_stdin('URLs') if batchfile == '-'
                read_stdin(None if verbose == -1 else 'URLs') if batchfile == '-'
                else open(expand_path(batchfile), encoding='utf-8', errors='ignore'))
            if verbose:
            if verbose == 1:
                write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n')
        except OSError:
            _exit(f'ERROR: batch file {batchfile} could not be read')

@@ -722,7 +723,7 @@ def get_postprocessors(opts):
def parse_options(argv=None):
    """@returns ParsedOptions(parser, opts, urls, ydl_opts)"""
    parser, opts, urls = parseOpts(argv)
    urls = get_urls(urls, opts.batchfile, opts.verbose)
    urls = get_urls(urls, opts.batchfile, -1 if opts.quiet and not opts.verbose else opts.verbose)

    set_compat_opts(opts)
    try:
@@ -25,7 +25,7 @@ def get_hidden_imports():
    for module in ('websockets', 'requests', 'urllib3'):
        yield from collect_submodules(module)
    # These are auto-detected, but explicitly add them just in case
    yield from ('mutagen', 'brotli', 'certifi')
    yield from ('mutagen', 'brotli', 'certifi', 'secretstorage')


hiddenimports = list(get_hidden_imports())

@@ -10,17 +10,3 @@
    cache  # >= 3.9
except NameError:
    cache = lru_cache(maxsize=None)

try:
    cached_property  # >= 3.8
except NameError:
    class cached_property:
        def __init__(self, func):
            update_wrapper(self, func)
            self.func = func

        def __get__(self, instance, _):
            if instance is None:
                return self
            setattr(instance, self.func.__name__, self.func(instance))
            return getattr(instance, self.func.__name__)
@@ -15,12 +15,15 @@ class DashSegmentsFD(FragmentFD):
    FD_NAME = 'dashsegments'

    def real_download(self, filename, info_dict):
        if info_dict.get('is_live') and set(info_dict['protocol'].split('+')) != {'http_dash_segments_generator'}:
            self.report_error('Live DASH videos are not supported')
        if 'http_dash_segments_generator' in info_dict['protocol'].split('+'):
            real_downloader = None  # No external FD can support --live-from-start
        else:
            if info_dict.get('is_live'):
                self.report_error('Live DASH videos are not supported')
            real_downloader = get_suitable_downloader(
                info_dict, self.params, None, protocol='dash_frag_urls', to_stdout=(filename == '-'))

        real_start = time.time()
        real_downloader = get_suitable_downloader(
            info_dict, self.params, None, protocol='dash_frag_urls', to_stdout=(filename == '-'))

        requested_formats = [{**info_dict, **fmt} for fmt in info_dict.get('requested_formats', [])]
        args = []

@@ -335,7 +335,7 @@ def _make_cmd(self, tmpfilename, info_dict):
            cmd += ['--auto-file-renaming=false']

        if 'fragments' in info_dict:
            cmd += ['--file-allocation=none', '--uri-selector=inorder']
            cmd += ['--uri-selector=inorder']
            url_list_file = '%s.frag.urls' % tmpfilename
            url_list = []
            for frag_index, fragment in enumerate(info_dict['fragments']):

@@ -6,7 +6,7 @@
from .common import FileDownloader
from .external import FFmpegFD
from ..networking import Request
from ..utils import DownloadError, WebSocketsWrapper, str_or_none, try_get
from ..utils import DownloadError, str_or_none, try_get


class NiconicoDmcFD(FileDownloader):

@@ -64,7 +64,6 @@ def real_download(self, filename, info_dict):
        ws_url = info_dict['url']
        ws_extractor = info_dict['ws']
        ws_origin_host = info_dict['origin']
        cookies = info_dict.get('cookies')
        live_quality = info_dict.get('live_quality', 'high')
        live_latency = info_dict.get('live_latency', 'high')
        dl = FFmpegFD(self.ydl, self.params or {})

@@ -76,12 +75,7 @@ def real_download(self, filename, info_dict):

        def communicate_ws(reconnect):
            if reconnect:
                ws = WebSocketsWrapper(ws_url, {
                    'Cookies': str_or_none(cookies) or '',
                    'Origin': f'https://{ws_origin_host}',
                    'Accept': '*/*',
                    'User-Agent': self.params['http_headers']['User-Agent'],
                })
                ws = self.ydl.urlopen(Request(ws_url, headers={'Origin': f'https://{ws_origin_host}'}))
                if self.ydl.params.get('verbose', False):
                    self.to_screen('[debug] Sending startWatching request')
                ws.send(json.dumps({
@@ -77,16 +77,23 @@
    WyborczaPodcastIE,
    WyborczaVideoIE,
)
from .airmozilla import AirMozillaIE
from .airtv import AirTVIE
from .aitube import AitubeKZVideoIE
from .aljazeera import AlJazeeraIE
from .allstar import (
    AllstarIE,
    AllstarProfileIE,
)
from .alphaporno import AlphaPornoIE
from .amara import AmaraIE
from .altcensored import (
    AltCensoredIE,
    AltCensoredChannelIE,
)
from .alura import (
    AluraIE,
    AluraCourseIE
)
from .amara import AmaraIE
from .amcnetworks import AMCNetworksIE
from .amazon import (
    AmazonStoreIE,

@@ -139,7 +146,6 @@
from .arnes import ArnesIE
from .atresplayer import AtresPlayerIE
from .atscaleconf import AtScaleConfEventIE
from .atttechchannel import ATTTechChannelIE
from .atvat import ATVAtIE
from .audimedia import AudiMediaIE
from .audioboom import AudioBoomIE

@@ -212,6 +218,8 @@
    BiliBiliBangumiIE,
    BiliBiliBangumiSeasonIE,
    BiliBiliBangumiMediaIE,
    BilibiliCheeseIE,
    BilibiliCheeseSeasonIE,
    BiliBiliSearchIE,
    BilibiliCategoryIE,
    BilibiliAudioIE,

@@ -233,11 +241,6 @@
    BitChuteIE,
    BitChuteChannelIE,
)
from .bitwave import (
    BitwaveReplayIE,
    BitwaveStreamIE,
)
from .biqle import BIQLEIE
from .blackboardcollaborate import BlackboardCollaborateIE
from .bleacherreport import (
    BleacherReportIE,

@@ -252,10 +255,7 @@
from .box import BoxIE
from .boxcast import BoxCastVideoIE
from .bpb import BpbIE
from .br import (
    BRIE,
    BRMediathekIE,
)
from .br import BRIE
from .bravotv import BravoTVIE
from .brainpop import (
    BrainPOPIE,

@@ -265,7 +265,6 @@
    BrainPOPFrIE,
    BrainPOPIlIE,
)
from .breakcom import BreakIE
from .breitbart import BreitBartIE
from .brightcove import (
    BrightcoveLegacyIE,

@@ -295,16 +294,11 @@
from .cammodels import CamModelsIE
from .camsoda import CamsodaIE
from .camtasia import CamtasiaEmbedIE
from .camwithher import CamWithHerIE
from .canal1 import Canal1IE
from .canalalpha import CanalAlphaIE
from .canalplus import CanalplusIE
from .canalc2 import Canalc2IE
from .caracoltv import CaracolTvPlayIE
from .carambatv import (
    CarambaTVIE,
    CarambaTVPageIE,
)
from .cartoonnetwork import CartoonNetworkIE
from .cbc import (
    CBCIE,

@@ -343,7 +337,6 @@
from .cellebrite import CellebriteIE
from .ceskatelevize import CeskaTelevizeIE
from .cgtn import CGTNIE
from .channel9 import Channel9IE
from .charlierose import CharlieRoseIE
from .chaturbate import ChaturbateIE
from .chilloutzone import ChilloutzoneIE

@@ -351,11 +344,6 @@
    ChingariIE,
    ChingariUserIE,
)
from .chirbit import (
    ChirbitIE,
    ChirbitProfileIE,
)
from .cinchcast import CinchcastIE
from .cinemax import CinemaxIE
from .cinetecamilano import CinetecaMilanoIE
from .cineverse import (

@@ -372,10 +360,8 @@
from .cliphunter import CliphunterIE
from .clippit import ClippitIE
from .cliprs import ClipRsIE
from .clipsyndicate import ClipsyndicateIE
from .closertotruth import CloserToTruthIE
from .cloudflarestream import CloudflareStreamIE
from .cloudy import CloudyIE
from .clubic import ClubicIE
from .clyp import ClypIE
from .cmt import CMTIE

@@ -442,7 +428,6 @@
    DacastVODIE,
    DacastPlaylistIE,
)
from .daftsex import DaftsexIE
from .dailymail import DailyMailIE
from .dailymotion import (
    DailymotionIE,

@@ -479,7 +464,6 @@
from .dfb import DFBIE
from .dhm import DHMIE
from .digg import DiggIE
from .dotsub import DotsubIE
from .douyutv import (
    DouyuShowIE,
    DouyuTVIE,

@@ -526,7 +510,6 @@
    DubokuPlaylistIE
)
from .dumpert import DumpertIE
from .defense import DefenseGouvFrIE
from .deuxm import (
    DeuxMIE,
    DeuxMNewsIE

@@ -541,6 +524,7 @@
    DropoutSeasonIE,
    DropoutIE
)
from .duoplay import DuoplayIE
from .dw import (
    DWIE,
    DWArticleIE,

@@ -548,25 +532,17 @@
from .eagleplatform import EaglePlatformIE, ClipYouEmbedIE
from .ebaumsworld import EbaumsWorldIE
from .ebay import EbayIE
from .echomsk import EchoMskIE
from .egghead import (
    EggheadCourseIE,
    EggheadLessonIE,
)
from .ehow import EHowIE
from .eighttracks import EightTracksIE
from .einthusan import EinthusanIE
from .eitb import EitbIE
from .elevensports import ElevenSportsIE
from .ellentube import (
    EllenTubeIE,
    EllenTubeVideoIE,
    EllenTubePlaylistIE,
)
from .elonet import ElonetIE
from .elpais import ElPaisIE
from .eltrecetv import ElTreceTVIE
from .embedly import EmbedlyIE
from .engadget import EngadgetIE
from .epicon import (
    EpiconIE,
    EpiconSeriesIE,

@@ -584,7 +560,6 @@
    ERTFlixIE,
    ERTWebtvEmbedIE,
)
from .escapist import EscapistIE
from .espn import (
    ESPNIE,
    WatchESPNIE,

@@ -592,15 +567,12 @@
    FiveThirtyEightIE,
    ESPNCricInfoIE,
)
from .esri import EsriVideoIE
from .ettutv import EttuTvIE
from .europa import EuropaIE, EuroParlWebstreamIE
from .europeantour import EuropeanTourIE
from .eurosport import EurosportIE
from .euscreen import EUScreenIE
from .expotv import ExpoTVIE
from .expressen import ExpressenIE
from .extremetube import ExtremeTubeIE
from .eyedotv import EyedoTVIE
from .facebook import (
    FacebookIE,

@@ -630,6 +602,10 @@
from .firsttv import FirstTVIE
from .fivetv import FiveTVIE
from .flickr import FlickrIE
from .floatplane import (
    FloatplaneIE,
    FloatplaneChannelIE,
)
from .folketinget import FolketingetIE
from .footyroom import FootyRoomIE
from .formula1 import Formula1IE

@@ -639,16 +615,11 @@
    PornerBrosIE,
    FuxIE,
)
from .fourzerostudio import (
    FourZeroStudioArchiveIE,
    FourZeroStudioClipIE,
)
from .fox import FOXIE
from .fox9 import (
    FOX9IE,
    FOX9NewsIE,
)
from .foxgay import FoxgayIE
from .foxnews import (
    FoxNewsIE,
    FoxNewsArticleIE,

@@ -681,7 +652,6 @@
)
from .funk import FunkIE
from .funker530 import Funker530IE
from .fusion import FusionIE
from .fuyintv import FuyinTVIE
from .gab import (
    GabTVIE,

@@ -712,7 +682,6 @@
    GettrIE,
    GettrStreamingIE,
)
from .gfycat import GfycatIE
from .giantbomb import GiantBombIE
from .giga import GigaIE
from .glide import GlideIE

@@ -758,12 +727,10 @@
from .hearthisat import HearThisAtIE
from .heise import HeiseIE
from .hellporno import HellPornoIE
from .helsinki import HelsinkiIE
from .hgtv import HGTVComShowIE
from .hketv import HKETVIE
from .hidive import HiDiveIE
from .historicfilms import HistoricFilmsIE
from .hitbox import HitboxIE, HitboxLiveIE
from .hitrecord import HitRecordIE
from .hollywoodreporter import (
    HollywoodReporterIE,

@@ -779,8 +746,6 @@
    HotStarSeasonIE,
    HotStarSeriesIE,
)
from .howcast import HowcastIE
from .howstuffworks import HowStuffWorksIE
from .hrefli import HrefLiRedirectIE
from .hrfensehen import HRFernsehenIE
from .hrti import (

@@ -894,6 +859,10 @@
    SangiinIE,
)
from .jeuxvideo import JeuxVideoIE
from .jiosaavn import (
    JioSaavnSongIE,
    JioSaavnAlbumIE,
)
from .jove import JoveIE
|
||||
from .joj import JojIE
|
||||
from .jstream import JStreamIE
|
||||
|
@ -908,7 +877,6 @@
|
|||
from .kankanews import KankaNewsIE
|
||||
from .karaoketv import KaraoketvIE
|
||||
from .karrierevideos import KarriereVideosIE
|
||||
from .keezmovies import KeezMoviesIE
|
||||
from .kelbyone import KelbyOneIE
|
||||
from .khanacademy import (
|
||||
KhanAcademyIE,
|
||||
|
@ -943,17 +911,12 @@
|
|||
LA7PodcastEpisodeIE,
|
||||
LA7PodcastIE,
|
||||
)
|
||||
from .laola1tv import (
|
||||
Laola1TvEmbedIE,
|
||||
Laola1TvIE,
|
||||
EHFTVIE,
|
||||
ITTFIE,
|
||||
)
|
||||
from .lastfm import (
|
||||
LastFMIE,
|
||||
LastFMPlaylistIE,
|
||||
LastFMUserIE,
|
||||
)
|
||||
from .laxarxames import LaXarxaMesIE
|
||||
from .lbry import (
|
||||
LBRYIE,
|
||||
LBRYChannelIE,
|
||||
|
@ -1002,7 +965,6 @@
|
|||
LinkedInLearningIE,
|
||||
LinkedInLearningCourseIE,
|
||||
)
|
||||
from .linuxacademy import LinuxAcademyIE
|
||||
from .liputan6 import Liputan6IE
|
||||
from .listennotes import ListenNotesIE
|
||||
from .litv import LiTVIE
|
||||
|
@ -1030,7 +992,6 @@
|
|||
LyndaIE,
|
||||
LyndaCourseIE
|
||||
)
|
||||
from .m6 import M6IE
|
||||
from .magellantv import MagellanTVIE
|
||||
from .magentamusik360 import MagentaMusik360IE
|
||||
from .mailru import (
|
||||
|
@ -1081,10 +1042,7 @@
|
|||
from .megaphone import MegaphoneIE
|
||||
from .meipai import MeipaiIE
|
||||
from .melonvod import MelonVODIE
|
||||
from .meta import METAIE
|
||||
from .metacafe import MetacafeIE
|
||||
from .metacritic import MetacriticIE
|
||||
from .mgoon import MgoonIE
|
||||
from .mgtv import MGTVIE
|
||||
from .miaopai import MiaoPaiIE
|
||||
from .microsoftstream import MicrosoftStreamIE
|
||||
|
@ -1106,7 +1064,6 @@
|
|||
)
|
||||
from .ministrygrid import MinistryGridIE
|
||||
from .minoto import MinotoIE
|
||||
from .miomio import MioMioIE
|
||||
from .mirrativ import (
|
||||
MirrativIE,
|
||||
MirrativUserIE,
|
||||
|
@ -1130,13 +1087,7 @@
|
|||
MLBArticleIE,
|
||||
)
|
||||
from .mlssoccer import MLSSoccerIE
|
||||
from .mnet import MnetIE
|
||||
from .mocha import MochaVideoIE
|
||||
from .moevideo import MoeVideoIE
|
||||
from .mofosex import (
|
||||
MofosexIE,
|
||||
MofosexEmbedIE,
|
||||
)
|
||||
from .mojvideo import MojvideoIE
|
||||
from .monstercat import MonstercatIE
|
||||
from .morningstar import MorningstarIE
|
||||
|
@ -1146,7 +1097,6 @@
|
|||
MotherlessGalleryIE,
|
||||
)
|
||||
from .motorsport import MotorsportIE
|
||||
from .movieclips import MovieClipsIE
|
||||
from .moviepilot import MoviepilotIE
|
||||
from .moview import MoviewPlayIE
|
||||
from .moviezine import MoviezineIE
|
||||
|
@ -1171,18 +1121,12 @@
|
|||
MusicdexArtistIE,
|
||||
MusicdexPlaylistIE,
|
||||
)
|
||||
from .mwave import MwaveIE, MwaveMeetGreetIE
|
||||
from .mxplayer import (
|
||||
MxplayerIE,
|
||||
MxplayerShowIE,
|
||||
)
|
||||
from .mychannels import MyChannelsIE
|
||||
from .myspace import MySpaceIE, MySpaceAlbumIE
|
||||
from .myspass import MySpassIE
|
||||
from .myvi import (
|
||||
MyviIE,
|
||||
MyviEmbedIE,
|
||||
)
|
||||
from .myvideoge import MyVideoGeIE
|
||||
from .myvidster import MyVidsterIE
|
||||
from .mzaalo import MzaaloIE
|
||||
|
@ -1231,6 +1175,7 @@
|
|||
from .ndtv import NDTVIE
|
||||
from .nebula import (
|
||||
NebulaIE,
|
||||
NebulaClassIE,
|
||||
NebulaSubscriptionsIE,
|
||||
NebulaChannelIE,
|
||||
)
|
||||
|
@ -1257,7 +1202,6 @@
|
|||
NewgroundsUserIE,
|
||||
)
|
||||
from .newspicks import NewsPicksIE
|
||||
from .newstube import NewstubeIE
|
||||
from .newsy import NewsyIE
|
||||
from .nextmedia import (
|
||||
NextMediaIE,
|
||||
|
@ -1292,7 +1236,6 @@
|
|||
NickIE,
|
||||
NickBrIE,
|
||||
NickDeIE,
|
||||
NickNightIE,
|
||||
NickRuIE,
|
||||
)
|
||||
from .niconico import (
|
||||
|
@ -1320,14 +1263,11 @@
|
|||
from .ninenow import NineNowIE
|
||||
from .nintendo import NintendoIE
|
||||
from .nitter import NitterIE
|
||||
from .njpwworld import NJPWWorldIE
|
||||
from .nobelprize import NobelPrizeIE
|
||||
from .noice import NoicePodcastIE
|
||||
from .nonktube import NonkTubeIE
|
||||
from .noodlemagazine import NoodleMagazineIE
|
||||
from .noovo import NoovoIE
|
||||
from .normalboots import NormalbootsIE
|
||||
from .nosvideo import NosVideoIE
|
||||
from .nosnl import NOSNLArticleIE
|
||||
from .nova import (
|
||||
NovaEmbedIE,
|
||||
|
@ -1388,7 +1328,10 @@
|
|||
from .oktoberfesttv import OktoberfestTVIE
|
||||
from .olympics import OlympicsReplayIE
|
||||
from .on24 import On24IE
|
||||
from .ondemandkorea import OnDemandKoreaIE
|
||||
from .ondemandkorea import (
|
||||
OnDemandKoreaIE,
|
||||
OnDemandKoreaProgramIE,
|
||||
)
|
||||
from .onefootball import OneFootballIE
|
||||
from .onenewsnz import OneNewsNZIE
|
||||
from .oneplace import OnePlacePodcastIE
|
||||
|
@ -1399,10 +1342,6 @@
|
|||
OnetPlIE,
|
||||
)
|
||||
from .onionstudios import OnionStudiosIE
|
||||
from .ooyala import (
|
||||
OoyalaIE,
|
||||
OoyalaExternalIE,
|
||||
)
|
||||
from .opencast import (
|
||||
OpencastIE,
|
||||
OpencastPlaylistIE,
|
||||
|
@ -1417,6 +1356,7 @@
|
|||
ORFTVthekIE,
|
||||
ORFFM4StoryIE,
|
||||
ORFRadioIE,
|
||||
ORFPodcastIE,
|
||||
ORFIPTVIE,
|
||||
)
|
||||
from .outsidetv import OutsideTVIE
|
||||
|
@ -1430,7 +1370,6 @@
|
|||
PalcoMP3ArtistIE,
|
||||
PalcoMP3VideoIE,
|
||||
)
|
||||
from .pandoratv import PandoraTVIE
|
||||
from .panopto import (
|
||||
PanoptoIE,
|
||||
PanoptoListIE,
|
||||
|
@ -1458,7 +1397,6 @@
|
|||
PelotonIE,
|
||||
PelotonLiveIE
|
||||
)
|
||||
from .people import PeopleIE
|
||||
from .performgroup import PerformGroupIE
|
||||
from .periscope import (
|
||||
PeriscopeIE,
|
||||
|
@ -1490,13 +1428,10 @@
|
|||
PlatziIE,
|
||||
PlatziCourseIE,
|
||||
)
|
||||
from .playfm import PlayFMIE
|
||||
from .playplustv import PlayPlusTVIE
|
||||
from .plays import PlaysTVIE
|
||||
from .playstuff import PlayStuffIE
|
||||
from .playsuisse import PlaySuisseIE
|
||||
from .playtvak import PlaytvakIE
|
||||
from .playvid import PlayvidIE
|
||||
from .playwire import PlaywireIE
|
||||
from .plutotv import PlutoTVIE
|
||||
from .pluralsight import (
|
||||
|
@ -1528,9 +1463,7 @@
|
|||
from .popcorntv import PopcornTVIE
|
||||
from .porn91 import Porn91IE
|
||||
from .pornbox import PornboxIE
|
||||
from .porncom import PornComIE
|
||||
from .pornflip import PornFlipIE
|
||||
from .pornhd import PornHdIE
|
||||
from .pornhub import (
|
||||
PornHubIE,
|
||||
PornHubUserIE,
|
||||
|
@ -1541,7 +1474,6 @@
|
|||
from .pornotube import PornotubeIE
|
||||
from .pornovoisines import PornoVoisinesIE
|
||||
from .pornoxo import PornoXOIE
|
||||
from .pornez import PornezIE
|
||||
from .puhutv import (
|
||||
PuhuTVIE,
|
||||
PuhuTVSerieIE,
|
||||
|
@ -1579,9 +1511,12 @@
|
|||
RadioCanadaIE,
|
||||
RadioCanadaAudioVideoIE,
|
||||
)
|
||||
from .radiocomercial import (
|
||||
RadioComercialIE,
|
||||
RadioComercialPlaylistIE,
|
||||
)
|
||||
from .radiode import RadioDeIE
|
||||
from .radiojavan import RadioJavanIE
|
||||
from .radiobremen import RadioBremenIE
|
||||
from .radiofrance import (
|
||||
FranceCultureIE,
|
||||
RadioFranceIE,
|
||||
|
@ -1633,7 +1568,6 @@
|
|||
RCTIPlusTVIE,
|
||||
)
|
||||
from .rds import RDSIE
|
||||
from .recurbate import RecurbateIE
|
||||
from .redbee import ParliamentLiveUKIE, RTBFIE
|
||||
from .redbulltv import (
|
||||
RedBullTVIE,
|
||||
|
@ -1657,7 +1591,6 @@
|
|||
from .reuters import ReutersIE
|
||||
from .reverbnation import ReverbNationIE
|
||||
from .rheinmaintv import RheinMainTVIE
|
||||
from .rice import RICEIE
|
||||
from .rmcdecouverte import RMCDecouverteIE
|
||||
from .rockstargames import RockstarGamesIE
|
||||
from .rokfin import (
|
||||
|
@ -1681,11 +1614,7 @@
|
|||
RTLLuLiveIE,
|
||||
RTLLuRadioIE,
|
||||
)
|
||||
from .rtl2 import (
|
||||
RTL2IE,
|
||||
RTL2YouIE,
|
||||
RTL2YouSeriesIE,
|
||||
)
|
||||
from .rtl2 import RTL2IE
|
||||
from .rtnews import (
|
||||
RTNewsIE,
|
||||
RTDocumentryIE,
|
||||
|
@ -1707,10 +1636,8 @@
|
|||
RTVEInfantilIE,
|
||||
RTVETelevisionIE,
|
||||
)
|
||||
from .rtvnh import RTVNHIE
|
||||
from .rtvs import RTVSIE
|
||||
from .rtvslo import RTVSLOIE
|
||||
from .ruhd import RUHDIE
|
||||
from .rule34video import Rule34VideoIE
|
||||
from .rumble import (
|
||||
RumbleEmbedIE,
|
||||
|
@ -1759,6 +1686,11 @@
|
|||
from .sapo import SapoIE
|
||||
from .savefrom import SaveFromIE
|
||||
from .sbs import SBSIE
|
||||
from .sbscokr import (
|
||||
SBSCoKrIE,
|
||||
SBSCoKrAllvodProgramIE,
|
||||
SBSCoKrProgramsVodIE,
|
||||
)
|
||||
from .screen9 import Screen9IE
|
||||
from .screencast import ScreencastIE
|
||||
from .screencastify import ScreencastifyIE
|
||||
|
@ -1787,10 +1719,6 @@
|
|||
ShahidIE,
|
||||
ShahidShowIE,
|
||||
)
|
||||
from .shared import (
|
||||
SharedIE,
|
||||
VivoIE,
|
||||
)
|
||||
from .sharevideos import ShareVideosEmbedIE
|
||||
from .sibnet import SibnetEmbedIE
|
||||
from .shemaroome import ShemarooMeIE
|
||||
|
@ -1868,7 +1796,6 @@
|
|||
SpankBangIE,
|
||||
SpankBangPlaylistIE,
|
||||
)
|
||||
from .spankwire import SpankwireIE
|
||||
from .spiegel import SpiegelIE
|
||||
from .spike import (
|
||||
BellatorIE,
|
||||
|
@ -1903,6 +1830,8 @@
|
|||
from .stacommu import (
|
||||
StacommuLiveIE,
|
||||
StacommuVODIE,
|
||||
TheaterComplexTownVODIE,
|
||||
TheaterComplexTownPPVIE,
|
||||
)
|
||||
from .stanfordoc import StanfordOpenClassroomIE
|
||||
from .startv import StarTVIE
|
||||
|
@ -1916,7 +1845,6 @@
|
|||
StoryFireSeriesIE,
|
||||
)
|
||||
from .streamable import StreamableIE
|
||||
from .streamcloud import StreamcloudIE
|
||||
from .streamcz import StreamCZIE
|
||||
from .streamff import StreamFFIE
|
||||
from .streetvoice import StreetVoiceIE
|
||||
|
@ -1936,7 +1864,6 @@
|
|||
SVTSeriesIE,
|
||||
)
|
||||
from .swearnet import SwearnetEpisodeIE
|
||||
from .swrmediathek import SWRMediathekIE
|
||||
from .syvdk import SYVDKIE
|
||||
from .syfy import SyfyIE
|
||||
from .sztvhu import SztvHuIE
|
||||
|
@ -1963,7 +1890,6 @@
|
|||
ConanClassicIE,
|
||||
)
|
||||
from .teamtreehouse import TeamTreeHouseIE
|
||||
from .techtalks import TechTalksIE
|
||||
from .ted import (
|
||||
TedEmbedIE,
|
||||
TedPlaylistIE,
|
||||
|
@ -2005,6 +1931,10 @@
|
|||
from .testurl import TestURLIE
|
||||
from .tf1 import TF1IE
|
||||
from .tfo import TFOIE
|
||||
from .theguardian import (
|
||||
TheGuardianPodcastIE,
|
||||
TheGuardianPodcastPlaylistIE,
|
||||
)
|
||||
from .theholetv import TheHoleTvIE
|
||||
from .theintercept import TheInterceptIE
|
||||
from .theplatform import (
|
||||
|
@ -2015,7 +1945,6 @@
|
|||
from .thesun import TheSunIE
|
||||
from .theweatherchannel import TheWeatherChannelIE
|
||||
from .thisamericanlife import ThisAmericanLifeIE
|
||||
from .thisav import ThisAVIE
|
||||
from .thisoldhouse import ThisOldHouseIE
|
||||
from .thisvid import (
|
||||
ThisVidIE,
|
||||
|
@ -2037,7 +1966,6 @@
|
|||
TikTokLiveIE,
|
||||
DouyinIE,
|
||||
)
|
||||
from .tinypic import TinyPicIE
|
||||
from .tmz import TMZIE
|
||||
from .tnaflix import (
|
||||
TNAFlixNetworkEmbedIE,
|
||||
|
@ -2052,10 +1980,6 @@
|
|||
from .toggo import (
|
||||
ToggoIE,
|
||||
)
|
||||
from .tokentube import (
|
||||
TokentubeIE,
|
||||
TokentubeChannelIE
|
||||
)
|
||||
from .tonline import TOnlineIE
|
||||
from .toongoggles import ToonGogglesIE
|
||||
from .toutv import TouTvIE
|
||||
|
@ -2066,7 +1990,6 @@
|
|||
TrillerUserIE,
|
||||
TrillerShortIE,
|
||||
)
|
||||
from .trilulilu import TriluliluIE
|
||||
from .trovo import (
|
||||
TrovoIE,
|
||||
TrovoVodIE,
|
||||
|
@ -2091,7 +2014,6 @@
|
|||
TuneInPodcastEpisodeIE,
|
||||
TuneInShortenerIE,
|
||||
)
|
||||
from .tunepk import TunePkIE
|
||||
from .turbo import TurboIE
|
||||
from .tv2 import (
|
||||
TV2IE,
|
||||
|
@ -2133,16 +2055,7 @@
|
|||
from .tviplayer import TVIPlayerIE
|
||||
from .tvland import TVLandIE
|
||||
from .tvn24 import TVN24IE
|
||||
from .tvnet import TVNetIE
|
||||
from .tvnoe import TVNoeIE
|
||||
from .tvnow import (
|
||||
TVNowIE,
|
||||
TVNowFilmIE,
|
||||
TVNowNewIE,
|
||||
TVNowSeasonIE,
|
||||
TVNowAnnualIE,
|
||||
TVNowShowIE,
|
||||
)
|
||||
from .tvopengr import (
|
||||
TVOpenGrWatchIE,
|
||||
TVOpenGrEmbedIE,
|
||||
|
@ -2160,7 +2073,6 @@
|
|||
)
|
||||
from .tvplayer import TVPlayerIE
|
||||
from .tweakers import TweakersIE
|
||||
from .twentyfourvideo import TwentyFourVideoIE
|
||||
from .twentymin import TwentyMinutenIE
|
||||
from .twentythreevideo import TwentyThreeVideoIE
|
||||
from .twitcasting import (
|
||||
|
@ -2209,7 +2121,6 @@
|
|||
from .umg import UMGDeIE
|
||||
from .unistra import UnistraIE
|
||||
from .unity import UnityIE
|
||||
from .unscripted import UnscriptedNewsVideoIE
|
||||
from .unsupported import KnownDRMIE, KnownPiracyIE
|
||||
from .uol import UOLIE
|
||||
from .uplynk import (
|
||||
|
@ -2228,7 +2139,6 @@
|
|||
from .utreon import UtreonIE
|
||||
from .varzesh3 import Varzesh3IE
|
||||
from .vbox7 import Vbox7IE
|
||||
from .veehd import VeeHDIE
|
||||
from .veo import VeoIE
|
||||
from .veoh import (
|
||||
VeohIE,
|
||||
|
@ -2250,7 +2160,6 @@
|
|||
ViceArticleIE,
|
||||
ViceShowIE,
|
||||
)
|
||||
from .vidbit import VidbitIE
|
||||
from .viddler import ViddlerIE
|
||||
from .videa import VideaIE
|
||||
from .videocampus_sachsen import (
|
||||
|
@ -2278,6 +2187,7 @@
|
|||
VidioLiveIE
|
||||
)
|
||||
from .vidlii import VidLiiIE
|
||||
from .vidly import VidlyIE
|
||||
from .viewlift import (
|
||||
ViewLiftIE,
|
||||
ViewLiftEmbedIE,
|
||||
|
@ -2300,7 +2210,6 @@
|
|||
VimmIE,
|
||||
VimmRecordingIE,
|
||||
)
|
||||
from .vimple import VimpleIE
|
||||
from .vine import (
|
||||
VineIE,
|
||||
VineUserIE,
|
||||
|
@ -2324,10 +2233,8 @@
|
|||
VKPlayLiveIE,
|
||||
)
|
||||
from .vocaroo import VocarooIE
|
||||
from .vodlocker import VodlockerIE
|
||||
from .vodpl import VODPlIE
|
||||
from .vodplatform import VODPlatformIE
|
||||
from .voicerepublic import VoiceRepublicIE
|
||||
from .voicy import (
|
||||
VoicyIE,
|
||||
VoicyChannelIE,
|
||||
|
@ -2347,23 +2254,13 @@
|
|||
KetnetIE,
|
||||
DagelijkseKostIE,
|
||||
)
|
||||
from .vrak import VrakIE
|
||||
from .vrv import (
|
||||
VRVIE,
|
||||
VRVSeriesIE,
|
||||
)
|
||||
from .vshare import VShareIE
|
||||
from .vtm import VTMIE
|
||||
from .medialaan import MedialaanIE
|
||||
from .vuclip import VuClipIE
|
||||
from .vupload import VuploadIE
|
||||
from .vvvvid import (
|
||||
VVVVIDIE,
|
||||
VVVVIDShowIE,
|
||||
)
|
||||
from .vyborymos import VyboryMosIE
|
||||
from .vzaar import VzaarIE
|
||||
from .wakanim import WakanimIE
|
||||
from .walla import WallaIE
|
||||
from .washingtonpost import (
|
||||
WashingtonPostIE,
|
||||
|
@ -2375,8 +2272,6 @@
|
|||
WASDTVClipIE,
|
||||
)
|
||||
from .wat import WatIE
|
||||
from .watchbox import WatchBoxIE
|
||||
from .watchindianporn import WatchIndianPornIE
|
||||
from .wdr import (
|
||||
WDRIE,
|
||||
WDRPageIE,
|
||||
|
@ -2410,7 +2305,6 @@
|
|||
from .weyyak import WeyyakIE
|
||||
from .whyp import WhypIE
|
||||
from .wikimedia import WikimediaIE
|
||||
from .willow import WillowIE
|
||||
from .wimbledon import WimbledonIE
|
||||
from .wimtv import WimTVIE
|
||||
from .whowatch import WhoWatchIE
|
||||
|
@ -2444,7 +2338,6 @@
|
|||
WykopPostCommentIE,
|
||||
)
|
||||
from .xanimu import XanimuIE
|
||||
from .xbef import XBefIE
|
||||
from .xboxclips import XboxClipsIE
|
||||
from .xfileshare import XFileShareIE
|
||||
from .xhamster import (
|
||||
|
@ -2460,8 +2353,6 @@
|
|||
from .xminus import XMinusIE
|
||||
from .xnxx import XNXXIE
|
||||
from .xstream import XstreamIE
|
||||
from .xtube import XTubeUserIE, XTubeIE
|
||||
from .xuite import XuiteIE
|
||||
from .xvideos import (
|
||||
XVideosIE,
|
||||
XVideosQuickiesIE
|
||||
|
@ -2491,10 +2382,7 @@
|
|||
YappyIE,
|
||||
YappyProfileIE,
|
||||
)
|
||||
from .yesjapan import YesJapanIE
|
||||
from .yinyuetai import YinYueTaiIE
|
||||
from .yle_areena import YleAreenaIE
|
||||
from .ynet import YnetIE
|
||||
from .youjizz import YouJizzIE
|
||||
from .youku import (
|
||||
YoukuIE,
|
||||
|
@ -2570,6 +2458,9 @@
|
|||
ZingMp3ChartMusicVideoIE,
|
||||
ZingMp3UserIE,
|
||||
ZingMp3HubIE,
|
||||
ZingMp3LiveRadioIE,
|
||||
ZingMp3PodcastEpisodeIE,
|
||||
ZingMp3PodcastIE,
|
||||
)
|
||||
from .zoom import ZoomIE
|
||||
from .zype import ZypeIE
|
||||

yt_dlp/extractor/abc.py

@ -16,6 +16,7 @@
    try_get,
    unescapeHTML,
    update_url_query,
    url_or_none,
)

@ -379,6 +380,18 @@ class ABCIViewShowSeriesIE(InfoExtractor):
            'noplaylist': True,
            'skip_download': 'm3u8',
        },
    }, {
        # 'videoEpisodes' is a dict with `items` key
        'url': 'https://iview.abc.net.au/show/7-30-mark-humphries-satire',
        'info_dict': {
            'id': '178458-0',
            'title': 'Episodes',
            'description': 'Satirist Mark Humphries brings his unique perspective on current political events for 7.30.',
            'series': '7.30 Mark Humphries Satire',
            'season': 'Episodes',
            'thumbnail': r're:^https?://cdn\.iview\.abc\.net\.au/thumbs/.*\.jpg$'
        },
        'playlist_count': 15,
    }]

    def _real_extract(self, url):

@ -398,12 +411,14 @@ def _real_extract(self, url):
        series = video_data['selectedSeries']
        return {
            '_type': 'playlist',
            'entries': [self.url_result(episode['shareUrl'])
                        for episode in series['_embedded']['videoEpisodes']],
            'entries': [self.url_result(episode_url, ABCIViewIE)
                        for episode_url in traverse_obj(series, (
                            '_embedded', 'videoEpisodes', (None, 'items'), ..., 'shareUrl', {url_or_none}))],
            'id': series.get('id'),
            'title': dict_get(series, ('title', 'displaySubtitle')),
            'description': series.get('description'),
            'series': dict_get(series, ('showTitle', 'displayTitle')),
            'season': dict_get(series, ('title', 'displaySubtitle')),
            'thumbnail': series.get('thumbnail'),
            'thumbnail': traverse_obj(
                series, 'thumbnail', ('images', lambda _, v: v['name'] == 'seriesThumbnail', 'url'), get_all=False),
        }
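
The rewritten 'entries' above leans on traverse_obj branching: the (None, 'items') step tries the object itself and then its 'items' key, so a 'videoEpisodes' value that is either a plain list or a dict wrapping the list under 'items' resolves through the same path (the real code additionally filters each result through url_or_none). A minimal sketch of that behaviour against two fabricated response shapes:

    from yt_dlp.utils.traversal import traverse_obj

    # fabricated API responses: the same path handles both shapes
    as_list = {'_embedded': {'videoEpisodes': [{'shareUrl': 'https://iview.abc.net.au/video/a'}]}}
    as_dict = {'_embedded': {'videoEpisodes': {'items': [{'shareUrl': 'https://iview.abc.net.au/video/b'}]}}}

    path = ('_embedded', 'videoEpisodes', (None, 'items'), ..., 'shareUrl')
    print(traverse_obj(as_list, path))  # ['https://iview.abc.net.au/video/a']
    print(traverse_obj(as_dict, path))  # ['https://iview.abc.net.au/video/b']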

yt_dlp/extractor/abematv.py

@ -211,7 +211,8 @@ class AbemaTVIE(AbemaTVBaseIE):
            'id': '194-25_s2_p1',
            'title': '第1話 「チーズケーキ」 「モーニング再び」',
            'series': '異世界食堂2',
            'series_number': 2,
            'season': 'シーズン2',
            'season_number': 2,
            'episode': '第1話 「チーズケーキ」 「モーニング再び」',
            'episode_number': 1,
        },

@ -347,12 +348,12 @@ def _real_extract(self, url):
            )?
            ''', r'\1', og_desc)

        # canonical URL may contain series and episode number
        # canonical URL may contain season and episode number
        mobj = re.search(r's(\d+)_p(\d+)$', canonical_url)
        if mobj:
            seri = int_or_none(mobj.group(1), default=float('inf'))
            epis = int_or_none(mobj.group(2), default=float('inf'))
            info['series_number'] = seri if seri < 100 else None
            info['season_number'] = seri if seri < 100 else None
            # some anime like Detective Conan (though not available in AbemaTV)
            # has more than 1000 episodes (1026 as of 2021/11/15)
            info['episode_number'] = epis if epis < 2000 else None
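
The float('inf') defaults above make one comparison double as both a parse check and a plausibility cap: an unparseable group and an absurdly large number fail the `< 100` / `< 2000` tests the same way. The same logic as a standalone sketch (the function name is illustrative):

    import re

    def parse_canonical_url(canonical_url):
        # e.g. '.../194-25_s2_p1' -> season 2, episode 1
        mobj = re.search(r's(\d+)_p(\d+)$', canonical_url)
        if not mobj:
            return None, None
        season, episode = int(mobj.group(1)), int(mobj.group(2))
        # reject implausible values rather than emitting garbage metadata
        return (season if season < 100 else None,
                episode if episode < 2000 else None)

    print(parse_canonical_url('https://abema.tv/video/episode/194-25_s2_p1'))  # (2, 1)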

@ -381,7 +382,7 @@ def _real_extract(self, url):
            self.report_warning('This is a premium-only stream')
        info.update(traverse_obj(api_response, {
            'series': ('series', 'title'),
            'season': ('season', 'title'),
            'season': ('season', 'name'),
            'season_number': ('season', 'sequence'),
            'episode_number': ('episode', 'number'),
        }))

yt_dlp/extractor/aenetworks.py

@ -121,11 +121,21 @@ class AENetworksIE(AENetworksBaseIE):
        'info_dict': {
            'id': '22253814',
            'ext': 'mp4',
            'title': 'Winter is Coming',
            'description': 'md5:641f424b7a19d8e24f26dea22cf59d74',
            'title': 'Winter Is Coming',
            'description': 'md5:a40e370925074260b1c8a633c632c63a',
            'timestamp': 1338306241,
            'upload_date': '20120529',
            'uploader': 'AENE-NEW',
            'duration': 2592.0,
            'thumbnail': r're:^https?://.*\.jpe?g$',
            'chapters': 'count:5',
            'tags': 'count:14',
            'categories': ['Mountain Men'],
            'episode_number': 1,
            'episode': 'Episode 1',
            'season': 'Season 1',
            'season_number': 1,
            'series': 'Mountain Men',
        },
        'params': {
            # m3u8 download

@ -143,6 +153,15 @@ class AENetworksIE(AENetworksBaseIE):
            'timestamp': 1452634428,
            'upload_date': '20160112',
            'uploader': 'AENE-NEW',
            'duration': 1277.695,
            'thumbnail': r're:^https?://.*\.jpe?g$',
            'chapters': 'count:4',
            'tags': 'count:23',
            'episode': 'Episode 1',
            'episode_number': 1,
            'season': 'Season 9',
            'season_number': 9,
            'series': 'Duck Dynasty',
        },
        'params': {
            # m3u8 download
yt_dlp/extractor/airmozilla.py

@ -1,63 +0,0 @@
import re

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    parse_duration,
    parse_iso8601,
)


class AirMozillaIE(InfoExtractor):
    _VALID_URL = r'https?://air\.mozilla\.org/(?P<id>[0-9a-z-]+)/?'
    _TEST = {
        'url': 'https://air.mozilla.org/privacy-lab-a-meetup-for-privacy-minded-people-in-san-francisco/',
        'md5': '8d02f53ee39cf006009180e21df1f3ba',
        'info_dict': {
            'id': '6x4q2w',
            'ext': 'mp4',
            'title': 'Privacy Lab - a meetup for privacy minded people in San Francisco',
            'thumbnail': r're:https?://.*/poster\.jpg',
            'description': 'Brings together privacy professionals and others interested in privacy at for-profits, non-profits, and NGOs in an effort to contribute to the state of the ecosystem...',
            'timestamp': 1422487800,
            'upload_date': '20150128',
            'location': 'SFO Commons',
            'duration': 3780,
            'view_count': int,
            'categories': ['Main', 'Privacy'],
        }
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        video_id = self._html_search_regex(r'//vid\.ly/(.*?)/embed', webpage, 'id')

        embed_script = self._download_webpage('https://vid.ly/{0}/embed'.format(video_id), video_id)
        jwconfig = self._parse_json(self._search_regex(
            r'initCallback\((.*)\);', embed_script, 'metadata'), video_id)['config']

        info_dict = self._parse_jwplayer_data(jwconfig, video_id)
        view_count = int_or_none(self._html_search_regex(
            r'Views since archived: ([0-9]+)',
            webpage, 'view count', fatal=False))
        timestamp = parse_iso8601(self._html_search_regex(
            r'<time datetime="(.*?)"', webpage, 'timestamp', fatal=False))
        duration = parse_duration(self._search_regex(
            r'Duration:\s*(\d+\s*hours?\s*\d+\s*minutes?)',
            webpage, 'duration', fatal=False))

        info_dict.update({
            'id': video_id,
            'title': self._og_search_title(webpage),
            'url': self._og_search_url(webpage),
            'display_id': display_id,
            'description': self._og_search_description(webpage),
            'timestamp': timestamp,
            'location': self._html_search_regex(r'Location: (.*)', webpage, 'location', default=None),
            'duration': duration,
            'view_count': view_count,
            'categories': re.findall(r'<a href=".*?" class="channel">(.*?)</a>', webpage),
        })

        return info_dict

253
yt_dlp/extractor/allstar.py
Normal file
@ -0,0 +1,253 @@
import functools
import json

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    OnDemandPagedList,
    int_or_none,
    join_nonempty,
    parse_qs,
    urljoin,
)
from ..utils.traversal import traverse_obj


_FIELDS = '''
    _id
    clipImageSource
    clipImageThumb
    clipLink
    clipTitle
    createdDate
    shareId
    user { _id }
    username
    views'''

_EXTRA_FIELDS = '''
    clipLength
    clipSizeBytes'''

_QUERIES = {
    'clip': '''query ($id: String!) {
        video: getClip(clipIdentifier: $id) {
            %s %s
        }
    }''' % (_FIELDS, _EXTRA_FIELDS),
    'montage': '''query ($id: String!) {
        video: getMontage(clipIdentifier: $id) {
            %s
        }
    }''' % _FIELDS,
    'Clips': '''query ($page: Int!, $user: String!, $game: Int) {
        videos: clips(search: createdDate, page: $page, user: $user, mobile: false, game: $game) {
            data { %s %s }
        }
    }''' % (_FIELDS, _EXTRA_FIELDS),
    'Montages': '''query ($page: Int!, $user: String!) {
        videos: montages(search: createdDate, page: $page, user: $user) {
            data { %s }
        }
    }''' % _FIELDS,
    'Mobile Clips': '''query ($page: Int!, $user: String!) {
        videos: clips(search: createdDate, page: $page, user: $user, mobile: true) {
            data { %s %s }
        }
    }''' % (_FIELDS, _EXTRA_FIELDS),
}


class AllstarBaseIE(InfoExtractor):
    @staticmethod
    def _parse_video_data(video_data):
        def media_url_or_none(path):
            return urljoin('https://media.allstar.gg/', path)

        info = traverse_obj(video_data, {
            'id': ('_id', {str}),
            'display_id': ('shareId', {str}),
            'title': ('clipTitle', {str}),
            'url': ('clipLink', {media_url_or_none}),
            'thumbnails': (('clipImageThumb', 'clipImageSource'), {'url': {media_url_or_none}}),
            'duration': ('clipLength', {int_or_none}),
            'filesize': ('clipSizeBytes', {int_or_none}),
            'timestamp': ('createdDate', {functools.partial(int_or_none, scale=1000)}),
            'uploader': ('username', {str}),
            'uploader_id': ('user', '_id', {str}),
            'view_count': ('views', {int_or_none}),
        })

        if info.get('id') and info.get('url'):
            basename = 'clip' if '/clips/' in info['url'] else 'montage'
            info['webpage_url'] = f'https://allstar.gg/{basename}?{basename}={info["id"]}'

        info.update({
            'extractor_key': AllstarIE.ie_key(),
            'extractor': AllstarIE.IE_NAME,
            'uploader_url': urljoin('https://allstar.gg/u/', info.get('uploader_id')),
        })

        return info

    def _call_api(self, query, variables, path, video_id=None, note=None):
        response = self._download_json(
            'https://a1.allstar.gg/graphql', video_id, note=note,
            headers={'content-type': 'application/json'},
            data=json.dumps({'variables': variables, 'query': query}).encode())

        errors = traverse_obj(response, ('errors', ..., 'message', {str}))
        if errors:
            raise ExtractorError('; '.join(errors))

        return traverse_obj(response, path)


class AllstarIE(AllstarBaseIE):
    _VALID_URL = r'https?://(?:www\.)?allstar\.gg/(?P<type>(?:clip|montage))\?(?P=type)=(?P<id>[^/?#&]+)'

    _TESTS = [{
        'url': 'https://allstar.gg/clip?clip=64482c2da9eec30008a67d1b',
        'info_dict': {
            'id': '64482c2da9eec30008a67d1b',
            'title': '4K on Inferno',
            'url': 'md5:66befb5381eef0c9456026386c25fa55',
            'thumbnail': r're:https://media\.allstar\.gg/.+\.(?:png|jpg)$',
            'uploader': 'chrk.',
            'ext': 'mp4',
            'duration': 20,
            'filesize': 21199257,
            'timestamp': 1682451501,
            'uploader_id': '62b8bdfc9021052f7905882d',
            'uploader_url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d',
            'upload_date': '20230425',
            'view_count': int,
        }
    }, {
        'url': 'https://allstar.gg/clip?clip=8LJLY4JKB',
        'info_dict': {
            'id': '64a1ec6b887f4c0008dc50b8',
            'display_id': '8LJLY4JKB',
            'title': 'AK-47 3K on Mirage',
            'url': 'md5:dde224fd12f035c0e2529a4ae34c4283',
            'ext': 'mp4',
            'thumbnail': r're:https://media\.allstar\.gg/.+\.(?:png|jpg)$',
            'duration': 16,
            'filesize': 30175859,
            'timestamp': 1688333419,
            'uploader': 'cherokee',
            'uploader_id': '62b8bdfc9021052f7905882d',
            'uploader_url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d',
            'upload_date': '20230702',
            'view_count': int,
        }
    }, {
        'url': 'https://allstar.gg/montage?montage=643e64089da7e9363e1fa66c',
        'info_dict': {
            'id': '643e64089da7e9363e1fa66c',
            'display_id': 'APQLGM2IMXW',
            'title': 'cherokee Rapid Fire Snipers Montage',
            'url': 'md5:a3ee356022115db2b27c81321d195945',
            'thumbnail': r're:https://media\.allstar\.gg/.+\.(?:png|jpg)$',
            'ext': 'mp4',
            'timestamp': 1681810448,
            'uploader': 'cherokee',
            'uploader_id': '62b8bdfc9021052f7905882d',
            'uploader_url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d',
            'upload_date': '20230418',
            'view_count': int,
        }
    }, {
        'url': 'https://allstar.gg/montage?montage=RILJMH6QOS',
        'info_dict': {
            'id': '64a2697372ce3703de29e868',
            'display_id': 'RILJMH6QOS',
            'title': 'cherokee Rapid Fire Snipers Montage',
            'url': 'md5:d5672e6f88579730c2310a80fdbc4030',
            'thumbnail': r're:https://media\.allstar\.gg/.+\.(?:png|jpg)$',
            'ext': 'mp4',
            'timestamp': 1688365434,
            'uploader': 'cherokee',
            'uploader_id': '62b8bdfc9021052f7905882d',
            'uploader_url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d',
            'upload_date': '20230703',
            'view_count': int,
        }
    }]

    def _real_extract(self, url):
        query_id, video_id = self._match_valid_url(url).group('type', 'id')

        return self._parse_video_data(
            self._call_api(
                _QUERIES.get(query_id), {'id': video_id}, ('data', 'video'), video_id))


class AllstarProfileIE(AllstarBaseIE):
    _VALID_URL = r'https?://(?:www\.)?allstar\.gg/(?:profile\?user=|u/)(?P<id>[^/?#&]+)'

    _TESTS = [{
        'url': 'https://allstar.gg/profile?user=62b8bdfc9021052f7905882d',
        'info_dict': {
            'id': '62b8bdfc9021052f7905882d-clips',
            'title': 'cherokee - Clips',
        },
        'playlist_mincount': 15
    }, {
        'url': 'https://allstar.gg/u/cherokee?game=730&view=Clips',
        'info_dict': {
            'id': '62b8bdfc9021052f7905882d-clips-730',
            'title': 'cherokee - Clips - 730',
        },
        'playlist_mincount': 15
    }, {
        'url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d?view=Montages',
        'info_dict': {
            'id': '62b8bdfc9021052f7905882d-montages',
            'title': 'cherokee - Montages',
        },
        'playlist_mincount': 4
    }, {
        'url': 'https://allstar.gg/profile?user=cherokee&view=Mobile Clips',
        'info_dict': {
            'id': '62b8bdfc9021052f7905882d-mobile',
            'title': 'cherokee - Mobile Clips',
        },
        'playlist_mincount': 1
    }]

    _PAGE_SIZE = 10

    def _get_page(self, user_id, display_id, game, query, page_num):
        page_num += 1

        for video_data in self._call_api(
                query, {
                    'user': user_id,
                    'page': page_num,
                    'game': game,
                }, ('data', 'videos', 'data'), display_id, f'Downloading page {page_num}'):
            yield self._parse_video_data(video_data)

    def _real_extract(self, url):
        display_id = self._match_id(url)
        profile_data = self._download_json(
            urljoin('https://api.allstar.gg/v1/users/profile/', display_id), display_id)
        user_id = traverse_obj(profile_data, ('data', ('_id'), {str}))
        if not user_id:
            raise ExtractorError('Unable to extract the user id')

        username = traverse_obj(profile_data, ('data', 'profile', ('username'), {str}))
        url_query = parse_qs(url)
        game = traverse_obj(url_query, ('game', 0, {int_or_none}))
        query_id = traverse_obj(url_query, ('view', 0), default='Clips')

        if query_id not in ('Clips', 'Montages', 'Mobile Clips'):
            raise ExtractorError(f'Unsupported playlist URL type {query_id!r}')

        return self.playlist_result(
            OnDemandPagedList(
                functools.partial(
                    self._get_page, user_id, display_id, game, _QUERIES.get(query_id)), self._PAGE_SIZE),
            playlist_id=join_nonempty(user_id, query_id.lower().split()[0], game),
            playlist_title=join_nonempty((username or display_id), query_id, game, delim=' - '))
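
AllstarProfileIE pages through the GraphQL results lazily: OnDemandPagedList only invokes the page function for the slices the caller actually requests, so listing the first few entries never downloads later pages. A sketch of the pattern with a fabricated page function standing in for the GraphQL call:

    import functools

    from yt_dlp.utils import OnDemandPagedList

    PAGE_SIZE = 10

    def fetch_page(source, page_num):
        # stand-in for _call_api(); pages are numbered from 0 here
        return [f'{source}-item-{page_num * PAGE_SIZE + i}' for i in range(PAGE_SIZE)]

    pages = OnDemandPagedList(functools.partial(fetch_page, 'clips'), PAGE_SIZE)
    print([pages[i] for i in range(3)])  # only page 0 is ever fetched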

96
yt_dlp/extractor/altcensored.py
Normal file
@ -0,0 +1,96 @@
import re

from .archiveorg import ArchiveOrgIE
from .common import InfoExtractor
from ..utils import (
    InAdvancePagedList,
    int_or_none,
    orderedSet,
    str_to_int,
    urljoin,
)


class AltCensoredIE(InfoExtractor):
    IE_NAME = 'altcensored'
    _VALID_URL = r'https?://(?:www\.)?altcensored\.com/(?:watch\?v=|embed/)(?P<id>[^/?#]+)'
    _TESTS = [{
        'url': 'https://www.altcensored.com/watch?v=k0srjLSkga8',
        'info_dict': {
            'id': 'youtube-k0srjLSkga8',
            'ext': 'webm',
            'title': "QUELLES SONT LES CONSÉQUENCES DE L'HYPERSEXUALISATION DE LA SOCIÉTÉ ?",
            'display_id': 'k0srjLSkga8.webm',
            'release_date': '20180403',
            'creator': 'Virginie Vota',
            'release_year': 2018,
            'upload_date': '20230318',
            'uploader': 'admin@altcensored.com',
            'description': 'md5:0b38a8fc04103579d5c1db10a247dc30',
            'timestamp': 1679161343,
            'track': 'k0srjLSkga8',
            'duration': 926.09,
            'thumbnail': 'https://archive.org/download/youtube-k0srjLSkga8/youtube-k0srjLSkga8.thumbs/k0srjLSkga8_000925.jpg',
            'view_count': int,
            'categories': ['News & Politics'],
        }
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        return {
            '_type': 'url_transparent',
            'url': f'https://archive.org/details/youtube-{video_id}',
            'ie_key': ArchiveOrgIE.ie_key(),
            'view_count': str_to_int(self._html_search_regex(
                r'YouTube Views:(?:\s|&nbsp;)*([\d,]+)', webpage, 'view count', default=None)),
            'categories': self._html_search_regex(
                r'<a href="/category/\d+">\s*\n?\s*([^<]+)</a>',
                webpage, 'category', default='').split() or None,
        }


class AltCensoredChannelIE(InfoExtractor):
    IE_NAME = 'altcensored:channel'
    _VALID_URL = r'https?://(?:www\.)?altcensored\.com/channel/(?!page|table)(?P<id>[^/?#]+)'
    _PAGE_SIZE = 24
    _TESTS = [{
        'url': 'https://www.altcensored.com/channel/UCFPTO55xxHqFqkzRZHu4kcw',
        'info_dict': {
            'title': 'Virginie Vota',
            'id': 'UCFPTO55xxHqFqkzRZHu4kcw',
        },
        'playlist_count': 91
    }, {
        'url': 'https://altcensored.com/channel/UC9CcJ96HKMWn0LZlcxlpFTw',
        'info_dict': {
            'title': 'yukikaze775',
            'id': 'UC9CcJ96HKMWn0LZlcxlpFTw',
        },
        'playlist_count': 4
    }]

    def _real_extract(self, url):
        channel_id = self._match_id(url)
        webpage = self._download_webpage(
            url, channel_id, 'Download channel webpage', 'Unable to get channel webpage')
        title = self._html_search_meta('altcen_title', webpage, 'title', fatal=False)
        page_count = int_or_none(self._html_search_regex(
            r'<a[^>]+href="/channel/\w+/page/(\d+)">(?:\1)</a>',
            webpage, 'page count', default='1'))

        def page_func(page_num):
            page_num += 1
            webpage = self._download_webpage(
                f'https://altcensored.com/channel/{channel_id}/page/{page_num}',
                channel_id, note=f'Downloading page {page_num}')

            items = re.findall(r'<a[^>]+href="(/watch\?v=[^"]+)', webpage)
            return [self.url_result(urljoin('https://www.altcensored.com', path), AltCensoredIE)
                    for path in orderedSet(items)]

        return self.playlist_result(
            InAdvancePagedList(page_func, page_count, self._PAGE_SIZE),
            playlist_id=channel_id, playlist_title=title)
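
AltCensoredChannelIE scrapes the channel's page count up front, which is why it can use InAdvancePagedList rather than OnDemandPagedList: with a known page count the playlist has a definite end without probing for an empty page. A sketch of the idea with a fabricated page function:

    from yt_dlp.utils import InAdvancePagedList

    PAGE_SIZE = 24

    def page_func(page_num):
        # stand-in for scraping /channel/<id>/page/<n>; page_num starts at 0
        return [f'watch?v=video-{page_num}-{i}' for i in range(PAGE_SIZE)]

    playlist = InAdvancePagedList(page_func, 4, PAGE_SIZE)  # 4 pages known in advance
    entries = list(playlist)
    print(len(entries), entries[0])  # 96 watch?v=video-0-0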

yt_dlp/extractor/aol.py

@ -10,6 +10,7 @@


class AolIE(YahooIE):  # XXX: Do not subclass from concrete IE
    _WORKING = False
    IE_NAME = 'aol.com'
    _VALID_URL = r'(?:aol-video:|https?://(?:www\.)?aol\.(?:com|ca|co\.uk|de|jp)/video/(?:[^/]+/)*)(?P<id>\d{9}|[0-9a-f]{24}|[0-9a-f]{8}-(?:[0-9a-f]{4}-){3}[0-9a-f]{12})'

yt_dlp/extractor/archiveorg.py

@ -52,7 +52,6 @@ class ArchiveOrgIE(InfoExtractor):
            'creator': 'SRI International',
            'uploader': 'laura@archive.org',
            'thumbnail': r're:https://archive\.org/download/.*\.jpg',
            'release_year': 1968,
            'display_id': 'XD300-23_68HighlightsAResearchCntAugHumanIntellect.cdr',
            'track': 'XD300-23 68HighlightsAResearchCntAugHumanIntellect',

@ -134,7 +133,6 @@ class ArchiveOrgIE(InfoExtractor):
            'album': '1977-05-08 - Barton Hall - Cornell University',
            'release_date': '19770508',
            'display_id': 'gd1977-05-08d01t07.flac',
            'release_year': 1977,
            'track_number': 7,
        },
    }, {
yt_dlp/extractor/atttechchannel.py

@ -1,53 +0,0 @@
from .common import InfoExtractor
from ..utils import unified_strdate


class ATTTechChannelIE(InfoExtractor):
    _VALID_URL = r'https?://techchannel\.att\.com/play-video\.cfm/([^/]+/)*(?P<id>.+)'
    _TEST = {
        'url': 'http://techchannel.att.com/play-video.cfm/2014/1/27/ATT-Archives-The-UNIX-System-Making-Computers-Easier-to-Use',
        'info_dict': {
            'id': '11316',
            'display_id': 'ATT-Archives-The-UNIX-System-Making-Computers-Easier-to-Use',
            'ext': 'flv',
            'title': 'AT&T Archives : The UNIX System: Making Computers Easier to Use',
            'description': 'A 1982 film about UNIX is the foundation for software in use around Bell Labs and AT&T.',
            'thumbnail': r're:^https?://.*\.jpg$',
            'upload_date': '20140127',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        video_url = self._search_regex(
            r"url\s*:\s*'(rtmp://[^']+)'",
            webpage, 'video URL')

        video_id = self._search_regex(
            r'mediaid\s*=\s*(\d+)',
            webpage, 'video id', fatal=False)

        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage)
        thumbnail = self._og_search_thumbnail(webpage)
        upload_date = unified_strdate(self._search_regex(
            r'[Rr]elease\s+date:\s*(\d{1,2}/\d{1,2}/\d{4})',
            webpage, 'upload date', fatal=False), False)

        return {
            'id': video_id,
            'display_id': display_id,
            'url': video_url,
            'ext': 'flv',
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
        }
yt_dlp/extractor/beatbump.py

@ -3,14 +3,13 @@


class BeatBumpVideoIE(InfoExtractor):
    _VALID_URL = r'https://beatbump\.ml/listen\?id=(?P<id>[\w-]+)'
    _VALID_URL = r'https://beatbump\.(?:ml|io)/listen\?id=(?P<id>[\w-]+)'
    _TESTS = [{
        'url': 'https://beatbump.ml/listen?id=MgNrAu2pzNs',
        'md5': '5ff3fff41d3935b9810a9731e485fe66',
        'info_dict': {
            'id': 'MgNrAu2pzNs',
            'ext': 'mp4',
            'uploader_url': 'http://www.youtube.com/channel/UC-pWHpBjdGG69N9mM2auIAA',
            'artist': 'Stephen',
            'thumbnail': 'https://i.ytimg.com/vi_webp/MgNrAu2pzNs/maxresdefault.webp',
            'channel_url': 'https://www.youtube.com/channel/UC-pWHpBjdGG69N9mM2auIAA',

@ -22,10 +21,9 @@ class BeatBumpVideoIE(InfoExtractor):
            'alt_title': 'Voyeur Girl',
            'view_count': int,
            'track': 'Voyeur Girl',
            'uploader': 'Stephen - Topic',
            'uploader': 'Stephen',
            'title': 'Voyeur Girl',
            'channel_follower_count': int,
            'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
            'age_limit': 0,
            'availability': 'public',
            'live_status': 'not_live',

@ -36,7 +34,12 @@ class BeatBumpVideoIE(InfoExtractor):
            'tags': 'count:11',
            'creator': 'Stephen',
            'channel_id': 'UC-pWHpBjdGG69N9mM2auIAA',
        }
            'channel_is_verified': True,
            'heatmap': 'count:100',
        },
    }, {
        'url': 'https://beatbump.io/listen?id=LDGZAprNGWo',
        'only_matching': True,
    }]

    def _real_extract(self, url):

@ -45,7 +48,7 @@ def _real_extract(self, url):


class BeatBumpPlaylistIE(InfoExtractor):
    _VALID_URL = r'https://beatbump\.ml/(?:release\?id=|artist/|playlist/)(?P<id>[\w-]+)'
    _VALID_URL = r'https://beatbump\.(?:ml|io)/(?:release\?id=|artist/|playlist/)(?P<id>[\w-]+)'
    _TESTS = [{
        'url': 'https://beatbump.ml/release?id=MPREb_gTAcphH99wE',
        'playlist_count': 50,

@ -56,25 +59,28 @@ class BeatBumpPlaylistIE(InfoExtractor):
            'title': 'Album - Royalty Free Music Library V2 (50 Songs)',
            'description': '',
            'tags': [],
            'modified_date': '20221223',
        }
            'modified_date': '20231110',
        },
        'expected_warnings': ['YouTube Music is not directly supported'],
    }, {
        'url': 'https://beatbump.ml/artist/UC_aEa8K-EOJ3D6gOs7HcyNg',
        'playlist_mincount': 1,
        'params': {'flatplaylist': True},
        'info_dict': {
            'id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
            'uploader_url': 'https://www.youtube.com/channel/UC_aEa8K-EOJ3D6gOs7HcyNg',
            'uploader_url': 'https://www.youtube.com/@NoCopyrightSounds',
            'channel_url': 'https://www.youtube.com/channel/UC_aEa8K-EOJ3D6gOs7HcyNg',
            'uploader_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
            'uploader_id': '@NoCopyrightSounds',
            'channel_follower_count': int,
            'title': 'NoCopyrightSounds - Videos',
            'title': 'NoCopyrightSounds',
            'uploader': 'NoCopyrightSounds',
            'description': 'md5:cd4fd53d81d363d05eee6c1b478b491a',
            'channel': 'NoCopyrightSounds',
            'tags': 'count:12',
            'tags': 'count:65',
            'channel_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
            'channel_is_verified': True,
        },
        'expected_warnings': ['YouTube Music is not directly supported'],
    }, {
        'url': 'https://beatbump.ml/playlist/VLPLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
        'playlist_mincount': 1,

@ -84,16 +90,20 @@ class BeatBumpPlaylistIE(InfoExtractor):
            'uploader_url': 'https://www.youtube.com/@NoCopyrightSounds',
            'description': 'Providing you with copyright free / safe music for gaming, live streaming, studying and more!',
            'view_count': int,
            'channel_url': 'https://www.youtube.com/@NoCopyrightSounds',
            'uploader_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
            'channel_url': 'https://www.youtube.com/channel/UC_aEa8K-EOJ3D6gOs7HcyNg',
            'uploader_id': '@NoCopyrightSounds',
            'title': 'NCS : All Releases 💿',
            'uploader': 'NoCopyrightSounds',
            'availability': 'public',
            'channel': 'NoCopyrightSounds',
            'tags': [],
            'modified_date': '20221225',
            'modified_date': '20231112',
            'channel_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
        }
        },
        'expected_warnings': ['YouTube Music is not directly supported'],
    }, {
        'url': 'https://beatbump.io/playlist/VLPLFCHGavqRG-q_2ZhmgU2XB2--ZY6irT1c',
        'only_matching': True,
    }]

    def _real_extract(self, url):
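
Both BeatBump classes get the same fix: widening the domain to beatbump\.(?:ml|io) keeps existing .ml links working while also matching the site's new .io home. A quick check of the widened pattern:

    import re

    _VALID_URL = r'https://beatbump\.(?:ml|io)/listen\?id=(?P<id>[\w-]+)'
    for url in ('https://beatbump.ml/listen?id=MgNrAu2pzNs',
                'https://beatbump.io/listen?id=LDGZAprNGWo'):
        print(re.match(_VALID_URL, url).group('id'))  # MgNrAu2pzNs, then LDGZAprNGWo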

yt_dlp/extractor/behindkink.py

@ -3,6 +3,7 @@


class BehindKinkIE(InfoExtractor):
    _WORKING = False
    _VALID_URL = r'https?://(?:www\.)?behindkink\.com/(?P<year>[0-9]{4})/(?P<month>[0-9]{2})/(?P<day>[0-9]{2})/(?P<id>[^/#?_]+)'
    _TEST = {
        'url': 'http://www.behindkink.com/2014/12/05/what-are-you-passionate-about-marley-blaze/',
yt_dlp/extractor/bet.py

@ -1,10 +1,9 @@
from .mtv import MTVServicesInfoExtractor
from ..utils import unified_strdate

# TODO Remove - Reason: Outdated Site


class BetIE(MTVServicesInfoExtractor):
    _WORKING = False
    _VALID_URL = r'https?://(?:www\.)?bet\.com/(?:[^/]+/)+(?P<id>.+?)\.html'
    _TESTS = [
        {

yt_dlp/extractor/bfi.py

@ -5,6 +5,7 @@


class BFIPlayerIE(InfoExtractor):
    _WORKING = False
    IE_NAME = 'bfi:player'
    _VALID_URL = r'https?://player\.bfi\.org\.uk/[^/]+/film/watch-(?P<id>[\w-]+)-online'
    _TEST = {
yt_dlp/extractor/bfmtv.py

@ -7,7 +7,7 @@
class BFMTVBaseIE(InfoExtractor):
    _VALID_URL_BASE = r'https?://(?:www\.|rmc\.)?bfmtv\.com/'
    _VALID_URL_TMPL = _VALID_URL_BASE + r'(?:[^/]+/)*[^/?&#]+_%s[A-Z]-(?P<id>\d{12})\.html'
    _VIDEO_BLOCK_REGEX = r'(<div[^>]+class="video_block"[^>]*>)'
    _VIDEO_BLOCK_REGEX = r'(<div[^>]+class="video_block[^"]*"[^>]*>)'
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s'

    def _brightcove_url_result(self, video_id, video_block):

@ -55,8 +55,11 @@ class BFMTVLiveIE(BFMTVIE):  # XXX: Do not subclass from concrete IE
            'ext': 'mp4',
            'title': r're:^le direct BFMTV WEB \d{4}-\d{2}-\d{2} \d{2}:\d{2}$',
            'uploader_id': '876450610001',
            'upload_date': '20171018',
            'timestamp': 1508329950,
            'upload_date': '20220926',
            'timestamp': 1664207191,
            'live_status': 'is_live',
            'thumbnail': r're:https://.+/image\.jpg',
            'tags': [],
        },
        'params': {
            'skip_download': True,
yt_dlp/extractor/bilibili.py

@ -2,6 +2,7 @@
import functools
import hashlib
import itertools
import json
import math
import re
import time

@ -16,9 +17,11 @@
    InAdvancePagedList,
    OnDemandPagedList,
    bool_or_none,
    clean_html,
    filter_dict,
    float_or_none,
    format_field,
    get_element_by_class,
    int_or_none,
    join_nonempty,
    make_archive_id,

@ -88,6 +91,12 @@ def extract_formats(self, play_info):

        return formats

    def _download_playinfo(self, video_id, cid):
        return self._download_json(
            'https://api.bilibili.com/x/player/playurl', video_id,
            query={'bvid': video_id, 'cid': cid, 'fnval': 4048},
            note=f'Downloading video formats for cid {cid}')['data']

    def json2srt(self, json_data):
        srt_data = ''
        for idx, line in enumerate(json_data.get('body') or []):

@ -96,7 +105,7 @@ def json2srt(self, json_data):
                         f'{line["content"]}\n\n')
        return srt_data

    def _get_subtitles(self, video_id, aid, cid):
    def _get_subtitles(self, video_id, cid, aid=None):
        subtitles = {
            'danmaku': [{
                'ext': 'xml',

@ -104,8 +113,15 @@ def _get_subtitles(self, video_id, aid, cid):
            }]
        }

        video_info_json = self._download_json(f'https://api.bilibili.com/x/player/v2?aid={aid}&cid={cid}', video_id)
        for s in traverse_obj(video_info_json, ('data', 'subtitle', 'subtitles', ...)):
        subtitle_info = traverse_obj(self._download_json(
            'https://api.bilibili.com/x/player/v2', video_id,
            query={'aid': aid, 'cid': cid} if aid else {'bvid': video_id, 'cid': cid},
            note=f'Extracting subtitle info {cid}'), ('data', 'subtitle'))
        subs_list = traverse_obj(subtitle_info, ('subtitles', lambda _, v: v['subtitle_url'] and v['lan']))
        if not subs_list and traverse_obj(subtitle_info, 'allow_submit'):
            if not self._get_cookies('https://api.bilibili.com').get('SESSDATA'):  # no login session cookie
                self.report_warning(f'CC subtitles (if any) are only visible when logged in. {self._login_hint()}', only_once=True)
        for s in subs_list:
            subtitles.setdefault(s['lan'], []).append({
                'ext': 'srt',
                'data': self.json2srt(self._download_json(s['subtitle_url'], video_id))
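
json2srt, which the new code above feeds with each subtitle track, turns Bilibili's JSON subtitle body into SRT text. A self-contained sketch of the same transformation; the sample body is fabricated, but like the real format it uses 'from'/'to' offsets in seconds plus a 'content' string:

    def srt_timecode(seconds):
        # 71.5 -> '00:01:11,500'
        ms = round(seconds * 1000)
        h, ms = divmod(ms, 3600000)
        m, ms = divmod(ms, 60000)
        s, ms = divmod(ms, 1000)
        return f'{h:02d}:{m:02d}:{s:02d},{ms:03d}'

    def json2srt(json_data):
        return ''.join(
            f'{i + 1}\n{srt_timecode(line["from"])} --> {srt_timecode(line["to"])}\n{line["content"]}\n\n'
            for i, line in enumerate(json_data.get('body') or []))

    print(json2srt({'body': [{'from': 0.0, 'to': 2.5, 'content': '第一话'}]}))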

@ -155,7 +171,54 @@ def _get_episodes_from_season(self, ss_id, url):
        for entry in traverse_obj(season_info, (
                'result', 'main_section', 'episodes',
                lambda _, v: url_or_none(v['share_url']) and v['id'])):
            yield self.url_result(entry['share_url'], BiliBiliBangumiIE, f'ep{entry["id"]}')
            yield self.url_result(entry['share_url'], BiliBiliBangumiIE, str_or_none(entry.get('id')))

    def _get_divisions(self, video_id, graph_version, edges, edge_id, cid_edges=None):
        cid_edges = cid_edges or {}
        division_data = self._download_json(
            'https://api.bilibili.com/x/stein/edgeinfo_v2', video_id,
            query={'graph_version': graph_version, 'edge_id': edge_id, 'bvid': video_id},
            note=f'Extracting divisions from edge {edge_id}')
        edges.setdefault(edge_id, {}).update(
            traverse_obj(division_data, ('data', 'story_list', lambda _, v: v['edge_id'] == edge_id, {
                'title': ('title', {str}),
                'cid': ('cid', {int_or_none}),
            }), get_all=False))

        edges[edge_id].update(traverse_obj(division_data, ('data', {
            'title': ('title', {str}),
            'choices': ('edges', 'questions', ..., 'choices', ..., {
                'edge_id': ('id', {int_or_none}),
                'cid': ('cid', {int_or_none}),
                'text': ('option', {str}),
            }),
        })))
        # use dict to combine edges that use the same video section (same cid)
        cid_edges.setdefault(edges[edge_id]['cid'], {})[edge_id] = edges[edge_id]
        for choice in traverse_obj(edges, (edge_id, 'choices', ...)):
            if choice['edge_id'] not in edges:
                edges[choice['edge_id']] = {'cid': choice['cid']}
                self._get_divisions(video_id, graph_version, edges, choice['edge_id'], cid_edges=cid_edges)
        return cid_edges

    def _get_interactive_entries(self, video_id, cid, metainfo):
        graph_version = traverse_obj(
            self._download_json(
                'https://api.bilibili.com/x/player/wbi/v2', video_id,
                'Extracting graph version', query={'bvid': video_id, 'cid': cid}),
            ('data', 'interaction', 'graph_version', {int_or_none}))
        cid_edges = self._get_divisions(video_id, graph_version, {1: {'cid': cid}}, 1)
        for cid, edges in cid_edges.items():
            play_info = self._download_playinfo(video_id, cid)
            yield {
                **metainfo,
                'id': f'{video_id}_{cid}',
                'title': f'{metainfo.get("title")} - {list(edges.values())[0].get("title")}',
                'formats': self.extract_formats(play_info),
                'description': f'{json.dumps(edges, ensure_ascii=False)}\n{metainfo.get("description", "")}',
                'duration': float_or_none(play_info.get('timelength'), scale=1000),
                'subtitles': self.extract_subtitles(video_id, cid),
            }


class BiliBiliIE(BilibiliBaseIE):
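
The new _get_divisions walks the interactive video's story graph edge by edge, keying the result by cid so that branches which reuse the same video segment are emitted only once. The traversal pattern in isolation, over a hypothetical stand-in for the edgeinfo_v2 responses:

    # hypothetical story graph: edge_id -> (cid, choice edge_ids)
    GRAPH = {
        1: (100, [2, 3]),
        2: (200, [4]),
        3: (200, [4]),  # different choice, but the same video segment (cid 200)
        4: (300, []),
    }

    def collect_cids(edge_id, cid_edges=None):
        cid_edges = cid_edges if cid_edges is not None else {}
        cid, choices = GRAPH[edge_id]
        # combine edges that use the same video section, as in _get_divisions
        cid_edges.setdefault(cid, set()).add(edge_id)
        for nxt in choices:
            if not any(nxt in edges for edges in cid_edges.values()):
                collect_cids(nxt, cid_edges)
        return cid_edges

    print(collect_cids(1))  # {100: {1}, 200: {2, 3}, 300: {4}}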
|
||||
|
@ -180,7 +243,7 @@ class BiliBiliIE(BilibiliBaseIE):
|
|||
'view_count': int,
|
||||
},
|
||||
}, {
|
||||
# old av URL version
|
||||
'note': 'old av URL version',
|
||||
'url': 'http://www.bilibili.com/video/av1074402/',
|
||||
'info_dict': {
|
||||
'thumbnail': r're:^https?://.*\.(jpg|jpeg)$',
|
||||
|
@ -212,7 +275,7 @@ class BiliBiliIE(BilibiliBaseIE):
|
|||
'id': 'BV1bK411W797_p1',
|
||||
'ext': 'mp4',
|
||||
'title': '物语中的人物是如何吐槽自己的OP的 p01 Staple Stable/战场原+羽川',
|
||||
'tags': 'count:11',
|
||||
'tags': 'count:10',
|
||||
'timestamp': 1589601697,
|
||||
'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$',
|
||||
'uploader': '打牌还是打桩',
|
||||
|
@ -232,7 +295,7 @@ class BiliBiliIE(BilibiliBaseIE):
|
|||
'id': 'BV1bK411W797_p1',
|
||||
'ext': 'mp4',
|
||||
'title': '物语中的人物是如何吐槽自己的OP的 p01 Staple Stable/战场原+羽川',
|
||||
'tags': 'count:11',
|
||||
'tags': 'count:10',
|
||||
'timestamp': 1589601697,
|
||||
'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$',
|
||||
'uploader': '打牌还是打桩',
|
||||
|
@@ -343,18 +406,120 @@ class BiliBiliIE(BilibiliBaseIE):
            'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$',
        },
        'params': {'skip_download': True},
    }, {
        'note': 'interactive/split-path video',
        'url': 'https://www.bilibili.com/video/BV1af4y1H7ga/',
        'info_dict': {
            'id': 'BV1af4y1H7ga',
            'title': '【互动游戏】花了大半年时间做的自我介绍~请查收!!',
            'timestamp': 1630500414,
            'upload_date': '20210901',
            'description': 'md5:01113e39ab06e28042d74ac356a08786',
            'tags': list,
            'uploader': '钉宫妮妮Ninico',
            'duration': 1503,
            'uploader_id': '8881297',
            'comment_count': int,
            'view_count': int,
            'like_count': int,
            'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$',
        },
        'playlist_count': 33,
        'playlist': [{
            'info_dict': {
                'id': 'BV1af4y1H7ga_400950101',
                'ext': 'mp4',
                'title': '【互动游戏】花了大半年时间做的自我介绍~请查收!! - 听见猫猫叫~',
                'timestamp': 1630500414,
                'upload_date': '20210901',
                'description': 'md5:db66ac7a2813a94b8291dbce990cc5b2',
                'tags': list,
                'uploader': '钉宫妮妮Ninico',
                'duration': 11.605,
                'uploader_id': '8881297',
                'comment_count': int,
                'view_count': int,
                'like_count': int,
                'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$',
            },
        }],
    }, {
        'note': '301 redirect to bangumi link',
        'url': 'https://www.bilibili.com/video/BV1TE411f7f1',
        'info_dict': {
            'id': '288525',
            'title': '李永乐老师 钱学森弹道和乘波体飞行器是什么?',
            'ext': 'mp4',
            'series': '我和我的祖国',
            'series_id': '4780',
            'season': '幕后纪实',
            'season_id': '28609',
            'season_number': 1,
            'episode': '钱学森弹道和乘波体飞行器是什么?',
            'episode_id': '288525',
            'episode_number': 105,
            'duration': 1183.957,
            'timestamp': 1571648124,
            'upload_date': '20191021',
            'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$',
        },
    }, {
        'url': 'https://www.bilibili.com/video/BV1jL41167ZG/',
        'info_dict': {
            'id': 'BV1jL41167ZG',
            'title': '一场大火引发的离奇死亡!古典推理经典短篇集《不可能犯罪诊断书》!',
            'ext': 'mp4',
        },
        'skip': 'supporter-only video',
    }, {
        'url': 'https://www.bilibili.com/video/BV1Ks411f7aQ/',
        'info_dict': {
            'id': 'BV1Ks411f7aQ',
            'title': '【BD1080P】狼与香辛料I【华盟】',
            'ext': 'mp4',
        },
        'skip': 'login required',
    }, {
        'url': 'https://www.bilibili.com/video/BV1GJ411x7h7/',
        'info_dict': {
            'id': 'BV1GJ411x7h7',
            'title': '【官方 MV】Never Gonna Give You Up - Rick Astley',
            'ext': 'mp4',
        },
        'skip': 'geo-restricted',
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        webpage, urlh = self._download_webpage_handle(url, video_id)
        if not self._match_valid_url(urlh.url):
            return self.url_result(urlh.url)

        initial_state = self._search_json(r'window\.__INITIAL_STATE__\s*=', webpage, 'initial state', video_id)

        is_festival = 'videoData' not in initial_state
        if is_festival:
            video_data = initial_state['videoInfo']
        else:
            play_info = self._search_json(r'window\.__playinfo__\s*=', webpage, 'play info', video_id)['data']
            play_info_obj = self._search_json(
                r'window\.__playinfo__\s*=', webpage, 'play info', video_id, fatal=False)
            if not play_info_obj:
                if traverse_obj(initial_state, ('error', 'trueCode')) == -403:
                    self.raise_login_required()
                if traverse_obj(initial_state, ('error', 'trueCode')) == -404:
                    raise ExtractorError(
                        'This video may be deleted or geo-restricted. '
                        'You might want to try a VPN or a proxy server (with --proxy)', expected=True)
            play_info = traverse_obj(play_info_obj, ('data', {dict}))
            if not play_info:
                if traverse_obj(play_info_obj, 'code') == 87007:
                    toast = get_element_by_class('tips-toast', webpage) or ''
                    msg = clean_html(
                        f'{get_element_by_class("belongs-to", toast) or ""},'
                        + (get_element_by_class('level', toast) or ''))
                    raise ExtractorError(
                        f'This is a supporter-only video: {msg}. {self._login_hint()}', expected=True)
                raise ExtractorError('Failed to extract play info')
            video_data = initial_state['videoData']

        video_id, title = video_data['bvid'], video_data.get('title')
@@ -385,10 +550,7 @@ def _real_extract(self, url):

        festival_info = {}
        if is_festival:
            play_info = self._download_json(
                'https://api.bilibili.com/x/player/playurl', video_id,
                query={'bvid': video_id, 'cid': cid, 'fnval': 4048},
                note='Extracting festival video formats')['data']
            play_info = self._download_playinfo(video_id, cid)

            festival_info = traverse_obj(initial_state, {
                'uploader': ('videoInfo', 'upName'),

@@ -397,7 +559,7 @@ def _real_extract(self, url):
            'thumbnail': ('sectionEpisodes', lambda _, v: v['bvid'] == video_id, 'cover'),
        }, get_all=False)

        return {
        metainfo = {
            **traverse_obj(initial_state, {
                'uploader': ('upData', 'name'),
                'uploader_id': ('upData', 'mid', {str_or_none}),
@@ -413,28 +575,59 @@ def _real_extract(self, url):
                'comment_count': ('stat', 'reply', {int_or_none}),
            }, get_all=False),
            'id': f'{video_id}{format_field(part_id, None, "_p%d")}',
            'formats': self.extract_formats(play_info),
            '_old_archive_ids': [make_archive_id(self, old_video_id)] if old_video_id else None,
            'title': title,
            'duration': float_or_none(play_info.get('timelength'), scale=1000),
            'chapters': self._get_chapters(aid, cid),
            'subtitles': self.extract_subtitles(video_id, aid, cid),
            '__post_extractor': self.extract_comments(aid),
            'http_headers': {'Referer': url},
        }

        is_interactive = traverse_obj(video_data, ('rights', 'is_stein_gate'))
        if is_interactive:
            return self.playlist_result(
                self._get_interactive_entries(video_id, cid, metainfo), **metainfo, **{
                    'duration': traverse_obj(initial_state, ('videoData', 'duration', {int_or_none})),
                    '__post_extractor': self.extract_comments(aid),
                })
        else:
            return {
                **metainfo,
                'duration': float_or_none(play_info.get('timelength'), scale=1000),
                'chapters': self._get_chapters(aid, cid),
                'subtitles': self.extract_subtitles(video_id, cid),
                'formats': self.extract_formats(play_info),
                '__post_extractor': self.extract_comments(aid),
            }


class BiliBiliBangumiIE(BilibiliBaseIE):
    _VALID_URL = r'https?://(?:www\.)?bilibili\.com/bangumi/play/(?P<id>ep\d+)'
    _VALID_URL = r'https?://(?:www\.)?bilibili\.com/bangumi/play/ep(?P<id>\d+)'

    _TESTS = [{
        'url': 'https://www.bilibili.com/bangumi/play/ep21495/',
        'info_dict': {
            'id': '21495',
            'ext': 'mp4',
            'series': '悠久之翼',
            'series_id': '774',
            'season': '第二季',
            'season_id': '1182',
            'season_number': 2,
            'episode': 'forever/ef',
            'episode_id': '21495',
            'episode_number': 12,
            'title': '12 forever/ef',
            'duration': 1420.791,
            'timestamp': 1320412200,
            'upload_date': '20111104',
            'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$',
        },
    }, {
        'url': 'https://www.bilibili.com/bangumi/play/ep267851',
        'info_dict': {
            'id': '267851',
            'ext': 'mp4',
            'series': '鬼灭之刃',
            'series_id': '4358',
            'season': '鬼灭之刃',
            'season': '立志篇',
            'season_id': '26801',
            'season_number': 1,
            'episode': '残酷',
@@ -446,13 +639,32 @@ class BiliBiliBangumiIE(BilibiliBaseIE):
            'upload_date': '20190406',
            'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$'
        },
        'skip': 'According to the copyright owner\'s request, you may only watch the video after you are premium member.'
        'skip': 'Geo-restricted',
    }, {
        'note': 'a making-of which falls outside main section',
        'url': 'https://www.bilibili.com/bangumi/play/ep345120',
        'info_dict': {
            'id': '345120',
            'ext': 'mp4',
            'series': '鬼灭之刃',
            'series_id': '4358',
            'season': '立志篇',
            'season_id': '26801',
            'season_number': 1,
            'episode': '炭治郎篇',
            'episode_id': '345120',
            'episode_number': 27,
            'title': '#1 炭治郎篇',
            'duration': 1922.129,
            'timestamp': 1602853860,
            'upload_date': '20201016',
            'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$'
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        episode_id = video_id[2:]
        webpage = self._download_webpage(url, video_id)
        episode_id = self._match_id(url)
        webpage = self._download_webpage(url, episode_id)

        if '您所在的地区无法观看本片' in webpage:
            raise GeoRestrictedError('This video is restricted')
@@ -461,7 +673,7 @@ def _real_extract(self, url):

        headers = {'Referer': url, **self.geo_verification_headers()}
        play_info = self._download_json(
            'https://api.bilibili.com/pgc/player/web/v2/playurl', video_id,
            'https://api.bilibili.com/pgc/player/web/v2/playurl', episode_id,
            'Extracting episode', query={'fnval': '4048', 'ep_id': episode_id},
            headers=headers)
        premium_only = play_info.get('code') == -10403
@@ -472,40 +684,43 @@ def _real_extract(self, url):
            self.raise_login_required('This video is for premium members only')

        bangumi_info = self._download_json(
            'https://api.bilibili.com/pgc/view/web/season', video_id, 'Get episode details',
            'https://api.bilibili.com/pgc/view/web/season', episode_id, 'Get episode details',
            query={'ep_id': episode_id}, headers=headers)['result']

        episode_number, episode_info = next((
            (idx, ep) for idx, ep in enumerate(traverse_obj(
                bangumi_info, ('episodes', ..., {dict})), 1)
                bangumi_info, (('episodes', ('section', ..., 'episodes')), ..., {dict})), 1)
            if str_or_none(ep.get('id')) == episode_id), (1, {}))

        season_id = bangumi_info.get('season_id')
        season_number = season_id and next((
            idx + 1 for idx, e in enumerate(
        season_number, season_title = season_id and next((
            (idx + 1, e.get('season_title')) for idx, e in enumerate(
                traverse_obj(bangumi_info, ('seasons', ...)))
            if e.get('season_id') == season_id
        ), None)
        ), (None, None))

        aid = episode_info.get('aid')

        return {
            'id': video_id,
            'id': episode_id,
            'formats': formats,
            **traverse_obj(bangumi_info, {
                'series': ('series', 'series_title', {str}),
                'series_id': ('series', 'series_id', {str_or_none}),
                'thumbnail': ('square_cover', {url_or_none}),
            }),
            'title': join_nonempty('title', 'long_title', delim=' ', from_dict=episode_info),
            'episode': episode_info.get('long_title'),
            **traverse_obj(episode_info, {
                'episode': ('long_title', {str}),
                'episode_number': ('title', {int_or_none}, {lambda x: x or episode_number}),
                'timestamp': ('pub_time', {int_or_none}),
                'title': {lambda v: v and join_nonempty('title', 'long_title', delim=' ', from_dict=v)},
            }),
            'episode_id': episode_id,
            'episode_number': int_or_none(episode_info.get('title')) or episode_number,
            'season': str_or_none(season_title),
            'season_id': str_or_none(season_id),
            'season_number': season_number,
            'timestamp': int_or_none(episode_info.get('pub_time')),
            'duration': float_or_none(play_info.get('timelength'), scale=1000),
            'subtitles': self.extract_subtitles(video_id, aid, episode_info.get('cid')),
            'subtitles': self.extract_subtitles(episode_id, episode_info.get('cid'), aid=aid),
            '__post_extractor': self.extract_comments(aid),
            'http_headers': headers,
        }
@@ -517,17 +732,53 @@ class BiliBiliBangumiMediaIE(BilibiliBaseIE):
        'url': 'https://www.bilibili.com/bangumi/media/md24097891',
        'info_dict': {
            'id': '24097891',
            'title': 'CAROLE & TUESDAY',
            'description': 'md5:42417ad33d1eaa1c93bfd2dd1626b829',
        },
        'playlist_mincount': 25,
    }, {
        'url': 'https://www.bilibili.com/bangumi/media/md1565/',
        'info_dict': {
            'id': '1565',
            'title': '攻壳机动队 S.A.C. 2nd GIG',
            'description': 'md5:46cac00bafd645b97f4d6df616fc576d',
        },
        'playlist_count': 26,
        'playlist': [{
            'info_dict': {
                'id': '68540',
                'ext': 'mp4',
                'series': '攻壳机动队',
                'series_id': '1077',
                'season': '第二季',
                'season_id': '1565',
                'season_number': 2,
                'episode': '再启动 REEMBODY',
                'episode_id': '68540',
                'episode_number': 1,
                'title': '1 再启动 REEMBODY',
                'duration': 1525.777,
                'timestamp': 1425074413,
                'upload_date': '20150227',
                'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$'
            },
        }],
    }]

    def _real_extract(self, url):
        media_id = self._match_id(url)
        webpage = self._download_webpage(url, media_id)
        ss_id = self._search_json(
            r'window\.__INITIAL_STATE__\s*=', webpage, 'initial_state', media_id)['mediaInfo']['season_id']

        return self.playlist_result(self._get_episodes_from_season(ss_id, url), media_id)
        initial_state = self._search_json(
            r'window\.__INITIAL_STATE__\s*=', webpage, 'initial_state', media_id)
        ss_id = initial_state['mediaInfo']['season_id']

        return self.playlist_result(
            self._get_episodes_from_season(ss_id, url), media_id,
            **traverse_obj(initial_state, ('mediaInfo', {
                'title': ('title', {str}),
                'description': ('evaluate', {str}),
            })))


class BiliBiliBangumiSeasonIE(BilibiliBaseIE):
@@ -535,15 +786,183 @@ class BiliBiliBangumiSeasonIE(BilibiliBaseIE):
    _TESTS = [{
        'url': 'https://www.bilibili.com/bangumi/play/ss26801',
        'info_dict': {
            'id': '26801'
            'id': '26801',
            'title': '鬼灭之刃',
            'description': 'md5:e2cc9848b6f69be6db79fc2a82d9661b',
        },
        'playlist_mincount': 26
    }, {
        'url': 'https://www.bilibili.com/bangumi/play/ss2251',
        'info_dict': {
            'id': '2251',
            'title': '玲音',
            'description': 'md5:1fd40e3df4c08d4d9d89a6a34844bdc4',
        },
        'playlist_count': 13,
        'playlist': [{
            'info_dict': {
                'id': '50188',
                'ext': 'mp4',
                'series': '玲音',
                'series_id': '1526',
                'season': 'TV',
                'season_id': '2251',
                'season_number': 1,
                'episode': 'WEIRD',
                'episode_id': '50188',
                'episode_number': 1,
                'title': '1 WEIRD',
                'duration': 1436.992,
                'timestamp': 1343185080,
                'upload_date': '20120725',
                'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$'
            },
        }],
    }]

    def _real_extract(self, url):
        ss_id = self._match_id(url)
        webpage = self._download_webpage(url, ss_id)
        metainfo = traverse_obj(
            self._search_json(r'<script[^>]+type="application/ld\+json"[^>]*>', webpage, 'info', ss_id),
            ('itemListElement', ..., {
                'title': ('name', {str}),
                'description': ('description', {str}),
            }), get_all=False)

        return self.playlist_result(self._get_episodes_from_season(ss_id, url), ss_id)
        return self.playlist_result(self._get_episodes_from_season(ss_id, url), ss_id, **metainfo)


class BilibiliCheeseBaseIE(BilibiliBaseIE):
    _HEADERS = {'Referer': 'https://www.bilibili.com/'}

    def _extract_episode(self, season_info, ep_id):
        episode_info = traverse_obj(season_info, (
            'episodes', lambda _, v: v['id'] == int(ep_id)), get_all=False)
        aid, cid = episode_info['aid'], episode_info['cid']

        if traverse_obj(episode_info, 'ep_status') == -1:
            raise ExtractorError('This course episode is not yet available.', expected=True)
        if not traverse_obj(episode_info, 'playable'):
            self.raise_login_required('You need to purchase the course to download this episode')

        play_info = self._download_json(
            'https://api.bilibili.com/pugv/player/web/playurl', ep_id,
            query={'avid': aid, 'cid': cid, 'ep_id': ep_id, 'fnval': 16, 'fourk': 1},
            headers=self._HEADERS, note='Downloading playinfo')['data']

        return {
            'id': str_or_none(ep_id),
            'episode_id': str_or_none(ep_id),
            'formats': self.extract_formats(play_info),
            'extractor_key': BilibiliCheeseIE.ie_key(),
            'extractor': BilibiliCheeseIE.IE_NAME,
            'webpage_url': f'https://www.bilibili.com/cheese/play/ep{ep_id}',
            **traverse_obj(episode_info, {
                'episode': ('title', {str}),
                'title': {lambda v: v and join_nonempty('index', 'title', delim=' - ', from_dict=v)},
                'alt_title': ('subtitle', {str}),
                'duration': ('duration', {int_or_none}),
                'episode_number': ('index', {int_or_none}),
                'thumbnail': ('cover', {url_or_none}),
                'timestamp': ('release_date', {int_or_none}),
                'view_count': ('play', {int_or_none}),
            }),
            **traverse_obj(season_info, {
                'uploader': ('up_info', 'uname', {str}),
                'uploader_id': ('up_info', 'mid', {str_or_none}),
            }),
            'subtitles': self.extract_subtitles(ep_id, cid, aid=aid),
            '__post_extractor': self.extract_comments(aid),
            'http_headers': self._HEADERS,
        }

    def _download_season_info(self, query_key, video_id):
        return self._download_json(
            f'https://api.bilibili.com/pugv/view/web/season?{query_key}={video_id}', video_id,
            headers=self._HEADERS, note='Downloading season info')['data']


class BilibiliCheeseIE(BilibiliCheeseBaseIE):
    _VALID_URL = r'https?://(?:www\.)?bilibili\.com/cheese/play/ep(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.bilibili.com/cheese/play/ep229832',
        'info_dict': {
            'id': '229832',
            'ext': 'mp4',
            'title': '1 - 课程先导片',
            'alt_title': '视频课 · 3分41秒',
            'uploader': '马督工',
            'uploader_id': '316568752',
            'episode': '课程先导片',
            'episode_id': '229832',
            'episode_number': 1,
            'duration': 221,
            'timestamp': 1695549606,
            'upload_date': '20230924',
            'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$',
            'view_count': int,
        }
    }]

    def _real_extract(self, url):
        ep_id = self._match_id(url)
        return self._extract_episode(self._download_season_info('ep_id', ep_id), ep_id)


class BilibiliCheeseSeasonIE(BilibiliCheeseBaseIE):
    _VALID_URL = r'https?://(?:www\.)?bilibili\.com/cheese/play/ss(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.bilibili.com/cheese/play/ss5918',
        'info_dict': {
            'id': '5918',
            'title': '【限时五折】新闻系学不到:马督工教你做自媒体',
            'description': '帮普通人建立世界模型,降低人与人的沟通门槛',
        },
        'playlist': [{
            'info_dict': {
                'id': '229832',
                'ext': 'mp4',
                'title': '1 - 课程先导片',
                'alt_title': '视频课 · 3分41秒',
                'uploader': '马督工',
                'uploader_id': '316568752',
                'episode': '课程先导片',
                'episode_id': '229832',
                'episode_number': 1,
                'duration': 221,
                'timestamp': 1695549606,
                'upload_date': '20230924',
                'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$',
                'view_count': int,
            }
        }],
        'params': {'playlist_items': '1'},
    }, {
        'url': 'https://www.bilibili.com/cheese/play/ss5918',
        'info_dict': {
            'id': '5918',
            'title': '【限时五折】新闻系学不到:马督工教你做自媒体',
            'description': '帮普通人建立世界模型,降低人与人的沟通门槛',
        },
        'playlist_mincount': 5,
        'skip': 'paid video in list',
    }]

    def _get_cheese_entries(self, season_info):
        for ep_id in traverse_obj(season_info, ('episodes', lambda _, v: v['episode_can_view'], 'id')):
            yield self._extract_episode(season_info, ep_id)

    def _real_extract(self, url):
        season_id = self._match_id(url)
        season_info = self._download_season_info('season_id', season_id)

        return self.playlist_result(
            self._get_cheese_entries(season_info), season_id,
            **traverse_obj(season_info, {
                'title': ('title', {str}),
                'description': ('subtitle', {str}),
            }))


class BilibiliSpaceBaseIE(InfoExtractor):
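A note on the interactive ('split-path') support added above: each edge of the choice graph lists question choices that point at further edges, and `_get_divisions` recurses until every reachable division has been collected. A minimal standalone sketch of that traversal, with a hypothetical `fetch_edge` callable standing in for the real edge-info API request:

def collect_divisions(fetch_edge, edge_id, seen=None):
    # fetch_edge(edge_id) is assumed to return
    # {'cid': int, 'choices': [{'edge_id': int, 'cid': int}, ...]}
    seen = {} if seen is None else seen
    node = fetch_edge(edge_id)
    seen[edge_id] = node['cid']  # remember this division's cid
    for choice in node['choices']:
        if choice['edge_id'] not in seen:  # avoid revisiting shared branches
            collect_divisions(fetch_edge, choice['edge_id'], seen)
    return seen

Each collected cid is then extracted as its own playlist entry, which is why the interactive test above yields 33 entries for one video page.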
@@ -1,110 +0,0 @@
from .common import InfoExtractor
from .vk import VKIE
from ..compat import compat_b64decode
from ..utils import (
    int_or_none,
    js_to_json,
    traverse_obj,
    unified_timestamp,
)


class BIQLEIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?biqle\.(?:com|org|ru)/watch/(?P<id>-?\d+_\d+)'
    _TESTS = [{
        'url': 'https://biqle.ru/watch/-2000421746_85421746',
        'md5': 'ae6ef4f04d19ac84e4658046d02c151c',
        'info_dict': {
            'id': '-2000421746_85421746',
            'ext': 'mp4',
            'title': 'Forsaken By Hope Studio Clip',
            'description': 'Forsaken By Hope Studio Clip — Смотреть онлайн',
            'upload_date': '19700101',
            'thumbnail': r're:https://[^/]+/impf/7vN3ACwSTgChP96OdOfzFjUCzFR6ZglDQgWsIw/KPaACiVJJxM\.jpg\?size=800x450&quality=96&keep_aspect_ratio=1&background=000000&sign=b48ea459c4d33dbcba5e26d63574b1cb&type=video_thumb',
            'timestamp': 0,
        },
    }, {
        'url': 'http://biqle.org/watch/-44781847_168547604',
        'md5': '7f24e72af1db0edf7c1aaba513174f97',
        'info_dict': {
            'id': '-44781847_168547604',
            'ext': 'mp4',
            'title': 'Ребенок в шоке от автоматической мойки',
            'description': 'Ребенок в шоке от автоматической мойки — Смотреть онлайн',
            'timestamp': 1396633454,
            'upload_date': '20140404',
            'thumbnail': r're:https://[^/]+/c535507/u190034692/video/l_b84df002\.jpg',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        title = self._html_search_meta('name', webpage, 'Title', fatal=False)
        timestamp = unified_timestamp(self._html_search_meta('uploadDate', webpage, 'Upload Date', default=None))
        description = self._html_search_meta('description', webpage, 'Description', default=None)

        global_embed_url = self._search_regex(
            r'<script[^<]+?window.globEmbedUrl\s*=\s*\'((?:https?:)?//(?:daxab\.com|dxb\.to|[^/]+/player)/[^\']+)\'',
            webpage, 'global Embed url')
        hash = self._search_regex(
            r'<script id="data-embed-video[^<]+?hash: "([^"]+)"[^<]*</script>', webpage, 'Hash')

        embed_url = global_embed_url + hash

        if VKIE.suitable(embed_url):
            return self.url_result(embed_url, VKIE.ie_key(), video_id)

        embed_page = self._download_webpage(
            embed_url, video_id, 'Downloading embed webpage', headers={'Referer': url})

        glob_params = self._parse_json(self._search_regex(
            r'<script id="globParams">[^<]*window.globParams = ([^;]+);[^<]+</script>',
            embed_page, 'Global Parameters'), video_id, transform_source=js_to_json)
        host_name = compat_b64decode(glob_params['server'][::-1]).decode()

        item = self._download_json(
            f'https://{host_name}/method/video.get/{video_id}', video_id,
            headers={'Referer': url}, query={
                'token': glob_params['video']['access_token'],
                'videos': video_id,
                'ckey': glob_params['c_key'],
                'credentials': glob_params['video']['credentials'],
            })['response']['items'][0]

        formats = []
        for f_id, f_url in item.get('files', {}).items():
            if f_id == 'external':
                return self.url_result(f_url)
            ext, height = f_id.split('_')
            height_extra_key = traverse_obj(glob_params, ('video', 'partial', 'quality', height))
            if height_extra_key:
                formats.append({
                    'format_id': f'{height}p',
                    'url': f'https://{host_name}/{f_url[8:]}&videos={video_id}&extra_key={height_extra_key}',
                    'height': int_or_none(height),
                    'ext': ext,
                })

        thumbnails = []
        for k, v in item.items():
            if k.startswith('photo_') and v:
                width = k.replace('photo_', '')
                thumbnails.append({
                    'id': width,
                    'url': v,
                    'width': int_or_none(width),
                })

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'comment_count': int_or_none(item.get('comments')),
            'description': description,
            'duration': int_or_none(item.get('duration')),
            'thumbnails': thumbnails,
            'timestamp': timestamp,
            'view_count': int_or_none(item.get('views')),
        }
@@ -7,8 +7,10 @@
    ExtractorError,
    OnDemandPagedList,
    clean_html,
    extract_attributes,
    get_element_by_class,
    get_element_by_id,
    get_element_html_by_class,
    get_elements_html_by_class,
    int_or_none,
    orderedSet,

@@ -17,6 +19,7 @@
    traverse_obj,
    unified_strdate,
    urlencode_postdata,
    urljoin,
)


@@ -34,6 +37,25 @@ class BitChuteIE(InfoExtractor):
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'BitChute',
            'upload_date': '20170103',
            'uploader_url': 'https://www.bitchute.com/profile/I5NgtHZn9vPj/',
            'channel': 'BitChute',
            'channel_url': 'https://www.bitchute.com/channel/bitchute/'
        },
    }, {
        # test case: video with different channel and uploader
        'url': 'https://www.bitchute.com/video/Yti_j9A-UZ4/',
        'md5': 'f10e6a8e787766235946d0868703f1d0',
        'info_dict': {
            'id': 'Yti_j9A-UZ4',
            'ext': 'mp4',
            'title': 'Israel at War | Full Measure',
            'description': 'md5:38cf7bc6f42da1a877835539111c69ef',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'sharylattkisson',
            'upload_date': '20231106',
            'uploader_url': 'https://www.bitchute.com/profile/9K0kUWA9zmd9/',
            'channel': 'Full Measure with Sharyl Attkisson',
            'channel_url': 'https://www.bitchute.com/channel/sharylattkisson/'
        },
    }, {
        # video not downloadable in browser, but we can recover it

@@ -48,6 +70,9 @@ class BitChuteIE(InfoExtractor):
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'BitChute',
            'upload_date': '20181113',
            'uploader_url': 'https://www.bitchute.com/profile/I5NgtHZn9vPj/',
            'channel': 'BitChute',
            'channel_url': 'https://www.bitchute.com/channel/bitchute/'
        },
        'params': {'check_formats': None},
    }, {

@@ -99,6 +124,11 @@ def _raise_if_restricted(self, webpage):
        reason = clean_html(get_element_by_id('page-detail', webpage)) or page_title
        self.raise_geo_restricted(reason)

    @staticmethod
    def _make_url(html):
        path = extract_attributes(get_element_html_by_class('spa', html) or '').get('href')
        return urljoin('https://www.bitchute.com', path)

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(

@@ -121,12 +151,19 @@ def _real_extract(self, url):
                'Video is unavailable. Please make sure this video is playable in the browser '
                'before reporting this issue.', expected=True, video_id=video_id)

        details = get_element_by_class('details', webpage) or ''
        uploader_html = get_element_html_by_class('creator', details) or ''
        channel_html = get_element_html_by_class('name', details) or ''

        return {
            'id': video_id,
            'title': self._html_extract_title(webpage) or self._og_search_title(webpage),
            'description': self._og_search_description(webpage, default=None),
            'thumbnail': self._og_search_thumbnail(webpage),
            'uploader': clean_html(get_element_by_class('owner', webpage)),
            'uploader': clean_html(uploader_html),
            'uploader_url': self._make_url(uploader_html),
            'channel': clean_html(channel_html),
            'channel_url': self._make_url(channel_html),
            'upload_date': unified_strdate(self._search_regex(
                r'at \d+:\d+ UTC on (.+?)\.', publish_date, 'upload date', fatal=False)),
            'formats': formats,

@@ -154,6 +191,9 @@ class BitChuteChannelIE(InfoExtractor):
                'thumbnail': r're:^https?://.*\.jpg$',
                'uploader': 'BitChute',
                'upload_date': '20170103',
                'uploader_url': 'https://www.bitchute.com/profile/I5NgtHZn9vPj/',
                'channel': 'BitChute',
                'channel_url': 'https://www.bitchute.com/channel/bitchute/',
                'duration': 16,
                'view_count': int,
            },

@@ -169,7 +209,7 @@ class BitChuteChannelIE(InfoExtractor):
        'info_dict': {
            'id': 'wV9Imujxasw9',
            'title': 'Bruce MacDonald and "The Light of Darkness"',
            'description': 'md5:04913227d2714af1d36d804aa2ab6b1e',
            'description': 'md5:747724ef404eebdfc04277714f81863e',
        }
    }]
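The new `_make_url` helper above resolves the relative `href` of the first element with class `spa` against the site root, which is how the uploader and channel links are now derived from two separate page elements. A rough demonstration on made-up markup (the class names `creator` and `spa` mirror the extractor; the HTML itself is invented):

from yt_dlp.utils import clean_html, extract_attributes, get_element_html_by_class, urljoin

details = '<div class="details"><p class="creator"><a class="spa" href="/profile/abc/">someuser</a></p></div>'

uploader_html = get_element_html_by_class('creator', details) or ''
path = extract_attributes(get_element_html_by_class('spa', uploader_html) or '').get('href')

print(clean_html(uploader_html))                  # someuser
print(urljoin('https://www.bitchute.com', path))  # https://www.bitchute.com/profile/abc/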
@@ -1,58 +0,0 @@
from .common import InfoExtractor


class BitwaveReplayIE(InfoExtractor):
    IE_NAME = 'bitwave:replay'
    _VALID_URL = r'https?://(?:www\.)?bitwave\.tv/(?P<user>\w+)/replay/(?P<id>\w+)/?$'
    _TEST = {
        'url': 'https://bitwave.tv/RhythmicCarnage/replay/z4P6eq5L7WDrM85UCrVr',
        'only_matching': True
    }

    def _real_extract(self, url):
        replay_id = self._match_id(url)
        replay = self._download_json(
            'https://api.bitwave.tv/v1/replays/' + replay_id,
            replay_id
        )

        return {
            'id': replay_id,
            'title': replay['data']['title'],
            'uploader': replay['data']['name'],
            'uploader_id': replay['data']['name'],
            'url': replay['data']['url'],
            'thumbnails': [
                {'url': x} for x in replay['data']['thumbnails']
            ],
        }


class BitwaveStreamIE(InfoExtractor):
    IE_NAME = 'bitwave:stream'
    _VALID_URL = r'https?://(?:www\.)?bitwave\.tv/(?P<id>\w+)/?$'
    _TEST = {
        'url': 'https://bitwave.tv/doomtube',
        'only_matching': True
    }

    def _real_extract(self, url):
        username = self._match_id(url)
        channel = self._download_json(
            'https://api.bitwave.tv/v1/channels/' + username,
            username)

        formats = self._extract_m3u8_formats(
            channel['data']['url'], username,
            'mp4')

        return {
            'id': username,
            'title': channel['data']['title'],
            'uploader': username,
            'uploader_id': username,
            'formats': formats,
            'thumbnail': channel['data']['thumbnail'],
            'is_live': True,
            'view_count': channel['data']['viewCount']
        }
@@ -22,7 +22,7 @@ class BleacherReportIE(InfoExtractor):
            'upload_date': '20150615',
            'uploader': 'Team Stream Now ',
        },
        'add_ie': ['Ooyala'],
        'skip': 'Video removed',
    }, {
        'url': 'http://bleacherreport.com/articles/2586817-aussie-golfers-get-fright-of-their-lives-after-being-chased-by-angry-kangaroo',
        'md5': '6a5cd403418c7b01719248ca97fb0692',

@@ -70,8 +70,6 @@ def _real_extract(self, url):
        video_type = video['type']
        if video_type in ('cms.bleacherreport.com', 'vid.bleacherreport.com'):
            info['url'] = 'http://bleacherreport.com/video_embed?id=%s' % video['id']
        elif video_type == 'ooyala.com':
            info['url'] = 'ooyala:%s' % video['id']
        elif video_type == 'youtube.com':
            info['url'] = video['id']
        elif video_type == 'vine.co':
@@ -1,16 +1,17 @@
import json
import urllib.parse

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    parse_iso8601,
    # try_get,
    update_url_query,
    url_or_none,
)
from ..utils.traversal import traverse_obj


class BoxIE(InfoExtractor):
    _VALID_URL = r'https?://(?:[^.]+\.)?app\.box\.com/s/(?P<shared_name>[^/]+)/file/(?P<id>\d+)'
    _VALID_URL = r'https?://(?:[^.]+\.)?app\.box\.com/s/(?P<shared_name>[^/?#]+)/file/(?P<id>\d+)'
    _TEST = {
        'url': 'https://mlssoccer.app.box.com/s/0evd2o3e08l60lr4ygukepvnkord1o1x/file/510727257538',
        'md5': '1f81b2fd3960f38a40a3b8823e5fcd43',

@@ -18,11 +19,12 @@ class BoxIE(InfoExtractor):
            'id': '510727257538',
            'ext': 'mp4',
            'title': 'Garber St. Louis will be 28th MLS team +scarving.mp4',
            'uploader': 'MLS Video',
            'uploader': '',
            'timestamp': 1566320259,
            'upload_date': '20190820',
            'uploader_id': '235196876',
        }
        },
        'params': {'skip_download': 'dash fragment too small'},
    }

    def _real_extract(self, url):

@@ -58,26 +60,15 @@ def _real_extract(self, url):

        formats = []

        # for entry in (try_get(f, lambda x: x['representations']['entries'], list) or []):
        #     entry_url_template = try_get(
        #         entry, lambda x: x['content']['url_template'])
        #     if not entry_url_template:
        #         continue
        #     representation = entry.get('representation')
        #     if representation == 'dash':
        #         TODO: append query to every fragment URL
        #         formats.extend(self._extract_mpd_formats(
        #             entry_url_template.replace('{+asset_path}', 'manifest.mpd'),
        #             file_id, query=query))

        authenticated_download_url = f.get('authenticated_download_url')
        if authenticated_download_url and f.get('is_download_available'):
            formats.append({
                'ext': f.get('extension') or determine_ext(title),
                'filesize': f.get('size'),
                'format_id': 'download',
                'url': update_url_query(authenticated_download_url, query),
            })
        for url_tmpl in traverse_obj(f, (
            'representations', 'entries', lambda _, v: v['representation'] == 'dash',
            'content', 'url_template', {url_or_none}
        )):
            manifest_url = update_url_query(url_tmpl.replace('{+asset_path}', 'manifest.mpd'), query)
            fmts = self._extract_mpd_formats(manifest_url, file_id)
            for fmt in fmts:
                fmt['extra_param_to_segment_url'] = urllib.parse.urlparse(manifest_url).query
            formats.extend(fmts)

        creator = f.get('created_by') or {}
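The Box rewrite above turns the long-commented-out DASH path into working code; the key detail is that segment URLs parsed from the MPD do not inherit the shared link's query string, so it is saved on each format as `extra_param_to_segment_url` and re-appended at download time. Roughly, under the assumption of an invented URL and parameters:

import urllib.parse

manifest_url = 'https://public.app.box.com/dash/manifest.mpd?shared_link=abc&access_token=xyz'

# Everything after '?' must ride along on every init/media segment request,
# since the MPD's segment templates carry only the bare paths
extra_query = urllib.parse.urlparse(manifest_url).query
segment_url = 'https://public.app.box.com/dash/video/1080/init.m4s'
print(f'{segment_url}?{extra_query}')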
@@ -1,18 +1,15 @@
import json

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    ExtractorError,
    int_or_none,
    parse_duration,
    parse_iso8601,
    xpath_element,
    xpath_text,
)


class BRIE(InfoExtractor):
    _WORKING = False
    IE_DESC = 'Bayerischer Rundfunk'
    _VALID_URL = r'(?P<base_url>https?://(?:www\.)?br(?:-klassik)?\.de)/(?:[a-z0-9\-_]+/)+(?P<id>[a-z0-9\-_]+)\.html'

@@ -167,142 +164,3 @@ def _extract_thumbnails(self, variants, base_url):
        } for variant in variants.findall('variant') if xpath_text(variant, 'url')]
        thumbnails.sort(key=lambda x: x['width'] * x['height'], reverse=True)
        return thumbnails


class BRMediathekIE(InfoExtractor):
    IE_DESC = 'Bayerischer Rundfunk Mediathek'
    _VALID_URL = r'https?://(?:www\.)?br\.de/mediathek//?video/(?:[^/?&#]+?-)?(?P<id>av:[0-9a-f]{24})'

    _TESTS = [{
        'url': 'https://www.br.de/mediathek/video/gesundheit-die-sendung-vom-28112017-av:5a1e6a6e8fce6d001871cc8e',
        'md5': 'fdc3d485835966d1622587d08ba632ec',
        'info_dict': {
            'id': 'av:5a1e6a6e8fce6d001871cc8e',
            'ext': 'mp4',
            'title': 'Die Sendung vom 28.11.2017',
            'description': 'md5:6000cdca5912ab2277e5b7339f201ccc',
            'timestamp': 1511942766,
            'upload_date': '20171129',
        }
    }, {
        'url': 'https://www.br.de/mediathek//video/av:61b0db581aed360007558c12',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        clip_id = self._match_id(url)

        clip = self._download_json(
            'https://proxy-base.master.mango.express/graphql',
            clip_id, data=json.dumps({
                "query": """{
  viewer {
    clip(id: "%s") {
      title
      description
      duration
      createdAt
      ageRestriction
      videoFiles {
        edges {
          node {
            publicLocation
            fileSize
            videoProfile {
              width
              height
              bitrate
              encoding
            }
          }
        }
      }
      captionFiles {
        edges {
          node {
            publicLocation
          }
        }
      }
      teaserImages {
        edges {
          node {
            imageFiles {
              edges {
                node {
                  publicLocation
                  width
                  height
                }
              }
            }
          }
        }
      }
    }
  }
}""" % clip_id}).encode(), headers={
                'Content-Type': 'application/json',
            })['data']['viewer']['clip']
        title = clip['title']

        formats = []
        for edge in clip.get('videoFiles', {}).get('edges', []):
            node = edge.get('node', {})
            n_url = node.get('publicLocation')
            if not n_url:
                continue
            ext = determine_ext(n_url)
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    n_url, clip_id, 'mp4', 'm3u8_native',
                    m3u8_id='hls', fatal=False))
            else:
                video_profile = node.get('videoProfile', {})
                tbr = int_or_none(video_profile.get('bitrate'))
                format_id = 'http'
                if tbr:
                    format_id += '-%d' % tbr
                formats.append({
                    'format_id': format_id,
                    'url': n_url,
                    'width': int_or_none(video_profile.get('width')),
                    'height': int_or_none(video_profile.get('height')),
                    'tbr': tbr,
                    'filesize': int_or_none(node.get('fileSize')),
                })

        subtitles = {}
        for edge in clip.get('captionFiles', {}).get('edges', []):
            node = edge.get('node', {})
            n_url = node.get('publicLocation')
            if not n_url:
                continue
            subtitles.setdefault('de', []).append({
                'url': n_url,
            })

        thumbnails = []
        for edge in clip.get('teaserImages', {}).get('edges', []):
            for image_edge in edge.get('node', {}).get('imageFiles', {}).get('edges', []):
                node = image_edge.get('node', {})
                n_url = node.get('publicLocation')
                if not n_url:
                    continue
                thumbnails.append({
                    'url': n_url,
                    'width': int_or_none(node.get('width')),
                    'height': int_or_none(node.get('height')),
                })

        return {
            'id': clip_id,
            'title': title,
            'description': clip.get('description'),
            'duration': int_or_none(clip.get('duration')),
            'timestamp': parse_iso8601(clip.get('createdAt')),
            'age_limit': int_or_none(clip.get('ageRestriction')),
            'formats': formats,
            'subtitles': subtitles,
            'thumbnails': thumbnails,
        }
@@ -1,86 +0,0 @@
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..utils import (
    int_or_none,
    url_or_none,
)


class BreakIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?break\.com/video/(?P<display_id>[^/]+?)(?:-(?P<id>\d+))?(?:[/?#&]|$)'
    _TESTS = [{
        'url': 'http://www.break.com/video/when-girls-act-like-guys-2468056',
        'info_dict': {
            'id': '2468056',
            'ext': 'mp4',
            'title': 'When Girls Act Like D-Bags',
            'age_limit': 13,
        },
    }, {
        # youtube embed
        'url': 'http://www.break.com/video/someone-forgot-boat-brakes-work',
        'info_dict': {
            'id': 'RrrDLdeL2HQ',
            'ext': 'mp4',
            'title': 'Whale Watching Boat Crashing Into San Diego Dock',
            'description': 'md5:afc1b2772f0a8468be51dd80eb021069',
            'upload_date': '20160331',
            'uploader': 'Steve Holden',
            'uploader_id': 'sdholden07',
        },
        'params': {
            'skip_download': True,
        }
    }, {
        'url': 'http://www.break.com/video/ugc/baby-flex-2773063',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id, video_id = self._match_valid_url(url).groups()

        webpage = self._download_webpage(url, display_id)

        youtube_url = YoutubeIE._extract_url(webpage)
        if youtube_url:
            return self.url_result(youtube_url, ie=YoutubeIE.ie_key())

        content = self._parse_json(
            self._search_regex(
                r'(?s)content["\']\s*:\s*(\[.+?\])\s*[,\n]', webpage,
                'content'),
            display_id)

        formats = []
        for video in content:
            video_url = url_or_none(video.get('url'))
            if not video_url:
                continue
            bitrate = int_or_none(self._search_regex(
                r'(\d+)_kbps', video_url, 'tbr', default=None))
            formats.append({
                'url': video_url,
                'format_id': 'http-%d' % bitrate if bitrate else 'http',
                'tbr': bitrate,
            })

        title = self._search_regex(
            (r'title["\']\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1',
             r'<h1[^>]*>(?P<value>[^<]+)'), webpage, 'title', group='value')

        def get(key, name):
            return int_or_none(self._search_regex(
                r'%s["\']\s*:\s*["\'](\d+)' % key, webpage, name,
                default=None))

        age_limit = get('ratings', 'age limit')
        video_id = video_id or get('pid', 'video id') or display_id

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'thumbnail': self._og_search_thumbnail(webpage),
            'age_limit': age_limit,
            'formats': formats,
        }
@@ -21,10 +21,10 @@ def _initialize_pre_login(self):

    def _get_logged_in_username(self, url, video_id):
        webpage, urlh = self._download_webpage_handle(url, video_id)
        if self._LOGIN_API == urlh.url:
        if urlh.url.startswith(self._LOGIN_API):
            self.raise_login_required()
        return self._html_search_regex(
            r'"username"\s*:\s*"(?P<username>[^"]+)"', webpage, 'stream page info', 'username')
            r'"username"\s*:\s*"(?P<username>[^"]+)"', webpage, 'logged-in username')

    def _perform_login(self, username, password):
        login_form = self._hidden_inputs(self._download_webpage(
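The tweak above swaps an exact URL comparison for a prefix check, so a redirect to the login endpoint is still detected when the final URL carries extra query parameters. A small illustration with hypothetical URLs:

LOGIN_API = 'https://example.com/login'                      # stand-in for _LOGIN_API
final_url = 'https://example.com/login?next=%2Fvideo%2F42'   # redirect target with params

print(LOGIN_API == final_url)           # False: exact match misses this redirect
print(final_url.startswith(LOGIN_API))  # True: prefix match still catches it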
@@ -8,9 +8,9 @@


class BYUtvIE(InfoExtractor):
    _WORKING = False
    _VALID_URL = r'https?://(?:www\.)?byutv\.org/(?:watch|player)/(?!event/)(?P<id>[0-9a-f-]+)(?:/(?P<display_id>[^/?#&]+))?'
    _TESTS = [{
        # ooyalaVOD
        'url': 'http://www.byutv.org/watch/6587b9a3-89d2-42a6-a7f7-fd2f81840a7d/studio-c-season-5-episode-5',
        'info_dict': {
            'id': 'ZvanRocTpW-G5_yZFeltTAMv6jxOU9KH',

@@ -24,7 +24,6 @@ class BYUtvIE(InfoExtractor):
        'params': {
            'skip_download': True,
        },
        'add_ie': ['Ooyala'],
    }, {
        # dvr
        'url': 'https://www.byutv.org/player/8f1dab9b-b243-47c8-b525-3e2d021a3451/byu-softball-pacific-vs-byu-41219---game-2',

@@ -63,19 +62,6 @@ def _real_extract(self, url):
                'x-byutv-platformkey': 'xsaaw9c7y5',
            })

        ep = video.get('ooyalaVOD')
        if ep:
            return {
                '_type': 'url_transparent',
                'ie_key': 'Ooyala',
                'url': 'ooyala:%s' % ep['providerId'],
                'id': video_id,
                'display_id': display_id,
                'title': ep.get('title'),
                'description': ep.get('description'),
                'thumbnail': ep.get('imageThumbnail'),
            }

        info = {}
        formats = []
        subtitles = {}
@@ -1,87 +0,0 @@
import re

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    parse_duration,
    unified_strdate,
)


class CamWithHerIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?camwithher\.tv/view_video\.php\?.*\bviewkey=(?P<id>\w+)'

    _TESTS = [{
        'url': 'http://camwithher.tv/view_video.php?viewkey=6e9a24e2c0e842e1f177&page=&viewtype=&category=',
        'info_dict': {
            'id': '5644',
            'ext': 'flv',
            'title': 'Periscope Tease',
            'description': 'In the clouds teasing on periscope to my favorite song',
            'duration': 240,
            'view_count': int,
            'comment_count': int,
            'uploader': 'MileenaK',
            'upload_date': '20160322',
            'age_limit': 18,
        },
        'params': {
            'skip_download': True,
        }
    }, {
        'url': 'http://camwithher.tv/view_video.php?viewkey=6dfd8b7c97531a459937',
        'only_matching': True,
    }, {
        'url': 'http://camwithher.tv/view_video.php?page=&viewkey=6e9a24e2c0e842e1f177&viewtype=&category=',
        'only_matching': True,
    }, {
        'url': 'http://camwithher.tv/view_video.php?viewkey=b6c3b5bea9515d1a1fc4&page=&viewtype=&category=mv',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        flv_id = self._html_search_regex(
            r'<a[^>]+href=["\']/download/\?v=(\d+)', webpage, 'video id')

        # Video URL construction algorithm is reverse-engineered from cwhplayer.swf
        rtmp_url = 'rtmp://camwithher.tv/clipshare/%s' % (
            ('mp4:%s.mp4' % flv_id) if int(flv_id) > 2010 else flv_id)

        title = self._html_search_regex(
            r'<div[^>]+style="float:left"[^>]*>\s*<h2>(.+?)</h2>', webpage, 'title')
        description = self._html_search_regex(
            r'>Description:</span>(.+?)</div>', webpage, 'description', default=None)

        runtime = self._search_regex(
            r'Runtime\s*:\s*(.+?) \|', webpage, 'duration', default=None)
        if runtime:
            runtime = re.sub(r'[\s-]', '', runtime)
        duration = parse_duration(runtime)
        view_count = int_or_none(self._search_regex(
            r'Views\s*:\s*(\d+)', webpage, 'view count', default=None))
        comment_count = int_or_none(self._search_regex(
            r'Comments\s*:\s*(\d+)', webpage, 'comment count', default=None))

        uploader = self._search_regex(
            r'Added by\s*:\s*<a[^>]+>([^<]+)</a>', webpage, 'uploader', default=None)
        upload_date = unified_strdate(self._search_regex(
            r'Added on\s*:\s*([\d-]+)', webpage, 'upload date', default=None))

        return {
            'id': flv_id,
            'url': rtmp_url,
            'ext': 'flv',
            'no_resume': True,
            'title': title,
            'description': description,
            'duration': duration,
            'view_count': view_count,
            'comment_count': comment_count,
            'uploader': uploader,
            'upload_date': upload_date,
            'age_limit': 18
        }
@@ -1,105 +0,0 @@
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    format_field,
    float_or_none,
    int_or_none,
    try_get,
)

from .videomore import VideomoreIE


class CarambaTVIE(InfoExtractor):
    _VALID_URL = r'(?:carambatv:|https?://video1\.carambatv\.ru/v/)(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://video1.carambatv.ru/v/191910501',
        'md5': '2f4a81b7cfd5ab866ee2d7270cb34a2a',
        'info_dict': {
            'id': '191910501',
            'ext': 'mp4',
            'title': '[BadComedian] - Разборка в Маниле (Абсолютный обзор)',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 2678.31,
        },
    }, {
        'url': 'carambatv:191910501',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        video = self._download_json(
            'http://video1.carambatv.ru/v/%s/videoinfo.js' % video_id,
            video_id)

        title = video['title']

        base_url = video.get('video') or 'http://video1.carambatv.ru/v/%s/' % video_id

        formats = [{
            'url': base_url + f['fn'],
            'height': int_or_none(f.get('height')),
            'format_id': format_field(f, 'height', '%sp'),
        } for f in video['qualities'] if f.get('fn')]

        thumbnail = video.get('splash')
        duration = float_or_none(try_get(
            video, lambda x: x['annotations'][0]['end_time'], compat_str))

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
        }


class CarambaTVPageIE(InfoExtractor):
    _VALID_URL = r'https?://carambatv\.ru/(?:[^/]+/)+(?P<id>[^/?#&]+)'
    _TEST = {
        'url': 'http://carambatv.ru/movie/bad-comedian/razborka-v-manile/',
        'md5': 'a49fb0ec2ad66503eeb46aac237d3c86',
        'info_dict': {
            'id': '475222',
            'ext': 'flv',
            'title': '[BadComedian] - Разборка в Маниле (Абсолютный обзор)',
            'thumbnail': r're:^https?://.*\.jpg',
            # duration reported by videomore is incorrect
            'duration': int,
        },
        'add_ie': [VideomoreIE.ie_key()],
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        videomore_url = VideomoreIE._extract_url(webpage)
        if not videomore_url:
            videomore_id = self._search_regex(
                r'getVMCode\s*\(\s*["\']?(\d+)', webpage, 'videomore id',
                default=None)
            if videomore_id:
                videomore_url = 'videomore:%s' % videomore_id
        if videomore_url:
            title = self._og_search_title(webpage)
            return {
                '_type': 'url_transparent',
                'url': videomore_url,
                'ie_key': VideomoreIE.ie_key(),
                'title': title,
            }

        video_url = self._og_search_property('video:iframe', webpage, default=None)

        if not video_url:
            video_id = self._search_regex(
                r'(?:video_id|crmb_vuid)\s*[:=]\s*["\']?(\d+)',
                webpage, 'video id')
            video_url = 'carambatv:%s' % video_id

        return self.url_result(video_url, CarambaTVIE.ie_key())
@@ -1,8 +1,9 @@
import re
import json
import base64
import json
import re
import time
import urllib.parse
import xml.etree.ElementTree

from .common import InfoExtractor
from ..compat import (

@@ -179,6 +180,13 @@ class CBCPlayerIE(InfoExtractor):
            'thumbnail': 'http://thumbnails.cbc.ca/maven_legacy/thumbnails/sonali-karnick-220.jpg',
            'chapters': [],
            'duration': 494.811,
            'categories': ['AudioMobile/All in a Weekend Montreal'],
            'tags': 'count:8',
            'location': 'Quebec',
            'series': 'All in a Weekend Montreal',
            'season': 'Season 2015',
            'season_number': 2015,
            'media_type': 'Excerpt',
        },
    }, {
        'url': 'http://www.cbc.ca/player/play/2164402062',

@@ -194,25 +202,37 @@ class CBCPlayerIE(InfoExtractor):
            'thumbnail': 'https://thumbnails.cbc.ca/maven_legacy/thumbnails/277/67/cancer_852x480_2164412612.jpg',
            'chapters': [],
            'duration': 186.867,
            'series': 'CBC News: Windsor at 6:00',
            'categories': ['News/Canada/Windsor'],
            'location': 'Windsor',
            'tags': ['cancer'],
            'creator': 'Allison Johnson',
            'media_type': 'Excerpt',
        },
    }, {
        # Has subtitles
        # These broadcasts expire after ~1 month, can find new test URL here:
        # https://www.cbc.ca/player/news/TV%20Shows/The%20National/Latest%20Broadcast
        'url': 'http://www.cbc.ca/player/play/2249992771553',
        'md5': '2f2fb675dd4f0f8a5bb7588d1b13bacd',
        'url': 'http://www.cbc.ca/player/play/2284799043667',
        'md5': '9b49f0839e88b6ec0b01d840cf3d42b5',
        'info_dict': {
            'id': '2249992771553',
            'id': '2284799043667',
            'ext': 'mp4',
            'title': 'The National | Women’s soccer pay, Florida seawater, Swift quake',
            'description': 'md5:adba28011a56cfa47a080ff198dad27a',
            'timestamp': 1690596000,
            'duration': 2716.333,
            'title': 'The National | Hockey coach charged, Green grants, Safer drugs',
            'description': 'md5:84ef46321c94bcf7d0159bb565d26bfa',
            'timestamp': 1700272800,
            'duration': 2718.833,
            'subtitles': {'eng': [{'ext': 'vtt', 'protocol': 'm3u8_native'}]},
            'thumbnail': 'https://thumbnails.cbc.ca/maven_legacy/thumbnails/481/326/thumbnail.jpeg',
            'thumbnail': 'https://thumbnails.cbc.ca/maven_legacy/thumbnails/907/171/thumbnail.jpeg',
            'uploader': 'CBCC-NEW',
            'chapters': 'count:5',
            'upload_date': '20230729',
            'upload_date': '20231118',
            'categories': 'count:4',
            'series': 'The National - Full Show',
            'tags': 'count:1',
            'creator': 'News',
            'location': 'Canada',
            'media_type': 'Full Program',
        },
    }]

@@ -387,7 +407,7 @@ def _find_secret_formats(self, formats, video_id):
        url = re.sub(r'(Manifest\(.*?),format=[\w-]+(.*?\))', r'\1\2', base_url)

        secret_xml = self._download_xml(url, video_id, note='Downloading secret XML', fatal=False)
        if not secret_xml:
        if not isinstance(secret_xml, xml.etree.ElementTree.Element):
            return

        for child in secret_xml:
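The `_find_secret_formats` fix above guards against a classic pitfall: an `xml.etree.ElementTree.Element` with no children is falsy, so `if not secret_xml` would bail out even after a successful download. A short demonstration:

import xml.etree.ElementTree as ET

empty = ET.fromstring('<root/>')         # parses fine, but has no children
print(bool(empty))                       # False: a childless Element is falsy
print(isinstance(empty, ET.Element))     # True: the reliable success check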
@ -1,252 +0,0 @@
|
|||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
clean_html,
|
||||
int_or_none,
|
||||
parse_iso8601,
|
||||
qualities,
|
||||
unescapeHTML,
|
||||
)
|
||||
|
||||
|
||||
class Channel9IE(InfoExtractor):
|
||||
IE_DESC = 'Channel 9'
|
||||
IE_NAME = 'channel9'
|
||||
_VALID_URL = r'https?://(?:www\.)?(?:channel9\.msdn\.com|s\.ch9\.ms)/(?P<contentpath>.+?)(?P<rss>/RSS)?/?(?:[?#&]|$)'
|
||||
_EMBED_REGEX = [r'<iframe[^>]+src=["\'](?P<url>https?://channel9\.msdn\.com/(?:[^/]+/)+)player\b']
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'http://channel9.msdn.com/Events/TechEd/Australia/2013/KOS002',
|
||||
'md5': '32083d4eaf1946db6d454313f44510ca',
|
        'info_dict': {
            'id': '6c413323-383a-49dc-88f9-a22800cab024',
            'ext': 'wmv',
            'title': 'Developer Kick-Off Session: Stuff We Love',
            'description': 'md5:b80bf9355a503c193aff7ec6cd5a7731',
            'duration': 4576,
            'thumbnail': r're:https?://.*\.jpg',
            'timestamp': 1377717420,
            'upload_date': '20130828',
            'session_code': 'KOS002',
            'session_room': 'Arena 1A',
            'session_speakers': 'count:5',
        },
    }, {
        'url': 'http://channel9.msdn.com/posts/Self-service-BI-with-Power-BI-nuclear-testing',
        'md5': 'dcf983ee6acd2088e7188c3cf79b46bc',
        'info_dict': {
            'id': 'fe8e435f-bb93-4e01-8e97-a28c01887024',
            'ext': 'wmv',
            'title': 'Self-service BI with Power BI - nuclear testing',
            'description': 'md5:2d17fec927fc91e9e17783b3ecc88f54',
            'duration': 1540,
            'thumbnail': r're:https?://.*\.jpg',
            'timestamp': 1386381991,
            'upload_date': '20131207',
            'authors': ['Mike Wilmot'],
        },
    }, {
        # low quality mp4 is best
        'url': 'https://channel9.msdn.com/Events/CPP/CppCon-2015/Ranges-for-the-Standard-Library',
        'info_dict': {
            'id': '33ad69d2-6a4e-4172-83a1-a523013dec76',
            'ext': 'mp4',
            'title': 'Ranges for the Standard Library',
            'description': 'md5:9895e0a9fd80822d2f01c454b8f4a372',
            'duration': 5646,
            'thumbnail': r're:https?://.*\.jpg',
            'upload_date': '20150930',
            'timestamp': 1443640735,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://channel9.msdn.com/Events/DEVintersection/DEVintersection-2016/RSS',
        'info_dict': {
            'id': 'Events/DEVintersection/DEVintersection-2016',
            'title': 'DEVintersection 2016 Orlando Sessions',
        },
        'playlist_mincount': 14,
    }, {
        'url': 'https://channel9.msdn.com/Niners/Splendid22/Queue/76acff796e8f411184b008028e0d492b/RSS',
        'only_matching': True,
    }, {
        'url': 'https://channel9.msdn.com/Events/Speakers/scott-hanselman/RSS?UrlSafeName=scott-hanselman',
        'only_matching': True,
    }]

    _RSS_URL = 'http://channel9.msdn.com/%s/RSS'

    def _extract_list(self, video_id, rss_url=None):
        if not rss_url:
            rss_url = self._RSS_URL % video_id
        rss = self._download_xml(rss_url, video_id, 'Downloading RSS')
        entries = [self.url_result(session_url.text, 'Channel9')
                   for session_url in rss.findall('./channel/item/link')]
        title_text = rss.find('./channel/title').text
        return self.playlist_result(entries, video_id, title_text)

    def _real_extract(self, url):
        content_path, rss = self._match_valid_url(url).groups()

        if rss:
            return self._extract_list(content_path, url)

        webpage = self._download_webpage(
            url, content_path, 'Downloading web page')

        episode_data = self._search_regex(
            r"data-episode='([^']+)'", webpage, 'episode data', default=None)
        if episode_data:
            episode_data = self._parse_json(unescapeHTML(
                episode_data), content_path)
            content_id = episode_data['contentId']
            is_session = '/Sessions(' in episode_data['api']
            content_url = 'https://channel9.msdn.com/odata' + episode_data['api'] + '?$select=Captions,CommentCount,MediaLengthInSeconds,PublishedDate,Rating,RatingCount,Title,VideoMP4High,VideoMP4Low,VideoMP4Medium,VideoPlayerPreviewImage,VideoWMV,VideoWMVHQ,Views,'
            if is_session:
                content_url += 'Code,Description,Room,Slides,Speakers,ZipFile&$expand=Speakers'
            else:
                content_url += 'Authors,Body&$expand=Authors'
            content_data = self._download_json(content_url, content_id)
            title = content_data['Title']

            QUALITIES = (
                'mp3',
                'wmv', 'mp4',
                'wmv-low', 'mp4-low',
                'wmv-mid', 'mp4-mid',
                'wmv-high', 'mp4-high',
            )

            quality_key = qualities(QUALITIES)

            def quality(quality_id, format_url):
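                # '_Source.' URLs are the original uploads; returning
                # len(QUALITIES) ranks them above every named quality tier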
                return (len(QUALITIES) if '_Source.' in format_url
                        else quality_key(quality_id))

            formats = []
            urls = set()

            SITE_QUALITIES = {
                'MP3': 'mp3',
                'MP4': 'mp4',
                'Low Quality WMV': 'wmv-low',
                'Low Quality MP4': 'mp4-low',
                'Mid Quality WMV': 'wmv-mid',
                'Mid Quality MP4': 'mp4-mid',
                'High Quality WMV': 'wmv-high',
                'High Quality MP4': 'mp4-high',
            }

            formats_select = self._search_regex(
                r'(?s)<select[^>]+name=["\']format[^>]+>(.+?)</select', webpage,
                'formats select', default=None)
            if formats_select:
                for mobj in re.finditer(
                        r'<option\b[^>]+\bvalue=(["\'])(?P<url>(?:(?!\1).)+)\1[^>]*>\s*(?P<format>[^<]+?)\s*<',
                        formats_select):
                    format_url = mobj.group('url')
                    if format_url in urls:
                        continue
                    urls.add(format_url)
                    format_id = mobj.group('format')
                    quality_id = SITE_QUALITIES.get(format_id, format_id)
                    formats.append({
                        'url': format_url,
                        'format_id': quality_id,
                        'quality': quality(quality_id, format_url),
                        'vcodec': 'none' if quality_id == 'mp3' else None,
                    })

            API_QUALITIES = {
                'VideoMP4Low': 'mp4-low',
                'VideoWMV': 'wmv-mid',
                'VideoMP4Medium': 'mp4-mid',
                'VideoMP4High': 'mp4-high',
                'VideoWMVHQ': 'wmv-hq',
            }

            for format_id, q in API_QUALITIES.items():
                q_url = content_data.get(format_id)
                if not q_url or q_url in urls:
                    continue
                urls.add(q_url)
                formats.append({
                    'url': q_url,
                    'format_id': q,
                    'quality': quality(q, q_url),
                })

            slides = content_data.get('Slides')
            zip_file = content_data.get('ZipFile')

            if not formats and not slides and not zip_file:
                self.raise_no_formats(
                    'None of recording, slides or zip are available for %s' % content_path)

            subtitles = {}
            for caption in content_data.get('Captions', []):
                caption_url = caption.get('Url')
                if not caption_url:
                    continue
                subtitles.setdefault(caption.get('Language', 'en'), []).append({
                    'url': caption_url,
                    'ext': 'vtt',
                })

            common = {
                'id': content_id,
                'title': title,
                'description': clean_html(content_data.get('Description') or content_data.get('Body')),
                'thumbnail': content_data.get('VideoPlayerPreviewImage'),
                'duration': int_or_none(content_data.get('MediaLengthInSeconds')),
                'timestamp': parse_iso8601(content_data.get('PublishedDate')),
                'avg_rating': int_or_none(content_data.get('Rating')),
                'rating_count': int_or_none(content_data.get('RatingCount')),
                'view_count': int_or_none(content_data.get('Views')),
                'comment_count': int_or_none(content_data.get('CommentCount')),
                'subtitles': subtitles,
            }
            if is_session:
                speakers = []
                for s in content_data.get('Speakers', []):
                    speaker_name = s.get('FullName')
                    if not speaker_name:
                        continue
                    speakers.append(speaker_name)

                common.update({
                    'session_code': content_data.get('Code'),
                    'session_room': content_data.get('Room'),
                    'session_speakers': speakers,
                })
            else:
                authors = []
                for a in content_data.get('Authors', []):
                    author_name = a.get('DisplayName')
                    if not author_name:
                        continue
                    authors.append(author_name)
                common['authors'] = authors

            contents = []

            if slides:
                d = common.copy()
                d.update({'title': title + '-Slides', 'url': slides})
                contents.append(d)

            if zip_file:
                d = common.copy()
                d.update({'title': title + '-Zip', 'url': zip_file})
                contents.append(d)

            if formats:
                d = common.copy()
                d.update({'title': title, 'formats': formats})
                contents.append(d)
            return self.playlist_result(contents)
        else:
            return self._extract_list(content_path)

@@ -1,88 +0,0 @@
import re

from .common import InfoExtractor
from ..compat import compat_b64decode
from ..utils import parse_duration


class ChirbitIE(InfoExtractor):
    IE_NAME = 'chirbit'
    _VALID_URL = r'https?://(?:www\.)?chirb\.it/(?:(?:wp|pl)/|fb_chirbit_player\.swf\?key=)?(?P<id>[\da-zA-Z]+)'
    _TESTS = [{
        'url': 'http://chirb.it/be2abG',
        'info_dict': {
            'id': 'be2abG',
            'ext': 'mp3',
            'title': 'md5:f542ea253f5255240be4da375c6a5d7e',
            'description': 'md5:f24a4e22a71763e32da5fed59e47c770',
            'duration': 306,
            'uploader': 'Gerryaudio',
        },
        'params': {
            'skip_download': True,
        }
    }, {
        'url': 'https://chirb.it/fb_chirbit_player.swf?key=PrIPv5',
        'only_matching': True,
    }, {
        'url': 'https://chirb.it/wp/MN58c2',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        audio_id = self._match_id(url)

        webpage = self._download_webpage(
            'http://chirb.it/%s' % audio_id, audio_id)

        data_fd = self._search_regex(
            r'data-fd=(["\'])(?P<url>(?:(?!\1).)+)\1',
            webpage, 'data fd', group='url')

        # Reverse engineered from https://chirb.it/js/chirbit.player.js (look
        # for soundURL)
        audio_url = compat_b64decode(data_fd[::-1]).decode('utf-8')
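        # e.g. a (hypothetical) data-fd value of '==Ac0RHa' reverses to
        # 'aHR0cA==', which base64-decodes to 'http'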

        title = self._search_regex(
            r'class=["\']chirbit-title["\'][^>]*>([^<]+)', webpage, 'title')
        description = self._search_regex(
            r'<h3>Description</h3>\s*<pre[^>]*>([^<]+)</pre>',
            webpage, 'description', default=None)
        duration = parse_duration(self._search_regex(
            r'class=["\']c-length["\'][^>]*>([^<]+)',
            webpage, 'duration', fatal=False))
        uploader = self._search_regex(
            r'id=["\']chirbit-username["\'][^>]*>([^<]+)',
            webpage, 'uploader', fatal=False)

        return {
            'id': audio_id,
            'url': audio_url,
            'title': title,
            'description': description,
            'duration': duration,
            'uploader': uploader,
        }


class ChirbitProfileIE(InfoExtractor):
    IE_NAME = 'chirbit:profile'
    _VALID_URL = r'https?://(?:www\.)?chirbit\.com/(?:rss/)?(?P<id>[^/]+)'
    _TEST = {
        'url': 'http://chirbit.com/ScarletBeauty',
        'info_dict': {
            'id': 'ScarletBeauty',
        },
        'playlist_mincount': 3,
    }

    def _real_extract(self, url):
        profile_id = self._match_id(url)

        webpage = self._download_webpage(url, profile_id)

        entries = [
            self.url_result(self._proto_relative_url('//chirb.it/' + video_id))
            for _, video_id in re.findall(r'<input[^>]+id=([\'"])copy-btn-(?P<id>[0-9a-zA-Z]+)\1', webpage)]

        return self.playlist_result(entries, profile_id)

@@ -1,56 +0,0 @@
from .common import InfoExtractor
from ..utils import (
    unified_strdate,
    xpath_text,
)


class CinchcastIE(InfoExtractor):
    _VALID_URL = r'https?://player\.cinchcast\.com/.*?(?:assetId|show_id)=(?P<id>[0-9]+)'
    _EMBED_REGEX = [r'<iframe[^>]+?src=(["\'])(?P<url>https?://player\.cinchcast\.com/.+?)\1']

    _TESTS = [{
        'url': 'http://player.cinchcast.com/?show_id=5258197&platformId=1&assetType=single',
        'info_dict': {
            'id': '5258197',
            'ext': 'mp3',
            'title': 'Train Your Brain to Up Your Game with Coach Mandy',
            'upload_date': '20130816',
        },
    }, {
        # Actual test is run in generic, look for undergroundwellness
        'url': 'http://player.cinchcast.com/?platformId=1&assetType=single&assetId=7141703',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        doc = self._download_xml(
            'http://www.blogtalkradio.com/playerasset/mrss?assetType=single&assetId=%s' % video_id,
            video_id)

        item = doc.find('.//item')
        title = xpath_text(item, './title', fatal=True)
        date_str = xpath_text(
            item, './{http://developer.longtailvideo.com/trac/}date')
        upload_date = unified_strdate(date_str, day_first=False)
        # duration is present but wrong
        formats = [{
            'format_id': 'main',
            'url': item.find('./{http://search.yahoo.com/mrss/}content').attrib['url'],
        }]
        backup_url = xpath_text(
            item, './{http://developer.longtailvideo.com/trac/}backupContent')
        if backup_url:
            formats.append({
                'preference': 2,  # seems to be more reliable
                'format_id': 'backup',
                'url': backup_url,
            })

        return {
            'id': video_id,
            'title': title,
            'upload_date': upload_date,
            'formats': formats,
        }

@@ -1,52 +0,0 @@
from .common import InfoExtractor
from ..utils import (
    find_xpath_attr,
    fix_xml_ampersands
)


class ClipsyndicateIE(InfoExtractor):
    _VALID_URL = r'https?://(?:chic|www)\.clipsyndicate\.com/video/play(list/\d+)?/(?P<id>\d+)'

    _TESTS = [{
        'url': 'http://www.clipsyndicate.com/video/play/4629301/brick_briscoe',
        'md5': '4d7d549451bad625e0ff3d7bd56d776c',
        'info_dict': {
            'id': '4629301',
            'ext': 'mp4',
            'title': 'Brick Briscoe',
            'duration': 612,
            'thumbnail': r're:^https?://.+\.jpg',
        },
    }, {
        'url': 'http://chic.clipsyndicate.com/video/play/5844117/shark_attack',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        js_player = self._download_webpage(
            'http://eplayer.clipsyndicate.com/embed/player.js?va_id=%s' % video_id,
            video_id, 'Downloading player')
        # it includes a required token
        flvars = self._search_regex(r'flvars: "(.*?)"', js_player, 'flvars')

        pdoc = self._download_xml(
            'http://eplayer.clipsyndicate.com/osmf/playlist?%s' % flvars,
            video_id, 'Downloading video info',
            transform_source=fix_xml_ampersands)

        track_doc = pdoc.find('trackList/track')

        def find_param(name):
            node = find_xpath_attr(track_doc, './/param', 'name', name)
            if node is not None:
                return node.attrib['value']

        return {
            'id': video_id,
            'title': find_param('title'),
            'url': track_doc.find('location').text,
            'thumbnail': find_param('thumbnail'),
            'duration': int(find_param('duration')),
        }

@@ -1,57 +0,0 @@
from .common import InfoExtractor
from ..utils import (
    str_to_int,
    unified_strdate,
)


class CloudyIE(InfoExtractor):
    _IE_DESC = 'cloudy.ec'
    _VALID_URL = r'https?://(?:www\.)?cloudy\.ec/(?:v/|embed\.php\?.*?\bid=)(?P<id>[A-Za-z0-9]+)'
    _TESTS = [{
        'url': 'https://www.cloudy.ec/v/af511e2527aac',
        'md5': '29832b05028ead1b58be86bf319397ca',
        'info_dict': {
            'id': 'af511e2527aac',
            'ext': 'mp4',
            'title': 'Funny Cats and Animals Compilation june 2013',
            'upload_date': '20130913',
            'view_count': int,
        }
    }, {
        'url': 'http://www.cloudy.ec/embed.php?autoplay=1&id=af511e2527aac',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(
            'https://www.cloudy.ec/embed.php', video_id, query={
                'id': video_id,
                'playerPage': 1,
                'autoplay': 1,
            })

        info = self._parse_html5_media_entries(url, webpage, video_id)[0]

        webpage = self._download_webpage(
            'https://www.cloudy.ec/v/%s' % video_id, video_id, fatal=False)

        if webpage:
            info.update({
                'title': self._search_regex(
                    r'<h\d[^>]*>([^<]+)<', webpage, 'title'),
                'upload_date': unified_strdate(self._search_regex(
                    r'>Published at (\d{4}-\d{1,2}-\d{1,2})', webpage,
                    'upload date', fatal=False)),
                'view_count': str_to_int(self._search_regex(
                    r'([\d,.]+) views<', webpage, 'view count', fatal=False)),
            })

        if not info.get('title'):
            info['title'] = video_id

        info['id'] = video_id

        return info

@@ -6,6 +6,7 @@


class ClubicIE(InfoExtractor):
    _WORKING = False
    _VALID_URL = r'https?://(?:www\.)?clubic\.com/video/(?:[^/]+/)*video.*-(?P<id>[0-9]+)\.html'

    _TESTS = [{

@@ -4,6 +4,7 @@


class CMTIE(MTVIE):  # XXX: Do not subclass from concrete IE
    _WORKING = False
    IE_NAME = 'cmt.com'
    _VALID_URL = r'https?://(?:www\.)?cmt\.com/(?:videos|shows|(?:full-)?episodes|video-clips)/(?P<id>[^/]+)'


@@ -286,6 +286,9 @@ class InfoExtractor:
                   If it is not clear whether to use timestamp or this, use the former
    release_date:  The date (YYYYMMDD) when the video was released in UTC.
                   If not explicitly set, calculated from release_timestamp
    release_year:  Year (YYYY) as integer when the video or album was released.
                   To be used if no exact release date is known.
                   If not explicitly set, calculated from release_date.
    modified_timestamp: UNIX timestamp of the moment the video was last modified.
    modified_date: The date (YYYYMMDD) when the video was last modified in UTC.
                   If not explicitly set, calculated from modified_timestamp

@@ -379,6 +382,7 @@ class InfoExtractor:
                        'private', 'premium_only', 'subscriber_only', 'needs_auth',
                        'unlisted' or 'public'. Use 'InfoExtractor._availability'
                        to set it
    media_type:    The type of media as classified by the site, e.g. "episode", "clip", "trailer"
    _old_archive_ids: A list of old archive ids needed for backward compatibility
    _format_sort_fields: A list of fields to use for sorting formats
    __post_extractor: A function to be called just before the metadata is

@@ -427,7 +431,6 @@ class InfoExtractor:
                   and compilations).
    disc_number:   Number of the disc or other physical medium the track belongs to,
                   as an integer.
    release_year:  Year (YYYY) when the album was released.
    composer:      Composer of the piece

    The following fields should only be set for clips that should be cut from the original video:

@@ -2225,7 +2228,9 @@ def _extract_mpd_vod_duration(
            mpd_url, video_id,
            note='Downloading MPD VOD manifest' if note is None else note,
            errnote='Failed to download VOD manifest' if errnote is None else errnote,
            fatal=False, data=data, headers=headers, query=query) or {}
            fatal=False, data=data, headers=headers, query=query)
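        # with fatal=False the download helper can return a non-XML value on
        # error, so the result is type-checked before use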
        if not isinstance(mpd_doc, xml.etree.ElementTree.Element):
            return None
        return int_or_none(parse_duration(mpd_doc.get('mediaPresentationDuration')))

    @staticmethod

@@ -2339,7 +2344,9 @@ def _parse_smil_formats_and_subtitles(
        imgs_count = 0

        srcs = set()
        media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
        media = itertools.chain.from_iterable(
            smil.findall(self._xpath_ns(arg, namespace))
            for arg in ['.//video', './/audio', './/media'])
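        # './/media' additionally matches SMIL entries that use the generic
        # <media> element instead of <video>/<audio>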
        for medium in media:
            src = medium.get('src')
            if not src or src in srcs:

@@ -46,6 +46,10 @@ class CWTVIE(InfoExtractor):
            'timestamp': 1444107300,
            'age_limit': 14,
            'uploader': 'CWTV',
            'thumbnail': r're:^https?://.*\.jpe?g$',
            'chapters': 'count:4',
            'episode': 'Episode 20',
            'season': 'Season 11',
        },
        'params': {
            # m3u8 download

@@ -105,7 +105,7 @@ def _real_extract(self, url):
            'chapter': module.get('title'),
            'chapter_id': str_or_none(module.get('id')),
            'title': activity.get('title'),
            'url': smuggle_url(f'https://player.vimeo.com/video/{vimeo_id}', {'http_headers': {'Referer': 'https://api.cybrary.it'}})
            'url': smuggle_url(f'https://player.vimeo.com/video/{vimeo_id}', {'referer': 'https://api.cybrary.it'})
        }


@@ -1,150 +0,0 @@
from .common import InfoExtractor
from ..compat import compat_b64decode
from ..utils import (
    ExtractorError,
    int_or_none,
    js_to_json,
    parse_count,
    parse_duration,
    traverse_obj,
    try_get,
    unified_timestamp,
)


class DaftsexIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?daft\.sex/watch/(?P<id>-?\d+_\d+)'
    _TESTS = [{
        'url': 'https://daft.sex/watch/-35370899_456246186',
        'md5': '64c04ef7b4c7b04b308f3b0c78efe7cd',
        'info_dict': {
            'id': '-35370899_456246186',
            'ext': 'mp4',
            'title': 'just relaxing',
            'description': 'just relaxing – Watch video Watch video in high quality',
            'upload_date': '20201113',
            'timestamp': 1605261911,
            'thumbnail': r're:^https?://.*\.jpg$',
            'age_limit': 18,
            'duration': 15.0,
            'view_count': int
        },
    }, {
        'url': 'https://daft.sex/watch/-156601359_456242791',
        'info_dict': {
            'id': '-156601359_456242791',
            'ext': 'mp4',
            'title': 'Skye Blue - Dinner And A Show',
            'description': 'Skye Blue - Dinner And A Show - Watch video Watch video in high quality',
            'upload_date': '20200916',
            'timestamp': 1600250735,
            'thumbnail': 'https://psv153-1.crazycloud.ru/videos/-156601359/456242791/thumb.jpg?extra=i3D32KaBbBFf9TqDRMAVmQ',
        },
        'skip': 'deleted / private'
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        title = self._html_search_meta('name', webpage, 'title')
        timestamp = unified_timestamp(self._html_search_meta('uploadDate', webpage, 'Upload Date', default=None))
        description = self._html_search_meta('description', webpage, 'Description', default=None)

        duration = parse_duration(self._search_regex(
            r'Duration: ((?:[0-9]{2}:){0,2}[0-9]{2})',
            webpage, 'duration', fatal=False))
        views = parse_count(self._search_regex(
            r'Views: ([0-9 ]+)',
            webpage, 'views', fatal=False))

        player_hash = self._search_regex(
            r'DaxabPlayer\.Init\({[\s\S]*hash:\s*"([0-9a-zA-Z_\-]+)"[\s\S]*}',
            webpage, 'player hash')
        player_color = self._search_regex(
            r'DaxabPlayer\.Init\({[\s\S]*color:\s*"([0-9a-z]+)"[\s\S]*}',
            webpage, 'player color', fatal=False) or ''

        embed_page = self._download_webpage(
            'https://dxb.to/player/%s?color=%s' % (player_hash, player_color),
            video_id, headers={'Referer': url})
        video_params = self._parse_json(
            self._search_regex(
                r'window\.globParams\s*=\s*({[\S\s]+})\s*;\s*<\/script>',
                embed_page, 'video parameters'),
            video_id, transform_source=js_to_json)

        server_domain = 'https://%s' % compat_b64decode(video_params['server'][::-1]).decode('utf-8')
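        # the API hostname is delivered reversed and base64-encoded;
        # [::-1] undoes the reversal before decoding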

        cdn_files = traverse_obj(video_params, ('video', 'cdn_files')) or {}
        if cdn_files:
            formats = []
            for format_id, format_data in cdn_files.items():
                ext, height = format_id.split('_')
                formats.append({
                    'format_id': format_id,
                    'url': f'{server_domain}/videos/{video_id.replace("_", "/")}/{height}.mp4?extra={format_data.split(".")[-1]}',
                    'height': int_or_none(height),
                    'ext': ext,
                })

            return {
                'id': video_id,
                'title': title,
                'formats': formats,
                'description': description,
                'duration': duration,
                'thumbnail': try_get(video_params, lambda vi: 'https:' + compat_b64decode(vi['video']['thumb']).decode('utf-8')),
                'timestamp': timestamp,
                'view_count': views,
                'age_limit': 18,
            }

        items = self._download_json(
            f'{server_domain}/method/video.get/{video_id}', video_id,
            headers={'Referer': url}, query={
                'token': video_params['video']['access_token'],
                'videos': video_id,
                'ckey': video_params['c_key'],
                'credentials': video_params['video']['credentials'],
            })['response']['items']

        if not items:
            raise ExtractorError('Video is not available', video_id=video_id, expected=True)

        item = items[0]
        formats = []
        for f_id, f_url in item.get('files', {}).items():
            if f_id == 'external':
                return self.url_result(f_url)
            ext, height = f_id.split('_')
            height_extra_key = traverse_obj(video_params, ('video', 'partial', 'quality', height))
            if height_extra_key:
                formats.append({
                    'format_id': f'{height}p',
                    'url': f'{server_domain}/{f_url[8:]}&videos={video_id}&extra_key={height_extra_key}',
                    'height': int_or_none(height),
                    'ext': ext,
                })

        thumbnails = []
        for k, v in item.items():
            if k.startswith('photo_') and v:
                width = k.replace('photo_', '')
                thumbnails.append({
                    'id': width,
                    'url': v,
                    'width': int_or_none(width),
                })

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'comment_count': int_or_none(item.get('comments')),
            'description': description,
            'duration': duration,
            'thumbnails': thumbnails,
            'timestamp': timestamp,
            'view_count': views,
            'age_limit': 18,
        }

@@ -93,7 +93,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
    _VALID_URL = r'''(?ix)
                    https?://
                        (?:
                            (?:(?:www|touch|geo)\.)?dailymotion\.[a-z]{2,3}/(?:(?:(?:(?:embed|swf|\#)/)|player\.html\?)?video|swf)|
                            (?:(?:www|touch|geo)\.)?dailymotion\.[a-z]{2,3}/(?:(?:(?:(?:embed|swf|\#)/)|player(?:/\w+)?\.html\?)?video|swf)|
                            (?:www\.)?lequipe\.fr/video
                        )
                        [/=](?P<id>[^/?_&]+)(?:.+?\bplaylist=(?P<playlist_id>x[0-9a-z]+))?

@@ -107,13 +107,17 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
            'id': 'x5kesuj',
            'ext': 'mp4',
            'title': 'Office Christmas Party Review – Jason Bateman, Olivia Munn, T.J. Miller',
            'description': 'Office Christmas Party Review - Jason Bateman, Olivia Munn, T.J. Miller',
            'description': 'Office Christmas Party Review - Jason Bateman, Olivia Munn, T.J. Miller',
            'duration': 187,
            'timestamp': 1493651285,
            'upload_date': '20170501',
            'uploader': 'Deadline',
            'uploader_id': 'x1xm8ri',
            'age_limit': 0,
            'view_count': int,
            'like_count': int,
            'tags': ['hollywood', 'celeb', 'celebrity', 'movies', 'red carpet'],
            'thumbnail': r're:https://(?:s[12]\.)dmcdn\.net/v/K456B1aXqIx58LKWQ/x1080',
        },
    }, {
        'url': 'https://geo.dailymotion.com/player.html?video=x89eyek&mute=true',

@@ -132,7 +136,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
            'view_count': int,
            'like_count': int,
            'tags': ['en_quete_d_esprit'],
            'thumbnail': 'https://s2.dmcdn.net/v/Tncwi1YGKdvFbDuDY/x1080',
            'thumbnail': r're:https://(?:s[12]\.)dmcdn\.net/v/Tncwi1YNg_RUl7ueu/x1080',
        }
    }, {
        'url': 'https://www.dailymotion.com/video/x2iuewm_steam-machine-models-pricing-listed-on-steam-store-ign-news_videogames',

@@ -201,6 +205,12 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
    }, {
        'url': 'https://www.dailymotion.com/video/x3z49k?playlist=xv4bw',
        'only_matching': True,
    }, {
        'url': 'https://geo.dailymotion.com/player/x86gw.html?video=k46oCapRs4iikoz9DWy',
        'only_matching': True,
    }, {
        'url': 'https://geo.dailymotion.com/player/xakln.html?video=x8mjju4&customConfig%5BcustomParams%5D=%2Ffr-fr%2Ftennis%2Fwimbledon-mens-singles%2Farticles-video',
        'only_matching': True,
    }]
    _GEO_BYPASS = False
    _COMMON_MEDIA_FIELDS = '''description

@@ -1,37 +0,0 @@
from .common import InfoExtractor


class DefenseGouvFrIE(InfoExtractor):
    IE_NAME = 'defense.gouv.fr'
    _VALID_URL = r'https?://.*?\.defense\.gouv\.fr/layout/set/ligthboxvideo/base-de-medias/webtv/(?P<id>[^/?#]*)'

    _TEST = {
        'url': 'http://www.defense.gouv.fr/layout/set/ligthboxvideo/base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1',
        'md5': '75bba6124da7e63d2d60b5244ec9430c',
        'info_dict': {
            'id': '11213',
            'ext': 'mp4',
            'title': 'attaque-chimique-syrienne-du-21-aout-2013-1'
        }
    }

    def _real_extract(self, url):
        title = self._match_id(url)
        webpage = self._download_webpage(url, title)

        video_id = self._search_regex(
            r"flashvars.pvg_id=\"(\d+)\";",
            webpage, 'ID')

        json_url = (
            'http://static.videos.gouv.fr/brightcovehub/export/json/%s' %
            video_id)
        info = self._download_json(json_url, title, 'Downloading JSON config')
        video_url = info['renditions'][0]['url']

        return {
            'id': video_id,
            'ext': 'mp4',
            'url': video_url,
            'title': title,
        }

@@ -3,6 +3,7 @@


class DHMIE(InfoExtractor):
    _WORKING = False
    IE_DESC = 'Filmarchiv - Deutsches Historisches Museum'
    _VALID_URL = r'https?://(?:www\.)?dhm\.de/filmarchiv/(?:[^/]+/)+(?P<id>[^/]+)'


@@ -1,81 +0,0 @@
from .common import InfoExtractor
from ..utils import (
    float_or_none,
    int_or_none,
)


class DotsubIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?dotsub\.com/view/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'https://dotsub.com/view/9c63db2a-fa95-4838-8e6e-13deafe47f09',
        'md5': '21c7ff600f545358134fea762a6d42b6',
        'info_dict': {
            'id': '9c63db2a-fa95-4838-8e6e-13deafe47f09',
            'ext': 'flv',
            'title': 'MOTIVATION - "It\'s Possible" Best Inspirational Video Ever',
            'description': 'md5:41af1e273edbbdfe4e216a78b9d34ac6',
            'thumbnail': 're:^https?://dotsub.com/media/9c63db2a-fa95-4838-8e6e-13deafe47f09/p',
            'duration': 198,
            'uploader': 'liuxt',
            'timestamp': 1385778501.104,
            'upload_date': '20131130',
            'view_count': int,
        }
    }, {
        'url': 'https://dotsub.com/view/747bcf58-bd59-45b7-8c8c-ac312d084ee6',
        'md5': '2bb4a83896434d5c26be868c609429a3',
        'info_dict': {
            'id': '168006778',
            'ext': 'mp4',
            'title': 'Apartments and flats in Raipur the white symphony',
            'description': 'md5:784d0639e6b7d1bc29530878508e38fe',
            'thumbnail': 're:^https?://dotsub.com/media/747bcf58-bd59-45b7-8c8c-ac312d084ee6/p',
            'duration': 290,
            'timestamp': 1476767794.2809999,
            'upload_date': '20161018',
            'uploader': 'parthivi001',
            'uploader_id': 'user52596202',
            'view_count': int,
        },
        'add_ie': ['Vimeo'],
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        info = self._download_json(
            'https://dotsub.com/api/media/%s/metadata' % video_id, video_id)
        video_url = info.get('mediaURI')

        if not video_url:
            webpage = self._download_webpage(url, video_id)
            video_url = self._search_regex(
                [r'<source[^>]+src="([^"]+)"', r'"file"\s*:\s*\'([^\']+)'],
                webpage, 'video url', default=None)
        info_dict = {
            'id': video_id,
            'url': video_url,
            'ext': 'flv',
        }

        if not video_url:
            setup_data = self._parse_json(self._html_search_regex(
                r'(?s)data-setup=([\'"])(?P<content>(?!\1).+?)\1',
                webpage, 'setup data', group='content'), video_id)
            info_dict = {
                '_type': 'url_transparent',
                'url': setup_data['src'],
            }

        info_dict.update({
            'title': info['title'],
            'description': info.get('description'),
            'thumbnail': info.get('screenshotURI'),
            'duration': int_or_none(info.get('duration'), 1000),
            'uploader': info.get('user'),
            'timestamp': float_or_none(info.get('dateCreated'), 1000),
            'view_count': int_or_none(info.get('numberOfViews')),
        })

        return info_dict

@@ -1,21 +1,17 @@
import binascii
import hashlib
import re
import json
import uuid

from .common import InfoExtractor
from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
from ..compat import compat_urllib_parse_unquote
from ..utils import (
    ExtractorError,
    float_or_none,
    int_or_none,
    mimetype2ext,
    str_or_none,
    traverse_obj,
    unified_timestamp,
    parse_iso8601,
    try_call,
    update_url_query,
    url_or_none,
)
from ..utils.traversal import traverse_obj

SERIES_API = 'https://production-cdn.dr-massive.com/api/page?device=web_browser&item_detail_expand=all&lang=da&max_list_prefetch=3&path=%s'

@@ -24,7 +20,7 @@ class DRTVIE(InfoExtractor):
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            (?:www\.)?dr\.dk/(?:tv/se|nyheder|(?P<radio>radio|lyd)(?:/ondemand)?)/(?:[^/]+/)*|
                            (?:www\.)?dr\.dk/tv/se(?:/ondemand)?/(?:[^/?#]+/)*|
                            (?:www\.)?(?:dr\.dk|dr-massive\.com)/drtv/(?:se|episode|program)/
                        )
                        (?P<id>[\da-z_-]+)

@@ -53,22 +49,6 @@ class DRTVIE(InfoExtractor):
        },
        'expected_warnings': ['Unable to download f4m manifest'],
        'skip': 'this video has been removed',
    }, {
        # embed
        'url': 'https://www.dr.dk/nyheder/indland/live-christianias-rydning-af-pusher-street-er-i-gang',
        'info_dict': {
            'id': 'urn:dr:mu:programcard:57c926176187a50a9c6e83c6',
            'ext': 'mp4',
            'title': 'christiania pusher street ryddes drdkrjpo',
            'description': 'md5:2a71898b15057e9b97334f61d04e6eb5',
            'timestamp': 1472800279,
            'upload_date': '20160902',
            'duration': 131.4,
        },
        'params': {
            'skip_download': True,
        },
        'expected_warnings': ['Unable to download f4m manifest'],
    }, {
        # with SignLanguage formats
        'url': 'https://www.dr.dk/tv/se/historien-om-danmark/-/historien-om-danmark-stenalder',

@@ -87,33 +67,54 @@ class DRTVIE(InfoExtractor):
            'season': 'Historien om Danmark',
            'series': 'Historien om Danmark',
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'this video has been removed',
    }, {
        'url': 'https://www.dr.dk/lyd/p4kbh/regionale-nyheder-kh4/p4-nyheder-2019-06-26-17-30-9',
        'only_matching': True,
    }, {
        'url': 'https://www.dr.dk/drtv/se/bonderoeven_71769',
        'url': 'https://www.dr.dk/drtv/se/frank-and-kastaniegaarden_71769',
        'info_dict': {
            'id': '00951930010',
            'ext': 'mp4',
            'title': 'Bonderøven 2019 (1:8)',
            'description': 'md5:b6dcfe9b6f0bea6703e9a0092739a5bd',
            'timestamp': 1654856100,
            'upload_date': '20220610',
            'duration': 2576.6,
            'season': 'Bonderøven 2019',
            'season_id': 'urn:dr:mu:bundle:5c201667a11fa01ca4528ce5',
            'title': 'Frank & Kastaniegaarden',
            'description': 'md5:974e1780934cf3275ef10280204bccb0',
            'release_timestamp': 1546545600,
            'release_date': '20190103',
            'duration': 2576,
            'season': 'Frank & Kastaniegaarden',
            'season_id': '67125',
            'release_year': 2019,
            'season_number': 2019,
            'series': 'Frank & Kastaniegaarden',
            'episode_number': 1,
            'episode': 'Episode 1',
            'episode': 'Frank & Kastaniegaarden',
            'thumbnail': r're:https?://.+',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # Foreign and Regular subtitle track
        'url': 'https://www.dr.dk/drtv/se/spise-med-price_-pasta-selv_397445',
        'info_dict': {
            'id': '00212301010',
            'ext': 'mp4',
            'episode_number': 1,
            'title': 'Spise med Price: Pasta Selv',
            'alt_title': '1. Pasta Selv',
            'release_date': '20230807',
            'description': 'md5:2da9060524fed707810d71080b3d0cd8',
            'duration': 1750,
            'season': 'Spise med Price',
            'release_timestamp': 1691438400,
            'season_id': '397440',
            'episode': 'Spise med Price: Pasta Selv',
            'thumbnail': r're:https?://.+',
            'season_number': 15,
            'series': 'Spise med Price',
            'release_year': 2022,
            'subtitles': 'mincount:2',
        },
        'params': {
            'skip_download': 'm3u8',
        },
    }, {
        'url': 'https://www.dr.dk/drtv/episode/bonderoeven_71769',
        'only_matching': True,

@@ -123,226 +124,127 @@ class DRTVIE(InfoExtractor):
    }, {
        'url': 'https://www.dr.dk/drtv/program/jagten_220924',
        'only_matching': True,
    }, {
        'url': 'https://www.dr.dk/lyd/p4aarhus/regionale-nyheder-ar4/regionale-nyheder-2022-05-05-12-30-3',
        'info_dict': {
            'id': 'urn:dr:mu:programcard:6265cb2571401424d0360113',
            'title': "Regionale nyheder",
            'ext': 'mp4',
            'duration': 120.043,
            'series': 'P4 Østjylland regionale nyheder',
            'timestamp': 1651746600,
            'season': 'Regionale nyheder',
            'release_year': 0,
            'season_id': 'urn:dr:mu:bundle:61c26889539f0201586b73c5',
            'description': '',
            'upload_date': '20220505',
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'this video has been removed',
    }, {
        'url': 'https://www.dr.dk/lyd/p4kbh/regionale-nyheder-kh4/regionale-nyheder-2023-03-14-10-30-9',
        'info_dict': {
            'ext': 'mp4',
            'id': '14802310112',
            'timestamp': 1678786200,
            'duration': 120.043,
            'season_id': 'urn:dr:mu:bundle:63a4f7c87140143504b6710f',
            'series': 'P4 København regionale nyheder',
            'upload_date': '20230314',
            'release_year': 0,
            'description': 'Hør seneste regionale nyheder fra P4 København.',
            'season': 'Regionale nyheder',
            'title': 'Regionale nyheder',
        },
    }]

    SUBTITLE_LANGS = {
        'DanishLanguageSubtitles': 'da',
        'ForeignLanguageSubtitles': 'da_foreign',
        'CombinedLanguageSubtitles': 'da_combined',
    }

    _TOKEN = None

    def _real_initialize(self):
        if self._TOKEN:
            return

        token_response = self._download_json(
            'https://production.dr-massive.com/api/authorization/anonymous-sso', None,
            note='Downloading anonymous token', headers={
                'content-type': 'application/json',
            }, query={
                'device': 'web_browser',
                'ff': 'idp,ldp,rpt',
                'lang': 'da',
                'supportFallbackToken': 'true',
            }, data=json.dumps({
                'deviceId': str(uuid.uuid4()),
                'scopes': ['Catalog'],
                'optout': True,
            }).encode())

        self._TOKEN = traverse_obj(
            token_response, (lambda _, x: x['type'] == 'UserAccount', 'value', {str}), get_all=False)
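        # the SSO response lists several token types; the anonymous
        # 'UserAccount' entry is the one used as the bearer token below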
        if not self._TOKEN:
            raise ExtractorError('Unable to get anonymous token')

    def _real_extract(self, url):
        raw_video_id, is_radio_url = self._match_valid_url(url).group('id', 'radio')
        url_slug = self._match_id(url)
        webpage = self._download_webpage(url, url_slug)

        webpage = self._download_webpage(url, raw_video_id)

        if '>Programmet er ikke længere tilgængeligt' in webpage:
            raise ExtractorError(
                'Video %s is not available' % raw_video_id, expected=True)

        video_id = self._search_regex(
            (r'data-(?:material-identifier|episode-slug)="([^"]+)"',
             r'data-resource="[^>"]+mu/programcard/expanded/([^"]+)"'),
            webpage, 'video id', default=None)

        if not video_id:
            video_id = self._search_regex(
                r'(urn(?:%3A|:)dr(?:%3A|:)mu(?:%3A|:)programcard(?:%3A|:)[\da-f]+)',
                webpage, 'urn', default=None)
            if video_id:
                video_id = compat_urllib_parse_unquote(video_id)

        _PROGRAMCARD_BASE = 'https://www.dr.dk/mu-online/api/1.4/programcard'
        query = {'expanded': 'true'}

        if video_id:
            programcard_url = '%s/%s' % (_PROGRAMCARD_BASE, video_id)
        json_data = self._search_json(
            r'window\.__data\s*=', webpage, 'data', url_slug, fatal=False) or {}
        item = traverse_obj(
            json_data, ('cache', 'page', ..., (None, ('entries', 0)), 'item', {dict}), get_all=False)
        if item:
            item_id = item.get('id')
        else:
            programcard_url = _PROGRAMCARD_BASE
            if is_radio_url:
                video_id = self._search_nextjs_data(
                    webpage, raw_video_id)['props']['pageProps']['episode']['productionNumber']
            else:
                json_data = self._search_json(
                    r'window\.__data\s*=', webpage, 'data', raw_video_id)
                video_id = traverse_obj(json_data, (
                    'cache', 'page', ..., (None, ('entries', 0)), 'item', 'customId',
                    {lambda x: x.split(':')[-1]}), get_all=False)
                if not video_id:
                    raise ExtractorError('Unable to extract video id')
            query['productionnumber'] = video_id
            item_id = url_slug.rsplit('_', 1)[-1]
            item = self._download_json(
                f'https://production-cdn.dr-massive.com/api/items/{item_id}', item_id,
                note='Attempting to download backup item data', query={
                    'device': 'web_browser',
                    'expand': 'all',
                    'ff': 'idp,ldp,rpt',
                    'geoLocation': 'dk',
                    'isDeviceAbroad': 'false',
                    'lang': 'da',
                    'segments': 'drtv,optedout',
                    'sub': 'Anonymous',
                })

        data = self._download_json(
            programcard_url, video_id, 'Downloading video JSON', query=query)

        supplementary_data = {}
        if re.search(r'_\d+$', raw_video_id):
            supplementary_data = self._download_json(
                SERIES_API % f'/episode/{raw_video_id}', raw_video_id, fatal=False) or {}

        title = str_or_none(data.get('Title')) or re.sub(
            r'\s*\|\s*(?:TV\s*\|\s*DR|DRTV)$', '',
            self._og_search_title(webpage))
        description = self._og_search_description(
            webpage, default=None) or data.get('Description')

        timestamp = unified_timestamp(
            data.get('PrimaryBroadcastStartTime') or data.get('SortDateTime'))

        thumbnail = None
        duration = None

        restricted_to_denmark = False
        video_id = try_call(lambda: item['customId'].rsplit(':', 1)[-1]) or item_id
        stream_data = self._download_json(
            f'https://production.dr-massive.com/api/account/items/{item_id}/videos', video_id,
            note='Downloading stream data', query={
                'delivery': 'stream',
                'device': 'web_browser',
                'ff': 'idp,ldp,rpt',
                'lang': 'da',
                'resolution': 'HD-1080',
                'sub': 'Anonymous',
            }, headers={'authorization': f'Bearer {self._TOKEN}'})

        formats = []
        subtitles = {}
        for stream in traverse_obj(stream_data, (lambda _, x: x['url'])):
            format_id = stream.get('format', 'na')
            access_service = stream.get('accessService')
            preference = None
            subtitle_suffix = ''
            if access_service in ('SpokenSubtitles', 'SignLanguage', 'VisuallyInterpreted'):
                preference = -1
                format_id += f'-{access_service}'
                subtitle_suffix = f'-{access_service}'
            elif access_service == 'StandardVideo':
                preference = 1
            fmts, subs = self._extract_m3u8_formats_and_subtitles(
                stream.get('url'), video_id, ext='mp4', preference=preference, m3u8_id=format_id, fatal=False)
            formats.extend(fmts)

        assets = []
        primary_asset = data.get('PrimaryAsset')
        if isinstance(primary_asset, dict):
            assets.append(primary_asset)
        secondary_assets = data.get('SecondaryAssets')
        if isinstance(secondary_assets, list):
            for secondary_asset in secondary_assets:
                if isinstance(secondary_asset, dict):
                    assets.append(secondary_asset)
            api_subtitles = traverse_obj(stream, ('subtitles', lambda _, v: url_or_none(v['link']), {dict}))
            if not api_subtitles:
                self._merge_subtitles(subs, target=subtitles)

        def hex_to_bytes(hex):
            return binascii.a2b_hex(hex.encode('ascii'))
            for sub_track in api_subtitles:
                lang = sub_track.get('language') or 'da'
                subtitles.setdefault(self.SUBTITLE_LANGS.get(lang, lang) + subtitle_suffix, []).append({
                    'url': sub_track['link'],
                    'ext': mimetype2ext(sub_track.get('format')) or 'vtt'
                })

        def decrypt_uri(e):
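            # EncryptedUri layout: 2-char prefix, 8 hex digits giving the
            # ciphertext length n, n hex digits of AES-CBC ciphertext,
            # then the hex IV, which also salts the SHA-256 key derivation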
            n = int(e[2:10], 16)
            a = e[10 + n:]
            data = hex_to_bytes(e[10:10 + n])
            key = hashlib.sha256(('%s:sRBzYNXBzkKgnjj8pGtkACch' % a).encode('utf-8')).digest()
            iv = hex_to_bytes(a)
            decrypted = unpad_pkcs7(aes_cbc_decrypt_bytes(data, key, iv))
            return decrypted.decode('utf-8').split('?')[0]

        for asset in assets:
            kind = asset.get('Kind')
            if kind == 'Image':
                thumbnail = url_or_none(asset.get('Uri'))
            elif kind in ('VideoResource', 'AudioResource'):
                duration = float_or_none(asset.get('DurationInMilliseconds'), 1000)
                restricted_to_denmark = asset.get('RestrictedToDenmark')
                asset_target = asset.get('Target')
                for link in asset.get('Links', []):
                    uri = link.get('Uri')
                    if not uri:
                        encrypted_uri = link.get('EncryptedUri')
                        if not encrypted_uri:
                            continue
                        try:
                            uri = decrypt_uri(encrypted_uri)
                        except Exception:
                            self.report_warning(
                                'Unable to decrypt EncryptedUri', video_id)
                            continue
                    uri = url_or_none(uri)
                    if not uri:
                        continue
                    target = link.get('Target')
                    format_id = target or ''
                    if asset_target in ('SpokenSubtitles', 'SignLanguage', 'VisuallyInterpreted'):
                        preference = -1
                        format_id += '-%s' % asset_target
                    elif asset_target == 'Default':
                        preference = 1
                    else:
                        preference = None
                    if target == 'HDS':
                        f4m_formats = self._extract_f4m_formats(
                            uri + '?hdcore=3.3.0&plugin=aasp-3.3.0.99.43',
                            video_id, preference, f4m_id=format_id, fatal=False)
                        if kind == 'AudioResource':
                            for f in f4m_formats:
                                f['vcodec'] = 'none'
                        formats.extend(f4m_formats)
                    elif target == 'HLS':
                        fmts, subs = self._extract_m3u8_formats_and_subtitles(
                            uri, video_id, 'mp4', entry_protocol='m3u8_native',
                            quality=preference, m3u8_id=format_id, fatal=False)
                        formats.extend(fmts)
                        self._merge_subtitles(subs, target=subtitles)
                    else:
                        bitrate = link.get('Bitrate')
                        if bitrate:
                            format_id += '-%s' % bitrate
                        formats.append({
                            'url': uri,
                            'format_id': format_id,
                            'tbr': int_or_none(bitrate),
                            'ext': link.get('FileFormat'),
                            'vcodec': 'none' if kind == 'AudioResource' else None,
                            'quality': preference,
                        })
                subtitles_list = asset.get('SubtitlesList') or asset.get('Subtitleslist')
                if isinstance(subtitles_list, list):
                    LANGS = {
                        'Danish': 'da',
                    }
                    for subs in subtitles_list:
                        if not isinstance(subs, dict):
                            continue
                        sub_uri = url_or_none(subs.get('Uri'))
                        if not sub_uri:
                            continue
                        lang = subs.get('Language') or 'da'
                        subtitles.setdefault(LANGS.get(lang, lang), []).append({
                            'url': sub_uri,
                            'ext': mimetype2ext(subs.get('MimeType')) or 'vtt'
                        })

        if not formats and restricted_to_denmark:
            self.raise_geo_restricted(
                'Unfortunately, DR is not allowed to show this program outside Denmark.',
                countries=self._GEO_COUNTRIES)
        if not formats and traverse_obj(item, ('season', 'customFields', 'IsGeoRestricted')):
            self.raise_geo_restricted(countries=self._GEO_COUNTRIES)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'duration': duration,
            'formats': formats,
            'subtitles': subtitles,
            'series': str_or_none(data.get('SeriesTitle')),
            'season': str_or_none(data.get('SeasonTitle')),
            'season_number': int_or_none(data.get('SeasonNumber')),
            'season_id': str_or_none(data.get('SeasonUrn')),
            'episode': traverse_obj(supplementary_data, ('entries', 0, 'item', 'contextualTitle')) or str_or_none(data.get('EpisodeTitle')),
            'episode_number': traverse_obj(supplementary_data, ('entries', 0, 'item', 'episodeNumber')) or int_or_none(data.get('EpisodeNumber')),
            'release_year': int_or_none(data.get('ProductionYear')),
            **traverse_obj(item, {
                'title': 'title',
                'alt_title': 'contextualTitle',
                'description': 'description',
                'thumbnail': ('images', 'wallpaper'),
                'release_timestamp': ('customFields', 'BroadcastTimeDK', {parse_iso8601}),
                'duration': ('duration', {int_or_none}),
                'series': ('season', 'show', 'title'),
                'season': ('season', 'title'),
                'season_number': ('season', 'seasonNumber', {int_or_none}),
                'season_id': 'seasonId',
                'episode': 'episodeName',
                'episode_number': ('episodeNumber', {int_or_none}),
                'release_year': ('releaseYear', {int_or_none}),
            }),
        }


@@ -412,6 +314,8 @@ class DRTVSeasonIE(InfoExtractor):
            'display_id': 'frank-and-kastaniegaarden',
            'title': 'Frank & Kastaniegaarden',
            'series': 'Frank & Kastaniegaarden',
            'season_number': 2008,
            'alt_title': 'Season 2008',
        },
        'playlist_mincount': 8
    }, {

@@ -421,6 +325,8 @@ class DRTVSeasonIE(InfoExtractor):
            'display_id': 'frank-and-kastaniegaarden',
            'title': 'Frank & Kastaniegaarden',
            'series': 'Frank & Kastaniegaarden',
            'season_number': 2009,
            'alt_title': 'Season 2009',
        },
        'playlist_mincount': 19
    }]

@@ -434,6 +340,7 @@ def _real_extract(self, url):
            'url': f'https://www.dr.dk/drtv{episode["path"]}',
            'ie_key': DRTVIE.ie_key(),
            'title': episode.get('title'),
            'alt_title': episode.get('contextualTitle'),
            'episode': episode.get('episodeName'),
            'description': episode.get('shortDescription'),
            'series': traverse_obj(data, ('entries', 0, 'item', 'title')),

@@ -446,6 +353,7 @@ def _real_extract(self, url):
            'id': season_id,
            'display_id': display_id,
            'title': traverse_obj(data, ('entries', 0, 'item', 'title')),
            'alt_title': traverse_obj(data, ('entries', 0, 'item', 'contextualTitle')),
            'series': traverse_obj(data, ('entries', 0, 'item', 'title')),
            'entries': entries,
            'season_number': traverse_obj(data, ('entries', 0, 'item', 'seasonNumber'))

@@ -463,6 +371,7 @@ class DRTVSeriesIE(InfoExtractor):
            'display_id': 'frank-and-kastaniegaarden',
            'title': 'Frank & Kastaniegaarden',
            'series': 'Frank & Kastaniegaarden',
            'alt_title': '',
        },
        'playlist_mincount': 15
    }]

@@ -476,6 +385,7 @@ def _real_extract(self, url):
            'url': f'https://www.dr.dk/drtv{season.get("path")}',
            'ie_key': DRTVSeasonIE.ie_key(),
            'title': season.get('title'),
            'alt_title': season.get('contextualTitle'),
            'series': traverse_obj(data, ('entries', 0, 'item', 'title')),
            'season_number': traverse_obj(data, ('entries', 0, 'item', 'seasonNumber'))
        } for season in traverse_obj(data, ('entries', 0, 'item', 'show', 'seasons', 'items'))]

@@ -485,6 +395,7 @@ def _real_extract(self, url):
            'id': series_id,
            'display_id': display_id,
            'title': traverse_obj(data, ('entries', 0, 'item', 'title')),
            'alt_title': traverse_obj(data, ('entries', 0, 'item', 'contextualTitle')),
            'series': traverse_obj(data, ('entries', 0, 'item', 'title')),
            'entries': entries
        }


@@ -138,7 +138,7 @@ def _real_extract(self, url):
        # of the video.
        return {
            '_type': 'url_transparent',
            'url': smuggle_url(data_url, {'http_headers': headers}),
            'url': smuggle_url(data_url, {'referer': webpage_url}),
            'id': video_id,
            'title': title,
            'series': series_title,

119  yt_dlp/extractor/duoplay.py  Normal file
@@ -0,0 +1,119 @@
from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    extract_attributes,
    get_element_text_and_html_by_tag,
    int_or_none,
    join_nonempty,
    str_or_none,
    try_call,
    unified_timestamp,
)
from ..utils.traversal import traverse_obj


class DuoplayIE(InfoExtractor):
    _VALID_URL = r'https://duoplay\.ee/(?P<id>\d+)/[\w-]+/?(?:\?(?:[^#]+&)?ep=(?P<ep>\d+))?'
    _TESTS = [{
        'note': 'Siberi võmm S02E12',
        'url': 'https://duoplay.ee/4312/siberi-vomm?ep=24',
        'md5': '1ff59d535310ac9c5cf5f287d8f91b2d',
        'info_dict': {
            'id': '4312_24',
            'ext': 'mp4',
            'title': 'Operatsioon "Öö"',
            'thumbnail': r're:https://.+\.jpg(?:\?c=\d+)?$',
            'description': 'md5:8ef98f38569d6b8b78f3d350ccc6ade8',
            'upload_date': '20170523',
            'timestamp': 1495567800,
            'series': 'Siberi võmm',
            'series_id': '4312',
            'season': 'Season 2',
            'season_number': 2,
            'episode': 'Operatsioon "Öö"',
            'episode_number': 12,
            'episode_id': 24,
        },
    }, {
        'note': 'Empty title',
        'url': 'https://duoplay.ee/17/uhikarotid?ep=14',
        'md5': '6aca68be71112314738dd17cced7f8bf',
        'info_dict': {
            'id': '17_14',
            'ext': 'mp4',
            'title': 'Ühikarotid',
            'thumbnail': r're:https://.+\.jpg(?:\?c=\d+)?$',
            'description': 'md5:4719b418e058c209def41d48b601276e',
            'upload_date': '20100916',
            'timestamp': 1284661800,
            'series': 'Ühikarotid',
            'series_id': '17',
            'season': 'Season 2',
            'season_number': 2,
            'episode_id': 14,
            'release_year': 2010,
        },
    }, {
        'note': 'Movie',
        'url': 'https://duoplay.ee/4325/naljamangud',
        'md5': '2b0bcac4159a08b1844c2bfde06b1199',
        'info_dict': {
            'id': '4325',
            'ext': 'mp4',
            'title': 'Näljamängud',
            'thumbnail': r're:https://.+\.jpg(?:\?c=\d+)?$',
            'description': 'md5:fb35f5eb2ff46cdb82e4d5fbe7b49a13',
            'cast': ['Jennifer Lawrence', 'Josh Hutcherson', 'Liam Hemsworth'],
            'upload_date': '20231109',
            'timestamp': 1699552800,
            'release_year': 2012,
        },
    }, {
        'note': 'Movie without expiry',
        'url': 'https://duoplay.ee/5501/pilvede-all.-neljas-ode',
        'md5': '7abf63d773a49ef7c39f2c127842b8fd',
        'info_dict': {
            'id': '5501',
            'ext': 'mp4',
            'title': 'Pilvede all. Neljas õde',
            'thumbnail': r're:https://.+\.jpg(?:\?c=\d+)?$',
            'description': 'md5:d86a70f8f31e82c369d4d4f4c79b1279',
            'cast': 'count:9',
            'upload_date': '20221214',
            'timestamp': 1671054000,
            'release_year': 2018,
        },
    }]

    def _real_extract(self, url):
        telecast_id, episode = self._match_valid_url(url).group('id', 'ep')
        video_id = join_nonempty(telecast_id, episode, delim='_')
        webpage = self._download_webpage(url, video_id)
        video_player = try_call(lambda: extract_attributes(
            get_element_text_and_html_by_tag('video-player', webpage)[1]))
        if not video_player or not video_player.get('manifest-url'):
            raise ExtractorError('No video found', expected=True)

        episode_attr = self._parse_json(video_player.get(':episode') or '', video_id, fatal=False) or {}
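        # ':episode' is a JSON blob carried as an attribute on the page's
        # <video-player> custom element; it may be missing, hence fatal=False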

        return {
            'id': video_id,
            'formats': self._extract_m3u8_formats(video_player['manifest-url'], video_id, 'mp4'),
            **traverse_obj(episode_attr, {
                'title': 'title',
                'description': 'synopsis',
                'thumbnail': ('images', 'original'),
                'timestamp': ('airtime', {lambda x: unified_timestamp(x + ' +0200')}),
                'cast': ('cast', {lambda x: x.split(', ')}),
                'release_year': ('year', {int_or_none}),
            }),
            **(traverse_obj(episode_attr, {
                'title': (None, ('subtitle', ('episode_nr', {lambda x: f'Episode {x}' if x else None}))),
                'series': 'title',
                'series_id': ('telecast_id', {str_or_none}),
                'season_number': ('season_id', {int_or_none}),
                'episode': 'subtitle',
                'episode_number': ('episode_nr', {int_or_none}),
                'episode_id': ('episode_id', {int_or_none}),
            }, get_all=False) if episode_attr.get('category') != 'movies' else {}),
        }

@@ -1,43 +0,0 @@
import re

from .common import InfoExtractor


class EchoMskIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?echo\.msk\.ru/sounds/(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.echo.msk.ru/sounds/1464134.html',
        'md5': '2e44b3b78daff5b458e4dbc37f191f7c',
        'info_dict': {
            'id': '1464134',
            'ext': 'mp3',
            'title': 'Особое мнение - 29 декабря 2014, 19:08',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        audio_url = self._search_regex(
            r'<a rel="mp3" href="([^"]+)">', webpage, 'audio URL')

        title = self._html_search_regex(
            r'<a href="/programs/[^"]+" target="_blank">([^<]+)</a>',
            webpage, 'title')

        air_date = self._html_search_regex(
            r'(?s)<div class="date">(.+?)</div>',
            webpage, 'date', fatal=False, default=None)

        if air_date:
            air_date = re.sub(r'(\s)\1+', r'\1', air_date)
            if air_date:
                title = '%s - %s' % (title, air_date)

        return {
            'id': video_id,
            'url': audio_url,
            'title': title,
        }

@@ -1,36 +0,0 @@
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote


class EHowIE(InfoExtractor):
    IE_NAME = 'eHow'
    _VALID_URL = r'https?://(?:www\.)?ehow\.com/[^/_?]*_(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.ehow.com/video_12245069_hardwood-flooring-basics.html',
        'md5': '9809b4e3f115ae2088440bcb4efbf371',
        'info_dict': {
            'id': '12245069',
            'ext': 'flv',
            'title': 'Hardwood Flooring Basics',
            'description': 'Hardwood flooring may be time consuming, but its ultimately a pretty straightforward concept. Learn about hardwood flooring basics with help from a hardware flooring business owner in this free video...',
            'uploader': 'Erick Nathan',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        video_url = self._search_regex(
            r'(?:file|source)=(http[^\'"&]*)', webpage, 'video URL')
        final_url = compat_urllib_parse_unquote(video_url)
        uploader = self._html_search_meta('uploader', webpage)
        title = self._og_search_title(webpage).replace(' | eHow', '')

        return {
            'id': video_id,
            'url': final_url,
            'title': title,
            'thumbnail': self._og_search_thumbnail(webpage),
            'description': self._og_search_description(webpage),
            'uploader': uploader,
        }