Release 2021.03.01
commit 5ef7d9bdd8
parent 62bff2c170
9 changed files with 36 additions and 10 deletions
@@ -23,3 +23,5 @@ tsukumi
 bbepis
 Pccode66
 Ashish
+RobinD42
+hseg
Changelog.md (21 changes)

@@ -17,6 +17,27 @@
 -->
 
 
+### 2021.03.01
+* Allow specifying path in `--external-downloader`
+* Add option `--sleep-requests` to sleep b/w requests
+* Add option `--extractor-retries` to retry on known extractor errors
+* Extract comments only when needed
+* `--get-comments` doesn't imply `--write-info-json` if `-J`, `-j` or `--print-json` are used
+* [youtube] Retry on more known errors than just HTTP-5xx
+* [tennistv] Fix format sorting
+* [readthedocs] Improvements by [shirt](https://github.com/shirt-dev)
+* [hls] Fix bug with m3u8 format extraction
+* [bilibiliaudio] Recognize the file as audio-only
+* [hrfensehen] Fix wrong import
+* [youtube] Fix inconsistent `webpage_url`
+* [hls] Enable `--hls-use-mpegts` by default when downloading live-streams
+* [viki] Fix viki play pass authentication by [RobinD42](https://github.com/RobinD42)
+* [embedthumbnail] Fix bug with deleting original thumbnail
+* [build] Fix completion paths, zsh pip completion install by [hseg](https://github.com/hseg)
+* [ci] Disable download tests unless specifically invoked
+* Cleanup some code and fix typos
+
+
 ### 2021.02.24
 * Moved project to an organization [yt-dlp](https://github.com/yt-dlp)
 * **Completely changed project name to yt-dlp** by [Pccode66](https://github.com/Pccode66) and [pukkandan](https://github.com/pukkandan)
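
For anyone embedding yt-dlp rather than using the CLI, the options introduced above also surface as `YoutubeDL` params. Below is a minimal sketch, assuming the params keys `extractor_retries` and `external_downloader` correspond to the new flags (only `sleep_interval_requests` is confirmed by the extractor hunk later in this diff); the URL and downloader path are placeholders.

```python
# Illustrative sketch only: params keys other than 'sleep_interval_requests'
# are assumptions inferred from the new CLI flags, not taken from this commit.
import yt_dlp

ydl_opts = {
    'sleep_interval_requests': 0.75,   # --sleep-requests: pause between extractor requests
    'extractor_retries': 3,            # --extractor-retries: retry known extractor errors
    'external_downloader': '/opt/aria2/bin/aria2c',  # --external-downloader now accepts a path (placeholder)
}

with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=example'])
```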
@@ -697,6 +697,8 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
 directory
 
 ## Extractor Options:
+    --extractor-retries RETRIES      Number of retries for known extractor
+                                     errors (default is 10), or "infinite"
     --allow-dynamic-mpd              Process dynamic DASH manifests (default)
                                      (Alias: --no-ignore-dynamic-mpd)
     --ignore-dynamic-mpd             Do not process dynamic DASH manifests
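
The `--extractor-retries` option above takes a count or the literal "infinite". A small self-contained sketch of the retry pattern such an option implies; the helper and exception names here are placeholders, not yt-dlp internals.

```python
# Illustrative sketch: normalize a RETRIES value like "10" or "infinite"
# and use it in a retry loop. Placeholder code, not yt-dlp's implementation.
class ExtractorError(Exception):
    """Stand-in for a known, retryable extractor error."""


def parse_retries(value):
    # "infinite"/"inf" maps to float('inf'), which compares greater than any count
    return float('inf') if value in ('inf', 'infinite') else float(value)


def call_with_retries(func, retries='10'):
    limit = parse_retries(retries)
    count = 0
    while True:
        try:
            return func()
        except ExtractorError as err:
            count += 1
            if count > limit:
                raise
            print('[extractor] %s. Retrying (attempt %d of %s) ...' % (err, count, retries))
```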
@@ -2958,7 +2958,7 @@ class YoutubeDL(object):
                 self.to_screen('[%s] %s: Thumbnail %sis already present' %
                                (info_dict['extractor'], info_dict['id'], thumb_display_id))
             else:
-                self.to_screen('[%s] %s: Downloading thumbnail %s...' %
+                self.to_screen('[%s] %s: Downloading thumbnail %s ...' %
                                (info_dict['extractor'], info_dict['id'], thumb_display_id))
                 try:
                     uf = self.urlopen(t['url'])
@@ -312,7 +312,7 @@ class FileDownloader(object):
     def report_retry(self, err, count, retries):
         """Report retry in case of HTTP error 5xx"""
         self.to_screen(
-            '[download] Got server HTTP error: %s. Retrying (attempt %d of %s)...'
+            '[download] Got server HTTP error: %s. Retrying (attempt %d of %s) ...'
             % (error_to_compat_str(err), count, self.format_retries(retries)))
 
     def report_file_already_downloaded(self, file_name):
@@ -359,7 +359,7 @@ class FileDownloader(object):
             max_sleep_interval = self.params.get('max_sleep_interval', min_sleep_interval)
             sleep_interval = random.uniform(min_sleep_interval, max_sleep_interval)
             self.to_screen(
-                '[download] Sleeping %s seconds...' % (
+                '[download] Sleeping %s seconds ...' % (
                     int(sleep_interval) if sleep_interval.is_integer()
                     else '%.2f' % sleep_interval))
             time.sleep(sleep_interval)
@@ -369,7 +369,7 @@ class FileDownloader(object):
             sleep_interval_sub = self.params.get('sleep_interval_subtitles')
             if sleep_interval_sub > 0:
                 self.to_screen(
-                    '[download] Sleeping %s seconds...' % (
+                    '[download] Sleeping %s seconds ...' % (
                         sleep_interval_sub))
                 time.sleep(sleep_interval_sub)
         return self.real_download(filename, info_dict), True
@@ -55,11 +55,11 @@ class FragmentFD(FileDownloader):
 
     def report_retry_fragment(self, err, frag_index, count, retries):
         self.to_screen(
-            '[download] Got server HTTP error: %s. Retrying fragment %d (attempt %d of %s)...'
+            '[download] Got server HTTP error: %s. Retrying fragment %d (attempt %d of %s) ...'
             % (error_to_compat_str(err), frag_index, count, self.format_retries(retries)))
 
     def report_skip_fragment(self, frag_index):
-        self.to_screen('[download] Skipping fragment %d...' % frag_index)
+        self.to_screen('[download] Skipping fragment %d ...' % frag_index)
 
     def _prepare_url(self, info_dict, url):
         headers = info_dict.get('http_headers')
@@ -174,7 +174,7 @@ class FragmentFD(FileDownloader):
                 '.ytdl file is corrupt' if is_corrupt else
                 'Inconsistent state of incomplete fragment download')
             self.report_warning(
-                '%s. Restarting from the beginning...' % message)
+                '%s. Restarting from the beginning ...' % message)
             ctx['fragment_index'] = resume_len = 0
             if 'ytdl_corrupt' in ctx:
                 del ctx['ytdl_corrupt']
@@ -29,7 +29,7 @@ class NiconicoDmcFD(FileDownloader):
         heartbeat_url = heartbeat_info_dict['url']
         heartbeat_data = heartbeat_info_dict['data']
         heartbeat_interval = heartbeat_info_dict.get('interval', 30)
-        self.to_screen('[%s] Heartbeat with %s second interval...' % (self.FD_NAME, heartbeat_interval))
+        self.to_screen('[%s] Heartbeat with %s second interval ...' % (self.FD_NAME, heartbeat_interval))
 
         def heartbeat():
             try:
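
For context on the hunk above: the Niconico DMC downloader keeps its stream session alive by re-posting heartbeat data every `heartbeat_interval` seconds while the real download runs. A self-contained sketch of that keep-alive pattern, with a placeholder URL and payload rather than the downloader's actual request:

```python
# Sketch of a periodic keep-alive heartbeat, re-armed with threading.Timer.
# URL and payload below are placeholders, not taken from this commit.
import threading
import urllib.request


def start_heartbeat(url, data, interval=30.0):
    timer = [None]

    def beat():
        try:
            urllib.request.urlopen(url, data=data, timeout=10).read()
        except Exception as err:
            print('Heartbeat failed: %s' % err)
        # re-arm so the heartbeat keeps firing every `interval` seconds
        timer[0] = threading.Timer(interval, beat)
        timer[0].daemon = True
        timer[0].start()

    beat()
    return lambda: timer[0] and timer[0].cancel()


# stop = start_heartbeat('https://example.com/heartbeat', b'session=...')
# ... perform the actual download ...
# stop()
```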
@@ -617,7 +617,7 @@ class InfoExtractor(object):
         if not self._downloader._first_webpage_request:
             sleep_interval = float_or_none(self._downloader.params.get('sleep_interval_requests')) or 0
             if sleep_interval > 0:
-                self.to_screen('Sleeping %s seconds...' % sleep_interval)
+                self.to_screen('Sleeping %s seconds ...' % sleep_interval)
                 time.sleep(sleep_interval)
         else:
             self._downloader._first_webpage_request = False
@@ -3020,7 +3020,8 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             # See: https://github.com/yt-dlp/yt-dlp/issues/116
             if count:
                 self.report_warning('Incomplete yt initial data recieved. Retrying ...')
-            webpage = self._download_webpage(url, item_id,
+            webpage = self._download_webpage(
+                url, item_id,
                 'Downloading webpage%s' % ' (retry #%d)' % count if count else '')
             identity_token = self._extract_identity_token(webpage, item_id)
             data = self._extract_yt_initial_data(item_id, webpage)
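
The hunk above is part of a loop that re-downloads the tab webpage whenever the YouTube initial data comes back incomplete. A minimal sketch of that retry-until-complete pattern, with placeholder callables standing in for the extractor's real methods:

```python
# Sketch of the "retry while the payload looks incomplete" pattern;
# fetch() and looks_complete() are placeholders, not yt-dlp functions.
def fetch_until_complete(fetch, looks_complete, retries=3):
    data = None
    for count in range(retries + 1):
        if count:
            print('Incomplete data received. Retrying (attempt %d of %d) ...' % (count, retries))
        data = fetch()
        if looks_complete(data):
            break
    return data
```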