parent
e0a4a3d5bf
commit
14c3a98049
2 changed files with 139 additions and 1 deletions
|
@ -997,6 +997,7 @@ from .nationalgeographic import (
|
|||
from .naver import (
|
||||
NaverIE,
|
||||
NaverLiveIE,
|
||||
NaverNowIE,
|
||||
)
|
||||
from .nba import (
|
||||
NBAWatchEmbedIE,
|
||||
|
|
|
@ -1,13 +1,19 @@
|
|||
import itertools
import re
from urllib.parse import parse_qs, urlparse

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    clean_html,
    dict_get,
    int_or_none,
    merge_dicts,
    parse_duration,
    traverse_obj,
    try_call,
    try_get,
    unified_timestamp,
    update_url_query,
)
|
||||
|
||||
|
@ -247,3 +253,134 @@ class NaverLiveIE(InfoExtractor):
|
|||
'categories': [meta.get('categoryId')],
|
||||
'is_live': True
|
||||
}
|
||||
|
||||
|
||||
class NaverNowIE(NaverBaseIE):
    """Extractor for replay and highlight VODs of now.naver.com shows.

    A bare show URL yields a playlist of every replay and highlight; the
    ``shareReplayId`` / ``shareHightlight`` query parameters (the latter typo
    is the site's own spelling) select a single video when --no-playlist is
    in effect.
    """
    IE_NAME = 'navernow'
    _VALID_URL = r'https?://now\.naver\.com/show/(?P<id>[0-9]+)'
    # Page size used for both the replay and the highlight listing endpoints.
    _PAGE_SIZE = 30
    _API_URL = 'https://apis.naver.com/now_web/nowcms-api-xhmac/cms/v1'
    _TESTS = [{
        'url': 'https://now.naver.com/show/4759?shareReplayId=5901#replay=',
        'md5': 'e05854162c21c221481de16b2944a0bc',
        'info_dict': {
            'id': '4759-5901',
            'title': '아이키X노제\r\n💖꽁냥꽁냥💖(1)',
            'ext': 'mp4',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1650369600,
            'upload_date': '20220419',
            'uploader_id': 'now',
            'view_count': int,
        },
        'params': {
            'noplaylist': True,
        },
    }, {
        'url': 'https://now.naver.com/show/4759?shareHightlight=1078#highlight=',
        'md5': '9f6118e398aa0f22b2152f554ea7851b',
        'info_dict': {
            'id': '4759-1078',
            'title': '아이키: 나 리정한테 흔들렸어,,, 질투 폭발하는 노제 여보😾 [아이키의 떰즈업]ㅣ네이버 NOW.',
            'ext': 'mp4',
            'thumbnail': r're:^https?://.*\.jpg',
            'upload_date': '20220504',
            'timestamp': 1651648042,
            'uploader_id': 'now',
            'view_count': int,
        },
        'params': {
            'noplaylist': True,
        },
    }, {
        'url': 'https://now.naver.com/show/4759',
        'info_dict': {
            'id': '4759',
            'title': '아이키의 떰즈업',
        },
        'playlist_mincount': 48,
    }, {
        'url': 'https://now.naver.com/show/4759?shareReplayId=5901#replay',
        'info_dict': {
            'id': '4759',
            'title': '아이키의 떰즈업',
        },
        'playlist_mincount': 48,
    }, {
        'url': 'https://now.naver.com/show/4759?shareHightlight=1078#highlight=',
        'info_dict': {
            'id': '4759',
            'title': '아이키의 떰즈업',
        },
        'playlist_mincount': 48,
    }]
|
||||
|
||||
def _extract_replay(self, show_id, replay_id):
|
||||
vod_info = self._download_json(f'{self._API_URL}/shows/{show_id}/vod/{replay_id}', replay_id)
|
||||
in_key = self._download_json(f'{self._API_URL}/shows/{show_id}/vod/{replay_id}/inkey', replay_id)['inKey']
|
||||
return merge_dicts({
|
||||
'id': f'{show_id}-{replay_id}',
|
||||
'title': traverse_obj(vod_info, ('episode', 'title')),
|
||||
'timestamp': unified_timestamp(traverse_obj(vod_info, ('episode', 'start_time'))),
|
||||
'thumbnail': vod_info.get('thumbnail_image_url'),
|
||||
}, self._extract_video_info(replay_id, vod_info['video_id'], in_key))
|
||||
|
||||
def _extract_show_replays(self, show_id):
|
||||
page = 0
|
||||
while True:
|
||||
show_vod_info = self._download_json(
|
||||
f'{self._API_URL}/vod-shows/{show_id}', show_id,
|
||||
query={'offset': page * self._PAGE_SIZE, 'limit': self._PAGE_SIZE},
|
||||
note=f'Downloading JSON vod list for show {show_id} - page {page}'
|
||||
)['response']['result']
|
||||
for v in show_vod_info.get('vod_list') or []:
|
||||
yield self._extract_replay(show_id, v['id'])
|
||||
|
||||
if try_call(lambda: show_vod_info['count'] <= self._PAGE_SIZE * (page + 1)):
|
||||
break
|
||||
page += 1
|
||||
|
||||
def _extract_show_highlights(self, show_id, highlight_id=None):
|
||||
page = 0
|
||||
while True:
|
||||
highlights_videos = self._download_json(
|
||||
f'{self._API_URL}/shows/{show_id}/highlights/videos/', show_id,
|
||||
query={'offset': page * self._PAGE_SIZE, 'limit': self._PAGE_SIZE},
|
||||
note=f'Downloading JSON highlights for show {show_id} - page {page}')
|
||||
|
||||
for highlight in highlights_videos.get('results') or []:
|
||||
if highlight_id and highlight.get('id') != int(highlight_id):
|
||||
continue
|
||||
yield merge_dicts({
|
||||
'id': f'{show_id}-{highlight["id"]}',
|
||||
'title': highlight.get('title'),
|
||||
'timestamp': unified_timestamp(highlight.get('regdate')),
|
||||
'thumbnail': highlight.get('thumbnail_url'),
|
||||
}, self._extract_video_info(highlight['id'], highlight['video_id'], highlight['video_inkey']))
|
||||
|
||||
if try_call(lambda: highlights_videos['count'] <= self._PAGE_SIZE * (page + 1)):
|
||||
break
|
||||
page += 1
|
||||
|
||||
def _extract_highlight(self, show_id, highlight_id):
|
||||
try:
|
||||
return next(self._extract_show_highlights(show_id, highlight_id))
|
||||
except StopIteration:
|
||||
raise ExtractorError(f'Unable to find highlight {highlight_id} for show {show_id}')
|
||||
|
||||
def _real_extract(self, url):
|
||||
show_id = self._match_id(url)
|
||||
qs = parse_qs(urlparse(url).query)
|
||||
|
||||
if not self._yes_playlist(show_id, qs.get('shareHightlight')):
|
||||
return self._extract_highlight(show_id, qs['shareHightlight'][0])
|
||||
elif not self._yes_playlist(show_id, qs.get('shareReplayId')):
|
||||
return self._extract_replay(show_id, qs['shareReplayId'][0])
|
||||
|
||||
show_info = self._download_json(
|
||||
f'{self._API_URL}/shows/{show_id}', show_id,
|
||||
note=f'Downloading JSON vod list for show {show_id}')
|
||||
|
||||
return self.playlist_result(
|
||||
itertools.chain(self._extract_show_replays(show_id), self._extract_show_highlights(show_id)),
|
||||
show_id, show_info.get('title'))
|
||||
|
|
Loading…
Reference in a new issue