[flipagram] Improve extraction (Closes #9898)

parent 0de168f7ed
commit 0af985069b

1 changed file with 78 additions and 66 deletions
youtube_dl/extractor/flipagram.py

@@ -2,102 +2,114 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
+from ..compat import compat_str
 from ..utils import (
     int_or_none,
-    parse_iso8601,
-    unified_strdate,
+    float_or_none,
+    try_get,
     unified_timestamp,
 )
 
 
 class FlipagramIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?flipagram\.com/f/(?P<id>[^/?_]+)'
-    _TESTS = [{
-        'url': 'https://flipagram.com/f/myrWjW9RJw',
-        'md5': '541988fb6c4c7c375215ea22a4a21841',
-        'info_dict': {
-            'id': 'myrWjW9RJw',
-            'title': 'Flipagram by crystaldolce featuring King and Lionheart by Of Monsters and Men',
-            'description': 'Herbie\'s first bannana🍌🐢🍌. #animals #pets #reptile #tortoise #sulcata #tort #justatreat #snacktime #bannanas #rescuepets #ofmonstersandmen @animals',
-            'ext': 'mp4',
-            'uploader': 'Crystal Dolce',
-            'creator': 'Crystal Dolce',
-            'uploader_id': 'crystaldolce',
-        }
-    }, {
+    _VALID_URL = r'https?://(?:www\.)?flipagram\.com/f/(?P<id>[^/?#&]+)'
+    _TEST = {
         'url': 'https://flipagram.com/f/nyvTSJMKId',
-        'only_matching': True,
-    }]
+        'md5': '888dcf08b7ea671381f00fab74692755',
+        'info_dict': {
+            'id': 'nyvTSJMKId',
+            'ext': 'mp4',
+            'title': 'Flipagram by sjuria101 featuring Midnight Memories by One Direction',
+            'description': 'md5:d55e32edc55261cae96a41fa85ff630e',
+            'duration': 35.571,
+            'timestamp': 1461244995,
+            'upload_date': '20160421',
+            'uploader': 'kitty juria',
+            'uploader_id': 'sjuria101',
+            'creator': 'kitty juria',
+            'view_count': int,
+            'like_count': int,
+            'repost_count': int,
+            'comment_count': int,
+            'comments': list,
+            'formats': 'mincount:2',
+        },
+    }
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
-        self.report_extraction(video_id)
-        user_data = self._parse_json(self._search_regex(r'window.reactH2O\s*=\s*({.+});', webpage, 'user data'), video_id)
-        content_data = self._search_json_ld(webpage, video_id)
-
-        flipagram = user_data.get('flipagram', {})
-        counts = flipagram.get('counts', {})
-        user = flipagram.get('user', {})
-        video = flipagram.get('video', {})
-
-        thumbnails = []
-        for cover in flipagram.get('covers', []):
-            if not cover.get('url'):
-                continue
-            thumbnails.append({
-                'url': self._proto_relative_url(cover.get('url')),
-                'width': int_or_none(cover.get('width')),
-                'height': int_or_none(cover.get('height')),
-            })
-
-        # Note that this only retrieves comments that are initally loaded.
-        # For videos with large amounts of comments, most won't be retrieved.
-        comments = []
-        for comment in user_data.get('comments', {}).get(video_id, {}).get('items', []):
-            text = comment.get('comment', [])
-            comments.append({
-                'author': comment.get('user', {}).get('name'),
-                'author_id': comment.get('user', {}).get('username'),
-                'id': comment.get('id'),
-                'text': text[0] if text else '',
-                'timestamp': unified_timestamp(comment.get('created', '')),
-            })
-
-        tags = [tag for item in flipagram['story'][1:] for tag in item]
-
-        formats = []
-        if flipagram.get('music', {}).get('track', {}).get('previewUrl', {}):
+        video_data = self._parse_json(
+            self._search_regex(
+                r'window\.reactH2O\s*=\s*({.+});', webpage, 'video data'),
+            video_id)
+
+        flipagram = video_data['flipagram']
+        video = flipagram['video']
+
+        json_ld = self._search_json_ld(webpage, video_id, default=False)
+        title = json_ld.get('title') or flipagram['captionText']
+        description = json_ld.get('description') or flipagram.get('captionText')
+
+        formats = [{
+            'url': video['url'],
+            'width': int_or_none(video.get('width')),
+            'height': int_or_none(video.get('height')),
+            'filesize': int_or_none(video_data.get('size')),
+        }]
+
+        preview_url = try_get(
+            flipagram, lambda x: x['music']['track']['previewUrl'], compat_str)
+        if preview_url:
             formats.append({
-                'url': flipagram.get('music').get('track').get('previewUrl'),
+                'url': preview_url,
                 'ext': 'm4a',
                 'vcodec': 'none',
             })
 
-        formats.append({
-            'url': video.get('url'),
-            'ext': 'mp4',
-            'width': int_or_none(video.get('width')),
-            'height': int_or_none(video.get('height')),
-            'filesize': int_or_none(video.get('size')),
-        })
+        self._sort_formats(formats)
+
+        counts = flipagram.get('counts', {})
+        user = flipagram.get('user', {})
+        video_data = flipagram.get('video', {})
+
+        thumbnails = [{
+            'url': self._proto_relative_url(cover['url']),
+            'width': int_or_none(cover.get('width')),
+            'height': int_or_none(cover.get('height')),
+            'filesize': int_or_none(cover.get('size')),
+        } for cover in flipagram.get('covers', []) if cover.get('url')]
+
+        # Note that this only retrieves comments that are initally loaded.
+        # For videos with large amounts of comments, most won't be retrieved.
+        comments = []
+        for comment in video_data.get('comments', {}).get(video_id, {}).get('items', []):
+            text = comment.get('comment')
+            if not text or not isinstance(text, list):
+                continue
+            comments.append({
+                'author': comment.get('user', {}).get('name'),
+                'author_id': comment.get('user', {}).get('username'),
+                'id': comment.get('id'),
+                'text': text[0],
+                'timestamp': unified_timestamp(comment.get('created')),
+            })
 
         return {
             'id': video_id,
-            'title': content_data['title'],
-            'formats': formats,
+            'title': title,
+            'description': description,
+            'duration': float_or_none(flipagram.get('duration'), 1000),
             'thumbnails': thumbnails,
-            'description': content_data.get('description'),
+            'timestamp': unified_timestamp(flipagram.get('iso8601Created')),
             'uploader': user.get('name'),
-            'creator': user.get('name'),
-            'timestamp': parse_iso8601(flipagram.get('iso801Created')),
-            'upload_date': unified_strdate(flipagram.get('created')),
             'uploader_id': user.get('username'),
+            'creator': user.get('name'),
             'view_count': int_or_none(counts.get('plays')),
+            'like_count': int_or_none(counts.get('likes')),
             'repost_count': int_or_none(counts.get('reflips')),
             'comment_count': int_or_none(counts.get('comments')),
             'comments': comments,
-            'tags': tags,
+            'formats': formats,
         }
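The core robustness change in this diff is replacing long chains of dict.get() with default {} values by youtube-dl's try_get helper, which walks a nested structure through a lambda and returns None (optionally type-checked) when any level is missing. Below is a minimal standalone sketch of that pattern for illustration only: the try_get defined here mirrors the utils helper so the snippet runs on its own, the flipagram payload is hypothetical, and plain str stands in for compat_str.

# Illustrative sketch only: standalone re-implementation of the try_get
# pattern this commit adopts, so the snippet runs without youtube-dl.

def try_get(src, getter, expected_type=None):
    # Apply getter to src; swallow the usual "missing key/index/attribute"
    # errors and return None instead of raising.
    try:
        value = getter(src)
    except (AttributeError, KeyError, TypeError, IndexError):
        return None
    # Optionally require a specific type, as the extractor does with
    # compat_str for the music preview URL.
    if expected_type is None or isinstance(value, expected_type):
        return value
    return None


flipagram = {'music': {'track': {}}}  # hypothetical payload with no previewUrl

# Old style: every level needs a .get(..., {}) default to avoid KeyError.
preview_url_old = flipagram.get('music', {}).get('track', {}).get('previewUrl')

# New style used in the diff: one lambda, one type check, None on any miss.
preview_url = try_get(flipagram, lambda x: x['music']['track']['previewUrl'], str)

assert preview_url_old is None and preview_url is None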