-# coding: utf-8
-from __future__ import unicode_literals
-
-from itertools import zip_longest
+import itertools
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
+ extract_attributes,
float_or_none,
int_or_none,
srt_subtitles_timecode,
+ mimetype2ext,
+ traverse_obj,
try_get,
+ url_or_none,
urlencode_postdata,
urljoin,
)
-class LinkedInLearningBaseIE(InfoExtractor):
+class LinkedInBaseIE(InfoExtractor):
+    # Shared base for all LinkedIn extractors: implements the cookie-based
+    # username/password login flow against the subclass' _LOGIN_URL.
_NETRC_MACHINE = 'linkedin'
+    # Flag deliberately set on LinkedInBaseIE itself (not the instance) so the
+    # credentials are POSTed at most once per run across all subclasses.
+    _logged_in = False
+
+    def _perform_login(self, username, password):
+        if self._logged_in:
+            return
+
+        login_page = self._download_webpage(
+            self._LOGIN_URL, None, 'Downloading login page')
+        # Scrape the login <form action=...> target from the page; fall back to
+        # the well-known login-submit endpoint if the form cannot be matched.
+        action_url = urljoin(self._LOGIN_URL, self._search_regex(
+            r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page, 'post url',
+            default='https://www.linkedin.com/uas/login-submit', group='url'))
+        # Carry over every hidden form input (CSRF tokens etc.) and add the
+        # credentials under the field names the form expects.
+        data = self._hidden_inputs(login_page)
+        data.update({
+            'session_key': username,
+            'session_password': password,
+        })
+        login_submit_page = self._download_webpage(
+            action_url, None, 'Logging in',
+            data=urlencode_postdata(data))
+        # Surface any server-side error message (bad password etc.) as a
+        # user-facing, "expected" extractor error.
+        error = self._search_regex(
+            r'<span[^>]+class="error"[^>]*>\s*(.+?)\s*</span>',
+            login_submit_page, 'error', default=None)
+        if error:
+            raise ExtractorError(error, expected=True)
+        LinkedInBaseIE._logged_in = True
+
+
+class LinkedInLearningBaseIE(LinkedInBaseIE):
_LOGIN_URL = 'https://www.linkedin.com/uas/login?trk=learning'
def _call_api(self, course_slug, fields, video_slug=None, resolution=None):
})
sub = ' %dp' % resolution
api_url = 'https://www.linkedin.com/learning-api/detailedCourses'
+ if not self._get_cookies(api_url).get('JSESSIONID'):
+ self.raise_login_required()
return self._download_json(
api_url, video_slug, 'Downloading%s JSON metadata' % sub, headers={
'Csrf-Token': self._get_cookies(api_url)['JSESSIONID'].value,
def _get_video_id(self, video_data, course_slug, video_slug):
+        # Prefer the id derived from the payload's URN (see _get_urn_id);
+        # fall back to the "<course_slug>/<video_slug>" pair when absent.
return self._get_urn_id(video_data) or '%s/%s' % (course_slug, video_slug)
- def _real_initialize(self):
- email, password = self._get_login_info()
- if email is None:
- return
- login_page = self._download_webpage(
- self._LOGIN_URL, None, 'Downloading login page')
- action_url = urljoin(self._LOGIN_URL, self._search_regex(
- r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page, 'post url',
- default='https://www.linkedin.com/uas/login-submit', group='url'))
- data = self._hidden_inputs(login_page)
- data.update({
- 'session_key': email,
- 'session_password': password,
- })
- login_submit_page = self._download_webpage(
- action_url, None, 'Logging in',
- data=urlencode_postdata(data))
- error = self._search_regex(
- r'<span[^>]+class="error"[^>]*>\s*(.+?)\s*</span>',
- login_submit_page, 'error', default=None)
- if error:
- raise ExtractorError(error, expected=True)
+class LinkedInIE(LinkedInBaseIE):
+    """Extractor for video posts under linkedin.com/posts/."""
+    _VALID_URL = r'https?://(?:www\.)?linkedin\.com/posts/[^/?#]+-(?P<id>\d+)-\w{4}/?(?:[?#]|$)'
+    _TESTS = [{
+        'url': 'https://www.linkedin.com/posts/mishalkhawaja_sendinblueviews-toronto-digitalmarketing-ugcPost-6850898786781339649-mM20',
+        'info_dict': {
+            'id': '6850898786781339649',
+            'ext': 'mp4',
+            'title': 'Mishal K. on LinkedIn: #sendinblueviews #toronto #digitalmarketing #nowhiring #sendinblue…',
+            'description': 'md5:2998a31f6f479376dd62831f53a80f71',
+            'uploader': 'Mishal K.',
+            'thumbnail': 're:^https?://media.licdn.com/dms/image/.*$',
+            'like_count': int
+        },
+    }, {
+        'url': 'https://www.linkedin.com/posts/the-mathworks_2_what-is-mathworks-cloud-center-activity-7151241570371948544-4Gu7',
+        'info_dict': {
+            'id': '7151241570371948544',
+            'ext': 'mp4',
+            'title': 'MathWorks on LinkedIn: What Is MathWorks Cloud Center?',
+            'description': 'md5:95f9d4eeb6337882fb47eefe13d7a40c',
+            'uploader': 'MathWorks',
+            'thumbnail': 're:^https?://media.licdn.com/dms/image/.*$',
+            'like_count': int,
+            'subtitles': 'mincount:1'
+        },
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        # Formats are embedded as JSON in the data-sources attribute of the
+        # page's <video> tag.
+        video_attrs = extract_attributes(self._search_regex(r'(<video[^>]+>)', webpage, 'video'))
+        sources = self._parse_json(video_attrs['data-sources'], video_id)
+        formats = [{
+            'url': source['src'],
+            'ext': mimetype2ext(source.get('type')),
+            # data-bitrate is presumably in bit/s — scaled down to kbit/s for
+            # tbr; TODO confirm against live pages.
+            'tbr': float_or_none(source.get('data-bitrate'), scale=1000),
+        } for source in sources]
+        # A single English VTT track is exposed via data-captions-url; only
+        # use it when the attribute holds a valid URL.
+        subtitles = {'en': [{
+            'url': video_attrs['data-captions-url'],
+            'ext': 'vtt',
+        }]} if url_or_none(video_attrs.get('data-captions-url')) else {}
+
+        return {
+            'id': video_id,
+            'formats': formats,
+            'title': self._og_search_title(webpage, default=None) or self._html_extract_title(webpage),
+            'like_count': int_or_none(self._search_regex(
+                r'\bdata-num-reactions="(\d+)"', webpage, 'reactions', default=None)),
+            # Uploader name is taken from the first JSON-LD object of type
+            # SocialMediaPosting found in the page.
+            'uploader': traverse_obj(
+                self._yield_json_ld(webpage, video_id),
+                (lambda _, v: v['@type'] == 'SocialMediaPosting', 'author', 'name', {str}), get_all=False),
+            'thumbnail': self._og_search_thumbnail(webpage),
+            'description': self._og_search_description(webpage, default=None),
+            'subtitles': subtitles,
+        }
class LinkedInLearningIE(LinkedInLearningBaseIE):
def json2srt(self, transcript_lines, duration=None):
+        # Convert LinkedIn's transcript JSON (list of dicts with
+        # transcriptStartAt in milliseconds and caption text) into SRT markup.
+        # Each cue ends where the next one starts; the last cue ends at the
+        # given duration, or one second after its start when duration is None.
srt_data = ''
-        for line, (line_dict, next_dict) in enumerate(zip_longest(transcript_lines, transcript_lines[1:])):
+        for line, (line_dict, next_dict) in enumerate(itertools.zip_longest(transcript_lines, transcript_lines[1:])):
start_time, caption = line_dict['transcriptStartAt'] / 1000, line_dict['caption']
end_time = next_dict['transcriptStartAt'] / 1000 if next_dict else duration or start_time + 1
-            srt_data += '%d\n%s --> %s\n%s\n' % (line + 1, srt_subtitles_timecode(start_time),
-                                                 srt_subtitles_timecode(end_time),
-                                                 caption)
+            srt_data += '%d\n%s --> %s\n%s\n\n' % (line + 1, srt_subtitles_timecode(start_time),
+                                                   srt_subtitles_timecode(end_time),
+                                                   caption)
return srt_data
def _real_extract(self, url):
course_slug, video_slug = self._match_valid_url(url).groups()
- video_data = None
formats = []
for width, height in ((640, 360), (960, 540), (1280, 720)):
video_data = self._call_api(
streaming_url, video_slug, 'mp4',
'm3u8_native', m3u8_id='hls', fatal=False))
- # It seems like this would be correctly handled by default
- # However, unless someone can confirm this, the old
- # behaviour is being kept as-is
- self._sort_formats(formats, ('res', 'source_preference'))
subtitles = {}
duration = int_or_none(video_data.get('durationInSeconds'))
transcript_lines = try_get(video_data, lambda x: x['transcript']['lines'], expected_type=list)
'timestamp': float_or_none(video_data.get('publishedOn'), 1000),
'duration': duration,
'subtitles': subtitles,
+ # It seems like this would be correctly handled by default
+ # However, unless someone can confirm this, the old
+ # behaviour is being kept as-is
+ '_format_sort_fields': ('res', 'source_preference')
}