from __future__ import unicode_literals

import base64
import datetime
import hashlib
import json
import netrc
import os
import re
import socket
import sys
import time
import math

from ..compat import (
    compat_cookiejar,
    compat_cookies,
    compat_etree_fromstring,
    compat_getpass,
    compat_http_client,
    compat_os_name,
    compat_str,
    compat_urllib_error,
    compat_urllib_parse_unquote,
    compat_urllib_parse_urlencode,
    compat_urllib_request,
    compat_urlparse,
)
from ..downloader.f4m import remove_encrypted_media
from ..utils import (
    NO_DEFAULT,
    age_restricted,
    base_url,
    bug_reports_message,
    clean_html,
    compiled_regex_type,
    determine_ext,
    error_to_compat_str,
    ExtractorError,
    fix_xml_ampersands,
    float_or_none,
    int_or_none,
    parse_iso8601,
    RegexNotFoundError,
    sanitize_filename,
    sanitized_Request,
    unescapeHTML,
    unified_strdate,
    unified_timestamp,
    url_basename,
    xpath_element,
    xpath_text,
    xpath_with_ns,
    determine_protocol,
    parse_duration,
    mimetype2ext,
    update_Request,
    update_url_query,
    parse_m3u8_attributes,
    extract_attributes,
    parse_codecs,
    urljoin,
)


class InfoExtractor(object):
    """Information Extractor class.

    Information extractors are the classes that, given a URL, extract
    information about the video (or videos) the URL refers to. This
    information includes the real video URL, the video title, author and
    others. The information is stored in a dictionary which is then
    passed to the YoutubeDL. The YoutubeDL processes this
    information possibly downloading the video to the file system, among
    other possible outcomes.

    The type field determines the type of the result.
    By far the most common value (and the default if _type is missing) is
    "video", which indicates a single video.

    For a video, the dictionaries must include the following fields:

    id:             Video identifier.
    title:          Video title, unescaped.

    Additionally, it must contain either a formats entry or a url one:

    formats:        A list of dictionaries for each format available, ordered
                    from worst to best quality.

                    Potential fields:
                    * url        Mandatory. The URL of the video file
                    * manifest_url
                                 The URL of the manifest file in case of
                                 fragmented media (DASH, hls, hds)
                    * ext        Will be calculated from URL if missing
                    * format     A human-readable description of the format
                                 ("mp4 container with h264/opus").
                                 Calculated from the format_id, width, height,
                                 and format_note fields if missing.
                    * format_id  A short description of the format
                                 ("mp4_h264_opus" or "19").
                                 Technically optional, but strongly recommended.
                    * format_note Additional info about the format
                                 ("3D" or "DASH video")
                    * width      Width of the video, if known
                    * height     Height of the video, if known
                    * resolution Textual description of width and height
                    * tbr        Average bitrate of audio and video in KBit/s
                    * abr        Average audio bitrate in KBit/s
                    * acodec     Name of the audio codec in use
                    * asr        Audio sampling rate in Hertz
                    * vbr        Average video bitrate in KBit/s
                    * fps        Frame rate
                    * vcodec     Name of the video codec in use
                    * container  Name of the container format
                    * filesize   The number of bytes, if known in advance
                    * filesize_approx  An estimate for the number of bytes
                    * player_url SWF Player URL (used for rtmpdump).
                    * protocol   The protocol that will be used for the actual
                                 download, lower-case.
                                 "http", "https", "rtsp", "rtmp", "rtmpe",
                                 "m3u8", "m3u8_native" or "http_dash_segments".
                    * fragments  A list of fragments of the fragmented media,
                                 with the following entries:
                                 * "url" (mandatory) - fragment's URL
                                 * "duration" (optional, int or float)
                                 * "filesize" (optional, int)
                    * preference Order number of this format. If this field is
                                 present and not None, the formats get sorted
                                 by this field, regardless of all other values.
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                                 < -1000 to hide the format (if there is
                                 another one which is strictly better)
                    * language   Language code, e.g. "de" or "en-US".
                    * language_preference  Is this in the language mentioned in
                                 the URL?
                                 10 if it's what the URL is about,
                                 -1 for default (don't know),
                                 -10 otherwise, other values reserved for now.
                    * quality    Order number of the video quality of this
                                 format, irrespective of the file format.
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                    * source_preference  Order number for this video source
                                 (quality takes higher priority)
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                    * http_headers  A dictionary of additional HTTP headers
                                 to add to the request.
                    * stretched_ratio  If given and not 1, indicates that the
                                 video's pixels are not square.
                                 width : height ratio as float.
                    * no_resume  The server does not support resuming the
                                 (HTTP or RTMP) download. Boolean.

    url:            Final video URL.
    ext:            Video filename extension.
    format:         The video format, defaults to ext (used for --get-format)
    player_url:     SWF Player URL (used for rtmpdump).
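
    For illustration, a minimal "video" result (all example values are made
    up) could look like:

        {
            'id': '4234987',
            'title': 'Dancing naked mole rats',
            'formats': [{
                'url': 'https://example.com/video-360p.mp4',
                'format_id': 'mp4-360p',
                'ext': 'mp4',
                'width': 640,
                'height': 360,
                'tbr': 800,
            }],
        }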

    The following fields are optional:

    alt_title:      A secondary title of the video.
    display_id      An alternative identifier for the video, not necessarily
                    unique, but available before title. Typically, id is
                    something like "4234987", title "Dancing naked mole rats",
                    and display_id "dancing-naked-mole-rats"
    thumbnails:     A list of dictionaries, with the following entries:
                        * "id" (optional, string) - Thumbnail format ID
                        * "url"
                        * "preference" (optional, int) - quality of the image
                        * "width" (optional, int)
                        * "height" (optional, int)
176 * "resolution" (optional, string "{width}x{height"},
177 deprecated)
178 * "filesize" (optional, int)
179 thumbnail: Full URL to a video thumbnail image.
180 description: Full video description.
181 uploader: Full name of the video uploader.
182 license: License name the video is licensed under.
183 creator: The creator of the video.
184 release_date: The date (YYYYMMDD) when the video was released.
185 timestamp: UNIX timestamp of the moment the video became available.
186 upload_date: Video upload date (YYYYMMDD).
187 If not explicitly set, calculated from timestamp.
188 uploader_id: Nickname or id of the video uploader.
189 uploader_url: Full URL to a personal webpage of the video uploader.
190 location: Physical location where the video was filmed.
191 subtitles: The available subtitles as a dictionary in the format
192 {language: subformats}. "subformats" is a list sorted from
193 lower to higher preference, each element is a dictionary
194 with the "ext" entry and one of:
195 * "data": The subtitles file contents
196 * "url": A URL pointing to the subtitles file
197 "ext" will be calculated from URL if missing
198 automatic_captions: Like 'subtitles', used by the YoutubeIE for
199 automatically generated captions
200 duration: Length of the video in seconds, as an integer or float.
201 view_count: How many users have watched the video on the platform.
202 like_count: Number of positive ratings of the video
203 dislike_count: Number of negative ratings of the video
204 repost_count: Number of reposts of the video
    average_rating: Average rating given by users, the scale used depends on the webpage
    comment_count:  Number of comments on the video
    comments:       A list of comments, each with one or more of the following
                    properties (all but one of text or html optional):
                        * "author" - human-readable name of the comment author
                        * "author_id" - user ID of the comment author
                        * "id" - Comment ID
                        * "html" - Comment as HTML
                        * "text" - Plain text of the comment
                        * "timestamp" - UNIX timestamp of comment
                        * "parent" - ID of the comment this one is replying to.
                                     Set to "root" to indicate that this is a
                                     comment to the original video.
    age_limit:      Age restriction for the video, as an integer (years)
    webpage_url:    The URL to the video webpage, if given to youtube-dl it
                    should allow getting the same result again. (It will be set
                    by YoutubeDL if it's missing)
    categories:     A list of categories that the video falls in, for example
                    ["Sports", "Berlin"]
    tags:           A list of tags assigned to the video, e.g. ["sweden", "pop music"]
    is_live:        True, False, or None (=unknown). Whether this video is a
                    live stream that goes on instead of a fixed-length video.
    start_time:     Time in seconds where the reproduction should start, as
                    specified in the URL.
    end_time:       Time in seconds where the reproduction should end, as
                    specified in the URL.

    The following fields should only be used when the video belongs to some logical
    chapter or section:

    chapter:        Name or title of the chapter the video belongs to.
    chapter_number: Number of the chapter the video belongs to, as an integer.
    chapter_id:     Id of the chapter the video belongs to, as a unicode string.

    The following fields should only be used when the video is an episode of some
    series, programme or podcast:

    series:         Title of the series or programme the video episode belongs to.
    season:         Title of the season the video episode belongs to.
    season_number:  Number of the season the video episode belongs to, as an integer.
    season_id:      Id of the season the video episode belongs to, as a unicode string.
    episode:        Title of the video episode. Unlike mandatory video title field,
                    this field should denote the exact title of the video episode
                    without any kind of decoration.
    episode_number: Number of the video episode within a season, as an integer.
    episode_id:     Id of the video episode, as a unicode string.

    The following fields should only be used when the media is a track or a part of
    a music album:

    track:          Title of the track.
    track_number:   Number of the track within an album or a disc, as an integer.
    track_id:       Id of the track (useful in case of custom indexing, e.g. 6.iii),
                    as a unicode string.
    artist:         Artist(s) of the track.
    genre:          Genre(s) of the track.
    album:          Title of the album the track belongs to.
    album_type:     Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
    album_artist:   List of all artists who appeared on the album (e.g.
                    "Ash Borer / Fell Voices" or "Various Artists", useful for splits
                    and compilations).
    disc_number:    Number of the disc or other physical medium the track belongs to,
                    as an integer.
    release_year:   Year (YYYY) when the album was released.

    Unless mentioned otherwise, the fields should be Unicode strings.

    Unless mentioned otherwise, None is equivalent to absence of information.


    _type "playlist" indicates multiple videos.
    There must be a key "entries", which is a list, an iterable, or a PagedList
    object, each element of which is a valid dictionary by this specification.

    Additionally, playlists can have "title", "description" and "id" attributes
    with the same semantics as videos (see above).


    _type "multi_video" indicates that there are multiple videos that
    form a single show, for example multiple acts of an opera or TV episode.
    It must have an entries key like a playlist and contain all the keys
    required for a video at the same time.


    _type "url" indicates that the video must be extracted from another
    location, possibly by a different extractor. Its only required key is:
    "url" - the next URL to extract.
    The key "ie_key" can be set to the class name (minus the trailing "IE",
    e.g. "Youtube") if the extractor class is known in advance.
    Additionally, the dictionary may have any properties of the resolved entity
    known in advance, for example "title" if the title of the referred video is
    known ahead of time.


    _type "url_transparent" entities have the same specification as "url", but
    indicate that the given additional information is more precise than the one
    associated with the resolved URL.
    This is useful when a site employs a video service that hosts the video and
    its technical metadata, but that video service does not embed a useful
    title, description etc.
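
    For instance, a site embedding such a service might return (illustrative
    values only):

        {
            '_type': 'url_transparent',
            'url': 'https://videoservice.example/embed/1234',
            'ie_key': 'SomeService',
            'title': 'Title taken from the embedding page',
        }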


    Subclasses of this one should re-define the _real_initialize() and
    _real_extract() methods and define a _VALID_URL regexp.
    Probably, they should also be added to the list of extractors.

    Finally, the _WORKING attribute should be set to False for broken IEs
    in order to warn the users and skip the tests.
    """

    _ready = False
    _downloader = None
    _WORKING = True

    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader."""
        self._ready = False
        self.set_downloader(downloader)

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""

        # This does not use has/getattr intentionally - we want to know whether
        # we have cached the regexp for *this* class, whereas getattr would also
        # match the superclass
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        return cls._VALID_URL_RE.match(url) is not None

    @classmethod
    def _match_id(cls, url):
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        m = cls._VALID_URL_RE.match(url)
        assert m
        return m.group('id')
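
    # Illustrative sketch (not part of the real file): subclasses typically
    # pair _VALID_URL with a named "id" group so that _match_id() can pull
    # the video identifier straight out of the URL, e.g.:
    #
    #     class ExampleIE(InfoExtractor):
    #         _VALID_URL = r'https?://(?:www\.)?example\.com/watch/(?P<id>[0-9]+)'
    #
    #     ExampleIE._match_id('http://example.com/watch/42')  # -> '42'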

    @classmethod
    def working(cls):
        """Getter method for _WORKING."""
        return cls._WORKING

    def initialize(self):
        """Initializes an instance (authentication, etc)."""
        if not self._ready:
            self._real_initialize()
            self._ready = True

    def extract(self, url):
355 """Extracts URL information and returns it in list of dicts."""
        try:
            self.initialize()
            return self._real_extract(url)
        except ExtractorError:
            raise
        except compat_http_client.IncompleteRead as e:
            raise ExtractorError('A network error has occurred.', cause=e, expected=True)
        except (KeyError, StopIteration) as e:
            raise ExtractorError('An extractor error has occurred.', cause=e)

    def set_downloader(self, downloader):
        """Sets the downloader for this IE."""
        self._downloader = downloader

    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses."""
        pass

    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses."""
        pass

    @classmethod
    def ie_key(cls):
        """A string for getting the InfoExtractor with get_info_extractor"""
        return compat_str(cls.__name__[:-2])

    @property
    def IE_NAME(self):
        return compat_str(type(self).__name__[:-2])

    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
        """ Returns the response handle """
        if note is None:
            self.report_download_webpage(video_id)
        elif note is not False:
            if video_id is None:
                self.to_screen('%s' % (note,))
            else:
                self.to_screen('%s: %s' % (video_id, note))
        if isinstance(url_or_request, compat_urllib_request.Request):
            url_or_request = update_Request(
                url_or_request, data=data, headers=headers, query=query)
        else:
            if query:
                url_or_request = update_url_query(url_or_request, query)
            if data is not None or headers:
                url_or_request = sanitized_Request(url_or_request, data, headers)
        try:
            return self._downloader.urlopen(url_or_request)
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            if errnote is False:
                return False
            if errnote is None:
                errnote = 'Unable to download webpage'

            errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
            if fatal:
                raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
            else:
                self._downloader.report_warning(errmsg)
                return False

    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}):
        """ Returns a tuple (page content as string, URL handle) """
        # Strip hashes from the URL (#1038)
        if isinstance(url_or_request, (compat_str, str)):
            url_or_request = url_or_request.partition('#')[0]

        urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query)
        if urlh is False:
            assert not fatal
            return False
        content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
        return (content, urlh)

    @staticmethod
    def _guess_encoding_from_content(content_type, webpage_bytes):
        m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
        if m:
            encoding = m.group(1)
        else:
            m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
                          webpage_bytes[:1024])
            if m:
                encoding = m.group(1).decode('ascii')
            elif webpage_bytes.startswith(b'\xff\xfe'):
                encoding = 'utf-16'
            else:
                encoding = 'utf-8'

        return encoding

    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
        content_type = urlh.headers.get('Content-Type', '')
        webpage_bytes = urlh.read()
        if prefix is not None:
            webpage_bytes = prefix + webpage_bytes
        if not encoding:
            encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
        if self._downloader.params.get('dump_intermediate_pages', False):
            try:
                url = url_or_request.get_full_url()
            except AttributeError:
                url = url_or_request
            self.to_screen('Dumping request to ' + url)
            dump = base64.b64encode(webpage_bytes).decode('ascii')
            self._downloader.to_screen(dump)
        if self._downloader.params.get('write_pages', False):
            try:
                url = url_or_request.get_full_url()
            except AttributeError:
                url = url_or_request
            basen = '%s_%s' % (video_id, url)
            if len(basen) > 240:
                h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
                basen = basen[:240 - len(h)] + h
            raw_filename = basen + '.dump'
            filename = sanitize_filename(raw_filename, restricted=True)
            self.to_screen('Saving request to ' + filename)
            # Working around MAX_PATH limitation on Windows (see
            # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
            if compat_os_name == 'nt':
                absfilepath = os.path.abspath(filename)
                if len(absfilepath) > 259:
                    filename = '\\\\?\\' + absfilepath
            with open(filename, 'wb') as outf:
                outf.write(webpage_bytes)

        try:
            content = webpage_bytes.decode(encoding, 'replace')
        except LookupError:
            content = webpage_bytes.decode('utf-8', 'replace')

        if ('<title>Access to this site is blocked</title>' in content and
                'Websense' in content[:512]):
            msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
            blocked_iframe = self._html_search_regex(
                r'<iframe src="([^"]+)"', content,
                'Websense information URL', default=None)
            if blocked_iframe:
                msg += ' Visit %s for more details' % blocked_iframe
            raise ExtractorError(msg, expected=True)
        if '<title>The URL you requested has been blocked</title>' in content[:512]:
            msg = (
                'Access to this webpage has been blocked by Indian censorship. '
                'Use a VPN or proxy server (with --proxy) to route around it.')
            block_msg = self._html_search_regex(
                r'</h1><p>(.*?)</p>',
                content, 'block message', default=None)
            if block_msg:
                msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
            raise ExtractorError(msg, expected=True)

        return content

    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None, data=None, headers={}, query={}):
        """ Returns the data of the page as a string """
        success = False
        try_count = 0
        while success is False:
            try:
                res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding, data=data, headers=headers, query=query)
                success = True
            except compat_http_client.IncompleteRead as e:
                try_count += 1
                if try_count >= tries:
                    raise e
                self._sleep(timeout, video_id)
        if res is False:
            return res
        else:
            content, _ = res
            return content

    def _download_xml(self, url_or_request, video_id,
                      note='Downloading XML', errnote='Unable to download XML',
                      transform_source=None, fatal=True, encoding=None, data=None, headers={}, query={}):
        """Return the xml as an xml.etree.ElementTree.Element"""
        xml_string = self._download_webpage(
            url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query)
        if xml_string is False:
            return xml_string
        if transform_source:
            xml_string = transform_source(xml_string)
        return compat_etree_fromstring(xml_string.encode('utf-8'))

    def _download_json(self, url_or_request, video_id,
                       note='Downloading JSON metadata',
                       errnote='Unable to download JSON metadata',
                       transform_source=None,
                       fatal=True, encoding=None, data=None, headers={}, query={}):
        json_string = self._download_webpage(
            url_or_request, video_id, note, errnote, fatal=fatal,
            encoding=encoding, data=data, headers=headers, query=query)
        if (not fatal) and json_string is False:
            return None
        return self._parse_json(
            json_string, video_id, transform_source=transform_source, fatal=fatal)

    def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
        if transform_source:
            json_string = transform_source(json_string)
        try:
            return json.loads(json_string)
        except ValueError as ve:
            errmsg = '%s: Failed to parse JSON ' % video_id
            if fatal:
                raise ExtractorError(errmsg, cause=ve)
            else:
                self.report_warning(errmsg + str(ve))
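
    # Illustrative sketch (hypothetical endpoint): JSON APIs are typically
    # fetched and parsed in one step; with fatal=False a failed download or
    # a parse error yields None instead of raising:
    #
    #     meta = self._download_json(
    #         'https://example.com/api/video/%s' % video_id, video_id,
    #         note='Downloading video metadata', fatal=False)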

    def report_warning(self, msg, video_id=None):
        idstr = '' if video_id is None else '%s: ' % video_id
        self._downloader.report_warning(
            '[%s] %s%s' % (self.IE_NAME, idstr, msg))

    def to_screen(self, msg):
        """Print msg to screen, prefixing it with '[ie_name]'"""
        self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))

    def report_extraction(self, id_or_name):
        """Report information extraction."""
        self.to_screen('%s: Extracting information' % id_or_name)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self.to_screen('%s: Downloading webpage' % video_id)

    def report_age_confirmation(self):
        """Report attempt to confirm age."""
        self.to_screen('Confirming age')

    def report_login(self):
        """Report attempt to log in."""
        self.to_screen('Logging in')

    @staticmethod
    def raise_login_required(msg='This video is only available for registered users'):
        raise ExtractorError(
            '%s. Use --username and --password or --netrc to provide account credentials.' % msg,
            expected=True)

    @staticmethod
    def raise_geo_restricted(msg='This video is not available from your location due to geo restriction'):
        raise ExtractorError(
            '%s. You might want to use --proxy to workaround.' % msg,
            expected=True)

    # Methods for following #608
    @staticmethod
    def url_result(url, ie=None, video_id=None, video_title=None):
        """Returns a URL that points to a page that should be processed"""
        # TODO: ie should be the class used for getting the info
        video_info = {'_type': 'url',
                      'url': url,
                      'ie_key': ie}
        if video_id is not None:
            video_info['id'] = video_id
        if video_title is not None:
            video_info['title'] = video_title
        return video_info

    @staticmethod
    def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
        """Returns a playlist"""
        video_info = {'_type': 'playlist',
                      'entries': entries}
        if playlist_id:
            video_info['id'] = playlist_id
        if playlist_title:
            video_info['title'] = playlist_title
        if playlist_description:
            video_info['description'] = playlist_description
        return video_info
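
    # Illustrative sketch (hypothetical extractor code): these helpers are
    # typically called from _real_extract() to defer to other extractors or
    # to wrap multiple entries, e.g.:
    #
    #     entries = [
    #         self.url_result('http://example.com/watch/%s' % vid, ie='Example')
    #         for vid in video_ids
    #     ]
    #     return self.playlist_result(entries, playlist_id, playlist_title)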

    def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Perform a regex search on the given string, using a single or a list of
        patterns returning the first matching group.
        In case of failure return a default value or raise a WARNING or a
        RegexNotFoundError, depending on fatal, specifying the field name.
        """
        if isinstance(pattern, (str, compat_str, compiled_regex_type)):
            mobj = re.search(pattern, string, flags)
        else:
            for p in pattern:
                mobj = re.search(p, string, flags)
                if mobj:
                    break

        if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
            _name = '\033[0;34m%s\033[0m' % name
        else:
            _name = name

        if mobj:
            if group is None:
                # return the first matching group
                return next(g for g in mobj.groups() if g is not None)
            else:
                return mobj.group(group)
        elif default is not NO_DEFAULT:
            return default
        elif fatal:
            raise RegexNotFoundError('Unable to extract %s' % _name)
        else:
            self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
            return None

    def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Like _search_regex, but strips HTML tags and unescapes entities.
        """
        res = self._search_regex(pattern, string, name, default, fatal, flags, group)
        if res:
            return clean_html(res).strip()
        else:
            return res
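
    # Illustrative sketch (hypothetical patterns): without default a failed
    # lookup raises RegexNotFoundError; passing default makes it non-fatal:
    #
    #     title = self._search_regex(
    #         r'<h1[^>]*>([^<]+)</h1>', webpage, 'title')
    #     uploader = self._search_regex(
    #         r'"uploader":\s*"([^"]+)"', webpage, 'uploader', default=None)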

    def _get_netrc_login_info(self, netrc_machine=None):
        username = None
        password = None
        netrc_machine = netrc_machine or self._NETRC_MACHINE

        if self._downloader.params.get('usenetrc', False):
            try:
                info = netrc.netrc().authenticators(netrc_machine)
                if info is not None:
                    username = info[0]
                    password = info[2]
                else:
                    raise netrc.NetrcParseError(
                        'No authenticators for %s' % netrc_machine)
            except (IOError, netrc.NetrcParseError) as err:
                self._downloader.report_warning(
                    'parsing .netrc: %s' % error_to_compat_str(err))

        return username, password

    def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
        """
        Get the login info as (username, password)
        First look for the manually specified credentials using username_option
        and password_option as keys in params dictionary. If no such credentials
        available look in the netrc file using the netrc_machine or _NETRC_MACHINE
        value.
        If there's no info available, return (None, None)
        """
        if self._downloader is None:
            return (None, None)

        downloader_params = self._downloader.params

        # Attempt to use provided username and password or .netrc data
        if downloader_params.get(username_option) is not None:
            username = downloader_params[username_option]
            password = downloader_params[password_option]
        else:
            username, password = self._get_netrc_login_info(netrc_machine)

        return username, password

    def _get_tfa_info(self, note='two-factor verification code'):
        """
        Get the two-factor authentication info
        TODO - asking the user will be required for sms/phone verify
        currently just uses the command line option
        If there's no info available, return None
        """
        if self._downloader is None:
            return None
        downloader_params = self._downloader.params

        if downloader_params.get('twofactor') is not None:
            return downloader_params['twofactor']

        return compat_getpass('Type %s and press [Return]: ' % note)

    # Helper functions for extracting OpenGraph info
    @staticmethod
    def _og_regexes(prop):
        content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
        property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
                       % {'prop': re.escape(prop)})
        template = r'<meta[^>]+?%s[^>]+?%s'
        return [
            template % (property_re, content_re),
            template % (content_re, property_re),
        ]

    @staticmethod
    def _meta_regex(prop):
        return r'''(?isx)<meta
                    (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
                    [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)

    def _og_search_property(self, prop, html, name=None, **kargs):
        if not isinstance(prop, (list, tuple)):
            prop = [prop]
        if name is None:
            name = 'OpenGraph %s' % prop[0]
        og_regexes = []
        for p in prop:
            og_regexes.extend(self._og_regexes(p))
        escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
        if escaped is None:
            return None
        return unescapeHTML(escaped)

    def _og_search_thumbnail(self, html, **kargs):
        return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)

    def _og_search_description(self, html, **kargs):
        return self._og_search_property('description', html, fatal=False, **kargs)

    def _og_search_title(self, html, **kargs):
        return self._og_search_property('title', html, **kargs)

    def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
        regexes = self._og_regexes('video') + self._og_regexes('video:url')
        if secure:
            regexes = self._og_regexes('video:secure_url') + regexes
        return self._html_search_regex(regexes, html, name, **kargs)

    def _og_search_url(self, html, **kargs):
        return self._og_search_property('url', html, **kargs)

    def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
        if not isinstance(name, (list, tuple)):
            name = [name]
        if display_name is None:
            display_name = name[0]
        return self._html_search_regex(
            [self._meta_regex(n) for n in name],
            html, display_name, fatal=fatal, group='content', **kwargs)
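
    # Illustrative sketch: the OpenGraph and <meta> helpers above are often
    # chained as fallbacks (the meta name used here is hypothetical):
    #
    #     title = (self._og_search_title(webpage, default=None)
    #              or self._html_search_meta('title', webpage, 'title'))
    #     thumbnail = self._og_search_thumbnail(webpage)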

    def _dc_search_uploader(self, html):
        return self._html_search_meta('dc.creator', html, 'uploader')

    def _rta_search(self, html):
        # See http://www.rtalabel.org/index.php?content=howtofaq#single
        if re.search(r'(?ix)<meta\s+name="rating"\s+'
                     r'     content="RTA-5042-1996-1400-1577-RTA"',
                     html):
            return 18
        return 0

    def _media_rating_search(self, html):
        # See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
        rating = self._html_search_meta('rating', html)

        if not rating:
            return None

        RATING_TABLE = {
            'safe for kids': 0,
            'general': 8,
            '14 years': 14,
            'mature': 17,
            'restricted': 19,
        }
        return RATING_TABLE.get(rating.lower())

    def _family_friendly_search(self, html):
        # See http://schema.org/VideoObject
        family_friendly = self._html_search_meta('isFamilyFriendly', html)

        if not family_friendly:
            return None

        RATING_TABLE = {
            '1': 0,
            'true': 0,
            '0': 18,
            'false': 18,
        }
        return RATING_TABLE.get(family_friendly.lower())

    def _twitter_search_player(self, html):
        return self._html_search_meta('twitter:player', html,
                                      'twitter card player')

    def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
        json_ld = self._search_regex(
            r'(?s)<script[^>]+type=(["\'])application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>',
            html, 'JSON-LD', group='json_ld', **kwargs)
        default = kwargs.get('default', NO_DEFAULT)
        if not json_ld:
            return default if default is not NO_DEFAULT else {}
        # JSON-LD may be malformed and thus `fatal` should be respected.
        # At the same time `default` may be passed that assumes `fatal=False`
        # for _search_regex. Let's simulate the same behavior here as well.
        fatal = kwargs.get('fatal', True) if default == NO_DEFAULT else False
        return self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)

    def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
        if isinstance(json_ld, compat_str):
            json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
        if not json_ld:
            return {}
        info = {}
        if not isinstance(json_ld, (list, tuple, dict)):
            return info
        if isinstance(json_ld, dict):
            json_ld = [json_ld]
        for e in json_ld:
            if e.get('@context') == 'http://schema.org':
                item_type = e.get('@type')
                if expected_type is not None and expected_type != item_type:
                    return info
                if item_type == 'TVEpisode':
                    info.update({
                        'episode': unescapeHTML(e.get('name')),
                        'episode_number': int_or_none(e.get('episodeNumber')),
                        'description': unescapeHTML(e.get('description')),
                    })
                    part_of_season = e.get('partOfSeason')
                    if isinstance(part_of_season, dict) and part_of_season.get('@type') == 'TVSeason':
                        info['season_number'] = int_or_none(part_of_season.get('seasonNumber'))
                    part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
                    if isinstance(part_of_series, dict) and part_of_series.get('@type') == 'TVSeries':
                        info['series'] = unescapeHTML(part_of_series.get('name'))
                elif item_type == 'Article':
                    info.update({
                        'timestamp': parse_iso8601(e.get('datePublished')),
                        'title': unescapeHTML(e.get('headline')),
                        'description': unescapeHTML(e.get('articleBody')),
                    })
                elif item_type == 'VideoObject':
                    info.update({
                        'url': e.get('contentUrl'),
                        'title': unescapeHTML(e.get('name')),
                        'description': unescapeHTML(e.get('description')),
                        'thumbnail': e.get('thumbnailUrl') or e.get('thumbnailURL'),
                        'duration': parse_duration(e.get('duration')),
                        'timestamp': unified_timestamp(e.get('uploadDate')),
                        'filesize': float_or_none(e.get('contentSize')),
                        'tbr': int_or_none(e.get('bitrate')),
                        'width': int_or_none(e.get('width')),
                        'height': int_or_none(e.get('height')),
                    })
                break
        return dict((k, v) for k, v in info.items() if v is not None)

    @staticmethod
    def _hidden_inputs(html):
        html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
        hidden_inputs = {}
        for input in re.findall(r'(?i)(<input[^>]+>)', html):
            attrs = extract_attributes(input)
            if not attrs:
                continue
            if attrs.get('type') not in ('hidden', 'submit'):
                continue
            name = attrs.get('name') or attrs.get('id')
            value = attrs.get('value')
            if name and value is not None:
                hidden_inputs[name] = value
        return hidden_inputs

    def _form_hidden_inputs(self, form_id, html):
        form = self._search_regex(
            r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
            html, '%s form' % form_id, group='form')
        return self._hidden_inputs(form)
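
    # Illustrative sketch (hypothetical login form): _form_hidden_inputs()
    # is handy for carrying CSRF tokens and other hidden fields over when
    # submitting a form:
    #
    #     data = self._form_hidden_inputs('login-form', login_page)
    #     data.update({'username': username, 'password': password})
    #     self._download_webpage(
    #         login_url, None, 'Logging in',
    #         data=compat_urllib_parse_urlencode(data).encode('utf-8'))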

    def _sort_formats(self, formats, field_preference=None):
        if not formats:
            raise ExtractorError('No video formats found')

        for f in formats:
            # Automatically determine tbr when missing based on abr and vbr (improves
            # formats sorting in some cases)
            if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
                f['tbr'] = f['abr'] + f['vbr']

        def _formats_key(f):
            # TODO remove the following workaround
            from ..utils import determine_ext
            if not f.get('ext') and 'url' in f:
                f['ext'] = determine_ext(f['url'])

            if isinstance(field_preference, (list, tuple)):
                return tuple(
                    f.get(field)
                    if f.get(field) is not None
                    else ('' if field == 'format_id' else -1)
                    for field in field_preference)

            preference = f.get('preference')
            if preference is None:
                preference = 0
                if f.get('ext') in ['f4f', 'f4m']:  # Not yet supported
                    preference -= 0.5

            protocol = f.get('protocol') or determine_protocol(f)
            proto_preference = 0 if protocol in ['http', 'https'] else (-0.5 if protocol == 'rtsp' else -0.1)

            if f.get('vcodec') == 'none':  # audio only
                preference -= 50
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
                else:
                    ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
                ext_preference = 0
                try:
                    audio_ext_preference = ORDER.index(f['ext'])
                except ValueError:
                    audio_ext_preference = -1
            else:
                if f.get('acodec') == 'none':  # video only
                    preference -= 40
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['flv', 'mp4', 'webm']
                else:
                    ORDER = ['webm', 'flv', 'mp4']
                try:
                    ext_preference = ORDER.index(f['ext'])
                except ValueError:
                    ext_preference = -1
                audio_ext_preference = 0

            return (
                preference,
                f.get('language_preference') if f.get('language_preference') is not None else -1,
                f.get('quality') if f.get('quality') is not None else -1,
                f.get('tbr') if f.get('tbr') is not None else -1,
                f.get('filesize') if f.get('filesize') is not None else -1,
                f.get('vbr') if f.get('vbr') is not None else -1,
                f.get('height') if f.get('height') is not None else -1,
                f.get('width') if f.get('width') is not None else -1,
                proto_preference,
                ext_preference,
                f.get('abr') if f.get('abr') is not None else -1,
                audio_ext_preference,
                f.get('fps') if f.get('fps') is not None else -1,
                f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
                f.get('source_preference') if f.get('source_preference') is not None else -1,
                f.get('format_id') if f.get('format_id') is not None else '',
            )
        formats.sort(key=_formats_key)
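
    # Illustrative sketch: extractors normally call _sort_formats() right
    # before returning; field_preference overrides the default ordering,
    # e.g. to rank purely by resolution and then bitrate:
    #
    #     self._sort_formats(formats, field_preference=('height', 'width', 'tbr'))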

    def _check_formats(self, formats, video_id):
        if formats:
            formats[:] = filter(
                lambda f: self._is_valid_url(
                    f['url'], video_id,
                    item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
                formats)

    @staticmethod
    def _remove_duplicate_formats(formats):
        format_urls = set()
        unique_formats = []
        for f in formats:
            if f['url'] not in format_urls:
                format_urls.add(f['url'])
                unique_formats.append(f)
        formats[:] = unique_formats

    def _is_valid_url(self, url, video_id, item='video'):
        url = self._proto_relative_url(url, scheme='http:')
        # For now assume non-HTTP(S) URLs are always valid
        if not (url.startswith('http://') or url.startswith('https://')):
            return True
        try:
            self._request_webpage(url, video_id, 'Checking %s URL' % item)
            return True
        except ExtractorError as e:
            if isinstance(e.cause, compat_urllib_error.URLError):
                self.to_screen(
                    '%s: %s URL is invalid, skipping' % (video_id, item))
                return False
            raise

    def http_scheme(self):
        """ Either "http:" or "https:", depending on the user's preferences """
        return (
            'http:'
            if self._downloader.params.get('prefer_insecure', False)
            else 'https:')

    def _proto_relative_url(self, url, scheme=None):
        if url is None:
            return url
        if url.startswith('//'):
            if scheme is None:
                scheme = self.http_scheme()
            return scheme + url
        else:
            return url

    def _sleep(self, timeout, video_id, msg_template=None):
        if msg_template is None:
            msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
        msg = msg_template % {'video_id': video_id, 'timeout': timeout}
        self.to_screen(msg)
        time.sleep(timeout)

    def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
                             transform_source=lambda s: fix_xml_ampersands(s).strip(),
                             fatal=True, m3u8_id=None):
        manifest = self._download_xml(
            manifest_url, video_id, 'Downloading f4m manifest',
            'Unable to download f4m manifest',
            # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
            # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
            transform_source=transform_source,
            fatal=fatal)

        if manifest is False:
            return []

        return self._parse_f4m_formats(
            manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
            transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)

    def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
                           transform_source=lambda s: fix_xml_ampersands(s).strip(),
                           fatal=True, m3u8_id=None):
        # currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
        akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
        if akamai_pv is not None and ';' in akamai_pv.text:
            playerVerificationChallenge = akamai_pv.text.split(';')[0]
            if playerVerificationChallenge.strip() != '':
                return []

        formats = []
        manifest_version = '1.0'
        media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
        if not media_nodes:
            manifest_version = '2.0'
            media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
        # Remove unsupported DRM protected media from final formats
        # rendition (see https://github.com/rg3/youtube-dl/issues/8573).
        media_nodes = remove_encrypted_media(media_nodes)
        if not media_nodes:
            return formats
        base_url = xpath_text(
            manifest, ['{http://ns.adobe.com/f4m/1.0}baseURL', '{http://ns.adobe.com/f4m/2.0}baseURL'],
            'base URL', default=None)
        if base_url:
            base_url = base_url.strip()

        bootstrap_info = xpath_element(
            manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
            'bootstrap info', default=None)

        vcodec = None
        mime_type = xpath_text(
            manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
            'mime type', default=None)
        if mime_type and mime_type.startswith('audio/'):
            vcodec = 'none'

        for i, media_el in enumerate(media_nodes):
            tbr = int_or_none(media_el.attrib.get('bitrate'))
            width = int_or_none(media_el.attrib.get('width'))
            height = int_or_none(media_el.attrib.get('height'))
            format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
            # If <bootstrapInfo> is present, the specified f4m is a
            # stream-level manifest, and only set-level manifests may refer to
            # external resources. See section 11.4 and section 4 of F4M spec
            if bootstrap_info is None:
                media_url = None
                # @href is introduced in 2.0, see section 11.6 of F4M spec
                if manifest_version == '2.0':
                    media_url = media_el.attrib.get('href')
                if media_url is None:
                    media_url = media_el.attrib.get('url')
                if not media_url:
                    continue
                manifest_url = (
                    media_url if media_url.startswith('http://') or media_url.startswith('https://')
                    else ((base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
                # If media_url is itself a f4m manifest do the recursive extraction
                # since bitrates in parent manifest (this one) and media_url manifest
                # may differ leading to inability to resolve the format by requested
                # bitrate in f4m downloader
                ext = determine_ext(manifest_url)
                if ext == 'f4m':
                    f4m_formats = self._extract_f4m_formats(
                        manifest_url, video_id, preference=preference, f4m_id=f4m_id,
                        transform_source=transform_source, fatal=fatal)
                    # Sometimes stream-level manifest contains single media entry that
                    # does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
                    # At the same time parent's media entry in set-level manifest may
                    # contain it. We will copy it from parent in such cases.
                    if len(f4m_formats) == 1:
                        f = f4m_formats[0]
                        f.update({
                            'tbr': f.get('tbr') or tbr,
                            'width': f.get('width') or width,
                            'height': f.get('height') or height,
                            'format_id': f.get('format_id') if not tbr else format_id,
                            'vcodec': vcodec,
                        })
                    formats.extend(f4m_formats)
                    continue
                elif ext == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        manifest_url, video_id, 'mp4', preference=preference,
                        m3u8_id=m3u8_id, fatal=fatal))
                    continue
            formats.append({
                'format_id': format_id,
                'url': manifest_url,
                'manifest_url': manifest_url,
                'ext': 'flv' if bootstrap_info is not None else None,
                'tbr': tbr,
                'width': width,
                'height': height,
                'vcodec': vcodec,
                'preference': preference,
            })
        return formats

    def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None):
        return {
            'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
            'url': m3u8_url,
            'ext': ext,
            'protocol': 'm3u8',
            'preference': preference - 100 if preference else -100,
            'resolution': 'multiple',
            'format_note': 'Quality selection URL',
        }

    def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
                              entry_protocol='m3u8', preference=None,
                              m3u8_id=None, note=None, errnote=None,
                              fatal=True, live=False):

        res = self._download_webpage_handle(
            m3u8_url, video_id,
            note=note or 'Downloading m3u8 information',
            errnote=errnote or 'Failed to download m3u8 information',
            fatal=fatal)
        if res is False:
            return []
        m3u8_doc, urlh = res
        m3u8_url = urlh.geturl()

        formats = [self._m3u8_meta_format(m3u8_url, ext, preference, m3u8_id)]

        format_url = lambda u: (
            u
            if re.match(r'^https?://', u)
            else compat_urlparse.urljoin(m3u8_url, u))

        # We should try extracting formats only from master playlists [1], i.e.
        # playlists that describe available qualities. On the other hand media
        # playlists [2] should be returned as is since they contain just the media
        # without qualities renditions.
        # Fortunately, master playlist can be easily distinguished from media
        # playlist based on particular tags availability. As of [1, 2] master
        # playlist tags MUST NOT appear in a media playlist and vice versa.
        # As of [3] #EXT-X-TARGETDURATION tag is REQUIRED for every media playlist
        # and MUST NOT appear in master playlist thus we can clearly detect media
        # playlist with this criterion.
        # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.4
        # 2. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3
        # 3. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.1
        if '#EXT-X-TARGETDURATION' in m3u8_doc:  # media playlist, return as is
            return [{
                'url': m3u8_url,
                'format_id': m3u8_id,
                'ext': ext,
                'protocol': entry_protocol,
                'preference': preference,
            }]
        audio_groups = set()
        last_info = {}
        last_media = {}
        for line in m3u8_doc.splitlines():
            if line.startswith('#EXT-X-STREAM-INF:'):
                last_info = parse_m3u8_attributes(line)
            elif line.startswith('#EXT-X-MEDIA:'):
                media = parse_m3u8_attributes(line)
                media_type = media.get('TYPE')
                if media_type in ('VIDEO', 'AUDIO'):
                    media_url = media.get('URI')
                    if media_url:
                        format_id = []
                        for v in (media.get('GROUP-ID'), media.get('NAME')):
                            if v:
                                format_id.append(v)
                        f = {
                            'format_id': '-'.join(format_id),
                            'url': format_url(media_url),
                            'language': media.get('LANGUAGE'),
                            'ext': ext,
                            'protocol': entry_protocol,
                            'preference': preference,
                        }
                        if media_type == 'AUDIO':
                            f['vcodec'] = 'none'
                            audio_groups.add(media['GROUP-ID'])
                        formats.append(f)
                    else:
                        # When there is no URI in EXT-X-MEDIA let this tag's
                        # data be used by regular URI lines below
                        last_media = media
            elif line.startswith('#') or not line.strip():
                continue
            else:
                tbr = int_or_none(last_info.get('AVERAGE-BANDWIDTH') or last_info.get('BANDWIDTH'), scale=1000)
                format_id = []
                if m3u8_id:
                    format_id.append(m3u8_id)
                # Although the specification does not mention a NAME attribute
                # for EXT-X-STREAM-INF, it may still sometimes be present
                stream_name = last_info.get('NAME') or last_media.get('NAME')
                # Bandwidth of live streams may differ over time thus making
                # format_id unpredictable. So it's better to keep provided
                # format_id intact.
                if not live:
                    format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats)))
                manifest_url = format_url(line.strip())
                f = {
                    'format_id': '-'.join(format_id),
                    'url': manifest_url,
                    'manifest_url': manifest_url,
                    'tbr': tbr,
                    'ext': ext,
                    'fps': float_or_none(last_info.get('FRAME-RATE')),
                    'protocol': entry_protocol,
                    'preference': preference,
                }
                resolution = last_info.get('RESOLUTION')
                if resolution:
                    mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
                    if mobj:
                        f['width'] = int(mobj.group('width'))
                        f['height'] = int(mobj.group('height'))
                # Unified Streaming Platform
                mobj = re.search(
                    r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
                if mobj:
                    abr, vbr = mobj.groups()
                    abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
                    f.update({
                        'vbr': vbr,
                        'abr': abr,
                    })
                f.update(parse_codecs(last_info.get('CODECS')))
                if last_info.get('AUDIO') in audio_groups:
                    # TODO: update acodec for audio only formats with the same GROUP-ID
                    f['acodec'] = 'none'
                formats.append(f)
                last_info = {}
                last_media = {}
        return formats
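
    # Illustrative sketch (hypothetical URL): a typical call site downloads
    # a master playlist and merges the resulting variants into formats:
    #
    #     formats.extend(self._extract_m3u8_formats(
    #         'https://example.com/master.m3u8', video_id, 'mp4',
    #         entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))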

    @staticmethod
    def _xpath_ns(path, namespace=None):
        if not namespace:
            return path
        out = []
        for c in path.split('/'):
            if not c or c == '.':
                out.append(c)
            else:
                out.append('{%s}%s' % (namespace, c))
        return '/'.join(out)
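
    # Illustrative sketch: _xpath_ns() expands each path component with the
    # namespace, e.g.:
    #
    #     InfoExtractor._xpath_ns('./head/meta', 'http://www.w3.org/ns/SMIL')
    #     # -> './{http://www.w3.org/ns/SMIL}head/{http://www.w3.org/ns/SMIL}meta'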

    def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
        smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)

        if smil is False:
            assert not fatal
            return []

        namespace = self._parse_smil_namespace(smil)

        return self._parse_smil_formats(
            smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)

    def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
        smil = self._download_smil(smil_url, video_id, fatal=fatal)
        if smil is False:
            return {}
        return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)

    def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
        return self._download_xml(
            smil_url, video_id, 'Downloading SMIL file',
            'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)

    def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
        namespace = self._parse_smil_namespace(smil)

        formats = self._parse_smil_formats(
            smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
        subtitles = self._parse_smil_subtitles(smil, namespace=namespace)

        video_id = os.path.splitext(url_basename(smil_url))[0]
        title = None
        description = None
        upload_date = None
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            name = meta.attrib.get('name')
            content = meta.attrib.get('content')
            if not name or not content:
                continue
            if not title and name == 'title':
                title = content
            elif not description and name in ('description', 'abstract'):
                description = content
            elif not upload_date and name == 'date':
                upload_date = unified_strdate(content)

        thumbnails = [{
            'id': image.get('type'),
            'url': image.get('src'),
            'width': int_or_none(image.get('width')),
            'height': int_or_none(image.get('height')),
        } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]

        return {
            'id': video_id,
            'title': title or video_id,
            'description': description,
            'upload_date': upload_date,
            'thumbnails': thumbnails,
            'formats': formats,
            'subtitles': subtitles,
        }

    def _parse_smil_namespace(self, smil):
        return self._search_regex(
            r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)

    def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
        base = smil_url
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            b = meta.get('base') or meta.get('httpBase')
            if b:
                base = b
                break

        formats = []
        rtmp_count = 0
        http_count = 0
        m3u8_count = 0

        srcs = []
        media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
        for medium in media:
            src = medium.get('src')
            if not src or src in srcs:
                continue
            srcs.append(src)

            bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
            filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
            width = int_or_none(medium.get('width'))
            height = int_or_none(medium.get('height'))
            proto = medium.get('proto')
            ext = medium.get('ext')
            src_ext = determine_ext(src)
            streamer = medium.get('streamer') or base

            if proto == 'rtmp' or streamer.startswith('rtmp'):
                rtmp_count += 1
                formats.append({
                    'url': streamer,
                    'play_path': src,
                    'ext': 'flv',
                    'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })
                if transform_rtmp_url:
                    streamer, src = transform_rtmp_url(streamer, src)
                    formats[-1].update({
                        'url': streamer,
                        'play_path': src,
                    })
                continue

            src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
            src_url = src_url.strip()

            if proto == 'm3u8' or src_ext == 'm3u8':
                m3u8_formats = self._extract_m3u8_formats(
                    src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
                if len(m3u8_formats) == 1:
                    m3u8_count += 1
                    m3u8_formats[0].update({
                        'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
                        'tbr': bitrate,
                        'width': width,
                        'height': height,
                    })
                formats.extend(m3u8_formats)
                continue

            if src_ext == 'f4m':
                f4m_url = src_url
                if not f4m_params:
                    f4m_params = {
                        'hdcore': '3.2.0',
                        'plugin': 'flowplayer-3.2.0.1',
                    }
                f4m_url += '&' if '?' in f4m_url else '?'
                f4m_url += compat_urllib_parse_urlencode(f4m_params)
                formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
                continue

            if src_url.startswith('http') and self._is_valid_url(src, video_id):
                http_count += 1
                formats.append({
                    'url': src_url,
                    'ext': ext or src_ext or 'flv',
                    'format_id': 'http-%d' % (bitrate or http_count),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })
                continue

        return formats

    def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
        urls = []
        subtitles = {}
        for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
            src = textstream.get('src')
            if not src or src in urls:
                continue
            urls.append(src)
            ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
            lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
            subtitles.setdefault(lang, []).append({
                'url': src,
                'ext': ext,
            })
        return subtitles

    def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
        xspf = self._download_xml(
            playlist_url, playlist_id, 'Downloading xspf playlist',
1503 'Unable to download xspf manifest', fatal=fatal)
1504 if xspf is False:
1505 return []
1506 return self._parse_xspf(xspf, playlist_id)
1507
1508 def _parse_xspf(self, playlist, playlist_id):
1509 NS_MAP = {
1510 'xspf': 'http://xspf.org/ns/0/',
1511 's1': 'http://static.streamone.nl/player/ns/0',
1512 }
1513
1514 entries = []
1515 for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
1516 title = xpath_text(
1517 track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
1518 description = xpath_text(
1519 track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
1520 thumbnail = xpath_text(
1521 track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
1522 duration = float_or_none(
1523 xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
1524
1525 formats = [{
1526 'url': location.text,
1527 'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
1528 'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
1529 'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
1530 } for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
1531 self._sort_formats(formats)
1532
1533 entries.append({
1534 'id': playlist_id,
1535 'title': title,
1536 'description': description,
1537 'thumbnail': thumbnail,
1538 'duration': duration,
1539 'formats': formats,
1540 })
1541 return entries
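# Illustration (hypothetical XSPF input): a track such as
#   <track><title>Example</title><duration>120000</duration>
#         <location>http://example.com/v.mp4</location></track>
# becomes an entry with 'title' 'Example', 'duration' 120.0 (milliseconds
# scaled by 1000) and a single format whose 'url' is the location text.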
1542
1543 def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}):
1544 res = self._download_webpage_handle(
1545 mpd_url, video_id,
1546 note=note or 'Downloading MPD manifest',
1547 errnote=errnote or 'Failed to download MPD manifest',
1548 fatal=fatal)
1549 if res is False:
1550 return []
1551 mpd, urlh = res
1552 mpd_base_url = base_url(urlh.geturl())
1553
1554 return self._parse_mpd_formats(
1555 compat_etree_fromstring(mpd.encode('utf-8')), mpd_id, mpd_base_url,
1556 formats_dict=formats_dict, mpd_url=mpd_url)
1557
1558 def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}, mpd_url=None):
1559 """
1560 Parse formats from MPD manifest.
1561 References:
1562 1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
1563 http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
1564 2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
1565 """
1566 if mpd_doc.get('type') == 'dynamic':
1567 return []
1568
1569 namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
1570
1571 def _add_ns(path):
1572 return self._xpath_ns(path, namespace)
1573
1574 def is_drm_protected(element):
1575 return element.find(_add_ns('ContentProtection')) is not None
1576
1577 def extract_multisegment_info(element, ms_parent_info):
1578 ms_info = ms_parent_info.copy()
1579
1580 # As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
1581 # common attributes and elements. We will only extract what is
1582 # relevant for us.
1583 def extract_common(source):
1584 segment_timeline = source.find(_add_ns('SegmentTimeline'))
1585 if segment_timeline is not None:
1586 s_e = segment_timeline.findall(_add_ns('S'))
1587 if s_e:
1588 ms_info['total_number'] = 0
1589 ms_info['s'] = []
1590 for s in s_e:
1591 r = int(s.get('r', 0))
1592 ms_info['total_number'] += 1 + r
1593 ms_info['s'].append({
1594 't': int(s.get('t', 0)),
1595 # @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
1596 'd': int(s.attrib['d']),
1597 'r': r,
1598 })
1599 start_number = source.get('startNumber')
1600 if start_number:
1601 ms_info['start_number'] = int(start_number)
1602 timescale = source.get('timescale')
1603 if timescale:
1604 ms_info['timescale'] = int(timescale)
1605 segment_duration = source.get('duration')
1606 if segment_duration:
1607 ms_info['segment_duration'] = int(segment_duration)
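# Illustration of the SegmentTimeline handling above (hypothetical input):
#   <SegmentTimeline><S t="0" d="90000" r="2"/></SegmentTimeline>
# with timescale 90000 expands to ms_info['total_number'] == 3 and
# ms_info['s'] == [{'t': 0, 'd': 90000, 'r': 2}], i.e. a single S element
# describing three one-second segments.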
1608
1609 def extract_Initialization(source):
1610 initialization = source.find(_add_ns('Initialization'))
1611 if initialization is not None:
1612 ms_info['initialization_url'] = initialization.attrib['sourceURL']
1613
1614 segment_list = element.find(_add_ns('SegmentList'))
1615 if segment_list is not None:
1616 extract_common(segment_list)
1617 extract_Initialization(segment_list)
1618 segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
1619 if segment_urls_e:
1620 ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
1621 else:
1622 segment_template = element.find(_add_ns('SegmentTemplate'))
1623 if segment_template is not None:
1624 extract_common(segment_template)
1625 media_template = segment_template.get('media')
1626 if media_template:
1627 ms_info['media_template'] = media_template
1628 initialization = segment_template.get('initialization')
1629 if initialization:
1630 ms_info['initialization_url'] = initialization
1631 else:
1632 extract_Initialization(segment_template)
1633 return ms_info
1634
1635 mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
1636 formats = []
1637 for period in mpd_doc.findall(_add_ns('Period')):
1638 period_duration = parse_duration(period.get('duration')) or mpd_duration
1639 period_ms_info = extract_multisegment_info(period, {
1640 'start_number': 1,
1641 'timescale': 1,
1642 })
1643 for adaptation_set in period.findall(_add_ns('AdaptationSet')):
1644 if is_drm_protected(adaptation_set):
1645 continue
1646 adaptation_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
1647 for representation in adaptation_set.findall(_add_ns('Representation')):
1648 if is_drm_protected(representation):
1649 continue
1650 representation_attrib = adaptation_set.attrib.copy()
1651 representation_attrib.update(representation.attrib)
1652 # According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
1653 mime_type = representation_attrib['mimeType']
1654 content_type = mime_type.split('/')[0]
1655 if content_type == 'text':
1656 # TODO implement WebVTT downloading
1657 pass
1658 elif content_type in ('video', 'audio'):
1659 base_url = ''
1660 for element in (representation, adaptation_set, period, mpd_doc):
1661 base_url_e = element.find(_add_ns('BaseURL'))
1662 if base_url_e is not None:
1663 base_url = base_url_e.text + base_url
1664 if re.match(r'^https?://', base_url):
1665 break
1666 if mpd_base_url and not re.match(r'^https?://', base_url):
1667 if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
1668 mpd_base_url += '/'
1669 base_url = mpd_base_url + base_url
1670 representation_id = representation_attrib.get('id')
1671 lang = representation_attrib.get('lang')
1672 url_el = representation.find(_add_ns('BaseURL'))
1673 filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
1674 f = {
1675 'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
1676 'url': base_url,
1677 'manifest_url': mpd_url,
1678 'ext': mimetype2ext(mime_type),
1679 'width': int_or_none(representation_attrib.get('width')),
1680 'height': int_or_none(representation_attrib.get('height')),
1681 'tbr': int_or_none(representation_attrib.get('bandwidth'), 1000),
1682 'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
1683 'fps': int_or_none(representation_attrib.get('frameRate')),
1684 'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
1685 'format_note': 'DASH %s' % content_type,
1686 'filesize': filesize,
1687 }
1688 f.update(parse_codecs(representation_attrib.get('codecs')))
1689 representation_ms_info = extract_multisegment_info(representation, adaptation_set_ms_info)
1690 if 'segment_urls' not in representation_ms_info and 'media_template' in representation_ms_info:
1691
1692 media_template = representation_ms_info['media_template']
1693 media_template = media_template.replace('$RepresentationID$', representation_id)
1694 media_template = re.sub(r'\$(Number|Bandwidth|Time)\$', r'%(\1)d', media_template)
1695 media_template = re.sub(r'\$(Number|Bandwidth|Time)%([^$]+)\$', r'%(\1)\2', media_template)
1696 media_template = media_template.replace('$$', '$')
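# Illustration of the substitutions above (hypothetical template): with
# representation id 'v1', 'seg-$RepresentationID$-$Number%05d$.m4s' becomes
# 'seg-v1-%(Number)05d.m4s', so that
#   media_template % {'Number': 42} == 'seg-v1-00042.m4s'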
1697
1698 # As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
1699 # can't be used at the same time
1700 if '%(Number' in media_template and 's' not in representation_ms_info:
1701 segment_duration = None
1702 if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
1703 segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
1704 representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
1705 representation_ms_info['fragments'] = [{
1706 'url': media_template % {
1707 'Number': segment_number,
1708 'Bandwidth': int_or_none(representation_attrib.get('bandwidth')),
1709 },
1710 'duration': segment_duration,
1711 } for segment_number in range(
1712 representation_ms_info['start_number'],
1713 representation_ms_info['total_number'] + representation_ms_info['start_number'])]
1714 else:
1715 # $Number*$ or $Time$ in media template with S list available
1716 # Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
1717 # Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
1718 representation_ms_info['fragments'] = []
1719 segment_time = 0
1720 segment_d = None
1721 segment_number = representation_ms_info['start_number']
1722
1723 def add_segment_url():
1724 segment_url = media_template % {
1725 'Time': segment_time,
1726 'Bandwidth': int_or_none(representation_attrib.get('bandwidth')),
1727 'Number': segment_number,
1728 }
1729 representation_ms_info['fragments'].append({
1730 'url': segment_url,
1731 'duration': float_or_none(segment_d, representation_ms_info['timescale']),
1732 })
1733
1734 for num, s in enumerate(representation_ms_info['s']):
1735 segment_time = s.get('t') or segment_time
1736 segment_d = s['d']
1737 add_segment_url()
1738 segment_number += 1
1739 for r in range(s.get('r', 0)):
1740 segment_time += segment_d
1741 add_segment_url()
1742 segment_number += 1
1743 segment_time += segment_d
1744 elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
1745 # No media template
1746 # Example: https://www.youtube.com/watch?v=iXZV5uAYMJI
1747 # or any YouTube dashsegments video
1748 fragments = []
1749 segment_index = 0
1750 for s in representation_ms_info['s']:
1751 for r in range(s.get('r', 0) + 1):
1752 fragments.append({
1753 'url': representation_ms_info['segment_urls'][segment_index],
1754 'duration': float_or_none(s['d'], representation_ms_info['timescale']),
1755 })
1756 segment_index += 1
1757 representation_ms_info['fragments'] = fragments
1758 # NB: MPD manifest may contain direct URLs to unfragmented media.
1759 # No fragments key is present in this case.
1760 if 'fragments' in representation_ms_info:
1761 f.update({
1762 'fragments': [],
1763 'protocol': 'http_dash_segments',
1764 })
1765 if 'initialization_url' in representation_ms_info:
1766 initialization_url = representation_ms_info['initialization_url'].replace('$RepresentationID$', representation_id)
1767 if not f.get('url'):
1768 f['url'] = initialization_url
1769 f['fragments'].append({'url': initialization_url})
1770 f['fragments'].extend(representation_ms_info['fragments'])
1771 for fragment in f['fragments']:
1772 fragment['url'] = urljoin(base_url, fragment['url'])
1773 try:
1774 existing_format = next(
1775 fo for fo in formats
1776 if fo['format_id'] == representation_id)
1777 except StopIteration:
1778 full_info = formats_dict.get(representation_id, {}).copy()
1779 full_info.update(f)
1780 formats.append(full_info)
1781 else:
1782 existing_format.update(f)
1783 else:
1784 self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
1785 return formats
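# Illustration (editorial, hypothetical manifest): a minimal static MPD
#   <MPD xmlns="urn:mpeg:dash:schema:mpd:2011" mediaPresentationDuration="PT10S">
#     <Period><AdaptationSet mimeType="video/mp4">
#       <Representation id="v1" bandwidth="1000000" width="1280" height="720">
#         <BaseURL>http://example.com/v1.mp4</BaseURL>
#       </Representation></AdaptationSet></Period></MPD>
# parses to a single unfragmented format with format_id 'dash-v1' (for
# mpd_id='dash'), 'url' http://example.com/v1.mp4, 'tbr' 1000 and 'ext' 'mp4'.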
1786
1787 def _extract_ism_formats(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True):
1788 res = self._download_webpage_handle(
1789 ism_url, video_id,
1790 note=note or 'Downloading ISM manifest',
1791 errnote=errnote or 'Failed to download ISM manifest',
1792 fatal=fatal)
1793 if res is False:
1794 return []
1795 ism, urlh = res
1796
1797 return self._parse_ism_formats(
1798 compat_etree_fromstring(ism.encode('utf-8')), urlh.geturl(), ism_id)
1799
1800 def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None):
1801 if ism_doc.get('IsLive') == 'TRUE' or ism_doc.find('Protection') is not None:
1802 return []
1803
1804 duration = int(ism_doc.attrib['Duration'])
1805 timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000
1806
1807 formats = []
1808 for stream in ism_doc.findall('StreamIndex'):
1809 stream_type = stream.get('Type')
1810 if stream_type not in ('video', 'audio'):
1811 continue
1812 url_pattern = stream.attrib['Url']
1813 stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
1814 stream_name = stream.get('Name')
1815 for track in stream.findall('QualityLevel'):
1816 fourcc = track.get('FourCC')
1817 # TODO: add support for WVC1 and WMAP
1818 if fourcc not in ('H264', 'AVC1', 'AACL'):
1819 self.report_warning('%s is not a supported codec' % fourcc)
1820 continue
1821 tbr = int(track.attrib['Bitrate']) // 1000
1822 width = int_or_none(track.get('MaxWidth'))
1823 height = int_or_none(track.get('MaxHeight'))
1824 sampling_rate = int_or_none(track.get('SamplingRate'))
1825
1826 track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
1827 track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern)
1828
1829 fragments = []
1830 fragment_ctx = {
1831 'time': 0,
1832 }
1833 stream_fragments = stream.findall('c')
1834 for stream_fragment_index, stream_fragment in enumerate(stream_fragments):
1835 fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time']
1836 fragment_repeat = int_or_none(stream_fragment.get('r')) or 1
1837 fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
1838 if not fragment_ctx['duration']:
1839 try:
1840 next_fragment_time = int(stream_fragments[stream_fragment_index + 1].attrib['t'])
1841 except IndexError:
1842 next_fragment_time = duration
1843 fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
1844 for _ in range(fragment_repeat):
1845 fragments.append({
1846 'url': re.sub(r'{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern),
1847 'duration': fragment_ctx['duration'] / stream_timescale,
1848 })
1849 fragment_ctx['time'] += fragment_ctx['duration']
1850
1851 format_id = []
1852 if ism_id:
1853 format_id.append(ism_id)
1854 if stream_name:
1855 format_id.append(stream_name)
1856 format_id.append(compat_str(tbr))
1857
1858 formats.append({
1859 'format_id': '-'.join(format_id),
1860 'url': ism_url,
1861 'manifest_url': ism_url,
1862 'ext': 'ismv' if stream_type == 'video' else 'isma',
1863 'width': width,
1864 'height': height,
1865 'tbr': tbr,
1866 'asr': sampling_rate,
1867 'vcodec': 'none' if stream_type == 'audio' else fourcc,
1868 'acodec': 'none' if stream_type == 'video' else fourcc,
1869 'protocol': 'ism',
1870 'fragments': fragments,
1871 '_download_params': {
1872 'duration': duration,
1873 'timescale': stream_timescale,
1874 'width': width or 0,
1875 'height': height or 0,
1876 'fourcc': fourcc,
1877 'codec_private_data': track.get('CodecPrivateData'),
1878 'sampling_rate': sampling_rate,
1879 'channels': int_or_none(track.get('Channels', 2)),
1880 'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
1881 'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
1882 },
1883 })
1884 return formats
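# Illustration (hypothetical manifest values): with
#   Url="QualityLevels({bitrate})/Fragments(video={start time})"
# and a QualityLevel with Bitrate="1000000", the loop above yields fragment
# URLs like .../QualityLevels(1000000)/Fragments(video=0), substituting each
# fragment's accumulated start time in turn.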
1885
1886 def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8', mpd_id=None):
1887 def absolute_url(video_url):
1888 return compat_urlparse.urljoin(base_url, video_url)
1889
1890 def parse_content_type(content_type):
1891 if not content_type:
1892 return {}
1893 ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
1894 if ctr:
1895 mimetype, codecs = ctr.groups()
1896 f = parse_codecs(codecs)
1897 f['ext'] = mimetype2ext(mimetype)
1898 return f
1899 return {}
1900
1901 def _media_formats(src, cur_media_type):
1902 full_url = absolute_url(src)
1903 ext = determine_ext(full_url)
1904 if ext == 'm3u8':
1905 is_plain_url = False
1906 formats = self._extract_m3u8_formats(
1907 full_url, video_id, ext='mp4',
1908 entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id)
1909 elif ext == 'mpd':
1910 is_plain_url = False
1911 formats = self._extract_mpd_formats(
1912 full_url, video_id, mpd_id=mpd_id)
1913 else:
1914 is_plain_url = True
1915 formats = [{
1916 'url': full_url,
1917 'vcodec': 'none' if cur_media_type == 'audio' else None,
1918 }]
1919 return is_plain_url, formats
1920
1921 entries = []
1922 media_tags = [(media_tag, media_type, '')
1923 for media_tag, media_type
1924 in re.findall(r'(?s)(<(video|audio)[^>]*/>)', webpage)]
1925 media_tags.extend(re.findall(r'(?s)(<(?P<tag>video|audio)[^>]*>)(.*?)</(?P=tag)>', webpage))
1926 for media_tag, media_type, media_content in media_tags:
1927 media_info = {
1928 'formats': [],
1929 'subtitles': {},
1930 }
1931 media_attributes = extract_attributes(media_tag)
1932 src = media_attributes.get('src')
1933 if src:
1934 _, formats = _media_formats(src, media_type)
1935 media_info['formats'].extend(formats)
1936 media_info['thumbnail'] = media_attributes.get('poster')
1937 if media_content:
1938 for source_tag in re.findall(r'<source[^>]+>', media_content):
1939 source_attributes = extract_attributes(source_tag)
1940 src = source_attributes.get('src')
1941 if not src:
1942 continue
1943 is_plain_url, formats = _media_formats(src, media_type)
1944 if is_plain_url:
1945 f = parse_content_type(source_attributes.get('type'))
1946 f.update(formats[0])
1947 media_info['formats'].append(f)
1948 else:
1949 media_info['formats'].extend(formats)
1950 for track_tag in re.findall(r'<track[^>]+>', media_content):
1951 track_attributes = extract_attributes(track_tag)
1952 kind = track_attributes.get('kind')
1953 if not kind or kind in ('subtitles', 'captions'):
1954 src = track_attributes.get('src')
1955 if not src:
1956 continue
1957 lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
1958 media_info['subtitles'].setdefault(lang, []).append({
1959 'url': absolute_url(src),
1960 })
1961 if media_info['formats'] or media_info['subtitles']:
1962 entries.append(media_info)
1963 return entries
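# Illustration (hypothetical page snippet): markup such as
#   <video poster="/t.jpg"><source src="/v.mp4"
#     type='video/mp4; codecs="avc1.64001f, mp4a.40.2"'></video>
# yields one entry whose single format has an absolute 'url', 'ext' 'mp4'
# and vcodec/acodec filled in via parse_codecs, with 'thumbnail' taken from
# the poster attribute.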
1964
1965 def _extract_akamai_formats(self, manifest_url, video_id):
1966 formats = []
1967 hdcore_sign = 'hdcore=3.7.0'
1968 f4m_url = re.sub(r'(https?://.+?)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
1969 if 'hdcore=' not in f4m_url:
1970 f4m_url += ('&' if '?' in f4m_url else '?') + hdcore_sign
1971 f4m_formats = self._extract_f4m_formats(
1972 f4m_url, video_id, f4m_id='hds', fatal=False)
1973 for entry in f4m_formats:
1974 entry.update({'extra_param_to_segment_url': hdcore_sign})
1975 formats.extend(f4m_formats)
1976 m3u8_url = re.sub(r'(https?://.+?)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
1977 formats.extend(self._extract_m3u8_formats(
1978 m3u8_url, video_id, 'mp4', 'm3u8_native',
1979 m3u8_id='hls', fatal=False))
1980 return formats
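# Illustration (hypothetical Akamai URL): for
#   http://example-vh.akamaihd.net/i/foo/master.m3u8
# the HDS counterpart probed above is
#   http://example-vh.akamaihd.net/z/foo/manifest.f4m?hdcore=3.7.0
# while the HLS URL keeps the /i/.../master.m3u8 form.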
1981
1982 def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
1983 url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
1984 url_base = self._search_regex(r'(?:https?|rtmp|rtsp)(://[^?]+)', url, 'format url')
1985 http_base_url = 'http' + url_base
1986 formats = []
1987 if 'm3u8' not in skip_protocols:
1988 formats.extend(self._extract_m3u8_formats(
1989 http_base_url + '/playlist.m3u8', video_id, 'mp4',
1990 m3u8_entry_protocol, m3u8_id='hls', fatal=False))
1991 if 'f4m' not in skip_protocols:
1992 formats.extend(self._extract_f4m_formats(
1993 http_base_url + '/manifest.f4m',
1994 video_id, f4m_id='hds', fatal=False))
1995 if 'dash' not in skip_protocols:
1996 formats.extend(self._extract_mpd_formats(
1997 http_base_url + '/manifest.mpd',
1998 video_id, mpd_id='dash', fatal=False))
1999 if re.search(r'(?:/smil:|\.smil)', url_base):
2000 if 'smil' not in skip_protocols:
2001 rtmp_formats = self._extract_smil_formats(
2002 http_base_url + '/jwplayer.smil',
2003 video_id, fatal=False)
2004 for rtmp_format in rtmp_formats:
2005 rtsp_format = rtmp_format.copy()
2006 rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
2007 del rtsp_format['play_path']
2008 del rtsp_format['ext']
2009 rtsp_format.update({
2010 'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'),
2011 'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
2012 'protocol': 'rtsp',
2013 })
2014 formats.extend([rtmp_format, rtsp_format])
2015 else:
2016 for protocol in ('rtmp', 'rtsp'):
2017 if protocol not in skip_protocols:
2018 formats.append({
2019 'url': protocol + url_base,
2020 'format_id': protocol,
2021 'protocol': protocol,
2022 })
2023 return formats
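# Illustration (hypothetical Wowza URL): given
#   rtmp://example.com/vod/smil:video.smil/playlist.m3u8
# the trailing manifest name is stripped, the HLS/HDS/DASH variants are
# probed over http, and each SMIL rtmp format gains an rtsp twin, e.g.
# rtmp://host/app with play_path mp4:video.mp4 becomes
# rtsp://host/app/mp4:video.mp4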
2024
2025 def _live_title(self, name):
2026 """ Generate the title for a live video """
2027 now = datetime.datetime.now()
2028 now_str = now.strftime('%Y-%m-%d %H:%M')
2029 return name + ' ' + now_str
2030
2031 def _int(self, v, name, fatal=False, **kwargs):
2032 res = int_or_none(v, **kwargs)
2035 if res is None:
2036 msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
2037 if fatal:
2038 raise ExtractorError(msg)
2039 else:
2040 self._downloader.report_warning(msg)
2041 return res
2042
2043 def _float(self, v, name, fatal=False, **kwargs):
2044 res = float_or_none(v, **kwargs)
2045 if res is None:
2046 msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
2047 if fatal:
2048 raise ExtractorError(msg)
2049 else:
2050 self._downloader.report_warning(msg)
2051 return res
2052
2053 def _set_cookie(self, domain, name, value, expire_time=None):
2054 cookie = compat_cookiejar.Cookie(
2055 0, name, value, None, None, domain, None,
2056 None, '/', True, False, expire_time, '', None, None, None)
2057 self._downloader.cookiejar.set_cookie(cookie)
2058
2059 def _get_cookies(self, url):
2060 """ Return a compat_cookies.SimpleCookie with the cookies for the url """
2061 req = sanitized_Request(url)
2062 self._downloader.cookiejar.add_cookie_header(req)
2063 return compat_cookies.SimpleCookie(req.get_header('Cookie'))
2064
2065 def get_testcases(self, include_onlymatching=False):
2066 t = getattr(self, '_TEST', None)
2067 if t:
2068 assert not hasattr(self, '_TESTS'), \
2069 '%s has _TEST and _TESTS' % type(self).__name__
2070 tests = [t]
2071 else:
2072 tests = getattr(self, '_TESTS', [])
2073 for t in tests:
2074 if not include_onlymatching and t.get('only_matching', False):
2075 continue
2076 t['name'] = type(self).__name__[:-len('IE')]
2077 yield t
2078
2079 def is_suitable(self, age_limit):
2080 """ Test whether the extractor is generally suitable for the given
2081 age limit (i.e. pornographic sites are not, all others usually are) """
2082
2083 any_restricted = False
2084 for tc in self.get_testcases(include_onlymatching=False):
2085 if tc.get('playlist', []):
2086 tc = tc['playlist'][0]
2087 is_restricted = age_restricted(
2088 tc.get('info_dict', {}).get('age_limit'), age_limit)
2089 if not is_restricted:
2090 return True
2091 any_restricted = any_restricted or is_restricted
2092 return not any_restricted
2093
2094 def extract_subtitles(self, *args, **kwargs):
2095 if (self._downloader.params.get('writesubtitles', False) or
2096 self._downloader.params.get('listsubtitles')):
2097 return self._get_subtitles(*args, **kwargs)
2098 return {}
2099
2100 def _get_subtitles(self, *args, **kwargs):
2101 raise NotImplementedError('This method must be implemented by subclasses')
2102
2103 @staticmethod
2104 def _merge_subtitle_items(subtitle_list1, subtitle_list2):
2105 """ Merge subtitle items for one language. Items with duplicated URLs
2106 will be dropped. """
2107 list1_urls = set([item['url'] for item in subtitle_list1])
2108 ret = list(subtitle_list1)
2109 ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
2110 return ret
2111
2112 @classmethod
2113 def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
2114 """ Merge two subtitle dictionaries, language by language. """
2115 ret = dict(subtitle_dict1)
2116 for lang in subtitle_dict2:
2117 ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
2118 return ret
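# Illustration (hypothetical data): merging
#   {'en': [{'url': 'a.vtt'}]}
# with
#   {'en': [{'url': 'a.vtt'}, {'url': 'b.vtt'}], 'de': [{'url': 'c.vtt'}]}
# gives {'en': [{'url': 'a.vtt'}, {'url': 'b.vtt'}], 'de': [{'url': 'c.vtt'}]};
# duplicate URLs within a language are dropped.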
2119
2120 def extract_automatic_captions(self, *args, **kwargs):
2121 if (self._downloader.params.get('writeautomaticsub', False) or
2122 self._downloader.params.get('listsubtitles')):
2123 return self._get_automatic_captions(*args, **kwargs)
2124 return {}
2125
2126 def _get_automatic_captions(self, *args, **kwargs):
2127 raise NotImplementedError('This method must be implemented by subclasses')
2128
2129 def mark_watched(self, *args, **kwargs):
2130 if (self._downloader.params.get('mark_watched', False) and
2131 (self._get_login_info()[0] is not None or
2132 self._downloader.params.get('cookiefile') is not None)):
2133 self._mark_watched(*args, **kwargs)
2134
2135 def _mark_watched(self, *args, **kwargs):
2136 raise NotImplementedError('This method must be implemented by subclasses')
2137
2138 def geo_verification_headers(self):
2139 headers = {}
2140 geo_verification_proxy = self._downloader.params.get('geo_verification_proxy')
2141 if geo_verification_proxy:
2142 headers['Ytdl-request-proxy'] = geo_verification_proxy
2143 return headers
2144
2145 def _generic_id(self, url):
2146 return compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
2147
2148 def _generic_title(self, url):
2149 return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])
2150
2151
2152 class SearchInfoExtractor(InfoExtractor):
2153 """
2154 Base class for paged search queries extractors.
2155 They accept URLs in the format _SEARCH_KEY(|all|[1-9][0-9]*):{query}
2156 Instances should define _SEARCH_KEY and _MAX_RESULTS.
2157 """
2158
2159 @classmethod
2160 def _make_valid_url(cls):
2161 return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
2162
2163 @classmethod
2164 def suitable(cls, url):
2165 return re.match(cls._make_valid_url(), url) is not None
2166
2167 def _real_extract(self, query):
2168 mobj = re.match(self._make_valid_url(), query)
2169 if mobj is None:
2170 raise ExtractorError('Invalid search query "%s"' % query)
2171
2172 prefix = mobj.group('prefix')
2173 query = mobj.group('query')
2174 if prefix == '':
2175 return self._get_n_results(query, 1)
2176 elif prefix == 'all':
2177 return self._get_n_results(query, self._MAX_RESULTS)
2178 else:
2179 n = int(prefix)
2180 if n <= 0:
2181 raise ExtractorError('Invalid download number %s for query "%s"' % (n, query))
2182 elif n > self._MAX_RESULTS:
2183 self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
2184 n = self._MAX_RESULTS
2185 return self._get_n_results(query, n)
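# Illustration (for a subclass with _SEARCH_KEY 'ytsearch'):
#   'ytsearch:foo' fetches 1 result, 'ytsearch5:foo' fetches 5 and
#   'ytsearchall:foo' fetches _MAX_RESULTS; requests above _MAX_RESULTS
#   are clamped with a warning.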
2186
2187 def _get_n_results(self, query, n):
2188 """Get a specified number of results for a query"""
2189 raise NotImplementedError('This method must be implemented by subclasses')
2190
2191 @property
2192 def SEARCH_KEY(self):
2193 return self._SEARCH_KEY