]> jfr.im git - yt-dlp.git/blob - youtube_dl/extractor/common.py
[extractor/common] Properly decode error string on python 2 (Closes #1354, closes...
[yt-dlp.git] / youtube_dl / extractor / common.py
1 from __future__ import unicode_literals
2
3 import base64
4 import datetime
5 import hashlib
6 import json
7 import netrc
8 import os
9 import re
10 import socket
11 import sys
12 import time
13
14 from ..compat import (
15 compat_cookiejar,
16 compat_cookies,
17 compat_getpass,
18 compat_http_client,
19 compat_urllib_error,
20 compat_urllib_parse,
21 compat_urllib_parse_urlparse,
22 compat_urlparse,
23 compat_str,
24 compat_etree_fromstring,
25 )
26 from ..utils import (
27 NO_DEFAULT,
28 age_restricted,
29 bug_reports_message,
30 clean_html,
31 compiled_regex_type,
32 determine_ext,
33 ExtractorError,
34 fix_xml_ampersands,
35 float_or_none,
36 int_or_none,
37 preferredencoding,
38 RegexNotFoundError,
39 sanitize_filename,
40 sanitized_Request,
41 unescapeHTML,
42 unified_strdate,
43 url_basename,
44 xpath_text,
45 xpath_with_ns,
46 )
47
48
49 class InfoExtractor(object):
50 """Information Extractor class.
51
52 Information extractors are the classes that, given a URL, extract
53 information about the video (or videos) the URL refers to. This
54 information includes the real video URL, the video title, author and
55 others. The information is stored in a dictionary which is then
56 passed to the YoutubeDL. The YoutubeDL processes this
57 information possibly downloading the video to the file system, among
58 other possible outcomes.
59
60 The type field determines the type of the result.
61 By far the most common value (and the default if _type is missing) is
62 "video", which indicates a single video.
63
64 For a video, the dictionaries must include the following fields:
65
66 id: Video identifier.
67 title: Video title, unescaped.
68
69 Additionally, it must contain either a formats entry or a url one:
70
71 formats: A list of dictionaries for each format available, ordered
72 from worst to best quality.
73
74 Potential fields:
75 * url Mandatory. The URL of the video file
76 * ext Will be calculated from URL if missing
77 * format A human-readable description of the format
78 ("mp4 container with h264/opus").
79 Calculated from the format_id, width, height.
80 and format_note fields if missing.
81 * format_id A short description of the format
82 ("mp4_h264_opus" or "19").
83 Technically optional, but strongly recommended.
84 * format_note Additional info about the format
85 ("3D" or "DASH video")
86 * width Width of the video, if known
87 * height Height of the video, if known
88 * resolution Textual description of width and height
89 * tbr Average bitrate of audio and video in KBit/s
90 * abr Average audio bitrate in KBit/s
91 * acodec Name of the audio codec in use
92 * asr Audio sampling rate in Hertz
93 * vbr Average video bitrate in KBit/s
94 * fps Frame rate
95 * vcodec Name of the video codec in use
96 * container Name of the container format
97 * filesize The number of bytes, if known in advance
98 * filesize_approx An estimate for the number of bytes
99 * player_url SWF Player URL (used for rtmpdump).
100 * protocol The protocol that will be used for the actual
101 download, lower-case.
102 "http", "https", "rtsp", "rtmp", "rtmpe",
103 "m3u8", or "m3u8_native".
104 * preference Order number of this format. If this field is
105 present and not None, the formats get sorted
106 by this field, regardless of all other values.
107 -1 for default (order by other properties),
108 -2 or smaller for less than default.
109 < -1000 to hide the format (if there is
110 another one which is strictly better)
111 * language_preference Is this in the correct requested
112 language?
113 10 if it's what the URL is about,
114 -1 for default (don't know),
115 -10 otherwise, other values reserved for now.
116 * quality Order number of the video quality of this
117 format, irrespective of the file format.
118 -1 for default (order by other properties),
119 -2 or smaller for less than default.
120 * source_preference Order number for this video source
121 (quality takes higher priority)
122 -1 for default (order by other properties),
123 -2 or smaller for less than default.
124 * http_headers A dictionary of additional HTTP headers
125 to add to the request.
126 * stretched_ratio If given and not 1, indicates that the
127 video's pixels are not square.
128 width : height ratio as float.
129 * no_resume The server does not support resuming the
130 (HTTP or RTMP) download. Boolean.
131
132 url: Final video URL.
133 ext: Video filename extension.
134 format: The video format, defaults to ext (used for --get-format)
135 player_url: SWF Player URL (used for rtmpdump).
136
137 The following fields are optional:
138
139 alt_title: A secondary title of the video.
140 display_id An alternative identifier for the video, not necessarily
141 unique, but available before title. Typically, id is
142 something like "4234987", title "Dancing naked mole rats",
143 and display_id "dancing-naked-mole-rats"
144 thumbnails: A list of dictionaries, with the following entries:
145 * "id" (optional, string) - Thumbnail format ID
146 * "url"
147 * "preference" (optional, int) - quality of the image
148 * "width" (optional, int)
149 * "height" (optional, int)
150 * "resolution" (optional, string "{width}x{height"},
151 deprecated)
152 thumbnail: Full URL to a video thumbnail image.
153 description: Full video description.
154 uploader: Full name of the video uploader.
155 creator: The main artist who created the video.
156 release_date: The date (YYYYMMDD) when the video was released.
157 timestamp: UNIX timestamp of the moment the video became available.
158 upload_date: Video upload date (YYYYMMDD).
159 If not explicitly set, calculated from timestamp.
160 uploader_id: Nickname or id of the video uploader.
161 location: Physical location where the video was filmed.
162 subtitles: The available subtitles as a dictionary in the format
163 {language: subformats}. "subformats" is a list sorted from
164 lower to higher preference, each element is a dictionary
165 with the "ext" entry and one of:
166 * "data": The subtitles file contents
167 * "url": A URL pointing to the subtitles file
168 "ext" will be calculated from URL if missing
169 automatic_captions: Like 'subtitles', used by the YoutubeIE for
170 automatically generated captions
171 duration: Length of the video in seconds, as an integer or float.
172 view_count: How many users have watched the video on the platform.
173 like_count: Number of positive ratings of the video
174 dislike_count: Number of negative ratings of the video
175 repost_count: Number of reposts of the video
176 average_rating: Average rating give by users, the scale used depends on the webpage
177 comment_count: Number of comments on the video
178 comments: A list of comments, each with one or more of the following
179 properties (all but one of text or html optional):
180 * "author" - human-readable name of the comment author
181 * "author_id" - user ID of the comment author
182 * "id" - Comment ID
183 * "html" - Comment as HTML
184 * "text" - Plain text of the comment
185 * "timestamp" - UNIX timestamp of comment
186 * "parent" - ID of the comment this one is replying to.
187 Set to "root" to indicate that this is a
188 comment to the original video.
189 age_limit: Age restriction for the video, as an integer (years)
190 webpage_url: The URL to the video webpage, if given to youtube-dl it
191 should allow to get the same result again. (It will be set
192 by YoutubeDL if it's missing)
193 categories: A list of categories that the video falls in, for example
194 ["Sports", "Berlin"]
195 tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
196 is_live: True, False, or None (=unknown). Whether this video is a
197 live stream that goes on instead of a fixed-length video.
198 start_time: Time in seconds where the reproduction should start, as
199 specified in the URL.
200 end_time: Time in seconds where the reproduction should end, as
201 specified in the URL.
202
203 Unless mentioned otherwise, the fields should be Unicode strings.
204
205 Unless mentioned otherwise, None is equivalent to absence of information.
206
207
208 _type "playlist" indicates multiple videos.
209 There must be a key "entries", which is a list, an iterable, or a PagedList
210 object, each element of which is a valid dictionary by this specification.
211
212 Additionally, playlists can have "title", "description" and "id" attributes
213 with the same semantics as videos (see above).
214
215
216 _type "multi_video" indicates that there are multiple videos that
217 form a single show, for examples multiple acts of an opera or TV episode.
218 It must have an entries key like a playlist and contain all the keys
219 required for a video at the same time.
220
221
222 _type "url" indicates that the video must be extracted from another
223 location, possibly by a different extractor. Its only required key is:
224 "url" - the next URL to extract.
225 The key "ie_key" can be set to the class name (minus the trailing "IE",
226 e.g. "Youtube") if the extractor class is known in advance.
227 Additionally, the dictionary may have any properties of the resolved entity
228 known in advance, for example "title" if the title of the referred video is
229 known ahead of time.
230
231
232 _type "url_transparent" entities have the same specification as "url", but
233 indicate that the given additional information is more precise than the one
234 associated with the resolved URL.
235 This is useful when a site employs a video service that hosts the video and
236 its technical metadata, but that video service does not embed a useful
237 title, description etc.
238
239
240 Subclasses of this one should re-define the _real_initialize() and
241 _real_extract() methods and define a _VALID_URL regexp.
242 Probably, they should also be added to the list of extractors.
243
244 Finally, the _WORKING attribute should be set to False for broken IEs
245 in order to warn the users and skip the tests.
246 """
247
248 _ready = False
249 _downloader = None
250 _WORKING = True
251
252 def __init__(self, downloader=None):
253 """Constructor. Receives an optional downloader."""
254 self._ready = False
255 self.set_downloader(downloader)
256
257 @classmethod
258 def suitable(cls, url):
259 """Receives a URL and returns True if suitable for this IE."""
260
261 # This does not use has/getattr intentionally - we want to know whether
262 # we have cached the regexp for *this* class, whereas getattr would also
263 # match the superclass
264 if '_VALID_URL_RE' not in cls.__dict__:
265 cls._VALID_URL_RE = re.compile(cls._VALID_URL)
266 return cls._VALID_URL_RE.match(url) is not None
267
268 @classmethod
269 def _match_id(cls, url):
270 if '_VALID_URL_RE' not in cls.__dict__:
271 cls._VALID_URL_RE = re.compile(cls._VALID_URL)
272 m = cls._VALID_URL_RE.match(url)
273 assert m
274 return m.group('id')
275
276 @classmethod
277 def working(cls):
278 """Getter method for _WORKING."""
279 return cls._WORKING
280
281 def initialize(self):
282 """Initializes an instance (authentication, etc)."""
283 if not self._ready:
284 self._real_initialize()
285 self._ready = True
286
287 def extract(self, url):
288 """Extracts URL information and returns it in list of dicts."""
289 try:
290 self.initialize()
291 return self._real_extract(url)
292 except ExtractorError:
293 raise
294 except compat_http_client.IncompleteRead as e:
295 raise ExtractorError('A network error has occured.', cause=e, expected=True)
296 except (KeyError, StopIteration) as e:
297 raise ExtractorError('An extractor error has occured.', cause=e)
298
299 def set_downloader(self, downloader):
300 """Sets the downloader for this IE."""
301 self._downloader = downloader
302
303 def _real_initialize(self):
304 """Real initialization process. Redefine in subclasses."""
305 pass
306
307 def _real_extract(self, url):
308 """Real extraction process. Redefine in subclasses."""
309 pass
310
311 @classmethod
312 def ie_key(cls):
313 """A string for getting the InfoExtractor with get_info_extractor"""
314 return compat_str(cls.__name__[:-2])
315
316 @property
317 def IE_NAME(self):
318 return compat_str(type(self).__name__[:-2])
319
320 def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
321 """ Returns the response handle """
322 if note is None:
323 self.report_download_webpage(video_id)
324 elif note is not False:
325 if video_id is None:
326 self.to_screen('%s' % (note,))
327 else:
328 self.to_screen('%s: %s' % (video_id, note))
329 try:
330 return self._downloader.urlopen(url_or_request)
331 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
332 if errnote is False:
333 return False
334 if errnote is None:
335 errnote = 'Unable to download webpage'
336 err_str = str(err)
337 # On python 2 error byte string must be decoded with proper
338 # encoding rather than ascii
339 if sys.version_info[0] < 3:
340 err_str = err_str.decode(preferredencoding())
341 errmsg = '%s: %s' % (errnote, err_str)
342 if fatal:
343 raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
344 else:
345 self._downloader.report_warning(errmsg)
346 return False
347
348 def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None):
349 """ Returns a tuple (page content as string, URL handle) """
350 # Strip hashes from the URL (#1038)
351 if isinstance(url_or_request, (compat_str, str)):
352 url_or_request = url_or_request.partition('#')[0]
353
354 urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal)
355 if urlh is False:
356 assert not fatal
357 return False
358 content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
359 return (content, urlh)
360
361 @staticmethod
362 def _guess_encoding_from_content(content_type, webpage_bytes):
363 m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
364 if m:
365 encoding = m.group(1)
366 else:
367 m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
368 webpage_bytes[:1024])
369 if m:
370 encoding = m.group(1).decode('ascii')
371 elif webpage_bytes.startswith(b'\xff\xfe'):
372 encoding = 'utf-16'
373 else:
374 encoding = 'utf-8'
375
376 return encoding
377
378 def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
379 content_type = urlh.headers.get('Content-Type', '')
380 webpage_bytes = urlh.read()
381 if prefix is not None:
382 webpage_bytes = prefix + webpage_bytes
383 if not encoding:
384 encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
385 if self._downloader.params.get('dump_intermediate_pages', False):
386 try:
387 url = url_or_request.get_full_url()
388 except AttributeError:
389 url = url_or_request
390 self.to_screen('Dumping request to ' + url)
391 dump = base64.b64encode(webpage_bytes).decode('ascii')
392 self._downloader.to_screen(dump)
393 if self._downloader.params.get('write_pages', False):
394 try:
395 url = url_or_request.get_full_url()
396 except AttributeError:
397 url = url_or_request
398 basen = '%s_%s' % (video_id, url)
399 if len(basen) > 240:
400 h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
401 basen = basen[:240 - len(h)] + h
402 raw_filename = basen + '.dump'
403 filename = sanitize_filename(raw_filename, restricted=True)
404 self.to_screen('Saving request to ' + filename)
405 # Working around MAX_PATH limitation on Windows (see
406 # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
407 if os.name == 'nt':
408 absfilepath = os.path.abspath(filename)
409 if len(absfilepath) > 259:
410 filename = '\\\\?\\' + absfilepath
411 with open(filename, 'wb') as outf:
412 outf.write(webpage_bytes)
413
414 try:
415 content = webpage_bytes.decode(encoding, 'replace')
416 except LookupError:
417 content = webpage_bytes.decode('utf-8', 'replace')
418
419 if ('<title>Access to this site is blocked</title>' in content and
420 'Websense' in content[:512]):
421 msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
422 blocked_iframe = self._html_search_regex(
423 r'<iframe src="([^"]+)"', content,
424 'Websense information URL', default=None)
425 if blocked_iframe:
426 msg += ' Visit %s for more details' % blocked_iframe
427 raise ExtractorError(msg, expected=True)
428 if '<title>The URL you requested has been blocked</title>' in content[:512]:
429 msg = (
430 'Access to this webpage has been blocked by Indian censorship. '
431 'Use a VPN or proxy server (with --proxy) to route around it.')
432 block_msg = self._html_search_regex(
433 r'</h1><p>(.*?)</p>',
434 content, 'block message', default=None)
435 if block_msg:
436 msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
437 raise ExtractorError(msg, expected=True)
438
439 return content
440
441 def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None):
442 """ Returns the data of the page as a string """
443 success = False
444 try_count = 0
445 while success is False:
446 try:
447 res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding)
448 success = True
449 except compat_http_client.IncompleteRead as e:
450 try_count += 1
451 if try_count >= tries:
452 raise e
453 self._sleep(timeout, video_id)
454 if res is False:
455 return res
456 else:
457 content, _ = res
458 return content
459
460 def _download_xml(self, url_or_request, video_id,
461 note='Downloading XML', errnote='Unable to download XML',
462 transform_source=None, fatal=True, encoding=None):
463 """Return the xml as an xml.etree.ElementTree.Element"""
464 xml_string = self._download_webpage(
465 url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding)
466 if xml_string is False:
467 return xml_string
468 if transform_source:
469 xml_string = transform_source(xml_string)
470 return compat_etree_fromstring(xml_string.encode('utf-8'))
471
472 def _download_json(self, url_or_request, video_id,
473 note='Downloading JSON metadata',
474 errnote='Unable to download JSON metadata',
475 transform_source=None,
476 fatal=True, encoding=None):
477 json_string = self._download_webpage(
478 url_or_request, video_id, note, errnote, fatal=fatal,
479 encoding=encoding)
480 if (not fatal) and json_string is False:
481 return None
482 return self._parse_json(
483 json_string, video_id, transform_source=transform_source, fatal=fatal)
484
485 def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
486 if transform_source:
487 json_string = transform_source(json_string)
488 try:
489 return json.loads(json_string)
490 except ValueError as ve:
491 errmsg = '%s: Failed to parse JSON ' % video_id
492 if fatal:
493 raise ExtractorError(errmsg, cause=ve)
494 else:
495 self.report_warning(errmsg + str(ve))
496
497 def report_warning(self, msg, video_id=None):
498 idstr = '' if video_id is None else '%s: ' % video_id
499 self._downloader.report_warning(
500 '[%s] %s%s' % (self.IE_NAME, idstr, msg))
501
502 def to_screen(self, msg):
503 """Print msg to screen, prefixing it with '[ie_name]'"""
504 self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
505
506 def report_extraction(self, id_or_name):
507 """Report information extraction."""
508 self.to_screen('%s: Extracting information' % id_or_name)
509
510 def report_download_webpage(self, video_id):
511 """Report webpage download."""
512 self.to_screen('%s: Downloading webpage' % video_id)
513
514 def report_age_confirmation(self):
515 """Report attempt to confirm age."""
516 self.to_screen('Confirming age')
517
518 def report_login(self):
519 """Report attempt to log in."""
520 self.to_screen('Logging in')
521
522 @staticmethod
523 def raise_login_required(msg='This video is only available for registered users'):
524 raise ExtractorError(
525 '%s. Use --username and --password or --netrc to provide account credentials.' % msg,
526 expected=True)
527
528 @staticmethod
529 def raise_geo_restricted(msg='This video is not available from your location due to geo restriction'):
530 raise ExtractorError(
531 '%s. You might want to use --proxy to workaround.' % msg,
532 expected=True)
533
534 # Methods for following #608
535 @staticmethod
536 def url_result(url, ie=None, video_id=None, video_title=None):
537 """Returns a URL that points to a page that should be processed"""
538 # TODO: ie should be the class used for getting the info
539 video_info = {'_type': 'url',
540 'url': url,
541 'ie_key': ie}
542 if video_id is not None:
543 video_info['id'] = video_id
544 if video_title is not None:
545 video_info['title'] = video_title
546 return video_info
547
548 @staticmethod
549 def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
550 """Returns a playlist"""
551 video_info = {'_type': 'playlist',
552 'entries': entries}
553 if playlist_id:
554 video_info['id'] = playlist_id
555 if playlist_title:
556 video_info['title'] = playlist_title
557 if playlist_description:
558 video_info['description'] = playlist_description
559 return video_info
560
561 def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
562 """
563 Perform a regex search on the given string, using a single or a list of
564 patterns returning the first matching group.
565 In case of failure return a default value or raise a WARNING or a
566 RegexNotFoundError, depending on fatal, specifying the field name.
567 """
568 if isinstance(pattern, (str, compat_str, compiled_regex_type)):
569 mobj = re.search(pattern, string, flags)
570 else:
571 for p in pattern:
572 mobj = re.search(p, string, flags)
573 if mobj:
574 break
575
576 if not self._downloader.params.get('no_color') and os.name != 'nt' and sys.stderr.isatty():
577 _name = '\033[0;34m%s\033[0m' % name
578 else:
579 _name = name
580
581 if mobj:
582 if group is None:
583 # return the first matching group
584 return next(g for g in mobj.groups() if g is not None)
585 else:
586 return mobj.group(group)
587 elif default is not NO_DEFAULT:
588 return default
589 elif fatal:
590 raise RegexNotFoundError('Unable to extract %s' % _name)
591 else:
592 self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
593 return None
594
595 def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
596 """
597 Like _search_regex, but strips HTML tags and unescapes entities.
598 """
599 res = self._search_regex(pattern, string, name, default, fatal, flags, group)
600 if res:
601 return clean_html(res).strip()
602 else:
603 return res
604
605 def _get_login_info(self):
606 """
607 Get the login info as (username, password)
608 It will look in the netrc file using the _NETRC_MACHINE value
609 If there's no info available, return (None, None)
610 """
611 if self._downloader is None:
612 return (None, None)
613
614 username = None
615 password = None
616 downloader_params = self._downloader.params
617
618 # Attempt to use provided username and password or .netrc data
619 if downloader_params.get('username', None) is not None:
620 username = downloader_params['username']
621 password = downloader_params['password']
622 elif downloader_params.get('usenetrc', False):
623 try:
624 info = netrc.netrc().authenticators(self._NETRC_MACHINE)
625 if info is not None:
626 username = info[0]
627 password = info[2]
628 else:
629 raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
630 except (IOError, netrc.NetrcParseError) as err:
631 self._downloader.report_warning('parsing .netrc: %s' % compat_str(err))
632
633 return (username, password)
634
635 def _get_tfa_info(self, note='two-factor verification code'):
636 """
637 Get the two-factor authentication info
638 TODO - asking the user will be required for sms/phone verify
639 currently just uses the command line option
640 If there's no info available, return None
641 """
642 if self._downloader is None:
643 return None
644 downloader_params = self._downloader.params
645
646 if downloader_params.get('twofactor', None) is not None:
647 return downloader_params['twofactor']
648
649 return compat_getpass('Type %s and press [Return]: ' % note)
650
651 # Helper functions for extracting OpenGraph info
652 @staticmethod
653 def _og_regexes(prop):
654 content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
655 property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
656 % {'prop': re.escape(prop)})
657 template = r'<meta[^>]+?%s[^>]+?%s'
658 return [
659 template % (property_re, content_re),
660 template % (content_re, property_re),
661 ]
662
663 @staticmethod
664 def _meta_regex(prop):
665 return r'''(?isx)<meta
666 (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
667 [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
668
669 def _og_search_property(self, prop, html, name=None, **kargs):
670 if name is None:
671 name = 'OpenGraph %s' % prop
672 escaped = self._search_regex(self._og_regexes(prop), html, name, flags=re.DOTALL, **kargs)
673 if escaped is None:
674 return None
675 return unescapeHTML(escaped)
676
677 def _og_search_thumbnail(self, html, **kargs):
678 return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
679
680 def _og_search_description(self, html, **kargs):
681 return self._og_search_property('description', html, fatal=False, **kargs)
682
683 def _og_search_title(self, html, **kargs):
684 return self._og_search_property('title', html, **kargs)
685
686 def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
687 regexes = self._og_regexes('video') + self._og_regexes('video:url')
688 if secure:
689 regexes = self._og_regexes('video:secure_url') + regexes
690 return self._html_search_regex(regexes, html, name, **kargs)
691
692 def _og_search_url(self, html, **kargs):
693 return self._og_search_property('url', html, **kargs)
694
695 def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
696 if display_name is None:
697 display_name = name
698 return self._html_search_regex(
699 self._meta_regex(name),
700 html, display_name, fatal=fatal, group='content', **kwargs)
701
702 def _dc_search_uploader(self, html):
703 return self._html_search_meta('dc.creator', html, 'uploader')
704
705 def _rta_search(self, html):
706 # See http://www.rtalabel.org/index.php?content=howtofaq#single
707 if re.search(r'(?ix)<meta\s+name="rating"\s+'
708 r' content="RTA-5042-1996-1400-1577-RTA"',
709 html):
710 return 18
711 return 0
712
713 def _media_rating_search(self, html):
714 # See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
715 rating = self._html_search_meta('rating', html)
716
717 if not rating:
718 return None
719
720 RATING_TABLE = {
721 'safe for kids': 0,
722 'general': 8,
723 '14 years': 14,
724 'mature': 17,
725 'restricted': 19,
726 }
727 return RATING_TABLE.get(rating.lower(), None)
728
729 def _family_friendly_search(self, html):
730 # See http://schema.org/VideoObject
731 family_friendly = self._html_search_meta('isFamilyFriendly', html)
732
733 if not family_friendly:
734 return None
735
736 RATING_TABLE = {
737 '1': 0,
738 'true': 0,
739 '0': 18,
740 'false': 18,
741 }
742 return RATING_TABLE.get(family_friendly.lower(), None)
743
744 def _twitter_search_player(self, html):
745 return self._html_search_meta('twitter:player', html,
746 'twitter card player')
747
748 @staticmethod
749 def _hidden_inputs(html):
750 html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
751 hidden_inputs = {}
752 for input in re.findall(r'(?i)<input([^>]+)>', html):
753 if not re.search(r'type=(["\'])(?:hidden|submit)\1', input):
754 continue
755 name = re.search(r'name=(["\'])(?P<value>.+?)\1', input)
756 if not name:
757 continue
758 value = re.search(r'value=(["\'])(?P<value>.*?)\1', input)
759 if not value:
760 continue
761 hidden_inputs[name.group('value')] = value.group('value')
762 return hidden_inputs
763
764 def _form_hidden_inputs(self, form_id, html):
765 form = self._search_regex(
766 r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
767 html, '%s form' % form_id, group='form')
768 return self._hidden_inputs(form)
769
770 def _sort_formats(self, formats, field_preference=None):
771 if not formats:
772 raise ExtractorError('No video formats found')
773
774 def _formats_key(f):
775 # TODO remove the following workaround
776 from ..utils import determine_ext
777 if not f.get('ext') and 'url' in f:
778 f['ext'] = determine_ext(f['url'])
779
780 if isinstance(field_preference, (list, tuple)):
781 return tuple(f.get(field) if f.get(field) is not None else -1 for field in field_preference)
782
783 preference = f.get('preference')
784 if preference is None:
785 proto = f.get('protocol')
786 if proto is None:
787 proto = compat_urllib_parse_urlparse(f.get('url', '')).scheme
788
789 preference = 0 if proto in ['http', 'https'] else -0.1
790 if f.get('ext') in ['f4f', 'f4m']: # Not yet supported
791 preference -= 0.5
792
793 if f.get('vcodec') == 'none': # audio only
794 if self._downloader.params.get('prefer_free_formats'):
795 ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
796 else:
797 ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
798 ext_preference = 0
799 try:
800 audio_ext_preference = ORDER.index(f['ext'])
801 except ValueError:
802 audio_ext_preference = -1
803 else:
804 if self._downloader.params.get('prefer_free_formats'):
805 ORDER = ['flv', 'mp4', 'webm']
806 else:
807 ORDER = ['webm', 'flv', 'mp4']
808 try:
809 ext_preference = ORDER.index(f['ext'])
810 except ValueError:
811 ext_preference = -1
812 audio_ext_preference = 0
813
814 return (
815 preference,
816 f.get('language_preference') if f.get('language_preference') is not None else -1,
817 f.get('quality') if f.get('quality') is not None else -1,
818 f.get('tbr') if f.get('tbr') is not None else -1,
819 f.get('filesize') if f.get('filesize') is not None else -1,
820 f.get('vbr') if f.get('vbr') is not None else -1,
821 f.get('height') if f.get('height') is not None else -1,
822 f.get('width') if f.get('width') is not None else -1,
823 ext_preference,
824 f.get('abr') if f.get('abr') is not None else -1,
825 audio_ext_preference,
826 f.get('fps') if f.get('fps') is not None else -1,
827 f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
828 f.get('source_preference') if f.get('source_preference') is not None else -1,
829 f.get('format_id') if f.get('format_id') is not None else '',
830 )
831 formats.sort(key=_formats_key)
832
833 def _check_formats(self, formats, video_id):
834 if formats:
835 formats[:] = filter(
836 lambda f: self._is_valid_url(
837 f['url'], video_id,
838 item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
839 formats)
840
841 def _is_valid_url(self, url, video_id, item='video'):
842 url = self._proto_relative_url(url, scheme='http:')
843 # For now assume non HTTP(S) URLs always valid
844 if not (url.startswith('http://') or url.startswith('https://')):
845 return True
846 try:
847 self._request_webpage(url, video_id, 'Checking %s URL' % item)
848 return True
849 except ExtractorError as e:
850 if isinstance(e.cause, compat_urllib_error.URLError):
851 self.to_screen(
852 '%s: %s URL is invalid, skipping' % (video_id, item))
853 return False
854 raise
855
856 def http_scheme(self):
857 """ Either "http:" or "https:", depending on the user's preferences """
858 return (
859 'http:'
860 if self._downloader.params.get('prefer_insecure', False)
861 else 'https:')
862
863 def _proto_relative_url(self, url, scheme=None):
864 if url is None:
865 return url
866 if url.startswith('//'):
867 if scheme is None:
868 scheme = self.http_scheme()
869 return scheme + url
870 else:
871 return url
872
873 def _sleep(self, timeout, video_id, msg_template=None):
874 if msg_template is None:
875 msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
876 msg = msg_template % {'video_id': video_id, 'timeout': timeout}
877 self.to_screen(msg)
878 time.sleep(timeout)
879
880 def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
881 transform_source=lambda s: fix_xml_ampersands(s).strip(),
882 fatal=True):
883 manifest = self._download_xml(
884 manifest_url, video_id, 'Downloading f4m manifest',
885 'Unable to download f4m manifest',
886 # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
887 # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
888 transform_source=transform_source,
889 fatal=fatal)
890
891 if manifest is False:
892 return manifest
893
894 formats = []
895 manifest_version = '1.0'
896 media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
897 if not media_nodes:
898 manifest_version = '2.0'
899 media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
900 base_url = xpath_text(
901 manifest, ['{http://ns.adobe.com/f4m/1.0}baseURL', '{http://ns.adobe.com/f4m/2.0}baseURL'],
902 'base URL', default=None)
903 if base_url:
904 base_url = base_url.strip()
905 for i, media_el in enumerate(media_nodes):
906 if manifest_version == '2.0':
907 media_url = media_el.attrib.get('href') or media_el.attrib.get('url')
908 if not media_url:
909 continue
910 manifest_url = (
911 media_url if media_url.startswith('http://') or media_url.startswith('https://')
912 else ((base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
913 # If media_url is itself a f4m manifest do the recursive extraction
914 # since bitrates in parent manifest (this one) and media_url manifest
915 # may differ leading to inability to resolve the format by requested
916 # bitrate in f4m downloader
917 if determine_ext(manifest_url) == 'f4m':
918 f4m_formats = self._extract_f4m_formats(
919 manifest_url, video_id, preference, f4m_id, fatal=fatal)
920 if f4m_formats:
921 formats.extend(f4m_formats)
922 continue
923 tbr = int_or_none(media_el.attrib.get('bitrate'))
924 formats.append({
925 'format_id': '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)])),
926 'url': manifest_url,
927 'ext': 'flv',
928 'tbr': tbr,
929 'width': int_or_none(media_el.attrib.get('width')),
930 'height': int_or_none(media_el.attrib.get('height')),
931 'preference': preference,
932 })
933 self._sort_formats(formats)
934
935 return formats
936
937 def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
938 entry_protocol='m3u8', preference=None,
939 m3u8_id=None, note=None, errnote=None,
940 fatal=True):
941
942 formats = [{
943 'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
944 'url': m3u8_url,
945 'ext': ext,
946 'protocol': 'm3u8',
947 'preference': preference - 1 if preference else -1,
948 'resolution': 'multiple',
949 'format_note': 'Quality selection URL',
950 }]
951
952 format_url = lambda u: (
953 u
954 if re.match(r'^https?://', u)
955 else compat_urlparse.urljoin(m3u8_url, u))
956
957 res = self._download_webpage_handle(
958 m3u8_url, video_id,
959 note=note or 'Downloading m3u8 information',
960 errnote=errnote or 'Failed to download m3u8 information',
961 fatal=fatal)
962 if res is False:
963 return res
964 m3u8_doc, urlh = res
965 m3u8_url = urlh.geturl()
966 last_info = None
967 last_media = None
968 kv_rex = re.compile(
969 r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)')
970 for line in m3u8_doc.splitlines():
971 if line.startswith('#EXT-X-STREAM-INF:'):
972 last_info = {}
973 for m in kv_rex.finditer(line):
974 v = m.group('val')
975 if v.startswith('"'):
976 v = v[1:-1]
977 last_info[m.group('key')] = v
978 elif line.startswith('#EXT-X-MEDIA:'):
979 last_media = {}
980 for m in kv_rex.finditer(line):
981 v = m.group('val')
982 if v.startswith('"'):
983 v = v[1:-1]
984 last_media[m.group('key')] = v
985 elif line.startswith('#') or not line.strip():
986 continue
987 else:
988 if last_info is None:
989 formats.append({'url': format_url(line)})
990 continue
991 tbr = int_or_none(last_info.get('BANDWIDTH'), scale=1000)
992 format_id = []
993 if m3u8_id:
994 format_id.append(m3u8_id)
995 last_media_name = last_media.get('NAME') if last_media and last_media.get('TYPE') != 'SUBTITLES' else None
996 format_id.append(last_media_name if last_media_name else '%d' % (tbr if tbr else len(formats)))
997 f = {
998 'format_id': '-'.join(format_id),
999 'url': format_url(line.strip()),
1000 'tbr': tbr,
1001 'ext': ext,
1002 'protocol': entry_protocol,
1003 'preference': preference,
1004 }
1005 codecs = last_info.get('CODECS')
1006 if codecs:
1007 # TODO: looks like video codec is not always necessarily goes first
1008 va_codecs = codecs.split(',')
1009 if va_codecs[0]:
1010 f['vcodec'] = va_codecs[0].partition('.')[0]
1011 if len(va_codecs) > 1 and va_codecs[1]:
1012 f['acodec'] = va_codecs[1].partition('.')[0]
1013 resolution = last_info.get('RESOLUTION')
1014 if resolution:
1015 width_str, height_str = resolution.split('x')
1016 f['width'] = int(width_str)
1017 f['height'] = int(height_str)
1018 if last_media is not None:
1019 f['m3u8_media'] = last_media
1020 last_media = None
1021 formats.append(f)
1022 last_info = {}
1023 self._sort_formats(formats)
1024 return formats
1025
1026 @staticmethod
1027 def _xpath_ns(path, namespace=None):
1028 if not namespace:
1029 return path
1030 out = []
1031 for c in path.split('/'):
1032 if not c or c == '.':
1033 out.append(c)
1034 else:
1035 out.append('{%s}%s' % (namespace, c))
1036 return '/'.join(out)
1037
1038 def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None):
1039 smil = self._download_smil(smil_url, video_id, fatal=fatal)
1040
1041 if smil is False:
1042 assert not fatal
1043 return []
1044
1045 namespace = self._parse_smil_namespace(smil)
1046
1047 return self._parse_smil_formats(
1048 smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
1049
1050 def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
1051 smil = self._download_smil(smil_url, video_id, fatal=fatal)
1052 if smil is False:
1053 return {}
1054 return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
1055
1056 def _download_smil(self, smil_url, video_id, fatal=True):
1057 return self._download_xml(
1058 smil_url, video_id, 'Downloading SMIL file',
1059 'Unable to download SMIL file', fatal=fatal)
1060
1061 def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
1062 namespace = self._parse_smil_namespace(smil)
1063
1064 formats = self._parse_smil_formats(
1065 smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
1066 subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
1067
1068 video_id = os.path.splitext(url_basename(smil_url))[0]
1069 title = None
1070 description = None
1071 upload_date = None
1072 for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
1073 name = meta.attrib.get('name')
1074 content = meta.attrib.get('content')
1075 if not name or not content:
1076 continue
1077 if not title and name == 'title':
1078 title = content
1079 elif not description and name in ('description', 'abstract'):
1080 description = content
1081 elif not upload_date and name == 'date':
1082 upload_date = unified_strdate(content)
1083
1084 thumbnails = [{
1085 'id': image.get('type'),
1086 'url': image.get('src'),
1087 'width': int_or_none(image.get('width')),
1088 'height': int_or_none(image.get('height')),
1089 } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]
1090
1091 return {
1092 'id': video_id,
1093 'title': title or video_id,
1094 'description': description,
1095 'upload_date': upload_date,
1096 'thumbnails': thumbnails,
1097 'formats': formats,
1098 'subtitles': subtitles,
1099 }
1100
1101 def _parse_smil_namespace(self, smil):
1102 return self._search_regex(
1103 r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
1104
1105 def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
1106 base = smil_url
1107 for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
1108 b = meta.get('base') or meta.get('httpBase')
1109 if b:
1110 base = b
1111 break
1112
1113 formats = []
1114 rtmp_count = 0
1115 http_count = 0
1116
1117 videos = smil.findall(self._xpath_ns('.//video', namespace))
1118 for video in videos:
1119 src = video.get('src')
1120 if not src:
1121 continue
1122
1123 bitrate = float_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
1124 filesize = int_or_none(video.get('size') or video.get('fileSize'))
1125 width = int_or_none(video.get('width'))
1126 height = int_or_none(video.get('height'))
1127 proto = video.get('proto')
1128 ext = video.get('ext')
1129 src_ext = determine_ext(src)
1130 streamer = video.get('streamer') or base
1131
1132 if proto == 'rtmp' or streamer.startswith('rtmp'):
1133 rtmp_count += 1
1134 formats.append({
1135 'url': streamer,
1136 'play_path': src,
1137 'ext': 'flv',
1138 'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
1139 'tbr': bitrate,
1140 'filesize': filesize,
1141 'width': width,
1142 'height': height,
1143 })
1144 if transform_rtmp_url:
1145 streamer, src = transform_rtmp_url(streamer, src)
1146 formats[-1].update({
1147 'url': streamer,
1148 'play_path': src,
1149 })
1150 continue
1151
1152 src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
1153
1154 if proto == 'm3u8' or src_ext == 'm3u8':
1155 m3u8_formats = self._extract_m3u8_formats(
1156 src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
1157 if m3u8_formats:
1158 formats.extend(m3u8_formats)
1159 continue
1160
1161 if src_ext == 'f4m':
1162 f4m_url = src_url
1163 if not f4m_params:
1164 f4m_params = {
1165 'hdcore': '3.2.0',
1166 'plugin': 'flowplayer-3.2.0.1',
1167 }
1168 f4m_url += '&' if '?' in f4m_url else '?'
1169 f4m_url += compat_urllib_parse.urlencode(f4m_params)
1170 f4m_formats = self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False)
1171 if f4m_formats:
1172 formats.extend(f4m_formats)
1173 continue
1174
1175 if src_url.startswith('http') and self._is_valid_url(src, video_id):
1176 http_count += 1
1177 formats.append({
1178 'url': src_url,
1179 'ext': ext or src_ext or 'flv',
1180 'format_id': 'http-%d' % (bitrate or http_count),
1181 'tbr': bitrate,
1182 'filesize': filesize,
1183 'width': width,
1184 'height': height,
1185 })
1186 continue
1187
1188 self._sort_formats(formats)
1189
1190 return formats
1191
1192 def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
1193 subtitles = {}
1194 for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
1195 src = textstream.get('src')
1196 if not src:
1197 continue
1198 ext = textstream.get('ext') or determine_ext(src)
1199 if not ext:
1200 type_ = textstream.get('type')
1201 SUBTITLES_TYPES = {
1202 'text/vtt': 'vtt',
1203 'text/srt': 'srt',
1204 'application/smptett+xml': 'tt',
1205 }
1206 if type_ in SUBTITLES_TYPES:
1207 ext = SUBTITLES_TYPES[type_]
1208 lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
1209 subtitles.setdefault(lang, []).append({
1210 'url': src,
1211 'ext': ext,
1212 })
1213 return subtitles
1214
1215 def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
1216 xspf = self._download_xml(
1217 playlist_url, playlist_id, 'Downloading xpsf playlist',
1218 'Unable to download xspf manifest', fatal=fatal)
1219 if xspf is False:
1220 return []
1221 return self._parse_xspf(xspf, playlist_id)
1222
1223 def _parse_xspf(self, playlist, playlist_id):
1224 NS_MAP = {
1225 'xspf': 'http://xspf.org/ns/0/',
1226 's1': 'http://static.streamone.nl/player/ns/0',
1227 }
1228
1229 entries = []
1230 for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
1231 title = xpath_text(
1232 track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
1233 description = xpath_text(
1234 track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
1235 thumbnail = xpath_text(
1236 track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
1237 duration = float_or_none(
1238 xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
1239
1240 formats = [{
1241 'url': location.text,
1242 'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
1243 'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
1244 'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
1245 } for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
1246 self._sort_formats(formats)
1247
1248 entries.append({
1249 'id': playlist_id,
1250 'title': title,
1251 'description': description,
1252 'thumbnail': thumbnail,
1253 'duration': duration,
1254 'formats': formats,
1255 })
1256 return entries
1257
1258 def _live_title(self, name):
1259 """ Generate the title for a live video """
1260 now = datetime.datetime.now()
1261 now_str = now.strftime("%Y-%m-%d %H:%M")
1262 return name + ' ' + now_str
1263
1264 def _int(self, v, name, fatal=False, **kwargs):
1265 res = int_or_none(v, **kwargs)
1266 if 'get_attr' in kwargs:
1267 print(getattr(v, kwargs['get_attr']))
1268 if res is None:
1269 msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
1270 if fatal:
1271 raise ExtractorError(msg)
1272 else:
1273 self._downloader.report_warning(msg)
1274 return res
1275
1276 def _float(self, v, name, fatal=False, **kwargs):
1277 res = float_or_none(v, **kwargs)
1278 if res is None:
1279 msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
1280 if fatal:
1281 raise ExtractorError(msg)
1282 else:
1283 self._downloader.report_warning(msg)
1284 return res
1285
1286 def _set_cookie(self, domain, name, value, expire_time=None):
1287 cookie = compat_cookiejar.Cookie(
1288 0, name, value, None, None, domain, None,
1289 None, '/', True, False, expire_time, '', None, None, None)
1290 self._downloader.cookiejar.set_cookie(cookie)
1291
1292 def _get_cookies(self, url):
1293 """ Return a compat_cookies.SimpleCookie with the cookies for the url """
1294 req = sanitized_Request(url)
1295 self._downloader.cookiejar.add_cookie_header(req)
1296 return compat_cookies.SimpleCookie(req.get_header('Cookie'))
1297
1298 def get_testcases(self, include_onlymatching=False):
1299 t = getattr(self, '_TEST', None)
1300 if t:
1301 assert not hasattr(self, '_TESTS'), \
1302 '%s has _TEST and _TESTS' % type(self).__name__
1303 tests = [t]
1304 else:
1305 tests = getattr(self, '_TESTS', [])
1306 for t in tests:
1307 if not include_onlymatching and t.get('only_matching', False):
1308 continue
1309 t['name'] = type(self).__name__[:-len('IE')]
1310 yield t
1311
1312 def is_suitable(self, age_limit):
1313 """ Test whether the extractor is generally suitable for the given
1314 age limit (i.e. pornographic sites are not, all others usually are) """
1315
1316 any_restricted = False
1317 for tc in self.get_testcases(include_onlymatching=False):
1318 if 'playlist' in tc:
1319 tc = tc['playlist'][0]
1320 is_restricted = age_restricted(
1321 tc.get('info_dict', {}).get('age_limit'), age_limit)
1322 if not is_restricted:
1323 return True
1324 any_restricted = any_restricted or is_restricted
1325 return not any_restricted
1326
1327 def extract_subtitles(self, *args, **kwargs):
1328 if (self._downloader.params.get('writesubtitles', False) or
1329 self._downloader.params.get('listsubtitles')):
1330 return self._get_subtitles(*args, **kwargs)
1331 return {}
1332
1333 def _get_subtitles(self, *args, **kwargs):
1334 raise NotImplementedError("This method must be implemented by subclasses")
1335
1336 @staticmethod
1337 def _merge_subtitle_items(subtitle_list1, subtitle_list2):
1338 """ Merge subtitle items for one language. Items with duplicated URLs
1339 will be dropped. """
1340 list1_urls = set([item['url'] for item in subtitle_list1])
1341 ret = list(subtitle_list1)
1342 ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
1343 return ret
1344
1345 @classmethod
1346 def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
1347 """ Merge two subtitle dictionaries, language by language. """
1348 ret = dict(subtitle_dict1)
1349 for lang in subtitle_dict2:
1350 ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
1351 return ret
1352
1353 def extract_automatic_captions(self, *args, **kwargs):
1354 if (self._downloader.params.get('writeautomaticsub', False) or
1355 self._downloader.params.get('listsubtitles')):
1356 return self._get_automatic_captions(*args, **kwargs)
1357 return {}
1358
1359 def _get_automatic_captions(self, *args, **kwargs):
1360 raise NotImplementedError("This method must be implemented by subclasses")
1361
1362
1363 class SearchInfoExtractor(InfoExtractor):
1364 """
1365 Base class for paged search queries extractors.
1366 They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
1367 Instances should define _SEARCH_KEY and _MAX_RESULTS.
1368 """
1369
1370 @classmethod
1371 def _make_valid_url(cls):
1372 return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
1373
1374 @classmethod
1375 def suitable(cls, url):
1376 return re.match(cls._make_valid_url(), url) is not None
1377
1378 def _real_extract(self, query):
1379 mobj = re.match(self._make_valid_url(), query)
1380 if mobj is None:
1381 raise ExtractorError('Invalid search query "%s"' % query)
1382
1383 prefix = mobj.group('prefix')
1384 query = mobj.group('query')
1385 if prefix == '':
1386 return self._get_n_results(query, 1)
1387 elif prefix == 'all':
1388 return self._get_n_results(query, self._MAX_RESULTS)
1389 else:
1390 n = int(prefix)
1391 if n <= 0:
1392 raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
1393 elif n > self._MAX_RESULTS:
1394 self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
1395 n = self._MAX_RESULTS
1396 return self._get_n_results(query, n)
1397
1398 def _get_n_results(self, query, n):
1399 """Get a specified number of results for a query"""
1400 raise NotImplementedError("This method must be implemented by subclasses")
1401
1402 @property
1403 def SEARCH_KEY(self):
1404 return self._SEARCH_KEY