# youtube_dl/extractor/common.py
from __future__ import unicode_literals

import base64
import datetime
import hashlib
import json
import netrc
import os
import re
import socket
import sys
import time

from ..compat import (
    compat_cookiejar,
    compat_cookies,
    compat_getpass,
    compat_http_client,
    compat_urllib_error,
    compat_urllib_parse,
    compat_urlparse,
    compat_str,
    compat_etree_fromstring,
)
from ..utils import (
    NO_DEFAULT,
    age_restricted,
    bug_reports_message,
    clean_html,
    compiled_regex_type,
    determine_ext,
    error_to_compat_str,
    ExtractorError,
    fix_xml_ampersands,
    float_or_none,
    int_or_none,
    RegexNotFoundError,
    sanitize_filename,
    sanitized_Request,
    unescapeHTML,
    unified_strdate,
    url_basename,
    xpath_text,
    xpath_with_ns,
    determine_protocol,
)


class InfoExtractor(object):
    """Information Extractor class.

    Information extractors are the classes that, given a URL, extract
    information about the video (or videos) the URL refers to. This
    information includes the real video URL, the video title, author and
    others. The information is stored in a dictionary which is then
    passed to the YoutubeDL. The YoutubeDL processes this information,
    possibly downloading the video to the file system, among other
    possible outcomes.

    The type field determines the type of the result.
    By far the most common value (and the default if _type is missing) is
    "video", which indicates a single video.

    For a video, the dictionaries must include the following fields:

    id:             Video identifier.
    title:          Video title, unescaped.

    Additionally, it must contain either a formats entry or a url one:

    formats:        A list of dictionaries for each format available, ordered
                    from worst to best quality.

                    Potential fields:
                    * url          Mandatory. The URL of the video file
                    * ext          Will be calculated from URL if missing
                    * format       A human-readable description of the format
                                   ("mp4 container with h264/opus").
                                   Calculated from the format_id, width, height,
                                   and format_note fields if missing.
                    * format_id    A short description of the format
                                   ("mp4_h264_opus" or "19").
                                   Technically optional, but strongly recommended.
                    * format_note  Additional info about the format
                                   ("3D" or "DASH video")
                    * width        Width of the video, if known
                    * height       Height of the video, if known
                    * resolution   Textual description of width and height
                    * tbr          Average bitrate of audio and video in KBit/s
                    * abr          Average audio bitrate in KBit/s
                    * acodec       Name of the audio codec in use
                    * asr          Audio sampling rate in Hertz
                    * vbr          Average video bitrate in KBit/s
                    * fps          Frame rate
                    * vcodec       Name of the video codec in use
                    * container    Name of the container format
                    * filesize     The number of bytes, if known in advance
                    * filesize_approx  An estimate for the number of bytes
                    * player_url   SWF Player URL (used for rtmpdump).
                    * protocol     The protocol that will be used for the actual
                                   download, lower-case.
                                   "http", "https", "rtsp", "rtmp", "rtmpe",
                                   "m3u8", or "m3u8_native".
                    * preference   Order number of this format. If this field is
                                   present and not None, the formats get sorted
                                   by this field, regardless of all other values.
                                   -1 for default (order by other properties),
                                   -2 or smaller for less than default.
                                   < -1000 to hide the format (if there is
                                   another one which is strictly better)
                    * language_preference  Is this in the correct requested
                                   language?
                                   10 if it's what the URL is about,
                                   -1 for default (don't know),
                                   -10 otherwise, other values reserved for now.
                    * quality      Order number of the video quality of this
                                   format, irrespective of the file format.
                                   -1 for default (order by other properties),
                                   -2 or smaller for less than default.
                    * source_preference  Order number for this video source
                                   (quality takes higher priority)
                                   -1 for default (order by other properties),
                                   -2 or smaller for less than default.
                    * http_headers  A dictionary of additional HTTP headers
                                   to add to the request.
                    * stretched_ratio  If given and not 1, indicates that the
                                   video's pixels are not square.
                                   width : height ratio as float.
                    * no_resume    The server does not support resuming the
                                   (HTTP or RTMP) download. Boolean.

    url:            Final video URL.
    ext:            Video filename extension.
    format:         The video format, defaults to ext (used for --get-format)
    player_url:     SWF Player URL (used for rtmpdump).

    The following fields are optional:

    alt_title:      A secondary title of the video.
    display_id:     An alternative identifier for the video, not necessarily
                    unique, but available before title. Typically, id is
                    something like "4234987", title "Dancing naked mole rats",
                    and display_id "dancing-naked-mole-rats".
    thumbnails:     A list of dictionaries, with the following entries:
                        * "id" (optional, string) - Thumbnail format ID
                        * "url"
                        * "preference" (optional, int) - quality of the image
                        * "width" (optional, int)
                        * "height" (optional, int)
                        * "resolution" (optional, string "{width}x{height}",
                                        deprecated)
    thumbnail:      Full URL to a video thumbnail image.
    description:    Full video description.
    uploader:       Full name of the video uploader.
    creator:        The main artist who created the video.
    release_date:   The date (YYYYMMDD) when the video was released.
    timestamp:      UNIX timestamp of the moment the video became available.
    upload_date:    Video upload date (YYYYMMDD).
                    If not explicitly set, calculated from timestamp.
    uploader_id:    Nickname or id of the video uploader.
    location:       Physical location where the video was filmed.
    subtitles:      The available subtitles as a dictionary in the format
                    {language: subformats}. "subformats" is a list sorted from
                    lower to higher preference, each element is a dictionary
                    with the "ext" entry and one of:
                        * "data": The subtitles file contents
                        * "url": A URL pointing to the subtitles file
                    "ext" will be calculated from URL if missing
    automatic_captions: Like 'subtitles', used by the YoutubeIE for
                    automatically generated captions
    duration:       Length of the video in seconds, as an integer or float.
    view_count:     How many users have watched the video on the platform.
    like_count:     Number of positive ratings of the video
    dislike_count:  Number of negative ratings of the video
    repost_count:   Number of reposts of the video
    average_rating: Average rating given by users, the scale used depends on
                    the webpage
    comment_count:  Number of comments on the video
    comments:       A list of comments, each with one or more of the following
                    properties (all optional except that at least one of
                    "text" or "html" must be present):
                        * "author" - human-readable name of the comment author
                        * "author_id" - user ID of the comment author
                        * "id" - Comment ID
                        * "html" - Comment as HTML
                        * "text" - Plain text of the comment
                        * "timestamp" - UNIX timestamp of comment
                        * "parent" - ID of the comment this one is replying to.
                                     Set to "root" to indicate that this is a
                                     comment to the original video.
    age_limit:      Age restriction for the video, as an integer (years)
    webpage_url:    The URL to the video webpage, if given to youtube-dl it
                    should allow getting the same result again. (It will be set
                    by YoutubeDL if it's missing)
    categories:     A list of categories that the video falls in, for example
                    ["Sports", "Berlin"]
    tags:           A list of tags assigned to the video, e.g. ["sweden", "pop music"]
    is_live:        True, False, or None (=unknown). Whether this video is a
                    live stream that goes on instead of a fixed-length video.
    start_time:     Time in seconds where the reproduction should start, as
                    specified in the URL.
    end_time:       Time in seconds where the reproduction should end, as
                    specified in the URL.

    Unless mentioned otherwise, the fields should be Unicode strings.

    Unless mentioned otherwise, None is equivalent to absence of information.


    _type "playlist" indicates multiple videos.
    There must be a key "entries", which is a list, an iterable, or a PagedList
    object, each element of which is a valid dictionary by this specification.

    Additionally, playlists can have "title", "description" and "id" attributes
    with the same semantics as videos (see above).


    _type "multi_video" indicates that there are multiple videos that
    form a single show, for example multiple acts of an opera or TV episode.
    It must have an entries key like a playlist and contain all the keys
    required for a video at the same time.


    _type "url" indicates that the video must be extracted from another
    location, possibly by a different extractor. Its only required key is:
    "url" - the next URL to extract.
    The key "ie_key" can be set to the class name (minus the trailing "IE",
    e.g. "Youtube") if the extractor class is known in advance.
    Additionally, the dictionary may have any properties of the resolved entity
    known in advance, for example "title" if the title of the referred video is
    known ahead of time.


    _type "url_transparent" entities have the same specification as "url", but
    indicate that the given additional information is more precise than the one
    associated with the resolved URL.
    This is useful when a site employs a video service that hosts the video and
    its technical metadata, but that video service does not embed a useful
    title, description etc.


    Subclasses of this one should re-define the _real_initialize() and
    _real_extract() methods and define a _VALID_URL regexp.
    Probably, they should also be added to the list of extractors.

    Finally, the _WORKING attribute should be set to False for broken IEs
    in order to warn the users and skip the tests.
    """

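    # A minimal sketch of a concrete extractor built on this class, showing
    # how the info dict described above is typically produced. The class
    # name, URL pattern and regexes are hypothetical, for illustration only:
    #
    #     class ExampleIE(InfoExtractor):
    #         _VALID_URL = r'https?://(?:www\.)?example\.com/watch/(?P<id>[0-9]+)'
    #
    #         def _real_extract(self, url):
    #             video_id = self._match_id(url)
    #             webpage = self._download_webpage(url, video_id)
    #             return {
    #                 'id': video_id,
    #                 'title': self._og_search_title(webpage),
    #                 'url': self._og_search_video_url(webpage),
    #                 'description': self._og_search_description(webpage),
    #                 'thumbnail': self._og_search_thumbnail(webpage),
    #             }
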
    _ready = False
    _downloader = None
    _WORKING = True

    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader."""
        self._ready = False
        self.set_downloader(downloader)

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""

        # This does not use has/getattr intentionally - we want to know whether
        # we have cached the regexp for *this* class, whereas getattr would also
        # match the superclass
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        return cls._VALID_URL_RE.match(url) is not None

    @classmethod
    def _match_id(cls, url):
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        m = cls._VALID_URL_RE.match(url)
        assert m
        return m.group('id')

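    # Illustration of suitable() and _match_id() above: with a hypothetical
    # _VALID_URL = r'https?://example\.com/v/(?P<id>[0-9]+)',
    # _match_id('https://example.com/v/42') returns '42', and suitable()
    # returns True for that URL and False for anything the pattern rejects.
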
    @classmethod
    def working(cls):
        """Getter method for _WORKING."""
        return cls._WORKING

    def initialize(self):
        """Initializes an instance (authentication, etc)."""
        if not self._ready:
            self._real_initialize()
            self._ready = True

    def extract(self, url):
        """Extracts URL information and returns it as a list of dicts."""
        try:
            self.initialize()
            return self._real_extract(url)
        except ExtractorError:
            raise
        except compat_http_client.IncompleteRead as e:
            raise ExtractorError('A network error has occurred.', cause=e, expected=True)
        except (KeyError, StopIteration) as e:
            raise ExtractorError('An extractor error has occurred.', cause=e)

    def set_downloader(self, downloader):
        """Sets the downloader for this IE."""
        self._downloader = downloader

    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses."""
        pass

    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses."""
        pass

    @classmethod
    def ie_key(cls):
        """A string for getting the InfoExtractor with get_info_extractor"""
        return compat_str(cls.__name__[:-2])

    @property
    def IE_NAME(self):
        return compat_str(type(self).__name__[:-2])

    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
        """ Returns the response handle """
        if note is None:
            self.report_download_webpage(video_id)
        elif note is not False:
            if video_id is None:
                self.to_screen('%s' % (note,))
            else:
                self.to_screen('%s: %s' % (video_id, note))
        try:
            return self._downloader.urlopen(url_or_request)
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            if errnote is False:
                return False
            if errnote is None:
                errnote = 'Unable to download webpage'

            errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
            if fatal:
                raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
            else:
                self._downloader.report_warning(errmsg)
                return False

    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None):
        """ Returns a tuple (page content as string, URL handle) """
        # Strip hashes from the URL (#1038)
        if isinstance(url_or_request, (compat_str, str)):
            url_or_request = url_or_request.partition('#')[0]

        urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal)
        if urlh is False:
            assert not fatal
            return False
        content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
        return (content, urlh)

    @staticmethod
    def _guess_encoding_from_content(content_type, webpage_bytes):
        m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
        if m:
            encoding = m.group(1)
        else:
            m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
                          webpage_bytes[:1024])
            if m:
                encoding = m.group(1).decode('ascii')
            elif webpage_bytes.startswith(b'\xff\xfe'):
                encoding = 'utf-16'
            else:
                encoding = 'utf-8'

        return encoding

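    # Illustration of the heuristic in _guess_encoding_from_content() above
    # (values are examples):
    #   * Content-Type 'text/html; charset=iso-8859-1' -> 'iso-8859-1'
    #   * first 1024 bytes contain <meta charset="shift_jis"> -> 'shift_jis'
    #   * body starting with the BOM b'\xff\xfe' -> 'utf-16'
    #   * anything else -> the 'utf-8' fallback
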
    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
        content_type = urlh.headers.get('Content-Type', '')
        webpage_bytes = urlh.read()
        if prefix is not None:
            webpage_bytes = prefix + webpage_bytes
        if not encoding:
            encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
        if self._downloader.params.get('dump_intermediate_pages', False):
            try:
                url = url_or_request.get_full_url()
            except AttributeError:
                url = url_or_request
            self.to_screen('Dumping request to ' + url)
            dump = base64.b64encode(webpage_bytes).decode('ascii')
            self._downloader.to_screen(dump)
        if self._downloader.params.get('write_pages', False):
            try:
                url = url_or_request.get_full_url()
            except AttributeError:
                url = url_or_request
            basen = '%s_%s' % (video_id, url)
            if len(basen) > 240:
                h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
                basen = basen[:240 - len(h)] + h
            raw_filename = basen + '.dump'
            filename = sanitize_filename(raw_filename, restricted=True)
            self.to_screen('Saving request to ' + filename)
            # Working around MAX_PATH limitation on Windows (see
            # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
            if os.name == 'nt':
                absfilepath = os.path.abspath(filename)
                if len(absfilepath) > 259:
                    filename = '\\\\?\\' + absfilepath
            with open(filename, 'wb') as outf:
                outf.write(webpage_bytes)

        try:
            content = webpage_bytes.decode(encoding, 'replace')
        except LookupError:
            content = webpage_bytes.decode('utf-8', 'replace')

        if ('<title>Access to this site is blocked</title>' in content and
                'Websense' in content[:512]):
            msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
            blocked_iframe = self._html_search_regex(
                r'<iframe src="([^"]+)"', content,
                'Websense information URL', default=None)
            if blocked_iframe:
                msg += ' Visit %s for more details' % blocked_iframe
            raise ExtractorError(msg, expected=True)
        if '<title>The URL you requested has been blocked</title>' in content[:512]:
            msg = (
                'Access to this webpage has been blocked by Indian censorship. '
                'Use a VPN or proxy server (with --proxy) to route around it.')
            block_msg = self._html_search_regex(
                r'</h1><p>(.*?)</p>',
                content, 'block message', default=None)
            if block_msg:
                msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
            raise ExtractorError(msg, expected=True)

        return content

    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None):
        """ Returns the data of the page as a string """
        success = False
        try_count = 0
        while success is False:
            try:
                res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding)
                success = True
            except compat_http_client.IncompleteRead as e:
                try_count += 1
                if try_count >= tries:
                    raise e
                self._sleep(timeout, video_id)
        if res is False:
            return res
        else:
            content, _ = res
            return content

    def _download_xml(self, url_or_request, video_id,
                      note='Downloading XML', errnote='Unable to download XML',
                      transform_source=None, fatal=True, encoding=None):
        """Return the xml as an xml.etree.ElementTree.Element"""
        xml_string = self._download_webpage(
            url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding)
        if xml_string is False:
            return xml_string
        if transform_source:
            xml_string = transform_source(xml_string)
        return compat_etree_fromstring(xml_string.encode('utf-8'))

    def _download_json(self, url_or_request, video_id,
                       note='Downloading JSON metadata',
                       errnote='Unable to download JSON metadata',
                       transform_source=None,
                       fatal=True, encoding=None):
        json_string = self._download_webpage(
            url_or_request, video_id, note, errnote, fatal=fatal,
            encoding=encoding)
        if (not fatal) and json_string is False:
            return None
        return self._parse_json(
            json_string, video_id, transform_source=transform_source, fatal=fatal)

    def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
        if transform_source:
            json_string = transform_source(json_string)
        try:
            return json.loads(json_string)
        except ValueError as ve:
            errmsg = '%s: Failed to parse JSON ' % video_id
            if fatal:
                raise ExtractorError(errmsg, cause=ve)
            else:
                self.report_warning(errmsg + str(ve))

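    # Usage sketch for _download_json (URL and wrapper are hypothetical):
    # transform_source runs before json.loads, so it can strip e.g. a JSONP
    # wrapper such as callback({...}):
    #
    #     data = self._download_json(
    #         'https://example.com/api/video/%s' % video_id, video_id,
    #         transform_source=lambda s: s[s.index('(') + 1:s.rindex(')')])
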
    def report_warning(self, msg, video_id=None):
        idstr = '' if video_id is None else '%s: ' % video_id
        self._downloader.report_warning(
            '[%s] %s%s' % (self.IE_NAME, idstr, msg))

    def to_screen(self, msg):
        """Print msg to screen, prefixing it with '[ie_name]'"""
        self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))

    def report_extraction(self, id_or_name):
        """Report information extraction."""
        self.to_screen('%s: Extracting information' % id_or_name)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self.to_screen('%s: Downloading webpage' % video_id)

    def report_age_confirmation(self):
        """Report attempt to confirm age."""
        self.to_screen('Confirming age')

    def report_login(self):
        """Report attempt to log in."""
        self.to_screen('Logging in')

    @staticmethod
    def raise_login_required(msg='This video is only available for registered users'):
        raise ExtractorError(
            '%s. Use --username and --password or --netrc to provide account credentials.' % msg,
            expected=True)

    @staticmethod
    def raise_geo_restricted(msg='This video is not available from your location due to geo restriction'):
        raise ExtractorError(
            '%s. You might want to use --proxy to work around this.' % msg,
            expected=True)

    # Methods for following #608
    @staticmethod
    def url_result(url, ie=None, video_id=None, video_title=None):
        """Returns a URL that points to a page that should be processed"""
        # TODO: ie should be the class used for getting the info
        video_info = {'_type': 'url',
                      'url': url,
                      'ie_key': ie}
        if video_id is not None:
            video_info['id'] = video_id
        if video_title is not None:
            video_info['title'] = video_title
        return video_info

    @staticmethod
    def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
        """Returns a playlist"""
        video_info = {'_type': 'playlist',
                      'entries': entries}
        if playlist_id:
            video_info['id'] = playlist_id
        if playlist_title:
            video_info['title'] = playlist_title
        if playlist_description:
            video_info['description'] = playlist_description
        return video_info

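    # Usage sketch for the two helpers above (URLs and IDs are hypothetical):
    # a playlist extractor can delegate each entry to another extractor and
    # wrap the collected entries:
    #
    #     entries = [
    #         self.url_result('https://example.com/v/%s' % eid, ie='Example')
    #         for eid in entry_ids]
    #     return self.playlist_result(entries, playlist_id, playlist_title)
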
    def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Perform a regex search on the given string, using a single pattern or
        a list of patterns, returning the first matching group.
        In case of failure return a default value, emit a warning, or raise a
        RegexNotFoundError, depending on fatal, specifying the field name.
        """
        if isinstance(pattern, (str, compat_str, compiled_regex_type)):
            mobj = re.search(pattern, string, flags)
        else:
            for p in pattern:
                mobj = re.search(p, string, flags)
                if mobj:
                    break

        if not self._downloader.params.get('no_color') and os.name != 'nt' and sys.stderr.isatty():
            _name = '\033[0;34m%s\033[0m' % name
        else:
            _name = name

        if mobj:
            if group is None:
                # return the first matching group
                return next(g for g in mobj.groups() if g is not None)
            else:
                return mobj.group(group)
        elif default is not NO_DEFAULT:
            return default
        elif fatal:
            raise RegexNotFoundError('Unable to extract %s' % _name)
        else:
            self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
            return None

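    # Illustration of _search_regex above (input is hypothetical); both calls
    # return '42':
    #
    #     self._search_regex(r'id=(\d+)', 'id=42', 'video id')
    #     self._search_regex(r'id=(?P<id>\d+)', 'id=42', 'video id', group='id')
    #
    # Passing default='unknown' makes a failed match return 'unknown' instead
    # of raising RegexNotFoundError.
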
    def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Like _search_regex, but strips HTML tags and unescapes entities.
        """
        res = self._search_regex(pattern, string, name, default, fatal, flags, group)
        if res:
            return clean_html(res).strip()
        else:
            return res

    def _get_login_info(self):
        """
        Get the login info as (username, password)
        It will look in the netrc file using the _NETRC_MACHINE value
        If there's no info available, return (None, None)
        """
        if self._downloader is None:
            return (None, None)

        username = None
        password = None
        downloader_params = self._downloader.params

        # Attempt to use provided username and password or .netrc data
        if downloader_params.get('username', None) is not None:
            username = downloader_params['username']
            password = downloader_params['password']
        elif downloader_params.get('usenetrc', False):
            try:
                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
                if info is not None:
                    username = info[0]
                    password = info[2]
                else:
                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
            except (IOError, netrc.NetrcParseError) as err:
                self._downloader.report_warning('parsing .netrc: %s' % error_to_compat_str(err))

        return (username, password)

    def _get_tfa_info(self, note='two-factor verification code'):
        """
        Get the two-factor authentication info
        TODO - asking the user will be required for SMS/phone verification;
        currently this just uses the command line option.
        If there's no info available, return None
        """
        if self._downloader is None:
            return None
        downloader_params = self._downloader.params

        if downloader_params.get('twofactor', None) is not None:
            return downloader_params['twofactor']

        return compat_getpass('Type %s and press [Return]: ' % note)

    # Helper functions for extracting OpenGraph info
    @staticmethod
    def _og_regexes(prop):
        content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
        property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
                       % {'prop': re.escape(prop)})
        template = r'<meta[^>]+?%s[^>]+?%s'
        return [
            template % (property_re, content_re),
            template % (content_re, property_re),
        ]

    @staticmethod
    def _meta_regex(prop):
        return r'''(?isx)<meta
                    (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
                    [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)

    def _og_search_property(self, prop, html, name=None, **kargs):
        if name is None:
            name = 'OpenGraph %s' % prop
        escaped = self._search_regex(self._og_regexes(prop), html, name, flags=re.DOTALL, **kargs)
        if escaped is None:
            return None
        return unescapeHTML(escaped)

    def _og_search_thumbnail(self, html, **kargs):
        return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)

    def _og_search_description(self, html, **kargs):
        return self._og_search_property('description', html, fatal=False, **kargs)

    def _og_search_title(self, html, **kargs):
        return self._og_search_property('title', html, **kargs)

    def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
        regexes = self._og_regexes('video') + self._og_regexes('video:url')
        if secure:
            regexes = self._og_regexes('video:secure_url') + regexes
        return self._html_search_regex(regexes, html, name, **kargs)

    def _og_search_url(self, html, **kargs):
        return self._og_search_property('url', html, **kargs)

    def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
        if display_name is None:
            display_name = name
        return self._html_search_regex(
            self._meta_regex(name),
            html, display_name, fatal=fatal, group='content', **kwargs)

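    # Illustration of the OpenGraph and meta helpers above, on a hypothetical
    # HTML fragment:
    #
    #     <meta property="og:title" content="Dancing naked mole rats">
    #     <meta name="description" content="A nature documentary">
    #
    # _og_search_title(html) returns 'Dancing naked mole rats' and
    # _html_search_meta('description', html) returns 'A nature documentary'.
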
    def _dc_search_uploader(self, html):
        return self._html_search_meta('dc.creator', html, 'uploader')

    def _rta_search(self, html):
        # See http://www.rtalabel.org/index.php?content=howtofaq#single
        if re.search(r'(?ix)<meta\s+name="rating"\s+'
                     r' content="RTA-5042-1996-1400-1577-RTA"',
                     html):
            return 18
        return 0

    def _media_rating_search(self, html):
        # See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
        rating = self._html_search_meta('rating', html)

        if not rating:
            return None

        RATING_TABLE = {
            'safe for kids': 0,
            'general': 8,
            '14 years': 14,
            'mature': 17,
            'restricted': 19,
        }
        return RATING_TABLE.get(rating.lower(), None)

    def _family_friendly_search(self, html):
        # See http://schema.org/VideoObject
        family_friendly = self._html_search_meta('isFamilyFriendly', html)

        if not family_friendly:
            return None

        RATING_TABLE = {
            '1': 0,
            'true': 0,
            '0': 18,
            'false': 18,
        }
        return RATING_TABLE.get(family_friendly.lower(), None)

    def _twitter_search_player(self, html):
        return self._html_search_meta('twitter:player', html,
                                      'twitter card player')

    @staticmethod
    def _hidden_inputs(html):
        html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
        hidden_inputs = {}
        for input in re.findall(r'(?i)<input([^>]+)>', html):
            if not re.search(r'type=(["\'])(?:hidden|submit)\1', input):
                continue
            name = re.search(r'name=(["\'])(?P<value>.+?)\1', input)
            if not name:
                continue
            value = re.search(r'value=(["\'])(?P<value>.*?)\1', input)
            if not value:
                continue
            hidden_inputs[name.group('value')] = value.group('value')
        return hidden_inputs

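    # Illustration of _hidden_inputs above, on hypothetical form markup:
    #
    #     <input type="hidden" name="csrf_token" value="abc123">
    #     <input type="text" name="q" value="ignored">
    #
    # _hidden_inputs(html) returns {'csrf_token': 'abc123'}; the non-hidden
    # input is skipped.
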
    def _form_hidden_inputs(self, form_id, html):
        form = self._search_regex(
            r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
            html, '%s form' % form_id, group='form')
        return self._hidden_inputs(form)

    def _sort_formats(self, formats, field_preference=None):
        if not formats:
            raise ExtractorError('No video formats found')

        def _formats_key(f):
            # TODO remove the following workaround
            from ..utils import determine_ext
            if not f.get('ext') and 'url' in f:
                f['ext'] = determine_ext(f['url'])

            if isinstance(field_preference, (list, tuple)):
                return tuple(f.get(field) if f.get(field) is not None else -1 for field in field_preference)

            preference = f.get('preference')
            if preference is None:
                preference = 0
                if f.get('ext') in ['f4f', 'f4m']:  # Not yet supported
                    preference -= 0.5

            proto_preference = 0 if determine_protocol(f) in ['http', 'https'] else -0.1

            if f.get('vcodec') == 'none':  # audio only
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
                else:
                    ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
                ext_preference = 0
                try:
                    audio_ext_preference = ORDER.index(f['ext'])
                except ValueError:
                    audio_ext_preference = -1
            else:
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['flv', 'mp4', 'webm']
                else:
                    ORDER = ['webm', 'flv', 'mp4']
                try:
                    ext_preference = ORDER.index(f['ext'])
                except ValueError:
                    ext_preference = -1
                audio_ext_preference = 0

            return (
                preference,
                f.get('language_preference') if f.get('language_preference') is not None else -1,
                f.get('quality') if f.get('quality') is not None else -1,
                f.get('tbr') if f.get('tbr') is not None else -1,
                f.get('filesize') if f.get('filesize') is not None else -1,
                f.get('vbr') if f.get('vbr') is not None else -1,
                f.get('height') if f.get('height') is not None else -1,
                f.get('width') if f.get('width') is not None else -1,
                proto_preference,
                ext_preference,
                f.get('abr') if f.get('abr') is not None else -1,
                audio_ext_preference,
                f.get('fps') if f.get('fps') is not None else -1,
                f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
                f.get('source_preference') if f.get('source_preference') is not None else -1,
                f.get('format_id') if f.get('format_id') is not None else '',
            )
        formats.sort(key=_formats_key)

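    # Illustration of the sort key above: between two formats that agree on
    # preference, language_preference, quality, tbr, filesize and vbr,
    # {'format_id': 'a', 'ext': 'mp4', 'height': 720} sorts after (i.e. is
    # preferred to) {'format_id': 'b', 'ext': 'mp4', 'height': 480}, since
    # height is compared before the protocol and extension preferences. An
    # explicit 'preference' value on a format overrides all later fields.
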
    def _check_formats(self, formats, video_id):
        if formats:
            formats[:] = filter(
                lambda f: self._is_valid_url(
                    f['url'], video_id,
                    item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
                formats)

    def _is_valid_url(self, url, video_id, item='video'):
        url = self._proto_relative_url(url, scheme='http:')
        # For now assume non HTTP(S) URLs are always valid
        if not (url.startswith('http://') or url.startswith('https://')):
            return True
        try:
            self._request_webpage(url, video_id, 'Checking %s URL' % item)
            return True
        except ExtractorError as e:
            if isinstance(e.cause, compat_urllib_error.URLError):
                self.to_screen(
                    '%s: %s URL is invalid, skipping' % (video_id, item))
                return False
            raise

    def http_scheme(self):
        """ Either "http:" or "https:", depending on the user's preferences """
        return (
            'http:'
            if self._downloader.params.get('prefer_insecure', False)
            else 'https:')

    def _proto_relative_url(self, url, scheme=None):
        if url is None:
            return url
        if url.startswith('//'):
            if scheme is None:
                scheme = self.http_scheme()
            return scheme + url
        else:
            return url

    def _sleep(self, timeout, video_id, msg_template=None):
        if msg_template is None:
            msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
        msg = msg_template % {'video_id': video_id, 'timeout': timeout}
        self.to_screen(msg)
        time.sleep(timeout)

    def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
                             transform_source=lambda s: fix_xml_ampersands(s).strip(),
                             fatal=True):
        manifest = self._download_xml(
            manifest_url, video_id, 'Downloading f4m manifest',
            'Unable to download f4m manifest',
            # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
            # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
            transform_source=transform_source,
            fatal=fatal)

        if manifest is False:
            return []

        formats = []
        manifest_version = '1.0'
        media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
        if not media_nodes:
            manifest_version = '2.0'
            media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
        base_url = xpath_text(
            manifest, ['{http://ns.adobe.com/f4m/1.0}baseURL', '{http://ns.adobe.com/f4m/2.0}baseURL'],
            'base URL', default=None)
        if base_url:
            base_url = base_url.strip()
        for i, media_el in enumerate(media_nodes):
            if manifest_version == '2.0':
                media_url = media_el.attrib.get('href') or media_el.attrib.get('url')
                if not media_url:
                    continue
                manifest_url = (
                    media_url if media_url.startswith('http://') or media_url.startswith('https://')
                    else ((base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
                # If media_url is itself a f4m manifest do the recursive extraction
                # since bitrates in parent manifest (this one) and media_url manifest
                # may differ leading to inability to resolve the format by requested
                # bitrate in f4m downloader
                if determine_ext(manifest_url) == 'f4m':
                    f4m_formats = self._extract_f4m_formats(
                        manifest_url, video_id, preference, f4m_id, fatal=fatal)
                    if f4m_formats:
                        formats.extend(f4m_formats)
                    continue
            tbr = int_or_none(media_el.attrib.get('bitrate'))
            formats.append({
                'format_id': '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)])),
                'url': manifest_url,
                'ext': 'flv',
                'tbr': tbr,
                'width': int_or_none(media_el.attrib.get('width')),
                'height': int_or_none(media_el.attrib.get('height')),
                'preference': preference,
            })
        self._sort_formats(formats)

        return formats

    def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
                              entry_protocol='m3u8', preference=None,
                              m3u8_id=None, note=None, errnote=None,
                              fatal=True):

        formats = [{
            'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
            'url': m3u8_url,
            'ext': ext,
            'protocol': 'm3u8',
            'preference': preference - 1 if preference else -1,
            'resolution': 'multiple',
            'format_note': 'Quality selection URL',
        }]

        format_url = lambda u: (
            u
            if re.match(r'^https?://', u)
            else compat_urlparse.urljoin(m3u8_url, u))

        res = self._download_webpage_handle(
            m3u8_url, video_id,
            note=note or 'Downloading m3u8 information',
            errnote=errnote or 'Failed to download m3u8 information',
            fatal=fatal)
        if res is False:
            return []
        m3u8_doc, urlh = res
        m3u8_url = urlh.geturl()
        last_info = None
        last_media = None
        kv_rex = re.compile(
            r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)')
        for line in m3u8_doc.splitlines():
            if line.startswith('#EXT-X-STREAM-INF:'):
                last_info = {}
                for m in kv_rex.finditer(line):
                    v = m.group('val')
                    if v.startswith('"'):
                        v = v[1:-1]
                    last_info[m.group('key')] = v
            elif line.startswith('#EXT-X-MEDIA:'):
                last_media = {}
                for m in kv_rex.finditer(line):
                    v = m.group('val')
                    if v.startswith('"'):
                        v = v[1:-1]
                    last_media[m.group('key')] = v
            elif line.startswith('#') or not line.strip():
                continue
            else:
                if last_info is None:
                    formats.append({'url': format_url(line)})
                    continue
                tbr = int_or_none(last_info.get('BANDWIDTH'), scale=1000)
                format_id = []
                if m3u8_id:
                    format_id.append(m3u8_id)
                last_media_name = last_media.get('NAME') if last_media and last_media.get('TYPE') != 'SUBTITLES' else None
                format_id.append(last_media_name if last_media_name else '%d' % (tbr if tbr else len(formats)))
                f = {
                    'format_id': '-'.join(format_id),
                    'url': format_url(line.strip()),
                    'tbr': tbr,
                    'ext': ext,
                    'protocol': entry_protocol,
                    'preference': preference,
                }
                codecs = last_info.get('CODECS')
                if codecs:
                    # TODO: it looks like the video codec does not always go first
                    va_codecs = codecs.split(',')
                    if va_codecs[0]:
                        f['vcodec'] = va_codecs[0].partition('.')[0]
                    if len(va_codecs) > 1 and va_codecs[1]:
                        f['acodec'] = va_codecs[1].partition('.')[0]
                resolution = last_info.get('RESOLUTION')
                if resolution:
                    width_str, height_str = resolution.split('x')
                    f['width'] = int(width_str)
                    f['height'] = int(height_str)
                if last_media is not None:
                    f['m3u8_media'] = last_media
                    last_media = None
                formats.append(f)
                last_info = {}
        self._sort_formats(formats)
        return formats

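    # Sketch of what the parser above produces for one hypothetical master
    # playlist entry:
    #
    #     #EXT-X-STREAM-INF:BANDWIDTH=1280000,RESOLUTION=1280x720,CODECS="avc1.4d401f,mp4a.40.2"
    #     http://example.com/hi.m3u8
    #
    # i.e. a format dict with 'tbr': 1280, 'width': 1280, 'height': 720,
    # 'vcodec': 'avc1' and 'acodec': 'mp4a', in addition to the 'meta' format
    # added up front for the master playlist URL itself.
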
    @staticmethod
    def _xpath_ns(path, namespace=None):
        if not namespace:
            return path
        out = []
        for c in path.split('/'):
            if not c or c == '.':
                out.append(c)
            else:
                out.append('{%s}%s' % (namespace, c))
        return '/'.join(out)

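    # For example, _xpath_ns('./head/meta', 'urn:example:ns') (a hypothetical
    # namespace) returns './{urn:example:ns}head/{urn:example:ns}meta'.
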
    def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None):
        smil = self._download_smil(smil_url, video_id, fatal=fatal)

        if smil is False:
            assert not fatal
            return []

        namespace = self._parse_smil_namespace(smil)

        return self._parse_smil_formats(
            smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)

    def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
        smil = self._download_smil(smil_url, video_id, fatal=fatal)
        if smil is False:
            return {}
        return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)

    def _download_smil(self, smil_url, video_id, fatal=True):
        return self._download_xml(
            smil_url, video_id, 'Downloading SMIL file',
            'Unable to download SMIL file', fatal=fatal)

    def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
        namespace = self._parse_smil_namespace(smil)

        formats = self._parse_smil_formats(
            smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
        subtitles = self._parse_smil_subtitles(smil, namespace=namespace)

        video_id = os.path.splitext(url_basename(smil_url))[0]
        title = None
        description = None
        upload_date = None
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            name = meta.attrib.get('name')
            content = meta.attrib.get('content')
            if not name or not content:
                continue
            if not title and name == 'title':
                title = content
            elif not description and name in ('description', 'abstract'):
                description = content
            elif not upload_date and name == 'date':
                upload_date = unified_strdate(content)

        thumbnails = [{
            'id': image.get('type'),
            'url': image.get('src'),
            'width': int_or_none(image.get('width')),
            'height': int_or_none(image.get('height')),
        } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]

        return {
            'id': video_id,
            'title': title or video_id,
            'description': description,
            'upload_date': upload_date,
            'thumbnails': thumbnails,
            'formats': formats,
            'subtitles': subtitles,
        }

    def _parse_smil_namespace(self, smil):
        return self._search_regex(
            r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)

    def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
        base = smil_url
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            b = meta.get('base') or meta.get('httpBase')
            if b:
                base = b
                break

        formats = []
        rtmp_count = 0
        http_count = 0

        videos = smil.findall(self._xpath_ns('.//video', namespace))
        for video in videos:
            src = video.get('src')
            if not src:
                continue

            bitrate = float_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
            filesize = int_or_none(video.get('size') or video.get('fileSize'))
            width = int_or_none(video.get('width'))
            height = int_or_none(video.get('height'))
            proto = video.get('proto')
            ext = video.get('ext')
            src_ext = determine_ext(src)
            streamer = video.get('streamer') or base

            if proto == 'rtmp' or streamer.startswith('rtmp'):
                rtmp_count += 1
                formats.append({
                    'url': streamer,
                    'play_path': src,
                    'ext': 'flv',
                    'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })
                if transform_rtmp_url:
                    streamer, src = transform_rtmp_url(streamer, src)
                    formats[-1].update({
                        'url': streamer,
                        'play_path': src,
                    })
                continue

            src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)

            if proto == 'm3u8' or src_ext == 'm3u8':
                m3u8_formats = self._extract_m3u8_formats(
                    src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
                if m3u8_formats:
                    formats.extend(m3u8_formats)
                continue

            if src_ext == 'f4m':
                f4m_url = src_url
                if not f4m_params:
                    f4m_params = {
                        'hdcore': '3.2.0',
                        'plugin': 'flowplayer-3.2.0.1',
                    }
                f4m_url += '&' if '?' in f4m_url else '?'
                f4m_url += compat_urllib_parse.urlencode(f4m_params)
                f4m_formats = self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False)
                if f4m_formats:
                    formats.extend(f4m_formats)
                continue

            if src_url.startswith('http') and self._is_valid_url(src_url, video_id):
                http_count += 1
                formats.append({
                    'url': src_url,
                    'ext': ext or src_ext or 'flv',
                    'format_id': 'http-%d' % (bitrate or http_count),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })
                continue

        self._sort_formats(formats)

        return formats

    def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
        subtitles = {}
        for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
            src = textstream.get('src')
            if not src:
                continue
            ext = textstream.get('ext') or determine_ext(src)
            if not ext:
                type_ = textstream.get('type')
                SUBTITLES_TYPES = {
                    'text/vtt': 'vtt',
                    'text/srt': 'srt',
                    'application/smptett+xml': 'tt',
                }
                if type_ in SUBTITLES_TYPES:
                    ext = SUBTITLES_TYPES[type_]
            lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
            subtitles.setdefault(lang, []).append({
                'url': src,
                'ext': ext,
            })
        return subtitles

    def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
        xspf = self._download_xml(
            playlist_url, playlist_id, 'Downloading xspf playlist',
            'Unable to download xspf manifest', fatal=fatal)
        if xspf is False:
            return []
        return self._parse_xspf(xspf, playlist_id)

    def _parse_xspf(self, playlist, playlist_id):
        NS_MAP = {
            'xspf': 'http://xspf.org/ns/0/',
            's1': 'http://static.streamone.nl/player/ns/0',
        }

        entries = []
        for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
            title = xpath_text(
                track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
            description = xpath_text(
                track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
            thumbnail = xpath_text(
                track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
            duration = float_or_none(
                xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)

            formats = [{
                'url': location.text,
                'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
                'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
                'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
            } for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
            self._sort_formats(formats)

            entries.append({
                'id': playlist_id,
                'title': title,
                'description': description,
                'thumbnail': thumbnail,
                'duration': duration,
                'formats': formats,
            })
        return entries

    def _live_title(self, name):
        """ Generate the title for a live video """
        now = datetime.datetime.now()
        now_str = now.strftime("%Y-%m-%d %H:%M")
        return name + ' ' + now_str

    def _int(self, v, name, fatal=False, **kwargs):
        res = int_or_none(v, **kwargs)
        if 'get_attr' in kwargs:
            print(getattr(v, kwargs['get_attr']))
        if res is None:
            msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
            if fatal:
                raise ExtractorError(msg)
            else:
                self._downloader.report_warning(msg)
        return res

    def _float(self, v, name, fatal=False, **kwargs):
        res = float_or_none(v, **kwargs)
        if res is None:
            msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
            if fatal:
                raise ExtractorError(msg)
            else:
                self._downloader.report_warning(msg)
        return res

    def _set_cookie(self, domain, name, value, expire_time=None):
        cookie = compat_cookiejar.Cookie(
            0, name, value, None, None, domain, None,
            None, '/', True, False, expire_time, '', None, None, None)
        self._downloader.cookiejar.set_cookie(cookie)

    def _get_cookies(self, url):
        """ Return a compat_cookies.SimpleCookie with the cookies for the url """
        req = sanitized_Request(url)
        self._downloader.cookiejar.add_cookie_header(req)
        return compat_cookies.SimpleCookie(req.get_header('Cookie'))

    def get_testcases(self, include_onlymatching=False):
        t = getattr(self, '_TEST', None)
        if t:
            assert not hasattr(self, '_TESTS'), \
                '%s has _TEST and _TESTS' % type(self).__name__
            tests = [t]
        else:
            tests = getattr(self, '_TESTS', [])
        for t in tests:
            if not include_onlymatching and t.get('only_matching', False):
                continue
            t['name'] = type(self).__name__[:-len('IE')]
            yield t

    def is_suitable(self, age_limit):
        """ Test whether the extractor is generally suitable for the given
        age limit (i.e. pornographic sites are not, all others usually are) """

        any_restricted = False
        for tc in self.get_testcases(include_onlymatching=False):
            if 'playlist' in tc:
                tc = tc['playlist'][0]
            is_restricted = age_restricted(
                tc.get('info_dict', {}).get('age_limit'), age_limit)
            if not is_restricted:
                return True
            any_restricted = any_restricted or is_restricted
        return not any_restricted

    def extract_subtitles(self, *args, **kwargs):
        if (self._downloader.params.get('writesubtitles', False) or
                self._downloader.params.get('listsubtitles')):
            return self._get_subtitles(*args, **kwargs)
        return {}

    def _get_subtitles(self, *args, **kwargs):
        raise NotImplementedError("This method must be implemented by subclasses")

    @staticmethod
    def _merge_subtitle_items(subtitle_list1, subtitle_list2):
        """ Merge subtitle items for one language. Items with duplicated URLs
        will be dropped. """
        list1_urls = set([item['url'] for item in subtitle_list1])
        ret = list(subtitle_list1)
        ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
        return ret

    @classmethod
    def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
        """ Merge two subtitle dictionaries, language by language. """
        ret = dict(subtitle_dict1)
        for lang in subtitle_dict2:
            ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
        return ret

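    # Illustration of the two merge helpers above (URLs are made up):
    #
    #     cls._merge_subtitles(
    #         {'en': [{'url': 'http://a/en.vtt', 'ext': 'vtt'}]},
    #         {'en': [{'url': 'http://a/en.vtt', 'ext': 'vtt'}],
    #          'de': [{'url': 'http://a/de.vtt', 'ext': 'vtt'}]})
    #
    # returns both languages, keeping the duplicated English URL only once.
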
    def extract_automatic_captions(self, *args, **kwargs):
        if (self._downloader.params.get('writeautomaticsub', False) or
                self._downloader.params.get('listsubtitles')):
            return self._get_automatic_captions(*args, **kwargs)
        return {}

    def _get_automatic_captions(self, *args, **kwargs):
        raise NotImplementedError("This method must be implemented by subclasses")


class SearchInfoExtractor(InfoExtractor):
    """
    Base class for paged search query extractors.
    They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
    Instances should define _SEARCH_KEY and _MAX_RESULTS.
    """

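    # For example, a subclass with the hypothetical _SEARCH_KEY
    # 'examplesearch' accepts 'examplesearch:cats' (first result),
    # 'examplesearch5:cats' (first five results) and 'examplesearchall:cats'
    # (up to _MAX_RESULTS results).
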
    @classmethod
    def _make_valid_url(cls):
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY

    @classmethod
    def suitable(cls, url):
        return re.match(cls._make_valid_url(), url) is not None

    def _real_extract(self, query):
        mobj = re.match(self._make_valid_url(), query)
        if mobj is None:
            raise ExtractorError('Invalid search query "%s"' % query)

        prefix = mobj.group('prefix')
        query = mobj.group('query')
        if prefix == '':
            return self._get_n_results(query, 1)
        elif prefix == 'all':
            return self._get_n_results(query, self._MAX_RESULTS)
        else:
            n = int(prefix)
            if n <= 0:
                raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
            elif n > self._MAX_RESULTS:
                self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
                n = self._MAX_RESULTS
            return self._get_n_results(query, n)

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        raise NotImplementedError("This method must be implemented by subclasses")

    @property
    def SEARCH_KEY(self):
        return self._SEARCH_KEY