from __future__ import unicode_literals

import base64
import datetime
import hashlib
import json
import netrc
import os
import re
import socket
import sys
import time

from ..compat import (
    compat_cookiejar,
    compat_cookies,
    compat_getpass,
    compat_http_client,
    compat_urllib_error,
    compat_urllib_parse,
    compat_urlparse,
    compat_str,
    compat_etree_fromstring,
)
from ..utils import (
    NO_DEFAULT,
    age_restricted,
    bug_reports_message,
    clean_html,
    compiled_regex_type,
    determine_ext,
    error_to_compat_str,
    ExtractorError,
    fix_xml_ampersands,
    float_or_none,
    int_or_none,
    RegexNotFoundError,
    sanitize_filename,
    sanitized_Request,
    unescapeHTML,
    unified_strdate,
    url_basename,
    xpath_text,
    xpath_with_ns,
    determine_protocol,
)


class InfoExtractor(object):
    """Information Extractor class.

    Information extractors are the classes that, given a URL, extract
    information about the video (or videos) the URL refers to. This
    information includes the real video URL, the video title, author and
    others. The information is stored in a dictionary which is then
    passed to the YoutubeDL. The YoutubeDL processes this
    information, possibly downloading the video to the file system, among
    other possible outcomes.

    The type field determines the type of the result.
    By far the most common value (and the default if _type is missing) is
    "video", which indicates a single video.

    For a video, the dictionaries must include the following fields:

    id:             Video identifier.
    title:          Video title, unescaped.

    Additionally, it must contain either a formats entry or a url one:

    formats:        A list of dictionaries for each format available, ordered
                    from worst to best quality.

                    Potential fields:
                    * url        Mandatory. The URL of the video file
                    * ext        Will be calculated from URL if missing
                    * format     A human-readable description of the format
                                 ("mp4 container with h264/opus").
                                 Calculated from the format_id, width, height,
                                 and format_note fields if missing.
                    * format_id  A short description of the format
                                 ("mp4_h264_opus" or "19").
                                 Technically optional, but strongly recommended.
                    * format_note Additional info about the format
                                 ("3D" or "DASH video")
                    * width      Width of the video, if known
                    * height     Height of the video, if known
                    * resolution Textual description of width and height
                    * tbr        Average bitrate of audio and video in KBit/s
                    * abr        Average audio bitrate in KBit/s
                    * acodec     Name of the audio codec in use
                    * asr        Audio sampling rate in Hertz
                    * vbr        Average video bitrate in KBit/s
                    * fps        Frame rate
                    * vcodec     Name of the video codec in use
                    * container  Name of the container format
                    * filesize   The number of bytes, if known in advance
                    * filesize_approx  An estimate for the number of bytes
                    * player_url SWF Player URL (used for rtmpdump).
                    * protocol   The protocol that will be used for the actual
                                 download, lower-case.
                                 "http", "https", "rtsp", "rtmp", "rtmpe",
                                 "m3u8", or "m3u8_native".
                    * preference Order number of this format. If this field is
                                 present and not None, the formats get sorted
                                 by this field, regardless of all other values.
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                                 < -1000 to hide the format (if there is
                                 another one which is strictly better)
                    * language_preference  Is this in the correct requested
                                 language?
                                 10 if it's what the URL is about,
                                 -1 for default (don't know),
                                 -10 otherwise, other values reserved for now.
                    * quality    Order number of the video quality of this
                                 format, irrespective of the file format.
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                    * source_preference  Order number for this video source
                                 (quality takes higher priority)
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                    * http_headers  A dictionary of additional HTTP headers
                                 to add to the request.
                    * stretched_ratio  If given and not 1, indicates that the
                                 video's pixels are not square.
                                 width : height ratio as float.
                    * no_resume  The server does not support resuming the
                                 (HTTP or RTMP) download. Boolean.

    url:            Final video URL.
    ext:            Video filename extension.
    format:         The video format, defaults to ext (used for --get-format)
    player_url:     SWF Player URL (used for rtmpdump).

    The following fields are optional:

    alt_title:      A secondary title of the video.
    display_id:     An alternative identifier for the video, not necessarily
                    unique, but available before title. Typically, id is
                    something like "4234987", title "Dancing naked mole rats",
                    and display_id "dancing-naked-mole-rats"
    thumbnails:     A list of dictionaries, with the following entries:
                        * "id" (optional, string) - Thumbnail format ID
                        * "url"
                        * "preference" (optional, int) - quality of the image
                        * "width" (optional, int)
                        * "height" (optional, int)
                        * "resolution" (optional, string "{width}x{height}",
                                        deprecated)
    thumbnail:      Full URL to a video thumbnail image.
    description:    Full video description.
    uploader:       Full name of the video uploader.
    creator:        The main artist who created the video.
    release_date:   The date (YYYYMMDD) when the video was released.
    timestamp:      UNIX timestamp of the moment the video became available.
    upload_date:    Video upload date (YYYYMMDD).
                    If not explicitly set, calculated from timestamp.
    uploader_id:    Nickname or id of the video uploader.
    location:       Physical location where the video was filmed.
    subtitles:      The available subtitles as a dictionary in the format
                    {language: subformats}. "subformats" is a list sorted from
                    lower to higher preference, each element is a dictionary
                    with the "ext" entry and one of:
                        * "data": The subtitles file contents
                        * "url": A URL pointing to the subtitles file
                    "ext" will be calculated from URL if missing
    automatic_captions: Like 'subtitles', used by the YoutubeIE for
                    automatically generated captions
    duration:       Length of the video in seconds, as an integer or float.
    view_count:     How many users have watched the video on the platform.
    like_count:     Number of positive ratings of the video
    dislike_count:  Number of negative ratings of the video
    repost_count:   Number of reposts of the video
    average_rating: Average rating given by users, the scale used depends on the webpage
    comment_count:  Number of comments on the video
    comments:       A list of comments, each with one or more of the following
                    properties (all optional, but at least one of text or html
                    must be present):
                        * "author" - human-readable name of the comment author
                        * "author_id" - user ID of the comment author
                        * "id" - Comment ID
                        * "html" - Comment as HTML
                        * "text" - Plain text of the comment
                        * "timestamp" - UNIX timestamp of comment
                        * "parent" - ID of the comment this one is replying to.
                                     Set to "root" to indicate that this is a
                                     comment to the original video.
    age_limit:      Age restriction for the video, as an integer (years)
    webpage_url:    The URL to the video webpage, if given to youtube-dl it
                    should allow getting the same result again. (It will be set
                    by YoutubeDL if it's missing)
    categories:     A list of categories that the video falls in, for example
                    ["Sports", "Berlin"]
    tags:           A list of tags assigned to the video, e.g. ["sweden", "pop music"]
    is_live:        True, False, or None (=unknown). Whether this video is a
                    live stream that goes on instead of a fixed-length video.
    start_time:     Time in seconds where the reproduction should start, as
                    specified in the URL.
    end_time:       Time in seconds where the reproduction should end, as
                    specified in the URL.

    Unless mentioned otherwise, the fields should be Unicode strings.

    Unless mentioned otherwise, None is equivalent to absence of information.


    _type "playlist" indicates multiple videos.
    There must be a key "entries", which is a list, an iterable, or a PagedList
    object, each element of which is a valid dictionary by this specification.

    Additionally, playlists can have "title", "description" and "id" attributes
    with the same semantics as videos (see above).


    _type "multi_video" indicates that there are multiple videos that
    form a single show, for example multiple acts of an opera or TV episode.
    It must have an entries key like a playlist and contain all the keys
    required for a video at the same time.


    _type "url" indicates that the video must be extracted from another
    location, possibly by a different extractor. Its only required key is:
    "url" - the next URL to extract.
    The key "ie_key" can be set to the class name (minus the trailing "IE",
    e.g. "Youtube") if the extractor class is known in advance.
    Additionally, the dictionary may have any properties of the resolved entity
    known in advance, for example "title" if the title of the referred video is
    known ahead of time.


    _type "url_transparent" entities have the same specification as "url", but
    indicate that the given additional information is more precise than the one
    associated with the resolved URL.
    This is useful when a site employs a video service that hosts the video and
    its technical metadata, but that video service does not embed a useful
    title, description etc.


    Subclasses of this one should re-define the _real_initialize() and
    _real_extract() methods and define a _VALID_URL regexp.
    Probably, they should also be added to the list of extractors.

    Finally, the _WORKING attribute should be set to False for broken IEs
    in order to warn the users and skip the tests.
    """

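    # Illustrative sketch, not part of the library: a minimal hypothetical
    # subclass showing _VALID_URL with a named 'id' group, _match_id(), and
    # the info dict described above. All names and the URL are made up.
    #
    #     class ExampleIE(InfoExtractor):
    #         _VALID_URL = r'https?://(?:www\.)?example\.com/watch/(?P<id>[0-9]+)'
    #
    #         def _real_extract(self, url):
    #             video_id = self._match_id(url)
    #             webpage = self._download_webpage(url, video_id)
    #             return {
    #                 'id': video_id,
    #                 'title': self._og_search_title(webpage),
    #                 'url': self._og_search_video_url(webpage),
    #                 'description': self._og_search_description(webpage),
    #             }
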
    _ready = False
    _downloader = None
    _WORKING = True

    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader."""
        self._ready = False
        self.set_downloader(downloader)

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""

        # This does not use has/getattr intentionally - we want to know whether
        # we have cached the regexp for *this* class, whereas getattr would also
        # match the superclass
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        return cls._VALID_URL_RE.match(url) is not None

    @classmethod
    def _match_id(cls, url):
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        m = cls._VALID_URL_RE.match(url)
        assert m
        return m.group('id')

    @classmethod
    def working(cls):
        """Getter method for _WORKING."""
        return cls._WORKING

    def initialize(self):
        """Initializes an instance (authentication, etc)."""
        if not self._ready:
            self._real_initialize()
            self._ready = True

    def extract(self, url):
        """Extracts URL information and returns it in a list of dicts."""
        try:
            self.initialize()
            return self._real_extract(url)
        except ExtractorError:
            raise
        except compat_http_client.IncompleteRead as e:
            raise ExtractorError('A network error has occurred.', cause=e, expected=True)
        except (KeyError, StopIteration) as e:
            raise ExtractorError('An extractor error has occurred.', cause=e)

    def set_downloader(self, downloader):
        """Sets the downloader for this IE."""
        self._downloader = downloader

    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses."""
        pass

    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses."""
        pass

    @classmethod
    def ie_key(cls):
        """A string for getting the InfoExtractor with get_info_extractor"""
        return compat_str(cls.__name__[:-2])

    @property
    def IE_NAME(self):
        return compat_str(type(self).__name__[:-2])

    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
        """ Returns the response handle """
        if note is None:
            self.report_download_webpage(video_id)
        elif note is not False:
            if video_id is None:
                self.to_screen('%s' % (note,))
            else:
                self.to_screen('%s: %s' % (video_id, note))
        try:
            return self._downloader.urlopen(url_or_request)
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            if errnote is False:
                return False
            if errnote is None:
                errnote = 'Unable to download webpage'

            errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
            if fatal:
                raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
            else:
                self._downloader.report_warning(errmsg)
                return False

    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None):
        """ Returns a tuple (page content as string, URL handle) """
        # Strip hashes from the URL (#1038)
        if isinstance(url_or_request, (compat_str, str)):
            url_or_request = url_or_request.partition('#')[0]

        urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal)
        if urlh is False:
            assert not fatal
            return False
        content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
        return (content, urlh)

    @staticmethod
    def _guess_encoding_from_content(content_type, webpage_bytes):
        m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
        if m:
            encoding = m.group(1)
        else:
            m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
                          webpage_bytes[:1024])
            if m:
                encoding = m.group(1).decode('ascii')
            elif webpage_bytes.startswith(b'\xff\xfe'):
                encoding = 'utf-16'
            else:
                encoding = 'utf-8'

        return encoding

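    # Illustrative sketch (hypothetical inputs): the detection paths above,
    # in priority order - charset from the Content-Type header, a <meta>
    # charset within the first 1024 bytes, a UTF-16 LE BOM, then the utf-8
    # fallback.
    #
    #     InfoExtractor._guess_encoding_from_content(
    #         'text/html; charset=ISO-8859-1', b'')           # -> 'ISO-8859-1'
    #     InfoExtractor._guess_encoding_from_content(
    #         'text/html', b'<meta charset="koi8-r">')        # -> 'koi8-r'
    #     InfoExtractor._guess_encoding_from_content(
    #         'text/html', b'\xff\xfe')                       # -> 'utf-16'
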
    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
        content_type = urlh.headers.get('Content-Type', '')
        webpage_bytes = urlh.read()
        if prefix is not None:
            webpage_bytes = prefix + webpage_bytes
        if not encoding:
            encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
        if self._downloader.params.get('dump_intermediate_pages', False):
            try:
                url = url_or_request.get_full_url()
            except AttributeError:
                url = url_or_request
            self.to_screen('Dumping request to ' + url)
            dump = base64.b64encode(webpage_bytes).decode('ascii')
            self._downloader.to_screen(dump)
        if self._downloader.params.get('write_pages', False):
            try:
                url = url_or_request.get_full_url()
            except AttributeError:
                url = url_or_request
            basen = '%s_%s' % (video_id, url)
            if len(basen) > 240:
                h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
                basen = basen[:240 - len(h)] + h
            raw_filename = basen + '.dump'
            filename = sanitize_filename(raw_filename, restricted=True)
            self.to_screen('Saving request to ' + filename)
            # Working around MAX_PATH limitation on Windows (see
            # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
            if os.name == 'nt':
                absfilepath = os.path.abspath(filename)
                if len(absfilepath) > 259:
                    filename = '\\\\?\\' + absfilepath
            with open(filename, 'wb') as outf:
                outf.write(webpage_bytes)

        try:
            content = webpage_bytes.decode(encoding, 'replace')
        except LookupError:
            content = webpage_bytes.decode('utf-8', 'replace')

        if ('<title>Access to this site is blocked</title>' in content and
                'Websense' in content[:512]):
            msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
            blocked_iframe = self._html_search_regex(
                r'<iframe src="([^"]+)"', content,
                'Websense information URL', default=None)
            if blocked_iframe:
                msg += ' Visit %s for more details' % blocked_iframe
            raise ExtractorError(msg, expected=True)
        if '<title>The URL you requested has been blocked</title>' in content[:512]:
            msg = (
                'Access to this webpage has been blocked by Indian censorship. '
                'Use a VPN or proxy server (with --proxy) to route around it.')
            block_msg = self._html_search_regex(
                r'</h1><p>(.*?)</p>',
                content, 'block message', default=None)
            if block_msg:
                msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
            raise ExtractorError(msg, expected=True)

        return content

    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None):
        """ Returns the data of the page as a string """
        success = False
        try_count = 0
        while success is False:
            try:
                res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding)
                success = True
            except compat_http_client.IncompleteRead as e:
                try_count += 1
                if try_count >= tries:
                    raise e
                self._sleep(timeout, video_id)
        if res is False:
            return res
        else:
            content, _ = res
            return content

    def _download_xml(self, url_or_request, video_id,
                      note='Downloading XML', errnote='Unable to download XML',
                      transform_source=None, fatal=True, encoding=None):
        """Return the xml as an xml.etree.ElementTree.Element"""
        xml_string = self._download_webpage(
            url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding)
        if xml_string is False:
            return xml_string
        if transform_source:
            xml_string = transform_source(xml_string)
        return compat_etree_fromstring(xml_string.encode('utf-8'))

    def _download_json(self, url_or_request, video_id,
                       note='Downloading JSON metadata',
                       errnote='Unable to download JSON metadata',
                       transform_source=None,
                       fatal=True, encoding=None):
        json_string = self._download_webpage(
            url_or_request, video_id, note, errnote, fatal=fatal,
            encoding=encoding)
        if (not fatal) and json_string is False:
            return None
        return self._parse_json(
            json_string, video_id, transform_source=transform_source, fatal=fatal)

    def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
        if transform_source:
            json_string = transform_source(json_string)
        try:
            return json.loads(json_string)
        except ValueError as ve:
            errmsg = '%s: Failed to parse JSON ' % video_id
            if fatal:
                raise ExtractorError(errmsg, cause=ve)
            else:
                self.report_warning(errmsg + str(ve))

    def report_warning(self, msg, video_id=None):
        idstr = '' if video_id is None else '%s: ' % video_id
        self._downloader.report_warning(
            '[%s] %s%s' % (self.IE_NAME, idstr, msg))

    def to_screen(self, msg):
        """Print msg to screen, prefixing it with '[ie_name]'"""
        self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))

    def report_extraction(self, id_or_name):
        """Report information extraction."""
        self.to_screen('%s: Extracting information' % id_or_name)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self.to_screen('%s: Downloading webpage' % video_id)

    def report_age_confirmation(self):
        """Report attempt to confirm age."""
        self.to_screen('Confirming age')

    def report_login(self):
        """Report attempt to log in."""
        self.to_screen('Logging in')

    @staticmethod
    def raise_login_required(msg='This video is only available for registered users'):
        raise ExtractorError(
            '%s. Use --username and --password or --netrc to provide account credentials.' % msg,
            expected=True)

    @staticmethod
    def raise_geo_restricted(msg='This video is not available from your location due to geo restriction'):
        raise ExtractorError(
            '%s. You might want to use --proxy to work around this.' % msg,
            expected=True)

    # Methods for following #608
    @staticmethod
    def url_result(url, ie=None, video_id=None, video_title=None):
        """Returns a URL that points to a page that should be processed"""
        # TODO: ie should be the class used for getting the info
        video_info = {'_type': 'url',
                      'url': url,
                      'ie_key': ie}
        if video_id is not None:
            video_info['id'] = video_id
        if video_title is not None:
            video_info['title'] = video_title
        return video_info

    @staticmethod
    def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
        """Returns a playlist"""
        video_info = {'_type': 'playlist',
                      'entries': entries}
        if playlist_id:
            video_info['id'] = playlist_id
        if playlist_title:
            video_info['title'] = playlist_title
        if playlist_description:
            video_info['description'] = playlist_description
        return video_info

    def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Perform a regex search on the given string, using a single pattern or
        a list of patterns, returning the first matching group.
        In case of failure, return a default value, print a warning, or raise
        a RegexNotFoundError, depending on default and fatal, specifying the
        field name.
        """
        if isinstance(pattern, (str, compat_str, compiled_regex_type)):
            mobj = re.search(pattern, string, flags)
        else:
            for p in pattern:
                mobj = re.search(p, string, flags)
                if mobj:
                    break

        if not self._downloader.params.get('no_color') and os.name != 'nt' and sys.stderr.isatty():
            _name = '\033[0;34m%s\033[0m' % name
        else:
            _name = name

        if mobj:
            if group is None:
                # return the first matching group
                return next(g for g in mobj.groups() if g is not None)
            else:
                return mobj.group(group)
        elif default is not NO_DEFAULT:
            return default
        elif fatal:
            raise RegexNotFoundError('Unable to extract %s' % _name)
        else:
            self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
            return None

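    # Illustrative sketch (hypothetical page markup): extracting a named
    # group with a non-fatal fallback to None.
    #
    #     title = self._search_regex(
    #         r'<h1 class="title">(?P<title>[^<]+)</h1>', webpage,
    #         'title', group='title', default=None)
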
    def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Like _search_regex, but strips HTML tags and unescapes entities.
        """
        res = self._search_regex(pattern, string, name, default, fatal, flags, group)
        if res:
            return clean_html(res).strip()
        else:
            return res

    def _get_login_info(self):
        """
        Get the login info as (username, password)
        It will look in the netrc file using the _NETRC_MACHINE value
        If there's no info available, return (None, None)
        """
        if self._downloader is None:
            return (None, None)

        username = None
        password = None
        downloader_params = self._downloader.params

        # Attempt to use provided username and password or .netrc data
        if downloader_params.get('username', None) is not None:
            username = downloader_params['username']
            password = downloader_params['password']
        elif downloader_params.get('usenetrc', False):
            try:
                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
                if info is not None:
                    username = info[0]
                    password = info[2]
                else:
                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
            except (IOError, netrc.NetrcParseError) as err:
                self._downloader.report_warning('parsing .netrc: %s' % error_to_compat_str(err))

        return (username, password)

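    # Illustrative sketch (hypothetical credentials): the ~/.netrc entry the
    # lookup above would find for an extractor whose _NETRC_MACHINE is
    # 'example'.
    #
    #     machine example login myaccount@example.com password mypassword
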
    def _get_tfa_info(self, note='two-factor verification code'):
        """
        Get the two-factor authentication info.
        TODO - asking the user will be required for sms/phone verification;
        currently this just uses the command line option.
        If there's no info available, return None.
        """
        if self._downloader is None:
            return None
        downloader_params = self._downloader.params

        if downloader_params.get('twofactor', None) is not None:
            return downloader_params['twofactor']

        return compat_getpass('Type %s and press [Return]: ' % note)

    # Helper functions for extracting OpenGraph info
    @staticmethod
    def _og_regexes(prop):
        content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
        property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
                       % {'prop': re.escape(prop)})
        template = r'<meta[^>]+?%s[^>]+?%s'
        return [
            template % (property_re, content_re),
            template % (content_re, property_re),
        ]

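    # Illustrative sketch (hypothetical markup): the kind of OpenGraph tag
    # the regexes above match, in either attribute order.
    #
    #     <meta property="og:title" content="Some video title">
    #     <meta content="Some video title" property="og:title">
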
    @staticmethod
    def _meta_regex(prop):
        return r'''(?isx)<meta
                    (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
                    [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)

    def _og_search_property(self, prop, html, name=None, **kargs):
        if name is None:
            name = 'OpenGraph %s' % prop
        escaped = self._search_regex(self._og_regexes(prop), html, name, flags=re.DOTALL, **kargs)
        if escaped is None:
            return None
        return unescapeHTML(escaped)

    def _og_search_thumbnail(self, html, **kargs):
        return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)

    def _og_search_description(self, html, **kargs):
        return self._og_search_property('description', html, fatal=False, **kargs)

    def _og_search_title(self, html, **kargs):
        return self._og_search_property('title', html, **kargs)

    def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
        regexes = self._og_regexes('video') + self._og_regexes('video:url')
        if secure:
            regexes = self._og_regexes('video:secure_url') + regexes
        return self._html_search_regex(regexes, html, name, **kargs)

    def _og_search_url(self, html, **kargs):
        return self._og_search_property('url', html, **kargs)

    def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
        if display_name is None:
            display_name = name
        return self._html_search_regex(
            self._meta_regex(name),
            html, display_name, fatal=fatal, group='content', **kwargs)

    def _dc_search_uploader(self, html):
        return self._html_search_meta('dc.creator', html, 'uploader')

    def _rta_search(self, html):
        # See http://www.rtalabel.org/index.php?content=howtofaq#single
        if re.search(r'(?ix)<meta\s+name="rating"\s+'
                     r' content="RTA-5042-1996-1400-1577-RTA"',
                     html):
            return 18
        return 0

    def _media_rating_search(self, html):
        # See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
        rating = self._html_search_meta('rating', html)

        if not rating:
            return None

        RATING_TABLE = {
            'safe for kids': 0,
            'general': 8,
            '14 years': 14,
            'mature': 17,
            'restricted': 19,
        }
        return RATING_TABLE.get(rating.lower(), None)

    def _family_friendly_search(self, html):
        # See http://schema.org/VideoObject
        family_friendly = self._html_search_meta('isFamilyFriendly', html)

        if not family_friendly:
            return None

        RATING_TABLE = {
            '1': 0,
            'true': 0,
            '0': 18,
            'false': 18,
        }
        return RATING_TABLE.get(family_friendly.lower(), None)

    def _twitter_search_player(self, html):
        return self._html_search_meta('twitter:player', html,
                                      'twitter card player')

    @staticmethod
    def _hidden_inputs(html):
        html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
        hidden_inputs = {}
        for input in re.findall(r'(?i)<input([^>]+)>', html):
            if not re.search(r'type=(["\'])(?:hidden|submit)\1', input):
                continue
            name = re.search(r'name=(["\'])(?P<value>.+?)\1', input)
            if not name:
                continue
            value = re.search(r'value=(["\'])(?P<value>.*?)\1', input)
            if not value:
                continue
            hidden_inputs[name.group('value')] = value.group('value')
        return hidden_inputs

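    # Illustrative sketch (hypothetical form): _hidden_inputs() would return
    # {'csrf_token': 'abc123'} for markup like
    #
    #     <form id="login"><input type="hidden" name="csrf_token" value="abc123"></form>
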
    def _form_hidden_inputs(self, form_id, html):
        form = self._search_regex(
            r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
            html, '%s form' % form_id, group='form')
        return self._hidden_inputs(form)

    def _sort_formats(self, formats, field_preference=None):
        if not formats:
            raise ExtractorError('No video formats found')

        def _formats_key(f):
            # TODO remove the following workaround
            from ..utils import determine_ext
            if not f.get('ext') and 'url' in f:
                f['ext'] = determine_ext(f['url'])

            if isinstance(field_preference, (list, tuple)):
                return tuple(f.get(field) if f.get(field) is not None else -1 for field in field_preference)

            preference = f.get('preference')
            if preference is None:
                preference = 0
                if f.get('ext') in ['f4f', 'f4m']:  # Not yet supported
                    preference -= 0.5

            proto_preference = 0 if determine_protocol(f) in ['http', 'https'] else -0.1

            if f.get('vcodec') == 'none':  # audio only
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
                else:
                    ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
                ext_preference = 0
                try:
                    audio_ext_preference = ORDER.index(f['ext'])
                except ValueError:
                    audio_ext_preference = -1
            else:
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['flv', 'mp4', 'webm']
                else:
                    ORDER = ['webm', 'flv', 'mp4']
                try:
                    ext_preference = ORDER.index(f['ext'])
                except ValueError:
                    ext_preference = -1
                audio_ext_preference = 0

            return (
                preference,
                f.get('language_preference') if f.get('language_preference') is not None else -1,
                f.get('quality') if f.get('quality') is not None else -1,
                f.get('tbr') if f.get('tbr') is not None else -1,
                f.get('filesize') if f.get('filesize') is not None else -1,
                f.get('vbr') if f.get('vbr') is not None else -1,
                f.get('height') if f.get('height') is not None else -1,
                f.get('width') if f.get('width') is not None else -1,
                proto_preference,
                ext_preference,
                f.get('abr') if f.get('abr') is not None else -1,
                audio_ext_preference,
                f.get('fps') if f.get('fps') is not None else -1,
                f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
                f.get('source_preference') if f.get('source_preference') is not None else -1,
                f.get('format_id') if f.get('format_id') is not None else '',
            )
        formats.sort(key=_formats_key)

    def _check_formats(self, formats, video_id):
        if formats:
            formats[:] = filter(
                lambda f: self._is_valid_url(
                    f['url'], video_id,
                    item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
                formats)

    def _is_valid_url(self, url, video_id, item='video'):
        url = self._proto_relative_url(url, scheme='http:')
        # For now assume non HTTP(S) URLs are always valid
        if not (url.startswith('http://') or url.startswith('https://')):
            return True
        try:
            self._request_webpage(url, video_id, 'Checking %s URL' % item)
            return True
        except ExtractorError as e:
            if isinstance(e.cause, compat_urllib_error.URLError):
                self.to_screen(
                    '%s: %s URL is invalid, skipping' % (video_id, item))
                return False
            raise

    def http_scheme(self):
        """ Either "http:" or "https:", depending on the user's preferences """
        return (
            'http:'
            if self._downloader.params.get('prefer_insecure', False)
            else 'https:')

    def _proto_relative_url(self, url, scheme=None):
        if url is None:
            return url
        if url.startswith('//'):
            if scheme is None:
                scheme = self.http_scheme()
            return scheme + url
        else:
            return url

    def _sleep(self, timeout, video_id, msg_template=None):
        if msg_template is None:
            msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
        msg = msg_template % {'video_id': video_id, 'timeout': timeout}
        self.to_screen(msg)
        time.sleep(timeout)

    def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
                             transform_source=lambda s: fix_xml_ampersands(s).strip(),
                             fatal=True):
        manifest = self._download_xml(
            manifest_url, video_id, 'Downloading f4m manifest',
            'Unable to download f4m manifest',
            # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
            # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
            transform_source=transform_source,
            fatal=fatal)

        if manifest is False:
            return []

        formats = []
        manifest_version = '1.0'
        media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
        if not media_nodes:
            manifest_version = '2.0'
            media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
        base_url = xpath_text(
            manifest, ['{http://ns.adobe.com/f4m/1.0}baseURL', '{http://ns.adobe.com/f4m/2.0}baseURL'],
            'base URL', default=None)
        if base_url:
            base_url = base_url.strip()
        for i, media_el in enumerate(media_nodes):
            if manifest_version == '2.0':
                media_url = media_el.attrib.get('href') or media_el.attrib.get('url')
                if not media_url:
                    continue
                manifest_url = (
                    media_url if media_url.startswith('http://') or media_url.startswith('https://')
                    else ((base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
                # If media_url is itself a f4m manifest do the recursive extraction
                # since bitrates in parent manifest (this one) and media_url manifest
                # may differ leading to inability to resolve the format by requested
                # bitrate in f4m downloader
                if determine_ext(manifest_url) == 'f4m':
                    formats.extend(self._extract_f4m_formats(
                        manifest_url, video_id, preference, f4m_id, fatal=fatal))
                    continue
            tbr = int_or_none(media_el.attrib.get('bitrate'))
            formats.append({
                'format_id': '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)])),
                'url': manifest_url,
                'ext': 'flv',
                'tbr': tbr,
                'width': int_or_none(media_el.attrib.get('width')),
                'height': int_or_none(media_el.attrib.get('height')),
                'preference': preference,
            })
        self._sort_formats(formats)

        return formats

    def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
                              entry_protocol='m3u8', preference=None,
                              m3u8_id=None, note=None, errnote=None,
                              fatal=True):

        formats = [{
            'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
            'url': m3u8_url,
            'ext': ext,
            'protocol': 'm3u8',
            'preference': preference - 1 if preference else -1,
            'resolution': 'multiple',
            'format_note': 'Quality selection URL',
        }]

        format_url = lambda u: (
            u
            if re.match(r'^https?://', u)
            else compat_urlparse.urljoin(m3u8_url, u))

        res = self._download_webpage_handle(
            m3u8_url, video_id,
            note=note or 'Downloading m3u8 information',
            errnote=errnote or 'Failed to download m3u8 information',
            fatal=fatal)
        if res is False:
            return []
        m3u8_doc, urlh = res
        m3u8_url = urlh.geturl()
        last_info = None
        last_media = None
        kv_rex = re.compile(
            r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)')
        for line in m3u8_doc.splitlines():
            if line.startswith('#EXT-X-STREAM-INF:'):
                last_info = {}
                for m in kv_rex.finditer(line):
                    v = m.group('val')
                    if v.startswith('"'):
                        v = v[1:-1]
                    last_info[m.group('key')] = v
            elif line.startswith('#EXT-X-MEDIA:'):
                last_media = {}
                for m in kv_rex.finditer(line):
                    v = m.group('val')
                    if v.startswith('"'):
                        v = v[1:-1]
                    last_media[m.group('key')] = v
            elif line.startswith('#') or not line.strip():
                continue
            else:
                if last_info is None:
                    formats.append({'url': format_url(line)})
                    continue
                tbr = int_or_none(last_info.get('BANDWIDTH'), scale=1000)
                format_id = []
                if m3u8_id:
                    format_id.append(m3u8_id)
                last_media_name = last_media.get('NAME') if last_media and last_media.get('TYPE') != 'SUBTITLES' else None
                format_id.append(last_media_name if last_media_name else '%d' % (tbr if tbr else len(formats)))
                f = {
                    'format_id': '-'.join(format_id),
                    'url': format_url(line.strip()),
                    'tbr': tbr,
                    'ext': ext,
                    'protocol': entry_protocol,
                    'preference': preference,
                }
                codecs = last_info.get('CODECS')
                if codecs:
                    # TODO: the video codec does not always necessarily come first
                    va_codecs = codecs.split(',')
                    if va_codecs[0]:
                        f['vcodec'] = va_codecs[0].partition('.')[0]
                    if len(va_codecs) > 1 and va_codecs[1]:
                        f['acodec'] = va_codecs[1].partition('.')[0]
                resolution = last_info.get('RESOLUTION')
                if resolution:
                    width_str, height_str = resolution.split('x')
                    f['width'] = int(width_str)
                    f['height'] = int(height_str)
                if last_media is not None:
                    f['m3u8_media'] = last_media
                    last_media = None
                formats.append(f)
                last_info = {}
        self._sort_formats(formats)
        return formats

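    # Illustrative sketch (hypothetical manifest): the master-playlist lines
    # the parser above consumes - an #EXT-X-STREAM-INF attribute list
    # followed by the variant URL on the next line.
    #
    #     #EXT-X-STREAM-INF:BANDWIDTH=1280000,RESOLUTION=1280x720,CODECS="avc1.4d401f,mp4a.40.2"
    #     http://example.com/720p.m3u8
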
    @staticmethod
    def _xpath_ns(path, namespace=None):
        if not namespace:
            return path
        out = []
        for c in path.split('/'):
            if not c or c == '.':
                out.append(c)
            else:
                out.append('{%s}%s' % (namespace, c))
        return '/'.join(out)

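    # Illustrative sketch: qualifying each path step with an ElementTree
    # namespace, as used for the SMIL documents below.
    #
    #     InfoExtractor._xpath_ns('./head/meta', 'http://www.w3.org/ns/SMIL')
    #     # -> './{http://www.w3.org/ns/SMIL}head/{http://www.w3.org/ns/SMIL}meta'
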
    def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None):
        smil = self._download_smil(smil_url, video_id, fatal=fatal)

        if smil is False:
            assert not fatal
            return []

        namespace = self._parse_smil_namespace(smil)

        return self._parse_smil_formats(
            smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)

    def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
        smil = self._download_smil(smil_url, video_id, fatal=fatal)
        if smil is False:
            return {}
        return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)

    def _download_smil(self, smil_url, video_id, fatal=True):
        return self._download_xml(
            smil_url, video_id, 'Downloading SMIL file',
            'Unable to download SMIL file', fatal=fatal)

    def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
        namespace = self._parse_smil_namespace(smil)

        formats = self._parse_smil_formats(
            smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
        subtitles = self._parse_smil_subtitles(smil, namespace=namespace)

        video_id = os.path.splitext(url_basename(smil_url))[0]
        title = None
        description = None
        upload_date = None
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            name = meta.attrib.get('name')
            content = meta.attrib.get('content')
            if not name or not content:
                continue
            if not title and name == 'title':
                title = content
            elif not description and name in ('description', 'abstract'):
                description = content
            elif not upload_date and name == 'date':
                upload_date = unified_strdate(content)

        thumbnails = [{
            'id': image.get('type'),
            'url': image.get('src'),
            'width': int_or_none(image.get('width')),
            'height': int_or_none(image.get('height')),
        } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]

        return {
            'id': video_id,
            'title': title or video_id,
            'description': description,
            'upload_date': upload_date,
            'thumbnails': thumbnails,
            'formats': formats,
            'subtitles': subtitles,
        }

    def _parse_smil_namespace(self, smil):
        return self._search_regex(
            r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)

    def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
        base = smil_url
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            b = meta.get('base') or meta.get('httpBase')
            if b:
                base = b
                break

        formats = []
        rtmp_count = 0
        http_count = 0

        videos = smil.findall(self._xpath_ns('.//video', namespace))
        for video in videos:
            src = video.get('src')
            if not src:
                continue

            bitrate = float_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
            filesize = int_or_none(video.get('size') or video.get('fileSize'))
            width = int_or_none(video.get('width'))
            height = int_or_none(video.get('height'))
            proto = video.get('proto')
            ext = video.get('ext')
            src_ext = determine_ext(src)
            streamer = video.get('streamer') or base

            if proto == 'rtmp' or streamer.startswith('rtmp'):
                rtmp_count += 1
                formats.append({
                    'url': streamer,
                    'play_path': src,
                    'ext': 'flv',
                    'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })
                if transform_rtmp_url:
                    streamer, src = transform_rtmp_url(streamer, src)
                    formats[-1].update({
                        'url': streamer,
                        'play_path': src,
                    })
                continue

            src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)

            if proto == 'm3u8' or src_ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False))
                continue

            if src_ext == 'f4m':
                f4m_url = src_url
                if not f4m_params:
                    f4m_params = {
                        'hdcore': '3.2.0',
                        'plugin': 'flowplayer-3.2.0.1',
                    }
                f4m_url += '&' if '?' in f4m_url else '?'
                f4m_url += compat_urllib_parse.urlencode(f4m_params)
                formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
                continue

            if src_url.startswith('http') and self._is_valid_url(src_url, video_id):
                http_count += 1
                formats.append({
                    'url': src_url,
                    'ext': ext or src_ext or 'flv',
                    'format_id': 'http-%d' % (bitrate or http_count),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })
                continue

        self._sort_formats(formats)

        return formats

    def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
        subtitles = {}
        for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
            src = textstream.get('src')
            if not src:
                continue
            ext = textstream.get('ext') or determine_ext(src)
            if not ext:
                type_ = textstream.get('type')
                SUBTITLES_TYPES = {
                    'text/vtt': 'vtt',
                    'text/srt': 'srt',
                    'application/smptett+xml': 'tt',
                }
                if type_ in SUBTITLES_TYPES:
                    ext = SUBTITLES_TYPES[type_]
            lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
            subtitles.setdefault(lang, []).append({
                'url': src,
                'ext': ext,
            })
        return subtitles

    def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
        xspf = self._download_xml(
            playlist_url, playlist_id, 'Downloading xspf playlist',
            'Unable to download xspf manifest', fatal=fatal)
        if xspf is False:
            return []
        return self._parse_xspf(xspf, playlist_id)

    def _parse_xspf(self, playlist, playlist_id):
        NS_MAP = {
            'xspf': 'http://xspf.org/ns/0/',
            's1': 'http://static.streamone.nl/player/ns/0',
        }

        entries = []
        for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
            title = xpath_text(
                track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
            description = xpath_text(
                track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
            thumbnail = xpath_text(
                track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
            duration = float_or_none(
                xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)

            formats = [{
                'url': location.text,
                'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
                'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
                'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
            } for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
            self._sort_formats(formats)

            entries.append({
                'id': playlist_id,
                'title': title,
                'description': description,
                'thumbnail': thumbnail,
                'duration': duration,
                'formats': formats,
            })
        return entries

    def _live_title(self, name):
        """ Generate the title for a live video """
        now = datetime.datetime.now()
        now_str = now.strftime("%Y-%m-%d %H:%M")
        return name + ' ' + now_str

    def _int(self, v, name, fatal=False, **kwargs):
        res = int_or_none(v, **kwargs)
        if res is None:
            msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
            if fatal:
                raise ExtractorError(msg)
            else:
                self._downloader.report_warning(msg)
        return res

    def _float(self, v, name, fatal=False, **kwargs):
        res = float_or_none(v, **kwargs)
        if res is None:
            msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
            if fatal:
                raise ExtractorError(msg)
            else:
                self._downloader.report_warning(msg)
        return res

    def _set_cookie(self, domain, name, value, expire_time=None):
        cookie = compat_cookiejar.Cookie(
            0, name, value, None, None, domain, None,
            None, '/', True, False, expire_time, '', None, None, None)
        self._downloader.cookiejar.set_cookie(cookie)

    def _get_cookies(self, url):
        """ Return a compat_cookies.SimpleCookie with the cookies for the url """
        req = sanitized_Request(url)
        self._downloader.cookiejar.add_cookie_header(req)
        return compat_cookies.SimpleCookie(req.get_header('Cookie'))

    def get_testcases(self, include_onlymatching=False):
        t = getattr(self, '_TEST', None)
        if t:
            assert not hasattr(self, '_TESTS'), \
                '%s has _TEST and _TESTS' % type(self).__name__
            tests = [t]
        else:
            tests = getattr(self, '_TESTS', [])
        for t in tests:
            if not include_onlymatching and t.get('only_matching', False):
                continue
            t['name'] = type(self).__name__[:-len('IE')]
            yield t

    def is_suitable(self, age_limit):
        """ Test whether the extractor is generally suitable for the given
        age limit (i.e. pornographic sites are not, all others usually are) """

        any_restricted = False
        for tc in self.get_testcases(include_onlymatching=False):
            if 'playlist' in tc:
                tc = tc['playlist'][0]
            is_restricted = age_restricted(
                tc.get('info_dict', {}).get('age_limit'), age_limit)
            if not is_restricted:
                return True
            any_restricted = any_restricted or is_restricted
        return not any_restricted

    def extract_subtitles(self, *args, **kwargs):
        if (self._downloader.params.get('writesubtitles', False) or
                self._downloader.params.get('listsubtitles')):
            return self._get_subtitles(*args, **kwargs)
        return {}

    def _get_subtitles(self, *args, **kwargs):
        raise NotImplementedError("This method must be implemented by subclasses")

    @staticmethod
    def _merge_subtitle_items(subtitle_list1, subtitle_list2):
        """ Merge subtitle items for one language. Items with duplicated URLs
        will be dropped. """
        list1_urls = set([item['url'] for item in subtitle_list1])
        ret = list(subtitle_list1)
        ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
        return ret

    @classmethod
    def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
        """ Merge two subtitle dictionaries, language by language. """
        ret = dict(subtitle_dict1)
        for lang in subtitle_dict2:
            ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
        return ret

    def extract_automatic_captions(self, *args, **kwargs):
        if (self._downloader.params.get('writeautomaticsub', False) or
                self._downloader.params.get('listsubtitles')):
            return self._get_automatic_captions(*args, **kwargs)
        return {}

    def _get_automatic_captions(self, *args, **kwargs):
        raise NotImplementedError("This method must be implemented by subclasses")


class SearchInfoExtractor(InfoExtractor):
    """
    Base class for paged search queries extractors.
    They accept URLs in the format _SEARCH_KEY(|all|[1-9][0-9]*):{query}
    Instances should define _SEARCH_KEY and _MAX_RESULTS.
    """

    @classmethod
    def _make_valid_url(cls):
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY

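    # Illustrative sketch (hypothetical key): for _SEARCH_KEY = 'examplesearch',
    # the accepted queries would look like
    #
    #     examplesearch:some query        -> first result
    #     examplesearch5:some query       -> first 5 results
    #     examplesearchall:some query     -> up to _MAX_RESULTS results
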
    @classmethod
    def suitable(cls, url):
        return re.match(cls._make_valid_url(), url) is not None

    def _real_extract(self, query):
        mobj = re.match(self._make_valid_url(), query)
        if mobj is None:
            raise ExtractorError('Invalid search query "%s"' % query)

        prefix = mobj.group('prefix')
        query = mobj.group('query')
        if prefix == '':
            return self._get_n_results(query, 1)
        elif prefix == 'all':
            return self._get_n_results(query, self._MAX_RESULTS)
        else:
            n = int(prefix)
            if n <= 0:
                raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
            elif n > self._MAX_RESULTS:
                self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
                n = self._MAX_RESULTS
            return self._get_n_results(query, n)

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        raise NotImplementedError("This method must be implemented by subclasses")

    @property
    def SEARCH_KEY(self):
        return self._SEARCH_KEY