from .utils import *
-class InfoExtractor(object):
- """Information Extractor class.
-
- Information extractors are the classes that, given a URL, extract
- information about the video (or videos) the URL refers to. This
- information includes the real video URL, the video title, author and
- others. The information is stored in a dictionary which is then
- passed to the FileDownloader. The FileDownloader processes this
- information possibly downloading the video to the file system, among
- other possible outcomes.
-
- The dictionaries must include the following fields:
-
- id: Video identifier.
- url: Final video URL.
- title: Video title, unescaped.
- ext: Video filename extension.
-
- The following fields are optional:
-
- format: The video format, defaults to ext (used for --get-format)
- thumbnail: Full URL to a video thumbnail image.
- description: One-line video description.
- uploader: Full name of the video uploader.
- upload_date: Video upload date (YYYYMMDD).
- uploader_id: Nickname or id of the video uploader.
- location: Physical location of the video.
- player_url: SWF Player URL (used for rtmpdump).
- subtitles: The subtitle file contents.
- urlhandle: [internal] The urlHandle to be used to download the file,
-                    as returned by urllib.request.urlopen
-
- The fields should all be Unicode strings.
-
- Subclasses of this one should re-define the _real_initialize() and
- _real_extract() methods and define a _VALID_URL regexp.
- Probably, they should also be added to the list of extractors.
-
- _real_extract() must return a *list* of information dictionaries as
- described above.
-
- Finally, the _WORKING attribute should be set to False for broken IEs
- in order to warn the users and skip the tests.
- """
-
- _ready = False
- _downloader = None
- _WORKING = True
-
- def __init__(self, downloader=None):
- """Constructor. Receives an optional downloader."""
- self._ready = False
- self.set_downloader(downloader)
-
- @classmethod
- def suitable(cls, url):
- """Receives a URL and returns True if suitable for this IE."""
- return re.match(cls._VALID_URL, url) is not None
-
- @classmethod
- def working(cls):
- """Getter method for _WORKING."""
- return cls._WORKING
-
- def initialize(self):
- """Initializes an instance (authentication, etc)."""
- if not self._ready:
- self._real_initialize()
- self._ready = True
-
- def extract(self, url):
- """Extracts URL information and returns it in list of dicts."""
- self.initialize()
- return self._real_extract(url)
-
- def set_downloader(self, downloader):
- """Sets the downloader for this IE."""
- self._downloader = downloader
-
- def _real_initialize(self):
- """Real initialization process. Redefine in subclasses."""
- pass
-
- def _real_extract(self, url):
- """Real extraction process. Redefine in subclasses."""
- pass
-
- @property
- def IE_NAME(self):
- return type(self).__name__[:-2]
-
- def _request_webpage(self, url_or_request, video_id, note=None, errnote=None):
- """ Returns the response handle """
- if note is None:
- self.report_download_webpage(video_id)
- elif note is not False:
- self.to_screen(u'%s: %s' % (video_id, note))
- try:
- return compat_urllib_request.urlopen(url_or_request)
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- if errnote is None:
- errnote = u'Unable to download webpage'
- raise ExtractorError(u'%s: %s' % (errnote, compat_str(err)), sys.exc_info()[2])
-
- def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None):
- """ Returns a tuple (page content as string, URL handle) """
- urlh = self._request_webpage(url_or_request, video_id, note, errnote)
- content_type = urlh.headers.get('Content-Type', '')
- m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
- if m:
- encoding = m.group(1)
- else:
- encoding = 'utf-8'
- webpage_bytes = urlh.read()
- if self._downloader.params.get('dump_intermediate_pages', False):
- try:
- url = url_or_request.get_full_url()
- except AttributeError:
- url = url_or_request
- self.to_screen(u'Dumping request to ' + url)
- dump = base64.b64encode(webpage_bytes).decode('ascii')
- self._downloader.to_screen(dump)
- content = webpage_bytes.decode(encoding, 'replace')
- return (content, urlh)
-
- def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
- """ Returns the data of the page as a string """
- return self._download_webpage_handle(url_or_request, video_id, note, errnote)[0]
-
- def to_screen(self, msg):
- """Print msg to screen, prefixing it with '[ie_name]'"""
- self._downloader.to_screen(u'[%s] %s' % (self.IE_NAME, msg))
-
- def report_extraction(self, id_or_name):
- """Report information extraction."""
- self.to_screen(u'%s: Extracting information' % id_or_name)
-
- def report_download_webpage(self, video_id):
- """Report webpage download."""
- self.to_screen(u'%s: Downloading webpage' % video_id)
-
- def report_age_confirmation(self):
- """Report attempt to confirm age."""
- self.to_screen(u'Confirming age')
-
- #Methods for following #608
- #They set the correct value of the '_type' key
- def video_result(self, video_info):
- """Returns a video"""
- video_info['_type'] = 'video'
- return video_info
- def url_result(self, url, ie=None):
- """Returns a url that points to a page that should be processed"""
- #TODO: ie should be the class used for getting the info
- video_info = {'_type': 'url',
- 'url': url,
- 'ie_key': ie}
- return video_info
- def playlist_result(self, entries, playlist_id=None, playlist_title=None):
- """Returns a playlist"""
- video_info = {'_type': 'playlist',
- 'entries': entries}
- if playlist_id:
- video_info['id'] = playlist_id
- if playlist_title:
- video_info['title'] = playlist_title
- return video_info
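These three helpers differ only in the '_type' key they set; the shapes they produce look roughly like this (values are illustrative):

    ie.url_result('http://www.youtube.com/watch?v=BaW_jenozKc', ie='Youtube')
    # -> {'_type': 'url', 'url': '...', 'ie_key': 'Youtube'}

    ie.playlist_result([entry_a, entry_b], playlist_id='PL123', playlist_title='My list')
    # -> {'_type': 'playlist', 'entries': [entry_a, entry_b],
    #     'id': 'PL123', 'title': 'My list'}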
-
- def _search_regex(self, pattern, string, name, default=None, fatal=True, flags=0):
- """
-        Perform a regex search on the given string, using a single pattern or
-        a list of patterns, and return the first matching group.
-        On failure, return the default value, report a warning, or raise an
-        ExtractorError, depending on default and fatal, naming the field.
- """
- if isinstance(pattern, (str, compat_str, compiled_regex_type)):
- mobj = re.search(pattern, string, flags)
- else:
- for p in pattern:
- mobj = re.search(p, string, flags)
- if mobj: break
-
- if sys.stderr.isatty() and os.name != 'nt':
- _name = u'\033[0;34m%s\033[0m' % name
- else:
- _name = name
-
- if mobj:
- # return the first matching group
- return next(g for g in mobj.groups() if g is not None)
- elif default is not None:
- return default
- elif fatal:
- raise ExtractorError(u'Unable to extract %s' % _name)
- else:
- self._downloader.report_warning(u'unable to extract %s; '
- u'please report this issue on GitHub.' % _name)
- return None
-
- def _html_search_regex(self, pattern, string, name, default=None, fatal=True, flags=0):
- """
- Like _search_regex, but strips HTML tags and unescapes entities.
- """
- res = self._search_regex(pattern, string, name, default, fatal, flags)
- if res:
- return clean_html(res).strip()
- else:
- return res
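A sketch of how these two helpers are typically called from a subclass; the patterns and field name here are made up:

    uploader = self._search_regex(
        [r'<span class="owner">([^<]+)</span>',   # primary pattern
         r'rel="author"[^>]*>([^<]+)<'],          # fallback pattern
        webpage, u'uploader',
        fatal=False)  # warn and return None instead of raising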
-
-class SearchInfoExtractor(InfoExtractor):
- """
- Base class for paged search queries extractors.
- They accept urls in the format _SEARCH_KEY(|all|[0-9]):{query}
- Instances should define _SEARCH_KEY and _MAX_RESULTS.
- """
-
- @classmethod
- def _make_valid_url(cls):
- return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
-
- @classmethod
- def suitable(cls, url):
- return re.match(cls._make_valid_url(), url) is not None
-
- def _real_extract(self, query):
- mobj = re.match(self._make_valid_url(), query)
- if mobj is None:
- raise ExtractorError(u'Invalid search query "%s"' % query)
-
- prefix = mobj.group('prefix')
- query = mobj.group('query')
- if prefix == '':
- return self._get_n_results(query, 1)
- elif prefix == 'all':
- return self._get_n_results(query, self._MAX_RESULTS)
- else:
- n = int(prefix)
- if n <= 0:
- raise ExtractorError(u'invalid download number %s for query "%s"' % (n, query))
- elif n > self._MAX_RESULTS:
- self._downloader.report_warning(u'%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
- n = self._MAX_RESULTS
- return self._get_n_results(query, n)
-
- def _get_n_results(self, query, n):
- """Get a specified number of results for a query"""
- raise NotImplementedError("This method must be implemented by sublclasses")
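For a hypothetical subclass with _SEARCH_KEY = 'ytsearch', the three prefix forms accepted by _real_extract() map to result counts as follows:

    'ytsearch:python tutorial'       # empty prefix -> 1 result
    'ytsearch5:python tutorial'      # number       -> 5 results (capped at _MAX_RESULTS)
    'ytsearchall:python tutorial'    # 'all'        -> _MAX_RESULTS results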
-
-
-class YoutubeIE(InfoExtractor):
- """Information extractor for youtube.com."""
-
- _VALID_URL = r"""^
- (
- (?:https?://)? # http(s):// (optional)
- (?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/|
- tube\.majestyc\.net/) # the various hostnames, with wildcard subdomains
- (?:.*?\#/)? # handle anchor (#/) redirect urls
- (?: # the various things that can precede the ID:
- (?:(?:v|embed|e)/) # v/ or embed/ or e/
- |(?: # or the v= param in all its forms
- (?:watch(?:_popup)?(?:\.php)?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
- (?:\?|\#!?) # the params delimiter ? or # or #!
- (?:.*?&)? # any other preceding param (like /?s=tuff&v=xxxx)
- v=
- )
- )? # optional -> youtube.com/xxxx is OK
- )? # all until now is optional -> you can pass the naked ID
-                         ([0-9A-Za-z_-]+)                                     # here it is! the YouTube video ID
- (?(1).+)? # if we found the ID, everything can follow
- $"""
- _LANG_URL = r'https://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
- _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
- _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
- _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
- _NETRC_MACHINE = 'youtube'
- # Listed in order of quality
- _available_formats = ['38', '37', '46', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13']
- _available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13']
- _video_extensions = {
- '13': '3gp',
- '17': 'mp4',
- '18': 'mp4',
- '22': 'mp4',
- '37': 'mp4',
- '38': 'video', # You actually don't know if this will be MOV, AVI or whatever
- '43': 'webm',
- '44': 'webm',
- '45': 'webm',
- '46': 'webm',
- }
- _video_dimensions = {
- '5': '240x400',
- '6': '???',
- '13': '???',
- '17': '144x176',
- '18': '360x640',
- '22': '720x1280',
- '34': '360x640',
- '35': '480x854',
- '37': '1080x1920',
- '38': '3072x4096',
- '43': '360x640',
- '44': '480x854',
- '45': '720x1280',
- '46': '1080x1920',
- }
- IE_NAME = u'youtube'
-
- @classmethod
- def suitable(cls, url):
- """Receives a URL and returns True if suitable for this IE."""
- if YoutubePlaylistIE.suitable(url): return False
- return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
-
- def report_lang(self):
- """Report attempt to set language."""
- self.to_screen(u'Setting language')
-
- def report_login(self):
- """Report attempt to log in."""
- self.to_screen(u'Logging in')
-
- def report_video_webpage_download(self, video_id):
- """Report attempt to download video webpage."""
- self.to_screen(u'%s: Downloading video webpage' % video_id)
-
- def report_video_info_webpage_download(self, video_id):
- """Report attempt to download video info webpage."""
- self.to_screen(u'%s: Downloading video info webpage' % video_id)
-
- def report_video_subtitles_download(self, video_id):
- """Report attempt to download video info webpage."""
- self.to_screen(u'%s: Checking available subtitles' % video_id)
-
- def report_video_subtitles_request(self, video_id, sub_lang, format):
- """Report attempt to download video info webpage."""
- self.to_screen(u'%s: Downloading video subtitles for %s.%s' % (video_id, sub_lang, format))
-
- def report_video_subtitles_available(self, video_id, sub_lang_list):
- """Report available subtitles."""
- sub_lang = ",".join(list(sub_lang_list.keys()))
- self.to_screen(u'%s: Available subtitles for video: %s' % (video_id, sub_lang))
-
- def report_information_extraction(self, video_id):
- """Report attempt to extract video information."""
- self.to_screen(u'%s: Extracting video information' % video_id)
-
- def report_unavailable_format(self, video_id, format):
- """Report extracted video URL."""
- self.to_screen(u'%s: Format %s not available' % (video_id, format))
-
- def report_rtmp_download(self):
- """Indicate the download will use the RTMP protocol."""
- self.to_screen(u'RTMP download detected')
-
- def _get_available_subtitles(self, video_id):
- self.report_video_subtitles_download(video_id)
- request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
- try:
- sub_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- return (u'unable to download video subtitles: %s' % compat_str(err), None)
- sub_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)
- sub_lang_list = dict((l[1], l[0]) for l in sub_lang_list)
- if not sub_lang_list:
- return (u'video doesn\'t have subtitles', None)
- return sub_lang_list
-
- def _list_available_subtitles(self, video_id):
- sub_lang_list = self._get_available_subtitles(video_id)
- self.report_video_subtitles_available(video_id, sub_lang_list)
-
- def _request_subtitle(self, sub_lang, sub_name, video_id, format):
- """
- Return tuple:
- (error_message, sub_lang, sub)
- """
- self.report_video_subtitles_request(video_id, sub_lang, format)
- params = compat_urllib_parse.urlencode({
- 'lang': sub_lang,
- 'name': sub_name,
- 'v': video_id,
- 'fmt': format,
- })
- url = 'http://www.youtube.com/api/timedtext?' + params
- try:
- sub = compat_urllib_request.urlopen(url).read().decode('utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- return (u'unable to download video subtitles: %s' % compat_str(err), None, None)
- if not sub:
- return (u'Did not fetch video subtitles', None, None)
- return (None, sub_lang, sub)
-
- def _request_automatic_caption(self, video_id, webpage):
- """We need the webpage for getting the captions url, pass it as an
- argument to speed up the process."""
- sub_lang = self._downloader.params.get('subtitleslang')
- sub_format = self._downloader.params.get('subtitlesformat')
- self.to_screen(u'%s: Looking for automatic captions' % video_id)
- mobj = re.search(r';ytplayer.config = ({.*?});', webpage)
- err_msg = u'Couldn\'t find automatic captions for "%s"' % sub_lang
- if mobj is None:
- return [(err_msg, None, None)]
- player_config = json.loads(mobj.group(1))
- try:
- args = player_config[u'args']
- caption_url = args[u'ttsurl']
- timestamp = args[u'timestamp']
- params = compat_urllib_parse.urlencode({
- 'lang': 'en',
- 'tlang': sub_lang,
- 'fmt': sub_format,
- 'ts': timestamp,
- 'kind': 'asr',
- })
- subtitles_url = caption_url + '&' + params
- sub = self._download_webpage(subtitles_url, video_id, u'Downloading automatic captions')
- return [(None, sub_lang, sub)]
- except KeyError:
- return [(err_msg, None, None)]
-
- def _extract_subtitle(self, video_id):
- """
- Return a list with a tuple:
- [(error_message, sub_lang, sub)]
- """
- sub_lang_list = self._get_available_subtitles(video_id)
- sub_format = self._downloader.params.get('subtitlesformat')
- if isinstance(sub_lang_list,tuple): #There was some error, it didn't get the available subtitles
- return [(sub_lang_list[0], None, None)]
- if self._downloader.params.get('subtitleslang', False):
- sub_lang = self._downloader.params.get('subtitleslang')
- elif 'en' in sub_lang_list:
- sub_lang = 'en'
- else:
- sub_lang = list(sub_lang_list.keys())[0]
- if not sub_lang in sub_lang_list:
- return [(u'no closed captions found in the specified language "%s"' % sub_lang, None, None)]
-
- subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
- return [subtitle]
-
- def _extract_all_subtitles(self, video_id):
- sub_lang_list = self._get_available_subtitles(video_id)
- sub_format = self._downloader.params.get('subtitlesformat')
- if isinstance(sub_lang_list,tuple): #There was some error, it didn't get the available subtitles
- return [(sub_lang_list[0], None, None)]
- subtitles = []
- for sub_lang in sub_lang_list:
- subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
- subtitles.append(subtitle)
- return subtitles
-
- def _print_formats(self, formats):
- print('Available formats:')
- for x in formats:
- print('%s\t:\t%s\t[%s]' %(x, self._video_extensions.get(x, 'flv'), self._video_dimensions.get(x, '???')))
-
- def _real_initialize(self):
- if self._downloader is None:
- return
-
- username = None
- password = None
- downloader_params = self._downloader.params
-
- # Attempt to use provided username and password or .netrc data
- if downloader_params.get('username', None) is not None:
- username = downloader_params['username']
- password = downloader_params['password']
- elif downloader_params.get('usenetrc', False):
- try:
- info = netrc.netrc().authenticators(self._NETRC_MACHINE)
- if info is not None:
- username = info[0]
- password = info[2]
- else:
- raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
- except (IOError, netrc.NetrcParseError) as err:
- self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))
- return
-
- # Set language
- request = compat_urllib_request.Request(self._LANG_URL)
- try:
- self.report_lang()
- compat_urllib_request.urlopen(request).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_warning(u'unable to set language: %s' % compat_str(err))
- return
-
- # No authentication to be performed
- if username is None:
- return
-
- request = compat_urllib_request.Request(self._LOGIN_URL)
- try:
- login_page = compat_urllib_request.urlopen(request).read().decode('utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_warning(u'unable to fetch login page: %s' % compat_str(err))
- return
-
- galx = None
- dsh = None
- match = re.search(re.compile(r'<input.+?name="GALX".+?value="(.+?)"', re.DOTALL), login_page)
- if match:
- galx = match.group(1)
-
- match = re.search(re.compile(r'<input.+?name="dsh".+?value="(.+?)"', re.DOTALL), login_page)
- if match:
- dsh = match.group(1)
-
- # Log in
- login_form_strs = {
- u'continue': u'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
- u'Email': username,
- u'GALX': galx,
- u'Passwd': password,
- u'PersistentCookie': u'yes',
- u'_utf8': u'霱',
- u'bgresponse': u'js_disabled',
- u'checkConnection': u'',
- u'checkedDomains': u'youtube',
- u'dnConn': u'',
- u'dsh': dsh,
- u'pstMsg': u'0',
- u'rmShown': u'1',
- u'secTok': u'',
- u'signIn': u'Sign in',
- u'timeStmp': u'',
- u'service': u'youtube',
- u'uilel': u'3',
- u'hl': u'en_US',
- }
- # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
- # chokes on unicode
- login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k,v in login_form_strs.items())
- login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')
- request = compat_urllib_request.Request(self._LOGIN_URL, login_data)
- try:
- self.report_login()
- login_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
- if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
- self._downloader.report_warning(u'unable to log in: bad username or password')
- return
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_warning(u'unable to log in: %s' % compat_str(err))
- return
-
- # Confirm age
- age_form = {
- 'next_url': '/',
- 'action_confirm': 'Confirm',
- }
- request = compat_urllib_request.Request(self._AGE_URL, compat_urllib_parse.urlencode(age_form))
- try:
- self.report_age_confirmation()
- age_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- raise ExtractorError(u'Unable to confirm age: %s' % compat_str(err))
-
- def _extract_id(self, url):
- mobj = re.match(self._VALID_URL, url, re.VERBOSE)
- if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
- video_id = mobj.group(2)
- return video_id
-
- def _real_extract(self, url):
- # Extract original video URL from URL with redirection, like age verification, using next_url parameter
- mobj = re.search(self._NEXT_URL_RE, url)
- if mobj:
- url = 'https://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
- video_id = self._extract_id(url)
-
- # Get video webpage
- self.report_video_webpage_download(video_id)
- url = 'https://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
- request = compat_urllib_request.Request(url)
- try:
- video_webpage_bytes = compat_urllib_request.urlopen(request).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- raise ExtractorError(u'Unable to download video webpage: %s' % compat_str(err))
-
- video_webpage = video_webpage_bytes.decode('utf-8', 'ignore')
-
- # Attempt to extract SWF player URL
- mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
- if mobj is not None:
- player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
- else:
- player_url = None
-
- # Get video info
- self.report_video_info_webpage_download(video_id)
- for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
- video_info_url = ('https://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
- % (video_id, el_type))
- video_info_webpage = self._download_webpage(video_info_url, video_id,
- note=False,
- errnote='unable to download video info webpage')
- video_info = compat_parse_qs(video_info_webpage)
- if 'token' in video_info:
- break
- if 'token' not in video_info:
- if 'reason' in video_info:
- raise ExtractorError(u'YouTube said: %s' % video_info['reason'][0])
- else:
- raise ExtractorError(u'"token" parameter not in video info for unknown reason')
-
- # Check for "rental" videos
- if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
- raise ExtractorError(u'"rental" videos not supported')
-
- # Start extracting information
- self.report_information_extraction(video_id)
-
- # uploader
- if 'author' not in video_info:
- raise ExtractorError(u'Unable to extract uploader name')
- video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
-
- # uploader_id
- video_uploader_id = None
- mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
- if mobj is not None:
- video_uploader_id = mobj.group(1)
- else:
- self._downloader.report_warning(u'unable to extract uploader nickname')
-
- # title
- if 'title' not in video_info:
- raise ExtractorError(u'Unable to extract video title')
- video_title = compat_urllib_parse.unquote_plus(video_info['title'][0])
-
- # thumbnail image
- if 'thumbnail_url' not in video_info:
- self._downloader.report_warning(u'unable to extract video thumbnail')
- video_thumbnail = ''
- else: # don't panic if we can't find it
- video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])
-
- # upload date
- upload_date = None
- mobj = re.search(r'id="eow-date.*?>(.*?)</span>', video_webpage, re.DOTALL)
- if mobj is not None:
- upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
- upload_date = unified_strdate(upload_date)
-
- # description
- video_description = get_element_by_id("eow-description", video_webpage)
- if video_description:
- video_description = clean_html(video_description)
- else:
- fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
- if fd_mobj:
- video_description = unescapeHTML(fd_mobj.group(1))
- else:
- video_description = u''
-
- # subtitles
- video_subtitles = None
-
- if self._downloader.params.get('writesubtitles', False):
- video_subtitles = self._extract_subtitle(video_id)
- if video_subtitles:
- (sub_error, sub_lang, sub) = video_subtitles[0]
- if sub_error:
- # We try with the automatic captions
- video_subtitles = self._request_automatic_caption(video_id, video_webpage)
- (sub_error_auto, sub_lang, sub) = video_subtitles[0]
- if sub is not None:
- pass
- else:
- # We report the original error
- self._downloader.report_error(sub_error)
-
- if self._downloader.params.get('allsubtitles', False):
- video_subtitles = self._extract_all_subtitles(video_id)
- for video_subtitle in video_subtitles:
- (sub_error, sub_lang, sub) = video_subtitle
- if sub_error:
- self._downloader.report_error(sub_error)
-
- if self._downloader.params.get('listsubtitles', False):
- sub_lang_list = self._list_available_subtitles(video_id)
- return
-
- if 'length_seconds' not in video_info:
- self._downloader.report_warning(u'unable to extract video duration')
- video_duration = ''
- else:
- video_duration = compat_urllib_parse.unquote_plus(video_info['length_seconds'][0])
-
- # token
- video_token = compat_urllib_parse.unquote_plus(video_info['token'][0])
-
- # Decide which formats to download
- req_format = self._downloader.params.get('format', None)
-
- if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
- self.report_rtmp_download()
- video_url_list = [(None, video_info['conn'][0])]
- elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
- url_map = {}
- for url_data_str in video_info['url_encoded_fmt_stream_map'][0].split(','):
- url_data = compat_parse_qs(url_data_str)
- if 'itag' in url_data and 'url' in url_data:
- url = url_data['url'][0] + '&signature=' + url_data['sig'][0]
- if not 'ratebypass' in url: url += '&ratebypass=yes'
- url_map[url_data['itag'][0]] = url
-
- format_limit = self._downloader.params.get('format_limit', None)
- available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats
- if format_limit is not None and format_limit in available_formats:
- format_list = available_formats[available_formats.index(format_limit):]
- else:
- format_list = available_formats
- existing_formats = [x for x in format_list if x in url_map]
- if len(existing_formats) == 0:
- raise ExtractorError(u'no known formats available for video')
- if self._downloader.params.get('listformats', None):
- self._print_formats(existing_formats)
- return
- if req_format is None or req_format == 'best':
- video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
- elif req_format == 'worst':
- video_url_list = [(existing_formats[len(existing_formats)-1], url_map[existing_formats[len(existing_formats)-1]])] # worst quality
- elif req_format in ('-1', 'all'):
- video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
- else:
-            # Specific formats. We pick the first in a slash-delimited sequence.
- # For example, if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'.
- req_formats = req_format.split('/')
- video_url_list = None
- for rf in req_formats:
- if rf in url_map:
- video_url_list = [(rf, url_map[rf])]
- break
- if video_url_list is None:
- raise ExtractorError(u'requested format not available')
- else:
- raise ExtractorError(u'no conn or url_encoded_fmt_stream_map information found in video info')
-
- results = []
- for format_param, video_real_url in video_url_list:
- # Extension
- video_extension = self._video_extensions.get(format_param, 'flv')
-
- video_format = '{0} - {1}'.format(format_param if format_param else video_extension,
- self._video_dimensions.get(format_param, '???'))
+from .extractor.common import InfoExtractor, SearchInfoExtractor
+from .extractor.youtube import YoutubeIE, YoutubePlaylistIE, YoutubeUserIE, YoutubeChannelIE
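Assuming these re-exports are meant to keep the module's public interface stable while the classes move into the new extractor package (an inference from the diff, not stated in it), existing imports such as the following would keep working:

    from youtube_dl.InfoExtractors import YoutubeIE  # resolved via .extractor.youtube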
- results.append({
- 'id': video_id,
- 'url': video_real_url,
- 'uploader': video_uploader,
- 'uploader_id': video_uploader_id,
- 'upload_date': upload_date,
- 'title': video_title,
- 'ext': video_extension,
- 'format': video_format,
- 'thumbnail': video_thumbnail,
- 'description': video_description,
- 'player_url': player_url,
- 'subtitles': video_subtitles,
- 'duration': video_duration
- })
- return results
class MetacafeIE(InfoExtractor):
video_title = unescapeHTML(mobj.group('title'))
video_uploader = None
- mobj = re.search(r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a>', webpage)
- if mobj is None:
- # lookin for official user
- mobj_official = re.search(r'<span rel="author"[^>]+?>([^<]+?)</span>', webpage)
- if mobj_official is None:
- self._downloader.report_warning(u'unable to extract uploader nickname')
- else:
- video_uploader = mobj_official.group(1)
- else:
- video_uploader = mobj.group(1)
+ video_uploader = self._search_regex([r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a>',
+ # Looking for official user
+ r'<(?:span|a) .*?rel="author".*?>([^<]+?)</'],
+ webpage, 'video uploader')
video_upload_date = None
mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage)
_VALID_URL = r'(?P<proto>https?://)?(?:(?:www|player)\.)?vimeo(?P<pro>pro)?\.com/(?:(?:(?:groups|album)/[^/]+)|(?:.*?)/)?(?P<direct_link>play_redirect_hls\?clip_id=)?(?:videos?/)?(?P<id>[0-9]+)'
IE_NAME = u'vimeo'
+ def _verify_video_password(self, url, video_id, webpage):
+ password = self._downloader.params.get('password', None)
+ if password is None:
+ raise ExtractorError(u'This video is protected by a password, use the --password option')
+ token = re.search(r'xsrft: \'(.*?)\'', webpage).group(1)
+ data = compat_urllib_parse.urlencode({'password': password,
+ 'token': token})
+ # I didn't manage to use the password with https
+ if url.startswith('https'):
+            pass_url = url.replace('https://', 'http://', 1)
+ else:
+ pass_url = url
+ password_request = compat_urllib_request.Request(pass_url+'/password', data)
+ password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
+ password_request.add_header('Cookie', 'xsrft=%s' % token)
+ pass_web = self._download_webpage(password_request, video_id,
+ u'Verifying the password',
+ u'Wrong password')
+
def _real_extract(self, url, new_video=True):
# Extract ID from URL
mobj = re.match(self._VALID_URL, url)
except:
if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
raise ExtractorError(u'The author has restricted the access to this video, try with the "--referer" option')
+
+ if re.search('If so please provide the correct password.', webpage):
+ self._verify_video_password(url, video_id, webpage)
+ return self._real_extract(url)
else:
raise ExtractorError(u'Unable to extract info section')
if mobj is None:
# Broaden the search a little bit: JWPlayer JS loader
mobj = re.search(r'[^A-Za-z0-9]?file:\s*["\'](http[^\'"&]*)', webpage)
+ if mobj is None:
+ # Try to find twitter cards info
+ mobj = re.search(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)
+ if mobj is None:
+ # We look for Open Graph info:
+ # We have to match any number spaces between elements, some sites try to align them (eg.: statigr.am)
+ m_video_type = re.search(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
+ # We only look in og:video if the MIME type is a video, don't try if it's a Flash player:
+ if m_video_type is not None:
+ mobj = re.search(r'<meta.*?property="og:video".*?content="(.*?)"', webpage)
if mobj is None:
raise ExtractorError(u'Invalid URL: %s' % url)
# Site Name | Video Title
# Video Title - Tagline | Site Name
# and so on and so forth; it's just not practical
- mobj = re.search(r'<title>(.*)</title>', webpage)
- if mobj is None:
- raise ExtractorError(u'Unable to extract title')
- video_title = mobj.group(1)
+ video_title = self._html_search_regex(r'<title>(.*)</title>',
+ webpage, u'video title')
# video uploader is domain name
- mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
- if mobj is None:
- raise ExtractorError(u'Unable to extract title')
- video_uploader = mobj.group(1)
+ video_uploader = self._search_regex(r'(?:https?://)?([^/]*)/.*',
+ url, u'video uploader')
return [{
'id': video_id,
def report_download_page(self, query, pagenum):
"""Report attempt to download search page with given number."""
- query = query.decode(preferredencoding())
self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
def _get_n_results(self, query, n):
return res
-class YoutubePlaylistIE(InfoExtractor):
- """Information Extractor for YouTube playlists."""
-
- _VALID_URL = r"""(?:
- (?:https?://)?
- (?:\w+\.)?
- youtube\.com/
- (?:
- (?:course|view_play_list|my_playlists|artist|playlist|watch)
- \? (?:.*?&)*? (?:p|a|list)=
- | p/
- )
- ((?:PL|EC|UU)?[0-9A-Za-z-_]{10,})
- .*
- |
- ((?:PL|EC|UU)[0-9A-Za-z-_]{10,})
- )"""
- _TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/playlists/%s?max-results=%i&start-index=%i&v=2&alt=json'
- _MAX_RESULTS = 50
- IE_NAME = u'youtube:playlist'
-
- @classmethod
- def suitable(cls, url):
- """Receives a URL and returns True if suitable for this IE."""
- return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
-
- def _real_extract(self, url):
- # Extract playlist id
- mobj = re.match(self._VALID_URL, url, re.VERBOSE)
- if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
-
- # Download playlist videos from API
- playlist_id = mobj.group(1) or mobj.group(2)
- page_num = 1
- videos = []
-
- while True:
- url = self._TEMPLATE_URL % (playlist_id, self._MAX_RESULTS, self._MAX_RESULTS * (page_num - 1) + 1)
- page = self._download_webpage(url, playlist_id, u'Downloading page #%s' % page_num)
-
- try:
- response = json.loads(page)
- except ValueError as err:
- raise ExtractorError(u'Invalid JSON in API response: ' + compat_str(err))
-
- if 'feed' not in response:
- raise ExtractorError(u'Got a malformed response from YouTube API')
- playlist_title = response['feed']['title']['$t']
- if 'entry' not in response['feed']:
- # Number of videos is a multiple of self._MAX_RESULTS
- break
-
- videos += [ (entry['yt$position']['$t'], entry['content']['src'])
- for entry in response['feed']['entry']
- if 'content' in entry ]
-
- if len(response['feed']['entry']) < self._MAX_RESULTS:
- break
- page_num += 1
-
- videos = [v[1] for v in sorted(videos)]
-
- url_results = [self.url_result(url, 'Youtube') for url in videos]
- return [self.playlist_result(url_results, playlist_id, playlist_title)]
-
-
-class YoutubeChannelIE(InfoExtractor):
- """Information Extractor for YouTube channels."""
-
- _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)"
- _TEMPLATE_URL = 'http://www.youtube.com/channel/%s/videos?sort=da&flow=list&view=0&page=%s&gl=US&hl=en'
- _MORE_PAGES_INDICATOR = 'yt-uix-load-more'
- _MORE_PAGES_URL = 'http://www.youtube.com/channel_ajax?action_load_more_videos=1&flow=list&paging=%s&view=0&sort=da&channel_id=%s'
- IE_NAME = u'youtube:channel'
-
- def extract_videos_from_page(self, page):
- ids_in_page = []
- for mobj in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&?', page):
- if mobj.group(1) not in ids_in_page:
- ids_in_page.append(mobj.group(1))
- return ids_in_page
-
- def _real_extract(self, url):
- # Extract channel id
- mobj = re.match(self._VALID_URL, url)
- if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
-
- # Download channel page
- channel_id = mobj.group(1)
- video_ids = []
- pagenum = 1
-
- url = self._TEMPLATE_URL % (channel_id, pagenum)
- page = self._download_webpage(url, channel_id,
- u'Downloading page #%s' % pagenum)
-
- # Extract video identifiers
- ids_in_page = self.extract_videos_from_page(page)
- video_ids.extend(ids_in_page)
-
- # Download any subsequent channel pages using the json-based channel_ajax query
- if self._MORE_PAGES_INDICATOR in page:
- while True:
- pagenum = pagenum + 1
-
- url = self._MORE_PAGES_URL % (pagenum, channel_id)
- page = self._download_webpage(url, channel_id,
- u'Downloading page #%s' % pagenum)
-
- page = json.loads(page)
-
- ids_in_page = self.extract_videos_from_page(page['content_html'])
- video_ids.extend(ids_in_page)
-
- if self._MORE_PAGES_INDICATOR not in page['load_more_widget_html']:
- break
-
- self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))
-
- urls = ['http://www.youtube.com/watch?v=%s' % id for id in video_ids]
- url_entries = [self.url_result(url, 'Youtube') for url in urls]
- return [self.playlist_result(url_entries, channel_id)]
-
-
-class YoutubeUserIE(InfoExtractor):
- """Information Extractor for YouTube users."""
-
- _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/user/)|ytuser:)([A-Za-z0-9_-]+)'
- _TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
- _GDATA_PAGE_SIZE = 50
- _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d'
- _VIDEO_INDICATOR = r'/watch\?v=(.+?)[\<&]'
- IE_NAME = u'youtube:user'
-
- def _real_extract(self, url):
- # Extract username
- mobj = re.match(self._VALID_URL, url)
- if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
-
- username = mobj.group(1)
-
- # Download video ids using YouTube Data API. Result size per
- # query is limited (currently to 50 videos) so we need to query
- # page by page until there are no video ids - it means we got
- # all of them.
-
- video_ids = []
- pagenum = 0
-
- while True:
- start_index = pagenum * self._GDATA_PAGE_SIZE + 1
-
- gdata_url = self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)
- page = self._download_webpage(gdata_url, username,
- u'Downloading video ids from %d to %d' % (start_index, start_index + self._GDATA_PAGE_SIZE))
-
- # Extract video identifiers
- ids_in_page = []
-
- for mobj in re.finditer(self._VIDEO_INDICATOR, page):
- if mobj.group(1) not in ids_in_page:
- ids_in_page.append(mobj.group(1))
-
- video_ids.extend(ids_in_page)
-
- # A little optimization - if current page is not
- # "full", ie. does not contain PAGE_SIZE video ids then
- # we can assume that this page is the last one - there
- # are no more ids on further pages - no need to query
- # again.
-
- if len(ids_in_page) < self._GDATA_PAGE_SIZE:
- break
-
- pagenum += 1
-
- urls = ['http://www.youtube.com/watch?v=%s' % video_id for video_id in video_ids]
- url_results = [self.url_result(url, 'Youtube') for url in urls]
- return [self.playlist_result(url_results, playlist_title = username)]
-
-
class BlipTVUserIE(InfoExtractor):
"""Information Extractor for blip.tv users."""
(?P<gameID>\d+)/?
(?P<videoID>\d*)(?P<extra>\??) #For urltype == video we sometimes get the videoID
"""
+ _VIDEO_PAGE_TEMPLATE = 'http://store.steampowered.com/video/%s/'
+ _AGECHECK_TEMPLATE = 'http://store.steampowered.com/agecheck/video/%s/?snr=1_agecheck_agecheck__age-gate&ageDay=1&ageMonth=January&ageYear=1970'
@classmethod
def suitable(cls, url):
def _real_extract(self, url):
m = re.match(self._VALID_URL, url, re.VERBOSE)
gameID = m.group('gameID')
- videourl = 'http://store.steampowered.com/agecheck/video/%s/?snr=1_agecheck_agecheck__age-gate&ageDay=1&ageMonth=January&ageYear=1970' % gameID
- self.report_age_confirmation()
+
+ videourl = self._VIDEO_PAGE_TEMPLATE % gameID
webpage = self._download_webpage(videourl, gameID)
- game_title = re.search(r'<h2 class="pageheader">(?P<game_title>.*?)</h2>', webpage).group('game_title')
-
+
+ if re.search('<h2>Please enter your birth date to continue:</h2>', webpage) is not None:
+ videourl = self._AGECHECK_TEMPLATE % gameID
+ self.report_age_confirmation()
+ webpage = self._download_webpage(videourl, gameID)
+
+ self.report_extraction(gameID)
+ game_title = self._html_search_regex(r'<h2 class="pageheader">(.*?)</h2>',
+ webpage, 'game title')
+
urlRE = r"'movie_(?P<videoID>\d+)': \{\s*FILENAME: \"(?P<videoURL>[\w:/\.\?=]+)\"(,\s*MOVIE_NAME: \"(?P<videoName>[\w:/\.\?=\+-]+)\")?\s*\},"
mweb = re.finditer(urlRE, webpage)
namesRE = r'<span class="title">(?P<videoName>.+?)</span>'
webpage = self._download_webpage(url, video_id)
- json_data = self._search_regex(r'<script>window.gon = {.*?};gon\.show=(.+?);</script>',
- webpage, u'json data')
+ json_data = self._search_regex(r'window\.gon.*?gon\.show=(.+?);$',
+ webpage, u'json data', flags=re.MULTILINE)
try:
data = json.loads(json_data)
self.to_screen(u'Getting info of playlist %s: "%s"' % (playlist_id,name))
return [self._playlist_videos_info(url,name,playlist_id)]
- def _talk_video_link(self,mediaSlug):
- '''Returns the video link for that mediaSlug'''
- return 'http://download.ted.com/talks/%s.mp4' % mediaSlug
-
def _playlist_videos_info(self,url,name,playlist_id=0):
'''Returns the videos of the playlist'''
video_RE=r'''
m_videos=re.finditer(video_RE,webpage,re.VERBOSE)
m_names=re.finditer(video_name_RE,webpage)
- playlist_RE = r'div class="headline">(\s*?)<h1>(\s*?)<span>(?P<playlist_title>.*?)</span>'
- m_playlist = re.search(playlist_RE, webpage)
- playlist_title = m_playlist.group('playlist_title')
+ playlist_title = self._html_search_regex(r'div class="headline">\s*?<h1>\s*?<span>(.*?)</span>',
+ webpage, 'playlist title')
playlist_entries = []
for m_video, m_name in zip(m_videos,m_names):
def _talk_info(self, url, video_id=0):
"""Return the video for the talk in the url"""
- m=re.match(self._VALID_URL, url,re.VERBOSE)
- videoName=m.group('name')
- webpage=self._download_webpage(url, video_id, 'Downloading \"%s\" page' % videoName)
+ m = re.match(self._VALID_URL, url,re.VERBOSE)
+ video_name = m.group('name')
+ webpage = self._download_webpage(url, video_id, 'Downloading \"%s\" page' % video_name)
+ self.report_extraction(video_name)
# If the url includes the language we get the title translated
- title_RE=r'<span id="altHeadline" >(?P<title>.*)</span>'
- title=re.search(title_RE, webpage).group('title')
- info_RE=r'''<script\ type="text/javascript">var\ talkDetails\ =(.*?)
- "id":(?P<videoID>[\d]+).*?
- "mediaSlug":"(?P<mediaSlug>[\w\d]+?)"'''
- thumb_RE=r'</span>[\s.]*</div>[\s.]*<img src="(?P<thumbnail>.*?)"'
- thumb_match=re.search(thumb_RE,webpage)
- info_match=re.search(info_RE,webpage,re.VERBOSE)
- video_id=info_match.group('videoID')
- mediaSlug=info_match.group('mediaSlug')
- video_url=self._talk_video_link(mediaSlug)
+ title = self._html_search_regex(r'<span id="altHeadline" >(?P<title>.*)</span>',
+ webpage, 'title')
+ json_data = self._search_regex(r'<script.*?>var talkDetails = ({.*?})</script>',
+ webpage, 'json data')
+ info = json.loads(json_data)
+ desc = self._html_search_regex(r'<div class="talk-intro">.*?<p.*?>(.*?)</p>',
+ webpage, 'description', flags = re.DOTALL)
+
+ thumbnail = self._search_regex(r'</span>[\s.]*</div>[\s.]*<img src="(.*?)"',
+ webpage, 'thumbnail')
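+        # htmlStreams presumably lists the formats in ascending quality, so
+        # the last entry is taken as the best one (an assumption based on
+        # this selection, not documented by TED).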
info = {
- 'id': video_id,
- 'url': video_url,
+ 'id': info['id'],
+ 'url': info['htmlStreams'][-1]['file'],
'ext': 'mp4',
'title': title,
- 'thumbnail': thumb_match.group('thumbnail')
+ 'thumbnail': thumbnail,
+ 'description': desc,
}
return info
info["url"] = stream["video_url"]
return [info]
+class ZDFIE(InfoExtractor):
+    _VALID_URL = r'^http://www\.zdf\.de/ZDFmediathek/(.*beitrag/video/)(?P<video_id>[^/\?]+)(?:\?.*)?'
+ _TITLE = r'<h1(?: class="beitragHeadline")?>(?P<title>.*)</h1>'
+ _MEDIA_STREAM = r'<a href="(?P<video_url>.+(?P<media_type>.streaming).+/zdf/(?P<quality>[^\/]+)/[^"]*)".+class="play".+>'
+ _MMS_STREAM = r'href="(?P<video_url>mms://[^"]*)"'
+ _RTSP_STREAM = r'(?P<video_url>rtsp://[^"]*.mp4)'
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ raise ExtractorError(u'Invalid URL: %s' % url)
+ video_id = mobj.group('video_id')
+
+ html = self._download_webpage(url, video_id)
+ streams = [m.groupdict() for m in re.finditer(self._MEDIA_STREAM, html)]
+        if not streams:
+ raise ExtractorError(u'No media url found.')
+
+ # s['media_type'] == 'wstreaming' -> use 'Windows Media Player' and mms url
+ # s['media_type'] == 'hstreaming' -> use 'Quicktime' and rtsp url
+ # choose first/default media type and highest quality for now
+        stream_ = None
+        for s in streams: # find 300 - dsl1000mbit
+ if s['quality'] == '300' and s['media_type'] == 'wstreaming':
+ stream_=s
+ break
+ for s in streams: #find veryhigh - dsl2000mbit
+ if s['quality'] == 'veryhigh' and s['media_type'] == 'wstreaming': # 'hstreaming' - rtsp is not working
+ stream_=s
+ break
+ if stream_ is None:
+ raise ExtractorError(u'No stream found.')
+
+        media_link = self._download_webpage(stream_['video_url'], video_id, u'Get stream URL')
+
+ self.report_extraction(video_id)
+ mobj = re.search(self._TITLE, html)
+ if mobj is None:
+ raise ExtractorError(u'Cannot extract title')
+ title = unescapeHTML(mobj.group('title'))
+
+ mobj = re.search(self._MMS_STREAM, media_link)
+ if mobj is None:
+ mobj = re.search(self._RTSP_STREAM, media_link)
+ if mobj is None:
+ raise ExtractorError(u'Cannot extract mms:// or rtsp:// URL')
+ mms_url = mobj.group('video_url')
+
+ mobj = re.search('(.*)[.](?P<ext>[^.]+)', mms_url)
+ if mobj is None:
+            raise ExtractorError(u'Cannot extract extension')
+ ext = mobj.group('ext')
+
+ return [{'id': video_id,
+ 'url': mms_url,
+ 'title': title,
+ 'ext': ext
+ }]
+
class TumblrIE(InfoExtractor):
_VALID_URL = r'http://(?P<blog_name>.*?)\.tumblr\.com/((post)|(video))/(?P<id>\d*)/(.*?)'
'artist': artist,
}]
+class Vbox7IE(InfoExtractor):
+ """Information Extractor for Vbox7"""
+ _VALID_URL = r'(?:http://)?(?:www\.)?vbox7\.com/play:([^/]+)'
+
+ def _real_extract(self,url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ raise ExtractorError(u'Invalid URL: %s' % url)
+ video_id = mobj.group(1)
+
+ redirect_page, urlh = self._download_webpage_handle(url, video_id)
+ new_location = self._search_regex(r'window\.location = \'(.*)\';', redirect_page, u'redirect location')
+ redirect_url = urlh.geturl() + new_location
+ webpage = self._download_webpage(redirect_url, video_id, u'Downloading redirect page')
+
+ title = self._html_search_regex(r'<title>(.*)</title>',
+ webpage, u'title').split('/')[0].strip()
+
+ ext = "flv"
+ info_url = "http://vbox7.com/play/magare.do"
+ data = compat_urllib_parse.urlencode({'as3':'1','vid':video_id})
+ info_request = compat_urllib_request.Request(info_url, data)
+ info_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
+ info_response = self._download_webpage(info_request, video_id, u'Downloading info webpage')
+ if info_response is None:
+ raise ExtractorError(u'Unable to extract the media url')
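+        # The reply is assumed to look like 'url=<media url>&thumb=<thumb url>'
+        # (inferred from the parsing below): split on '&' and keep the second
+        # '='-separated field of each pair.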
+ (final_url, thumbnail_url) = map(lambda x: x.split('=')[1], info_response.split('&'))
+
+ return [{
+ 'id': video_id,
+ 'url': final_url,
+ 'ext': ext,
+ 'title': title,
+ 'thumbnail': thumbnail_url,
+ }]
+
+class GametrailersIE(InfoExtractor):
+    _VALID_URL = r'http://www\.gametrailers\.com/(?P<type>videos|reviews|full-episodes)/(?P<id>.*?)/(?P<title>.*)'
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ raise ExtractorError(u'Invalid URL: %s' % url)
+ video_id = mobj.group('id')
+ video_type = mobj.group('type')
+ webpage = self._download_webpage(url, video_id)
+ if video_type == 'full-episodes':
+ mgid_re = r'data-video="(?P<mgid>mgid:.*?)"'
+ else:
+ mgid_re = r'data-contentId=\'(?P<mgid>mgid:.*?)\''
+ mgid = self._search_regex(mgid_re, webpage, u'mgid')
+ data = compat_urllib_parse.urlencode({'uri': mgid, 'acceptMethods': 'fms'})
+
+ info_page = self._download_webpage('http://www.gametrailers.com/feeds/mrss?' + data,
+ video_id, u'Downloading video info')
+ links_webpage = self._download_webpage('http://www.gametrailers.com/feeds/mediagen/?' + data,
+ video_id, u'Downloading video urls info')
+
+ self.report_extraction(video_id)
+ info_re = r'''<title><!\[CDATA\[(?P<title>.*?)\]\]></title>.*
+ <description><!\[CDATA\[(?P<description>.*?)\]\]></description>.*
+ <image>.*
+ <url>(?P<thumb>.*?)</url>.*
+ </image>'''
+
+ m_info = re.search(info_re, info_page, re.VERBOSE|re.DOTALL)
+ if m_info is None:
+ raise ExtractorError(u'Unable to extract video info')
+ video_title = m_info.group('title')
+ video_description = m_info.group('description')
+ video_thumb = m_info.group('thumb')
+
+ m_urls = list(re.finditer(r'<src>(?P<url>.*)</src>', links_webpage))
+        if not m_urls:
+            raise ExtractorError(u'Unable to extract video url')
+ # They are sorted from worst to best quality
+ video_url = m_urls[-1].group('url')
+
+ return {'url': video_url,
+ 'id': video_id,
+ 'title': video_title,
+ # Videos are actually flv not mp4
+ 'ext': 'flv',
+ 'thumbnail': video_thumb,
+ 'description': video_description,
+ }
+
+class StatigramIE(InfoExtractor):
+ _VALID_URL = r'(?:http://)?(?:www\.)?statigr\.am/p/([^/]+)'
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+
+ video_id = mobj.group(1)
+ webpage = self._download_webpage(url, video_id)
+ video_url = self._html_search_regex(
+ r'<meta property="og:video:secure_url" content="(.+?)">',
+ webpage, u'video URL')
+ thumbnail_url = self._html_search_regex(
+ r'<meta property="og:image" content="(.+?)" />',
+ webpage, u'thumbnail URL', fatal=False)
+ html_title = self._html_search_regex(
+ r'<title>(.+?)</title>',
+ webpage, u'title')
+ title = html_title.rpartition(u' | Statigram')[0]
+ uploader_id = self._html_search_regex(
+ r'@([^ ]+)', title, u'uploader name', fatal=False)
+ ext = 'mp4'
+
+ return [{
+ 'id': video_id,
+ 'url': video_url,
+ 'ext': ext,
+ 'title': title,
+ 'thumbnail': thumbnail_url,
+ 'uploader_id' : uploader_id
+ }]
def gen_extractors():
""" Return a list of an instance of every supported extractor.
SpiegelIE(),
LiveLeakIE(),
ARDIE(),
+ ZDFIE(),
TumblrIE(),
BandcampIE(),
RedTubeIE(),
TeamcocoIE(),
XHamsterIE(),
HypemIE(),
+ Vbox7IE(),
+ GametrailersIE(),
+ StatigramIE(),
GenericIE()
]
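A sketch of how this list is typically consumed by the caller; fd stands for a FileDownloader-like object and is assumed, not defined here:

    for ie in gen_extractors():
        if ie.suitable(url):
            ie.set_downloader(fd)
            results = ie.extract(url)
            break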