diff --git a/yt_dlp/extractor/niconico.py b/yt_dlp/extractor/niconico.py
index 05a1a3ddb8ca1969124360d431eaf91b81e15fa9..9d7b010c54e69d694c23d409c3b98f99a47ac81a 100644
--- a/yt_dlp/extractor/niconico.py
+++ b/yt_dlp/extractor/niconico.py
@@ -1,11 +1,10 @@
-import datetime
+import datetime as dt
 import functools
 import itertools
 import json
 import re
 import time
-
-from urllib.parse import urlparse
+import urllib.parse
 
 from .common import InfoExtractor, SearchInfoExtractor
 from ..networking import Request
@@ -36,6 +35,8 @@
 class NiconicoIE(InfoExtractor):
     IE_NAME = 'niconico'
     IE_DESC = 'ニコニコ動画'
+    _GEO_COUNTRIES = ['JP']
+    _GEO_BYPASS = False
 
     _TESTS = [{
         'url': 'http://www.nicovideo.jp/watch/sm22312215',
@@ -161,8 +162,6 @@ class NiconicoIE(InfoExtractor):
             'description': 'md5:15df8988e47a86f9e978af2064bf6d8e',
             'timestamp': 1341128008,
             'upload_date': '20120701',
-            'uploader': None,
-            'uploader_id': None,
             'thumbnail': r're:https?://.*',
             'duration': 5271,
             'view_count': int,
@@ -214,7 +213,7 @@ def _perform_login(self, username, password):
                 urljoin('https://account.nicovideo.jp', post_url), None,
                 note='Performing MFA', errnote='Unable to complete MFA',
                 data=urlencode_postdata({
-                    'otp': self._get_tfa_info('6 digits code')
+                    'otp': self._get_tfa_info('6 digits code'),
                 }), headers={
                     'Content-Type': 'application/x-www-form-urlencoded',
                 })
@@ -265,7 +264,7 @@ def ping():
                 'http_output_download_parameters': {
                     'use_ssl': yesno(session_api_data['urls'][0]['isSsl']),
                     'use_well_known_port': yesno(session_api_data['urls'][0]['isWellKnownPort']),
-                }
+                },
             }
         elif dmc_protocol == 'hls':
             protocol = 'm3u8'
@@ -278,14 +277,14 @@ def ping():
                     'transfer_preset': '',
                     'use_ssl': yesno(session_api_data['urls'][0]['isSsl']),
                     'use_well_known_port': yesno(session_api_data['urls'][0]['isWellKnownPort']),
-                }
+                },
             }
             if 'hls_encryption' in parsed_token and encryption:
                 protocol_parameters['hls_parameters']['encryption'] = {
                     parsed_token['hls_encryption']: {
                         'encrypted_key': encryption['encryptedKey'],
                         'key_uri': encryption['keyUri'],
-                    }
+                    },
                 }
             else:
                 protocol = 'm3u8_native'
@@ -296,7 +295,7 @@ def ping():
             session_api_endpoint['url'], video_id,
             query={'_format': 'json'},
             headers={'Content-Type': 'application/json'},
-            note='Downloading JSON metadata for %s' % info_dict['format_id'],
+            note='Downloading JSON metadata for {}'.format(info_dict['format_id']),
             data=json.dumps({
                 'session': {
                     'client_info': {
@@ -306,7 +305,7 @@ def ping():
                         'auth_type': try_get(session_api_data, lambda x: x['authTypes'][session_api_data['protocols'][0]]),
                         'content_key_timeout': session_api_data.get('contentKeyTimeout'),
                         'service_id': 'nicovideo',
-                        'service_user_id': session_api_data.get('serviceUserId')
+                        'service_user_id': session_api_data.get('serviceUserId'),
                     },
                     'content_id': session_api_data.get('contentId'),
                     'content_src_id_sets': [{
@@ -314,34 +313,34 @@ def ping():
                             'src_id_to_mux': {
                                 'audio_src_ids': [audio_src_id],
                                 'video_src_ids': [video_src_id],
-                            }
-                        }]
+                            },
+                        }],
                     }],
                     'content_type': 'movie',
                     'content_uri': '',
                     'keep_method': {
                         'heartbeat': {
-                            'lifetime': session_api_data.get('heartbeatLifetime')
-                        }
+                            'lifetime': session_api_data.get('heartbeatLifetime'),
+                        },
                     },
                     'priority': session_api_data['priority'],
                     'protocol': {
                         'name': 'http',
                         'parameters': {
                             'http_parameters': {
-                                'parameters': protocol_parameters
-                            }
-                        }
+                                'parameters': protocol_parameters,
+                            },
+                        },
                     },
                     'recipe_id': session_api_data.get('recipeId'),
                     'session_operation_auth': {
                         'session_operation_auth_by_signature': {
                             'signature': session_api_data.get('signature'),
                             'token': session_api_data.get('token'),
-                        }
+                        },
                     },
-                    'timing_constraint': 'unlimited'
-                }
+                    'timing_constraint': 'unlimited',
+                },
             }).encode())
 
         info_dict['url'] = session_response['data']['session']['content_uri']
@@ -353,7 +352,7 @@ def ping():
             'data': json.dumps(session_response['data']),
             # interval, convert milliseconds to seconds, then halve to make a buffer.
             'interval': float_or_none(session_api_data.get('heartbeatLifetime'), scale=3000),
-            'ping': ping
+            'ping': ping,
         }
 
         return info_dict, heartbeat_info_dict
@@ -369,7 +368,7 @@ def _extract_format_for_quality(self, video_id, audio_quality, video_quality, dm
         vid_qual_label = traverse_obj(video_quality, ('metadata', 'label'))
 
         return {
-            'url': 'niconico_dmc:%s/%s/%s' % (video_id, video_quality['id'], audio_quality['id']),
+            'url': 'niconico_dmc:{}/{}/{}'.format(video_id, video_quality['id'], audio_quality['id']),
             'format_id': format_id,
             'format_note': join_nonempty('DMC', vid_qual_label, dmc_protocol.upper(), delim=' '),
             'ext': 'mp4',  # Session API are used in HTML5, which always serves mp4
@@ -390,7 +389,7 @@ def _extract_format_for_quality(self, video_id, audio_quality, video_quality, dm
             'http_headers': {
                 'Origin': 'https://www.nicovideo.jp',
                 'Referer': 'https://www.nicovideo.jp/watch/' + video_id,
-            }
+            },
         }
 
     def _yield_dmc_formats(self, api_data, video_id):
@@ -417,7 +416,7 @@ def _yield_dms_formats(self, api_data, video_id):
         dms_m3u8_url = self._download_json(
             f'https://nvapi.nicovideo.jp/v1/watch/{video_id}/access-rights/hls', video_id,
             data=json.dumps({
-                'outputs': list(itertools.product((v['id'] for v in videos), (a['id'] for a in audios)))
+                'outputs': list(itertools.product((v['id'] for v in videos), (a['id'] for a in audios))),
             }).encode(), query={'actionTrackId': track_id}, headers={
                 'x-access-right-key': access_key,
                 'x-frontend-id': 6,
@@ -465,7 +464,7 @@ def _real_extract(self, url):
         except ExtractorError as e:
             try:
                 api_data = self._download_json(
-                    'https://www.nicovideo.jp/api/watch/v3/%s?_frontendId=6&_frontendVersion=0&actionTrackId=AAAAAAAAAA_%d' % (video_id, round(time.time() * 1000)), video_id,
+                    f'https://www.nicovideo.jp/api/watch/v3/{video_id}?_frontendId=6&_frontendVersion=0&actionTrackId=AAAAAAAAAA_{round(time.time() * 1000)}', video_id,
                     note='Downloading API JSON', errnote='Unable to fetch data')['data']
             except ExtractorError:
                 if not isinstance(e.cause, HTTPError):
@@ -478,15 +477,27 @@ def _real_extract(self, url):
                     raise
                 raise ExtractorError(clean_html(error_msg), expected=True)
 
-        club_joined = traverse_obj(api_data, ('channel', 'viewer', 'follow', 'isFollowed', {bool}))
-        if club_joined is None:
-            fail_msg = self._html_search_regex(
+        availability = self._availability(**(traverse_obj(api_data, ('payment', 'video', {
+            'needs_premium': ('isPremium', {bool}),
+            'needs_subscription': ('isAdmission', {bool}),
+        })) or {'needs_auth': True}))
+        formats = [*self._yield_dmc_formats(api_data, video_id),
+                   *self._yield_dms_formats(api_data, video_id)]
+        if not formats:
+            fail_msg = clean_html(self._html_search_regex(
                 r'<p[^>]+\bclass="fail-message"[^>]*>(?P<msg>.+?)</p>',
-                webpage, 'fail message', default=None, group='msg')
+                webpage, 'fail message', default=None, group='msg'))
             if fail_msg:
-                self.raise_login_required(clean_html(fail_msg), metadata_available=True)
-        elif not club_joined:
-            self.raise_login_required('This video is for members only', metadata_available=True)
+                self.to_screen(f'Niconico said: {fail_msg}')
+            if fail_msg and 'された地域と同じ地域からのみ視聴できます。' in fail_msg:
+                availability = None
+                self.raise_geo_restricted(countries=self._GEO_COUNTRIES, metadata_available=True)
+            elif availability == 'premium_only':
+                self.raise_login_required('This video requires premium', metadata_available=True)
+            elif availability == 'subscriber_only':
+                self.raise_login_required('This video is for members only', metadata_available=True)
+            elif availability == 'needs_auth':
+                self.raise_login_required(metadata_available=False)
 
         # Start extracting information
         tags = None
@@ -512,8 +523,8 @@ def get_video_info(*items, get_first=True, **kwargs):
             'id': video_id,
             '_api_data': api_data,
             'title': get_video_info(('originalTitle', 'title')) or self._og_search_title(webpage, default=None),
-            'formats': [*self._yield_dmc_formats(api_data, video_id),
-                        *self._yield_dms_formats(api_data, video_id)],
+            'formats': formats,
+            'availability': availability,
             'thumbnails': [{
                 'id': key,
                 'url': url,
@@ -575,7 +586,7 @@ class NiconicoPlaylistBaseIE(InfoExtractor):
     _API_HEADERS = {
         'X-Frontend-ID': '6',
         'X-Frontend-Version': '0',
-        'X-Niconico-Language': 'en-us'
+        'X-Niconico-Language': 'en-us',
     }
 
     def _call_api(self, list_id, resource, query):
@@ -590,7 +601,7 @@ def _parse_owner(item):
 
     def _fetch_page(self, list_id, page):
         page += 1
-        resp = self._call_api(list_id, 'page %d' % page, {
+        resp = self._call_api(list_id, f'page {page}', {
             'page': page,
             'pageSize': self._PAGE_SIZE,
         })
@@ -778,14 +789,14 @@ class NicovideoSearchURLIE(NicovideoSearchBaseIE):
         'url': 'http://www.nicovideo.jp/search/sm9',
         'info_dict': {
             'id': 'sm9',
-            'title': 'sm9'
+            'title': 'sm9',
         },
         'playlist_mincount': 40,
     }, {
         'url': 'https://www.nicovideo.jp/search/sm9?sort=h&order=d&end=2020-12-31&start=2020-01-01',
         'info_dict': {
             'id': 'sm9',
-            'title': 'sm9'
+            'title': 'sm9',
         },
         'playlist_count': 31,
     }]
@@ -803,17 +814,17 @@ class NicovideoSearchDateIE(NicovideoSearchBaseIE, SearchInfoExtractor):
         'url': 'nicosearchdateall:a',
         'info_dict': {
             'id': 'a',
-            'title': 'a'
+            'title': 'a',
         },
         'playlist_mincount': 1610,
     }]
 
-    _START_DATE = datetime.date(2007, 1, 1)
+    _START_DATE = dt.date(2007, 1, 1)
     _RESULTS_PER_PAGE = 32
     _MAX_PAGES = 50
 
     def _entries(self, url, item_id, start_date=None, end_date=None):
-        start_date, end_date = start_date or self._START_DATE, end_date or datetime.datetime.now().date()
+        start_date, end_date = start_date or self._START_DATE, end_date or dt.datetime.now().date()
 
         # If the last page has a full page of videos, we need to break down the query interval further
         last_page_len = len(list(self._get_entries_for_date(
@@ -850,7 +861,7 @@ class NicovideoTagURLIE(NicovideoSearchBaseIE):
         'url': 'https://www.nicovideo.jp/tag/ドキュメンタリー淫夢',
         'info_dict': {
             'id': 'ドキュメンタリー淫夢',
-            'title': 'ドキュメンタリー淫夢'
+            'title': 'ドキュメンタリー淫夢',
         },
         'playlist_mincount': 400,
     }]
@@ -869,12 +880,12 @@ class NiconicoUserIE(InfoExtractor):
         },
         'playlist_mincount': 101,
     }
-    _API_URL = "https://nvapi.nicovideo.jp/v1/users/%s/videos?sortKey=registeredAt&sortOrder=desc&pageSize=%s&page=%s"
+    _API_URL = 'https://nvapi.nicovideo.jp/v1/users/%s/videos?sortKey=registeredAt&sortOrder=desc&pageSize=%s&page=%s'
     _PAGE_SIZE = 100
 
     _API_HEADERS = {
         'X-Frontend-ID': '6',
-        'X-Frontend-Version': '0'
+        'X-Frontend-Version': '0',
     }
 
     def _entries(self, list_id):
@@ -884,12 +895,12 @@ def _entries(self, list_id):
             json_parsed = self._download_json(
                 self._API_URL % (list_id, self._PAGE_SIZE, page_num + 1), list_id,
                 headers=self._API_HEADERS,
-                note='Downloading JSON metadata%s' % (' page %d' % page_num if page_num else ''))
+                note='Downloading JSON metadata%s' % (f' page {page_num}' if page_num else ''))
             if not page_num:
                 total_count = int_or_none(json_parsed['data'].get('totalCount'))
-            for entry in json_parsed["data"]["items"]:
+            for entry in json_parsed['data']['items']:
                 count += 1
-                yield self.url_result('https://www.nicovideo.jp/watch/%s' % entry['id'])
+                yield self.url_result('https://www.nicovideo.jp/watch/{}'.format(entry['id']))
             page_num += 1
 
     def _real_extract(self, url):
@@ -906,7 +917,7 @@ class NiconicoLiveIE(InfoExtractor):
         'url': 'https://live.nicovideo.jp/watch/lv339533123',
         'info_dict': {
             'id': 'lv339533123',
-            'title': '激辛ペヤング食べます‪( ;ᯅ; )‬(歌枠オーディション参加中)',
+            'title': '激辛ペヤング食べます\u202a( ;ᯅ; )\u202c(歌枠オーディション参加中)',
             'view_count': 1526,
             'comment_count': 1772,
             'description': '初めましてもかって言います❕\nのんびり自由に適当に暮らしてます',
@@ -945,7 +956,7 @@ def _real_extract(self, url):
             'frontend_id': traverse_obj(embedded_data, ('site', 'frontendId')) or '9',
         })
 
-        hostname = remove_start(urlparse(urlh.url).hostname, 'sp.')
+        hostname = remove_start(urllib.parse.urlparse(urlh.url).hostname, 'sp.')
         latency = try_get(self._configuration_arg('latency'), lambda x: x[0])
         if latency not in self._KNOWN_LATENCY:
             latency = 'high'
@@ -962,14 +973,14 @@ def _real_extract(self, url):
                     'quality': 'abr',
                     'protocol': 'hls+fmp4',
                     'latency': latency,
-                    'chasePlay': False
+                    'chasePlay': False,
                 },
                 'room': {
                     'protocol': 'webSocket',
-                    'commentable': True
+                    'commentable': True,
                 },
                 'reconnect': False,
-            }
+            },
         }))
 
         while True:
@@ -993,7 +1004,7 @@ def _real_extract(self, url):
             elif self.get_param('verbose', False):
                 if len(recv) > 100:
                     recv = recv[:100] + '...'
-                self.write_debug('Server said: %s' % recv)
+                self.write_debug(f'Server said: {recv}')
 
         title = traverse_obj(embedded_data, ('program', 'title')) or self._html_search_meta(
             ('og:title', 'twitter:title'), webpage, 'live title', fatal=False)