import json
import re
import urllib.parse

from .common import InfoExtractor
from .youtube import YoutubeBaseInfoExtractor, YoutubeIE
from ..compat import compat_HTTPError, compat_urllib_parse_unquote
from ..utils import (
    KNOWN_EXTENSIONS,
    ExtractorError,
    HEADRequest,
    bug_reports_message,
    clean_html,
    dict_get,
    extract_attributes,
    get_element_by_id,
    int_or_none,
    join_nonempty,
    merge_dicts,
    mimetype2ext,
    orderedSet,
    parse_duration,
    parse_qs,
    str_or_none,
    str_to_int,
    traverse_obj,
    try_get,
    unified_strdate,
    unified_timestamp,
    url_or_none,
    urlhandle_detect_ext,
)


class ArchiveOrgIE(InfoExtractor):
    IE_NAME = 'archive.org'
    IE_DESC = 'archive.org video and audio'
    _VALID_URL = r'https?://(?:www\.)?archive\.org/(?:details|embed)/(?P<id>[^?#]+)(?:[?].*)?$'
    _TESTS = [{
        'url': 'http://archive.org/details/XD300-23_68HighlightsAResearchCntAugHumanIntellect',
        'md5': '8af1d4cf447933ed3c7f4871162602db',
        'info_dict': {
            'id': 'XD300-23_68HighlightsAResearchCntAugHumanIntellect',
            'ext': 'ogv',
            'title': '1968 Demo - FJCC Conference Presentation Reel #1',
            'description': 'md5:da45c349df039f1cc8075268eb1b5c25',
            'release_date': '19681210',
            'timestamp': 1268695290,
            'upload_date': '20100315',
            'creator': 'SRI International',
            'uploader': 'laura@archive.org',
        },
    }, {
        'url': 'https://archive.org/details/Cops1922',
        'md5': '0869000b4ce265e8ca62738b336b268a',
        'info_dict': {
            'id': 'Cops1922',
            'ext': 'mp4',
            'title': 'Buster Keaton\'s "Cops" (1922)',
            'description': 'md5:43a603fd6c5b4b90d12a96b921212b9c',
            'uploader': 'yorkmba99@hotmail.com',
            'timestamp': 1387699629,
            'upload_date': '20131222',
        },
    }, {
        'url': 'http://archive.org/embed/XD300-23_68HighlightsAResearchCntAugHumanIntellect',
        'only_matching': True,
    }, {
        'url': 'https://archive.org/details/Election_Ads',
        'md5': '284180e857160cf866358700bab668a3',
        'info_dict': {
            'id': 'Election_Ads/Commercial-JFK1960ElectionAdCampaignJingle.mpg',
            'title': 'Commercial-JFK1960ElectionAdCampaignJingle.mpg',
            'ext': 'mp4',
        },
    }, {
        'url': 'https://archive.org/details/Election_Ads/Commercial-Nixon1960ElectionAdToughonDefense.mpg',
        'md5': '7915213ef02559b5501fe630e1a53f59',
        'info_dict': {
            'id': 'Election_Ads/Commercial-Nixon1960ElectionAdToughonDefense.mpg',
            'title': 'Commercial-Nixon1960ElectionAdToughonDefense.mpg',
            'ext': 'mp4',
            'timestamp': 1205588045,
            'uploader': 'mikedavisstripmaster@yahoo.com',
            'description': '1960 Presidential Campaign Election Commercials John F Kennedy, Richard M Nixon',
            'upload_date': '20080315',
        },
    }, {
        'url': 'https://archive.org/details/gd1977-05-08.shure57.stevenson.29303.flac16',
        'md5': '7d07ffb42aba6537c28e053efa4b54c9',
        'info_dict': {
            'id': 'gd1977-05-08.shure57.stevenson.29303.flac16/gd1977-05-08d01t01.flac',
            'title': 'Turning',
            'ext': 'flac',
        },
    }, {
        'url': 'https://archive.org/details/gd1977-05-08.shure57.stevenson.29303.flac16/gd1977-05-08d01t07.flac',
        'md5': 'a07cd8c6ab4ee1560f8a0021717130f3',
        'info_dict': {
            'id': 'gd1977-05-08.shure57.stevenson.29303.flac16/gd1977-05-08d01t07.flac',
            'title': 'Deal',
            'ext': 'flac',
            'timestamp': 1205895624,
            'uploader': 'mvernon54@yahoo.com',
            'description': 'md5:6a31f1996db0aa0fc9da6d6e708a1bb0',
            'upload_date': '20080319',
            'location': 'Barton Hall - Cornell University',
        },
    }, {
        'url': 'https://archive.org/details/lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik',
        'md5': '7cb019baa9b332e82ea7c10403acd180',
        'info_dict': {
            'id': 'lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik/disc1/01.01. Bells Of Rostov.mp3',
            'title': 'Bells Of Rostov',
            'ext': 'mp3',
        },
    }, {
        'url': 'https://archive.org/details/lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik/disc1/02.02.+Song+And+Chorus+In+The+Polovetsian+Camp+From+%22Prince+Igor%22+(Act+2%2C+Scene+1).mp3',
        'md5': '1d0aabe03edca83ca58d9ed3b493a3c3',
        'info_dict': {
            'id': 'lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik/disc1/02.02. Song And Chorus In The Polovetsian Camp From "Prince Igor" (Act 2, Scene 1).mp3',
            'title': 'Song And Chorus In The Polovetsian Camp From "Prince Igor" (Act 2, Scene 1)',
            'ext': 'mp3',
            'timestamp': 1569662587,
            'uploader': 'associate-joygen-odiongan@archive.org',
            'description': 'md5:012b2d668ae753be36896f343d12a236',
            'upload_date': '20190928',
        },
    }]

    @staticmethod
    def _playlist_data(webpage):
        element = re.findall(r'''(?xs)
            <input
            (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
            \s+class=['"]?js-play8-playlist['"]?
            (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
            \s*/>
        ''', webpage)[0]

        return json.loads(extract_attributes(element)['value'])

    def _real_extract(self, url):
        video_id = urllib.parse.unquote_plus(self._match_id(url))
        identifier, entry_id = (video_id.split('/', 1) + [None])[:2]

        # Archive.org metadata API doesn't clearly demarcate playlist entries
        # or subtitle tracks, so we get them from the embeddable player.
        embed_page = self._download_webpage(f'https://archive.org/embed/{identifier}', identifier)
        playlist = self._playlist_data(embed_page)
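        # Each entry of the embed player's playlist JSON is a dict; only the
        # fields read below ('orig', 'title', 'artist', 'tracks') matter here.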

        entries = {}
        for p in playlist:
            # If the user specified a playlist entry in the URL, ignore the
            # rest of the playlist.
            if entry_id and p['orig'] != entry_id:
                continue

            entries[p['orig']] = {
                'formats': [],
                'thumbnails': [],
                'artist': p.get('artist'),
                'track': p.get('title'),
                'subtitles': {},
            }

            for track in p.get('tracks', []):
                if track['kind'] != 'subtitles':
                    continue
                entries[p['orig']]['subtitles'].setdefault(track['label'], []).append({
                    'url': 'https://archive.org/' + track['file'].lstrip('/')})

        metadata = self._download_json('http://archive.org/metadata/' + identifier, identifier)
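        # The metadata endpoint returns a 'metadata' dict with item-level fields
        # (title, description, uploader, ...) and a 'files' list describing each
        # file in the item; optional 'reviews' are mapped to comments further down.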
        m = metadata['metadata']
        identifier = m['identifier']

        info = {
            'id': identifier,
            'title': m['title'],
            'description': clean_html(m.get('description')),
            'uploader': dict_get(m, ['uploader', 'adder']),
            'creator': m.get('creator'),
            'license': m.get('licenseurl'),
            'release_date': unified_strdate(m.get('date')),
            'timestamp': unified_timestamp(dict_get(m, ['publicdate', 'addeddate'])),
            'webpage_url': f'https://archive.org/details/{identifier}',
            'location': m.get('venue'),
            'release_year': int_or_none(m.get('year'))}

        for f in metadata['files']:
            if f['name'] in entries:
                entries[f['name']] = merge_dicts(entries[f['name']], {
                    'id': identifier + '/' + f['name'],
                    'title': f.get('title') or f['name'],
                    'display_id': f['name'],
                    'description': clean_html(f.get('description')),
                    'creator': f.get('creator'),
                    'duration': parse_duration(f.get('length')),
                    'track_number': int_or_none(f.get('track')),
                    'album': f.get('album'),
                    'discnumber': int_or_none(f.get('disc')),
                    'release_year': int_or_none(f.get('year'))})
                entry = entries[f['name']]
            elif traverse_obj(f, 'original', expected_type=str) in entries:
                entry = entries[f['original']]
            else:
                continue

            if f.get('format') == 'Thumbnail':
                entry['thumbnails'].append({
                    'id': f['name'],
                    'url': 'https://archive.org/download/' + identifier + '/' + f['name'],
                    'width': int_or_none(f.get('width')),
                    'height': int_or_none(f.get('height')),
                    'filesize': int_or_none(f.get('size'))})

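            # Files without a dot in their name yield extension None and are
            # skipped; only extensions yt-dlp recognises (KNOWN_EXTENSIONS) are
            # treated as downloadable media formats.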
            extension = (f['name'].rsplit('.', 1) + [None])[1]
            if extension in KNOWN_EXTENSIONS:
                entry['formats'].append({
                    'url': 'https://archive.org/download/' + identifier + '/' + f['name'],
                    'format': f.get('format'),
                    'width': int_or_none(f.get('width')),
                    'height': int_or_none(f.get('height')),
                    'filesize': int_or_none(f.get('size')),
                    'protocol': 'https'})

        for entry in entries.values():
            self._sort_formats(entry['formats'])

        if len(entries) == 1:
            # If there's only one item, use it as the main info dict
            only_video = next(iter(entries.values()))
            if entry_id:
                info = merge_dicts(only_video, info)
            else:
                info = merge_dicts(info, only_video)
        else:
            # Otherwise, we have a playlist.
            info['_type'] = 'playlist'
            info['entries'] = list(entries.values())

        if metadata.get('reviews'):
            info['comments'] = []
            for review in metadata['reviews']:
                info['comments'].append({
                    'id': review.get('review_id'),
                    'author': review.get('reviewer'),
                    'text': str_or_none(review.get('reviewtitle'), '') + '\n\n' + review.get('reviewbody'),
                    'timestamp': unified_timestamp(review.get('createdate')),
                    'parent': 'root'})

        return info


class YoutubeWebArchiveIE(InfoExtractor):
    IE_NAME = 'web.archive:youtube'
    IE_DESC = 'web.archive.org saved youtube videos, "ytarchive:" prefix'
    _VALID_URL = r'''(?x)(?:(?P<prefix>ytarchive:)|
            (?:https?://)?web\.archive\.org/
            (?:web/)?(?:(?P<date>[0-9]{14})?[0-9A-Za-z_*]*/)?  # /web and the version index is optional
            (?:https?(?::|%3[Aa])//)?(?:
                (?:\w+\.)?youtube\.com(?::(?:80|443))?/watch(?:\.php)?(?:\?|%3[fF])(?:[^\#]+(?:&|%26))?v(?:=|%3[dD])  # Youtube URL
                |(?:wayback-fakeurl\.archive\.org/yt/)  # Or the internal fake url
            )
        )(?P<id>[0-9A-Za-z_-]{11})
        (?(prefix)
            (?::(?P<date2>[0-9]{14}))?$|
            (?:%26|[#&]|$)
        )'''

    _TESTS = [
        {
            'url': 'https://web.archive.org/web/20150415002341/https://www.youtube.com/watch?v=aYAGB11YrSs',
            'info_dict': {
                'id': 'aYAGB11YrSs',
                'ext': 'webm',
                'title': 'Team Fortress 2 - Sandviches!',
                'description': 'md5:4984c0f9a07f349fc5d8e82ab7af4eaf',
                'upload_date': '20110926',
                'uploader': 'Zeurel',
                'channel_id': 'UCukCyHaD-bK3in_pKpfH9Eg',
                'duration': 32,
                'uploader_id': 'Zeurel',
                'uploader_url': 'http://www.youtube.com/user/Zeurel'
            }
        }, {
            # Internal link
            'url': 'https://web.archive.org/web/2oe/http://wayback-fakeurl.archive.org/yt/97t7Xj_iBv0',
            'info_dict': {
                'id': '97t7Xj_iBv0',
                'ext': 'mp4',
                'title': 'Why Machines That Bend Are Better',
                'description': 'md5:00404df2c632d16a674ff8df1ecfbb6c',
                'upload_date': '20190312',
                'uploader': 'Veritasium',
                'channel_id': 'UCHnyfMqiRRG1u-2MsSQLbXA',
                'duration': 771,
                'uploader_id': '1veritasium',
                'uploader_url': 'http://www.youtube.com/user/1veritasium'
            }
        }, {
            # Video from 2012, webm format itag 45. Newest capture is deleted video, with an invalid description.
            # Should use the date in the link. Title ends with '- Youtube'. Capture has description in eow-description
            'url': 'https://web.archive.org/web/20120712231619/http://www.youtube.com/watch?v=AkhihxRKcrs&gl=US&hl=en',
            'info_dict': {
                'id': 'AkhihxRKcrs',
                'ext': 'webm',
                'title': 'Limited Run: Mondo\'s Modern Classic 1 of 3 (SDCC 2012)',
                'upload_date': '20120712',
                'duration': 398,
                'description': 'md5:ff4de6a7980cb65d951c2f6966a4f2f3',
                'uploader_id': 'machinima',
                'uploader_url': 'http://www.youtube.com/user/machinima'
            }
        }, {
            # FLV video. Video file URL does not provide itag information
            'url': 'https://web.archive.org/web/20081211103536/http://www.youtube.com/watch?v=jNQXAC9IVRw',
            'info_dict': {
                'id': 'jNQXAC9IVRw',
                'ext': 'flv',
                'title': 'Me at the zoo',
                'upload_date': '20050423',
                'channel_id': 'UC4QobU6STFB0P71PMvOGN5A',
                'duration': 19,
                'description': 'md5:10436b12e07ac43ff8df65287a56efb4',
                'uploader_id': 'jawed',
                'uploader_url': 'http://www.youtube.com/user/jawed'
            }
        }, {
            'url': 'https://web.archive.org/web/20110712231407/http://www.youtube.com/watch?v=lTx3G6h2xyA',
            'info_dict': {
                'id': 'lTx3G6h2xyA',
                'ext': 'flv',
                'title': 'Madeon - Pop Culture (live mashup)',
                'upload_date': '20110711',
                'uploader': 'Madeon',
                'channel_id': 'UCqMDNf3Pn5L7pcNkuSEeO3w',
                'duration': 204,
                'description': 'md5:f7535343b6eda34a314eff8b85444680',
                'uploader_id': 'itsmadeon',
                'uploader_url': 'http://www.youtube.com/user/itsmadeon'
            }
        }, {
            # First capture is of dead video, second is the oldest from CDX response.
            'url': 'https://web.archive.org/https://www.youtube.com/watch?v=1JYutPM8O6E',
            'info_dict': {
                'id': '1JYutPM8O6E',
                'ext': 'mp4',
                'title': 'Fake Teen Doctor Strikes AGAIN! - Weekly Weird News',
                'upload_date': '20160218',
                'channel_id': 'UCdIaNUarhzLSXGoItz7BHVA',
                'duration': 1236,
                'description': 'md5:21032bae736421e89c2edf36d1936947',
                'uploader_id': 'MachinimaETC',
                'uploader_url': 'http://www.youtube.com/user/MachinimaETC'
            }
        }, {
            # First capture of dead video, capture date in link links to dead capture.
            'url': 'https://web.archive.org/web/20180803221945/https://www.youtube.com/watch?v=6FPhZJGvf4E',
            'info_dict': {
                'id': '6FPhZJGvf4E',
                'ext': 'mp4',
                'title': 'WTF: Video Games Still Launch BROKEN?! - T.U.G.S.',
                'upload_date': '20160219',
                'channel_id': 'UCdIaNUarhzLSXGoItz7BHVA',
                'duration': 798,
                'description': 'md5:a1dbf12d9a3bd7cb4c5e33b27d77ffe7',
                'uploader_id': 'MachinimaETC',
                'uploader_url': 'http://www.youtube.com/user/MachinimaETC'
            },
            'expected_warnings': [
                r'unable to download capture webpage \(it may not be archived\)'
            ]
        }, {  # Very old YouTube page, has - YouTube in title.
            'url': 'http://web.archive.org/web/20070302011044/http://youtube.com/watch?v=-06-KB9XTzg',
            'info_dict': {
                'id': '-06-KB9XTzg',
                'ext': 'flv',
                'title': 'New Coin Hack!! 100% Safe!!'
            }
        }, {
            'url': 'web.archive.org/https://www.youtube.com/watch?v=dWW7qP423y8',
            'info_dict': {
                'id': 'dWW7qP423y8',
                'ext': 'mp4',
                'title': 'It\'s Bootleg AirPods Time.',
                'upload_date': '20211021',
                'channel_id': 'UC7Jwj9fkrf1adN4fMmTkpug',
                'channel_url': 'http://www.youtube.com/channel/UC7Jwj9fkrf1adN4fMmTkpug',
                'duration': 810,
                'description': 'md5:7b567f898d8237b256f36c1a07d6d7bc',
                'uploader': 'DankPods',
                'uploader_id': 'UC7Jwj9fkrf1adN4fMmTkpug',
                'uploader_url': 'http://www.youtube.com/channel/UC7Jwj9fkrf1adN4fMmTkpug'
            }
        }, {
            # player response contains '};' See: https://github.com/ytdl-org/youtube-dl/issues/27093
            'url': 'https://web.archive.org/web/20200827003909if_/http://www.youtube.com/watch?v=6Dh-RL__uN4',
            'info_dict': {
                'id': '6Dh-RL__uN4',
                'ext': 'mp4',
                'title': 'bitch lasagna',
                'upload_date': '20181005',
                'channel_id': 'UC-lHJZR3Gqxm24_Vd_AJ5Yw',
                'channel_url': 'http://www.youtube.com/channel/UC-lHJZR3Gqxm24_Vd_AJ5Yw',
                'duration': 135,
                'description': 'md5:2dbe4051feeff2dab5f41f82bb6d11d0',
                'uploader': 'PewDiePie',
                'uploader_id': 'PewDiePie',
                'uploader_url': 'http://www.youtube.com/user/PewDiePie'
            }
        }, {
            'url': 'https://web.archive.org/web/http://www.youtube.com/watch?v=kH-G_aIBlFw',
            'only_matching': True
        }, {
            'url': 'https://web.archive.org/web/20050214000000_if/http://www.youtube.com/watch?v=0altSZ96U4M',
            'only_matching': True
        }, {
            # Video not archived, only capture is unavailable video page
            'url': 'https://web.archive.org/web/20210530071008/https://www.youtube.com/watch?v=lHJTf93HL1s&spfreload=10',
            'only_matching': True
        }, {  # Encoded url
            'url': 'https://web.archive.org/web/20120712231619/http%3A//www.youtube.com/watch%3Fgl%3DUS%26v%3DAkhihxRKcrs%26hl%3Den',
            'only_matching': True
        }, {
            'url': 'https://web.archive.org/web/20120712231619/http%3A//www.youtube.com/watch%3Fv%3DAkhihxRKcrs%26gl%3DUS%26hl%3Den',
            'only_matching': True
        }, {
            'url': 'https://web.archive.org/web/20060527081937/http://www.youtube.com:80/watch.php?v=ELTFsLT73fA&amp;search=soccer',
            'only_matching': True
        }, {
            'url': 'https://web.archive.org/http://www.youtube.com:80/watch?v=-05VVye-ffg',
            'only_matching': True
        }, {
            'url': 'ytarchive:BaW_jenozKc:20050214000000',
            'only_matching': True
        }, {
            'url': 'ytarchive:BaW_jenozKc',
            'only_matching': True
        },
    ]
    _YT_INITIAL_DATA_RE = YoutubeBaseInfoExtractor._YT_INITIAL_DATA_RE
    _YT_INITIAL_PLAYER_RESPONSE_RE = fr'''(?x)
        (?:window\s*\[\s*["\']ytInitialPlayerResponse["\']\s*\]|ytInitialPlayerResponse)\s*=[(\s]*|
        {YoutubeBaseInfoExtractor._YT_INITIAL_PLAYER_RESPONSE_RE}'''

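    # Hosts that may hold archived copies of a video's thumbnails.
    # _YT_ALL_THUMB_SERVERS expands to img.youtube.com plus i/i1-i4/i9 and
    # s/s1-s4/s9 .ytimg.com (orderedSet drops the duplicates).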
    _YT_DEFAULT_THUMB_SERVERS = ['i.ytimg.com']  # thumbnails most likely archived on these servers
    _YT_ALL_THUMB_SERVERS = orderedSet(
        _YT_DEFAULT_THUMB_SERVERS + ['img.youtube.com', *[f'{c}{n or ""}.ytimg.com' for c in ('i', 's') for n in (*range(0, 5), 9)]])

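    # The 'if_' suffix after the timestamp requests the raw capture without the
    # Wayback Machine toolbar. The sentinel dates below bracket the plausible
    # capture range (YouTube launched on 2005-02-14).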
    _WAYBACK_BASE_URL = 'https://web.archive.org/web/%sif_/'
    _OLDEST_CAPTURE_DATE = 20050214000000
    _NEWEST_CAPTURE_DATE = 20500101000000

    def _call_cdx_api(self, item_id, url, filters: list = None, collapse: list = None, query: dict = None, note=None, fatal=False):
        # CDX docs: https://github.com/internetarchive/wayback/blob/master/wayback-cdx-server/README.md
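        # With output=json the CDX server returns a list of rows whose first row
        # holds the requested field names ('original,mimetype,length,timestamp');
        # they are zipped into one dict per capture below.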
        query = {
            'url': url,
            'output': 'json',
            'fl': 'original,mimetype,length,timestamp',
            'limit': 500,
            'filter': ['statuscode:200'] + (filters or []),
            'collapse': collapse or [],
            **(query or {})
        }
        res = self._download_json(
            'https://web.archive.org/cdx/search/cdx', item_id,
            note or 'Downloading CDX API JSON', query=query, fatal=fatal)
        if isinstance(res, list) and len(res) >= 2:
            # format response to make it easier to use
            return list(dict(zip(res[0], v)) for v in res[1:])
        elif not isinstance(res, list) or len(res) != 0:
            self.report_warning('Error while parsing CDX API response' + bug_reports_message())

    def _extract_webpage_title(self, webpage):
        page_title = self._html_extract_title(webpage, default='')
        # YouTube video pages appear to always have either 'YouTube -' as prefix or '- YouTube' as suffix.
        return self._html_search_regex(
            r'(?:YouTube\s*-\s*(.*)$)|(?:(.*)\s*-\s*YouTube$)',
            page_title, 'title', default='')

    def _extract_metadata(self, video_id, webpage):
        search_meta = ((lambda x: self._html_search_meta(x, webpage, default=None)) if webpage else (lambda x: None))
        player_response = self._search_json(
            self._YT_INITIAL_PLAYER_RESPONSE_RE, webpage, 'initial player response',
            video_id, default={})
        initial_data = self._search_json(
            self._YT_INITIAL_DATA_RE, webpage, 'initial data', video_id, default={})

        initial_data_video = traverse_obj(
            initial_data, ('contents', 'twoColumnWatchNextResults', 'results', 'results', 'contents', ..., 'videoPrimaryInfoRenderer'),
            expected_type=dict, get_all=False, default={})

        video_details = traverse_obj(
            player_response, 'videoDetails', expected_type=dict, get_all=False, default={})

        microformats = traverse_obj(
            player_response, ('microformat', 'playerMicroformatRenderer'), expected_type=dict, get_all=False, default={})

        video_title = (
            video_details.get('title')
            or YoutubeBaseInfoExtractor._get_text(microformats, 'title')
            or YoutubeBaseInfoExtractor._get_text(initial_data_video, 'title')
            or self._extract_webpage_title(webpage)
            or search_meta(['og:title', 'twitter:title', 'title']))

        channel_id = str_or_none(
            video_details.get('channelId')
            or microformats.get('externalChannelId')
            or search_meta('channelId')
            or self._search_regex(
                r'data-channel-external-id=(["\'])(?P<id>(?:(?!\1).)+)\1',  # @b45a9e6
                webpage, 'channel id', default=None, group='id'))
        channel_url = f'http://www.youtube.com/channel/{channel_id}' if channel_id else None

        duration = int_or_none(
            video_details.get('lengthSeconds')
            or microformats.get('lengthSeconds')
            or parse_duration(search_meta('duration')))
        description = (
            video_details.get('shortDescription')
            or YoutubeBaseInfoExtractor._get_text(microformats, 'description')
            or clean_html(get_element_by_id('eow-description', webpage))  # @9e6dd23
            or search_meta(['description', 'og:description', 'twitter:description']))

        uploader = video_details.get('author')

        # Uploader ID and URL
        uploader_mobj = re.search(
            r'<link itemprop="url" href="(?P<uploader_url>https?://www\.youtube\.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',  # @fd05024
            webpage)
        if uploader_mobj is not None:
            uploader_id, uploader_url = uploader_mobj.group('uploader_id'), uploader_mobj.group('uploader_url')
        else:
            # @a6211d2
            uploader_url = url_or_none(microformats.get('ownerProfileUrl'))
            uploader_id = self._search_regex(
                r'(?:user|channel)/([^/]+)', uploader_url or '', 'uploader id', default=None)

        upload_date = unified_strdate(
            dict_get(microformats, ('uploadDate', 'publishDate'))
            or search_meta(['uploadDate', 'datePublished'])
            or self._search_regex(
                [r'(?s)id="eow-date.*?>(.*?)</span>',
                 r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'],  # @7998520
                webpage, 'upload date', default=None))

        return {
            'title': video_title,
            'description': description,
            'upload_date': upload_date,
            'uploader': uploader,
            'channel_id': channel_id,
            'channel_url': channel_url,
            'duration': duration,
            'uploader_url': uploader_url,
            'uploader_id': uploader_id,
        }

    def _extract_thumbnails(self, video_id):
        try_all = 'thumbnails' in self._configuration_arg('check_all')
        thumbnail_base_urls = ['http://{server}/vi{webp}/{video_id}'.format(
            webp='_webp' if ext == 'webp' else '', video_id=video_id, server=server)
            for server in (self._YT_ALL_THUMB_SERVERS if try_all else self._YT_DEFAULT_THUMB_SERVERS) for ext in (('jpg', 'webp') if try_all else ('jpg',))]

        thumbnails = []
        for url in thumbnail_base_urls:
            response = self._call_cdx_api(
                video_id, url, filters=['mimetype:image/(?:webp|jpeg)'],
                collapse=['urlkey'], query={'matchType': 'prefix'})
            if not response:
                continue
            thumbnails.extend(
                {
                    'url': (self._WAYBACK_BASE_URL % (int_or_none(thumbnail_dict.get('timestamp')) or self._OLDEST_CAPTURE_DATE)) + thumbnail_dict.get('original'),
                    'filesize': int_or_none(thumbnail_dict.get('length')),
                    'preference': int_or_none(thumbnail_dict.get('length'))
                } for thumbnail_dict in response)
            if not try_all:
                break

        self._remove_duplicate_formats(thumbnails)
        return thumbnails

    def _get_capture_dates(self, video_id, url_date):
        capture_dates = []
        # Note: CDX API will not find watch pages with extra params in the url.
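        # collapse=['timestamp:6', 'digest'] keeps roughly one capture per month
        # (the first six timestamp digits are YYYYMM) and skips byte-identical
        # adjacent captures.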
        response = self._call_cdx_api(
            video_id, f'https://www.youtube.com/watch?v={video_id}',
            filters=['mimetype:text/html'], collapse=['timestamp:6', 'digest'], query={'matchType': 'prefix'}) or []
        all_captures = sorted(int_or_none(r['timestamp']) for r in response if int_or_none(r['timestamp']) is not None)

        # Prefer the new polymer UI captures as we support extracting more metadata from them
        # WBM captures seem to all switch to this layout ~July 2020
        modern_captures = [x for x in all_captures if x >= 20200701000000]
        if modern_captures:
            capture_dates.append(modern_captures[0])
        capture_dates.append(url_date)
        if all_captures:
            capture_dates.append(all_captures[0])

        if 'captures' in self._configuration_arg('check_all'):
            capture_dates.extend(modern_captures + all_captures)

        # Fallbacks if any of the above fail
        capture_dates.extend([self._OLDEST_CAPTURE_DATE, self._NEWEST_CAPTURE_DATE])
        return orderedSet(filter(None, capture_dates))

    def _real_extract(self, url):
        video_id, url_date, url_date_2 = self._match_valid_url(url).group('id', 'date', 'date2')
        url_date = url_date or url_date_2

        urlh = None
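        # wayback-fakeurl.archive.org/yt/<video_id> is the Wayback Machine's
        # internal alias for the raw video file; a HEAD request suffices to
        # recover the archived file URL and headers for format detection below.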
        try:
            urlh = self._request_webpage(
                HEADRequest('https://web.archive.org/web/2oe_/http://wayback-fakeurl.archive.org/yt/%s' % video_id),
                video_id, note='Fetching archived video file url', expected_status=True)
        except ExtractorError as e:
            # HTTP Error 404 is expected if the video is not saved.
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
                self.raise_no_formats(
                    'The requested video is not archived, indexed, or there is an issue with web.archive.org',
                    expected=True)
            else:
                raise

        capture_dates = self._get_capture_dates(video_id, int_or_none(url_date))
        self.write_debug('Captures to try: ' + join_nonempty(*capture_dates, delim=', '))
        info = {'id': video_id}
        for capture in capture_dates:
            webpage = self._download_webpage(
                (self._WAYBACK_BASE_URL + 'http://www.youtube.com/watch?v=%s') % (capture, video_id),
                video_id=video_id, fatal=False, errnote='unable to download capture webpage (it may not be archived)',
                note='Downloading capture webpage')
            current_info = self._extract_metadata(video_id, webpage or '')
            # Try to avoid getting deleted video metadata
            if current_info.get('title'):
                info = merge_dicts(info, current_info)
                if 'captures' not in self._configuration_arg('check_all'):
                    break

        info['thumbnails'] = self._extract_thumbnails(video_id)

        if urlh:
            url = compat_urllib_parse_unquote(urlh.geturl())
            video_file_url_qs = parse_qs(url)
            # Attempt to recover any ext & format info from playback url & response headers
            format = {'url': url, 'filesize': int_or_none(urlh.headers.get('x-archive-orig-content-length'))}
            itag = try_get(video_file_url_qs, lambda x: x['itag'][0])
            if itag and itag in YoutubeIE._formats:
                format.update(YoutubeIE._formats[itag])
                format.update({'format_id': itag})
            else:
                mime = try_get(video_file_url_qs, lambda x: x['mime'][0])
                ext = (mimetype2ext(mime)
                       or urlhandle_detect_ext(urlh)
                       or mimetype2ext(urlh.headers.get('x-archive-guessed-content-type')))
                format.update({'ext': ext})
            info['formats'] = [format]
            if not info.get('duration'):
                info['duration'] = str_to_int(try_get(video_file_url_qs, lambda x: x['dur'][0]))

        if not info.get('title'):
            info['title'] = video_id
        return info