# coding: utf-8
from __future__ import unicode_literals

import re
import json

from .common import InfoExtractor
from .youtube import YoutubeIE
from ..compat import (
    compat_urllib_parse_unquote,
    compat_urllib_parse_unquote_plus,
    compat_urlparse,
    compat_parse_qs,
    compat_HTTPError
)
from ..utils import (
    clean_html,
    determine_ext,
    dict_get,
    extract_attributes,
    ExtractorError,
    HEADRequest,
    int_or_none,
    KNOWN_EXTENSIONS,
    merge_dicts,
    mimetype2ext,
    parse_duration,
    RegexNotFoundError,
    str_to_int,
    str_or_none,
    try_get,
    unified_strdate,
    unified_timestamp,
)


class ArchiveOrgIE(InfoExtractor):
    IE_NAME = 'archive.org'
    IE_DESC = 'archive.org video and audio'
    _VALID_URL = r'https?://(?:www\.)?archive\.org/(?:details|embed)/(?P<id>[^?#]+)(?:[?].*)?$'
    _TESTS = [{
        'url': 'http://archive.org/details/XD300-23_68HighlightsAResearchCntAugHumanIntellect',
        'md5': '8af1d4cf447933ed3c7f4871162602db',
        'info_dict': {
            'id': 'XD300-23_68HighlightsAResearchCntAugHumanIntellect',
            'ext': 'ogv',
            'title': '1968 Demo - FJCC Conference Presentation Reel #1',
            'description': 'md5:da45c349df039f1cc8075268eb1b5c25',
            'release_date': '19681210',
            'timestamp': 1268695290,
            'upload_date': '20100315',
            'creator': 'SRI International',
            'uploader': 'laura@archive.org',
        },
    }, {
        'url': 'https://archive.org/details/Cops1922',
        'md5': '0869000b4ce265e8ca62738b336b268a',
        'info_dict': {
            'id': 'Cops1922',
            'ext': 'mp4',
            'title': 'Buster Keaton\'s "Cops" (1922)',
            'description': 'md5:43a603fd6c5b4b90d12a96b921212b9c',
            'uploader': 'yorkmba99@hotmail.com',
            'timestamp': 1387699629,
            'upload_date': '20131222',
        },
    }, {
        'url': 'http://archive.org/embed/XD300-23_68HighlightsAResearchCntAugHumanIntellect',
        'only_matching': True,
    }, {
        'url': 'https://archive.org/details/Election_Ads',
        'md5': '284180e857160cf866358700bab668a3',
        'info_dict': {
            'id': 'Election_Ads/Commercial-JFK1960ElectionAdCampaignJingle.mpg',
            'title': 'Commercial-JFK1960ElectionAdCampaignJingle.mpg',
            'ext': 'mp4',
        },
    }, {
        'url': 'https://archive.org/details/Election_Ads/Commercial-Nixon1960ElectionAdToughonDefense.mpg',
        'md5': '7915213ef02559b5501fe630e1a53f59',
        'info_dict': {
            'id': 'Election_Ads/Commercial-Nixon1960ElectionAdToughonDefense.mpg',
            'title': 'Commercial-Nixon1960ElectionAdToughonDefense.mpg',
            'ext': 'mp4',
            'timestamp': 1205588045,
            'uploader': 'mikedavisstripmaster@yahoo.com',
            'description': '1960 Presidential Campaign Election Commercials John F Kennedy, Richard M Nixon',
            'upload_date': '20080315',
        },
    }, {
        'url': 'https://archive.org/details/gd1977-05-08.shure57.stevenson.29303.flac16',
        'md5': '7d07ffb42aba6537c28e053efa4b54c9',
        'info_dict': {
            'id': 'gd1977-05-08.shure57.stevenson.29303.flac16/gd1977-05-08d01t01.flac',
            'title': 'Turning',
            'ext': 'flac',
        },
    }, {
        'url': 'https://archive.org/details/gd1977-05-08.shure57.stevenson.29303.flac16/gd1977-05-08d01t07.flac',
        'md5': 'a07cd8c6ab4ee1560f8a0021717130f3',
        'info_dict': {
            'id': 'gd1977-05-08.shure57.stevenson.29303.flac16/gd1977-05-08d01t07.flac',
            'title': 'Deal',
            'ext': 'flac',
            'timestamp': 1205895624,
            'uploader': 'mvernon54@yahoo.com',
            'description': 'md5:6a31f1996db0aa0fc9da6d6e708a1bb0',
            'upload_date': '20080319',
            'location': 'Barton Hall - Cornell University',
        },
    }, {
        'url': 'https://archive.org/details/lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik',
        'md5': '7cb019baa9b332e82ea7c10403acd180',
        'info_dict': {
            'id': 'lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik/disc1/01.01. Bells Of Rostov.mp3',
            'title': 'Bells Of Rostov',
            'ext': 'mp3',
        },
    }, {
        'url': 'https://archive.org/details/lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik/disc1/02.02.+Song+And+Chorus+In+The+Polovetsian+Camp+From+%22Prince+Igor%22+(Act+2%2C+Scene+1).mp3',
        'md5': '1d0aabe03edca83ca58d9ed3b493a3c3',
        'info_dict': {
            'id': 'lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik/disc1/02.02. Song And Chorus In The Polovetsian Camp From "Prince Igor" (Act 2, Scene 1).mp3',
            'title': 'Song And Chorus In The Polovetsian Camp From "Prince Igor" (Act 2, Scene 1)',
            'ext': 'mp3',
            'timestamp': 1569662587,
            'uploader': 'associate-joygen-odiongan@archive.org',
            'description': 'md5:012b2d668ae753be36896f343d12a236',
            'upload_date': '20190928',
        },
    }]

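    # The embed player page carries its playlist as JSON in the `value`
    # attribute of an <input class="js-play8-playlist"> element; find that
    # element in the raw HTML and decode its value.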
    @staticmethod
    def _playlist_data(webpage):
        element = re.findall(r'''(?xs)
            <input
            (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
            \s+class=['"]?js-play8-playlist['"]?
            (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
            \s*/>
        ''', webpage)[0]

        return json.loads(extract_attributes(element)['value'])

    def _real_extract(self, url):
        video_id = compat_urllib_parse_unquote_plus(self._match_id(url))
        identifier, entry_id = (video_id.split('/', 1) + [None])[:2]

        # Archive.org metadata API doesn't clearly demarcate playlist entries
        # or subtitle tracks, so we get them from the embeddable player.
        embed_page = self._download_webpage(
            'https://archive.org/embed/' + identifier, identifier)
        playlist = self._playlist_data(embed_page)

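        # Build one pending info dict per playlist entry, keyed by the
        # original filename ('orig') reported by the player.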
        entries = {}
        for p in playlist:
            # If the user specified a playlist entry in the URL, ignore the
            # rest of the playlist.
            if entry_id and p['orig'] != entry_id:
                continue

            entries[p['orig']] = {
                'formats': [],
                'thumbnails': [],
                'artist': p.get('artist'),
                'track': p.get('title'),
                'subtitles': {}}

            for track in p.get('tracks', []):
                if track['kind'] != 'subtitles':
                    continue

                entries[p['orig']]['subtitles'].setdefault(track['label'], []).append({
                    'url': 'https://archive.org/' + track['file'].lstrip('/')})

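        # The metadata API returns the item's full file list; use it to fill
        # the entries collected above with descriptions, formats and thumbnails.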
        metadata = self._download_json(
            'http://archive.org/metadata/' + identifier, identifier)
        m = metadata['metadata']
        identifier = m['identifier']

        info = {
            'id': identifier,
            'title': m['title'],
            'description': clean_html(m.get('description')),
            'uploader': dict_get(m, ['uploader', 'adder']),
            'creator': m.get('creator'),
            'license': m.get('licenseurl'),
            'release_date': unified_strdate(m.get('date')),
            'timestamp': unified_timestamp(dict_get(m, ['publicdate', 'addeddate'])),
            'webpage_url': 'https://archive.org/details/' + identifier,
            'location': m.get('venue'),
            'release_year': int_or_none(m.get('year'))}

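        # Walk the file list: files matching a playlist entry by name are
        # merged into that entry, while files referencing another file via an
        # 'original' key only contribute extra formats/thumbnails to it.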
        for f in metadata['files']:
            if f['name'] in entries:
                entries[f['name']] = merge_dicts(entries[f['name']], {
                    'id': identifier + '/' + f['name'],
                    'title': f.get('title') or f['name'],
                    'display_id': f['name'],
                    'description': clean_html(f.get('description')),
                    'creator': f.get('creator'),
                    'duration': parse_duration(f.get('length')),
                    'track_number': int_or_none(f.get('track')),
                    'album': f.get('album'),
                    'discnumber': int_or_none(f.get('disc')),
                    'release_year': int_or_none(f.get('year'))})
                entry = entries[f['name']]
            elif f.get('original') in entries:
                entry = entries[f['original']]
            else:
                continue

            if f.get('format') == 'Thumbnail':
                entry['thumbnails'].append({
                    'id': f['name'],
                    'url': 'https://archive.org/download/' + identifier + '/' + f['name'],
                    'width': int_or_none(f.get('width')),
                    'height': int_or_none(f.get('height')),
                    'filesize': int_or_none(f.get('size'))})

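            # Only files whose extension is a known media extension become
            # downloadable formats; metadata/sidecar files are skipped.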
            extension = (f['name'].rsplit('.', 1) + [None])[1]
            if extension in KNOWN_EXTENSIONS:
                entry['formats'].append({
                    'url': 'https://archive.org/download/' + identifier + '/' + f['name'],
                    'format': f.get('format'),
                    'width': int_or_none(f.get('width')),
                    'height': int_or_none(f.get('height')),
                    'filesize': int_or_none(f.get('size')),
                    'protocol': 'https'})

        # Sort available formats by filesize
        for entry in entries.values():
            entry['formats'] = list(sorted(
                entry['formats'], key=lambda x: x.get('filesize') or -1))

        if len(entries) == 1:
            # If there's only one item, use it as the main info dict
            only_video = entries[list(entries.keys())[0]]
            if entry_id:
                info = merge_dicts(only_video, info)
            else:
                info = merge_dicts(info, only_video)
        else:
            # Otherwise, we have a playlist.
            info['_type'] = 'playlist'
            info['entries'] = list(entries.values())

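        # Expose Archive.org reviews as top-level comments on the result.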
        if metadata.get('reviews'):
            info['comments'] = []
            for review in metadata['reviews']:
                info['comments'].append({
                    'id': review.get('review_id'),
                    'author': review.get('reviewer'),
                    'text': str_or_none(review.get('reviewtitle'), '') + '\n\n' + review.get('reviewbody'),
                    'timestamp': unified_timestamp(review.get('createdate')),
                    'parent': 'root'})

        return info


class YoutubeWebArchiveIE(InfoExtractor):
    IE_NAME = 'web.archive:youtube'
    IE_DESC = 'web.archive.org saved youtube videos'
    _VALID_URL = r"""(?x)^
        (?:https?://)?web\.archive\.org/
            (?:web/)?
        (?:[0-9A-Za-z_*]+/)?  # /web and the version index is optional

        (?:https?(?::|%3[Aa])//)?
        (?:
            (?:\w+\.)?youtube\.com/watch(?:\?|%3[fF])(?:[^\#]+(?:&|%26))?v(?:=|%3[dD])  # Youtube URL
            |(wayback-fakeurl\.archive\.org/yt/)  # Or the internal fake url
        )
        (?P<id>[0-9A-Za-z_-]{11})(?:%26|\#|&|$)
        """

    _TESTS = [
        {
            'url': 'https://web.archive.org/web/20150415002341/https://www.youtube.com/watch?v=aYAGB11YrSs',
            'info_dict': {
                'id': 'aYAGB11YrSs',
                'ext': 'webm',
                'title': 'Team Fortress 2 - Sandviches!'
            }
        },
        {
            # Internal link
            'url': 'https://web.archive.org/web/2oe/http://wayback-fakeurl.archive.org/yt/97t7Xj_iBv0',
            'info_dict': {
                'id': '97t7Xj_iBv0',
                'ext': 'mp4',
                'title': 'How Flexible Machines Could Save The World'
            }
        },
        {
            # Video from 2012, webm format itag 45.
            'url': 'https://web.archive.org/web/20120712231619/http://www.youtube.com/watch?v=AkhihxRKcrs&gl=US&hl=en',
            'info_dict': {
                'id': 'AkhihxRKcrs',
                'ext': 'webm',
                'title': 'Limited Run: Mondo\'s Modern Classic 1 of 3 (SDCC 2012)'
            }
        },
        {
            # Old flash-only video. Webpage title starts with "YouTube - ".
            'url': 'https://web.archive.org/web/20081211103536/http://www.youtube.com/watch?v=jNQXAC9IVRw',
            'info_dict': {
                'id': 'jNQXAC9IVRw',
                'ext': 'unknown_video',
                'title': 'Me at the zoo'
            }
        },
        {
            # Flash video with .flv extension (itag 34). Title has prefix "YouTube -"
            # Title has some weird unicode characters too.
            'url': 'https://web.archive.org/web/20110712231407/http://www.youtube.com/watch?v=lTx3G6h2xyA',
            'info_dict': {
                'id': 'lTx3G6h2xyA',
                'ext': 'flv',
                'title': '‪Madeon - Pop Culture (live mashup)‬‏'
            }
        },
        { # Some versions of Youtube have "YouTube" as the page title in the html (it is later rewritten by js).
            'url': 'https://web.archive.org/web/http://www.youtube.com/watch?v=kH-G_aIBlFw',
            'info_dict': {
                'id': 'kH-G_aIBlFw',
                'ext': 'mp4',
                'title': 'kH-G_aIBlFw'
            },
            'expected_warnings': [
                'unable to extract title',
            ]
        },
        {
            # First capture is a 302 redirect intermediary page.
            'url': 'https://web.archive.org/web/20050214000000/http://www.youtube.com/watch?v=0altSZ96U4M',
            'info_dict': {
                'id': '0altSZ96U4M',
                'ext': 'mp4',
                'title': '0altSZ96U4M'
            },
            'expected_warnings': [
                'unable to extract title',
            ]
        },
        {
            # Video not archived, only capture is unavailable video page
            'url': 'https://web.archive.org/web/20210530071008/https://www.youtube.com/watch?v=lHJTf93HL1s&spfreload=10',
            'only_matching': True,
        },
        { # Encoded url
            'url': 'https://web.archive.org/web/20120712231619/http%3A//www.youtube.com/watch%3Fgl%3DUS%26v%3DAkhihxRKcrs%26hl%3Den',
            'only_matching': True,
        },
        {
            'url': 'https://web.archive.org/web/20120712231619/http%3A//www.youtube.com/watch%3Fv%3DAkhihxRKcrs%26gl%3DUS%26hl%3Den',
            'only_matching': True,
        }
    ]

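    # Extraction strategy: grab a title from an archived copy of the watch
    # page, then resolve the actual media file through the wayback-fakeurl
    # redirect used by web.archive.org.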
    def _real_extract(self, url):
        video_id = self._match_id(url)
        title = video_id  # fallback in case we are not able to get a title

        def _extract_title(webpage):
            page_title = self._html_search_regex(
                r'<title>([^<]*)</title>', webpage, 'title', fatal=False) or ''
            # YouTube video pages appear to always have either 'YouTube -' as prefix or '- YouTube' as suffix.
            try:
                page_title = self._html_search_regex(
                    r'(?:YouTube\s*-\s*(.*)$)|(?:(.*)\s*-\s*YouTube$)',
                    page_title, 'title', default='')
            except RegexNotFoundError:
                page_title = None

            if not page_title:
                self.report_warning('unable to extract title', video_id=video_id)
                return
            return page_title

        # If the video is no longer available, the oldest capture may be one before it was removed.
        # Setting the capture date in the url to an early date seems to redirect to the earliest capture.
        webpage = self._download_webpage(
            'https://web.archive.org/web/20050214000000/http://www.youtube.com/watch?v=%s' % video_id,
            video_id=video_id, fatal=False, errnote='unable to download video webpage (probably not archived).')
        if webpage:
            title = _extract_title(webpage) or title

        # Use link translator mentioned in https://github.com/ytdl-org/youtube-dl/issues/13655
        internal_fake_url = 'https://web.archive.org/web/2oe_/http://wayback-fakeurl.archive.org/yt/%s' % video_id
        try:
            video_file_webpage = self._request_webpage(
                HEADRequest(internal_fake_url), video_id,
                note='Fetching video file url', expected_status=True)
        except ExtractorError as e:
            # HTTP Error 404 is expected if the video is not saved.
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
                raise ExtractorError(
                    'HTTP Error %s. Most likely the video is not archived or there is an issue with web.archive.org.' % e.cause.code,
                    expected=True)
            raise
        video_file_url = compat_urllib_parse_unquote(video_file_webpage.url)
        video_file_url_qs = compat_parse_qs(compat_urlparse.urlparse(video_file_url).query)

        # Attempt to recover any ext & format info from playback url
        format = {'url': video_file_url}
        itag = try_get(video_file_url_qs, lambda x: x['itag'][0])
        if itag and itag in YoutubeIE._formats:  # Naughty access but it works
            format.update(YoutubeIE._formats[itag])
            format.update({'format_id': itag})
        else:
            mime = try_get(video_file_url_qs, lambda x: x['mime'][0])
            ext = mimetype2ext(mime) or determine_ext(video_file_url)
            format.update({'ext': ext})
        return {
            'id': video_id,
            'title': title,
            'formats': [format],
            'duration': str_to_int(try_get(video_file_url_qs, lambda x: x['dur'][0]))
        }