]> jfr.im git - yt-dlp.git/blob - youtube_dl/extractor/mixcloud.py
Merge branch 'niconico_nm' of https://github.com/ndac-todoroki/youtube-dl into ndac...
[yt-dlp.git] / youtube_dl / extractor / mixcloud.py
1 from __future__ import unicode_literals
2
3 import re
4 import itertools
5
6 from .common import InfoExtractor
7 from ..compat import (
8 compat_urllib_parse,
9 )
10 from ..utils import (
11 ExtractorError,
12 HEADRequest,
13 str_to_int,
14 )
15
16
class MixcloudIE(InfoExtractor):
    """Extractor for individual Mixcloud cloudcast pages."""

    _VALID_URL = r'^(?:https?://)?(?:www\.)?mixcloud\.com/([^/]+)/([^/]+)'
    IE_NAME = 'mixcloud'

    _TESTS = [{
        'url': 'http://www.mixcloud.com/dholbach/cryptkeeper/',
        'info_dict': {
            'id': 'dholbach-cryptkeeper',
            'ext': 'mp3',
            'title': 'Cryptkeeper',
            'description': 'After quite a long silence from myself, finally another Drum\'n\'Bass mix with my favourite current dance floor bangers.',
            'uploader': 'Daniel Holbach',
            'uploader_id': 'dholbach',
            'thumbnail': 're:https?://.*\.jpg',
            'view_count': int,
            'like_count': int,
        },
    }, {
        'url': 'http://www.mixcloud.com/gillespeterson/caribou-7-inch-vinyl-mix-chat/',
        'info_dict': {
            'id': 'gillespeterson-caribou-7-inch-vinyl-mix-chat',
            'ext': 'mp3',
            'title': 'Caribou 7 inch Vinyl Mix & Chat',
            'description': 'md5:2b8aec6adce69f9d41724647c65875e8',
            'uploader': 'Gilles Peterson Worldwide',
            'uploader_id': 'gillespeterson',
            'thumbnail': 're:https?://.*/images/',
            'view_count': int,
            'like_count': int,
        },
    }]

    def _get_url(self, track_id, template_url, server_number):
        """Probe candidate stream servers and return the first URL that works.

        Only HEAD requests are issued, so nothing is downloaded. Returns
        None when every candidate server fails.
        """
        bounds = (1, 30)
        for candidate in server_numbers(server_number, bounds):
            candidate_url = template_url % candidate
            try:
                # A HEAD request tells us whether the URL is alive;
                # we never want the file contents at this point.
                self._request_webpage(
                    HEADRequest(candidate_url), track_id,
                    'Checking URL %d/%d ...' % (candidate, bounds[-1]))
            except ExtractorError:
                continue
            return candidate_url
        return None

    def _real_extract(self, url):
        """Extract metadata and a full-length stream URL from a cloudcast page."""
        match = re.match(self._VALID_URL, url)
        uploader_slug = match.group(1)
        cloudcast_slug = match.group(2)
        track_id = compat_urllib_parse.unquote(
            '-'.join((uploader_slug, cloudcast_slug)))

        webpage = self._download_webpage(url, track_id)

        # The page only exposes a preview URL; derive the full-length URL
        # template from it and probe servers for one that serves it.
        preview_url = self._search_regex(
            r'\s(?:data-preview-url|m-preview)="([^"]+)"', webpage, 'preview url')
        song_url = preview_url.replace('/previews/', '/c/originals/')
        server_number = int(self._search_regex(
            r'stream(\d+)', song_url, 'server number'))
        template_url = re.sub(r'(stream\d*)', 'stream%d', song_url)

        final_song_url = self._get_url(track_id, template_url, server_number)
        if final_song_url is None:
            # No server had the mp3 original; retry with the m4a variant.
            self.to_screen('Trying with m4a extension')
            template_url = template_url.replace(
                '.mp3', '.m4a').replace('originals/', 'm4a/64/')
            final_song_url = self._get_url(track_id, template_url, server_number)
        if final_song_url is None:
            raise ExtractorError('Unable to extract track url')

        # Metadata lives in attributes near the m-play-on-spacebar marker.
        attr_prefix = (
            r'm-play-on-spacebar[^>]+'
            r'(?:\s+[a-zA-Z0-9-]+(?:="[^"]+")?)*?\s+')
        title = self._html_search_regex(
            attr_prefix + r'm-title="([^"]+)"', webpage, 'title')
        thumbnail = self._proto_relative_url(self._html_search_regex(
            attr_prefix + r'm-thumbnail-url="([^"]+)"', webpage, 'thumbnail',
            fatal=False))
        uploader = self._html_search_regex(
            attr_prefix + r'm-owner-name="([^"]+)"',
            webpage, 'uploader', fatal=False)
        uploader_id = self._search_regex(
            r'\s+"profile": "([^"]+)",', webpage, 'uploader id', fatal=False)
        description = self._og_search_description(webpage)
        like_count = str_to_int(self._search_regex(
            r'\bbutton-favorite\b.+m-ajax-toggle-count="([^"]+)"',
            webpage, 'like count', fatal=False))
        view_count = str_to_int(self._search_regex(
            [r'<meta itemprop="interactionCount" content="UserPlays:([0-9]+)"',
             r'/listeners/?">([0-9,.]+)</a>'],
            webpage, 'play count', fatal=False))

        return {
            'id': track_id,
            'title': title,
            'url': final_song_url,
            'description': description,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'view_count': view_count,
            'like_count': like_count,
        }
118
119
def server_numbers(first, boundaries):
    """Yield server numbers in descending order of probable availability.

    Starting from *first* (i.e. the number of the server hosting the
    preview file) and going further and further up to the higher boundary
    and down to the lower one in an alternating fashion. Namely:

        server_numbers(2, (1, 5))

        # Where the preview server is 2, min number is 1 and max is 5.
        # Yields: 2, 3, 1, 4, 5

    Why not random numbers or increasing sequences? Since from what I've
    seen, full length files seem to be hosted on servers whose number is
    closer to that of the preview; to be confirmed.

    Raises ValueError (on first iteration, since this is a generator) if
    *boundaries* is not a two-element sequence.
    """
    # itertools.izip_longest was renamed to zip_longest in Python 3.
    zip_longest = getattr(itertools, 'zip_longest', None)
    if zip_longest is None:
        # python 2.x
        zip_longest = itertools.izip_longest

    if len(boundaries) != 2:
        raise ValueError("boundaries should be a two-element tuple")
    # Named low/high to avoid shadowing the min/max builtins.
    low, high = boundaries
    highs = range(first + 1, high + 1)
    lows = range(first - 1, low - 1, -1)
    yield first
    # Interleave both directions; zip_longest pads the shorter side with
    # None, which we skip explicitly (filter(None, ...) would also have
    # dropped a legitimate server number 0).
    for n in itertools.chain.from_iterable(zip_longest(highs, lows)):
        if n is not None:
            yield n