jfr.im git - yt-dlp.git/commitdiff
[utils] Add `parse_qs`
author: pukkandan <redacted>
Sun, 22 Aug 2021 19:02:00 +0000 (00:32 +0530)
committer: pukkandan <redacted>
Sun, 22 Aug 2021 19:20:43 +0000 (00:50 +0530)
36 files changed:
test/test_utils.py
yt_dlp/extractor/aol.py
yt_dlp/extractor/archiveorg.py
yt_dlp/extractor/arkena.py
yt_dlp/extractor/arte.py
yt_dlp/extractor/bbc.py
yt_dlp/extractor/beeg.py
yt_dlp/extractor/brightcove.py
yt_dlp/extractor/ciscolive.py
yt_dlp/extractor/clyp.py
yt_dlp/extractor/daum.py
yt_dlp/extractor/europa.py
yt_dlp/extractor/francetv.py
yt_dlp/extractor/internetvideoarchive.py
yt_dlp/extractor/lbry.py
yt_dlp/extractor/mediaset.py
yt_dlp/extractor/nba.py
yt_dlp/extractor/noco.py
yt_dlp/extractor/pandoratv.py
yt_dlp/extractor/pladform.py
yt_dlp/extractor/pluralsight.py
yt_dlp/extractor/rutube.py
yt_dlp/extractor/seznamzpravy.py
yt_dlp/extractor/sixplay.py
yt_dlp/extractor/sportdeutschland.py
yt_dlp/extractor/tele5.py
yt_dlp/extractor/theplatform.py
yt_dlp/extractor/twitch.py
yt_dlp/extractor/varzesh3.py
yt_dlp/extractor/vevo.py
yt_dlp/extractor/videa.py
yt_dlp/extractor/videomore.py
yt_dlp/extractor/vimeo.py
yt_dlp/extractor/xboxclips.py
yt_dlp/extractor/youtube.py
yt_dlp/utils.py

index dedc598f7b9f37b9c99a834f8ceb98ea689032fd..d20bca79500b74dbea62f3f54fd78074aa9cd538 100644 (file)
@@ -62,6 +62,7 @@
     parse_iso8601,
     parse_resolution,
     parse_bitrate,
+    parse_qs,
     pkcs1pad,
     read_batch_urls,
     sanitize_filename,
     compat_getenv,
     compat_os_name,
     compat_setenv,
-    compat_urlparse,
-    compat_parse_qs,
 )
 
 
@@ -688,38 +687,36 @@ def test_urlencode_postdata(self):
         self.assertTrue(isinstance(data, bytes))
 
     def test_update_url_query(self):
-        def query_dict(url):
-            return compat_parse_qs(compat_urlparse.urlparse(url).query)
-        self.assertEqual(query_dict(update_url_query(
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'quality': ['HD'], 'format': ['mp4']})),
-            query_dict('http://example.com/path?quality=HD&format=mp4'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?quality=HD&format=mp4'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'system': ['LINUX', 'WINDOWS']})),
-            query_dict('http://example.com/path?system=LINUX&system=WINDOWS'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?system=LINUX&system=WINDOWS'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'fields': 'id,formats,subtitles'})),
-            query_dict('http://example.com/path?fields=id,formats,subtitles'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?fields=id,formats,subtitles'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'fields': ('id,formats,subtitles', 'thumbnails')})),
-            query_dict('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path?manifest=f4m', {'manifest': []})),
-            query_dict('http://example.com/path'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path?system=LINUX&system=WINDOWS', {'system': 'LINUX'})),
-            query_dict('http://example.com/path?system=LINUX'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?system=LINUX'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'fields': b'id,formats,subtitles'})),
-            query_dict('http://example.com/path?fields=id,formats,subtitles'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?fields=id,formats,subtitles'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'width': 1080, 'height': 720})),
-            query_dict('http://example.com/path?width=1080&height=720'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?width=1080&height=720'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'bitrate': 5020.43})),
-            query_dict('http://example.com/path?bitrate=5020.43'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?bitrate=5020.43'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'test': '第二行тест'})),
-            query_dict('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82'))
+            parse_qs('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82'))
 
     def test_multipart_encode(self):
         self.assertEqual(
index 133b5e25410f2455f847d1f547defa64e61abdfc..4766a2c7747b11720b2aabbeac0b38ed8a4e34b7 100644 (file)
@@ -4,13 +4,10 @@
 import re
 
 from .yahoo import YahooIE
-from ..compat import (
-    compat_parse_qs,
-    compat_urllib_parse_urlparse,
-)
 from ..utils import (
     ExtractorError,
     int_or_none,
+    parse_qs,
     url_or_none,
 )
 
@@ -119,7 +116,7 @@ def _real_extract(self, url):
                         'height': int(mobj.group(2)),
                     })
                 else:
-                    qs = compat_parse_qs(compat_urllib_parse_urlparse(video_url).query)
+                    qs = parse_qs(video_url)
                     f.update({
                         'width': int_or_none(qs.get('w', [None])[0]),
                         'height': int_or_none(qs.get('h', [None])[0]),
index db685ff427f9bba6a53ad72f34daf0e1ea5873db..d90fcb13aa7b2bc3d8df83614402e14d6a9dd473 100644 (file)
@@ -9,8 +9,6 @@
 from ..compat import (
     compat_urllib_parse_unquote,
     compat_urllib_parse_unquote_plus,
-    compat_urlparse,
-    compat_parse_qs,
     compat_HTTPError
 )
 from ..utils import (
@@ -25,6 +23,7 @@
     merge_dicts,
     mimetype2ext,
     parse_duration,
+    parse_qs,
     RegexNotFoundError,
     str_to_int,
     str_or_none,
@@ -399,7 +398,7 @@ def _extract_title(webpage):
                     expected=True)
             raise
         video_file_url = compat_urllib_parse_unquote(video_file_webpage.url)
-        video_file_url_qs = compat_parse_qs(compat_urlparse.urlparse(video_file_url).query)
+        video_file_url_qs = parse_qs(video_file_url)
 
         # Attempt to recover any ext & format info from playback url
         format = {'url': video_file_url}
index 7b188614102b335262cbe7272bb68a0c74bbd828..4f4f457c16565445e14ca9266dcb80183cb95e2c 100644 (file)
@@ -4,12 +4,12 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urlparse
 from ..utils import (
     ExtractorError,
     float_or_none,
     int_or_none,
     parse_iso8601,
+    parse_qs,
     try_get,
 )
 
@@ -69,7 +69,7 @@ def _real_extract(self, url):
 
         # Handle http://video.arkena.com/play2/embed/player URL
         if not video_id:
-            qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+            qs = parse_qs(url)
             video_id = qs.get('mediaId', [None])[0]
             account_id = qs.get('accountId', [None])[0]
             if not video_id or not account_id:
index c163db9c94a19fe2ba2769053e57ca15b0bbde17..ed245b75fd020cb6e5a44aa355356cb891c02767 100644 (file)
@@ -6,11 +6,11 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urlparse,
 )
 from ..utils import (
     ExtractorError,
     int_or_none,
+    parse_qs,
     qualities,
     try_get,
     unified_strdate,
@@ -204,7 +204,7 @@ def _extract_urls(webpage):
             webpage)]
 
     def _real_extract(self, url):
-        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        qs = parse_qs(url)
         json_url = qs['json_url'][0]
         video_id = ArteTVIE._match_id(json_url)
         return self.url_result(
index 09b2932d20ef53d799b4775c4644d407b63501d3..de497ab1d3d6fd162d0e1b610e40fe45a78d4080 100644 (file)
@@ -10,9 +10,7 @@
 from ..compat import (
     compat_etree_Element,
     compat_HTTPError,
-    compat_parse_qs,
     compat_str,
-    compat_urllib_parse_urlparse,
     compat_urlparse,
 )
 from ..utils import (
@@ -26,6 +24,7 @@
     js_to_json,
     parse_duration,
     parse_iso8601,
+    parse_qs,
     strip_or_none,
     try_get,
     unescapeHTML,
@@ -1410,7 +1409,7 @@ def _fetch_page(self, programme_id, per_page, series_id, page):
 
     def _real_extract(self, url):
         pid = self._match_id(url)
-        qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        qs = parse_qs(url)
         series_id = qs.get('seriesId', [None])[0]
         page = qs.get('page', [None])[0]
         per_page = 36 if page else self._PAGE_SIZE
index 5788d13baae816ad69155d42f5f2d2b5ce837231..8fbabe70833c4db74d2cd42fc6ae7514b132b0ad 100644 (file)
@@ -3,10 +3,10 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urlparse,
 )
 from ..utils import (
     int_or_none,
+    parse_qs,
     unified_timestamp,
 )
 
@@ -57,7 +57,7 @@ def _real_extract(self, url):
             query = {
                 'v': 2,
             }
-            qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+            qs = parse_qs(url)
             t = qs.get('t', [''])[0].split('-')
             if len(t) > 1:
                 query.update({
index 31606d3bda02a11190d73554f25798f946e9d9c7..f3d955d6bfbc9b80e3b55d9b25256972e65e6989 100644 (file)
@@ -11,7 +11,6 @@
     compat_etree_fromstring,
     compat_HTTPError,
     compat_parse_qs,
-    compat_urllib_parse_urlparse,
     compat_urlparse,
     compat_xml_parse_error,
 )
@@ -26,6 +25,7 @@
     js_to_json,
     mimetype2ext,
     parse_iso8601,
+    parse_qs,
     smuggle_url,
     str_or_none,
     try_get,
@@ -177,7 +177,7 @@ def _build_brightcove_url(cls, object_str):
             flashvars = {}
 
         data_url = object_doc.attrib.get('data', '')
-        data_url_params = compat_parse_qs(compat_urllib_parse_urlparse(data_url).query)
+        data_url_params = parse_qs(data_url)
 
         def find_param(name):
             if name in flashvars:
index da404e4dc57b997834a61d217b39e84d497db173..349c5eb50d25b3301a42512d1c31975d2c476b01 100644 (file)
@@ -4,14 +4,11 @@
 import itertools
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_parse_qs,
-    compat_urllib_parse_urlparse,
-)
 from ..utils import (
     clean_html,
     float_or_none,
     int_or_none,
+    parse_qs,
     try_get,
     urlencode_postdata,
 )
@@ -145,7 +142,7 @@ def _entries(self, query, url):
             query['from'] += query['size']
 
     def _real_extract(self, url):
-        query = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        query = parse_qs(url)
         query['type'] = 'session'
         return self.playlist_result(
             self._entries(query, url), playlist_title='Search query')
index 06d04de139444e0e48d46cc2132eee9f1f94919d..e6b2ac4d4887504cf7e901d625aca1b77c8e476c 100644 (file)
@@ -1,12 +1,9 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_parse_qs,
-    compat_urllib_parse_urlparse,
-)
 from ..utils import (
     float_or_none,
+    parse_qs,
     unified_timestamp,
 )
 
@@ -44,7 +41,7 @@ class ClypIE(InfoExtractor):
     def _real_extract(self, url):
         audio_id = self._match_id(url)
 
-        qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        qs = parse_qs(url)
         token = qs.get('token', [None])[0]
 
         query = {}
index b0911cf94349d2adc6d688b3f61f74b7b13b6675..8aa2af9a8add204004acc7f01400abb43742aca2 100644 (file)
@@ -6,10 +6,9 @@
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_parse_qs,
     compat_urllib_parse_unquote,
-    compat_urlparse,
 )
+from ..utils import parse_qs
 
 
 class DaumBaseIE(InfoExtractor):
@@ -155,7 +154,7 @@ def _get_entries(self, list_id, list_id_type):
         return name, entries
 
     def _check_clip(self, url, list_id):
-        query_dict = compat_parse_qs(compat_urlparse.urlparse(url).query)
+        query_dict = parse_qs(url)
         if 'clipid' in query_dict:
             clip_id = query_dict['clipid'][0]
             if self.get_param('noplaylist'):
@@ -256,7 +255,7 @@ def _real_extract(self, url):
         if clip_result:
             return clip_result
 
-        query_dict = compat_parse_qs(compat_urlparse.urlparse(url).query)
+        query_dict = parse_qs(url)
         if 'playlistid' in query_dict:
             playlist_id = query_dict['playlistid'][0]
             return self.url_result(DaumPlaylistIE._URL_TEMPLATE % playlist_id, 'DaumPlaylist')
index 2c1c747a1d741381c7a7f9eea158269e451ca60a..60ab2ce133330584bc68d8545f3e7adc86067839 100644 (file)
@@ -2,11 +2,11 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import compat_urlparse
 from ..utils import (
     int_or_none,
     orderedSet,
     parse_duration,
+    parse_qs,
     qualities,
     unified_strdate,
     xpath_text
@@ -53,7 +53,7 @@ def get_item(type_, preference):
                 if items.get(p):
                     return items[p]
 
-        query = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        query = parse_qs(url)
         preferred_lang = query.get('sitelang', ('en', ))[0]
 
         preferred_langs = orderedSet((preferred_lang, 'en', 'int'))
index 202141497c202f7b77646000da5d5ab5435e8272..41910cefb1f6b03a20bf86fc3c4259e05dd206d3 100644 (file)
@@ -6,7 +6,6 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urlparse,
 )
 from ..utils import (
     clean_html,
@@ -14,6 +13,7 @@
     ExtractorError,
     int_or_none,
     parse_duration,
+    parse_qs,
     try_get,
     url_or_none,
     urljoin,
@@ -226,7 +226,7 @@ def _real_extract(self, url):
         catalog = mobj.group('catalog')
 
         if not video_id:
-            qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+            qs = parse_qs(url)
             video_id = qs.get('idDiffusion', [None])[0]
             catalog = qs.get('catalogue', [None])[0]
             if not video_id:
index 59b0a90c398c4c8ad6ab8fd39566bffbb8793046..880918cd793a2e1952b44e22df6e64c74dc7efc8 100644 (file)
@@ -4,10 +4,7 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_parse_qs,
-    compat_urlparse,
-)
+from ..utils import parse_qs
 
 
 class InternetVideoArchiveIE(InfoExtractor):
@@ -32,7 +29,7 @@ def _build_json_url(query):
         return 'http://video.internetvideoarchive.net/player/6/configuration.ashx?' + query
 
     def _real_extract(self, url):
-        query = compat_parse_qs(compat_urlparse.urlparse(url).query)
+        query = parse_qs(url)
         video_id = query['publishedid'][0]
         data = self._download_json(
             'https://video.internetvideoarchive.net/videojs7/videojs7.ivasettings.ashx',
index cdfbefcd431e85ffc5b72151a666b55f704228e7..4289c51b812b6d926e7100142e8c0b28a0c92949 100644 (file)
@@ -6,16 +6,15 @@
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_parse_qs,
     compat_str,
     compat_urllib_parse_unquote,
-    compat_urllib_parse_urlparse,
 )
 from ..utils import (
     determine_ext,
     ExtractorError,
     int_or_none,
     mimetype2ext,
+    parse_qs,
     OnDemandPagedList,
     try_get,
     urljoin,
@@ -256,7 +255,7 @@ def _real_extract(self, url):
         result = self._resolve_url(
             'lbry://' + display_id, display_id, 'channel')
         claim_id = result['claim_id']
-        qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        qs = parse_qs(url)
         content = qs.get('content', [None])[0]
         params = {
             'fee_amount': qs.get('fee_amount', ['>=0'])[0],
index 491e716bd3c0d9d3d44f1b665f5d19c521cfbfd7..d8f12dca6bfaa2230993da12b3c1c5acd07748d9 100644 (file)
@@ -4,13 +4,10 @@
 import re
 
 from .theplatform import ThePlatformBaseIE
-from ..compat import (
-    compat_parse_qs,
-    compat_urllib_parse_urlparse,
-)
 from ..utils import (
     ExtractorError,
     int_or_none,
+    parse_qs,
     update_url_query,
 )
 
@@ -96,7 +93,7 @@ class MediasetIE(ThePlatformBaseIE):
     @staticmethod
     def _extract_urls(ie, webpage):
         def _qs(url):
-            return compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+            return parse_qs(url)
 
         def _program_guid(qs):
             return qs.get('programGuid', [None])[0]
index 3c678c50d00688c232904b64099210b7e38fdd51..7390ef8bcd23cce11a3e9784dff5d8182db0aa08 100644 (file)
@@ -5,10 +5,8 @@
 
 from .turner import TurnerBaseIE
 from ..compat import (
-    compat_parse_qs,
     compat_str,
     compat_urllib_parse_unquote,
-    compat_urllib_parse_urlparse,
 )
 from ..utils import (
     int_or_none,
@@ -16,6 +14,7 @@
     OnDemandPagedList,
     parse_duration,
     parse_iso8601,
+    parse_qs,
     try_get,
     update_url_query,
     urljoin,
@@ -165,7 +164,7 @@ class NBAWatchIE(NBAWatchBaseIE):
 
     def _real_extract(self, url):
         display_id = self._match_id(url)
-        collection_id = compat_parse_qs(compat_urllib_parse_urlparse(url).query).get('collection', [None])[0]
+        collection_id = parse_qs(url).get('collection', [None])[0]
         if collection_id:
             if self.get_param('noplaylist'):
                 self.to_screen('Downloading just video %s because of --no-playlist' % display_id)
@@ -359,7 +358,7 @@ class NBAEmbedIE(NBABaseIE):
     }]
 
     def _real_extract(self, url):
-        qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        qs = parse_qs(url)
         content_id = qs['contentId'][0]
         team = qs.get('team', [None])[0]
         if not team:
index aec8433ded526312064bf141f299cf3f86e26192..78c4952f4774f9764c0f499dd11cf0bb721fc0e2 100644 (file)
@@ -8,7 +8,6 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urlparse,
 )
 from ..utils import (
     clean_html,
@@ -16,6 +15,7 @@
     int_or_none,
     float_or_none,
     parse_iso8601,
+    parse_qs,
     sanitized_Request,
     urlencode_postdata,
 )
@@ -123,7 +123,7 @@ def _real_extract(self, url):
             webpage, 'noco player', group='player',
             default='http://noco.tv/cdata/js/player/NocoPlayer-v1.2.40.swf')
 
-        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(player_url).query)
+        qs = parse_qs(player_url)
         ts = int_or_none(qs.get('ts', [None])[0])
         self._ts_offset = ts - self._ts() if ts else 0
         self._referer = player_url
index 44b462bebfdc1a65b8e6a67848c8ea20bc2482ad..623005338b210ac4fd9d5924180e04eef15c6a1e 100644 (file)
@@ -5,12 +5,12 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urlparse,
 )
 from ..utils import (
     ExtractorError,
     float_or_none,
     parse_duration,
+    parse_qs,
     str_to_int,
     urlencode_postdata,
 )
@@ -75,7 +75,7 @@ def _real_extract(self, url):
         video_id = mobj.group('id')
 
         if not user_id or not video_id:
-            qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+            qs = parse_qs(url)
             video_id = qs.get('prgid', [None])[0]
             user_id = qs.get('ch_userid', [None])[0]
             if any(not f for f in (video_id, user_id,)):
index e86c65396bc0326c80268b4a3130b6bb786dfa9f..dc2030017b5b95cc84f9952bc68a61f7e60db924 100644 (file)
@@ -4,11 +4,11 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urlparse
 from ..utils import (
     determine_ext,
     ExtractorError,
     int_or_none,
+    parse_qs,
     xpath_text,
     qualities,
 )
@@ -56,7 +56,7 @@ def _extract_url(webpage):
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
-        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        qs = parse_qs(url)
         pl = qs.get('pl', ['1'])[0]
 
         video = self._download_xml(
index d494753e685a9dbd8424e4c790e83cef8bd5ef3e..801057ee10ca18af0d2ba33eb913928e281dc377 100644 (file)
@@ -17,6 +17,7 @@
     float_or_none,
     int_or_none,
     parse_duration,
+    parse_qs,
     qualities,
     srt_subtitles_timecode,
     try_get,
@@ -273,7 +274,7 @@ def _convert_subtitles(duration, subs):
         return srt
 
     def _real_extract(self, url):
-        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        qs = parse_qs(url)
 
         author = qs.get('author', [None])[0]
         name = qs.get('name', [None])[0]
index 8f54d5675e3fbd41083bfe76f0641ff7681b7dcf..01529315f923fb3fa18f3832dce6bea398c107f7 100644 (file)
@@ -7,13 +7,12 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_parse_qs,
-    compat_urllib_parse_urlparse,
 )
 from ..utils import (
     determine_ext,
     bool_or_none,
     int_or_none,
+    parse_qs,
     try_get,
     unified_timestamp,
     url_or_none,
@@ -178,7 +177,7 @@ def _real_extract(self, url):
         embed_id = self._match_id(url)
         # Query may contain private videos token and should be passed to API
         # requests (see #19163)
-        query = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        query = parse_qs(url)
         options = self._download_api_options(embed_id, query)
         video_id = options['effective_video']
         formats = self._extract_formats(options, video_id)
@@ -300,14 +299,14 @@ class RutubePlaylistIE(RutubePlaylistBaseIE):
     def suitable(cls, url):
         if not super(RutubePlaylistIE, cls).suitable(url):
             return False
-        params = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        params = parse_qs(url)
         return params.get('pl_type', [None])[0] and int_or_none(params.get('pl_id', [None])[0])
 
     def _next_page_url(self, page_num, playlist_id, item_kind):
         return self._PAGE_TEMPLATE % (item_kind, playlist_id, page_num)
 
     def _real_extract(self, url):
-        qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        qs = parse_qs(url)
         playlist_kind = qs['pl_type'][0]
         playlist_id = qs['pl_id'][0]
         return self._extract_playlist(playlist_id, item_kind=playlist_kind)
index 7a1c7e38bec915a31713a98a2cbee234faf0edb1..eef4975cba866d6a59a59830091e93402bca4af8 100644 (file)
@@ -5,7 +5,6 @@
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_parse_qs,
     compat_str,
     compat_urllib_parse_urlparse,
 )
@@ -13,6 +12,7 @@
     urljoin,
     int_or_none,
     parse_codecs,
+    parse_qs,
     try_get,
 )
 
@@ -108,7 +108,7 @@ def get_url(format_id):
         return formats
 
     def _real_extract(self, url):
-        params = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        params = parse_qs(url)
 
         src = params['src'][0]
         title = params['title'][0]
index 3a3a99256981abfd5b05c974ca741d8cb71a0ee2..fd747f59b5d5dac7cc520eb38fb5317b4e43890a 100644 (file)
@@ -4,13 +4,12 @@
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_parse_qs,
     compat_str,
-    compat_urllib_parse_urlparse,
 )
 from ..utils import (
     determine_ext,
     int_or_none,
+    parse_qs,
     try_get,
     qualities,
 )
@@ -78,7 +77,7 @@ def _real_extract(self, url):
                 continue
             if container == 'm3u8' or ext == 'm3u8':
                 if protocol == 'usp':
-                    if compat_parse_qs(compat_urllib_parse_urlparse(asset_url).query).get('token', [None])[0]:
+                    if parse_qs(asset_url).get('token', [None])[0]:
                         urlh = self._request_webpage(
                             asset_url, video_id, fatal=False,
                             headers=self.geo_verification_headers())
index 2129a5670fdbc7817441144236c0b3a5d5b906d8..94bcaba448e202aaa147f55cb141b8ec16f85de2 100644 (file)
@@ -2,15 +2,12 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_parse_qs,
-    compat_urllib_parse_urlparse,
-)
 from ..utils import (
     clean_html,
     float_or_none,
     int_or_none,
     parse_iso8601,
+    parse_qs,
     strip_or_none,
     try_get,
 )
@@ -61,7 +58,7 @@ def _real_extract(self, url):
         }
         videos = asset.get('videos') or []
         if len(videos) > 1:
-            playlist_id = compat_parse_qs(compat_urllib_parse_urlparse(url).query).get('playlistId', [None])[0]
+            playlist_id = parse_qs(url).get('playlistId', [None])[0]
             if playlist_id:
                 if self.get_param('noplaylist'):
                     videos = [videos[int(playlist_id)]]
index 3e1a7a9e609a9eb80732348b86e5976e12093a0b..0d9cf75ca9e1a460b29cac1eecc03ed1b73ef2af 100644 (file)
@@ -6,9 +6,9 @@
 from .common import InfoExtractor
 from .jwplatform import JWPlatformIE
 from .nexx import NexxIE
-from ..compat import compat_urlparse
 from ..utils import (
     NO_DEFAULT,
+    parse_qs,
     smuggle_url,
 )
 
@@ -64,7 +64,7 @@ class Tele5IE(InfoExtractor):
     }]
 
     def _real_extract(self, url):
-        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        qs = parse_qs(url)
         video_id = (qs.get('vid') or qs.get('ve_id') or [None])[0]
 
         NEXX_ID_RE = r'\d{6,}'
index c56b708b8208b5a08c0c87755c9a8ad3741c1f36..c2729f12d7691e0898019a606592ac8cbae1a2e7 100644 (file)
 
 from .once import OnceIE
 from .adobepass import AdobePassIE
-from ..compat import (
-    compat_parse_qs,
-    compat_urllib_parse_urlparse,
-)
 from ..utils import (
     determine_ext,
     ExtractorError,
     float_or_none,
     int_or_none,
+    parse_qs,
     sanitized_Request,
     unsmuggle_url,
     update_url_query,
@@ -250,7 +247,7 @@ def _real_extract(self, url):
             path += mobj.group('media')
         path += video_id
 
-        qs_dict = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        qs_dict = parse_qs(url)
         if 'guid' in qs_dict:
             webpage = self._download_webpage(url, video_id)
             scripts = re.findall(r'<script[^>]+src="([^"]+)"', webpage)
@@ -359,7 +356,7 @@ def _extract_feed_info(self, provider_id, feed_id, filter_query, video_id, custo
             if first_video_id is None:
                 first_video_id = cur_video_id
                 duration = float_or_none(item.get('plfile$duration'))
-            file_asset_types = item.get('plfile$assetTypes') or compat_parse_qs(compat_urllib_parse_urlparse(smil_url).query)['assetTypes']
+            file_asset_types = item.get('plfile$assetTypes') or parse_qs(smil_url)['assetTypes']
             for asset_type in file_asset_types:
                 if asset_type in asset_types:
                     continue
index e544e47371faf9261bca877226423925ec4af241..be70beed4b2afdb7faf5f40f43aef38e8a092ea9 100644 (file)
@@ -11,7 +11,6 @@
 from ..compat import (
     compat_parse_qs,
     compat_str,
-    compat_urlparse,
     compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
 )
@@ -23,6 +22,7 @@
     int_or_none,
     parse_duration,
     parse_iso8601,
+    parse_qs,
     qualities,
     try_get,
     unified_timestamp,
@@ -571,7 +571,7 @@ def _extract_entry(node):
 
     def _real_extract(self, url):
         channel_name = self._match_id(url)
-        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        qs = parse_qs(url)
         filter = qs.get('filter', ['all'])[0]
         sort = qs.get('sort', ['time'])[0]
         broadcast = self._BROADCASTS.get(filter, self._DEFAULT_BROADCAST)
@@ -647,7 +647,7 @@ def _extract_entry(node):
 
     def _real_extract(self, url):
         channel_name = self._match_id(url)
-        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        qs = parse_qs(url)
         range = qs.get('range', ['7d'])[0]
         clip = self._RANGE.get(range, self._DEFAULT_CLIP)
         return self.playlist_result(
index f474ed73f861910d9c593510a4aff6be8244e903..81313dc9d767a875ff05019a7afffa012ce19cb2 100644 (file)
@@ -2,12 +2,9 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse_urlparse,
-    compat_parse_qs,
-)
 from ..utils import (
     clean_html,
+    parse_qs,
     remove_start,
 )
 
@@ -59,7 +56,7 @@ def _real_extract(self, url):
             fb_sharer_url = self._search_regex(
                 r'<a[^>]+href="(https?://www\.facebook\.com/sharer/sharer\.php?[^"]+)"',
                 webpage, 'facebook sharer URL', fatal=False)
-            sharer_params = compat_parse_qs(compat_urllib_parse_urlparse(fb_sharer_url).query)
+            sharer_params = parse_qs(fb_sharer_url)
             thumbnail = sharer_params.get('p[images][0]', [None])[0]
 
         video_id = self._search_regex(
index 142ac8dc2615e60168124d7d7ed41bd8d39c551f..8a0f29259c517c14de9c82cd9386b2c63d5de643 100644 (file)
@@ -6,13 +6,13 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urlparse,
     compat_HTTPError,
 )
 from ..utils import (
     ExtractorError,
     int_or_none,
     parse_iso8601,
+    parse_qs,
 )
 
 
@@ -218,7 +218,7 @@ def _real_extract(self, url):
 
         webpage = self._download_webpage(url, playlist_id)
 
-        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        qs = parse_qs(url)
         index = qs.get('index', [None])[0]
 
         if index:
index f7c24d2595a3e4cc29a1db1945d4087a27c0dc4b..da0212bb2748b27185f142bc1843efe592cf4f3a 100644 (file)
@@ -11,6 +11,7 @@
     int_or_none,
     mimetype2ext,
     parse_codecs,
+    parse_qs,
     update_url_query,
     urljoin,
     xpath_element,
@@ -20,7 +21,6 @@
     compat_b64decode,
     compat_ord,
     compat_struct_pack,
-    compat_urlparse,
 )
 
 
@@ -113,7 +113,7 @@ def _real_extract(self, url):
         for i in range(0, 32):
             result += s[i - (self._STATIC_SECRET.index(l[i]) - 31)]
 
-        query = compat_urlparse.parse_qs(compat_urlparse.urlparse(player_url).query)
+        query = parse_qs(player_url)
         random_seed = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(8))
         query['_s'] = random_seed
         query['_t'] = result[:16]
index baafdc15d080e01c81273a5982b918da3547f86e..17ef3b1b9ed81f93c86ed893796883a1d4cc252a 100644 (file)
@@ -5,12 +5,11 @@
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_parse_qs,
     compat_str,
-    compat_urllib_parse_urlparse,
 )
 from ..utils import (
     int_or_none,
+    parse_qs,
 )
 
 
@@ -146,7 +145,7 @@ def _extract_url(webpage):
     def _real_extract(self, url):
         mobj = self._match_valid_url(url)
         video_id = mobj.group('sid') or mobj.group('id')
-        partner_id = mobj.group('partner_id') or compat_parse_qs(compat_urllib_parse_urlparse(url).query).get('partner_id', [None])[0] or '97'
+        partner_id = mobj.group('partner_id') or parse_qs(url).get('partner_id', [None])[0] or '97'
 
         item = self._download_json(
             'https://siren.more.tv/player/config', video_id, query={
index 5c09c8520c06ba074f703da80f0a33183e68aa0b..8b367a4e636fd3703ebf6bcdc1cacbdada18ba77 100644 (file)
@@ -25,6 +25,7 @@
     OnDemandPagedList,
     parse_filesize,
     parse_iso8601,
+    parse_qs,
     RegexNotFoundError,
     sanitized_Request,
     smuggle_url,
@@ -265,7 +266,7 @@ def _extract_original_format(self, url, video_id, unlisted_hash=None):
             download_url = download_data.get('link')
             if not download_url or download_data.get('quality') != 'source':
                 continue
-            query = compat_urlparse.parse_qs(compat_urlparse.urlparse(download_url).query)
+            query = parse_qs(download_url)
             return {
                 'url': download_url,
                 'ext': determine_ext(query.get('filename', [''])[0].lower()),
index 25f487e1ee03297fd5fcd36aaeae5fe6efb5ccc1..9bac982f84abb8b855cccc3e6c81563fa1bcffe0 100644 (file)
@@ -4,14 +4,11 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_parse_qs,
-    compat_urllib_parse_urlparse,
-)
 from ..utils import (
     int_or_none,
     month_by_abbreviation,
     parse_filesize,
+    parse_qs,
 )
 
 
@@ -37,7 +34,7 @@ def _real_extract(self, url):
         video_id = self._match_id(url)
 
         if '/video.php' in url:
-            qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+            qs = parse_qs(url)
             url = 'https://gameclips.io/%s/%s' % (qs['gamertag'][0], qs['vid'][0])
 
         webpage = self._download_webpage(url, video_id)
index 5bce53349d997652431579c657ecc7350de79aa1..15e0f8adbc0ac1fa3de56719b24d4867f5443715 100644 (file)
@@ -46,6 +46,7 @@
     parse_count,
     parse_duration,
     parse_iso8601,
+    parse_qs,
     qualities,
     remove_start,
     smuggle_url,
 )
 
 
-def parse_qs(url):
-    return compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
-
-
 # any clients starting with _ cannot be explicity requested by the user
 INNERTUBE_CLIENTS = {
     'web': {
@@ -1842,7 +1839,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
     def suitable(cls, url):
         # Hack for lazy extractors until more generic solution is implemented
         # (see #28780)
-        from .youtube import parse_qs
+        from ..utils import parse_qs
+
         qs = parse_qs(url)
         if qs.get('list', [None])[0]:
             return False
@@ -4598,7 +4596,7 @@ def _make_valid_url(cls):
         return cls._VALID_URL
 
     def _real_extract(self, url):
-        qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        qs = parse_qs(url)
         query = (qs.get('search_query') or qs.get('q'))[0]
         self._SEARCH_PARAMS = qs.get('sp', ('',))[0]
         return self._get_n_results(query, self._MAX_RESULTS)
index 6276ac726be379f628e056b8e76b5d7b8523d0b8..c07a17099d518d1937d3c7ae122142da1cb95843 100644 (file)
@@ -4167,6 +4167,10 @@ def escape_url(url):
     ).geturl()
 
 
+def parse_qs(url):
+    return compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+
+
 def read_batch_urls(batch_fd):
     def fixup(url):
         if not isinstance(url, compat_str):