jfr.im git - yt-dlp.git/commitdiff
[cleanup] Standardize `import datetime as dt` (#8978)
authorpukkandan <redacted>
Sun, 25 Feb 2024 00:16:34 +0000 (05:46 +0530)
committerpukkandan <redacted>
Mon, 1 Apr 2024 00:02:15 +0000 (05:32 +0530)
21 files changed:
devscripts/tomlparse.py
devscripts/update-version.py
test/test_cookies.py
yt_dlp/YoutubeDL.py
yt_dlp/cookies.py
yt_dlp/extractor/atvat.py
yt_dlp/extractor/aws.py
yt_dlp/extractor/cda.py
yt_dlp/extractor/goplay.py
yt_dlp/extractor/joqrag.py
yt_dlp/extractor/leeco.py
yt_dlp/extractor/motherless.py
yt_dlp/extractor/niconico.py
yt_dlp/extractor/panopto.py
yt_dlp/extractor/pr0gramm.py
yt_dlp/extractor/rokfin.py
yt_dlp/extractor/sejmpl.py
yt_dlp/extractor/sonyliv.py
yt_dlp/extractor/tenplay.py
yt_dlp/extractor/youtube.py
yt_dlp/utils/_utils.py

index 85ac4eef789337b7de71f5aa7607d68dd1741b59..ac9ea3170738738103d4ee6b74506afcbd2e15e5 100755 (executable)
@@ -11,7 +11,7 @@
 
 from __future__ import annotations
 
-import datetime
+import datetime as dt
 import json
 import re
 
@@ -115,9 +115,9 @@ def parse_value(data: str, index: int):
     for func in [
         int,
         float,
-        datetime.time.fromisoformat,
-        datetime.date.fromisoformat,
-        datetime.datetime.fromisoformat,
+        dt.time.fromisoformat,
+        dt.date.fromisoformat,
+        dt.datetime.fromisoformat,
         {'true': True, 'false': False}.get,
     ]:
         try:
@@ -179,7 +179,7 @@ def main():
         data = file.read()
 
     def default(obj):
-        if isinstance(obj, (datetime.date, datetime.time, datetime.datetime)):
+        if isinstance(obj, (dt.date, dt.time, dt.datetime)):
             return obj.isoformat()
 
     print(json.dumps(parse_toml(data), default=default))
index da54a6a2588268e8030f9f7c18a7349a06a50902..07a071745866a2a2a11fccef5aa92b5c6d9d93f6 100644 (file)
@@ -9,15 +9,15 @@
 
 import argparse
 import contextlib
+import datetime as dt
 import sys
-from datetime import datetime, timezone
 
 from devscripts.utils import read_version, run_process, write_file
 
 
 def get_new_version(version, revision):
     if not version:
-        version = datetime.now(timezone.utc).strftime('%Y.%m.%d')
+        version = dt.datetime.now(dt.timezone.utc).strftime('%Y.%m.%d')
 
     if revision:
         assert revision.isdecimal(), 'Revision must be a number'
index 5282ef6215d05001b404b1c016fcc9c52b4275f1..bd61f30a660d1452827544012ee5804a7d1cad3a 100644 (file)
@@ -1,5 +1,5 @@
+import datetime as dt
 import unittest
-from datetime import datetime, timezone
 
 from yt_dlp import cookies
 from yt_dlp.cookies import (
@@ -138,7 +138,7 @@ def test_safari_cookie_parsing(self):
         self.assertEqual(cookie.name, 'foo')
         self.assertEqual(cookie.value, 'test%20%3Bcookie')
         self.assertFalse(cookie.secure)
-        expected_expiration = datetime(2021, 6, 18, 21, 39, 19, tzinfo=timezone.utc)
+        expected_expiration = dt.datetime(2021, 6, 18, 21, 39, 19, tzinfo=dt.timezone.utc)
         self.assertEqual(cookie.expires, int(expected_expiration.timestamp()))
 
     def test_pbkdf2_sha1(self):
index e83108619e07300b77b02d66af9315aa5652f67f..291fc8d00cdc4c0afcf00fad83e260beb7990333 100644 (file)
@@ -1,7 +1,7 @@
 import collections
 import contextlib
 import copy
-import datetime
+import datetime as dt
 import errno
 import fileinput
 import http.cookiejar
@@ -2629,7 +2629,7 @@ def _fill_common_fields(self, info_dict, final=True):
                 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
                 # see http://bugs.python.org/issue1646728)
                 with contextlib.suppress(ValueError, OverflowError, OSError):
-                    upload_date = datetime.datetime.fromtimestamp(info_dict[ts_key], datetime.timezone.utc)
+                    upload_date = dt.datetime.fromtimestamp(info_dict[ts_key], dt.timezone.utc)
                     info_dict[date_key] = upload_date.strftime('%Y%m%d')
 
         if not info_dict.get('release_year'):
@@ -2783,7 +2783,7 @@ def sanitize_numeric_fields(info):
 
         get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
         if not get_from_start:
-            info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
+            info_dict['title'] += ' ' + dt.datetime.now().strftime('%Y-%m-%d %H:%M')
         if info_dict.get('is_live') and formats:
             formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
             if get_from_start and not formats:
index 28d174a09f12e15daa7862b788cf4be69937343d..85d6dd18232e1f7e54abacb4c861fb193bd1baa9 100644 (file)
@@ -1,6 +1,7 @@
 import base64
 import collections
 import contextlib
+import datetime as dt
 import glob
 import http.cookiejar
 import http.cookies
@@ -15,7 +16,6 @@
 import tempfile
 import time
 import urllib.request
-from datetime import datetime, timedelta, timezone
 from enum import Enum, auto
 from hashlib import pbkdf2_hmac
 
@@ -594,7 +594,7 @@ def skip_to_end(self, description='unknown'):
 
 
 def _mac_absolute_time_to_posix(timestamp):
-    return int((datetime(2001, 1, 1, 0, 0, tzinfo=timezone.utc) + timedelta(seconds=timestamp)).timestamp())
+    return int((dt.datetime(2001, 1, 1, 0, 0, tzinfo=dt.timezone.utc) + dt.timedelta(seconds=timestamp)).timestamp())
 
 
 def _parse_safari_cookies_header(data, logger):
index d6ed9e49586f5f02e64b0c20cdd5596b9a6a007d..d60feba3159af8c73df4e6d3c241d14e7480c25f 100644 (file)
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt
 
 from .common import InfoExtractor
 from ..utils import (
@@ -71,9 +71,9 @@ def _real_extract(self, url):
         content_ids = [{'id': id, 'subclip_start': content['start'], 'subclip_end': content['end']}
                        for id, content in enumerate(contentResource)]
 
-        time_of_request = datetime.datetime.now()
-        not_before = time_of_request - datetime.timedelta(minutes=5)
-        expire = time_of_request + datetime.timedelta(minutes=5)
+        time_of_request = dt.datetime.now()
+        not_before = time_of_request - dt.timedelta(minutes=5)
+        expire = time_of_request + dt.timedelta(minutes=5)
         payload = {
             'content_ids': {
                 content_id: content_ids,
index c4741a6a1197a955aa2e728f9167bd354ed1e6d3..4ebef929573912ece001faff268f25181c25742d 100644 (file)
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt
 import hashlib
 import hmac
 
@@ -12,7 +12,7 @@ class AWSIE(InfoExtractor):  # XXX: Conventionally, base classes should end with
 
     def _aws_execute_api(self, aws_dict, video_id, query=None):
         query = query or {}
-        amz_date = datetime.datetime.now(datetime.timezone.utc).strftime('%Y%m%dT%H%M%SZ')
+        amz_date = dt.datetime.now(dt.timezone.utc).strftime('%Y%m%dT%H%M%SZ')
         date = amz_date[:8]
         headers = {
             'Accept': 'application/json',
index 1157114b2ad9f65aabfbf4cab55ad056a9787404..90b4d082e2d35e8b52f3cb080c8c5681f6c2b80a 100644 (file)
@@ -1,6 +1,6 @@
 import base64
 import codecs
-import datetime
+import datetime as dt
 import hashlib
 import hmac
 import json
@@ -134,7 +134,7 @@ def _perform_login(self, username, password):
         self._API_HEADERS['User-Agent'] = f'pl.cda 1.0 (version {app_version}; Android {android_version}; {phone_model})'
 
         cached_bearer = self.cache.load(self._BEARER_CACHE, username) or {}
-        if cached_bearer.get('valid_until', 0) > datetime.datetime.now().timestamp() + 5:
+        if cached_bearer.get('valid_until', 0) > dt.datetime.now().timestamp() + 5:
             self._API_HEADERS['Authorization'] = f'Bearer {cached_bearer["token"]}'
             return
 
@@ -154,7 +154,7 @@ def _perform_login(self, username, password):
             })
         self.cache.store(self._BEARER_CACHE, username, {
             'token': token_res['access_token'],
-            'valid_until': token_res['expires_in'] + datetime.datetime.now().timestamp(),
+            'valid_until': token_res['expires_in'] + dt.datetime.now().timestamp(),
         })
         self._API_HEADERS['Authorization'] = f'Bearer {token_res["access_token"]}'
 
index 74aad1192750a8dbdc44b311fe1d4da5c78eec04..7a98e0f31c0737d8aa3702ffbe49b99f13dabd56 100644 (file)
@@ -1,6 +1,6 @@
 import base64
 import binascii
-import datetime
+import datetime as dt
 import hashlib
 import hmac
 import json
@@ -422,7 +422,7 @@ def __get_current_timestamp():
         months = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
         days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
 
-        time_now = datetime.datetime.now(datetime.timezone.utc)
+        time_now = dt.datetime.now(dt.timezone.utc)
         format_string = "{} {} {} %H:%M:%S UTC %Y".format(days[time_now.weekday()], months[time_now.month], time_now.day)
         time_string = time_now.strftime(format_string)
         return time_string
index 3bb28af94e1251af47c3bd08d0c225aaec1c3de1..c68ad8cb5f4b9d6ccb67ba0c537a43d8b8ead8f6 100644 (file)
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt
 import urllib.parse
 
 from .common import InfoExtractor
@@ -50,8 +50,8 @@ def _extract_metadata(self, variable, html):
 
     def _extract_start_timestamp(self, video_id, is_live):
         def extract_start_time_from(date_str):
-            dt = datetime_from_str(date_str) + datetime.timedelta(hours=9)
-            date = dt.strftime('%Y%m%d')
+            dt_ = datetime_from_str(date_str) + dt.timedelta(hours=9)
+            date = dt_.strftime('%Y%m%d')
             start_time = self._search_regex(
                 r'<h3[^>]+\bclass="dailyProgram-itemHeaderTime"[^>]*>[\s\d:]+–\s*(\d{1,2}:\d{1,2})',
                 self._download_webpage(
@@ -60,7 +60,7 @@ def extract_start_time_from(date_str):
                     errnote=f'Failed to download program list of {date}') or '',
                 'start time', default=None)
             if start_time:
-                return unified_timestamp(f'{dt.strftime("%Y/%m/%d")} {start_time} +09:00')
+                return unified_timestamp(f'{dt_.strftime("%Y/%m/%d")} {start_time} +09:00')
             return None
 
         start_timestamp = extract_start_time_from('today')
@@ -87,7 +87,7 @@ def _real_extract(self, url):
             msg = 'This stream is not currently live'
             if release_timestamp:
                 msg += (' and will start at '
-                        + datetime.datetime.fromtimestamp(release_timestamp).strftime('%Y-%m-%d %H:%M:%S'))
+                        + dt.datetime.fromtimestamp(release_timestamp).strftime('%Y-%m-%d %H:%M:%S'))
             self.raise_no_formats(msg, expected=True)
         else:
             m3u8_path = self._search_regex(
index 85033b8f8b48c5b79c44e3e17941fa1805c55c25..5d61a607f7cf30abae973276e3f13b5db5cac9db 100644 (file)
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt
 import hashlib
 import re
 import time
@@ -185,7 +185,7 @@ def get_flash_urls(media_url, format_id):
 
         publish_time = parse_iso8601(self._html_search_regex(
             r'发布时间&nbsp;([^<>]+) ', page, 'publish time', default=None),
-            delimiter=' ', timezone=datetime.timedelta(hours=8))
+            delimiter=' ', timezone=dt.timedelta(hours=8))
         description = self._html_search_meta('description', page, fatal=False)
 
         return {
index 160150a7b6be626e39a0600cda76ddc85cd20f15..b6c18fe5bfb2dded7890a98047801ab121304e56 100644 (file)
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt
 import re
 import urllib.parse
 
@@ -151,7 +151,7 @@ def _real_extract(self, url):
                     'd': 'days',
                 }
                 kwargs = {_AGO_UNITS.get(uploaded_ago[-1]): delta}
-                upload_date = (datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(**kwargs)).strftime('%Y%m%d')
+                upload_date = (dt.datetime.now(dt.timezone.utc) - dt.timedelta(**kwargs)).strftime('%Y%m%d')
 
         comment_count = len(re.findall(r'''class\s*=\s*['"]media-comment-contents\b''', webpage))
         uploader_id = self._html_search_regex(
index 5da728fa165a6f65b5f9b7bdd1e1704a88bc24db..b04ce96154b0b1d458b2ffca47f0cbea6bcca40c 100644 (file)
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt
 import functools
 import itertools
 import json
@@ -819,12 +819,12 @@ class NicovideoSearchDateIE(NicovideoSearchBaseIE, SearchInfoExtractor):
         'playlist_mincount': 1610,
     }]
 
-    _START_DATE = datetime.date(2007, 1, 1)
+    _START_DATE = dt.date(2007, 1, 1)
     _RESULTS_PER_PAGE = 32
     _MAX_PAGES = 50
 
     def _entries(self, url, item_id, start_date=None, end_date=None):
-        start_date, end_date = start_date or self._START_DATE, end_date or datetime.datetime.now().date()
+        start_date, end_date = start_date or self._START_DATE, end_date or dt.datetime.now().date()
 
         # If the last page has a full page of videos, we need to break down the query interval further
         last_page_len = len(list(self._get_entries_for_date(
index 52e703e0447cf7bc4d7ecbe03bd8f2ce10278e65..63c5fd68f138a9f597eb6187e690983e2611cd0a 100644 (file)
@@ -1,5 +1,5 @@
 import calendar
-import datetime
+import datetime as dt
 import functools
 import json
 import random
@@ -243,7 +243,7 @@ def _mark_watched(self, base_url, video_id, delivery_info):
         invocation_id = delivery_info.get('InvocationId')
         stream_id = traverse_obj(delivery_info, ('Delivery', 'Streams', ..., 'PublicID'), get_all=False, expected_type=str)
         if invocation_id and stream_id and duration:
-            timestamp_str = f'/Date({calendar.timegm(datetime.datetime.now(datetime.timezone.utc).timetuple())}000)/'
+            timestamp_str = f'/Date({calendar.timegm(dt.datetime.now(dt.timezone.utc).timetuple())}000)/'
             data = {
                 'streamRequests': [
                     {
index 6b2f57186f12722c0e189d8de45d3be17a578a03..3e0ccba174bad2ae64172502d1d4aa52e5fd53c9 100644 (file)
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt
 import json
 import urllib.parse
 
@@ -197,7 +197,7 @@ def _real_extract(self, url):
                 'like_count': ('up', {int}),
                 'dislike_count': ('down', {int}),
                 'timestamp': ('created', {int}),
-                'upload_date': ('created', {int}, {datetime.date.fromtimestamp}, {lambda x: x.strftime('%Y%m%d')}),
+                'upload_date': ('created', {int}, {dt.date.fromtimestamp}, {lambda x: x.strftime('%Y%m%d')}),
                 'thumbnail': ('thumb', {lambda x: urljoin('https://thumb.pr0gramm.com', x)})
             }),
         }
index 56bbccde40f85a9a264eff79a00ebcce28f42234..3bc5f3cab293483e602b3b4b86f13d86f2db074d 100644 (file)
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt
 import itertools
 import json
 import re
@@ -156,7 +156,7 @@ def _real_extract(self, url):
                 self.raise_login_required('This video is only available to premium users', True, method='cookies')
             elif scheduled:
                 self.raise_no_formats(
-                    f'Stream is offline; scheduled for {datetime.datetime.fromtimestamp(scheduled).strftime("%Y-%m-%d %H:%M:%S")}',
+                    f'Stream is offline; scheduled for {dt.datetime.fromtimestamp(scheduled).strftime("%Y-%m-%d %H:%M:%S")}',
                     video_id=video_id, expected=True)
 
         uploader = traverse_obj(metadata, ('createdBy', 'username'), ('creator', 'username'))
index 29cb0152a2d4429e551076af5adabdc90251ede7..eb433d2ac32186b2953614d0bca1fc2f912c89b9 100644 (file)
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt
 
 from .common import InfoExtractor
 from .redge import RedCDNLivxIE
 
 
 def is_dst(date):
-    last_march = datetime.datetime(date.year, 3, 31)
-    last_october = datetime.datetime(date.year, 10, 31)
-    last_sunday_march = last_march - datetime.timedelta(days=last_march.isoweekday() % 7)
-    last_sunday_october = last_october - datetime.timedelta(days=last_october.isoweekday() % 7)
+    last_march = dt.datetime(date.year, 3, 31)
+    last_october = dt.datetime(date.year, 10, 31)
+    last_sunday_march = last_march - dt.timedelta(days=last_march.isoweekday() % 7)
+    last_sunday_october = last_october - dt.timedelta(days=last_october.isoweekday() % 7)
     return last_sunday_march.replace(hour=2) <= date <= last_sunday_october.replace(hour=3)
 
 
 def rfc3339_to_atende(date):
-    date = datetime.datetime.fromisoformat(date)
-    date = date + datetime.timedelta(hours=1 if is_dst(date) else 0)
+    date = dt.datetime.fromisoformat(date)
+    date = date + dt.timedelta(hours=1 if is_dst(date) else 0)
     return int((date.timestamp() - 978307200) * 1000)
 
 
index a6da445250889c2a60fc935f8fe8be22e002fc37..7c914acbed231a9210dd804ea6a94fdaf8a4511e 100644 (file)
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt
 import itertools
 import json
 import math
@@ -94,7 +94,7 @@ def _perform_login(self, username, password):
                 'mobileNumber': username,
                 'channelPartnerID': 'MSMIND',
                 'country': 'IN',
-                'timestamp': datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%MZ'),
+                'timestamp': dt.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%MZ'),
                 'otpSize': 6,
                 'loginType': 'REGISTERORSIGNIN',
                 'isMobileMandatory': True,
@@ -111,7 +111,7 @@ def _perform_login(self, username, password):
                 'otp': self._get_tfa_info('OTP'),
                 'dmaId': 'IN',
                 'ageConfirmation': True,
-                'timestamp': datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%MZ'),
+                'timestamp': dt.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%MZ'),
                 'isMobileMandatory': True,
             }).encode())
         if otp_verify_json['resultCode'] == 'KO':
index ea4041976a2fed84acf03941c743a094cc353cd5..11cc5705e9539294d40f74e8de81bf5cc926abc3 100644 (file)
@@ -1,5 +1,5 @@
 import base64
-import datetime
+import datetime as dt
 import functools
 import itertools
 
@@ -70,7 +70,7 @@ def _get_bearer_token(self, video_id):
         username, password = self._get_login_info()
         if username is None or password is None:
             self.raise_login_required('Your 10play account\'s details must be provided with --username and --password.')
-        _timestamp = datetime.datetime.now().strftime('%Y%m%d000000')
+        _timestamp = dt.datetime.now().strftime('%Y%m%d000000')
         _auth_header = base64.b64encode(_timestamp.encode('ascii')).decode('ascii')
         data = self._download_json('https://10play.com.au/api/user/auth', video_id, 'Getting bearer token', headers={
             'X-Network-Ten-Auth': _auth_header,
index 1f1db1ad3197a0444a43bdd40d4eb8e16a418c53..e553fff9f1714d4e32237e15585de13af9430095 100644 (file)
@@ -2,7 +2,7 @@
 import calendar
 import collections
 import copy
-import datetime
+import datetime as dt
 import enum
 import hashlib
 import itertools
@@ -924,10 +924,10 @@ def extract_relative_time(relative_time_text):
     def _parse_time_text(self, text):
         if not text:
             return
-        dt = self.extract_relative_time(text)
+        dt_ = self.extract_relative_time(text)
         timestamp = None
-        if isinstance(dt, datetime.datetime):
-            timestamp = calendar.timegm(dt.timetuple())
+        if isinstance(dt_, dt.datetime):
+            timestamp = calendar.timegm(dt_.timetuple())
 
         if timestamp is None:
             timestamp = (
@@ -4568,7 +4568,7 @@ def process_language(container, base_url, lang_code, sub_name, query):
 
         if upload_date and live_status not in ('is_live', 'post_live', 'is_upcoming'):
             # Newly uploaded videos' HLS formats are potentially problematic and need to be checked
-            upload_datetime = datetime_from_str(upload_date).replace(tzinfo=datetime.timezone.utc)
+            upload_datetime = datetime_from_str(upload_date).replace(tzinfo=dt.timezone.utc)
             if upload_datetime >= datetime_from_str('today-2days'):
                 for fmt in info['formats']:
                     if fmt.get('protocol') == 'm3u8_native':
index 648cf0abd534709ab787483a59261edc6d32484f..dec514674f5cf51b0036831c5875915fc8861401 100644 (file)
@@ -5,7 +5,7 @@
 import collections
 import collections.abc
 import contextlib
-import datetime
+import datetime as dt
 import email.header
 import email.utils
 import errno
@@ -1150,14 +1150,14 @@ def extract_timezone(date_str):
         timezone = TIMEZONE_NAMES.get(m and m.group('tz').strip())
         if timezone is not None:
             date_str = date_str[:-len(m.group('tz'))]
-        timezone = datetime.timedelta(hours=timezone or 0)
+        timezone = dt.timedelta(hours=timezone or 0)
     else:
         date_str = date_str[:-len(m.group('tz'))]
         if not m.group('sign'):
-            timezone = datetime.timedelta()
+            timezone = dt.timedelta()
         else:
             sign = 1 if m.group('sign') == '+' else -1
-            timezone = datetime.timedelta(
+            timezone = dt.timedelta(
                 hours=sign * int(m.group('hours')),
                 minutes=sign * int(m.group('minutes')))
     return timezone, date_str
@@ -1176,8 +1176,8 @@ def parse_iso8601(date_str, delimiter='T', timezone=None):
 
     with contextlib.suppress(ValueError):
         date_format = f'%Y-%m-%d{delimiter}%H:%M:%S'
-        dt = datetime.datetime.strptime(date_str, date_format) - timezone
-        return calendar.timegm(dt.timetuple())
+        dt_ = dt.datetime.strptime(date_str, date_format) - timezone
+        return calendar.timegm(dt_.timetuple())
 
 
 def date_formats(day_first=True):
@@ -1198,12 +1198,12 @@ def unified_strdate(date_str, day_first=True):
 
     for expression in date_formats(day_first):
         with contextlib.suppress(ValueError):
-            upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
+            upload_date = dt.datetime.strptime(date_str, expression).strftime('%Y%m%d')
     if upload_date is None:
         timetuple = email.utils.parsedate_tz(date_str)
         if timetuple:
             with contextlib.suppress(ValueError):
-                upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
+                upload_date = dt.datetime(*timetuple[:6]).strftime('%Y%m%d')
     if upload_date is not None:
         return str(upload_date)
 
@@ -1233,8 +1233,8 @@ def unified_timestamp(date_str, day_first=True):
 
     for expression in date_formats(day_first):
         with contextlib.suppress(ValueError):
-            dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
-            return calendar.timegm(dt.timetuple())
+            dt_ = dt.datetime.strptime(date_str, expression) - timezone + dt.timedelta(hours=pm_delta)
+            return calendar.timegm(dt_.timetuple())
 
     timetuple = email.utils.parsedate_tz(date_str)
     if timetuple:
@@ -1272,11 +1272,11 @@ def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
     if precision == 'auto':
         auto_precision = True
         precision = 'microsecond'
-    today = datetime_round(datetime.datetime.now(datetime.timezone.utc), precision)
+    today = datetime_round(dt.datetime.now(dt.timezone.utc), precision)
     if date_str in ('now', 'today'):
         return today
     if date_str == 'yesterday':
-        return today - datetime.timedelta(days=1)
+        return today - dt.timedelta(days=1)
     match = re.match(
         r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)s?',
         date_str)
@@ -1291,13 +1291,13 @@ def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
             if unit == 'week':
                 unit = 'day'
                 time *= 7
-            delta = datetime.timedelta(**{unit + 's': time})
+            delta = dt.timedelta(**{unit + 's': time})
             new_date = start_time + delta
         if auto_precision:
             return datetime_round(new_date, unit)
         return new_date
 
-    return datetime_round(datetime.datetime.strptime(date_str, format), precision)
+    return datetime_round(dt.datetime.strptime(date_str, format), precision)
 
 
 def date_from_str(date_str, format='%Y%m%d', strict=False):
@@ -1312,21 +1312,21 @@ def date_from_str(date_str, format='%Y%m%d', strict=False):
     return datetime_from_str(date_str, precision='microsecond', format=format).date()
 
 
-def datetime_add_months(dt, months):
+def datetime_add_months(dt_, months):
     """Increment/Decrement a datetime object by months."""
-    month = dt.month + months - 1
-    year = dt.year + month // 12
+    month = dt_.month + months - 1
+    year = dt_.year + month // 12
     month = month % 12 + 1
-    day = min(dt.day, calendar.monthrange(year, month)[1])
-    return dt.replace(year, month, day)
+    day = min(dt_.day, calendar.monthrange(year, month)[1])
+    return dt_.replace(year, month, day)
 
 
-def datetime_round(dt, precision='day'):
+def datetime_round(dt_, precision='day'):
     """
     Round a datetime object's time to a specific precision
     """
     if precision == 'microsecond':
-        return dt
+        return dt_
 
     unit_seconds = {
         'day': 86400,
@@ -1335,8 +1335,8 @@ def datetime_round(dt, precision='day'):
         'second': 1,
     }
     roundto = lambda x, n: ((x + n / 2) // n) * n
-    timestamp = roundto(calendar.timegm(dt.timetuple()), unit_seconds[precision])
-    return datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc)
+    timestamp = roundto(calendar.timegm(dt_.timetuple()), unit_seconds[precision])
+    return dt.datetime.fromtimestamp(timestamp, dt.timezone.utc)
 
 
 def hyphenate_date(date_str):
@@ -1357,11 +1357,11 @@ def __init__(self, start=None, end=None):
         if start is not None:
             self.start = date_from_str(start, strict=True)
         else:
-            self.start = datetime.datetime.min.date()
+            self.start = dt.datetime.min.date()
         if end is not None:
             self.end = date_from_str(end, strict=True)
         else:
-            self.end = datetime.datetime.max.date()
+            self.end = dt.datetime.max.date()
         if self.start > self.end:
             raise ValueError('Date range: "%s" , the start date must be before the end date' % self)
 
@@ -1372,7 +1372,7 @@ def day(cls, day):
 
     def __contains__(self, date):
         """Check if the date is in the range"""
-        if not isinstance(date, datetime.date):
+        if not isinstance(date, dt.date):
             date = date_from_str(date)
         return self.start <= date <= self.end
 
@@ -1996,12 +1996,12 @@ def strftime_or_none(timestamp, date_format='%Y%m%d', default=None):
         if isinstance(timestamp, (int, float)):  # unix timestamp
             # Using naive datetime here can break timestamp() in Windows
             # Ref: https://github.com/yt-dlp/yt-dlp/issues/5185, https://github.com/python/cpython/issues/94414
-            # Also, datetime.datetime.fromtimestamp breaks for negative timestamps
+            # Also, dt.datetime.fromtimestamp breaks for negative timestamps
             # Ref: https://github.com/yt-dlp/yt-dlp/issues/6706#issuecomment-1496842642
-            datetime_object = (datetime.datetime.fromtimestamp(0, datetime.timezone.utc)
-                               + datetime.timedelta(seconds=timestamp))
+            datetime_object = (dt.datetime.fromtimestamp(0, dt.timezone.utc)
+                               + dt.timedelta(seconds=timestamp))
         elif isinstance(timestamp, str):  # assume YYYYMMDD
-            datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d')
+            datetime_object = dt.datetime.strptime(timestamp, '%Y%m%d')
         date_format = re.sub(  # Support %s on windows
             r'(?<!%)(%%)*%s', rf'\g<1>{int(datetime_object.timestamp())}', date_format)
         return datetime_object.strftime(date_format)
@@ -4490,10 +4490,10 @@ def write_xattr(path, key, value):
 
 
 def random_birthday(year_field, month_field, day_field):
-    start_date = datetime.date(1950, 1, 1)
-    end_date = datetime.date(1995, 12, 31)
+    start_date = dt.date(1950, 1, 1)
+    end_date = dt.date(1995, 12, 31)
     offset = random.randint(0, (end_date - start_date).days)
-    random_date = start_date + datetime.timedelta(offset)
+    random_date = start_date + dt.timedelta(offset)
     return {
         year_field: str(random_date.year),
         month_field: str(random_date.month),
@@ -4672,7 +4672,7 @@ def time_seconds(**kwargs):
     """
     Returns TZ-aware time in seconds since the epoch (1970-01-01T00:00:00Z)
     """
-    return time.time() + datetime.timedelta(**kwargs).total_seconds()
+    return time.time() + dt.timedelta(**kwargs).total_seconds()
 
 
 # create a JSON Web Signature (jws) with HS256 algorithm