author    Eike Ziller <eike.ziller@qt.io>    2024-11-21 09:15:17 +0100
committer Eike Ziller <eike.ziller@qt.io>    2024-11-22 08:01:59 +0000
commit    f415fc424be17257353f36891d875aca784903cc (patch)
tree      dbdcc8a03b0d69e0a15508b0d284f9eb0ef9e641 /scripts/common.py
parent    6137372afc35d35e67aacd20db345426b2486698 (diff)
Scripts: Small download improvements
- better readable message
- first download to temporary (-part), then move to target
  (makes it more obvious which files have finished on disk)
- add option for skipping downloading existing files

Change-Id: Ie36b4b65b7e7a206a54cec8d643095970d85ff07
Reviewed-by: David Schulz <david.schulz@qt.io>
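The "-part" scheme from the second bullet is easy to see in isolation. Below is a minimal synchronous sketch of the idea, not code from the patch; the helper name download_via_part_file is invented for illustration:

    import urllib.request
    from pathlib import Path

    def download_via_part_file(url: str, target: Path) -> None:
        # Fetch into '<name>-part' first: a file that exists under its
        # final name on disk is therefore known to be fully downloaded.
        temp_target = target.with_suffix(target.suffix + '-part')
        urllib.request.urlretrieve(url, str(temp_target))
        # Move into place only once the download has finished.
        temp_target.rename(target)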
Diffstat (limited to 'scripts/common.py')
-rw-r--r--    scripts/common.py    30
1 file changed, 24 insertions(+), 6 deletions(-)
diff --git a/scripts/common.py b/scripts/common.py
index e343a46dad1..1b9edd780a8 100644
--- a/scripts/common.py
+++ b/scripts/common.py
@@ -154,19 +154,34 @@ def extract_file(archive: Path, target: Path) -> None:
 
 
 async def download(url: str, target: Path) -> None:
-    print('- Starting download {} -> {}'.format(url, str(target)))
+    print(('''
+- Starting download {}
+  -> {}''').strip().format(url, str(target)))
     # Since urlretrieve does blocking I/O it would prevent parallel downloads.
     # Run in default thread pool.
+    temp_target = target.with_suffix(target.suffix + '-part')
     loop = asyncio.get_running_loop()
-    await loop.run_in_executor(None, urllib.request.urlretrieve, url, str(target))
+    await loop.run_in_executor(None, urllib.request.urlretrieve, url, str(temp_target))
+    temp_target.rename(target)
     print('+ finished downloading {}'.format(str(target)))
 
 
-def download_and_extract(urls: list[str], target: Path, temp: Path) -> None:
-    download_and_extract_tuples([(url, target) for url in urls], temp)
+def download_and_extract(
+    urls: list[str],
+    target: Path,
+    temp: Path,
+    skip_existing: bool = False
+) -> None:
+    download_and_extract_tuples([(url, target) for url in urls],
+                                temp,
+                                skip_existing)
 
 
-def download_and_extract_tuples(urls_and_targets: list[tuple[str, Path]], temp: Path) -> None:
+def download_and_extract_tuples(
+    urls_and_targets: list[tuple[str, Path]],
+    temp: Path,
+    skip_existing: bool = False
+) -> None:
     temp.mkdir(parents=True, exist_ok=True)
     target_tuples : list[tuple[Path, Path]] = []
     # TODO make this work with file URLs, which then aren't downloaded
@@ -178,7 +193,10 @@ def download_and_extract_tuples(urls_and_targets: list[tuple[str, Path]], temp:
         filename = Path(u.path).name
         target_file = temp / filename
         target_tuples.append((target_file, target_path))
-        tasks.append(asyncio.create_task(download(url, target_file)))
+        if skip_existing and target_file.exists():
+            print('Skipping download of {}'.format(url))
+        else:
+            tasks.append(asyncio.create_task(download(url, target_file)))
     for task in tasks:
         await task
     asyncio.run(impl())
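The pieces fit together as follows: download() keeps the blocking urlretrieve call off the event loop via run_in_executor, download_and_extract_tuples() awaits one task per URL, and skip_existing lets a re-run reuse archives already present in temp. Because download() renames the '-part' file only after the transfer completes, any file found under its final name in temp is a finished download, which is what makes the plain existence check safe. A hypothetical invocation, with placeholder URL and directories, assuming the module is imported as common:

    from pathlib import Path
    import common  # scripts/common.py

    # Placeholder inputs; with skip_existing=True a second run skips
    # any archive that already exists in the download cache.
    common.download_and_extract(
        ['https://example.com/tools.7z'],
        Path('unpacked'),
        Path('download-cache'),
        skip_existing=True,
    )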