utils.py
import bz2
import gzip
import hashlib
import lzma
import os
import os.path
import pathlib
import re
import sys
import tarfile
import urllib
import urllib.error
import urllib.request
import zipfile
from typing import Any, Callable, Dict, IO, Iterable, List, Optional, Tuple, TypeVar, Union
from urllib.parse import urlparse

import numpy as np
import torch
from torch.utils.model_zoo import tqdm

from .._internally_replaced_utils import _download_file_from_remote_location, _is_remote_location_available

USER_AGENT = "pytorch/vision"

def _urlretrieve(url: str, filename: Union[str, pathlib.Path], chunk_size: int = 1024 * 32) -> None:
    with urllib.request.urlopen(urllib.request.Request(url, headers={"User-Agent": USER_AGENT})) as response:
        with open(filename, "wb") as fh, tqdm(total=response.length, unit="B", unit_scale=True) as pbar:
            while chunk := response.read(chunk_size):
                fh.write(chunk)
                pbar.update(len(chunk))

def calculate_md5(fpath: Union[str, pathlib.Path], chunk_size: int = 1024 * 1024) -> str:
    # Setting the `usedforsecurity` flag does not change anything about the functionality, but indicates that we are
    # not using the MD5 checksum for cryptography. This enables its usage in restricted environments like FIPS.
    # Without it, torchvision.datasets is unusable in these environments since we perform an MD5 check everywhere.
    if sys.version_info >= (3, 9):
        md5 = hashlib.md5(usedforsecurity=False)
    else:
        md5 = hashlib.md5()
    with open(fpath, "rb") as f:
        while chunk := f.read(chunk_size):
            md5.update(chunk)
    return md5.hexdigest()

def check_md5(fpath: Union[str, pathlib.Path], md5: str, **kwargs: Any) -> bool:
    return md5 == calculate_md5(fpath, **kwargs)


def check_integrity(fpath: Union[str, pathlib.Path], md5: Optional[str] = None) -> bool:
    if not os.path.isfile(fpath):
        return False
    if md5 is None:
        return True
    return check_md5(fpath, md5)
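
# Usage sketch (hypothetical path and checksum, for illustration only): check a
# previously downloaded file before deciding whether to re-download it.
#
#     if not check_integrity("/tmp/mnist.tar.gz", md5="0123456789abcdef0123456789abcdef"):
#         print("file missing or corrupted, re-download it")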

def _get_redirect_url(url: str, max_hops: int = 3) -> str:
    initial_url = url
    headers = {"Method": "HEAD", "User-Agent": USER_AGENT}

    for _ in range(max_hops + 1):
        with urllib.request.urlopen(urllib.request.Request(url, headers=headers)) as response:
            if response.url == url or response.url is None:
                return url

            url = response.url
    else:
        raise RecursionError(
            f"Request to {initial_url} exceeded {max_hops} redirects. The last redirect points to {url}."
        )

def _get_google_drive_file_id(url: str) -> Optional[str]:
    parts = urlparse(url)

    if re.match(r"(drive|docs)[.]google[.]com", parts.netloc) is None:
        return None

    match = re.match(r"/file/d/(?P<id>[^/]*)", parts.path)
    if match is None:
        return None

    return match.group("id")
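
# For illustration, the URL shapes this helper recognizes (file ids are made up):
#
#     _get_google_drive_file_id("https://drive.google.com/file/d/ABC123/view")  # -> "ABC123"
#     _get_google_drive_file_id("https://example.com/file.zip")                 # -> None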

def download_url(
    url: str,
    root: Union[str, pathlib.Path],
    filename: Optional[Union[str, pathlib.Path]] = None,
    md5: Optional[str] = None,
    max_redirect_hops: int = 3,
) -> None:
    """Download a file from a url and place it in root.

    Args:
        url (str): URL to download file from
        root (str): Directory to place downloaded file in
        filename (str, optional): Name to save the file under. If None, use the basename of the URL
        md5 (str, optional): MD5 checksum of the download. If None, do not check
        max_redirect_hops (int, optional): Maximum number of redirect hops allowed
    """
    root = os.path.expanduser(root)
    if not filename:
        filename = os.path.basename(url)
    fpath = os.fspath(os.path.join(root, filename))

    os.makedirs(root, exist_ok=True)

    # check if file is already present locally
    if check_integrity(fpath, md5):
        return

    if _is_remote_location_available():
        _download_file_from_remote_location(fpath, url)
    else:
        # expand redirect chain if needed
        url = _get_redirect_url(url, max_hops=max_redirect_hops)

        # check if file is located on Google Drive
        file_id = _get_google_drive_file_id(url)
        if file_id is not None:
            return download_file_from_google_drive(file_id, root, filename, md5)

        # download the file
        try:
            _urlretrieve(url, fpath)
        except (urllib.error.URLError, OSError) as e:  # type: ignore[attr-defined]
            if url[:5] == "https":
                url = url.replace("https:", "http:")
                _urlretrieve(url, fpath)
            else:
                raise e

    # check integrity of downloaded file
    if not check_integrity(fpath, md5):
        raise RuntimeError("File not found or corrupted.")
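
# Usage sketch (hypothetical URL, for illustration only): download into ~/data,
# skipping the download if a file with a matching checksum already exists.
#
#     download_url(
#         "https://example.com/dataset.tar.gz",
#         root="~/data",
#         md5=None,  # pass the expected MD5 hex digest to verify the download
#     )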

def list_dir(root: Union[str, pathlib.Path], prefix: bool = False) -> List[str]:
    """List all directories at a given root

    Args:
        root (str): Path to directory whose folders need to be listed
        prefix (bool, optional): If true, prepends the path to each result, otherwise
            only returns the name of the directories found
    """
    root = os.path.expanduser(root)
    directories = [p for p in os.listdir(root) if os.path.isdir(os.path.join(root, p))]
    if prefix is True:
        directories = [os.path.join(root, d) for d in directories]
    return directories


def list_files(root: Union[str, pathlib.Path], suffix: str, prefix: bool = False) -> List[str]:
    """List all files ending with a suffix at a given root

    Args:
        root (str): Path to directory whose files need to be listed
        suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
            It uses the Python "str.endswith" method and is passed directly
        prefix (bool, optional): If true, prepends the path to each result, otherwise
            only returns the name of the files found
    """
    root = os.path.expanduser(root)
    files = [p for p in os.listdir(root) if os.path.isfile(os.path.join(root, p)) and p.endswith(suffix)]
    if prefix is True:
        files = [os.path.join(root, d) for d in files]
    return files
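
# Usage sketch (hypothetical directory layout): `prefix=True` joins each name
# onto the expanded root, otherwise only bare names are returned.
#
#     list_dir("~/data", prefix=True)                       # e.g. ["/home/user/data/train", ...]
#     list_files("~/data/train", suffix=(".png", ".jpg"))   # e.g. ["0001.png", "0002.jpg"]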

def download_file_from_google_drive(
    file_id: str,
    root: Union[str, pathlib.Path],
    filename: Optional[Union[str, pathlib.Path]] = None,
    md5: Optional[str] = None,
):
    """Download a Google Drive file and place it in root.

    Args:
        file_id (str): id of file to be downloaded
        root (str): Directory to place downloaded file in
        filename (str, optional): Name to save the file under. If None, use the id of the file.
        md5 (str, optional): MD5 checksum of the download. If None, do not check
    """
    try:
        import gdown
    except ModuleNotFoundError:
        raise RuntimeError(
            "To download files from GDrive, 'gdown' is required. You can install it with 'pip install gdown'."
        )

    root = os.path.expanduser(root)
    if not filename:
        filename = file_id
    fpath = os.fspath(os.path.join(root, filename))

    os.makedirs(root, exist_ok=True)

    if check_integrity(fpath, md5):
        return

    gdown.download(id=file_id, output=fpath, quiet=False, user_agent=USER_AGENT)

    if not check_integrity(fpath, md5):
        raise RuntimeError("File not found or corrupted.")
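
# Usage sketch (made-up file id): requires the optional `gdown` dependency.
#
#     download_file_from_google_drive("ABC123", root="~/data", filename="images.zip")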

def _extract_tar(
    from_path: Union[str, pathlib.Path], to_path: Union[str, pathlib.Path], compression: Optional[str]
) -> None:
    with tarfile.open(from_path, f"r:{compression[1:]}" if compression else "r") as tar:
        tar.extractall(to_path)


_ZIP_COMPRESSION_MAP: Dict[str, int] = {
    ".bz2": zipfile.ZIP_BZIP2,
    ".xz": zipfile.ZIP_LZMA,
}


def _extract_zip(
    from_path: Union[str, pathlib.Path], to_path: Union[str, pathlib.Path], compression: Optional[str]
) -> None:
    with zipfile.ZipFile(
        from_path, "r", compression=_ZIP_COMPRESSION_MAP[compression] if compression else zipfile.ZIP_STORED
    ) as zip:
        zip.extractall(to_path)


_ARCHIVE_EXTRACTORS: Dict[str, Callable[[Union[str, pathlib.Path], Union[str, pathlib.Path], Optional[str]], None]] = {
    ".tar": _extract_tar,
    ".zip": _extract_zip,
}

_COMPRESSED_FILE_OPENERS: Dict[str, Callable[..., IO]] = {
    ".bz2": bz2.open,
    ".gz": gzip.open,
    ".xz": lzma.open,
}

_FILE_TYPE_ALIASES: Dict[str, Tuple[Optional[str], Optional[str]]] = {
    ".tbz": (".tar", ".bz2"),
    ".tbz2": (".tar", ".bz2"),
    ".tgz": (".tar", ".gz"),
}

def _detect_file_type(file: Union[str, pathlib.Path]) -> Tuple[str, Optional[str], Optional[str]]:
    """Detect the archive type and/or compression of a file.

    Args:
        file (str): the filename

    Returns:
        (tuple): tuple of suffix, archive type, and compression

    Raises:
        RuntimeError: if file has no suffix or suffix is not supported
    """
    suffixes = pathlib.Path(file).suffixes
    if not suffixes:
        raise RuntimeError(
            f"File '{file}' has no suffixes that could be used to detect the archive type and compression."
        )
    suffix = suffixes[-1]

    # check if the suffix is a known alias
    if suffix in _FILE_TYPE_ALIASES:
        return (suffix, *_FILE_TYPE_ALIASES[suffix])

    # check if the suffix is an archive type
    if suffix in _ARCHIVE_EXTRACTORS:
        return suffix, suffix, None

    # check if the suffix is a compression
    if suffix in _COMPRESSED_FILE_OPENERS:
        # check for suffix hierarchy
        if len(suffixes) > 1:
            suffix2 = suffixes[-2]

            # check if the suffix2 is an archive type
            if suffix2 in _ARCHIVE_EXTRACTORS:
                return suffix2 + suffix, suffix2, suffix

        return suffix, None, suffix

    valid_suffixes = sorted(set(_FILE_TYPE_ALIASES) | set(_ARCHIVE_EXTRACTORS) | set(_COMPRESSED_FILE_OPENERS))
    raise RuntimeError(f"Unknown compression or archive type: '{suffix}'.\nKnown suffixes are: '{valid_suffixes}'.")
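
# For illustration, how detection resolves common suffixes:
#
#     _detect_file_type("foo.tar.gz")  # -> (".tar.gz", ".tar", ".gz")
#     _detect_file_type("foo.tgz")     # -> (".tgz", ".tar", ".gz")   (alias)
#     _detect_file_type("foo.zip")     # -> (".zip", ".zip", None)
#     _detect_file_type("foo.gz")      # -> (".gz", None, ".gz")      (compression only)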

def _decompress(
    from_path: Union[str, pathlib.Path],
    to_path: Optional[Union[str, pathlib.Path]] = None,
    remove_finished: bool = False,
) -> pathlib.Path:
    r"""Decompress a file.

    The compression is automatically detected from the file name.

    Args:
        from_path (str): Path to the file to be decompressed.
        to_path (str): Path to the decompressed file. If omitted, ``from_path`` without compression extension is used.
        remove_finished (bool): If ``True``, remove the file after the extraction.

    Returns:
        (pathlib.Path): Path to the decompressed file.
    """
    suffix, archive_type, compression = _detect_file_type(from_path)
    if not compression:
        raise RuntimeError(f"Couldn't detect a compression from suffix {suffix}.")

    if to_path is None:
        to_path = pathlib.Path(os.fspath(from_path).replace(suffix, archive_type if archive_type is not None else ""))

    # We don't need to check for a missing key here, since this was already done in _detect_file_type()
    compressed_file_opener = _COMPRESSED_FILE_OPENERS[compression]

    with compressed_file_opener(from_path, "rb") as rfh, open(to_path, "wb") as wfh:
        wfh.write(rfh.read())

    if remove_finished:
        os.remove(from_path)

    return pathlib.Path(to_path)
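
# Usage sketch (hypothetical path): turn `/tmp/images.bin.gz` into `/tmp/images.bin`
# next to the original, deleting the compressed file afterwards.
#
#     _decompress("/tmp/images.bin.gz", remove_finished=True)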

def extract_archive(
    from_path: Union[str, pathlib.Path],
    to_path: Optional[Union[str, pathlib.Path]] = None,
    remove_finished: bool = False,
) -> Union[str, pathlib.Path]:
    """Extract an archive.

    The archive type and a possible compression are automatically detected from the file name. If the file is
    compressed but not an archive, the call is dispatched to :func:`_decompress`.

    Args:
        from_path (str): Path to the file to be extracted.
        to_path (str): Path to the directory the file will be extracted to. If omitted, the directory of the file is
            used.
        remove_finished (bool): If ``True``, remove the file after the extraction.

    Returns:
        (str): Path to the directory the file was extracted to.
    """

    def path_or_str(ret_path: pathlib.Path) -> Union[str, pathlib.Path]:
        if isinstance(from_path, str):
            return os.fspath(ret_path)
        else:
            return ret_path

    if to_path is None:
        to_path = os.path.dirname(from_path)

    suffix, archive_type, compression = _detect_file_type(from_path)
    if not archive_type:
        ret_path = _decompress(
            from_path,
            os.path.join(to_path, os.path.basename(from_path).replace(suffix, "")),
            remove_finished=remove_finished,
        )
        return path_or_str(ret_path)

    # We don't need to check for a missing key here, since this was already done in _detect_file_type()
    extractor = _ARCHIVE_EXTRACTORS[archive_type]

    extractor(from_path, to_path, compression)
    if remove_finished:
        os.remove(from_path)

    return path_or_str(pathlib.Path(to_path))
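
# Usage sketch (hypothetical archives): extract next to the archive by default,
# or into an explicit target directory.
#
#     extract_archive("/tmp/dataset.tar.gz")                # extracts into /tmp
#     extract_archive("/tmp/dataset.zip", to_path="/data")  # extracts into /data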

def download_and_extract_archive(
    url: str,
    download_root: Union[str, pathlib.Path],
    extract_root: Optional[Union[str, pathlib.Path]] = None,
    filename: Optional[Union[str, pathlib.Path]] = None,
    md5: Optional[str] = None,
    remove_finished: bool = False,
) -> None:
    download_root = os.path.expanduser(download_root)
    if extract_root is None:
        extract_root = download_root
    if not filename:
        filename = os.path.basename(url)

    download_url(url, download_root, filename, md5)

    archive = os.path.join(download_root, filename)
    extract_archive(archive, extract_root, remove_finished)
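
# Usage sketch (hypothetical URL): the common one-call pattern used by dataset
# classes. Download into `download_root`, then extract into `extract_root`.
#
#     download_and_extract_archive(
#         "https://example.com/dataset.tar.gz",
#         download_root="~/data/archives",
#         extract_root="~/data",
#         remove_finished=True,
#     )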

def iterable_to_str(iterable: Iterable) -> str:
    return "'" + "', '".join([str(item) for item in iterable]) + "'"


T = TypeVar("T", str, bytes)


def verify_str_arg(
    value: T,
    arg: Optional[str] = None,
    valid_values: Optional[Iterable[T]] = None,
    custom_msg: Optional[str] = None,
) -> T:
    if not isinstance(value, str):
        if arg is None:
            msg = "Expected type str, but got type {type}."
        else:
            msg = "Expected type str for argument {arg}, but got type {type}."
        msg = msg.format(type=type(value), arg=arg)
        raise ValueError(msg)

    if valid_values is None:
        return value

    if value not in valid_values:
        if custom_msg is not None:
            msg = custom_msg
        else:
            msg = "Unknown value '{value}' for argument {arg}. Valid values are {{{valid_values}}}."
            msg = msg.format(value=value, arg=arg, valid_values=iterable_to_str(valid_values))
        raise ValueError(msg)

    return value
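
# Usage sketch: validate a string argument against a whitelist, raising a
# ValueError with a readable message otherwise.
#
#     split = verify_str_arg("train", arg="split", valid_values=("train", "val", "test"))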

def _read_pfm(file_name: Union[str, pathlib.Path], slice_channels: int = 2) -> np.ndarray:
    """Read file in .pfm format. Might contain either 1 or 3 channels of data.

    Args:
        file_name (str): Path to the file.
        slice_channels (int): Number of channels to slice out of the file.
            Useful for reading different data formats stored in .pfm files: Optical Flows, Stereo Disparity Maps, etc.
    """

    with open(file_name, "rb") as f:
        header = f.readline().rstrip()
        if header not in [b"PF", b"Pf"]:
            raise ValueError("Invalid PFM file")

        dim_match = re.match(rb"^(\d+)\s(\d+)\s$", f.readline())
        if not dim_match:
            raise Exception("Malformed PFM header.")
        w, h = (int(dim) for dim in dim_match.groups())

        scale = float(f.readline().rstrip())
        if scale < 0:  # little-endian
            endian = "<"
            scale = -scale
        else:
            endian = ">"  # big-endian

        data = np.fromfile(f, dtype=endian + "f")

    pfm_channels = 3 if header == b"PF" else 1

    data = data.reshape(h, w, pfm_channels).transpose(2, 0, 1)
    data = np.flip(data, axis=1)  # flip on h dimension
    data = data[:slice_channels, :, :]

    return data.astype(np.float32)
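
# Usage sketch (hypothetical file): a single-channel "Pf" disparity map read with
# slice_channels=1 yields a float32 array of shape (1, H, W).
#
#     disparity = _read_pfm("/tmp/disp.pfm", slice_channels=1)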

def _flip_byte_order(t: torch.Tensor) -> torch.Tensor:
    return (
        t.contiguous().view(torch.uint8).view(*t.shape, t.element_size()).flip(-1).view(*t.shape[:-1], -1).view(t.dtype)
    )
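
# For illustration: viewing the tensor as raw bytes and flipping the trailing
# byte axis swaps endianness element-wise. E.g., for int32 on a little-endian
# machine:
#
#     t = torch.tensor([1], dtype=torch.int32)  # bytes 01 00 00 00
#     _flip_byte_order(t)                       # tensor([16777216]), bytes 00 00 00 01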