author    Abhijeet Kasurde <akasurde@redhat.com>  2025-01-14 16:44:29 +0100
committer GitHub <noreply@github.com>  2025-01-14 16:44:29 +0100
commit    5b0d1704962b4634380c20f8ab5d23f80cbc5f52 (patch)
tree      b11d9e81535931f61fac8d2944d7d0e6c895e878 /lib
parent    Allows iptables chain creation with wait parameter (#84491) (diff)
get_url: add support for BSD-style digest (#84485)
* Added support for a BSD-style digest file to verify the checksum of the downloaded file.

Fixes: #84476

Signed-off-by: Abhijeet Kasurde <akasurde@redhat.com>
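For reference, the two checksum file flavours that get_url now distinguishes look roughly like the following (the file name and digest value are invented and shortened for illustration):

    BSD-style:  SHA256 (ansible-core.tar.gz) = 2c26b46b68ffc68ff99b453c1d3041...
    GNU-style:  2c26b46b68ffc68ff99b453c1d3041...  ansible-core.tar.gz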
Diffstat (limited to 'lib')
-rw-r--r--  lib/ansible/modules/get_url.py | 55
1 file changed, 34 insertions(+), 21 deletions(-)
diff --git a/lib/ansible/modules/get_url.py b/lib/ansible/modules/get_url.py
index 52c812c0c6..563ae5a61e 100644
--- a/lib/ansible/modules/get_url.py
+++ b/lib/ansible/modules/get_url.py
@@ -460,6 +460,37 @@ def is_url(checksum):
return urlsplit(checksum).scheme in supported_schemes
+def parse_digest_lines(filename, lines):
+ """Returns a list of tuple containing the filename and digest depending upon
+ the lines provided
+
+ Args:
+ filename (str): Name of the filename, used only when the digest is one-liner
+ lines (list): A list of lines containing filenames and checksums
+ """
+ checksum_map = []
+ BSD_DIGEST_LINE = re.compile(r'^(\w+) ?\((?P<path>.+)\) ?= (?P<digest>[\w.]+)$')
+ GNU_DIGEST_LINE = re.compile(r'^(?P<digest>[\w.]+) ([ *])(?P<path>.+)$')
+
+ if len(lines) == 1 and len(lines[0].split()) == 1:
+ # Only a single line with a single string
+ # treat it as a checksum only file
+ checksum_map.append((lines[0], filename))
+ return checksum_map
+ # Each remaining line is expected to be either a BSD-style
+ # ("ALGO (filename) = digest") or GNU-style ("digest filename") entry
+ for line in lines:
+ match = BSD_DIGEST_LINE.match(line)
+ if match:
+ checksum_map.append((match.group('digest'), match.group('path')))
+ else:
+ match = GNU_DIGEST_LINE.match(line)
+ if match:
+ checksum_map.append((match.group('digest'), match.group('path').lstrip("./")))
+
+ return checksum_map
+
+
# ==============================================================
# main
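A quick way to see how the new helper treats both formats is the standalone sketch below. It copies the two regular expressions from the hunk above and runs them over invented digest lines; the file names and digest values are made up, and the sketch is an approximation for illustration rather than the module code itself:

    import re

    # Copies of the two patterns introduced in parse_digest_lines() above.
    BSD_DIGEST_LINE = re.compile(r'^(\w+) ?\((?P<path>.+)\) ?= (?P<digest>[\w.]+)$')
    GNU_DIGEST_LINE = re.compile(r'^(?P<digest>[\w.]+) ([ *])(?P<path>.+)$')

    # Invented example lines, one per style.
    lines = [
        "SHA256 (ansible-core.tar.gz) = 2c26b46b68ffc68ff99b453c1d30413413422d70",
        "2c26b46b68ffc68ff99b453c1d30413413422d70  ./ansible-core.tar.gz",
    ]

    for line in lines:
        match = BSD_DIGEST_LINE.match(line) or GNU_DIGEST_LINE.match(line)
        if match:
            # Mirrors the (digest, path) tuples collected by the module,
            # including stripping a leading './' from GNU-style paths.
            print(match.group('digest'), match.group('path').lstrip("./"))

Both lines print the same (digest, filename) pair, which is what lets the lookup later in main() match the downloaded file regardless of which tool produced the checksum file.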
@@ -527,31 +558,13 @@ def main():
if is_url(checksum):
checksum_url = checksum
# download checksum file to checksum_tmpsrc
- checksum_tmpsrc, checksum_info = url_get(module, checksum_url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest,
- unredirected_headers=unredirected_headers, ciphers=ciphers, use_netrc=use_netrc)
+ checksum_tmpsrc, _dummy = url_get(module, checksum_url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest,
+ unredirected_headers=unredirected_headers, ciphers=ciphers, use_netrc=use_netrc)
with open(checksum_tmpsrc) as f:
lines = [line.rstrip('\n') for line in f]
os.remove(checksum_tmpsrc)
- checksum_map = []
filename = url_filename(url)
- if len(lines) == 1 and len(lines[0].split()) == 1:
- # Only a single line with a single string
- # treat it as a checksum only file
- checksum_map.append((lines[0], filename))
- else:
- # The assumption here is the file is in the format of
- # checksum filename
- for line in lines:
- # Split by one whitespace to keep the leading type char ' ' (whitespace) for text and '*' for binary
- parts = line.split(" ", 1)
- if len(parts) == 2:
- # Remove the leading type char, we expect
- if parts[1].startswith((" ", "*",)):
- parts[1] = parts[1][1:]
-
- # Append checksum and path without potential leading './'
- checksum_map.append((parts[0], parts[1].lstrip("./")))
-
+ checksum_map = parse_digest_lines(filename=filename, lines=lines)
# Look through each line in the checksum file for a hash corresponding to
# the filename in the url, returning the first hash that is found.
for cksum in (s for (s, f) in checksum_map if f == filename):
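The loop above picks the first entry whose file name equals the name derived from the download URL. A minimal next()-based equivalent of that generator lookup, with invented data standing in for the value returned by parse_digest_lines():

    # Invented example data mirroring the generator expression used in main().
    checksum_map = [
        ("2c26b46b68ffc68ff99b453c1d30413413422d70", "ansible-core.tar.gz"),
        ("aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d", "other-file.tar.gz"),
    ]
    filename = "ansible-core.tar.gz"

    checksum = next((s for (s, f) in checksum_map if f == filename), None)
    print(checksum)  # digest of the matching entry, or None if absent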