summaryrefslogtreecommitdiffstats
path: root/container
diff options
context:
space:
mode:
authorDan Mick <dmick@redhat.com>2024-06-26 04:07:41 +0200
committerDan Mick <dmick@redhat.com>2024-10-03 01:49:32 +0200
commit5c40a5c1737cf0628c899c4e9f788017b8b4fca8 (patch)
treee508c266491e87badadc76dc4bd67fcf7dfc57da /container
parentMerge pull request #59826 from adk3798/main-latest-release-squid (diff)
downloadceph-5c40a5c1737cf0628c899c4e9f788017b8b4fca8.tar.xz
ceph-5c40a5c1737cf0628c899c4e9f788017b8b4fca8.zip
Add Containerfile and build.sh to build it.
The intent is to replace ceph-container.git, at first for ci containers only, and eventually production containers as well. There is code present for production containers, including a separate "make-manifest-list.py" to scan for and glue the two arch-specific containers into a 'manifest-list' 'fat' container, but that code is not yet fully tested. This code will not be used until a corresponding change to the Jenkins jobs in ceph-build.git is pushed. Note that this tooling does not authenticate to the container repo; it is assumed that will be done elsewhere. Authentication is verified by pushing a minimal image to the requested repo. Signed-off-by: Dan Mick <dmick@redhat.com>
Diffstat (limited to 'container')
-rw-r--r--container/Containerfile209
-rwxr-xr-xcontainer/build.sh175
-rwxr-xr-xcontainer/make-manifest-list.py164
3 files changed, 548 insertions, 0 deletions
diff --git a/container/Containerfile b/container/Containerfile
new file mode 100644
index 00000000000..2f75c8c6ce6
--- /dev/null
+++ b/container/Containerfile
@@ -0,0 +1,209 @@
# Build a Ceph container image (CI or release) on a CentOS Stream 9 base.
ARG FROM_IMAGE="quay.io/centos/centos:stream9"
FROM $FROM_IMAGE

# allow FROM_IMAGE to be visible inside this stage
ARG FROM_IMAGE

# Ceph branch name
ARG CEPH_REF="main"

# Ceph SHA1
ARG CEPH_SHA1

# Ceph git repo (ceph-ci.git or ceph.git)
ARG CEPH_GIT_REPO

# (optional) Define the baseurl= for the ganesha.repo
ARG GANESHA_REPO_BASEURL="https://buildlogs.centos.org/centos/\$releasever-stream/storage/\$basearch/nfsganesha-5/"

# (optional) Set to "crimson" to install crimson packages.
ARG OSD_FLAVOR="default"

# (optional) Should be 'true' for CI builds (pull from shaman, etc.)
ARG CI_CONTAINER="true"

# Record the effective build arguments in the build log for later debugging
RUN /bin/echo -e "\
FROM_IMAGE: ${FROM_IMAGE}\n\
CEPH_REF: ${CEPH_REF}\n\
GANESHA_REPO_BASEURL: ${GANESHA_REPO_BASEURL} \n\
OSD_FLAVOR: ${OSD_FLAVOR} \n\
CI_CONTAINER: ${CI_CONTAINER}"

# Other labels are set automatically by container/build github action
# See: https://github.com/opencontainers/image-spec/blob/main/annotations.md
LABEL org.opencontainers.image.authors="Ceph Release Team <ceph-maintainers@ceph.io>" \
      org.opencontainers.image.documentation="https://docs.ceph.com/"

# Preserve the build arguments as image labels so a built image can be
# traced back to the exact inputs that produced it
LABEL \
FROM_IMAGE=${FROM_IMAGE} \
CEPH_REF=${CEPH_REF} \
CEPH_SHA1=${CEPH_SHA1} \
CEPH_GIT_REPO=${CEPH_GIT_REPO} \
GANESHA_REPO_BASEURL=${GANESHA_REPO_BASEURL} \
OSD_FLAVOR=${OSD_FLAVOR}


#===================================================================================================
# Install ceph and dependencies, and clean up
# IMPORTANT: in official builds, use '--squash' build option to keep image as small as possible
# keeping run steps separate makes local rebuilds quick, but images are big without squash option
#===================================================================================================

# Pre-reqs
# jq is needed below to parse the shaman API response for the repo URL
RUN dnf install -y --setopt=install_weak_deps=False epel-release jq

# Add NFS-Ganesha repo
RUN \
    echo "[ganesha]" > /etc/yum.repos.d/ganesha.repo && \
    echo "name=ganesha" >> /etc/yum.repos.d/ganesha.repo && \
    echo "baseurl=${GANESHA_REPO_BASEURL}" >> /etc/yum.repos.d/ganesha.repo && \
    echo "gpgcheck=0" >> /etc/yum.repos.d/ganesha.repo && \
    echo "enabled=1" >> /etc/yum.repos.d/ganesha.repo

# ISCSI repo
# quincy/reef use the released ceph-iscsi repo; everything else (main etc.)
# pulls the latest ceph-iscsi build from shaman
RUN set -x && \
    curl -s -L https://shaman.ceph.com/api/repos/tcmu-runner/main/latest/centos/9/repo?arch=$(arch) -o /etc/yum.repos.d/tcmu-runner.repo && \
    case "${CEPH_REF}" in \
    quincy|reef) \
        curl -s -L https://download.ceph.com/ceph-iscsi/3/rpm/el9/ceph-iscsi.repo -o /etc/yum.repos.d/ceph-iscsi.repo ;\
        ;;\
    main|*) \
        curl -s -L https://shaman.ceph.com/api/repos/ceph-iscsi/main/latest/centos/9/repo -o /etc/yum.repos.d/ceph-iscsi.repo ;\
        ;;\
    esac

# Ceph repo
# CI builds install the latest shaman build repo for CEPH_REF; release
# builds install the ceph-release rpm from download.ceph.com
RUN set -x && \
    rpm --import 'https://download.ceph.com/keys/release.asc' && \
    ARCH=$(arch); if [ "${ARCH}" == "aarch64" ]; then ARCH="arm64"; fi ;\
    IS_RELEASE=0 ;\
    if [[ "${CI_CONTAINER}" == "true" ]] ; then \
        # TODO: this can return different ceph builds (SHA1) for x86 vs. arm runs. is it important to fix?
        REPO_URL=$(curl -s "https://shaman.ceph.com/api/search/?project=ceph&distros=centos/9/${ARCH}&flavor=${OSD_FLAVOR}&ref=${CEPH_REF}&sha1=latest" | jq -r .[0].url) ;\
    else \
        IS_RELEASE=1 ;\
        REPO_URL="http://download.ceph.com/rpm-${CEPH_REF}/el9/" ;\
    fi && \
    rpm -Uvh "$REPO_URL/noarch/ceph-release-1-${IS_RELEASE}.el9.noarch.rpm"

# Copr repos
# scikit for mgr-diskprediction-local
# ref: https://github.com/ceph/ceph-container/pull/1821
RUN \
    dnf install -y --setopt=install_weak_deps=False dnf-plugins-core && \
    dnf copr enable -y tchaikov/python-scikit-learn

# Update package mgr
RUN dnf update -y --setopt=install_weak_deps=False

# Define and install packages
# General
RUN echo "ca-certificates" > packages.txt
# Ceph
# TODO: remove lua-devel and luarocks once they are present in ceph.spec.in
# ref: https://github.com/ceph/ceph/pull/54575#discussion_r1401199635
RUN echo \
"ceph-common \
ceph-exporter \
ceph-grafana-dashboards \
ceph-immutable-object-cache \
ceph-mds \
ceph-mgr-cephadm \
ceph-mgr-dashboard \
ceph-mgr-diskprediction-local \
ceph-mgr-k8sevents \
ceph-mgr-rook \
ceph-mgr \
ceph-mon \
ceph-osd \
ceph-radosgw lua-devel luarocks \
ceph-volume \
cephfs-mirror \
cephfs-top \
kmod \
libradosstriper1 \
rbd-mirror" \
>> packages.txt

# Optional crimson package(s)
RUN if [ "${OSD_FLAVOR}" == "crimson" ]; then \
    echo "ceph-crimson-osd" >> packages.txt ; \
fi

# Ceph "Recommends"
RUN echo "nvme-cli python3-saml smartmontools" >> packages.txt
# NFS-Ganesha
RUN echo "\
dbus-daemon \
nfs-ganesha-ceph \
nfs-ganesha-rados-grace \
nfs-ganesha-rados-urls \
nfs-ganesha-rgw \
nfs-ganesha \
rpcbind \
sssd-client" >> packages.txt

# ISCSI
RUN echo "ceph-iscsi tcmu-runner python3-rtslib" >> packages.txt

# Ceph-CSI
# TODO: coordinate with @Madhu-1 to have Ceph-CSI install these itself if unused by ceph
# @adk3798 does cephadm use these?
RUN echo "attr ceph-fuse rbd-nbd" >> packages.txt

# Rook (only if packages must be in ceph container image)
RUN echo "systemd-udev" >> packages.txt

# Util packages (should be kept to only utils that are truly very useful)
# 'sgdisk' (from gdisk) is used in docs and scripts for clearing disks (could be a risk? @travisn @guits @ktdreyer ?)
# 'ps' (from procps-ng) and 'hostname' are very valuable for debugging and CI
# TODO: remove sg3_utils once they are moved to ceph.spec.in with libstoragemgmt
# ref: https://github.com/ceph/ceph-container/pull/2013#issuecomment-1248606472
RUN echo "gdisk hostname procps-ng sg3_utils e2fsprogs lvm2 gcc" >> packages.txt

# scikit
RUN echo "python3-scikit-learn" >> packages.txt

# ceph-node-proxy
RUN echo "ceph-node-proxy" >> packages.txt

RUN echo "=== PACKAGES TO BE INSTALLED ==="; cat packages.txt
# skip_missing_names_on_install=False makes a missing package a hard failure
RUN echo "=== INSTALLING ===" ; \
dnf install -y --setopt=install_weak_deps=False --setopt=skip_missing_names_on_install=False --enablerepo=crb $(cat packages.txt)

# XXX why isn't this done in the ganesha package?
RUN mkdir -p /var/run/ganesha

# Disable sync with udev since the container can not contact udev
RUN \
    sed -i -e 's/udev_rules = 1/udev_rules = 0/' \
           -e 's/udev_sync = 1/udev_sync = 0/' \
           -e 's/obtain_device_list_from_udev = 1/obtain_device_list_from_udev = 0/' \
        /etc/lvm/lvm.conf && \
    # validate the sed command worked as expected
    grep -sqo "udev_sync = 0" /etc/lvm/lvm.conf && \
    grep -sqo "udev_rules = 0" /etc/lvm/lvm.conf && \
    grep -sqo "obtain_device_list_from_udev = 0" /etc/lvm/lvm.conf

# CLEAN UP!
RUN set -x && \
    dnf clean all && \
    rm -rf /var/cache/dnf/* && \
    rm -rf /var/lib/dnf/* && \
    rm -f /var/lib/rpm/__db* && \
    # remove unnecessary files with big impact
    rm -rf /etc/selinux /usr/share/{doc,man,selinux} && \
    # don't keep compiled python binaries
    find / -xdev \( -name "*.pyc" -o -name "*.pyo" \) -delete

# Verify that the packages installed haven't been accidentally cleaned, then
# clean the package list and re-clean unnecessary RPM database files
RUN rpm -q $(cat packages.txt) && rm -f /var/lib/rpm/__db* && rm -f *packages.txt

#
# Set some envs in the container for quickly inspecting details about the build at runtime
ENV CEPH_IS_DEVEL="${CI_CONTAINER}" \
    CEPH_REF="${CEPH_REF}" \
    CEPH_OSD_FLAVOR="${OSD_FLAVOR}" \
    FROM_IMAGE="${FROM_IMAGE}"
+
diff --git a/container/build.sh b/container/build.sh
new file mode 100755
index 00000000000..7c97e2261c1
--- /dev/null
+++ b/container/build.sh
@@ -0,0 +1,175 @@
#!/bin/bash -ex
# vim: ts=4 sw=4 expandtab

# Build the Ceph container image from a Containerfile and tag/push it using
# either the CI (shaman) or release conventions.
#
# repo auth with write perms must be present (this script does not log into
# CONTAINER_REPO_HOSTNAME and CONTAINER_REPO_ORGANIZATION).
# If NO_PUSH is set, no login is necessary


CFILE=${1:-Containerfile}
shift || true

usage() {
    cat << EOF
$0 [containerfile] (defaults to 'Containerfile')
For a CI build (from ceph-ci.git, built and pushed to shaman):
CI_CONTAINER: must be 'true'
FLAVOR (OSD flavor, default or crimson)
BRANCH (of Ceph. <remote>/<ref>)
CEPH_SHA1 (of Ceph)
ARCH (of build host, and resulting container)
CONTAINER_REPO_HOSTNAME (quay.ceph.io, for CI, for instance)
CONTAINER_REPO_ORGANIZATION (ceph-ci, for CI, for instance)
CONTAINER_REPO_USERNAME
CONTAINER_REPO_PASSWORD

For a release build: (from ceph.git, built and pushed to download.ceph.com)
CI_CONTAINER: must be 'false'
and you must also add
VERSION (for instance, 19.1.0) for tagging the image

You can avoid the push step (for testing) by setting NO_PUSH to anything
EOF
}

CI_CONTAINER=${CI_CONTAINER:-false}
FLAVOR=${FLAVOR:-default}
# default: current checked-out branch
BRANCH=${BRANCH:-$(git rev-parse --abbrev-ref HEAD)}
# default: current checked-out branch
CEPH_SHA1=${CEPH_SHA1:-$(git rev-parse HEAD)}
# default: build host arch
ARCH=${ARCH:-$(arch)}
if [[ "${ARCH}" == "aarch64" ]] ; then ARCH=arm64; fi
if [[ ${CI_CONTAINER} == "true" ]] ; then
    CONTAINER_REPO_HOSTNAME=${CONTAINER_REPO_HOSTNAME:-quay.ceph.io}
    CONTAINER_REPO_ORGANIZATION=${CONTAINER_REPO_ORGANIZATION:-ceph/ceph-${ARCH}}
else
    CONTAINER_REPO_HOSTNAME=${CONTAINER_REPO_HOSTNAME:-quay.io}
    CONTAINER_REPO_ORGANIZATION=${CONTAINER_REPO_ORGANIZATION:-ceph/ceph}
    # default: most-recent annotated tag
    VERSION=${VERSION:-$(git describe --abbrev=0)}
fi

# check for existence of all required variables
: "${CI_CONTAINER:?}"
: "${FLAVOR:?}"
: "${BRANCH:?}"
: "${CEPH_SHA1:?}"
: "${ARCH:?}"
: "${CONTAINER_REPO_HOSTNAME:?}"
: "${CONTAINER_REPO_ORGANIZATION:?}"
: "${CONTAINER_REPO_USERNAME:?}"
: "${CONTAINER_REPO_PASSWORD:?}"
# fix: a bare '${VERSION:?}' would try to *execute* VERSION's value as a
# command; use the ':' no-op form like the checks above
if [[ ${CI_CONTAINER} != "true" ]] ; then : "${VERSION:?}"; fi

# check for valid repo auth (if pushing) by pushing a minimal scratch image
ORGURL=${CONTAINER_REPO_HOSTNAME}/${CONTAINER_REPO_ORGANIZATION}
MINIMAL_IMAGE=${ORGURL}/ceph:minimal-test
# fix: usage documents "setting NO_PUSH to anything" skips pushing, and the
# push steps below test -z "${NO_PUSH}"; test the same way here rather than
# requiring the literal string "true"
if [[ -z "${NO_PUSH}" ]] ; then
    podman rmi ${MINIMAL_IMAGE} || true
    echo "FROM scratch" | podman build -f - -t ${MINIMAL_IMAGE}
    if ! podman push ${MINIMAL_IMAGE} ; then
        echo "Not authenticated to ${ORGURL}; need docker/podman login?"
        exit 1
    fi
    # fix: was '| true', which *pipes* output to true and does not ignore a
    # failed rmi; '||' is the ignore-failure idiom
    podman rmi ${MINIMAL_IMAGE} || true
fi

if [[ -z "${CEPH_GIT_REPO}" ]] ; then
    if [[ ${CI_CONTAINER} == "true" ]]; then
        CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git
    else
        CEPH_GIT_REPO=https://github.com/ceph/ceph.git
    fi
fi

# BRANCH will be, say, origin/main. remove <remote>/
BRANCH=${BRANCH##*/}

podman build --pull=newer --squash -f $CFILE -t build.sh.output \
    --build-arg FROM_IMAGE=${FROM_IMAGE:-quay.io/centos/centos:stream9} \
    --build-arg CEPH_SHA1=${CEPH_SHA1} \
    --build-arg CEPH_GIT_REPO=${CEPH_GIT_REPO} \
    --build-arg CEPH_REF=${BRANCH:-main} \
    --build-arg OSD_FLAVOR=${FLAVOR:-default} \
    --build-arg CI_CONTAINER=${CI_CONTAINER:-default} \
    2>&1

image_id=$(podman image ls localhost/build.sh.output --format '{{.ID}}')

# grab useful image attributes for building the tag
#
# the variable settings are prefixed with "export CEPH_CONTAINER_" so that
# an eval or . can be used to put them into the environment
#
# PATH is removed from the output as it would cause problems for this
# parent script and its children
#
# notes:
#
# we want .Architecture and everything in .Config.Env
#
# printf will not accept "\n" (is this a podman bug?)
# so construct vars with two calls to podman inspect, joined by a newline,
# so that vars will get the output of the first command, newline, output
# of the second command
#
vars="$(podman inspect -f '{{printf "export CEPH_CONTAINER_ARCH=%v" .Architecture}}' ${image_id})
$(podman inspect -f '{{range $index, $value := .Config.Env}}export CEPH_CONTAINER_{{$value}}{{println}}{{end}}' ${image_id})"
vars="$(echo "${vars}" | grep -v PATH)"
eval ${vars}

# remove everything up to and including the last slash
fromtag=${CEPH_CONTAINER_FROM_IMAGE##*/}
# translate : to -
fromtag=${fromtag/:/-}
builddate=$(date +%Y%m%d)
local_tag=${fromtag}-${CEPH_CONTAINER_CEPH_REF}-${CEPH_CONTAINER_ARCH}-${builddate}

repopath=${CONTAINER_REPO_HOSTNAME}/${CONTAINER_REPO_ORGANIZATION}

if [[ ${CI_CONTAINER} == "true" ]] ; then
    # ceph-ci conventions for remote tags:
    # requires ARCH, BRANCH, CEPH_SHA1, FLAVOR
    full_repo_tag=$repopath/ceph:${BRANCH}-${fromtag}-${ARCH}-devel
    branch_repo_tag=$repopath/ceph:${BRANCH}
    sha1_repo_tag=$repopath/ceph:${CEPH_SHA1}

    if [[ "${ARCH}" == "aarch64" ]] ; then
        branch_repo_tag=${branch_repo_tag}-aarch64
        sha1_repo_tag=${sha1_repo_tag}-aarch64
    fi

    podman tag ${image_id} ${full_repo_tag}
    podman tag ${image_id} ${branch_repo_tag}
    podman tag ${image_id} ${sha1_repo_tag}

    # crimson builds are only pushed under the sha1-flavor tag
    if [[ ${FLAVOR} == "crimson" && ${ARCH} == "x86_64" ]] ; then
        sha1_flavor_repo_tag=${sha1_repo_tag}-${FLAVOR}
        podman tag ${image_id} ${sha1_flavor_repo_tag}
        if [[ -z "${NO_PUSH}" ]] ; then
            podman push ${sha1_flavor_repo_tag}
        fi
        exit
    fi

    if [[ -z "${NO_PUSH}" ]] ; then
        podman push ${full_repo_tag}
        podman push ${branch_repo_tag}
        podman push ${sha1_repo_tag}
    fi
else
    #
    # non-CI build. Tags are like v19.1.0-20240701
    # push to quay.ceph.io/ceph/prerelease
    #
    version_tag=${repopath}/prerelease/ceph-${ARCH}:${VERSION}-${builddate}

    podman tag ${image_id} ${version_tag}
    if [[ -z "${NO_PUSH}" ]] ; then
        podman push ${image_id} ${version_tag}
    fi
fi
diff --git a/container/make-manifest-list.py b/container/make-manifest-list.py
new file mode 100755
index 00000000000..010dcaed2b7
--- /dev/null
+++ b/container/make-manifest-list.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python3
+#
+# make a combined "manifest-list" container out of two arch-specific containers
+# searches for latest tags on HOST/{AMD,ARM}64_REPO, makes sure they refer
+# to the same Ceph SHA1, and creates a manifest-list ("fat") image on
+# MANIFEST_HOST/MANIFEST_REPO with the 'standard' set of tags.
+#
+# uses scratch local manifest LOCALMANIFEST, will be destroyed if present
+
+from datetime import datetime
+import functools
+import json
+import os
+import re
+import subprocess
+import sys
+
+# optional env vars (will default if not set)
+
+OPTIONAL_VARS = (
+ 'HOST',
+ 'AMD64_REPO',
+ 'ARM64_REPO',
+ 'MANIFEST_HOST',
+ 'MANIFEST_REPO',
+)
+
+# Manifest image. Will be destroyed if already present.
+LOCALMANIFEST = 'localhost/m'
+
+
def dump_vars(names, vardict):
    """Print each named entry of *vardict* to stderr as 'name: value'."""
    for key in names:
        line = f'{key}: {vardict[key]}'
        print(line, file=sys.stderr)
+
+
def run_command(args):
    """Run *args* as a subprocess, capturing its output.

    *args* may be a list or a whitespace-separated string (the latter is
    split, so it cannot carry arguments containing spaces).

    Returns a (success, stdout, stderr) tuple.  On failure the error is
    reported and the failed process's captured output is returned.
    """
    print(f'running {args}', file=sys.stderr)
    if not isinstance(args, list):
        args = args.split()
    try:
        result = subprocess.run(
            args,
            capture_output=True,
            text=True,
            check=True)
        return True, result.stdout, result.stderr

    except subprocess.CalledProcessError as e:
        print(f"Command '{e.cmd}' returned {e.returncode}")
        print("Error output:")
        print(e.stderr)
        # BUG FIX: 'result' is unbound here because subprocess.run() raised
        # before assigning it (NameError on every failure path); the captured
        # output lives on the CalledProcessError object instead.
        return False, e.stdout, e.stderr
+
+
def get_command_output(args):
    """Run a command and return its stdout, or None if it failed."""
    success, stdout, _stderr = run_command(args)
    if not success:
        return None
    return stdout
+
+
def run_command_show_failure(args):
    """Run a command; on failure, dump its captured output. Return success."""
    success, out, err = run_command(args)
    if success:
        return True
    print(f'{args} failed:', file=sys.stderr)
    print(f'stdout:\n{out}')
    print(f'stderr:\n{err}')
    return success
+
+
@functools.lru_cache
def get_latest_tag(path):
    """Return the last tag skopeo reports for the given repository path."""
    raw = get_command_output(f'skopeo list-tags docker://{path}')
    tags = json.loads(raw)['Tags']
    return tags[-1]
+
+
@functools.lru_cache
def get_image_inspect(path):
    """Return skopeo's inspect data for the given image path as a dict."""
    raw = get_command_output(f'skopeo inspect docker://{path}')
    return json.loads(raw)
+
+
def get_sha1(info):
    """Extract the Ceph git SHA1 recorded in the image's labels."""
    labels = info['Labels']
    return labels['GIT_COMMIT']
+
+
def main():
    """Assemble and push a multi-arch manifest list for Ceph images.

    Finds the latest tag on the amd64 and arm64 repos, verifies both
    images carry the same Ceph version (from the tag) and git SHA1 (from
    the GIT_COMMIT label), then creates a local manifest list and pushes
    it to the manifest repo under the standard v<major>... tag set.

    Returns 1 (for sys.exit) on version or SHA1 mismatch, else None.
    """
    # All coordinates may be overridden from the environment (OPTIONAL_VARS)
    host = os.environ.get('HOST', 'quay.io')
    amd64_repo = os.environ.get('AMD64_REPO', 'ceph/ceph-amd64')
    arm64_repo = os.environ.get('ARM64_REPO', 'ceph/ceph-arm64')
    manifest_host = os.environ.get('MANIFEST_HOST', host)
    manifest_repo = os.environ.get('MANIFEST_REPO', 'ceph/ceph')
    dump_vars(
        ('host',
        'amd64_repo',
        'arm64_repo',
        'manifest_host',
        'manifest_repo',
        ),
        locals())

    repopaths = (
        f'{host}/{amd64_repo}',
        f'{host}/{arm64_repo}',
    )
    tags = [get_latest_tag(p) for p in repopaths]
    print(f'latest tags: amd64:{tags[0]} arm64:{tags[1]}')

    # check that version of latest tag matches
    # expected tag shape: v<major>.<minor>.<micro>-<builddate>
    version_re = \
        r'v(?P<major>\d+)\.(?P<minor>\d+)\.(?P<micro>\d+)-(?P<date>\d+)'
    versions = list()
    for tag in tags:
        # NOTE(review): re.match returning None (unexpected tag format)
        # would raise AttributeError on the next line -- confirm tags are
        # always version-shaped
        mo = re.match(version_re, tag)
        ver = f'{mo.group("major")}.{mo.group("minor")}.{mo.group("micro")}'
        versions.append(ver)
    if versions[0] != versions[1]:
        print(
            f'version mismatch: amd64:{versions[0]} arm64:{versions[1]}',
            file=sys.stderr,
        )
        return(1)

    # 'mo' still holds the match for the last (arm64) tag; both versions
    # are equal at this point, so either match would do
    major, minor, micro = mo.group(1), mo.group(2), mo.group(3)
    print(f'Ceph version: {major}.{minor}.{micro}', file=sys.stderr)

    # check that ceph sha1 of two arch images matches
    paths_with_tags = [f'{p}:{t}' for (p, t) in zip(repopaths, tags)]
    info = [get_image_inspect(p) for p in paths_with_tags]
    sha1s = [get_sha1(i) for i in info]
    if sha1s[0] != sha1s[1]:
        print(
            f'sha1 mismatch: amd64: {sha1s[0]} arm64: {sha1s[1]}',
            file=sys.stderr,
        )
        # include build dates to help identify which arch build is stale
        builddate = [i['Created'] for i in info]
        print(
            f'Build dates: amd64: {builddate[0]} arm64: {builddate[1]}',
            file=sys.stderr,
        )
        return(1)

    # create manifest list image with the standard list of tags
    # ignore failure on manifest rm
    run_command(f'podman manifest rm localhost/m')
    run_command_show_failure(f'podman manifest create localhost/m')
    for p in paths_with_tags:
        # NOTE(review): short name 'm' presumably resolves to localhost/m
        # (LOCALMANIFEST) -- confirm, or use the full name for clarity
        run_command_show_failure(f'podman manifest add m {p}')
    base = f'{manifest_host}/{manifest_repo}'
    # push under progressively more specific version tags, ending with
    # today's date (not the date embedded in the source tags)
    for t in (
        f'v{major}',
        f'v{major}.{minor}',
        f'v{major}.{minor}.{micro}',
        f'v{major}.{minor}.{micro}-{datetime.today().strftime("%Y%m%d")}',
    ):
        run_command_show_failure(
            f'podman manifest push localhost/m {base}:{t}')
+
+
if __name__ == '__main__':
    # propagate main()'s return value as the process exit status
    sys.exit(main())