From 77576b7207909f86ee2505ec90c71c1a4f216da4 Mon Sep 17 00:00:00 2001 From: Scott Little Date: Wed, 28 Oct 2020 14:12:13 -0400 Subject: [PATCH 01/37] Add support CentOS-8/dnf/mock-2.6 based builds This update will retain support for CentOS-7/yum/mock-1.4 based builds. The build environment will be queried to discover which environment it is building in, and modify the commands we issue accordingly. In CentOS 8, DNF replaces both YUM and REPOQUERY. While DNF tries to be a transparent replacement of the old tools, there are also subtle changes to the supported arguments. I will provide independent mock.cfg.prototypes for centos7 vs centos8. Changes in generate-centos-repo.sh under stx-tools will be required to select the correct prototype. Add support for mock 2.6. Mock 2.6 is python 3, and it processes the 'root' and 'rootdir' arguments slightly differently. Also change the order of arguments to tar within default_build_srpm. The latest tar only honors '--exclude' if it precedes other arguments. 
Story: 2006729 Depends-On: https://review.opendev.org/762700 Signed-off-by: Scott Little Change-Id: I826be2051e535e6a4c08ad17124f453b04210668 --- build-tools/build-iso | 45 +- build-tools/build-pkgs | 14 +- build-tools/build-rpms-parallel | 202 +-- build-tools/build-rpms-serial | 172 ++- build-tools/build_iso/cgts_deps.sh | 103 +- build-tools/build_minimal_iso/cgts_deps.sh | 91 +- build-tools/default_build_srpm | 12 +- build-tools/mockchain-parallel | 8 +- build-tools/mockchain-parallel-2.6 | 1221 +++++++++++++++++ build-tools/modify-build-cfg | 37 +- build-tools/pkg-manager-utils.sh | 33 + build-tools/repo_files/mock.cfg.all.proto | 59 +- .../repo_files/mock.cfg.centos7.all.proto | 58 + .../repo_files/mock.cfg.centos7.distro.proto | 58 + build-tools/repo_files/mock.cfg.centos7.proto | 57 + .../repo_files/mock.cfg.centos8.all.proto | 63 + .../repo_files/mock.cfg.centos8.distro.proto | 63 + build-tools/repo_files/mock.cfg.centos8.proto | 62 + build-tools/repo_files/mock.cfg.distro.proto | 59 +- build-tools/repo_files/mock.cfg.proto | 58 +- 20 files changed, 2075 insertions(+), 400 deletions(-) create mode 100755 build-tools/mockchain-parallel-2.6 create mode 100755 build-tools/pkg-manager-utils.sh mode change 100644 => 120000 build-tools/repo_files/mock.cfg.all.proto create mode 100644 build-tools/repo_files/mock.cfg.centos7.all.proto create mode 100644 build-tools/repo_files/mock.cfg.centos7.distro.proto create mode 100644 build-tools/repo_files/mock.cfg.centos7.proto create mode 100644 build-tools/repo_files/mock.cfg.centos8.all.proto create mode 100644 build-tools/repo_files/mock.cfg.centos8.distro.proto create mode 100644 build-tools/repo_files/mock.cfg.centos8.proto mode change 100644 => 120000 build-tools/repo_files/mock.cfg.distro.proto mode change 100644 => 120000 build-tools/repo_files/mock.cfg.proto diff --git a/build-tools/build-iso b/build-tools/build-iso index d6c6ccc6..a830cae2 100755 --- a/build-tools/build-iso +++ b/build-tools/build-iso @@ -1,7 +1,7 
@@ #!/bin/bash # -# Copyright (c) 2018-2019 Wind River Systems, Inc. +# Copyright (c) 2018-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -18,6 +18,10 @@ BUILD_ISO_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )" source "${BUILD_ISO_DIR}/image-utils.sh" source "${BUILD_ISO_DIR}/git-utils.sh" +# Set REPOQUERY, REPOQUERY_SUB_COMMAND, REPOQUERY_RESOLVE and +# REPOQUERY_WHATPROVIDES_DELIM for our build environment. +source "${BUILD_ISO_DIR}/pkg-manager-utils.sh" + usage () { echo "" echo "Usage: " @@ -40,8 +44,8 @@ usage () { MY_YUM_CONF="" STD_REPO_ID="local-std" RT_REPO_ID="local-rt" -LOWER_LAYER_STD_REPO_ID="StxCentos7Distro" -LOWER_LAYER_RT_REPO_ID="StxCentos7Distro-rt" +LOWER_LAYER_STD_REPO_ID="" +LOWER_LAYER_RT_REPO_ID="" NPROCS=$(nproc) @@ -77,13 +81,13 @@ function install_pkg_list { \cd $OUTPUT_DIST_DIR/isolinux/Packages $INTERNAL_REPO_ROOT/build-tools/build_iso/cgts_deps.sh --deps=$PKGLIST - + if [ $? -ne 0 ] then echo "Could not install dependencies" exit 1 fi - + # clean up echo "Removing local-std yum repo $CGCS_REPO_DIR/repodata" echo "Removing local-rt yum repo $CGCS_RT_REPO_DIR/repodata" @@ -111,7 +115,7 @@ function make_report { for PKGLIST in $PKGLISTFILES; do while read PKG; do PKG=`echo $PKG | sed "s/#.*//"`; - if [ "${PKG}x" != "x" ]; then + if [ "${PKG}x" != "x" ]; then echo $PKG >> $REPORT_FILE fi done < $PKGLIST @@ -127,7 +131,7 @@ function make_report { echo "-----------------" >> $REPORT_FILE echo " WARNINGS " >> $REPORT_FILE echo "-----------------" >> $REPORT_FILE - + # Note that the warnings file may have multiple lines for the same # missing dependency. 
A sort | uniq solves this so we don't duplicate # warnings @@ -158,13 +162,17 @@ function init_vars { exit 1 fi + # LOWER_LAYER_STD_REPO_ID should be something like StxCentos7Distro or StxCentos8Distro + LOWER_LAYER_STD_REPO_ID=$(grep '\[StxCentos.*Distro\]' ${MY_YUM_CONF} | sed -e 's/^\[//' -e 's/\].*//') + LOWER_LAYER_RT_REPO_ID=$(grep '\[StxCentos.*Distro-rt\]' ${MY_YUM_CONF} | sed -e 's/^\[//' -e 's/\].*//') + DISTRO_REPO_DIR=$(for d in $(grep baseurl $MY_YUM_CONF | grep file: | awk -F : '{print $2}' | sed 's:///:/:g'); do if [ -d $d/images ]; then echo $d ;fi; done) ##################################### # Output definitons # where to put stuff (curent dir unless MY_WORKSPACE defined) - OUTPUT_DIR="$PWD/export" + OUTPUT_DIR="$PWD/export" if [ ! -z "$MY_WORKSPACE" ] && [ -d "$MY_WORKSPACE" ] ; then OUTPUT_DIR="$MY_WORKSPACE/export" CGCS_REPO_DIR="$MY_WORKSPACE/std/rpmbuild/RPMS" @@ -316,7 +324,7 @@ function init_output_dir { date +%s.%N > $OUTPUT_DIST_DIR/isolinux/.discinfo echo $PLATFORM_RELEASE >> $OUTPUT_DIST_DIR/isolinux/.discinfo echo "x86_64" >> $OUTPUT_DIST_DIR/isolinux/.discinfo - + \cp -L -ru $DISTRO_REPO_DIR/isolinux/* $OUTPUT_DIST_DIR/isolinux/ \cp -L -ru $DISTRO_REPO_DIR/images/pxeboot $OUTPUT_DIST_DIR/isolinux/images/ @@ -342,7 +350,7 @@ function init_output_dir { perl -p -i -e "s/device=sda/device=${DEVICE}/g" $OUTPUT_DIST_DIR/isolinux/isolinux.cfg fi - # Copy UEFI files + # Copy UEFI files \cp -L -ru $DISTRO_REPO_DIR/EFI/* $OUTPUT_DIST_DIR/isolinux/EFI/ \cp -L "$BSP_FILES_PATH/grub.cfg" "$OUTPUT_DIST_DIR/isolinux/EFI/BOOT/grub.cfg" \cp -L "$BSP_FILES_PATH/pxeboot_grub.cfg" "$OUTPUT_DIST_DIR/isolinux/pxeboot/pxeboot_grub.cfg" @@ -472,7 +480,7 @@ function final_touches { # create the repo \cd $OUTPUT_DIST_DIR/isolinux $CREATEREPO -q -g ../comps.xml . 
- + # build the ISO printf "Building image $OUTPUT_FILE\n" \cd $OUTPUT_DIST_DIR @@ -485,7 +493,7 @@ function final_touches { -eltorito-alt-boot \ -e images/efiboot.img \ -no-emul-boot \ - isolinux/ + isolinux/ isohybrid --uefi $OUTPUT_FILE implantisomd5 $OUTPUT_FILE @@ -495,7 +503,7 @@ function final_touches { function extract_pkg_from_local_repo { local pkgname=$1 - local yum_conf=$2 + local pkg_mgr_conf=$2 shift 2 local repoid="" @@ -505,7 +513,14 @@ function extract_pkg_from_local_repo { repoid_arg+=" --repoid=${repoid}" done - local pkgfile=$(TMPDIR=$TMP_DIR repoquery --config=${yum_conf} ${repoid_arg} --location -q ${pkgname}) + echo "TMPDIR=$TMP_DIR"\ + "${REPOQUERY} --config=${pkg_mgr_conf} ${repoid_arg}"\ + "${REPOQUERY_SUB_COMMAND} --location"\ + "--arch=noarch,x86_64 -q ${pkgname}" + local pkgfile=$(TMPDIR=$TMP_DIR \ + ${REPOQUERY} --config=${pkg_mgr_conf} ${repoid_arg} \ + ${REPOQUERY_SUB_COMMAND} --location \ + --arch=noarch,x86_64 -q ${pkgname}) if [ -z "${pkgfile}" ]; then echo "Could not find package $pkgname in $@" exit 1 @@ -605,7 +620,7 @@ EOM echo "Error: Could not copy all files from installer" exit 1 fi - + \cp --preserve=all www/pages/feed/rel-*/LiveOS/squashfs.img $OUTPUT_DIST_DIR/isolinux/LiveOS if [ $? -ne 0 ]; then echo "Error: Could not copy squashfs from LiveOS" diff --git a/build-tools/build-pkgs b/build-tools/build-pkgs index edd980f8..da9d65d0 100755 --- a/build-tools/build-pkgs +++ b/build-tools/build-pkgs @@ -1,7 +1,7 @@ #!/bin/bash # -# Copyright (c) 2018 Wind River Systems, Inc. +# Copyright (c) 2018-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -15,6 +15,10 @@ BUILD_PKGS_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )" +# Set REPOQUERY and REPOQUERY_SUB_COMMAND for our build environment. 
+source "${BUILD_PKGS_DIR}/pkg-manager-utils.sh" + + usage () { echo "" echo "Usage: " @@ -75,11 +79,15 @@ make_cache_current_rpms () { if [ -d $MY_WORKSPACE/$build_type/rpmbuild/$rpm_type/repodata ]; then current=$MY_WORKSPACE/$build_type/rpmbuild/$rpm_type/ - repoquery \ + ${REPOQUERY} \ --repofrompath=$build_type-$rpm_type,$current \ - --repoid=$build_type-$rpm_type --arch=noarch,src,x86_64 -a \ + --repoid=$build_type-$rpm_type --arch=noarch,src,x86_64 \ + ${REPOQUERY_SUB_COMMAND} \ + --all \ --qf "%-10{repoid} %-40{name} %-10{version} %-10{release}" \ >> ${FILE} + + \rm -rf $TMP_DIR/yum-$USER-* fi done; done; diff --git a/build-tools/build-rpms-parallel b/build-tools/build-rpms-parallel index 4ca2202e..e2830c24 100755 --- a/build-tools/build-rpms-parallel +++ b/build-tools/build-rpms-parallel @@ -1,7 +1,7 @@ #!/bin/bash # -# Copyright (c) 2018 Wind River Systems, Inc. +# Copyright (c) 2018-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -15,7 +15,7 @@ # The location of packages to be built is # $MY_WORKSPACE//rpmbuild/SRPMS. # -# The build order is a derived from the BuildRequires in the +# The build order is a derived from the BuildRequires in the # spec files in the src.rpms. Note that the BuildRequires sometimes # create dependency loops, so no correct order can be computed. In these # cases we add a retry loop. As long as one new package builds, we @@ -24,9 +24,12 @@ # messages go by! # - export ME=$(basename "$0") CMDLINE="$ME $@" +BUILD_RPMS_PARALLEL_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )" + +# Set PKG_MANAGER for our build environment. +source "${BUILD_RPMS_PARALLEL_DIR}/pkg-manager-utils.sh" # Build for distribution. Currently 'centos' is only supported value. 
@@ -43,22 +46,22 @@ MAX_MEM_PER_WORKER=11 # Note: tmpfs is typically 2.5 gb when compiling many small jobs MIN_MEM_PER_WORKER=3 -# Maximum number of disk based parallel build environments +# Maximum number of disk based parallel build environments MAX_DISK_BASED_WORKERS=2 # Minimum space in gb for each disk based parallel build environment MIN_DISK_PER_WORKER=20 -# How many srpms to build before we add another parallel build environment +# How many srpms to build before we add another parallel build environment MIN_TASKS_PER_CORE=3 # Max number of new concurrent builds to allow for MAX_SHARE_FACTOR=4 -# Always leave at least MEMORY_RESERVE gb of available mem for the system +# Always leave at least MEMORY_RESERVE gb of available mem for the system MEMORY_RESERVE=1 -# These two values will be reassigned in the 'compute_resources' subroutine +# These two values will be reassigned in the 'compute_resources' subroutine MOCKCHAIN_RESOURCE_ALLOCATION="" MAX_WORKERS=$ABSOLUTE_MAX_WORKERS @@ -164,7 +167,7 @@ create-no-clean-list () { local install_groups="" local install_packages="" local p - + for p in $(grep "config_opts\['chroot_setup_cmd'\]" $MY_BUILD_CFG | tail -n1 | cut -d '=' -f 2 | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e "s/^'//" -e "s/'$//" -e 's/^install //'); do if [[ $p == @* ]] ; then install_groups=$(join_by ' ' $install_groups $(echo $p | cut -c 2-)) @@ -172,37 +175,44 @@ create-no-clean-list () { install_packages=$(join_by ' ' $install_packages $p) fi done - + local noclean_last_list_len=0 local noclean_list="" local tmp_list="" local g - + for g in $install_groups; do - tmp_list=$(yum -c $MY_YUM_CONF groupinfo $g 2>> /dev/null | awk 'f;/Mandatory Packages:/{f=1}' | cut -c 5-) + # Find mandatory packages in the group. + # Discard anything before (and including) 'Mandatory Packages:' + # and anything after (and including) 'Optional Packages:'. + # Also discard leading spaces or '+' characters. 
+ tmp_list=$(${PKG_MANAGER} -c $MY_YUM_CONF groupinfo $g 2>> /dev/null \ + | awk 'f;/Mandatory Packages:/{f=1}' \ + | sed -n '/Optional Packages:/q;p' \ + | sed 's#[ +]*##') noclean_list=$(join_by ' ' $noclean_list $tmp_list) done - + noclean_list=$(join_by ' ' $noclean_list $install_packages) noclean_list=$(echo $noclean_list | tr ' ' '\n' | sort --uniq) noclean_list_len=$(echo $noclean_list | wc -w) - + while [ $noclean_list_len -gt $noclean_last_list_len ]; do noclean_last_list_len=$noclean_list_len - noclean_list=$( (yum -c $MY_YUM_CONF deplist $noclean_list 2>> /dev/null | grep provider: | awk '{ print $2 }' | awk -F . '{ print $1 }'; for p in $noclean_list; do echo $p; done) | sort --uniq) + noclean_list=$( (${PKG_MANAGER} -c $MY_YUM_CONF deplist $noclean_list 2>> /dev/null | grep provider: | awk '{ print $2 }' | awk -F . '{ print $1 }'; for p in $noclean_list; do echo $p; done) | sort --uniq) noclean_list_len=$(echo $noclean_list | wc -w) done - + echo $noclean_list > $NO_CLEAN_LIST_FILE fi cat $NO_CLEAN_LIST_FILE } -str_lst_contains() { +str_lst_contains() { TARGET="$1" LST="$2" - if [[ $LST =~ (^|[[:space:]])$TARGET($|[[:space:]]) ]] ; then + if [[ $LST =~ (^|[[:space:]])$TARGET($|[[:space:]]) ]] ; then return 0 else return 1 @@ -214,7 +224,7 @@ compute_resources () { local b echo "" - for f in $@; do + for f in $@; do b=$(basename $f) if [ -f $SOURCES_DIR/$b/BIG ] || [ ]; then weight=$((weight+MIN_TASKS_PER_CORE)) @@ -232,7 +242,7 @@ compute_resources () { local cpus=$(number_of_cpus) local num_users=$(sqrt $users) local num_build=$(number_of_builds_in_progress) - num_build=$((num_build+1)) + num_build=$((num_build+1)) echo "compute_resources: total: cpus=$cpus, mem=$mem, disk=$disk, weight=$weight, num_build=$num_build" # What fraction of the machine will we use @@ -311,7 +321,9 @@ create_lst () { recreate_repodata () { local DIR=${1} - (cd $DIR + ( + mkdir -p $DIR + cd $DIR if [ -f repodata/*comps*xml ]; then \mv repodata/*comps*xml comps.xml fi @@ 
-402,11 +414,11 @@ union () { echo $b fi done -} +} # # returns 0 if element is in the array -# +# # e.g. contains ARRAY $SEEKING && echo "$SEEKING is in 'ARRAY'" # contains () { @@ -453,7 +465,7 @@ build_order_recursive () { needs=( $(grep "^$target;" "$SRPM_DIRECT_REQUIRES_FILE" | sed "s/$target;//" | sed 's/,/ /g') ) needs_list=( $(intersection needs remainder_list) ) for((idx=0;idx<${#needs_list[@]};idx++)); do - build_order_recursive ${needs_list[idx]} + build_order_recursive ${needs_list[idx]} done echo $target break @@ -503,7 +515,7 @@ build_order () { UNORDERED_LIST=( ${original_list[@]} ) while [ ${#UNORDERED_LIST[@]} -gt 0 ]; do element=${UNORDERED_LIST[0]} - build_order_recursive $element + build_order_recursive $element done fi } @@ -621,7 +633,7 @@ umount_mock_root_as_tmpfs () { return 0 fi mock_tmpfs_umount $mount_dir &> /dev/null - + rc=$? if [ $rc -ne 0 ]; then echo "FAILED: mock_tmpfs_umount $mount_dir" @@ -652,7 +664,7 @@ kill_descendents () recursive_promote_children="" recursive_children=$(pgrep -P $kill_pid) fi - + if [ $iteration -eq 0 ]; then other_children=$(for relevant_child in $relevant_other_children; do pgrep -P $kill_pid $relevant_child; done) if [ "$other_children" != "" ]; then @@ -667,7 +679,7 @@ kill_descendents () fi for pid in $recursive_children; do - kill_descendents "$pid" $kill_all $need_stop $((iteration + 1)) + kill_descendents "$pid" $kill_all $need_stop $((iteration + 1)) done for pid in $recursive_promote_children; do kill_descendents "$pid" 1 1 $((iteration + 1)) @@ -946,11 +958,11 @@ mock_clean_cfg () { echo "==================================" mock_clean_cache_cfg $CFG echo "==================================" - echo "$MOCK -r $CFG --configdir $(dirname $CFG) --scrub=all" - trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --scrub=all + echo "$MOCK --root $CFG --configdir $(dirname $CFG) --scrub=all" + trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --scrub=all echo 
"==================================" - echo "$MOCK -r $CFG --configdir $(dirname $CFG) --clean" - trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --clean + echo "$MOCK --root $CFG --configdir $(dirname $CFG) --clean" + trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --clean ### Note: this sometimes leaves behind a $MY_WORKSPACE/cache/mock/yum_cache/yumcache.lock echo "==================================" mock_clean_cache_all_cfg $CFG @@ -1034,10 +1046,10 @@ mock_partial_clean_cfg () { local RPMS_CLEAN_LIST="" local NEED_FULL_MOCK_CLEAN=0 for r in $RPMS_LIST; do - if ! str_lst_contains $r "$NO_CLEAN_LIST" ; then + if ! str_lst_contains $r "$NO_CLEAN_LIST" ; then RPMS_CLEAN_LIST=$(join_by ' ' $RPMS_CLEAN_LIST $r) - else - echo "Can't remove '$r' from mock environment without a wipe"; + else + echo "Can't remove '$r' from mock environment without a wipe"; NEED_FULL_MOCK_CLEAN=1 fi done @@ -1050,19 +1062,19 @@ mock_partial_clean_cfg () { # Intent of following is for $RPMS_LIST to be expand now while the remaining $ varaibles are for bash inside mock to expand echo "Try to uninstall from the mock environment these packages: $RPMS_CLEAN_LIST" CMD='LST="'$RPMS_CLEAN_LIST'"; - DELETE_LIST=""; - for r in $LST; do - FOUND=$(rpm -q $r) ; - if [ $? -eq 0 ]; then - DELETE_LIST="$DELETE_LIST $FOUND"; - fi; - done; - echo "uninstalling these packages: $DELETE_LIST"; + DELETE_LIST=""; + for r in $LST; do + FOUND=$(rpm -q $r) ; + if [ $? 
-eq 0 ]; then + DELETE_LIST="$DELETE_LIST $FOUND"; + fi; + done; + echo "uninstalling these packages: $DELETE_LIST"; if [ "$DELETE_LIST" != "" ]; then rpm -e --nodeps $DELETE_LIST; fi' - echo "$MOCK -r $CFG --configdir $(dirname $CFG) --chroot bash -c $CMD" &> $TMP - trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --chroot "bash -c '$CMD'" &>> $TMP + echo "$MOCK --root $CFG --configdir $(dirname $CFG) --chroot bash -c $CMD" &> $TMP + trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --chroot "bash -c '$CMD'" &>> $TMP RC=$? if [ $RC -ne 0 ]; then cat $TMP @@ -1112,8 +1124,8 @@ mock_clean_cache_cfg () { clean_yum_cache_cfg $CFG - echo "$MOCK -r $CFG --configdir $(dirname $CFG) --scrub=root-cache --scrub=yum-cache --scrub=cache" &> $TMP - trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --scrub=root-cache --scrub=yum-cache --scrub=cache &>> $TMP + echo "$MOCK --root $CFG --configdir $(dirname $CFG) --scrub=root-cache --scrub=yum-cache --scrub=cache" &> $TMP + trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --scrub=root-cache --scrub=yum-cache --scrub=cache &>> $TMP RC=$? 
if [ $RC -ne 0 ]; then cat $TMP @@ -1141,8 +1153,8 @@ mock_clean_cache_all_cfg () { echo "==================================" clean_yum_cache_cfg $CFG echo "==================================" - echo "$MOCK -r $CFG --configdir $(dirname $CFG) --scrub=all" - trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --scrub=all + echo "$MOCK --root $CFG --configdir $(dirname $CFG) --scrub=all" + trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --scrub=all echo "==================================" } @@ -1169,9 +1181,16 @@ mock_clean_metadata_cfg () { return 1 fi - CMD=$((cat $CFG; grep config_opts\\[\'yum.conf\'\\\] $CFG | sed 's#\\n#\n#g') | grep '^[[]' | grep -v main | sed 's/[][]//g' | sed 's#^#yum --enablerepo=#' | sed 's#$# clean metadata#' | sort -u | tr '\n' ';') - echo "$MOCK -r $CFG --configdir $(dirname $CFG) --chroot bash -c $CMD" &> $TMP - trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --chroot "bash -c '($CMD)'" &>>$TMP + CMD=$((cat $CFG; \ + grep config_opts\\[\'yum.conf\'\\\] $CFG | \ + sed 's#\\n#\n#g') | \ + grep '^[[]' | \ + grep -v main | \ + sed -e 's/[][]//g' -e "s#^#${PKG_MANAGER} --enablerepo=#" -e 's#$# clean metadata#' | \ + sort -u | \ + tr '\n' ';') + echo "$MOCK --root $CFG --configdir $(dirname $CFG) --chroot bash -c $CMD" &> $TMP + trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --chroot "bash -c '($CMD)'" &>>$TMP RC=$? if [ $RC -ne 0 ]; then cat $TMP @@ -1228,7 +1247,7 @@ update_cgcs_repo () { if [ "x$NEW_UNTRACKED" != "x" ]; then NEED_REBUILD=1 fi - + if [ $NEED_REBUILD -eq 1 ]; then NEED_MOCK_CLEAN=1 echo "" @@ -1274,7 +1293,7 @@ mock_clean_mounts_dir () { fi fi if [ $RC -eq 1 ]; then - echo "ERROR: Directory '$MOUNT' is already mounted and will cause a build failure within mock." + echo "ERROR: Directory '$MOUNT' is already mounted and will cause a build failure within mock." echo "Ask your system administrator to umount '$MOUNT'." 
exit 1 fi @@ -1319,7 +1338,7 @@ clean_yum_cache_cfg () { local YUM_CACHE_MOUNT=$(readlink -f "$ROOT_DIR/root/var/cache/yum") local YUM_CACHE_LOCK="$CACHE_DIR/mock/yum_cache/yumcache.lock" # echo "clean_yum_cache YUM_CACHE_MOUNT='$YUM_CACHE_MOUNT' YUM_CACHE_LOCK='$YUM_CACHE_LOCK'" - + if [ "$YUM_CACHE_MOUNT" != "" ]; then mock_clean_mounts_dir "$YUM_CACHE_MOUNT" fi @@ -1358,8 +1377,8 @@ mock_update_cfg () { echo "${FUNCNAME[0]}: $CFG" echo "==================================" set_mock_symlinks $CFG - echo "$MOCK -r $CFG --configdir $(dirname $CFG) --update" - trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --update + echo "$MOCK --root $CFG --configdir $(dirname $CFG) --update" + trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --update echo "==================================" } @@ -1368,8 +1387,8 @@ mock_init_cfg () { echo "${FUNCNAME[0]}: $CFG" echo "==================================" set_mock_symlinks $CFG - echo "$MOCK -r $CFG --configdir $(dirname $CFG) --init" - trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --init + echo "$MOCK --root $CFG --configdir $(dirname $CFG) --init" + trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --init echo "==================================" } @@ -1388,14 +1407,14 @@ mock_update_or_init_cfg () { if [ -d $ROOT_DIR/root ]; then echo "Updating the mock environment" set_mock_symlinks $CFG - echo "$MOCK -r $CFG --configdir $(dirname $CFG) --update" - trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --update &> $TMP + echo "$MOCK --root $CFG --configdir $(dirname $CFG) --update" + trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --update &> $TMP RC=$? 
else echo "Init the mock environment" set_mock_symlinks $CFG - echo "$MOCK -r $CFG --configdir $(dirname $CFG) --init" - trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --init &> $TMP + echo "$MOCK --root $CFG --configdir $(dirname $CFG) --init" + trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --init &> $TMP RC=$? fi if [ $RC -ne 0 ]; then @@ -1411,7 +1430,7 @@ mock_update_or_init () { local PREFIX=$(echo $SUB_CFG | rev | cut -d . -f 2 | rev) ( mock_update_or_init_cfg $SUB_CFG 2>&1 | sed "s#^#${PREFIX}: #" ; exit ${PIPESTATUS[0]} ) & done - wait + wait echo "${FUNCNAME[0]}: out" } @@ -1429,12 +1448,12 @@ NO_DESCENDANTS=0 NO_REQUIRED=0 NO_AUTOCLEAN=0 NO_BUILD_INFO=0 -HELP=0 +HELP=0 CLEAN_FLAG=0 FORMAL_FLAG=0 CAREFUL=0 DEP_TEST_FLAG=0 - + # read the options TEMP=$(getopt -o h --long parallel,std,rt,installer,containers,no-required,no-descendants,no-autoclean,no-build-info,dep-test,clean,tmpfs-clean,formal,careful,help,layer: -n "$ME" -- "$@") @@ -1470,7 +1489,7 @@ while true ; do --) shift ; break ;; *) echo "Internal error!" ; exit 1 ;; esac -done +done # Reset variables if [ -n "$MY_WORKSPACE" ]; then @@ -1729,7 +1748,7 @@ clean_list () { echo "rm -rf $d" \rm -rf "$d" 2>> /dev/null done - + else rs=$(rpm_get_srpm $r) if [[ "$rs" != "$sn"-[0-9]* ]]; then @@ -1742,7 +1761,7 @@ clean_list () { fi done done - + TARGET=$(rpm -qp --qf '%{NAME}-%{VERSION}\n' "$s") if [ $CLEAN_FLAG -eq 1 ]; then @@ -1782,7 +1801,7 @@ clean_list () { \rm -f -v $RESULT_DIR/mockchain.log 2>> /dev/null mock_clean - else + else # If dependency test if [ $DEP_TEST_FLAG -eq 1 ]; then mock_clean @@ -2045,7 +2064,7 @@ if [ $CLEAN_FLAG -eq 0 ] && [ $NO_DESCENDANTS -eq 0 ] && [ -f $SRPM_DIRECT_DESCE done # For non-std build, and if non specific build targets are named, then search all - # packages that we might build and check if they require a package that DID build + # packages that we might build and check if they require a package that DID build # in the std build. 
If so build the package as a secondary target, even though the # primary target was from a different build_type. if [ "$BUILD_TYPE" != "std" ] && [ $ALL -eq 1 ] && [ -f $SRPM_TO_RPM_MAP_FILE ] && [ -f $SRPM_RPM_DIRECT_REQUIRES_FILE ]; then @@ -2105,7 +2124,7 @@ if [ $CLEAN_FLAG -eq 0 ] && [ $NO_DESCENDANTS -eq 0 ] && [ -f $SRPM_DIRECT_DESCE done fi - # If the kernel or kernel-rt packages where absent from the primary build targets, but + # If the kernel or kernel-rt packages where absent from the primary build targets, but # added as a secondary target, then make sure all out-of-tree kernel modules are also # added. for n in kernel kernel-rt; do @@ -2234,9 +2253,9 @@ echo "SRPMS_LIST = $SRPMS_LIST" echo "RPMS_LIST = $RPMS_LIST" -echo +echo if [ $CLEAN_FLAG -eq 0 ]; then - # pre-create these directories as $USER, + # pre-create these directories as $USER, # else mock will create them as root and fails to clean them. # Note: keep these in sync with mockchain-parallel! for i in $(seq 0 $((MAX_WORKERS-1))); do @@ -2299,15 +2318,28 @@ mock_clean_metadata echo echo "Building" +recreate_repodata $BUILD_BASE/results/$BUILD_ENVIRONMENT_DIR + CMD_PREFIX="" if [ -x /bin/ionice ]; then CMD_PREFIX="nice -n 20 ionice -c Idle /bin/ionice " fi -CMD_OPTIONS="-m --no-clean -m --no-cleanup-after" -if [ $CAREFUL -eq 1 ]; then - CMD_OPTIONS="-m --no-cleanup-after" +REAL_MOCKCHAIN=0 +MOCK_PASSTHROUGH="-m" +MOCKCHAIN="mockchain-parallel" +CHAIN_OPTION="" +if file $(which mockchain) | grep -q 'Python script'; then + REAL_MOCKCHAIN=1 fi + +CMD_OPTIONS="$MOCK_PASSTHROUGH --no-clean $MOCK_PASSTHROUGH --no-cleanup-after" +if [ $CAREFUL -eq 1 ]; then + CMD_OPTIONS="$MOCK_PASSTHROUGH --no-cleanup-after" +fi + +CMD_OPTIONS+=" --log=$MOCKCHAIN_LOG" + echo "CAREFUL=$CAREFUL" # Sets WORKERS and MOCKCHAIN_RESOURCE_ALLOCATION @@ -2315,13 +2347,13 @@ compute_resources $SRPMS_LIST if [ -f $SRPM_RPM_DIRECT_REQUIRES_FILE ]; then - CMD_OPTIONS="$CMD_OPTIONS --srpm-dependency-file 
$SRPM_RPM_DIRECT_REQUIRES_FILE" + CMD_OPTIONS+=" --srpm-dependency-file $SRPM_RPM_DIRECT_REQUIRES_FILE" fi if [ -f "$RPM_DIRECT_REQUIRES_FILE" ]; then - CMD_OPTIONS="$CMD_OPTIONS --rpm-dependency-file $RPM_DIRECT_REQUIRES_FILE" + CMD_OPTIONS+=" --rpm-dependency-file $RPM_DIRECT_REQUIRES_FILE" fi if [ -f "$RPM_TO_SRPM_MAP_FILE" ]; then - CMD_OPTIONS="$CMD_OPTIONS --rpm-to-srpm-map-file $RPM_TO_SRPM_MAP_FILE" + CMD_OPTIONS+=" --rpm-to-srpm-map-file $RPM_TO_SRPM_MAP_FILE" fi @@ -2342,12 +2374,12 @@ echo "MAX_WORKERS=$MAX_WORKERS" echo "MOCKCHAIN_RESOURCE_ALLOCATION=$MOCKCHAIN_RESOURCE_ALLOCATION" -CMD="$CMD_PREFIX mockchain-parallel -r $BUILD_CFG -l $BUILD_BASE --recurse --workers=$MAX_WORKERS --worker-resources=$MOCKCHAIN_RESOURCE_ALLOCATION --basedir=$MY_WORKSPACE --log=$MOCKCHAIN_LOG --tmp_prefix=$USER --addrepo=$LOCAL_URL --addrepo=$LOCAL_SRC_URL $CMD_OPTIONS -m --rebuild $SRPMS_LIST" +CMD="$CMD_PREFIX $MOCKCHAIN --root $BUILD_CFG --localrepo $BUILD_BASE --recurse --workers=$MAX_WORKERS --worker-resources=$MOCKCHAIN_RESOURCE_ALLOCATION --basedir=$MY_WORKSPACE --tmp_prefix=$USER --addrepo=$LOCAL_URL --addrepo=$LOCAL_SRC_URL $CMD_OPTIONS $MOCK_PASSTHROUGH --rebuild" +CMD_BUILD_LIST="$CHAIN_OPTION $SRPMS_LIST" echo "" -echo "$CMD -m --define='_tis_dist .tis' -m --define='platform_release $PLATFORM_RELEASE'" +echo "$CMD $MOCK_PASSTHROUGH --define='_tis_dist .tis' $MOCK_PASSTHROUGH --define='platform_release $PLATFORM_RELEASE' $CMD_BUILD_LIST" echo "" - -trapwrap stdbuf -o0 $CMD -m --define="_tis_dist .tis" -m --define="platform_release $PLATFORM_RELEASE" +trapwrap stdbuf -o0 $CMD $MOCK_PASSTHROUGH --define="_tis_dist .tis" $MOCK_PASSTHROUGH --define="platform_release $PLATFORM_RELEASE" $CMD_BUILD_LIST MOCKCHAIN_RC=$? 
echo $PLATFORM_RELEASE > $LAST_PLATFORM_RELEASE_FILE @@ -2357,7 +2389,7 @@ if [ $CLEAN_FLAG -eq 0 ]; then fi for d in $(find $RESULT_DIR -name '*.rpm' | grep -v '[.]src[.]rpm' | xargs --max-args=1 dirname | sort -u); do - rsync -u $d/*.rpm $RPM_DIR + rsync -u $d/*.rpm $RPM_DIR done if [ $ALL -eq 1 ]; then @@ -2441,5 +2473,5 @@ if [ 0$FORMAL_BUILD -eq 1 ] && [ "$USER" == "jenkins" ]; then fi fi -exit 0 +exit 0 ) 2>&1 | stdbuf -o0 awk '{ print strftime("%H:%M:%S"), $0; fflush(); }' | tee $(date "+$MY_WORKSPACE/build-rpms-parallel_%Y-%m-%d_%H-%M-%S.log") ; exit ${PIPESTATUS[0]} diff --git a/build-tools/build-rpms-serial b/build-tools/build-rpms-serial index 5bd7dbe7..0a6afbf7 100755 --- a/build-tools/build-rpms-serial +++ b/build-tools/build-rpms-serial @@ -1,20 +1,20 @@ #!/bin/bash # -# Copyright (c) 2018 Wind River Systems, Inc. +# Copyright (c) 2018-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # -# +# # Builds rpm files from src.rpm files. # # This version compiles one package at a time. -# -# The location of packages to be built is +# +# The location of packages to be built is # $MY_WORKSPACE//rpmbuild/SRPMS. # -# The build order is a derived from the BuildRequires in the +# The build order is a derived from the BuildRequires in the # spec files in the src.rpms. Note that the BuildRequires sometimes # create dependency loops, so no correct order can be computed. In these # cases we add a retry loop. 
As long as one new package builds, we @@ -103,7 +103,7 @@ create-no-clean-list () { local install_groups="" local install_packages="" local p - + for p in $(grep "config_opts\['chroot_setup_cmd'\]" $MY_BUILD_CFG | tail -n1 | cut -d '=' -f 2 | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e "s/^'//" -e "s/'$//" -e 's/^install //'); do if [[ $p == @* ]] ; then install_groups=$(join_by ' ' $install_groups $(echo $p | cut -c 2-)) @@ -111,37 +111,44 @@ install_packages=$(join_by ' ' $install_packages $p) fi done - + local noclean_last_list_len=0 local noclean_list="" local tmp_list="" local g - + for g in $install_groups; do - tmp_list=$(yum -c $MY_YUM_CONF groupinfo $g 2>> /dev/null | awk 'f;/Mandatory Packages:/{f=1}' | cut -c 5-) + # Find mandatory packages in the group. + # Discard anything before (and including) 'Mandatory Packages:' + # and anything after (and including) 'Optional Packages:'. + # Also discard leading spaces or '+' characters. + tmp_list=$(${PKG_MANAGER} -c $MY_YUM_CONF groupinfo $g 2>> /dev/null \ + | awk 'f;/Mandatory Packages:/{f=1}' \ + | sed -n '/Optional Packages:/q;p' \ + | sed 's#[ +]*##') noclean_list=$(join_by ' ' $noclean_list $tmp_list) done - + noclean_list=$(join_by ' ' $noclean_list $install_packages) noclean_list=$(echo $noclean_list | tr ' ' '\n' | sort --uniq) noclean_list_len=$(echo $noclean_list | wc -w) - + while [ $noclean_list_len -gt $noclean_last_list_len ]; do noclean_last_list_len=$noclean_list_len noclean_list=$( (yum -c $MY_YUM_CONF deplist $noclean_list 2>> /dev/null | grep provider: | awk '{ print $2 }' | awk -F . 
'{ print $1 }'; for p in $noclean_list; do echo $p; done) | sort --uniq) noclean_list_len=$(echo $noclean_list | wc -w) done - + echo $noclean_list > $NO_CLEAN_LIST_FILE fi cat $NO_CLEAN_LIST_FILE } -str_lst_contains() { +str_lst_contains() { TARGET="$1" LST="$2" - if [[ $LST =~ (^|[[:space:]])$TARGET($|[[:space:]]) ]] ; then + if [[ $LST =~ (^|[[:space:]])$TARGET($|[[:space:]]) ]] ; then return 0 else return 1 @@ -169,7 +176,9 @@ create_lst () { recreate_repodata () { local DIR=${1} - (cd $DIR + ( + mkdir -p $DIR + cd $DIR if [ -f repodata/*comps*xml ]; then \mv repodata/*comps*xml comps.xml fi @@ -260,11 +269,11 @@ union () { echo $b fi done -} +} # # returns 0 if element is in the array -# +# # e.g. contains ARRAY $SEEKING && echo "$SEEKING is in 'ARRAY'" # contains () { @@ -311,7 +320,7 @@ build_order_recursive () { needs=( $(grep "^$target;" "$SRPM_DIRECT_REQUIRES_FILE" | sed "s/$target;//" | sed 's/,/ /g') ) needs_list=( $(intersection needs remainder_list) ) for((idx=0;idx<${#needs_list[@]};idx++)); do - build_order_recursive ${needs_list[idx]} + build_order_recursive ${needs_list[idx]} done echo $target break @@ -361,7 +370,7 @@ build_order () { UNORDERED_LIST=( ${original_list[@]} ) while [ ${#UNORDERED_LIST[@]} -gt 0 ]; do element=${UNORDERED_LIST[0]} - build_order_recursive $element + build_order_recursive $element done fi } @@ -476,7 +485,7 @@ kill_descendents () recursive_promote_children="" recursive_children=$(pgrep -P $kill_pid) fi - + if [ $iteration -eq 0 ]; then other_children=$(for relevant_child in $relevant_other_children; do pgrep -P $kill_pid $relevant_child; done) if [ "$other_children" != "" ]; then @@ -491,7 +500,7 @@ kill_descendents () fi for pid in $recursive_children; do - kill_descendents "$pid" $kill_all $need_stop $((iteration + 1)) + kill_descendents "$pid" $kill_all $need_stop $((iteration + 1)) done for pid in $recursive_promote_children; do kill_descendents "$pid" 1 1 $((iteration + 1)) @@ -754,11 +763,11 @@ mock_clean_cfg () { 
echo "==================================" mock_clean_cache_cfg $CFG echo "==================================" - echo "$MOCK -r $CFG --configdir $(dirname $CFG) --scrub=all" - trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --scrub=all + echo "$MOCK --root $CFG --configdir $(dirname $CFG) --scrub=all" + trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --scrub=all echo "==================================" - echo "$MOCK -r $CFG --configdir $(dirname $CFG) --clean" - trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --clean + echo "$MOCK --root $CFG --configdir $(dirname $CFG) --clean" + trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --clean ### Note: this sometimes leaves behind a $MY_WORKSPACE/cache/mock/yum_cache/yumcache.lock echo "==================================" mock_clean_cache_all_cfg $CFG @@ -833,10 +842,10 @@ mock_partial_clean_cfg () { local RPMS_CLEAN_LIST="" local NEED_FULL_MOCK_CLEAN=0 for r in $RPMS_LIST; do - if ! str_lst_contains $r "$NO_CLEAN_LIST" ; then + if ! str_lst_contains $r "$NO_CLEAN_LIST" ; then RPMS_CLEAN_LIST=$(join_by ' ' $RPMS_CLEAN_LIST $r) - else - echo "Can't remove '$r' from mock environment without a wipe"; + else + echo "Can't remove '$r' from mock environment without a wipe"; NEED_FULL_MOCK_CLEAN=1 fi done @@ -849,19 +858,19 @@ mock_partial_clean_cfg () { # Intent of following is for $RPMS_LIST to be expand now while the remaining $ varaibles are for bash inside mock to expand echo "Try to uninstall from the mock environment these packages: $RPMS_CLEAN_LIST" CMD='LST="'$RPMS_CLEAN_LIST'"; - DELETE_LIST=""; - for r in $LST; do - FOUND=$(rpm -q $r) ; - if [ $? -eq 0 ]; then - DELETE_LIST="$DELETE_LIST $FOUND"; - fi; - done; - echo "uninstalling these packages: $DELETE_LIST"; + DELETE_LIST=""; + for r in $LST; do + FOUND=$(rpm -q $r) ; + if [ $? 
-eq 0 ]; then + DELETE_LIST="$DELETE_LIST $FOUND"; + fi; + done; + echo "uninstalling these packages: $DELETE_LIST"; if [ "$DELETE_LIST" != "" ]; then rpm -e --nodeps $DELETE_LIST; fi' - echo "$MOCK -r $CFG --configdir $(dirname $CFG) --chroot bash -c $CMD" &> $TMP - trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --chroot "bash -c '$CMD'" &>> $TMP + echo "$MOCK --root $CFG --configdir $(dirname $CFG) --chroot bash -c $CMD" &> $TMP + trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --chroot "bash -c '$CMD'" &>> $TMP RC=$? if [ $RC -ne 0 ]; then cat $TMP @@ -907,8 +916,8 @@ mock_clean_cache_cfg () { clean_yum_cache_cfg $CFG - echo "$MOCK -r $CFG --configdir $(dirname $CFG) --scrub=root-cache --scrub=yum-cache --scrub=cache" &> $TMP - trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --scrub=root-cache --scrub=yum-cache --scrub=cache &>> $TMP + echo "$MOCK --root $CFG --configdir $(dirname $CFG) --scrub=root-cache --scrub=yum-cache --scrub=cache" &> $TMP + trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --scrub=root-cache --scrub=yum-cache --scrub=cache &>> $TMP RC=$? 
if [ $RC -ne 0 ]; then cat $TMP @@ -931,8 +940,8 @@ mock_clean_cache_all_cfg () { echo "==================================" clean_yum_cache_cfg $CFG echo "==================================" - echo "$MOCK -r $CFG --configdir $(dirname $CFG) --scrub=all" - trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --scrub=all + echo "$MOCK --root $CFG --configdir $(dirname $CFG) --scrub=all" + trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --scrub=all echo "==================================" } @@ -956,8 +965,8 @@ mock_clean_metadata_cfg () { fi CMD=$((cat $CFG; grep config_opts\\[\'yum.conf\'\\\] $CFG | sed 's#\\n#\n#g') | grep '^[[]' | grep -v main | sed 's/[][]//g' | sed 's#^#yum --enablerepo=#' | sed 's#$# clean metadata#' | sort -u | tr '\n' ';') - echo "$MOCK -r $CFG --configdir $(dirname $CFG) --chroot bash -c $CMD" &> $TMP - trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --chroot "bash -c '($CMD)'" &>>$TMP + echo "$MOCK --root $CFG --configdir $(dirname $CFG) --chroot bash -c $CMD" &> $TMP + trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --chroot "bash -c '($CMD)'" &>>$TMP RC=$? if [ $RC -ne 0 ]; then cat $TMP @@ -1010,7 +1019,7 @@ update_cgcs_repo () { if [ "x$NEW_UNTRACKED" != "x" ]; then NEED_REBUILD=1 fi - + if [ $NEED_REBUILD -eq 1 ]; then NEED_MOCK_CLEAN=1 echo "" @@ -1056,7 +1065,7 @@ mock_clean_mounts_dir () { fi fi if [ $RC -eq 1 ]; then - echo "ERROR: Directory '$MOUNT' is already mounted and will cause a build failure within mock." + echo "ERROR: Directory '$MOUNT' is already mounted and will cause a build failure within mock." echo "Ask your system administrator to umount '$MOUNT'." 
exit 1 fi @@ -1097,7 +1106,7 @@ clean_yum_cache_cfg () { local YUM_CACHE_MOUNT=$(readlink -f "$ROOT_DIR/root/var/cache/yum") local YUM_CACHE_LOCK="$CACHE_DIR/mock/yum_cache/yumcache.lock" # echo "clean_yum_cache YUM_CACHE_MOUNT='$YUM_CACHE_MOUNT' YUM_CACHE_LOCK='$YUM_CACHE_LOCK'" - + if [ "$YUM_CACHE_MOUNT" != "" ]; then mock_clean_mounts_dir "$YUM_CACHE_MOUNT" fi @@ -1131,8 +1140,8 @@ mock_update_cfg () { echo "${FUNCNAME[0]}: $CFG" echo "==================================" set_mock_symlinks $CFG - echo "$MOCK -r $CFG --configdir $(dirname $CFG) --update" - trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --update + echo "$MOCK --root $CFG --configdir $(dirname $CFG) --update" + trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --update echo "==================================" } @@ -1141,8 +1150,8 @@ mock_init_cfg () { echo "${FUNCNAME[0]}: $CFG" echo "==================================" set_mock_symlinks $CFG - echo "$MOCK -r $CFG --configdir $(dirname $CFG) --init" - trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --init + echo "$MOCK --root $CFG --configdir $(dirname $CFG) --init" + trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --init echo "==================================" } @@ -1161,14 +1170,14 @@ mock_update_or_init_cfg () { if [ -d $ROOT_DIR/root ]; then echo "Updating the mock environment" set_mock_symlinks $CFG - echo "$MOCK -r $CFG --configdir $(dirname $CFG) --update" - trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --update &> $TMP + echo "$MOCK --root $CFG --configdir $(dirname $CFG) --update" + trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --update &> $TMP RC=$? 
else echo "Init the mock environment" set_mock_symlinks $CFG - echo "$MOCK -r $CFG --configdir $(dirname $CFG) --init" - trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --init &> $TMP + echo "$MOCK --root $CFG --configdir $(dirname $CFG) --init" + trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --init &> $TMP RC=$? fi if [ $RC -ne 0 ]; then @@ -1198,12 +1207,12 @@ NO_DESCENDANTS=0 NO_REQUIRED=0 NO_AUTOCLEAN=0 NO_BUILD_INFO=0 -HELP=0 +HELP=0 CLEAN_FLAG=0 FORMAL_FLAG=0 CAREFUL=0 DEP_TEST_FLAG=0 - + # read the options TEMP=$(getopt -o h --long serial,std,rt,installer,containers,no-required,no-descendants,no-autoclean,no-build-info,dep-test,clean,formal,careful,help,layer: -n "$ME" -- "$@") @@ -1238,7 +1247,7 @@ while true ; do --) shift ; break ;; *) echo "Internal error!" ; exit 1 ;; esac -done +done # Reset variables @@ -1493,7 +1502,7 @@ clean_list () { echo "rm -rf $d" \rm -rf "$d" 2>> /dev/null done - + else rs=$(rpm_get_srpm $r) if [[ "$rs" != "$sn"-[0-9]* ]]; then @@ -1506,7 +1515,7 @@ clean_list () { fi done done - + TARGET=$(rpm -qp --qf '%{NAME}-%{VERSION}\n' "$s") if [ $CLEAN_FLAG -eq 1 ]; then @@ -1542,7 +1551,7 @@ clean_list () { \rm -f -v $RESULT_DIR/mockchain.log 2>> /dev/null mock_clean - else + else # If dependency test if [ $DEP_TEST_FLAG -eq 1 ]; then mock_clean @@ -1801,7 +1810,7 @@ if [ $CLEAN_FLAG -eq 0 ] && [ $NO_DESCENDANTS -eq 0 ] && [ -f $SRPM_DIRECT_DESCE done # For non-std build, and if non specific build targets are named, then search all - # packages that we might build and check if they require a package that DID build + # packages that we might build and check if they require a package that DID build # in the std build. If so build the package as a secondary target, even though the # primary target was from a different build_type. 
if [ "$BUILD_TYPE" != "std" ] && [ $ALL -eq 1 ] && [ -f $SRPM_TO_RPM_MAP_FILE ] && [ -f $SRPM_RPM_DIRECT_REQUIRES_FILE ]; then @@ -1861,7 +1870,7 @@ if [ $CLEAN_FLAG -eq 0 ] && [ $NO_DESCENDANTS -eq 0 ] && [ -f $SRPM_DIRECT_DESCE done fi - # If the kernel or kernel-rt packages where absent from the primary build targets, but + # If the kernel or kernel-rt packages where absent from the primary build targets, but # added as a secondary target, then make sure all out-of-tree kernel modules are also # added. for n in kernel kernel-rt; do @@ -1996,9 +2005,9 @@ echo "SRPMS_LIST = $SRPMS_LIST" echo "RPMS_LIST = $RPMS_LIST" -echo +echo if [ $CLEAN_FLAG -eq 0 ]; then - # pre-create these directories as $USER, + # pre-create these directories as $USER, # else mock will create them as root and fails to clean them. # Note: keep these in sync with mockchain-parallel! mkdir -p $MY_WORKSPACE/mock @@ -2059,27 +2068,46 @@ mock_clean_metadata echo echo "Building" +recreate_repodata $BUILD_BASE/results/$BUILD_ENVIRONMENT_DIR + CMD_PREFIX="" if [ -x /bin/ionice ]; then CMD_PREFIX="nice -n 20 ionice -c Idle /bin/ionice " fi -CMD_OPTIONS="-m --no-clean -m --no-cleanup-after" +REAL_MOCKCHAIN=0 +MOCK_PASSTHROUGH="" +MOCKCHAIN="mock" +CHAIN_OPTION="--chain" +if file $(which mockchain) | grep -q 'Python script'; then + REAL_MOCKCHAIN=1 + MOCK_PASSTHROUGH="-m" + MOCKCHAIN="mockchain" + CHAIN_OPTION="" +fi + +CMD_OPTIONS="$MOCK_PASSTHROUGH --no-clean $MOCK_PASSTHROUGH --no-cleanup-after" if [ $CAREFUL -eq 1 ]; then - CMD_OPTIONS="-m --no-cleanup-after" + CMD_OPTIONS="$MOCK_PASSTHROUGH --no-cleanup-after" +fi +if [ $REAL_MOCKCHAIN -eq 1 ]; then + CMD_OPTIONS+=" --log=$MOCKCHAIN_LOG" fi echo "CAREFUL=$CAREFUL" echo "CMD_OPTIONS=$CMD_OPTIONS" -CMD="$CMD_PREFIX mockchain -r $BUILD_CFG -l $BUILD_BASE --recurse --log=$MOCKCHAIN_LOG --tmp_prefix=$USER --addrepo=$LOCAL_URL --addrepo=$LOCAL_SRC_URL -m --rootdir=$BUILD_BASE/mock/root $CMD_OPTIONS -m --rebuild $SRPMS_LIST" -echo "$CMD -m 
--define='_tis_dist .tis' -m --define='platform_release $PLATFORM_RELEASE'" -trapwrap stdbuf -o0 $CMD -m --define='_tis_dist .tis' -m --define="platform_release $PLATFORM_RELEASE" +CMD="$CMD_PREFIX $MOCKCHAIN --root $BUILD_CFG --localrepo $BUILD_BASE --recurse --tmp_prefix=$USER --addrepo=$LOCAL_URL --addrepo=$LOCAL_SRC_URL $MOCK_PASSTHROUGH --rootdir=$BUILD_BASE/mock/root $CMD_OPTIONS $MOCK_PASSTHROUGH --rebuild" +CMD_BUILD_LIST="$CHAIN_OPTION $SRPMS_LIST" +echo "" +echo "$CMD $MOCK_PASSTHROUGH --define='_tis_dist .tis' $MOCK_PASSTHROUGH --define='platform_release $PLATFORM_RELEASE' $CMD_BUILD_LIST" +echo "" +trapwrap stdbuf -o0 $CMD $MOCK_PASSTHROUGH --define="_tis_dist .tis" $MOCK_PASSTHROUGH --define="platform_release $PLATFORM_RELEASE" $CMD_BUILD_LIST MOCKCHAIN_RC=$? echo $PLATFORM_RELEASE > $LAST_PLATFORM_RELEASE_FILE for d in $(find $RESULT_DIR -name '*.rpm' | grep -v '[.]src[.]rpm' | xargs --max-args=1 dirname | sort -u); do - rsync -u $d/*.rpm $RPM_DIR + rsync -u $d/*.rpm $RPM_DIR done if [ $ALL -eq 1 ]; then @@ -2163,5 +2191,5 @@ if [ 0$FORMAL_BUILD -eq 1 ] && [ "$USER" == "jenkins" ]; then fi fi -exit 0 +exit 0 ) 2>&1 | stdbuf -o0 awk '{ print strftime("%H:%M:%S"), $0; fflush(); }' | tee $(date "+$MY_WORKSPACE/build-rpms-serial_%Y-%m-%d_%H-%M-%S.log") ; exit ${PIPESTATUS[0]} diff --git a/build-tools/build_iso/cgts_deps.sh b/build-tools/build_iso/cgts_deps.sh index e1ccbf96..ab45352d 100755 --- a/build-tools/build_iso/cgts_deps.sh +++ b/build-tools/build_iso/cgts_deps.sh @@ -1,5 +1,11 @@ #!/bin/env bash +# +# Copyright (c) 2018-2020 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + # Here's the score, kids. There are a few different places from which we can # get packages. 
In priority order, they are: # @@ -26,21 +32,33 @@ # export/dist/report_deps.txt # +CGTS_DEPS_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )" + +# Set REPOQUERY, REPOQUERY_SUB_COMMAND, REPOQUERY_RESOLVE and +# REPOQUERY_WHATPROVIDES_DELIM for our build environment. +source ${CGTS_DEPS_DIR}/../pkg-manager-utils.sh + # This function generates a simple file of dependencies we're trying to resolve function generate_dep_list { TMP_RPM_DB=$(mktemp -d $(pwd)/tmp_rpm_db_XXXXXX) mkdir -p $TMP_RPM_DB rpm --initdb --dbpath $TMP_RPM_DB - rpm --dbpath $TMP_RPM_DB --test -Uvh --replacefiles '*.rpm' >> $DEPDETAILLISTFILE 2>&1 - rpm --dbpath $TMP_RPM_DB --test -Uvh --replacefiles '*.rpm' 2>&1 \ - | grep -v "error:" \ - | grep -v "warning:" \ - | grep -v "Preparing..." \ - | grep -v "installing package" \ - | sed "s/ is needed by.*$//" | sed "s/ >=.*$//" | sort -u > $DEPLISTFILE + rpm --dbpath $TMP_RPM_DB --test -Uvh --replacefiles '*.rpm' > $DEPLISTFILE_NEW 2>&1 + cat $DEPLISTFILE_NEW >> $DEPDETAILLISTFILE + cat $DEPLISTFILE_NEW \ + | grep -v -e "error:" -e "warning:" -e "Preparing..." \ + -e "Verifying..." -e "installing package" \ + | sed -e "s/ is needed by.*$//" -e "s/ [<=>].*$//" \ + | sort -u > $DEPLISTFILE \rm -rf $TMP_RPM_DB } +join_array() { + local IFS="$1" + shift + echo "$*" +} + # Takes a list of requirements (either explcit package name, or capabilities # to provide) and install packages to meet those dependancies # @@ -55,6 +73,7 @@ function generate_dep_list { # capabilities. 
function install_deps { local DEP_LIST="" + local DEP_LIST_ARRAY=() local DEP_LIST_FILE="$1" # Temporary files are used in a few different ways @@ -68,7 +87,7 @@ function install_deps { while read DEP do - DEP_LIST="${DEP_LIST} ${DEP}" + DEP_LIST+=" '${DEP}'" done < $DEP_LIST_FILE echo "Debug: List of deps to resolve: ${DEP_LIST}" @@ -79,8 +98,17 @@ function install_deps { # go through each repo and convert deps to packages based on package name for REPOID in `grep '^[[].*[]]$' $YUM | grep -v '[[]main[]]' | awk -F '[][]' '{print $2 }'`; do - echo "TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch ${DEP_LIST} --qf='%{name}'" - TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --qf='%{name}' ${DEP_LIST} | sed "s/kernel-debug/kernel/g" >> $UNSORTED_PACKAGES + echo "TMPDIR=${TMP_DIR}"\ + "${REPOQUERY} --config=${YUM} --repoid=$REPOID"\ + "${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch"\ + "${DEP_LIST} --qf='%{name}'" + + TMPDIR=${TMP_DIR} \ + ${REPOQUERY} --config=${YUM} --repoid=$REPOID \ + ${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch \ + --qf='%{name}' ${DEP_LIST} \ + | sed "s/kernel-debug/kernel/g" >> $UNSORTED_PACKAGES + \rm -rf $TMP_DIR/yum-$USER-* done sort $UNSORTED_PACKAGES -u > $SORTED_PACKAGES @@ -96,19 +124,31 @@ function install_deps { # If there are any requirements not resolved, look up the packages with # --whatprovides if [ -s $UNRESOLVED_PACKAGES ]; then - DEP_LIST="" + DEP_LIST_ARRAY=() \cp $SORTED_PACKAGES $UNSORTED_PACKAGES while read DEP do - DEP_LIST="${DEP_LIST} ${DEP}" + DEP_LIST_ARRAY+=( "${DEP}" ) done < $UNRESOLVED_PACKAGES - DEP_LIST=$(echo "$DEP_LIST" | sed 's/^ //g') - if [ "$DEP_LIST" != "" ]; then + if [ "${REPOQUERY_WHATPROVIDES_DELIM}" != " " ]; then + DEP_LIST_ARRAY=( "$(join_array "${REPOQUERY_WHATPROVIDES_DELIM}" "${DEP_LIST_ARRAY[@]}" )" ) + fi + + if [ ${#DEP_LIST_ARRAY[@]} -gt 0 ]; then for REPOID in `grep '^[[].*[]]$' $YUM | grep -v '[[]main[]]' | awk -F '[][]' '{print $2 }'`; do - 
echo "TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --whatprovides ${DEP_LIST} --qf='%{name}'" - TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --qf='%{name}' --whatprovides ${DEP_LIST} | sed "s/kernel-debug/kernel/g" >> $UNSORTED_PACKAGES + echo "TMPDIR=${TMP_DIR}"\ + "${REPOQUERY} --config=${YUM} --repoid=${REPOID}"\ + "${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch"\ + "--qf='%{name}' --whatprovides ${DEP_LIST_ARRAY[@]}" + + TMPDIR=${TMP_DIR} \ + ${REPOQUERY} --config=${YUM} --repoid=${REPOID} \ + ${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch \ + --qf='%{name}' --whatprovides ${DEP_LIST_ARRAY[@]} \ + | sed "s/kernel-debug/kernel/g" >> $UNSORTED_PACKAGES + \rm -rf $TMP_DIR/yum-$USER-* done fi @@ -124,12 +164,12 @@ function install_deps { DEP_LIST=" " while read DEP do - DEP_LIST="${DEP_LIST}${DEP} " + DEP_LIST+="${DEP} " done < $SORTED_PACKAGES rm $SORTED_PACKAGES # go through each repo and install packages - local TARGETS=${DEP_LIST} + local TARGETS="${DEP_LIST}" echo "Debug: Resolved list of deps to install: ${TARGETS}" local UNRESOLVED for REPOID in `grep '^[[].*[]]$' $YUM | grep -v '[[]main[]]' | awk -F '[][]' '{print $2 }'`; do @@ -137,8 +177,20 @@ function install_deps { if [[ ! 
-z "${TARGETS// }" ]]; then REPO_PATH=$(cat $YUM | sed -n "/^\[$REPOID\]\$/,\$p" | grep '^baseurl=' | head -n 1 | awk -F 'file://' '{print $2}' | sed 's:/$::') - >&2 echo "TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --resolve $TARGETS --qf='%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}'" - TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --resolve $TARGETS --qf="%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}" | sort -r -V > $TMPFILE + + >&2 echo "TMPDIR=${TMP_DIR}"\ + "${REPOQUERY} --config=${YUM} --repoid=${REPOID}"\ + "${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch"\ + "--qf='%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}'"\ + "${REPOQUERY_RESOLVE} ${TARGETS}" + + TMPDIR=${TMP_DIR} \ + ${REPOQUERY} --config=${YUM} --repoid=${REPOID} \ + ${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch \ + --qf="%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}" \ + ${REPOQUERY_RESOLVE} ${TARGETS} \ + | sort -r -V > $TMPFILE + \rm -rf $TMP_DIR/yum-$USER-* while read STR @@ -157,7 +209,11 @@ function install_deps { cp $PKG_PATH . if [ $? -ne 0 ]; then >&2 echo " Here's what I have to work with..." 
- >&2 echo " TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --resolve $PKG --qf=\"%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}\"" + >&2 echo " TMPDIR=${TMP_DIR}"\ + "${REPOQUERY} --config=${YUM} --repoid=${REPOID}"\ + "${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch"\ + "--qf=\"%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}\""\ + "${REPOQUERY_RESOLVE} ${PKG}" >&2 echo " PKG=$PKG PKG_FILE=$PKG_FILE REPO_PATH=$REPO_PATH PKG_REL_PATH=$PKG_REL_PATH PKG_PATH=$PKG_PATH" fi @@ -172,8 +228,8 @@ function install_deps { echo " path $PKG_PATH" >> $BUILT_REPORT FOUND_UNKNOWN=1 fi - done < $TMPFILE #<<< "$(TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --resolve $TARGETS --qf=\"%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}\" | sort -r -V)" - \rm -rf $TMP_DIR/yum-$USER-* + done < $TMPFILE + \rm -rf $TMP_DIR/yum-$USER-* TARGETS="$UNRESOLVED" fi done @@ -220,6 +276,7 @@ OUTPUT_DIR=$MY_WORKSPACE/export TMP_DIR=$MY_WORKSPACE/tmp YUM=$OUTPUT_DIR/yum.conf DEPLISTFILE=$OUTPUT_DIR/deps.txt +DEPLISTFILE_NEW=$OUTPUT_DIR/deps_new.txt DEPDETAILLISTFILE=$OUTPUT_DIR/deps_detail.txt BUILT_REPORT=$OUTPUT_DIR/local.txt diff --git a/build-tools/build_minimal_iso/cgts_deps.sh b/build-tools/build_minimal_iso/cgts_deps.sh index 062b74ff..9e4a88b2 100755 --- a/build-tools/build_minimal_iso/cgts_deps.sh +++ b/build-tools/build_minimal_iso/cgts_deps.sh @@ -1,40 +1,71 @@ #!/bin/env bash + +# +# Copyright (c) 2018-2020 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +CGTS_DEPS_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )" + +# Set REPOQUERY, REPOQUERY_SUB_COMMAND, REPOQUERY_RESOLVE and +# REPOQUERY_WHATPROVIDES_DELIM for our build environment. 
+source ${CGTS_DEPS_DIR}/../pkg-manager-utils.sh + function generate_dep_list { TMP_RPM_DB=$(mktemp -d $(pwd)/tmp_rpm_db_XXXXXX) mkdir -p $TMP_RPM_DB rpm --initdb --dbpath $TMP_RPM_DB - rpm --dbpath $TMP_RPM_DB --test -Uvh --replacefiles '*.rpm' >> $DEPDETAILLISTFILE 2>&1 - rpm --dbpath $TMP_RPM_DB --test -Uvh --replacefiles '*.rpm' 2>&1 \ - | grep -v "error:" \ - | grep -v "warning:" \ - | grep -v "Preparing..." \ - | sed "s/ is needed by.*$//" | sed "s/ >=.*$//" | sort -u > $DEPLISTFILE - rm -rf $TMP_RPM_DB + rpm --dbpath $TMP_RPM_DB --test -Uvh --replacefiles '*.rpm' > $DEPLISTFILE_NEW 2>&1 + cat $DEPLISTFILE_NEW >> $DEPDETAILLISTFILE + cat $DEPLISTFILE_NEW \ + | grep -v -e "error:" -e "warning:" -e "Preparing..." \ + -e "Verifying..." -e "installing package" \ + | sed -e "s/ is needed by.*$//" -e "s/ [<=>].*$//" \ + | sort -u > $DEPLISTFILE + \rm -rf $TMP_RPM_DB +} + +join_array() { + local IFS="$1" + shift + echo "$*" } function install_deps { local DEP_LIST="" + local DEP_LIST_ARRAY=() local DEP_LIST_FILE="$1" rm -f $TMPFILE while read DEP do - DEP_LIST="${DEP_LIST} ${DEP}" + DEP_LIST_ARRAY+=( "${DEP}" ) done < $DEP_LIST_FILE - echo "Debug: List of deps to resolve: ${DEP_LIST}" + if [ "${REPOQUERY_WHATPROVIDES_DELIM}" != " " ]; then + DEP_LIST_ARRAY=( "$(join_array "${REPOQUERY_WHATPROVIDES_DELIM}" "${DEP_LIST_ARRAY[@]}" )" ) + fi - if [ -z "${DEP_LIST}" ]; then + echo "Debug: List of deps to resolve: ${DEP_LIST_ARRAY[@]}" + + if [ ${#DEP_LIST_ARRAY[@]} -gt 0 ]; then return 0 fi # go through each repo and convert deps to packages - for REPOID in `grep '^[[].*[]]$' $YUM | grep -v '[[]main[]]' | awk -F '[][]' '{print $2 }'`; do - echo "TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --whatprovides ${DEP_LIST} --qf='%{name}'" - TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --qf='%{name}' --whatprovides ${DEP_LIST} | sed "s/kernel-debug/kernel/g" >> $TMPFILE - \rm -rf $TMP_DIR/yum-$USER-* + echo 
"TMPDIR=${TMP_DIR}"\ + "${REPOQUERY} --config=${YUM} --repoid=${REPOID}"\ + "${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch"\ + "--qf='%{name}' --whatprovides ${DEP_LIST_ARRAY[@]}" + TMPDIR=${TMP_DIR} \ + ${REPOQUERY} --config=${YUM} --repoid=${REPOID} \ + ${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch \ + --qf='%{name}' --whatprovides ${DEP_LIST_ARRAY[@]} \ + | sed "s/kernel-debug/kernel/g" >> $TMPFILE + \rm -rf $TMP_DIR/yum-$USER-* done sort $TMPFILE -u > $TMPFILE1 rm $TMPFILE @@ -42,12 +73,12 @@ function install_deps { DEP_LIST="" while read DEP do - DEP_LIST="${DEP_LIST} ${DEP}" + DEP_LIST+="${DEP} " done < $TMPFILE1 rm $TMPFILE1 # next go through each repo and install packages - local TARGETS=${DEP_LIST} + local TARGETS="${DEP_LIST}" echo "Debug: Resolved list of deps to install: ${TARGETS}" local UNRESOLVED for REPOID in `grep '^[[].*[]]$' $YUM | grep -v '[[]main[]]' | awk -F '[][]' '{print $2 }'`; do @@ -55,9 +86,19 @@ function install_deps { if [[ ! -z "${TARGETS// }" ]]; then REPO_PATH=$(cat $YUM | sed -n "/^\[$REPOID\]\$/,\$p" | grep '^baseurl=' | head -n 1 | awk -F 'file://' '{print $2}' | sed 's:/$::') - >&2 echo "TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --resolve $TARGETS --qf='%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}'" - TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --resolve $TARGETS --qf="%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}" | sort -r -V >> $TMPFILE - \rm -rf $TMP_DIR/yum-$USER-* + >&2 echo "TMPDIR=${TMP_DIR}"\ + "${REPOQUERY} --config=${YUM} --repoid=${REPOID}"\ + "${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch"\ + "--qf='%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}'"\ + "${REPOQUERY_RESOLVE} ${TARGETS}" + TMPDIR=${TMP_DIR} \ + ${REPOQUERY} --config=${YUM} --repoid=${REPOID} \ + ${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch \ + --qf="%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}" \ + 
${REPOQUERY_RESOLVE} ${TARGETS} \ + | sort -r -V >> $TMPFILE + + \rm -rf $TMP_DIR/yum-$USER-* while read STR do @@ -75,7 +116,11 @@ function install_deps { cp $PKG_PATH . if [ $? -ne 0 ]; then >&2 echo " Here's what I have to work with..." - >&2 echo " TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --resolve $PKG --qf=\"%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}\"" + >&2 echo " TMPDIR=${TMP_DIR}"\ + "${REPOQUERY} -c ${YUM} --repoid=${REPOID}"\ + "${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch"\ + "--qf=\"%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}\""\ + "${REPOQUERY_RESOLVE} ${PKG}" >&2 echo " PKG=$PKG PKG_FILE=$PKG_FILE REPO_PATH=$REPO_PATH PKG_REL_PATH=$PKG_REL_PATH PKG_PATH=$PKG_PATH" fi @@ -91,8 +136,9 @@ function install_deps { echo " path $PKG_PATH" >> $BUILT_REPORT FOUND_UNKNOWN=1 fi - done < $TMPFILE #<<< "$(TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --resolve $TARGETS --qf=\"%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}\" | sort -r -V)" - \rm -rf $TMP_DIR/yum-$USER-* + done < $TMPFILE + + \rm -rf $TMP_DIR/yum-$USER-* TARGETS="$UNRESOLVED" fi done @@ -143,6 +189,7 @@ OUTPUT_DIR=${ROOT}/newDisk YUM=${ROOT}/yum.conf TMP_DIR=${ROOT}/tmp DEPLISTFILE=${ROOT}/deps.txt +DEPLISTFILE_NEW=${ROOT}/deps_new.txt DEPDETAILLISTFILE=${ROOT}/deps_detail.txt INSTALLDIR=${ROOT}/newDisk/isolinux/Packages diff --git a/build-tools/default_build_srpm b/build-tools/default_build_srpm index e74b3661..fe99ad6c 100755 --- a/build-tools/default_build_srpm +++ b/build-tools/default_build_srpm @@ -1,6 +1,12 @@ #!/bin/bash # set -x +# +# Copyright (c) 2018-2020 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + source "$SRC_BASE/build-tools/spec-utils" source "$SRC_BASE/build-tools/srpm-utils" @@ -195,14 +201,14 @@ fi if [ $TAR_NEEDED -gt 0 ]; then echo "Creating tar file: $TAR_PATH ..." 
- echo "tar czf $TAR_PATH $SRC_DIR --exclude '.git*' --exclude 'build' --exclude='.pc' --exclude='patches' --exclude='$SRC_DIR/$DISTRO' --exclude='pbr-*.egg' --transform 's,^$TRANSFORM,$TAR_NAME-$VERSION,'" - tar czf $TAR_PATH $SRC_DIR --exclude '.git*' --exclude 'build' --exclude='.pc' --exclude='patches' --exclude="$SRC_DIR/$DISTRO" --exclude='pbr-*.egg' --transform "s,^$TRANSFORM,$TAR_NAME-$VERSION," + echo "tar --exclude '.git*' --exclude 'build' --exclude='.pc' --exclude='patches' --exclude='$SRC_DIR/$DISTRO' --exclude='pbr-*.egg' --transform 's,^$TRANSFORM,$TAR_NAME-$VERSION,' -czf $TAR_PATH $SRC_DIR" + tar --exclude '.git*' --exclude 'build' --exclude='.pc' --exclude='patches' --exclude="$SRC_DIR/$DISTRO" --exclude='pbr-*.egg' --transform "s,^$TRANSFORM,$TAR_NAME-$VERSION," -czf "$TAR_PATH" "$SRC_DIR" if [ $? -ne 0 ]; then if [ "x$STAGING" != "x" ]; then popd fi - echo "ERROR: default_build_srpm (${LINENO}): failed to create tar file, cmd: tar czf $TAR_PATH $SRC_DIR --exclude '.git*' --exclude 'build' --exclude='.pc' --exclude='patches' --exclude="$SRC_DIR/$DISTRO" --exclude='pbr-*.egg' --transform \"s,^$TRANSFORM,$TAR_NAME-$VERSION,\"" + echo "ERROR: default_build_srpm (${LINENO}): failed to create tar file, cmd: tar --exclude '.git*' --exclude 'build' --exclude='.pc' --exclude='patches' --exclude="$SRC_DIR/$DISTRO" --exclude='pbr-*.egg' --transform \"s,^$TRANSFORM,$TAR_NAME-$VERSION,\" -czf '$TAR_PATH' '$SRC_DIR'" exit 1 fi echo "Created tar file: $TAR_PATH" diff --git a/build-tools/mockchain-parallel b/build-tools/mockchain-parallel index 8645d30f..73029df6 100755 --- a/build-tools/mockchain-parallel +++ b/build-tools/mockchain-parallel @@ -24,10 +24,16 @@ interpreter_path () { get__version__ () { local path=${1} + local var="" if [ ! 
-f ${path} ]; then return 1 fi - grep __VERSION__= ${path} | cut -d '=' -f 2 | sed 's/"//g' + if file ${path} | grep -q 'Python script'; then + ver=$(grep __VERSION__= ${path} | cut -d '=' -f 2 | sed 's/"//g') + else + ver=$(${path} --version 2> /dev/null) + fi + echo $ver } VC_LESS_THAN=0 diff --git a/build-tools/mockchain-parallel-2.6 b/build-tools/mockchain-parallel-2.6 new file mode 100755 index 00000000..c159d9bf --- /dev/null +++ b/build-tools/mockchain-parallel-2.6 @@ -0,0 +1,1221 @@ +#!/usr/bin/python3 -tt +# -*- coding: utf-8 -*- +# vim: noai:ts=4:sw=4:expandtab + +# by skvidal@fedoraproject.org +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# copyright 2012 Red Hat, Inc. 
+ +# SUMMARY +# mockchain +# take a mock config and a series of srpms +# rebuild them one at a time +# adding each to a local repo +# so they are available as build deps to next pkg being built +from __future__ import print_function + +import cgi +# pylint: disable=deprecated-module +import optparse +import os +import re +import shutil +import subprocess +import sys +import tempfile +import time +import multiprocessing +import signal +import psutil + +import requests +# pylint: disable=import-error +from six.moves.urllib_parse import urlsplit + +import mockbuild.util + +from stxRpmUtils import splitRpmFilename + +# all of the variables below are substituted by the build system +__VERSION__="2.6" +SYSCONFDIR="/etc" +PYTHONDIR="/usr/lib/python3.6/site-packages" +PKGPYTHONDIR="/usr/lib/python3.6/site-packages/mockbuild" +MOCKCONFDIR = os.path.join(SYSCONFDIR, "mock") +# end build system subs + +mockconfig_path = '/etc/mock' + +def rpmName(path): + filename = os.path.basename(path) + (n, v, r, e, a) = splitRpmFilename(filename) + return n + +def createrepo(path): + global max_workers + if os.path.exists(path + '/repodata/repomd.xml'): + comm = ['/usr/bin/createrepo_c', '--update', '--retain-old-md', "%d" % max_workers, "--workers", "%d" % max_workers, path] + else: + comm = ['/usr/bin/createrepo_c', "--workers", "%d" % max_workers, path] + cmd = subprocess.Popen( + comm, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = cmd.communicate() + return out, err + + +g_opts = optparse.Values() + +def parse_args(args): + parser = optparse.OptionParser('\nmockchain -r mockcfg pkg1 [pkg2] [pkg3]') + parser.add_option( + '-r', '--root', default=None, dest='chroot', + metavar="CONFIG", + help="chroot config name/base to use in the mock build") + parser.add_option( + '-l', '--localrepo', default=None, + help="local path for the local repo, defaults to making its own") + parser.add_option( + '-c', '--continue', default=False, action='store_true', + dest='cont', + help="if 
a pkg fails to build, continue to the next one") + parser.add_option( + '-a', '--addrepo', default=[], action='append', + dest='repos', + help="add these repo baseurls to the chroot's yum config") + parser.add_option( + '--recurse', default=False, action='store_true', + help="if more than one pkg and it fails to build, try to build the rest and come back to it") + parser.add_option( + '--log', default=None, dest='logfile', + help="log to the file named by this option, defaults to not logging") + parser.add_option( + '--workers', default=1, dest='max_workers', + help="number of parallel build jobs") + parser.add_option( + '--worker-resources', default="", dest='worker_resources', + help="colon seperated list, how much mem in gb for each workers temfs") + parser.add_option( + '--basedir', default='/var/lib/mock', dest='basedir', + help="path to workspace") + parser.add_option( + '--tmp_prefix', default=None, dest='tmp_prefix', + help="tmp dir prefix - will default to username-pid if not specified") + parser.add_option( + '-m', '--mock-option', default=[], action='append', + dest='mock_option', + help="option to pass directly to mock") + parser.add_option( + '--mark-slow-name', default=[], action='append', + dest='slow_pkg_names_raw', + help="package name that is known to build slowly") + parser.add_option( + '--mark-slow-path', default=[], action='append', + dest='slow_pkgs_raw', + help="package path that is known to build slowly") + parser.add_option( + '--mark-big-name', default=[], action='append', + dest='big_pkg_names_raw', + help="package name that is known to require a lot of disk space to build") + parser.add_option( + '--mark-big-path', default=[], action='append', + dest='big_pkgs_raw', + help="package path that is known to require a lot of disk space to build") + parser.add_option( + '--srpm-dependency-file', default=None, + dest='srpm_dependency_file', + help="path to srpm dependency file") + parser.add_option( + '--rpm-dependency-file', default=None, + 
dest='rpm_dependency_file', + help="path to rpm dependency file") + parser.add_option( + '--rpm-to-srpm-map-file', default=None, + dest='rpm_to_srpm_map_file', + help="path to rpm to srpm map file") + + opts, args = parser.parse_args(args) + if opts.recurse: + opts.cont = True + + if not opts.chroot: + print("You must provide an argument to -r for the mock chroot") + sys.exit(1) + + if len(sys.argv) < 3: + print("You must specify at least 1 package to build") + sys.exit(1) + + return opts, args + + +REPOS_ID = [] + +slow_pkg_names={} +slow_pkgs={} +big_pkg_names={} +big_pkgs={} + +def generate_repo_id(baseurl): + """ generate repository id for yum.conf out of baseurl """ + repoid = "/".join(baseurl.split('//')[1:]).replace('/', '_') + repoid = re.sub(r'[^a-zA-Z0-9_]', '', repoid) + suffix = '' + i = 1 + while repoid + suffix in REPOS_ID: + suffix = str(i) + i += 1 + repoid = repoid + suffix + REPOS_ID.append(repoid) + return repoid + + +def set_build_idx(infile, destfile, build_idx, tmpfs_size_gb, opts): + # log(opts.logfile, "set_build_idx: infile=%s, destfile=%s, build_idx=%d, tmpfs_size_gb=%d" % (infile, destfile, build_idx, tmpfs_size_gb)) + + try: + with open(infile) as f: + code = compile(f.read(), infile, 'exec') + # pylint: disable=exec-used + exec(code) + + config_opts['root'] = config_opts['root'].replace('b0', 'b{0}'.format(build_idx)) + config_opts['rootdir'] = config_opts['rootdir'].replace('b0', 'b{0}'.format(build_idx)) + config_opts['cache_topdir'] = config_opts['cache_topdir'].replace('b0', 'b{0}'.format(build_idx)) + # log(opts.logfile, "set_build_idx: root=%s" % config_opts['root']) + # log(opts.logfile, "set_build_idx: cache_topdir=%s" % config_opts['cache_topdir']) + if tmpfs_size_gb > 0: + config_opts['plugin_conf']['tmpfs_enable'] = True + config_opts['plugin_conf']['tmpfs_opts'] = {} + config_opts['plugin_conf']['tmpfs_opts']['required_ram_mb'] = 1024 + config_opts['plugin_conf']['tmpfs_opts']['max_fs_size'] = "%dg" % tmpfs_size_gb + 
config_opts['plugin_conf']['tmpfs_opts']['mode'] = '0755' + config_opts['plugin_conf']['tmpfs_opts']['keep_mounted'] = True + # log(opts.logfile, "set_build_idx: plugin_conf->tmpfs_enable=%s" % config_opts['plugin_conf']['tmpfs_enable']) + # log(opts.logfile, "set_build_idx: plugin_conf->tmpfs_opts->max_fs_size=%s" % config_opts['plugin_conf']['tmpfs_opts']['max_fs_size']) + + with open(destfile, 'w') as br_dest: + for k, v in list(config_opts.items()): + br_dest.write("config_opts[%r] = %r\n" % (k, v)) + + try: + log(opts.logfile, "set_build_idx: os.makedirs %s" % config_opts['cache_topdir']) + if not os.path.isdir(config_opts['cache_topdir']): + os.makedirs(config_opts['cache_topdir'], exist_ok=True) + except (IOError, OSError): + return False, "Could not create dir: %s" % config_opts['cache_topdir'] + + cache_dir = "%s/%s/mock" % (config_opts['basedir'], config_opts['root']) + try: + log(opts.logfile, "set_build_idx: os.makedirs %s" % cache_dir) + if not os.path.isdir(cache_dir): + os.makedirs(cache_dir) + except (IOError, OSError): + return False, "Could not create dir: %s" % cache_dir + + return True, '' + except (IOError, OSError): + return False, "Could not write mock config to %s" % destfile + + return True, '' + +def set_basedir(infile, destfile, basedir, opts): + log(opts.logfile, "set_basedir: infile=%s, destfile=%s, basedir=%s" % (infile, destfile, basedir)) + try: + with open(infile) as f: + code = compile(f.read(), infile, 'exec') + # pylint: disable=exec-used + exec(code) + + config_opts['basedir'] = basedir + config_opts['resultdir'] = '{0}/result'.format(basedir) + config_opts['backup_base_dir'] = '{0}/backup'.format(basedir) + config_opts['root'] = 'mock/b0' + config_opts['cache_topdir'] = '{0}/cache/b0'.format(basedir) + config_opts['rootdir'] = '{0}/mock/b0/root'.format(basedir) + + with open(destfile, 'w') as br_dest: + for k, v in list(config_opts.items()): + br_dest.write("config_opts[%r] = %r\n" % (k, v)) + return True, '' + except (IOError, 
OSError): + return False, "Could not write mock config to %s" % destfile + + return True, '' + +def add_local_repo(infile, destfile, baseurl, repoid=None): + """take a mock chroot config and add a repo to it's yum.conf + infile = mock chroot config file + destfile = where to save out the result + baseurl = baseurl of repo you wish to add""" + global config_opts + + try: + with open(infile) as f: + code = compile(f.read(), infile, 'exec') + # pylint: disable=exec-used + exec(code) + if not repoid: + repoid = generate_repo_id(baseurl) + else: + REPOS_ID.append(repoid) + localyumrepo = """ +[%s] +name=%s +baseurl=%s +enabled=1 +skip_if_unavailable=1 +metadata_expire=0 +cost=1 +best=1 +""" % (repoid, baseurl, baseurl) + + config_opts['yum.conf'] += localyumrepo + with open(destfile, 'w') as br_dest: + for k, v in list(config_opts.items()): + br_dest.write("config_opts[%r] = %r\n" % (k, v)) + return True, '' + except (IOError, OSError): + return False, "Could not write mock config to %s" % destfile + + return True, '' + + +def do_build(opts, cfg, pkg): + + # returns 0, cmd, out, err = failure + # returns 1, cmd, out, err = success + # returns 2, None, None, None = already built + + signal.signal(signal.SIGTERM, child_signal_handler) + signal.signal(signal.SIGINT, child_signal_handler) + signal.signal(signal.SIGHUP, child_signal_handler) + signal.signal(signal.SIGABRT, child_signal_handler) + s_pkg = os.path.basename(pkg) + pdn = s_pkg.replace('.src.rpm', '') + resdir = '%s/%s' % (opts.local_repo_dir, pdn) + resdir = os.path.normpath(resdir) + if not os.path.exists(resdir): + os.makedirs(resdir) + + success_file = resdir + '/success' + fail_file = resdir + '/fail' + + if os.path.exists(success_file): + # return 2, None, None, None + sys.exit(2) + + # clean it up if we're starting over :) + if os.path.exists(fail_file): + os.unlink(fail_file) + + if opts.uniqueext == '': + mockcmd = ['/usr/bin/mock', + '--configdir', opts.config_path, + '--resultdir', resdir, + '--root', 
cfg, ] + else: + mockcmd = ['/usr/bin/mock', + '--configdir', opts.config_path, + '--resultdir', resdir, + '--uniqueext', opts.uniqueext, + '--root', cfg, ] + + # Ensure repo is up-to-date. + # Note: Merely adding --update to mockcmd failed to update + mockcmd_update=mockcmd[:] + mockcmd_update.append('--update') + cmd = subprocess.Popen( + mockcmd_update, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = cmd.communicate() + if cmd.returncode != 0: + if (isinstance(err, bytes)): + err = err.decode("utf-8") + sys.stderr.write(err) + + # heuristic here, if user pass for mock "-d foo", but we must be care to leave + # "-d'foo bar'" or "--define='foo bar'" as is + compiled_re_1 = re.compile(r'^(-\S)\s+(.+)') + compiled_re_2 = re.compile(r'^(--[^ =]+)[ =](.+)') + for option in opts.mock_option: + r_match = compiled_re_1.match(option) + if r_match: + mockcmd.extend([r_match.group(1), r_match.group(2)]) + else: + r_match = compiled_re_2.match(option) + if r_match: + mockcmd.extend([r_match.group(1), r_match.group(2)]) + else: + mockcmd.append(option) + + print('building %s' % s_pkg) + mockcmd.append(pkg) + cmd = subprocess.Popen( + mockcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = cmd.communicate() + if cmd.returncode == 0: + with open(success_file, 'w') as f: + f.write('done\n') + ret = 1 + else: + if (isinstance(err, bytes)): + err = err.decode("utf-8") + sys.stderr.write(err) + with open(fail_file, 'w') as f: + f.write('undone\n') + ret = 0 + + # return ret, cmd, out, err + sys.exit(ret) + + +def log(lf, msg): + if lf: + now = time.time() + try: + with open(lf, 'a') as f: + f.write(str(now) + ':' + msg + '\n') + except (IOError, OSError) as e: + print('Could not write to logfile %s - %s' % (lf, str(e))) + print(msg) + + +config_opts = {} + +worker_data = [] +workers = 0 +max_workers = 1 + +build_env = [] + +failed = [] +built_pkgs = [] + +local_repo_dir = "" + +pkg_to_name={} +name_to_pkg={} +srpm_dependencies_direct={}
+rpm_dependencies_direct={} +rpm_to_srpm_map={} +no_dep_list = [ "bash", "kernel" , "kernel-rt" ] + + +def init_build_env(slots, opts, config_opts_in): + global build_env + + orig_chroot_name=config_opts_in['chroot_name'] + orig_mock_config = os.path.join(opts.config_path, "{0}.cfg".format(orig_chroot_name)) + # build_env.append({'state': 'Idle', 'cfg': orig_mock_config}) + for i in range(0,slots): + new_chroot_name = "{0}.b{1}".format(orig_chroot_name, i) + new_mock_config = os.path.join(opts.config_path, "{0}.cfg".format(new_chroot_name)) + tmpfs_size_gb = 0 + if opts.worker_resources == "": + if i > 0: + tmpfs_size_gb = 2 * (1 + slots - i) + else: + resource_array=opts.worker_resources.split(':') + if i < len(resource_array): + tmpfs_size_gb=int(resource_array[i]) + else: + log(opts.logfile, "Error: worker-resources argument '%s' does not supply info for all %d workers" % (opts.worker_resources, slots)) + sys.exit(1) + if i == 0 and tmpfs_size_gb != 0: + log(opts.logfile, "Error: worker-resources argument '%s' must pass '0' as first value" % opts.worker_resources) + sys.exit(1) + build_env.append({'state': 'Idle', 'cfg': new_mock_config, 'fs_size_gb': tmpfs_size_gb}) + + res, msg = set_build_idx(orig_mock_config, new_mock_config, i, tmpfs_size_gb, opts) + if not res: + log(opts.logfile, "Error: Could not write out local config: %s" % msg) + sys.exit(1) + + +idle_build_env_last_awarded = 0 +def get_idle_build_env(slots): + global build_env + global idle_build_env_last_awarded + visited = 0 + + if slots < 1: + return -1 + + i = idle_build_env_last_awarded - 1 + if i < 0 or i >= slots: + i = slots - 1 + + while visited < slots: + if build_env[i]['state'] == 'Idle': + build_env[i]['state'] = 'Busy' + idle_build_env_last_awarded = i + return i + visited = visited + 1 + i = i - 1 + if i < 0: + i = slots - 1 + return -1 + +def release_build_env(idx): + global build_env + + build_env[idx]['state'] = 'Idle' + +def get_best_rc(a, b): + print("get_best_rc: a=%s" % 
str(a)) + print("get_best_rc: b=%s" % str(b)) + if (b == {}) and (a != {}): + return a + if (a == {}) and (b != {}): + return b + + if (b['build_name'] is None) and (not a['build_name'] is None): + return a + if (a['build_name'] is None) and (not b['build_name'] is None): + return b + + if a['unbuilt_deps'] < b['unbuilt_deps']: + return a + if b['unbuilt_deps'] < a['unbuilt_deps']: + return b + + if a['depth'] < b['depth']: + return a + if b['depth'] < a['depth']: + return b + + print("get_best_rc: uncertain %s vs %s" % (a,b)) + return a + +unbuilt_dep_list_print=False +def unbuilt_dep_list(name, unbuilt_pkg_names, depth, checked=None): + global srpm_dependencies_direct + global rpm_dependencies_direct + global rpm_to_srpm_map + global no_dep_list + global unbuilt_dep_list_print + + first_iteration=False + unbuilt = [] + if name in no_dep_list: + return unbuilt + + if checked is None: + first_iteration=True + checked=[] + + # Count unbuild dependencies + if first_iteration: + dependencies_direct=srpm_dependencies_direct + else: + dependencies_direct=rpm_dependencies_direct + + if name in dependencies_direct: + for rdep in dependencies_direct[name]: + sdep='???' 
+ if rdep in rpm_to_srpm_map: + sdep = rpm_to_srpm_map[rdep] + if rdep != name and sdep != name and not rdep in checked: + if (not first_iteration) and (sdep in no_dep_list): + continue + checked.append(rdep) + if sdep in unbuilt_pkg_names: + if not sdep in unbuilt: + unbuilt.append(sdep) + if depth > 0: + child_unbuilt = unbuilt_dep_list(rdep, unbuilt_pkg_names, depth-1, checked) + for sub_sdep in child_unbuilt: + if sub_sdep != name: + if not sub_sdep in unbuilt: + unbuilt.append(sub_sdep) + + return unbuilt + +def can_build_at_idx(build_idx, name, opts): + global pkg_to_name + global name_to_pkg + global big_pkgs + global big_pkg_names + global slow_pkgs + global slow_pkg_names + global build_env + + fs_size_gb = 0 + size_gb = 0 + speed = 0 + pkg = name_to_pkg[name] + if name in big_pkg_names: + size_gb=big_pkg_names[name] + if pkg in big_pkgs: + size_gb=big_pkgs[pkg] + if name in slow_pkg_names: + speed=slow_pkg_names[name] + if pkg in slow_pkgs: + speed=slow_pkgs[pkg] + fs_size_gb = build_env[build_idx]['fs_size_gb'] + return fs_size_gb == 0 or fs_size_gb >= size_gb + +def schedule(build_idx, pkgs, opts): + global worker_data + global pkg_to_name + global name_to_pkg + global big_pkgs + global big_pkg_names + global slow_pkgs + global slow_pkg_names + + unbuilt_pkg_names=[] + building_pkg_names=[] + unprioritized_pkg_names=[] + + for pkg in pkgs: + name = pkg_to_name[pkg] + unbuilt_pkg_names.append(name) + unprioritized_pkg_names.append(name) + + prioritized_pkg_names=[] + + for wd in worker_data: + pkg = wd['pkg'] + if not pkg is None: + name = pkg_to_name[pkg] + building_pkg_names.append(name) + + # log(opts.logfile, "schedule: build_idx=%d start" % build_idx) + if len(big_pkg_names) or len(big_pkgs): + next_unprioritized_pkg_names = unprioritized_pkg_names[:] + for name in unprioritized_pkg_names: + pkg = name_to_pkg[name] + if name in big_pkg_names or pkg in big_pkgs: + prioritized_pkg_names.append(name) + next_unprioritized_pkg_names.remove(name) + 
unprioritized_pkg_names = next_unprioritized_pkg_names[:] + + if len(slow_pkg_names) or len(slow_pkgs): + next_unprioritized_pkg_names = unprioritized_pkg_names[:] + for name in unprioritized_pkg_names: + pkg = name_to_pkg[name] + if name in slow_pkg_names or pkg in slow_pkgs: + if can_build_at_idx(build_idx, name, opts): + prioritized_pkg_names.append(name) + next_unprioritized_pkg_names.remove(name) + unprioritized_pkg_names = next_unprioritized_pkg_names[:] + + for name in unprioritized_pkg_names: + if can_build_at_idx(build_idx, name, opts): + prioritized_pkg_names.append(name) + + name_out = schedule2(build_idx, prioritized_pkg_names, unbuilt_pkg_names, building_pkg_names, opts) + if not name_out is None: + pkg_out = name_to_pkg[name_out] + else: + pkg_out = None + # log(opts.logfile, "schedule: failed to translate '%s' to a pkg" % name_out) + # log(opts.logfile, "schedule: build_idx=%d end: out = %s -> %s" % (build_idx, str(name_out), str(pkg_out))) + return pkg_out + + +def schedule2(build_idx, pkg_names, unbuilt_pkg_names, building_pkg_names, opts): + global pkg_to_name + global name_to_pkg + global no_dep_list + + max_depth = 3 + + if len(pkg_names) == 0: + return None + + unbuilt_deps={} + building_deps={} + for depth in range(max_depth,-1,-1): + unbuilt_deps[depth]={} + building_deps[depth]={} + + for depth in range(max_depth,-1,-1): + checked=[] + reordered_pkg_names = pkg_names[:] + # for name in reordered_pkg_names: + while len(reordered_pkg_names): + name = reordered_pkg_names.pop(0) + if name in checked: + continue + + # log(opts.logfile, "checked.append(%s)" % name) + checked.append(name) + + pkg = name_to_pkg[name] + # log(opts.logfile, "schedule2: check '%s', depth %d" % (name, depth)) + if not name in unbuilt_deps[depth]: + unbuilt_deps[depth][name] = unbuilt_dep_list(name, unbuilt_pkg_names, depth) + if not name in building_deps[depth]: + building_deps[depth][name] = unbuilt_dep_list(name, building_pkg_names, depth) + # log(opts.logfile, 
"schedule2: unbuilt deps for pkg=%s, depth=%d: %s" % (name, depth, unbuilt_deps[depth][name])) + # log(opts.logfile, "schedule2: building deps for pkg=%s, depth=%d: %s" % (name, depth, building_deps[depth][name])) + if len(unbuilt_deps[depth][name]) == 0 and len(building_deps[depth][name]) == 0: + if can_build_at_idx(build_idx, name, opts): + log(opts.logfile, "schedule2: no unbuilt deps for '%s', searching at depth %d" % (name, depth)) + return name + else: + # log(opts.logfile, "schedule2: Can't build '%s' on 'b%d'" % (name, build_idx)) + continue + + if not name in unbuilt_deps[0]: + unbuilt_deps[0][name] = unbuilt_dep_list(name, unbuilt_pkg_names, 0) + if not name in building_deps[0]: + building_deps[0][name] = unbuilt_dep_list(name, building_pkg_names, 0) + # log(opts.logfile, "schedule2: unbuilt deps for pkg=%s, depth=%d: %s" % (name, 0, unbuilt_deps[0][name])) + # log(opts.logfile, "schedule2: building deps for pkg=%s, depth=%d: %s" % (name, 0, building_deps[0][name])) + if (len(building_deps[depth][name]) == 0 and len(unbuilt_deps[depth][name]) == 1 and unbuilt_deps[depth][name][0] in no_dep_list) or (len(unbuilt_deps[depth][name]) == 0 and len(building_deps[depth][name]) == 1 and building_deps[depth][name][0] in no_dep_list): + if len(unbuilt_deps[0][name]) == 0 and len(building_deps[0][name]) == 0: + if can_build_at_idx(build_idx, name, opts): + log(opts.logfile, "schedule2: no unbuilt deps for '%s' except for indirect kernel dep, searching at depth %d" % (name, depth)) + return name + else: + # log(opts.logfile, "schedule2: Can't build '%s' on 'b%d'" % (name, build_idx)) + continue + + loop = False + for dep_name in unbuilt_deps[depth][name]: + if name == dep_name: + continue + + # log(opts.logfile, "name=%s depends on dep_name=%s, depth=%d" % (name, dep_name, depth)) + if dep_name in checked: + continue + + # log(opts.logfile, "schedule2: check '%s' indirect" % dep_name) + if not dep_name in unbuilt_deps[depth]: + unbuilt_deps[depth][dep_name] = 
unbuilt_dep_list(dep_name, unbuilt_pkg_names, depth) + if not dep_name in building_deps[depth]: + building_deps[depth][dep_name] = unbuilt_dep_list(dep_name, building_pkg_names, depth) + # log(opts.logfile, "schedule2: deps: unbuilt deps for %s -> %s, depth=%d: %s" % (name, dep_name, depth, unbuilt_deps[depth][dep_name])) + # log(opts.logfile, "schedule2: deps: building deps for %s -> %s, depth=%d: %s" % (name, dep_name, depth, building_deps[depth][dep_name])) + if len(unbuilt_deps[depth][dep_name]) == 0 and len(building_deps[depth][dep_name]) == 0: + if can_build_at_idx(build_idx, dep_name, opts): + log(opts.logfile, "schedule2: deps: no unbuilt deps for '%s', working towards '%s', searching at depth %d" % (dep_name, name, depth)) + return dep_name + + if not dep_name in unbuilt_deps[0]: + unbuilt_deps[0][dep_name] = unbuilt_dep_list(dep_name, unbuilt_pkg_names, 0) + if not dep_name in building_deps[0]: + building_deps[0][dep_name] = unbuilt_dep_list(dep_name, building_pkg_names, 0) + # log(opts.logfile, "schedule2: deps: unbuilt deps for %s -> %s, depth=%d: %s" % (name, dep_name, 0, unbuilt_deps[0][dep_name])) + # log(opts.logfile, "schedule2: deps: building deps for %s -> %s, depth=%d: %s" % (name, dep_name, 0, building_deps[0][dep_name])) + if (len(building_deps[depth][dep_name]) == 0 and len(unbuilt_deps[depth][dep_name]) == 1 and unbuilt_deps[depth][dep_name][0] in no_dep_list) or (len(unbuilt_deps[depth][dep_name]) == 0 and len(building_deps[depth][dep_name]) == 1 and building_deps[depth][dep_name][0] in no_dep_list): + if len(unbuilt_deps[0][dep_name]) == 0 and len(building_deps[0][dep_name]) == 0: + if can_build_at_idx(build_idx, dep_name, opts): + log(opts.logfile, "schedule2: no unbuilt deps for '%s' except for indirect kernel dep, working towards '%s', searching at depth %d" % (dep_name, name, depth)) + return dep_name + + if name in unbuilt_deps[0][dep_name]: + loop = True + # log(opts.logfile, "schedule2: loop detected: %s <-> %s" % (name, dep_name)) 
+ + if loop and len(building_deps[depth][name]) == 0: + log(opts.logfile, "schedule2: loop detected, try to build '%s'" % name) + return name + + for dep_name in unbuilt_deps[depth][name]: + if dep_name in reordered_pkg_names: + # log(opts.logfile, "schedule2: promote %s to work toward %s" % (dep_name, name)) + reordered_pkg_names.remove(dep_name) + reordered_pkg_names.insert(0,dep_name) + + # log(opts.logfile, "schedule2: Nothing buildable at this time") + return None + + +def read_deps(opts): + read_srpm_deps(opts) + read_rpm_deps(opts) + read_map_deps(opts) + +def read_srpm_deps(opts): + global srpm_dependencies_direct + + if opts.srpm_dependency_file == None: + return + + if not os.path.exists(opts.srpm_dependency_file): + log(opts.logfile, "File not found: %s" % opts.srpm_dependency_file) + sys.exit(1) + + with open(opts.srpm_dependency_file) as f: + lines = f.readlines() + for line in lines: + (name,deps) = line.rstrip().split(';') + srpm_dependencies_direct[name]=deps.split(',') + +def read_rpm_deps(opts): + global rpm_dependencies_direct + + if opts.rpm_dependency_file == None: + return + + if not os.path.exists(opts.rpm_dependency_file): + log(opts.logfile, "File not found: %s" % opts.rpm_dependency_file) + sys.exit(1) + + with open(opts.rpm_dependency_file) as f: + lines = f.readlines() + for line in lines: + (name,deps) = line.rstrip().split(';') + rpm_dependencies_direct[name]=deps.split(',') + +def read_map_deps(opts): + global rpm_to_srpm_map + + if opts.rpm_to_srpm_map_file == None: + return + + if not os.path.exists(opts.rpm_to_srpm_map_file): + log(opts.logfile, "File not found: %s" % opts.rpm_to_srpm_map_file) + sys.exit(1) + + with open(opts.rpm_to_srpm_map_file) as f: + lines = f.readlines() + for line in lines: + (rpm,srpm) = line.rstrip().split(';') + rpm_to_srpm_map[rpm]=srpm + + +def reaper(opts): + global built_pkgs + global failed + global worker_data + global workers + + reaped = 0 + need_createrepo = False + last_reaped = -1 + while 
reaped > last_reaped: + last_reaped = reaped + for wd in worker_data[:]: + p = wd['proc'] + ret = p.exitcode + if ret is not None: + pkg = wd['pkg'] + b = int(wd['build_index']) + p.join() + worker_data.remove(wd) + workers = workers - 1 + reaped = reaped + 1 + release_build_env(b) + + log(opts.logfile, "End build on 'b%d': %s" % (b, pkg)) + + if ret == 0: + failed.append(pkg) + log(opts.logfile, "Error building %s on 'b%d'." % (os.path.basename(pkg), b)) + if opts.recurse and not stop_signal: + log(opts.logfile, "Will try to build again (if some other package will succeed).") + else: + log(opts.logfile, "See logs/results in %s" % opts.local_repo_dir) + elif ret == 1: + log(opts.logfile, "Success building %s on 'b%d'" % (os.path.basename(pkg), b)) + built_pkgs.append(pkg) + need_createrepo = True + elif ret == 2: + log(opts.logfile, "Skipping already built pkg %s" % os.path.basename(pkg)) + + if need_createrepo: + # createrepo with the new pkgs + err = createrepo(opts.local_repo_dir)[1] + if err.strip(): + log(opts.logfile, "Error making local repo: %s" % opts.local_repo_dir) + log(opts.logfile, "Err: %s" % err) + + return reaped + +stop_signal = False + +def on_terminate(proc): + print("process {} terminated with exit code {}".format(proc, proc.returncode)) + +def kill_proc_and_descentents(parent, need_stop=False, verbose=False): + global g_opts + + if need_stop: + if verbose: + log(g_opts.logfile, "Stop %d" % parent.pid) + + try: + parent.send_signal(signal.SIGSTOP) + except: + # perhaps mock still running as root, give it a sec to drop privileges and try again + time.sleep(1) + parent.send_signal(signal.SIGSTOP) + + try: + children = parent.children(recursive=False) + except: + children = [] + + for p in children: + kill_proc_and_descentents(p, need_stop=True, verbose=verbose) + + if verbose: + log(g_opts.logfile, "Terminate %d" % parent.pid) + + # parent.send_signal(signal.SIGTERM) + try: + parent.terminate() + except: + # perhaps mock still running as root, give
it a sec to drop pivledges and try again + time.sleep(1) + parent.terminate() + + if need_stop: + if verbose: + log(g_opts.logfile, "Continue %d" % parent.pid) + + parent.send_signal(signal.SIGCONT) + + +def child_signal_handler(signum, frame): + global g_opts + my_pid = os.getpid() + # log(g_opts.logfile, "--------- child %d recieved signal %d" % (my_pid, signum)) + p = psutil.Process(my_pid) + kill_proc_and_descentents(p) + try: + sys.exit(0) + except SystemExit as e: + os._exit(0) + +def signal_handler(signum, frame): + global g_opts + global stop_signal + global workers + global worker_data + stop_signal = True + + # Signal processes to complete + log(g_opts.logfile, "recieved signal %d, Terminating children" % signum) + for wd in worker_data: + p = wd['proc'] + ret = p.exitcode + if ret is None: + # log(g_opts.logfile, "terminate child %d" % p.pid) + p.terminate() + else: + log(g_opts.logfile, "child return code was %d" % ret) + + # Wait for remaining processes to complete + log(g_opts.logfile, "===== wait for signaled jobs to complete =====") + while len(worker_data) > 0: + log(g_opts.logfile, " remaining workers: %d" % workers) + reaped = reaper(g_opts) + if reaped == 0: + time.sleep(0.1) + + try: + sys.exit(1) + except SystemExit as e: + os._exit(1) + +def main(args): + opts, args = parse_args(args) + # take mock config + list of pkgs + + global g_opts + global stop_signal + global build_env + global worker_data + global workers + global max_workers + + global slow_pkg_names + global slow_pkgs + global big_pkg_names + global big_pkgs + max_workers = int(opts.max_workers) + + global failed + global built_pkgs + + cfg = opts.chroot + pkgs = args[1:] + + # transform slow/big package options into dictionaries + for line in opts.slow_pkg_names_raw: + speed,name = line.split(":") + if speed != "": + slow_pkg_names[name]=int(speed) + for line in opts.slow_pkgs_raw: + speed,pkg = line.split(":") + if speed != "": + slow_pkgs[pkg]=int(speed) + for line in 
opts.big_pkg_names_raw: + size_gb,name = line.split(":") + if size_gb != "": + big_pkg_names[name]=int(size_gb) + for line in opts.big_pkgs_raw: + size_gb,pkg = line.split(":") + if size_gb != "": + big_pkgs[pkg]=int(size_gb) + + # Set up a mapping between pkg path and pkg name + global pkg_to_name + global name_to_pkg + for pkg in pkgs: + if not pkg.endswith('.rpm'): + log(opts.logfile, "%s doesn't appear to be an rpm - skipping" % pkg) + continue + + try: + name = rpmName(pkg) + except OSError as e: + print("Could not parse rpm %s" % pkg) + sys.exit(1) + + pkg_to_name[pkg] = name + name_to_pkg[name] = pkg + + read_deps(opts) + + global config_opts + config_opts = mockbuild.util.load_config(mockconfig_path, cfg, None, __VERSION__, PKGPYTHONDIR) + + if not opts.tmp_prefix: + try: + opts.tmp_prefix = os.getlogin() + except OSError as e: + print("Could not find login name for tmp dir prefix add --tmp_prefix") + sys.exit(1) + pid = os.getpid() + opts.uniqueext = '%s-%s' % (opts.tmp_prefix, pid) + + if opts.basedir != "/var/lib/mock": + opts.uniqueext = '' + + # create a tempdir for our local info + if opts.localrepo: + local_tmp_dir = os.path.abspath(opts.localrepo) + if not os.path.exists(local_tmp_dir): + os.makedirs(local_tmp_dir) + os.chmod(local_tmp_dir, 0o755) + else: + pre = 'mock-chain-%s-' % opts.uniqueext + local_tmp_dir = tempfile.mkdtemp(prefix=pre, dir='/var/tmp') + os.chmod(local_tmp_dir, 0o755) + + if opts.logfile: + opts.logfile = os.path.join(local_tmp_dir, opts.logfile) + if os.path.exists(opts.logfile): + os.unlink(opts.logfile) + + log(opts.logfile, "starting logfile: %s" % opts.logfile) + + opts.local_repo_dir = os.path.normpath(local_tmp_dir + '/results/' + config_opts['chroot_name'] + '/') + + if not os.path.exists(opts.local_repo_dir): + os.makedirs(opts.local_repo_dir, mode=0o755) + + local_baseurl = "file://%s" % opts.local_repo_dir + log(opts.logfile, "results dir: %s" % opts.local_repo_dir) + opts.config_path = 
os.path.normpath(local_tmp_dir + '/configs/' + config_opts['chroot_name'] + '/') + + if not os.path.exists(opts.config_path): + os.makedirs(opts.config_path, mode=0o755) + + log(opts.logfile, "config dir: %s" % opts.config_path) + + my_mock_config = os.path.join(opts.config_path, "{0}.cfg".format(config_opts['chroot_name'])) + + # modify with localrepo + res, msg = add_local_repo(config_opts['config_file'], my_mock_config, local_baseurl, 'local_build_repo') + if not res: + log(opts.logfile, "Error: Could not write out local config: %s" % msg) + sys.exit(1) + + for baseurl in opts.repos: + res, msg = add_local_repo(my_mock_config, my_mock_config, baseurl) + if not res: + log(opts.logfile, "Error: Could not add: %s to yum config in mock chroot: %s" % (baseurl, msg)) + sys.exit(1) + + res, msg = set_basedir(my_mock_config, my_mock_config, opts.basedir, opts) + if not res: + log(opts.logfile, "Error: Could not write out local config: %s" % msg) + sys.exit(1) + + # these files needed from the mock.config dir to make mock run + for fn in ['site-defaults.cfg', 'logging.ini']: + pth = mockconfig_path + '/' + fn + shutil.copyfile(pth, opts.config_path + '/' + fn) + + # createrepo on it + err = createrepo(opts.local_repo_dir)[1] + if err.strip(): + log(opts.logfile, "Error making local repo: %s" % opts.local_repo_dir) + log(opts.logfile, "Err: %s" % err) + sys.exit(1) + + init_build_env(max_workers, opts, config_opts) + + download_dir = tempfile.mkdtemp() + downloaded_pkgs = {} + built_pkgs = [] + try_again = True + to_be_built = pkgs + return_code = 0 + num_of_tries = 0 + + g_opts = opts + signal.signal(signal.SIGTERM, signal_handler) + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGHUP, signal_handler) + signal.signal(signal.SIGABRT, signal_handler) + + while try_again and not stop_signal: + num_of_tries += 1 + failed = [] + + log(opts.logfile, "===== iteration %d start =====" % num_of_tries) + + to_be_built_scheduled = to_be_built[:] + + need_reap 
= False + while len(to_be_built_scheduled) > 0: + # Free up a worker + while need_reap or workers >= max_workers: + need_reap = False + reaped = reaper(opts) + if reaped == 0: + time.sleep(0.1) + + if workers < max_workers: + workers = workers + 1 + + b = get_idle_build_env(max_workers) + if b < 0: + log(opts.logfile, "Failed to find idle build env for: %s" % pkg) + workers = workers - 1 + need_reap = True + continue + + pkg = schedule(b, to_be_built_scheduled, opts) + if pkg is None: + if workers <= 1: + # Remember we have one build environmnet reserved, so can't test for zero workers + log(opts.logfile, "failed to schedule from: %s" % to_be_built_scheduled) + pkg = to_be_built_scheduled[0] + log(opts.logfile, "All workers idle, forcing build of pkg=%s" % pkg) + else: + release_build_env(b) + workers = workers - 1 + need_reap = True + continue + + to_be_built_scheduled.remove(pkg) + + if not pkg.endswith('.rpm'): + log(opts.logfile, "%s doesn't appear to be an rpm - skipping" % pkg) + failed.append(pkg) + release_build_env(b) + need_reap = True + continue + + elif pkg.startswith('http://') or pkg.startswith('https://') or pkg.startswith('ftp://'): + url = pkg + try: + log(opts.logfile, 'Fetching %s' % url) + r = requests.get(url) + # pylint: disable=no-member + if r.status_code == requests.codes.ok: + fn = urlsplit(r.url).path.rsplit('/', 1)[1] + if 'content-disposition' in r.headers: + _, params = cgi.parse_header(r.headers['content-disposition']) + if 'filename' in params and params['filename']: + fn = params['filename'] + pkg = download_dir + '/' + fn + with open(pkg, 'wb') as fd: + for chunk in r.iter_content(4096): + fd.write(chunk) + except Exception as e: + log(opts.logfile, 'Error Downloading %s: %s' % (url, str(e))) + failed.append(url) + release_build_env(b) + need_reap = True + continue + else: + downloaded_pkgs[pkg] = url + + log(opts.logfile, "Start build on 'b%d': %s" % (b, pkg)) + # ret = do_build(opts, config_opts['chroot_name'], pkg)[0] + p = 
multiprocessing.Process(target=do_build, args=(opts, build_env[b]['cfg'], pkg)) + worker_data.append({'proc': p, 'pkg': pkg, 'build_index': int(b)}) + p.start() + + # Wait for remaining processes to complete + log(opts.logfile, "===== wait for last jobs in iteration %d to complete =====" % num_of_tries) + while workers > 0: + reaped = reaper(opts) + if reaped == 0: + time.sleep(0.1) + log(opts.logfile, "===== iteration %d complete =====" % num_of_tries) + + if failed and opts.recurse: + log(opts.logfile, "failed=%s" % failed) + log(opts.logfile, "to_be_built=%s" % to_be_built) + if len(failed) != len(to_be_built): + to_be_built = failed + try_again = True + log(opts.logfile, 'Some package succeeded, some failed.') + log(opts.logfile, 'Trying to rebuild %s failed pkgs, because --recurse is set.' % len(failed)) + else: + if max_workers > 1: + max_workers = 1 + to_be_built = failed + try_again = True + log(opts.logfile, 'Some package failed under parallel build.') + log(opts.logfile, 'Trying to rebuild %s failed pkgs with single thread, because --recurse is set.' 
% len(failed)) + else: + log(opts.logfile, "") + log(opts.logfile, "*** Build Failed ***") + log(opts.logfile, "Tried %s times - following pkgs could not be successfully built:" % num_of_tries) + log(opts.logfile, "*** Build Failed ***") + for pkg in failed: + msg = pkg + if pkg in downloaded_pkgs: + msg = downloaded_pkgs[pkg] + log(opts.logfile, msg) + log(opts.logfile, "") + try_again = False + else: + try_again = False + if failed: + return_code = 2 + + # cleaning up our download dir + shutil.rmtree(download_dir, ignore_errors=True) + + log(opts.logfile, "") + log(opts.logfile, "Results out to: %s" % opts.local_repo_dir) + log(opts.logfile, "") + log(opts.logfile, "Pkgs built: %s" % len(built_pkgs)) + if built_pkgs: + if failed: + if len(built_pkgs): + log(opts.logfile, "Some packages successfully built in this order:") + else: + log(opts.logfile, "Packages successfully built in this order:") + for pkg in built_pkgs: + log(opts.logfile, pkg) + return return_code + + +if __name__ == "__main__": + sys.exit(main(sys.argv)) diff --git a/build-tools/modify-build-cfg b/build-tools/modify-build-cfg index b639f521..78658623 100755 --- a/build-tools/modify-build-cfg +++ b/build-tools/modify-build-cfg @@ -1,5 +1,11 @@ #!/bin/sh +# +# Copyright (c) 2018-2020 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + # This script modifies a mock configuration file (typically $MY_BUILD_CFG) # to add build time environment variables to the mock environment (things # like what branch we're building on, etc). @@ -11,6 +17,11 @@ # usage: modify-build-cfg [file.cfg] [] # +MODIFY_BUILD_CFG_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )" + +# Set PKG_MANAGER for our build environment. +source "${MODIFY_BUILD_CFG_DIR}/pkg-manager-utils.sh" + LAYER=${2:-$LAYER} # For backward compatibility. Old repo location or new? @@ -23,6 +34,13 @@ if [ ! 
-d ${CENTOS_REPO} ]; then fi fi +# Preferred python +if rpm -q --whatprovides --quiet python3; then + PYTHON_PKG=python3 +else + PYTHON_PKG=python2 +fi + # Try to find a layer specific mock.cfg.proto MOCK_CFG_PROTO="${CENTOS_REPO}/mock.cfg.${LAYER}.proto" echo "==== Try MOCK_CFG_PROTO=$MOCK_CFG_PROTO ====" @@ -98,7 +116,7 @@ grep -q "config_opts\['environment'\]\['WRS_GIT_BRANCH'\]" $FILE || \ grep -q "config_opts\['environment'\]\['CGCS_GIT_BRANCH'\]" $FILE || \ echo "config_opts['environment']['CGCS_GIT_BRANCH']" >> $FILE -if [ -z $FORMAL_BUILD ]; then +if [ -z $FORMAL_BUILD ]; then grep -q "config_opts\['macros'\]\['%_no_cgcs_license_check'\] = '1'" $FILE || \ echo "config_opts['macros']['%_no_cgcs_license_check'] = '1'" >> $FILE else @@ -118,8 +136,21 @@ if [ "containers" == "$BUILD_TYPE" ]; then NETWORK_PKGS="bind-utils" fi -grep -q "config_opts\['chroot_setup_cmd'\] = 'install @buildsys-build pigz lbzip2 yum $NETWORK_PKGS'" $FILE || \ - echo "config_opts['chroot_setup_cmd'] = 'install @buildsys-build pigz lbzip2 yum $NETWORK_PKGS'" >> $FILE +BUILD_PKGS='' +if [ "${PKG_MANAGER}" == "yum" ]; then + BUILD_PKGS='@buildsys-build' +elif [ "${PKG_MANAGER}" == "dnf" ]; then + # buildsys-build group was dropped when Centos-8 switched to dnf. + # We must list all the members plus a few new ones (fedpkg-minimal, epel-rpm-macros). + BUILD_PKGS='bash bzip2 coreutils cpio diffutils epel-release epel-rpm-macros fedpkg-minimal findutils gawk gcc gcc-c++ grep gzip info make patch redhat-rpm-config redhat-release rpm-build sed shadow-utils tar unzip util-linux which xz' +fi + +STX_PKGS='pigz lbzip2 bash' + +PKGS="${BUILD_PKGS} ${STX_PKGS} ${PKG_MANAGER} ${PYTHON_PKG} ${NETWORK_PKGS}" + +grep -q "config_opts\['chroot_setup_cmd'\] = 'install ${PKGS}'" $FILE || \ + echo "config_opts['chroot_setup_cmd'] = 'install ${PKGS}'" >> $FILE # Special case for containers. 
# rpmbuild_networking is required for invoking helm commands within mock diff --git a/build-tools/pkg-manager-utils.sh b/build-tools/pkg-manager-utils.sh new file mode 100755 index 00000000..17f4abc1 --- /dev/null +++ b/build-tools/pkg-manager-utils.sh @@ -0,0 +1,33 @@ +# +# Copyright (c) 2020 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# BASH utilities to select package manager +# +# Currently just sets some environment variables +# + +# Yum vs DNF compatibility +YUM=$(which yum 2>> /dev/null) +DNF=$(which dnf 2>> /dev/null) +PKG_MANAGER="" +REPOQUERY=$(which repoquery 2>> /dev/null) +REPOQUERY_SUB_COMMAND="" +REPOQUERY_RESOLVE="--resolve" +REPOQUERY_WHATPROVIDES_DELIM=" " +if [ ! -z ${DNF} ]; then + PKG_MANAGER="dnf" + REPOQUERY=${DNF} + REPOQUERY_SUB_COMMAND="repoquery --disable-modular-filtering" + REPOQUERY_RESOLVE="" + REPOQUERY_WHATPROVIDES_DELIM="," +elif [ ! -z ${YUM} ]; then + PKG_MANAGER="yum" +else + >&2 echo "ERROR: Couldn't find a supported package manager" + exit 1 +fi + diff --git a/build-tools/repo_files/mock.cfg.all.proto b/build-tools/repo_files/mock.cfg.all.proto deleted file mode 100644 index 6ecaa2d2..00000000 --- a/build-tools/repo_files/mock.cfg.all.proto +++ /dev/null @@ -1,58 +0,0 @@ -config_opts['root'] = 'BUILD_ENV/mock' -config_opts['target_arch'] = 'x86_64' -config_opts['legal_host_arches'] = ('x86_64',) -config_opts['chroot_setup_cmd'] = 'install @buildsys-build' -config_opts['dist'] = 'el7' # only useful for --resultdir variable subst -config_opts['releasever'] = '7' -config_opts['rpmbuild_networking'] = False - - -config_opts['yum.conf'] = """ -[main] -keepcache=1 -debuglevel=2 -reposdir=/dev/null -logfile=/var/log/yum.log -retries=20 -obsoletes=1 -gpgcheck=0 -assumeyes=1 -syslog_ident=mock -syslog_device= - -# repos -[local-std] -name=local-std -baseurl=LOCAL_BASE/MY_BUILD_DIR/std/rpmbuild/RPMS -enabled=1 -skip_if_unavailable=1 -metadata_expire=0 - -[local-rt] -name=local-rt 
-baseurl=LOCAL_BASE/MY_BUILD_DIR/rt/rpmbuild/RPMS -enabled=1 -skip_if_unavailable=1 -metadata_expire=0 - -[local-installer] -name=local-installer -baseurl=LOCAL_BASE/MY_BUILD_DIR/installer/rpmbuild/RPMS -enabled=1 -skip_if_unavailable=1 -metadata_expire=0 - -[StxCentos7Distro] -name=Stx-Centos-7-Distro -enabled=1 -baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/Binary -failovermethod=priority -exclude=kernel-devel libvirt-devel - -[StxCentos7Distro-rt] -name=Stx-Centos-7-Distro-rt -enabled=1 -baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/rt/Binary -failovermethod=priority - -""" diff --git a/build-tools/repo_files/mock.cfg.all.proto b/build-tools/repo_files/mock.cfg.all.proto new file mode 120000 index 00000000..2ba14cf5 --- /dev/null +++ b/build-tools/repo_files/mock.cfg.all.proto @@ -0,0 +1 @@ +mock.cfg.centos7.all.proto \ No newline at end of file diff --git a/build-tools/repo_files/mock.cfg.centos7.all.proto b/build-tools/repo_files/mock.cfg.centos7.all.proto new file mode 100644 index 00000000..6ecaa2d2 --- /dev/null +++ b/build-tools/repo_files/mock.cfg.centos7.all.proto @@ -0,0 +1,58 @@ +config_opts['root'] = 'BUILD_ENV/mock' +config_opts['target_arch'] = 'x86_64' +config_opts['legal_host_arches'] = ('x86_64',) +config_opts['chroot_setup_cmd'] = 'install @buildsys-build' +config_opts['dist'] = 'el7' # only useful for --resultdir variable subst +config_opts['releasever'] = '7' +config_opts['rpmbuild_networking'] = False + + +config_opts['yum.conf'] = """ +[main] +keepcache=1 +debuglevel=2 +reposdir=/dev/null +logfile=/var/log/yum.log +retries=20 +obsoletes=1 +gpgcheck=0 +assumeyes=1 +syslog_ident=mock +syslog_device= + +# repos +[local-std] +name=local-std +baseurl=LOCAL_BASE/MY_BUILD_DIR/std/rpmbuild/RPMS +enabled=1 +skip_if_unavailable=1 +metadata_expire=0 + +[local-rt] +name=local-rt +baseurl=LOCAL_BASE/MY_BUILD_DIR/rt/rpmbuild/RPMS +enabled=1 +skip_if_unavailable=1 +metadata_expire=0 + +[local-installer] +name=local-installer 
+baseurl=LOCAL_BASE/MY_BUILD_DIR/installer/rpmbuild/RPMS +enabled=1 +skip_if_unavailable=1 +metadata_expire=0 + +[StxCentos7Distro] +name=Stx-Centos-7-Distro +enabled=1 +baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/Binary +failovermethod=priority +exclude=kernel-devel libvirt-devel + +[StxCentos7Distro-rt] +name=Stx-Centos-7-Distro-rt +enabled=1 +baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/rt/Binary +failovermethod=priority + +""" diff --git a/build-tools/repo_files/mock.cfg.centos7.distro.proto b/build-tools/repo_files/mock.cfg.centos7.distro.proto new file mode 100644 index 00000000..6ecaa2d2 --- /dev/null +++ b/build-tools/repo_files/mock.cfg.centos7.distro.proto @@ -0,0 +1,58 @@ +config_opts['root'] = 'BUILD_ENV/mock' +config_opts['target_arch'] = 'x86_64' +config_opts['legal_host_arches'] = ('x86_64',) +config_opts['chroot_setup_cmd'] = 'install @buildsys-build' +config_opts['dist'] = 'el7' # only useful for --resultdir variable subst +config_opts['releasever'] = '7' +config_opts['rpmbuild_networking'] = False + + +config_opts['yum.conf'] = """ +[main] +keepcache=1 +debuglevel=2 +reposdir=/dev/null +logfile=/var/log/yum.log +retries=20 +obsoletes=1 +gpgcheck=0 +assumeyes=1 +syslog_ident=mock +syslog_device= + +# repos +[local-std] +name=local-std +baseurl=LOCAL_BASE/MY_BUILD_DIR/std/rpmbuild/RPMS +enabled=1 +skip_if_unavailable=1 +metadata_expire=0 + +[local-rt] +name=local-rt +baseurl=LOCAL_BASE/MY_BUILD_DIR/rt/rpmbuild/RPMS +enabled=1 +skip_if_unavailable=1 +metadata_expire=0 + +[local-installer] +name=local-installer +baseurl=LOCAL_BASE/MY_BUILD_DIR/installer/rpmbuild/RPMS +enabled=1 +skip_if_unavailable=1 +metadata_expire=0 + +[StxCentos7Distro] +name=Stx-Centos-7-Distro +enabled=1 +baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/Binary +failovermethod=priority +exclude=kernel-devel libvirt-devel + +[StxCentos7Distro-rt] +name=Stx-Centos-7-Distro-rt +enabled=1 +baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/rt/Binary +failovermethod=priority + +""" diff --git 
a/build-tools/repo_files/mock.cfg.centos7.proto b/build-tools/repo_files/mock.cfg.centos7.proto new file mode 100644 index 00000000..beeea32f --- /dev/null +++ b/build-tools/repo_files/mock.cfg.centos7.proto @@ -0,0 +1,57 @@ +config_opts['root'] = 'BUILD_ENV/mock' +config_opts['target_arch'] = 'x86_64' +config_opts['legal_host_arches'] = ('x86_64',) +config_opts['chroot_setup_cmd'] = 'install @buildsys-build' +config_opts['dist'] = 'el7' # only useful for --resultdir variable subst +config_opts['releasever'] = '7' +config_opts['rpmbuild_networking'] = False + + +config_opts['yum.conf'] = """ +[main] +keepcache=1 +debuglevel=2 +reposdir=/dev/null +logfile=/var/log/yum.log +retries=20 +obsoletes=1 +gpgcheck=0 +assumeyes=1 +syslog_ident=mock +syslog_device= + +# repos +[local-std] +name=local-std +baseurl=LOCAL_BASE/MY_BUILD_DIR/std/rpmbuild/RPMS +enabled=1 +skip_if_unavailable=1 +metadata_expire=0 + +[local-rt] +name=local-rt +baseurl=LOCAL_BASE/MY_BUILD_DIR/rt/rpmbuild/RPMS +enabled=1 +skip_if_unavailable=1 +metadata_expire=0 + +[local-installer] +name=local-installer +baseurl=LOCAL_BASE/MY_BUILD_DIR/installer/rpmbuild/RPMS +enabled=1 +skip_if_unavailable=1 +metadata_expire=0 + +[StxCentos7Distro] +name=Stx-Centos-7-Distro +enabled=1 +baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/Binary +failovermethod=priority + +[StxCentos7Distro-rt] +name=Stx-Centos-7-Distro-rt +enabled=1 +baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/rt/Binary +failovermethod=priority + +""" diff --git a/build-tools/repo_files/mock.cfg.centos8.all.proto b/build-tools/repo_files/mock.cfg.centos8.all.proto new file mode 100644 index 00000000..c5bb65da --- /dev/null +++ b/build-tools/repo_files/mock.cfg.centos8.all.proto @@ -0,0 +1,63 @@ +config_opts['root'] = 'BUILD_ENV/mock' +config_opts['target_arch'] = 'x86_64' +config_opts['legal_host_arches'] = ('x86_64',) +config_opts['chroot_setup_cmd'] = 'install bash bzip2 coreutils cpio diffutils epel-release epel-rpm-macros fedpkg-minimal findutils gawk gcc 
gcc-c++ grep gzip info make patch redhat-rpm-config redhat-release rpm-build sed shadow-utils tar unzip util-linux which xz' +config_opts['dist'] = 'el8' # only useful for --resultdir variable subst +config_opts['releasever'] = '8' +config_opts['package_manager'] = 'dnf' +config_opts['use_bootstrap'] = False +config_opts['use_bootstrap_image'] = False +config_opts['rpmbuild_networking'] = False + + +config_opts['yum.conf'] = """ +[main] +keepcache=1 +debuglevel=2 +reposdir=/dev/null +logfile=/var/log/yum.log +retries=20 +obsoletes=1 +gpgcheck=0 +assumeyes=1 +syslog_ident=mock +syslog_device= + +# repos +[local-std] +name=local-std +baseurl=LOCAL_BASE/MY_BUILD_DIR/std/rpmbuild/RPMS +enabled=1 +skip_if_unavailable=1 +metadata_expire=0 + +[local-rt] +name=local-rt +baseurl=LOCAL_BASE/MY_BUILD_DIR/rt/rpmbuild/RPMS +enabled=1 +skip_if_unavailable=1 +metadata_expire=0 + +[local-installer] +name=local-installer +baseurl=LOCAL_BASE/MY_BUILD_DIR/installer/rpmbuild/RPMS +enabled=1 +skip_if_unavailable=1 +metadata_expire=0 + +[StxCentos8Distro] +name=Stx-Centos-8-Distro +enabled=1 +baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/Binary +failovermethod=priority +exclude=kernel-devel libvirt-devel +module_hotfixes=1 + +[StxCentos8Distro-rt] +name=Stx-Centos-8-Distro-rt +enabled=1 +baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/rt/Binary +failovermethod=priority +module_hotfixes=1 + +""" diff --git a/build-tools/repo_files/mock.cfg.centos8.distro.proto b/build-tools/repo_files/mock.cfg.centos8.distro.proto new file mode 100644 index 00000000..c5bb65da --- /dev/null +++ b/build-tools/repo_files/mock.cfg.centos8.distro.proto @@ -0,0 +1,63 @@ +config_opts['root'] = 'BUILD_ENV/mock' +config_opts['target_arch'] = 'x86_64' +config_opts['legal_host_arches'] = ('x86_64',) +config_opts['chroot_setup_cmd'] = 'install bash bzip2 coreutils cpio diffutils epel-release epel-rpm-macros fedpkg-minimal findutils gawk gcc gcc-c++ grep gzip info make patch redhat-rpm-config redhat-release rpm-build sed 
shadow-utils tar unzip util-linux which xz' +config_opts['dist'] = 'el8' # only useful for --resultdir variable subst +config_opts['releasever'] = '8' +config_opts['package_manager'] = 'dnf' +config_opts['use_bootstrap'] = False +config_opts['use_bootstrap_image'] = False +config_opts['rpmbuild_networking'] = False + + +config_opts['yum.conf'] = """ +[main] +keepcache=1 +debuglevel=2 +reposdir=/dev/null +logfile=/var/log/yum.log +retries=20 +obsoletes=1 +gpgcheck=0 +assumeyes=1 +syslog_ident=mock +syslog_device= + +# repos +[local-std] +name=local-std +baseurl=LOCAL_BASE/MY_BUILD_DIR/std/rpmbuild/RPMS +enabled=1 +skip_if_unavailable=1 +metadata_expire=0 + +[local-rt] +name=local-rt +baseurl=LOCAL_BASE/MY_BUILD_DIR/rt/rpmbuild/RPMS +enabled=1 +skip_if_unavailable=1 +metadata_expire=0 + +[local-installer] +name=local-installer +baseurl=LOCAL_BASE/MY_BUILD_DIR/installer/rpmbuild/RPMS +enabled=1 +skip_if_unavailable=1 +metadata_expire=0 + +[StxCentos8Distro] +name=Stx-Centos-8-Distro +enabled=1 +baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/Binary +failovermethod=priority +exclude=kernel-devel libvirt-devel +module_hotfixes=1 + +[StxCentos8Distro-rt] +name=Stx-Centos-8-Distro-rt +enabled=1 +baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/rt/Binary +failovermethod=priority +module_hotfixes=1 + +""" diff --git a/build-tools/repo_files/mock.cfg.centos8.proto b/build-tools/repo_files/mock.cfg.centos8.proto new file mode 100644 index 00000000..08a041b2 --- /dev/null +++ b/build-tools/repo_files/mock.cfg.centos8.proto @@ -0,0 +1,62 @@ +config_opts['root'] = 'BUILD_ENV/mock' +config_opts['target_arch'] = 'x86_64' +config_opts['legal_host_arches'] = ('x86_64',) +config_opts['chroot_setup_cmd'] = 'install bash bzip2 coreutils cpio diffutils epel-release epel-rpm-macros fedpkg-minimal findutils gawk gcc gcc-c++ grep gzip info make patch redhat-rpm-config redhat-release rpm-build sed shadow-utils tar unzip util-linux which xz' +config_opts['dist'] = 'el8' # only useful for --resultdir 
variable subst +config_opts['releasever'] = '8' +config_opts['package_manager'] = 'dnf' +config_opts['use_bootstrap'] = False +config_opts['use_bootstrap_image'] = False +config_opts['rpmbuild_networking'] = False + + +config_opts['yum.conf'] = """ +[main] +keepcache=1 +debuglevel=2 +reposdir=/dev/null +logfile=/var/log/yum.log +retries=20 +obsoletes=1 +gpgcheck=0 +assumeyes=1 +syslog_ident=mock +syslog_device= + +# repos +[local-std] +name=local-std +baseurl=LOCAL_BASE/MY_BUILD_DIR/std/rpmbuild/RPMS +enabled=1 +skip_if_unavailable=1 +metadata_expire=0 + +[local-rt] +name=local-rt +baseurl=LOCAL_BASE/MY_BUILD_DIR/rt/rpmbuild/RPMS +enabled=1 +skip_if_unavailable=1 +metadata_expire=0 + +[local-installer] +name=local-installer +baseurl=LOCAL_BASE/MY_BUILD_DIR/installer/rpmbuild/RPMS +enabled=1 +skip_if_unavailable=1 +metadata_expire=0 + +[StxCentos8Distro] +name=Stx-Centos-8-Distro +enabled=1 +baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/Binary +failovermethod=priority +module_hotfixes=1 + +[StxCentos8Distro-rt] +name=Stx-Centos-8-Distro-rt +enabled=1 +baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/rt/Binary +failovermethod=priority +module_hotfixes=1 + +""" diff --git a/build-tools/repo_files/mock.cfg.distro.proto b/build-tools/repo_files/mock.cfg.distro.proto deleted file mode 100644 index 6ecaa2d2..00000000 --- a/build-tools/repo_files/mock.cfg.distro.proto +++ /dev/null @@ -1,58 +0,0 @@ -config_opts['root'] = 'BUILD_ENV/mock' -config_opts['target_arch'] = 'x86_64' -config_opts['legal_host_arches'] = ('x86_64',) -config_opts['chroot_setup_cmd'] = 'install @buildsys-build' -config_opts['dist'] = 'el7' # only useful for --resultdir variable subst -config_opts['releasever'] = '7' -config_opts['rpmbuild_networking'] = False - - -config_opts['yum.conf'] = """ -[main] -keepcache=1 -debuglevel=2 -reposdir=/dev/null -logfile=/var/log/yum.log -retries=20 -obsoletes=1 -gpgcheck=0 -assumeyes=1 -syslog_ident=mock -syslog_device= - -# repos -[local-std] -name=local-std 
-baseurl=LOCAL_BASE/MY_BUILD_DIR/std/rpmbuild/RPMS -enabled=1 -skip_if_unavailable=1 -metadata_expire=0 - -[local-rt] -name=local-rt -baseurl=LOCAL_BASE/MY_BUILD_DIR/rt/rpmbuild/RPMS -enabled=1 -skip_if_unavailable=1 -metadata_expire=0 - -[local-installer] -name=local-installer -baseurl=LOCAL_BASE/MY_BUILD_DIR/installer/rpmbuild/RPMS -enabled=1 -skip_if_unavailable=1 -metadata_expire=0 - -[StxCentos7Distro] -name=Stx-Centos-7-Distro -enabled=1 -baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/Binary -failovermethod=priority -exclude=kernel-devel libvirt-devel - -[StxCentos7Distro-rt] -name=Stx-Centos-7-Distro-rt -enabled=1 -baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/rt/Binary -failovermethod=priority - -""" diff --git a/build-tools/repo_files/mock.cfg.distro.proto b/build-tools/repo_files/mock.cfg.distro.proto new file mode 120000 index 00000000..add71c08 --- /dev/null +++ b/build-tools/repo_files/mock.cfg.distro.proto @@ -0,0 +1 @@ +mock.cfg.centos7.distro.proto \ No newline at end of file diff --git a/build-tools/repo_files/mock.cfg.proto b/build-tools/repo_files/mock.cfg.proto deleted file mode 100644 index beeea32f..00000000 --- a/build-tools/repo_files/mock.cfg.proto +++ /dev/null @@ -1,57 +0,0 @@ -config_opts['root'] = 'BUILD_ENV/mock' -config_opts['target_arch'] = 'x86_64' -config_opts['legal_host_arches'] = ('x86_64',) -config_opts['chroot_setup_cmd'] = 'install @buildsys-build' -config_opts['dist'] = 'el7' # only useful for --resultdir variable subst -config_opts['releasever'] = '7' -config_opts['rpmbuild_networking'] = False - - -config_opts['yum.conf'] = """ -[main] -keepcache=1 -debuglevel=2 -reposdir=/dev/null -logfile=/var/log/yum.log -retries=20 -obsoletes=1 -gpgcheck=0 -assumeyes=1 -syslog_ident=mock -syslog_device= - -# repos -[local-std] -name=local-std -baseurl=LOCAL_BASE/MY_BUILD_DIR/std/rpmbuild/RPMS -enabled=1 -skip_if_unavailable=1 -metadata_expire=0 - -[local-rt] -name=local-rt -baseurl=LOCAL_BASE/MY_BUILD_DIR/rt/rpmbuild/RPMS -enabled=1 
-skip_if_unavailable=1 -metadata_expire=0 - -[local-installer] -name=local-installer -baseurl=LOCAL_BASE/MY_BUILD_DIR/installer/rpmbuild/RPMS -enabled=1 -skip_if_unavailable=1 -metadata_expire=0 - -[StxCentos7Distro] -name=Stx-Centos-7-Distro -enabled=1 -baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/Binary -failovermethod=priority - -[StxCentos7Distro-rt] -name=Stx-Centos-7-Distro-rt -enabled=1 -baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/rt/Binary -failovermethod=priority - -""" diff --git a/build-tools/repo_files/mock.cfg.proto b/build-tools/repo_files/mock.cfg.proto new file mode 120000 index 00000000..55c2e026 --- /dev/null +++ b/build-tools/repo_files/mock.cfg.proto @@ -0,0 +1 @@ +mock.cfg.centos7.proto \ No newline at end of file From 468097ba39cc7e34f9ea921d1250a6d565cd36f4 Mon Sep 17 00:00:00 2001 From: Don Penney Date: Wed, 9 Dec 2020 17:29:46 -0500 Subject: [PATCH 02/37] Remove Django from wheels tarball Wheels added to the wheels.cfg file result in modification to the upper-constraints.txt included in the wheels tarball to point to the explicit wheel added by the wheels.cfg. The inclusion of Django resulted in an upper-constraints.txt entry locking the version to 2.1.5. However, the openstack/horizon requirements.txt file includes the following: Django>=2.2,<3.0 This results in a conflict between the constraints and requirements. Recent updates to the pip resolver exposed the conflict, and pip was unable to find a version of Django to fit both. Since the Django entry was just downloading a pre-built wheel, as opposed to building an appropriate wheel from tarball, this update removes the entry altogether to avoid creating this conflict. 
Change-Id: I870a72f312c781a9b000fc98c4ed104c22469ebd Closes-Bug: 1907290 Signed-off-by: Don Penney --- build-tools/build-wheels/docker/stable-wheels.cfg | 1 - 1 file changed, 1 deletion(-) diff --git a/build-tools/build-wheels/docker/stable-wheels.cfg b/build-tools/build-wheels/docker/stable-wheels.cfg index addac690..b547ccdb 100644 --- a/build-tools/build-wheels/docker/stable-wheels.cfg +++ b/build-tools/build-wheels/docker/stable-wheels.cfg @@ -17,7 +17,6 @@ cmd2-0.8.9-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/e9/ construct-2.8.22-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/e5/c6/3e3aeef38bb0c27364af3d21493d9690c7c3925f298559bca3c48b7c9419/construct-2.8.22.tar.gz|construct-2.8.22 crc16-0.1.1-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/a6/e0/70a44c4385f2b33df82e518005aae16b5c1feaf082c73c0acebe3426fc0a/crc16-0.1.1.tar.gz|crc16-0.1.1|fix_setup demjson-2.2.4-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/96/67/6db789e2533158963d4af689f961b644ddd9200615b8ce92d6cad695c65a/demjson-2.2.4.tar.gz|demjson-2.2.4|fix_setup -Django-2.1.5-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/36/50/078a42b4e9bedb94efd3e0278c0eb71650ed9672cdc91bd5542953bec17f/Django-2.1.5-py3-none-any.whl django_debreach-2.0.1-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/2a/92/8c363cf5d1ee33d4c3b999b41c127c5cd3c64d4c20aa47bdfb6c386c9309/django_debreach-2.0.1-py3-none-any.whl django_floppyforms-1.8.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/a9/d2/498b883ac309b56b70c26877974bd50927615dd3f6433f5463e2668b1128/django_floppyforms-1.8.0-py2.py3-none-any.whl django_pyscss-2.0.2-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/4b/7f/d771802305184aac6010826f60a0b2ecaa3f57d19ab0e405f0c8db07e809/django-pyscss-2.0.2.tar.gz|django-pyscss-2.0.2 From a64a3078aa1ee2cb411f786e9b9c3526b0c7f36d Mon Sep 17 00:00:00 2001 From: Scott Little Date: Thu, 17 Dec 2020 14:02:40 
-0500 Subject: [PATCH 03/37] Ignore downloads-backup* directories Tell git to ignore the stx/download-backup* directories that are now produced by populate_downloads.sh. Partial-bug: 1908297 Signed-off-by: Scott Little Change-Id: I7899250266d5145708312ed414a24f2a8815051b --- stx/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/stx/.gitignore b/stx/.gitignore index 9c1366cd..c2c849eb 100644 --- a/stx/.gitignore +++ b/stx/.gitignore @@ -17,6 +17,7 @@ installer-prebuilt /distributedcloud-client /docs /downloads +/downloads-backup* /fault /git /gui From 357abccddc48e1a2382dab645ea30da2dce14431 Mon Sep 17 00:00:00 2001 From: Scott Little Date: Thu, 17 Dec 2020 13:55:52 -0500 Subject: [PATCH 04/37] Correctly get the age of tarballs through their symlink find throws an error if the PATH_LIST references a tarball under stx/downloads, which is a symlink to the tarballs real location under the stx-tools. Modify the find command to follow the symlink. Closes-Bug: 1908570 Signed-off-by: Scott Little Change-Id: Ia691c211715d561b3a4ffca6c4408927aa1e48e8 --- build-tools/build-srpms-parallel | 4 ++-- build-tools/build-srpms-serial | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/build-tools/build-srpms-parallel b/build-tools/build-srpms-parallel index 5d7b6282..3e667986 100755 --- a/build-tools/build-srpms-parallel +++ b/build-tools/build-srpms-parallel @@ -787,7 +787,7 @@ build_dir_srpm () { if [ "x$PATH_LIST" == "x" ]; then echo "0" else - AGE2=$(find $PATH_LIST -type f -exec stat --format '%Y' "{}" \; | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | sort -nr | head -n 1) + AGE2=$(find -L $PATH_LIST -type f -exec stat --format '%Y' "{}" \; | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | sort -nr | head -n 1) echo "$AGE2" fi ) @@ -1138,7 +1138,7 @@ build_dir_spec () { if [ "x$PATH_LIST" == "x" ]; then echo "0" else - AGE2=$(find $PATH_LIST -type f -exec stat --format '%Y' "{}" \; | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | sort -nr | 
head -n 1) + AGE2=$(find -L $PATH_LIST -type f -exec stat --format '%Y' "{}" \; | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | sort -nr | head -n 1) echo "$AGE2" fi ) diff --git a/build-tools/build-srpms-serial b/build-tools/build-srpms-serial index e83ac519..37d4e4bb 100755 --- a/build-tools/build-srpms-serial +++ b/build-tools/build-srpms-serial @@ -765,7 +765,7 @@ build_dir_srpm () { if [ "x$PATH_LIST" == "x" ]; then echo "0" else - AGE2=$(find $PATH_LIST -type f -exec stat --format '%Y' "{}" \; | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | sort -nr | head -n 1) + AGE2=$(find -L $PATH_LIST -type f -exec stat --format '%Y' "{}" \; | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | sort -nr | head -n 1) echo "$AGE2" fi ) @@ -1115,7 +1115,7 @@ build_dir_spec () { if [ "x$PATH_LIST" == "x" ]; then echo "0" else - AGE2=$(find $PATH_LIST -type f -exec stat --format '%Y' "{}" \; | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | sort -nr | head -n 1) + AGE2=$(find -L $PATH_LIST -type f -exec stat --format '%Y' "{}" \; | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | sort -nr | head -n 1) echo "$AGE2" fi ) From db323440cdb28dc629d3d6e4645e8f1532b03713 Mon Sep 17 00:00:00 2001 From: Poornima Date: Sat, 19 Dec 2020 02:57:56 +0530 Subject: [PATCH 05/37] Add SDO-rv-service to .gitignore - .gitignore: Add SDO-rv-service to .gitignore Story: 2008117 Task: 41480 Signed-off-by: Poornima Change-Id: Ifdf4d8950794f9d9711a860beb504c32629768bc --- stx/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/stx/.gitignore b/stx/.gitignore index 9c1366cd..7e95643a 100644 --- a/stx/.gitignore +++ b/stx/.gitignore @@ -34,6 +34,7 @@ installer-prebuilt /platform-armada-app /portieris-armada-app /rook-ceph +/SDO-rv-service /snmp-armada-app /stx-puppet /test From 427193dd6c5bae371e6191916edbdc36f6332b66 Mon Sep 17 00:00:00 2001 From: Don Penney Date: Mon, 21 Dec 2020 15:25:27 -0500 Subject: [PATCH 06/37] Exclude .tox folder when checking file ages The content of a .tox folder 
should not be considered when building srpms. Additionally, as this folder can be fairly large and include tens of thousands of files, a find listing can take some time. This commit updates these find commands to exclude the .tox folder from the search in order to save time if a user has run tox tests prior to the build. Change-Id: I035d730af8cdd443487f6ead3eec9e1e54f28709 Related-Bug: 1908940 Signed-off-by: Don Penney --- build-tools/build-srpms-parallel | 8 ++++---- build-tools/build-srpms-serial | 8 ++++---- build-tools/srpm-utils | 4 ++-- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/build-tools/build-srpms-parallel b/build-tools/build-srpms-parallel index 3e667986..f8e59e01 100755 --- a/build-tools/build-srpms-parallel +++ b/build-tools/build-srpms-parallel @@ -768,7 +768,7 @@ build_dir_srpm () { # Find age of youngest input file. # We will apply this as the creation/modification timestamp of the src.rpm we produce. # - AGE=$(find $PKG_BASE $ORIG_SRPM_PATH -type f -exec stat --format '%Y' "{}" \; | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | sort -nr | head -n 1) + AGE=$(find $PKG_BASE $ORIG_SRPM_PATH ! -path '*/.git/*' ! -path '*/.tox/*' -type f -exec stat --format '%Y' "{}" \; | sort -nr | head -n 1) if [ -f $PKG_BASE/$DATA ]; then AGE2=$( cd $PKG_BASE @@ -787,7 +787,7 @@ build_dir_srpm () { if [ "x$PATH_LIST" == "x" ]; then echo "0" else - AGE2=$(find -L $PATH_LIST -type f -exec stat --format '%Y' "{}" \; | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | sort -nr | head -n 1) + AGE2=$(find -L $PATH_LIST ! -path '*/.git/*' ! -path '*/.tox/*' -type f -exec stat --format '%Y' "{}" \; | sort -nr | head -n 1) echo "$AGE2" fi ) @@ -1115,7 +1115,7 @@ build_dir_spec () { # Find age of youngest input file. # We will apply this as the creation/modification timestamp of the src.rpm we produce. 
# - AGE=$(find $PKG_BASE -type f -exec stat --format '%Y' "{}" \; | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | sort -nr | head -n 1) + AGE=$(find $PKG_BASE ! -path '*/.git/*' ! -path '*/.tox/*' -type f -exec stat --format '%Y' "{}" \; | sort -nr | head -n 1) if [ -f $PKG_BASE/$DATA ]; then AGE2=$( cd $PKG_BASE @@ -1138,7 +1138,7 @@ build_dir_spec () { if [ "x$PATH_LIST" == "x" ]; then echo "0" else - AGE2=$(find -L $PATH_LIST -type f -exec stat --format '%Y' "{}" \; | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | sort -nr | head -n 1) + AGE2=$(find -L $PATH_LIST ! -path '*/.git/*' ! -path '*/.tox/*' -type f -exec stat --format '%Y' "{}" \; | sort -nr | head -n 1) echo "$AGE2" fi ) diff --git a/build-tools/build-srpms-serial b/build-tools/build-srpms-serial index 37d4e4bb..a9cceeab 100755 --- a/build-tools/build-srpms-serial +++ b/build-tools/build-srpms-serial @@ -747,7 +747,7 @@ build_dir_srpm () { # Find age of youngest input file. # We will apply this as the creation/modification timestamp of the src.rpm we produce. # - AGE=$(find $PKG_BASE $ORIG_SRPM_PATH -type f -exec stat --format '%Y' "{}" \; | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | sort -nr | head -n 1) + AGE=$(find $PKG_BASE $ORIG_SRPM_PATH ! -path '*/.git/*' ! -path '*/.tox/*' -type f -exec stat --format '%Y' "{}" \; | sort -nr | head -n 1) if [ -f $PKG_BASE/$DATA ]; then AGE2=$( cd $PKG_BASE @@ -765,7 +765,7 @@ build_dir_srpm () { if [ "x$PATH_LIST" == "x" ]; then echo "0" else - AGE2=$(find -L $PATH_LIST -type f -exec stat --format '%Y' "{}" \; | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | sort -nr | head -n 1) + AGE2=$(find -L $PATH_LIST ! -path '*/.git/*' ! -path '*/.tox/*' -type f -exec stat --format '%Y' "{}" \; | sort -nr | head -n 1) echo "$AGE2" fi ) @@ -1093,7 +1093,7 @@ build_dir_spec () { # Find age of youngest input file. # We will apply this as the creation/modification timestamp of the src.rpm we produce. 
# - AGE=$(find $PKG_BASE -type f -exec stat --format '%Y' "{}" \; | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | sort -nr | head -n 1) + AGE=$(find $PKG_BASE ! -path '*/.git/*' ! -path '*/.tox/*' -type f -exec stat --format '%Y' "{}" \; | sort -nr | head -n 1) if [ -f $PKG_BASE/$DATA ]; then AGE2=$( cd $PKG_BASE @@ -1115,7 +1115,7 @@ build_dir_spec () { if [ "x$PATH_LIST" == "x" ]; then echo "0" else - AGE2=$(find -L $PATH_LIST -type f -exec stat --format '%Y' "{}" \; | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | sort -nr | head -n 1) + AGE2=$(find -L $PATH_LIST ! -path '*/.git/*' ! -path '*/.tox/*' -type f -exec stat --format '%Y' "{}" \; | sort -nr | head -n 1) echo "$AGE2" fi ) diff --git a/build-tools/srpm-utils b/build-tools/srpm-utils index d035f346..e9299af8 100644 --- a/build-tools/srpm-utils +++ b/build-tools/srpm-utils @@ -3411,8 +3411,8 @@ srpm_source_file_list () { # Create lists of input files (INPUT_FILES) and symlinks (INPUT_LINKS). # First elements are absolute paths... while read path; do - find "${path}" -type f | grep -v -e '[/][.]git$' -e '[/][.]git[/]' -e '[/][.]tox[/]' >> $INPUT_FILES - find "${path}" -type l | grep -v -e '[/][.]git$' -e '[/][.]git[/]' -e '[/][.]tox[/]' >> $INPUT_LINKS + find "${path}" ! -path '*/.git/*' ! -path '*/.tox/*' -type f >> $INPUT_FILES + find "${path}" ! -path '*/.git/*' ! -path '*/.tox/*' -type l >> $INPUT_LINKS done < "${INPUT_SOURCES}" # Create sorted, unique list of canonical paths From 776fd4a557bb91337d790a3fe04ea914f1cd9acc Mon Sep 17 00:00:00 2001 From: Melissa Wang Date: Fri, 18 Dec 2020 15:57:49 -0500 Subject: [PATCH 07/37] build-img: Remove by-path symlinks for AWS images In the graphical installation of an AWS-compatible image, there are duplicate symlinks created by the udev persistent storage rules. This change comments out the deprecated persistent storage rules in /usr/lib/udev/rules.d/ during the setup work for the AWS image. This issue has not been found in regular qcow2 images. 
Tested on OpenStack in a distributed cloud system and on AWS. Story: 2007858 Task: 41482 Change-Id: Icbf9e753042e29536bd92b13e4d779d97de1c5f2 Signed-off-by: Melissa Wang --- build-tools/build-img | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/build-tools/build-img b/build-tools/build-img index 1b3d43cd..8bf65397 100755 --- a/build-tools/build-img +++ b/build-tools/build-img @@ -243,6 +243,10 @@ add_aws_setup(){ AWS_OAM_IF=ens5 AWS_MGMT_IF=ens6 cat >>"$ks_addon" <<_END + +# Comment out deprecated virtio by-path rules to avoid duplicate symlinks +sed -i 's/^\(KERNEL.*disk\/by-path\/virtio\)/#\1/' /usr/lib/udev/rules.d/60-persistent-storage.rules + cat >/etc/modules-load.d/ena.conf < Date: Tue, 5 Jan 2021 12:31:03 -0300 Subject: [PATCH 08/37] Add SNMP images to tag management Update yaml file to provide information about the source images of SNMP app and its references for static tags. Story: 2008132 Task: 41527 Signed-off-by: Gonzalo Gallardo Change-Id: I219bcc250eae62fed8c22e1670907474dab3b135 --- .../tag-management/image-tags.yaml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/build-tools/build-docker-images/tag-management/image-tags.yaml b/build-tools/build-docker-images/tag-management/image-tags.yaml index 9340cad2..10975b09 100644 --- a/build-tools/build-docker-images/tag-management/image-tags.yaml +++ b/build-tools/build-docker-images/tag-management/image-tags.yaml @@ -68,3 +68,15 @@ images: src_ref: https://opendev.org/starlingx/portieris-armada-app/commit/a6123ffebb77f23d5182576be17e69d62fd8d701 # Tag based on upstream package version tag: stx.5.0-v0.7.0 + - name: docker.io/starlingx/stx-snmp + src_build_tag: master-centos-stable-20210105T023146Z.0 + src_ref: https://opendev.org/starlingx/snmp-armada-app/commit/2b370655a7f9a506ea139cfffa6c466d1a82cce4 + tag: stx.5.0-v1.0.0 + - name: docker.io/starlingx/stx-fm-subagent + src_build_tag: master-centos-stable-20210105T023146Z.0 + src_ref: 
https://opendev.org/starlingx/snmp-armada-app/commit/2b370655a7f9a506ea139cfffa6c466d1a82cce4 + tag: stx.5.0-v1.0.0 + - name: docker.io/starlingx/stx-fm-trap-subagent + src_build_tag: master-centos-stable-20210105T023146Z.0 + src_ref: https://opendev.org/starlingx/snmp-armada-app/commit/2b370655a7f9a506ea139cfffa6c466d1a82cce4 + tag: stx.5.0-v1.0.0 From e6022190a0dd04bac3a350db549d5666d5f43d23 Mon Sep 17 00:00:00 2001 From: "Chen, Haochuan Z" Date: Fri, 8 Jan 2021 10:10:56 +0800 Subject: [PATCH 09/37] Add ceph performance tuning tool in dev image Story: 2008497 Task: 41554 Change-Id: I705189b560c513ef2aaa06003a9b77518f39d28d Signed-off-by: Chen, Haochuan Z --- build-tools/build_iso/image-dev.inc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/build-tools/build_iso/image-dev.inc b/build-tools/build_iso/image-dev.inc index d5de593a..63bc157d 100644 --- a/build-tools/build_iso/image-dev.inc +++ b/build-tools/build_iso/image-dev.inc @@ -2,3 +2,5 @@ # # They are exceptional packages only to be included in developer builds enable-dev-patch +fio +dstat From 023323eb44f2cb0903853e05938ff8b1ccb22051 Mon Sep 17 00:00:00 2001 From: Melissa Wang Date: Fri, 8 Jan 2021 11:13:55 -0500 Subject: [PATCH 10/37] build-img: add option to include additional rpms This change adds the option to add new packages to the qcow2 image after the iso has been built. 
Story: 2007858 Task: 41557 Change-Id: I7a75df2b62f56899ee4b1b69e360e07b09e1e391 Signed-off-by: Melissa Wang --- build-tools/build-img | 41 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/build-tools/build-img b/build-tools/build-img index 8bf65397..02e7f06b 100755 --- a/build-tools/build-img +++ b/build-tools/build-img @@ -20,6 +20,7 @@ TEMPFILES_DIR= SUDO=0 GRAPHICAL=0 TTY_SETTINGS= +RPM_ADDON_LIST=() # Print out the help message usage() { @@ -87,6 +88,8 @@ Create a QCOW2/QEMU image with StarlingX pre-installed --aws Prepare an image that can be loaded onto an AWS EC2 instance + --addon + Specify additional rpms to add to the qcow2 image ENVIRONMENT @@ -299,7 +302,7 @@ _END # Process command line init() { local temp - temp=$(getopt -o hf4w:W:e:p:P:Sm:gs:i:o: --long help,force,ipv4,ipv4-default-gateway:,ipv6-default-gateway:,oam-dev:,password:,passwords-from:,passwords-from-stdin,mode:,graphical,sudo,size:,iso:,output:,aws -n "$PROGNAME" -- "$@") || cmdline_error + temp=$(getopt -o hf4w:W:e:p:P:Sm:gs:i:o: --long help,force,ipv4,ipv4-default-gateway:,ipv6-default-gateway:,oam-dev:,password:,passwords-from:,passwords-from-stdin,mode:,graphical,sudo,size:,iso:,output:,aws,addon: -n "$PROGNAME" -- "$@") || cmdline_error eval set -- "$temp" while true ; do case "$1" in @@ -372,6 +375,10 @@ init() { AWS_COMPATIBLE=1 shift ;; + --addon) + RPM_ADDON_LIST+=("$2") + shift 2 + ;; --) shift break @@ -411,6 +418,13 @@ UPDATE_ISO=$MY_REPO/stx/utilities/utilities/platform-util/scripts/update-iso.sh # make sure input ISO file exists : <"$BOOTIMAGE_ISO" || exit 1 +# make sure patch_build.sh exists +PATCH_BUILD=$MY_REPO/stx/update/extras/scripts/patch_build.sh +: <"$PATCH_BUILD" || exit 1 + +# find patch-iso +which patch-iso >/dev/null || exit 1 + # find QEMU/KVM find_kvm @@ -488,6 +502,13 @@ echo "default via $IPV6_GW_ADDR dev $OAM_DEV metric 1" >/etc/sysconfig/network-s _END fi +# Disable cloud-init networking if cloud-init is 
installed +cat >>"$ks_addon" <<_END +if [ -d /etc/cloud/cloud.cfg.d/ ]; then + echo "network: {config: disabled}" > /etc/cloud/cloud.cfg.d/99-disable-networking.cfg +fi +_END + # Set passwords for user in "${!PASSWORDS[@]}" ; do encrypted=$(encrypt_password "${PASSWORDS[$user]}") @@ -538,6 +559,24 @@ cmd+=(-a "$ks_addon") echo "${cmd[@]}" "${cmd[@]}" || exit 1 +# patch the iso if additional rpms are specified +if [ ${#RPM_ADDON_LIST[@]} -gt 0 ] ; then + patch_file="PATCH.img-addon" + patched_iso="$TEMPFILES_DIR/bootimage_${AUTO_MODE}${GRAPHICAL_SUFFIX}_patched.iso" + cmd=("$PATCH_BUILD" --id "${patch_file}" --summary "additional packages for qcow2 image" --desc "Adds customizations to qcow2 image") + for rpm_addon in "${RPM_ADDON_LIST[@]}"; do + cmd+=(--all-nodes "${rpm_addon}") + done + # create the patch file + echo "${cmd[@]}" + "${cmd[@]}" || exit 1 + cmd=(patch-iso -i "$auto_iso" -o "$patched_iso" "${MY_WORKSPACE}/${patch_file}.patch") + # execute patch-iso + echo "${cmd[@]}" + "${cmd[@]}" || exit 1 + mv ${patched_iso} ${auto_iso} +fi + # create a blank image file rm -f "$IMG_FILE.tmp" cmd=(qemu-img create "$IMG_FILE.tmp" -f qcow2 "$IMG_SIZE") From 0aec356f26c745d100f12081ac88f51353e41092 Mon Sep 17 00:00:00 2001 From: Teresa Ho Date: Tue, 19 Jan 2021 12:23:25 -0500 Subject: [PATCH 11/37] Update tag for stx-oidc-client to stx.5.0-v1.0.4 Closes-Bug: 1911244 Signed-off-by: Teresa Ho Change-Id: I0f49f13dd3e111d436ef6b32d8576136faad53bb --- .../build-docker-images/tag-management/image-tags.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/build-tools/build-docker-images/tag-management/image-tags.yaml b/build-tools/build-docker-images/tag-management/image-tags.yaml index 10975b09..866786f0 100644 --- a/build-tools/build-docker-images/tag-management/image-tags.yaml +++ b/build-tools/build-docker-images/tag-management/image-tags.yaml @@ -37,10 +37,10 @@ images: # Version determined by running 'git describe --tags' in clone of upstream 
repo tag: stx.4.0-v0.11.0-109-gc48c502 - name: docker.io/starlingx/stx-oidc-client - src_build_tag: master-centos-stable-20200901T001315Z.0 - src_ref: https://opendev.org/starlingx/oidc-auth-armada-app/commit/957fc7c2092c7574a0a931012a0ec1cf5bb66429 - # StarlingX app. Setting version to v1.0.3 - tag: stx.5.0-v1.0.3 + src_build_tag: master-centos-stable-20210119T015305Z.0 + src_ref: https://opendev.org/starlingx/oidc-auth-armada-app/commit/70147e64e910e9878dd5bdf464cfd9672894ba18 + # StarlingX app. Setting version to v1.0.4 + tag: stx.5.0-v1.0.4 - name: docker.io/starlingx/dex src_build_tag: master-centos-stable-20200204T162546Z.0 src_ref: https://opendev.org/starlingx/oidc-auth-armada-app/commit/5d6701bdf214e77f460f2e3dd2b6f7d3186830c8 From 395308f881f90c8beb94a82844ee4bcebc560e4f Mon Sep 17 00:00:00 2001 From: Cole Walker Date: Thu, 4 Feb 2021 13:53:25 -0500 Subject: [PATCH 12/37] Update .gitignore with /ptp-notification-armada-app Required to keep repo status and git status commands clean Story: 2008529 Task: 41770 Signed-off-by: Cole Walker Change-Id: Ib00800c61cafcb174027c4c936ef000eb59c009e --- stx/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/stx/.gitignore b/stx/.gitignore index 9d8603c7..dfdf5b93 100644 --- a/stx/.gitignore +++ b/stx/.gitignore @@ -34,6 +34,7 @@ installer-prebuilt /openstack-armada-app /platform-armada-app /portieris-armada-app +/ptp-notification-armada-app /rook-ceph /SDO-rv-service /snmp-armada-app From 71d5ae9df9268be2803e2a45277e65c7c84c1fdb Mon Sep 17 00:00:00 2001 From: Scott Little Date: Tue, 9 Feb 2021 13:12:06 -0500 Subject: [PATCH 13/37] build-img fails if not run under $MY_WORKSPACE Build-img may need to generate a patch that is to be applied to the image being produced. The patch build tool drops the patch into the current directory. No explicit working directory was set. A subsequent step tries to apply the patch, but assumes it is in $MY_WORKSPACE. 
We want all build artifacts to be placed under $MY_WORKSPACE, so set $MY_WORKSPACE as the current directory before creating the patch. Closes-Bug: 1915182 Signed-off-by: Scott Little Change-Id: Ie2fbafae7a0ec90fc11e4af3d67dd15aec27c909 --- build-tools/build-img | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/build-tools/build-img b/build-tools/build-img index 02e7f06b..fb71bd1d 100755 --- a/build-tools/build-img +++ b/build-tools/build-img @@ -561,6 +561,9 @@ echo "${cmd[@]}" # patch the iso if additional rpms are specified if [ ${#RPM_ADDON_LIST[@]} -gt 0 ] ; then + # Patch build will drop the generated patch file into the current directory. + # We want that to be $MY_WORKSPACE. + pushd $MY_WORKSPACE patch_file="PATCH.img-addon" patched_iso="$TEMPFILES_DIR/bootimage_${AUTO_MODE}${GRAPHICAL_SUFFIX}_patched.iso" cmd=("$PATCH_BUILD" --id "${patch_file}" --summary "additional packages for qcow2 image" --desc "Adds customizations to qcow2 image") @@ -575,6 +578,7 @@ if [ ${#RPM_ADDON_LIST[@]} -gt 0 ] ; then echo "${cmd[@]}" "${cmd[@]}" || exit 1 mv ${patched_iso} ${auto_iso} + popd fi # create a blank image file From a595a39242d37cfd3c228f8fdbd1396b666a8e1d Mon Sep 17 00:00:00 2001 From: Cole Walker Date: Thu, 4 Feb 2021 13:03:18 -0500 Subject: [PATCH 14/37] Add ptp-notification images to tag management Includes tags for 3 images built in the ptp-notification-armada-app repo: notificationservice-base notificationclient-base locationservice-base Story: 2008529 Task: 41768 Signed-off-by: Cole Walker Change-Id: I25a1973bfd1b78787001a7427ed464c36bfacca4 --- .../tag-management/image-tags.yaml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/build-tools/build-docker-images/tag-management/image-tags.yaml b/build-tools/build-docker-images/tag-management/image-tags.yaml index 866786f0..c7d937b8 100644 --- a/build-tools/build-docker-images/tag-management/image-tags.yaml +++ b/build-tools/build-docker-images/tag-management/image-tags.yaml @@ -80,3 +80,15 @@ 
images: src_build_tag: master-centos-stable-20210105T023146Z.0 src_ref: https://opendev.org/starlingx/snmp-armada-app/commit/2b370655a7f9a506ea139cfffa6c466d1a82cce4 tag: stx.5.0-v1.0.0 + - name: docker.io/starlingx/notificationservice-base + src_build_tag: master-centos-stable-20210209T020052Z.0 + src_ref: https://opendev.org/starlingx/ptp-notification-armada-app/commit/f4344963f509ca58d4d3e10e0eb63e01d06e49f4 + tag: stx.5.0-v1.0.2 + - name: docker.io/starlingx/locationservice-base + src_build_tag: master-centos-stable-20210204T224209Z.0 + src_ref: https://opendev.org/starlingx/ptp-notification-armada-app/commit/545e6b6bb093235c2f8dab8d171f30c6ae8682d3 + tag: stx.5.0-v1.0.1 + - name: docker.io/starlingx/notificationclient-base + src_build_tag: master-centos-stable-20210204T224209Z.0 + src_ref: https://opendev.org/starlingx/ptp-notification-armada-app/commit/545e6b6bb093235c2f8dab8d171f30c6ae8682d3 + tag: stx.5.0-v1.0.1 From e6454552ae4f1c7e240c54babe19539f788595d2 Mon Sep 17 00:00:00 2001 From: Cole Walker Date: Wed, 17 Feb 2021 16:22:39 -0500 Subject: [PATCH 15/37] Update image tags for ptp-notification Update the image tags for: notificationservice-base notificationclient-base Story: 2008529 Task: 41768 Signed-off-by: Cole Walker Change-Id: Ia12d582d38121af8bb482c0150c5fadbbf4f464c --- .../tag-management/image-tags.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/build-tools/build-docker-images/tag-management/image-tags.yaml b/build-tools/build-docker-images/tag-management/image-tags.yaml index c7d937b8..57451137 100644 --- a/build-tools/build-docker-images/tag-management/image-tags.yaml +++ b/build-tools/build-docker-images/tag-management/image-tags.yaml @@ -81,14 +81,14 @@ images: src_ref: https://opendev.org/starlingx/snmp-armada-app/commit/2b370655a7f9a506ea139cfffa6c466d1a82cce4 tag: stx.5.0-v1.0.0 - name: docker.io/starlingx/notificationservice-base - src_build_tag: master-centos-stable-20210209T020052Z.0 - src_ref: 
https://opendev.org/starlingx/ptp-notification-armada-app/commit/f4344963f509ca58d4d3e10e0eb63e01d06e49f4 - tag: stx.5.0-v1.0.2 + src_build_tag: master-centos-stable-20210217T173034Z.0 + src_ref: https://opendev.org/starlingx/ptp-notification-armada-app/commit/b618223d037b0d720fbaa38e032145a72f2a7359 + tag: stx.5.0-v1.0.3 - name: docker.io/starlingx/locationservice-base src_build_tag: master-centos-stable-20210204T224209Z.0 src_ref: https://opendev.org/starlingx/ptp-notification-armada-app/commit/545e6b6bb093235c2f8dab8d171f30c6ae8682d3 tag: stx.5.0-v1.0.1 - name: docker.io/starlingx/notificationclient-base - src_build_tag: master-centos-stable-20210204T224209Z.0 - src_ref: https://opendev.org/starlingx/ptp-notification-armada-app/commit/545e6b6bb093235c2f8dab8d171f30c6ae8682d3 - tag: stx.5.0-v1.0.1 + src_build_tag: master-centos-stable-20210217T173034Z.0 + src_ref: https://opendev.org/starlingx/ptp-notification-armada-app/commit/435d2df181b4bc5dcdf65690858027faa742968c + tag: stx.5.0-v1.0.2 From c19642ada5e54bd919abc26bdd33f4ecd469b697 Mon Sep 17 00:00:00 2001 From: Steven Webster Date: Wed, 17 Feb 2021 13:40:23 -0500 Subject: [PATCH 16/37] Update k8-cni-sriov image to stx.5.0-v2.6-7-gb18123d8 This commit uprevs the SR-IOV CNI image to pick up a few bug fixes. Specifically, this commit will allow rate-limiting configuration on a VF to be retained after the VF has been used by a pod (and pod subsequently deleted). 
Testing: NICs: Ethernet Controller X710 for 10GbE SFP+ Mellanox MT27700 Family [ConnectX-4] Functional: Connectivity testing (kernel + DPDK) Devices allocated appropriately to pod Rate-limiting information retained after pod deletion Partial-Bug: #1915951 Signed-off-by: Steven Webster Change-Id: I3eff245392312f969aa23d162c7d45f75f598afe --- .../build-docker-images/tag-management/image-tags.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/build-tools/build-docker-images/tag-management/image-tags.yaml b/build-tools/build-docker-images/tag-management/image-tags.yaml index c7d937b8..5121f03b 100644 --- a/build-tools/build-docker-images/tag-management/image-tags.yaml +++ b/build-tools/build-docker-images/tag-management/image-tags.yaml @@ -2,10 +2,10 @@ --- images: - name: docker.io/starlingx/k8s-cni-sriov - src_build_tag: master-centos-stable-20191203T153530Z.0 - src_ref: https://opendev.org/starlingx/integ/commit/dac417bd31ed36d455e94db4aabe5916367654d4 - # Tag determined based on release tag associated with upstream commit - tag: stx.4.0-v2.2 + src_build_tag: master-centos-stable-20210218T003113Z.0 + src_ref: https://opendev.org/starlingx/integ/commit/eccff3b0e661592084d9114a9a41816761e1f9b5 + # Version determined by running 'git describe --tags' in clone of upstream repo + tag: stx.5.0-v2.6-7-gb18123d8 - name: docker.io/starlingx/k8s-plugins-sriov-network-device src_build_tag: master-centos-stable-20200512T184214Z.0 src_ref: https://opendev.org/starlingx/integ/commit/e2dc5c2dd0042788697ade268ac5c24fe9dc2f8c From 1d669071f928d5d1a63df4636176ea4ba71667f9 Mon Sep 17 00:00:00 2001 From: Scott Little Date: Fri, 19 Feb 2021 11:40:12 -0500 Subject: [PATCH 17/37] Support mock >= 2.7 Starting in Mock 2.7, mock's util.py was split, and a function we need was moved to the new config.py. 
Closes-Bug: 1916275 Change-Id: I4be183a855ca1fd8c18b67c63a0d69585598c2b3 Signed-off-by: Scott Little --- build-tools/mockchain-parallel-2.7 | 1221 ++++++++++++++++++++++++++++ 1 file changed, 1221 insertions(+) create mode 100755 build-tools/mockchain-parallel-2.7 diff --git a/build-tools/mockchain-parallel-2.7 b/build-tools/mockchain-parallel-2.7 new file mode 100755 index 00000000..b9f4bd3f --- /dev/null +++ b/build-tools/mockchain-parallel-2.7 @@ -0,0 +1,1221 @@ +#!/usr/bin/python3 -tt +# -*- coding: utf-8 -*- +# vim: noai:ts=4:sw=4:expandtab + +# by skvidal@fedoraproject.org +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# copyright 2012 Red Hat, Inc. 
+ +# SUMMARY +# mockchain +# take a mock config and a series of srpms +# rebuild them one at a time +# adding each to a local repo +# so they are available as build deps to next pkg being built +from __future__ import print_function + +import cgi +# pylint: disable=deprecated-module +import optparse +import os +import re +import shutil +import subprocess +import sys +import tempfile +import time +import multiprocessing +import signal +import psutil + +import requests +# pylint: disable=import-error +from six.moves.urllib_parse import urlsplit + +import mockbuild.config as mock_config + +from stxRpmUtils import splitRpmFilename + +# all of the variables below are substituted by the build system +__VERSION__="2.7" +SYSCONFDIR="/etc" +PYTHONDIR="/usr/lib/python3.6/site-packages" +PKGPYTHONDIR="/usr/lib/python3.6/site-packages/mockbuild" +MOCKCONFDIR = os.path.join(SYSCONFDIR, "mock") +# end build system subs + +mockconfig_path = '/etc/mock' + +def rpmName(path): + filename = os.path.basename(path) + (n, v, r, e, a) = splitRpmFilename(filename) + return n + +def createrepo(path): + global max_workers + if os.path.exists(path + '/repodata/repomd.xml'): + comm = ['/usr/bin/createrepo_c', '--update', '--retain-old-md', "%d" % max_workers, "--workers", "%d" % max_workers, path] + else: + comm = ['/usr/bin/createrepo_c', "--workers", "%d" % max_workers, path] + cmd = subprocess.Popen( + comm, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = cmd.communicate() + return out, err + + +g_opts = optparse.Values() + +def parse_args(args): + parser = optparse.OptionParser('\nmockchain -r mockcfg pkg1 [pkg2] [pkg3]') + parser.add_option( + '-r', '--root', default=None, dest='chroot', + metavar="CONFIG", + help="chroot config name/base to use in the mock build") + parser.add_option( + '-l', '--localrepo', default=None, + help="local path for the local repo, defaults to making its own") + parser.add_option( + '-c', '--continue', default=False, action='store_true', + 
dest='cont', + help="if a pkg fails to build, continue to the next one") + parser.add_option( + '-a', '--addrepo', default=[], action='append', + dest='repos', + help="add these repo baseurls to the chroot's yum config") + parser.add_option( + '--recurse', default=False, action='store_true', + help="if more than one pkg and it fails to build, try to build the rest and come back to it") + parser.add_option( + '--log', default=None, dest='logfile', + help="log to the file named by this option, defaults to not logging") + parser.add_option( + '--workers', default=1, dest='max_workers', + help="number of parallel build jobs") + parser.add_option( + '--worker-resources', default="", dest='worker_resources', + help="colon seperated list, how much mem in gb for each workers temfs") + parser.add_option( + '--basedir', default='/var/lib/mock', dest='basedir', + help="path to workspace") + parser.add_option( + '--tmp_prefix', default=None, dest='tmp_prefix', + help="tmp dir prefix - will default to username-pid if not specified") + parser.add_option( + '-m', '--mock-option', default=[], action='append', + dest='mock_option', + help="option to pass directly to mock") + parser.add_option( + '--mark-slow-name', default=[], action='append', + dest='slow_pkg_names_raw', + help="package name that is known to build slowly") + parser.add_option( + '--mark-slow-path', default=[], action='append', + dest='slow_pkgs_raw', + help="package path that is known to build slowly") + parser.add_option( + '--mark-big-name', default=[], action='append', + dest='big_pkg_names_raw', + help="package name that is known to require a lot of disk space to build") + parser.add_option( + '--mark-big-path', default=[], action='append', + dest='big_pkgs_raw', + help="package path that is known to require a lot of disk space to build") + parser.add_option( + '--srpm-dependency-file', default=None, + dest='srpm_dependency_file', + help="path to srpm dependency file") + parser.add_option( + 
'--rpm-dependency-file', default=None, + dest='rpm_dependency_file', + help="path to rpm dependency file") + parser.add_option( + '--rpm-to-srpm-map-file', default=None, + dest='rpm_to_srpm_map_file', + help="path to rpm to srpm map file") + + opts, args = parser.parse_args(args) + if opts.recurse: + opts.cont = True + + if not opts.chroot: + print("You must provide an argument to -r for the mock chroot") + sys.exit(1) + + if len(sys.argv) < 3: + print("You must specify at least 1 package to build") + sys.exit(1) + + return opts, args + + +REPOS_ID = [] + +slow_pkg_names={} +slow_pkgs={} +big_pkg_names={} +big_pkgs={} + +def generate_repo_id(baseurl): + """ generate repository id for yum.conf out of baseurl """ + repoid = "/".join(baseurl.split('//')[1:]).replace('/', '_') + repoid = re.sub(r'[^a-zA-Z0-9_]', '', repoid) + suffix = '' + i = 1 + while repoid + suffix in REPOS_ID: + suffix = str(i) + i += 1 + repoid = repoid + suffix + REPOS_ID.append(repoid) + return repoid + + +def set_build_idx(infile, destfile, build_idx, tmpfs_size_gb, opts): + # log(opts.logfile, "set_build_idx: infile=%s, destfile=%s, build_idx=%d, tmpfs_size_gb=%d" % (infile, destfile, build_idx, tmpfs_size_gb)) + + try: + with open(infile) as f: + code = compile(f.read(), infile, 'exec') + # pylint: disable=exec-used + exec(code) + + config_opts['root'] = config_opts['root'].replace('b0', 'b{0}'.format(build_idx)) + config_opts['rootdir'] = config_opts['rootdir'].replace('b0', 'b{0}'.format(build_idx)) + config_opts['cache_topdir'] = config_opts['cache_topdir'].replace('b0', 'b{0}'.format(build_idx)) + # log(opts.logfile, "set_build_idx: root=%s" % config_opts['root']) + # log(opts.logfile, "set_build_idx: cache_topdir=%s" % config_opts['cache_topdir']) + if tmpfs_size_gb > 0: + config_opts['plugin_conf']['tmpfs_enable'] = True + config_opts['plugin_conf']['tmpfs_opts'] = {} + config_opts['plugin_conf']['tmpfs_opts']['required_ram_mb'] = 1024 + 
config_opts['plugin_conf']['tmpfs_opts']['max_fs_size'] = "%dg" % tmpfs_size_gb + config_opts['plugin_conf']['tmpfs_opts']['mode'] = '0755' + config_opts['plugin_conf']['tmpfs_opts']['keep_mounted'] = True + # log(opts.logfile, "set_build_idx: plugin_conf->tmpfs_enable=%s" % config_opts['plugin_conf']['tmpfs_enable']) + # log(opts.logfile, "set_build_idx: plugin_conf->tmpfs_opts->max_fs_size=%s" % config_opts['plugin_conf']['tmpfs_opts']['max_fs_size']) + + with open(destfile, 'w') as br_dest: + for k, v in list(config_opts.items()): + br_dest.write("config_opts[%r] = %r\n" % (k, v)) + + try: + log(opts.logfile, "set_build_idx: os.makedirs %s" % config_opts['cache_topdir']) + if not os.path.isdir(config_opts['cache_topdir']): + os.makedirs(config_opts['cache_topdir'], exist_ok=True) + except (IOError, OSError): + return False, "Could not create dir: %s" % config_opts['cache_topdir'] + + cache_dir = "%s/%s/mock" % (config_opts['basedir'], config_opts['root']) + try: + log(opts.logfile, "set_build_idx: os.makedirs %s" % cache_dir) + if not os.path.isdir(cache_dir): + os.makedirs(cache_dir) + except (IOError, OSError): + return False, "Could not create dir: %s" % cache_dir + + return True, '' + except (IOError, OSError): + return False, "Could not write mock config to %s" % destfile + + return True, '' + +def set_basedir(infile, destfile, basedir, opts): + log(opts.logfile, "set_basedir: infile=%s, destfile=%s, basedir=%s" % (infile, destfile, basedir)) + try: + with open(infile) as f: + code = compile(f.read(), infile, 'exec') + # pylint: disable=exec-used + exec(code) + + config_opts['basedir'] = basedir + config_opts['resultdir'] = '{0}/result'.format(basedir) + config_opts['backup_base_dir'] = '{0}/backup'.format(basedir) + config_opts['root'] = 'mock/b0' + config_opts['cache_topdir'] = '{0}/cache/b0'.format(basedir) + config_opts['rootdir'] = '{0}/mock/b0/root'.format(basedir) + + with open(destfile, 'w') as br_dest: + for k, v in list(config_opts.items()): + 
br_dest.write("config_opts[%r] = %r\n" % (k, v)) + return True, '' + except (IOError, OSError): + return False, "Could not write mock config to %s" % destfile + + return True, '' + +def add_local_repo(infile, destfile, baseurl, repoid=None): + """take a mock chroot config and add a repo to it's yum.conf + infile = mock chroot config file + destfile = where to save out the result + baseurl = baseurl of repo you wish to add""" + global config_opts + + try: + with open(infile) as f: + code = compile(f.read(), infile, 'exec') + # pylint: disable=exec-used + exec(code) + if not repoid: + repoid = generate_repo_id(baseurl) + else: + REPOS_ID.append(repoid) + localyumrepo = """ +[%s] +name=%s +baseurl=%s +enabled=1 +skip_if_unavailable=1 +metadata_expire=0 +cost=1 +best=1 +""" % (repoid, baseurl, baseurl) + + config_opts['yum.conf'] += localyumrepo + with open(destfile, 'w') as br_dest: + for k, v in list(config_opts.items()): + br_dest.write("config_opts[%r] = %r\n" % (k, v)) + return True, '' + except (IOError, OSError): + return False, "Could not write mock config to %s" % destfile + + return True, '' + + +def do_build(opts, cfg, pkg): + + # returns 0, cmd, out, err = failure + # returns 1, cmd, out, err = success + # returns 2, None, None, None = already built + + signal.signal(signal.SIGTERM, child_signal_handler) + signal.signal(signal.SIGINT, child_signal_handler) + signal.signal(signal.SIGHUP, child_signal_handler) + signal.signal(signal.SIGABRT, child_signal_handler) + s_pkg = os.path.basename(pkg) + pdn = s_pkg.replace('.src.rpm', '') + resdir = '%s/%s' % (opts.local_repo_dir, pdn) + resdir = os.path.normpath(resdir) + if not os.path.exists(resdir): + os.makedirs(resdir) + + success_file = resdir + '/success' + fail_file = resdir + '/fail' + + if os.path.exists(success_file): + # return 2, None, None, None + sys.exit(2) + + # clean it up if we're starting over :) + if os.path.exists(fail_file): + os.unlink(fail_file) + + if opts.uniqueext == '': + mockcmd = 
['/usr/bin/mock', + '--configdir', opts.config_path, + '--resultdir', resdir, + '--root', cfg, ] + else: + mockcmd = ['/usr/bin/mock', + '--configdir', opts.config_path, + '--resultdir', resdir, + '--uniqueext', opts.uniqueext, + '--root', cfg, ] + + # Ensure repo is up-to-date. + # Note: Merely adding --update to mockcmd failed to update + mockcmd_update=mockcmd + mockcmd_update.append('--update') + cmd = subprocess.Popen( + mockcmd_update, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = cmd.communicate() + if cmd.returncode != 0: + if (isinstance(err, bytes)): + err = err.decode("utf-8") + sys.stderr.write(err) + + # heuristic here, if user pass for mock "-d foo", but we must be care to leave + # "-d'foo bar'" or "--define='foo bar'" as is + compiled_re_1 = re.compile(r'^(-\S)\s+(.+)') + compiled_re_2 = re.compile(r'^(--[^ =])[ =](\.+)') + for option in opts.mock_option: + r_match = compiled_re_1.match(option) + if r_match: + mockcmd.extend([r_match.group(1), r_match.group(2)]) + else: + r_match = compiled_re_2.match(option) + if r_match: + mockcmd.extend([r_match.group(1), r_match.group(2)]) + else: + mockcmd.append(option) + + print('building %s' % s_pkg) + mockcmd.append(pkg) + cmd = subprocess.Popen( + mockcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = cmd.communicate() + if cmd.returncode == 0: + with open(success_file, 'w') as f: + f.write('done\n') + ret = 1 + else: + if (isinstance(err, bytes)): + err = err.decode("utf-8") + sys.stderr.write(err) + with open(fail_file, 'w') as f: + f.write('undone\n') + ret = 0 + + # return ret, cmd, out, err + sys.exit(ret) + + +def log(lf, msg): + if lf: + now = time.time() + try: + with open(lf, 'a') as f: + f.write(str(now) + ':' + msg + '\n') + except (IOError, OSError) as e: + print('Could not write to logfile %s - %s' % (lf, str(e))) + print(msg) + + +config_opts = {} + +worker_data = [] +workers = 0 +max_workers = 1 + +build_env = [] + +failed = [] +built_pkgs = [] + +local_repo_dir 
= "" + +pkg_to_name={} +name_to_pkg={} +srpm_dependencies_direct={} +rpm_dependencies_direct={} +rpm_to_srpm_map={} +no_dep_list = [ "bash", "kernel" , "kernel-rt" ] + + +def init_build_env(slots, opts, config_opts_in): + global build_env + + orig_chroot_name=config_opts_in['chroot_name'] + orig_mock_config = os.path.join(opts.config_path, "{0}.cfg".format(orig_chroot_name)) + # build_env.append({'state': 'Idle', 'cfg': orig_mock_config}) + for i in range(0,slots): + new_chroot_name = "{0}.b{1}".format(orig_chroot_name, i) + new_mock_config = os.path.join(opts.config_path, "{0}.cfg".format(new_chroot_name)) + tmpfs_size_gb = 0 + if opts.worker_resources == "": + if i > 0: + tmpfs_size_gb = 2 * (1 + slots - i) + else: + resource_array=opts.worker_resources.split(':') + if i < len(resource_array): + tmpfs_size_gb=int(resource_array[i]) + else: + log(opts.logfile, "Error: worker-resources argument '%s' does not supply info for all %d workers" % (opts.worker_resources, slots)) + sys.exit(1) + if i == 0 and tmpfs_size_gb != 0: + log(opts.logfile, "Error: worker-resources argument '%s' must pass '0' as first value" % (opts.worker_resources, slots)) + sys.exit(1) + build_env.append({'state': 'Idle', 'cfg': new_mock_config, 'fs_size_gb': tmpfs_size_gb}) + + res, msg = set_build_idx(orig_mock_config, new_mock_config, i, tmpfs_size_gb, opts) + if not res: + log(opts.logfile, "Error: Could not write out local config: %s" % msg) + sys.exit(1) + + +idle_build_env_last_awarded = 0 +def get_idle_build_env(slots): + global build_env + global idle_build_env_last_awarded + visited = 0 + + if slots < 1: + return -1 + + i = idle_build_env_last_awarded - 1 + if i < 0 or i >= slots: + i = slots - 1 + + while visited < slots: + if build_env[i]['state'] == 'Idle': + build_env[i]['state'] = 'Busy' + idle_build_env_last_awarded = i + return i + visited = visited + 1 + i = i - 1 + if i < 0: + i = slots - 1 + return -1 + +def release_build_env(idx): + global build_env + + 
build_env[idx]['state'] = 'Idle' + +def get_best_rc(a, b): + print("get_best_rc: a=%s" % str(a)) + print("get_best_rc: b=%s" % str(b)) + if (b == {}) and (a != {}): + return a + if (a == {}) and (b != {}): + return b + + if (b['build_name'] is None) and (not a['build_name'] is None): + return a + if (a['build_name'] is None) and (not b['build_name'] is None): + return b + + if a['unbuilt_deps'] < b['unbuilt_deps']: + return a + if b['unbuilt_deps'] < a['unbuilt_deps']: + return b + + if a['depth'] < b['depth']: + return a + if b['depth'] < a['depth']: + return b + + print("get_best_rc: uncertain %s vs %s" % (a,b)) + return a + +unbuilt_dep_list_print=False +def unbuilt_dep_list(name, unbuilt_pkg_names, depth, checked=None): + global srpm_dependencies_direct + global rpm_dependencies_direct + global rpm_to_srpm_map + global no_dep_list + global unbuilt_dep_list_print + + first_iteration=False + unbuilt = [] + if name in no_dep_list: + return unbuilt + + if checked is None: + first_iteration=True + checked=[] + + # Count unbuild dependencies + if first_iteration: + dependencies_direct=srpm_dependencies_direct + else: + dependencies_direct=rpm_dependencies_direct + + if name in dependencies_direct: + for rdep in dependencies_direct[name]: + sdep='???' 
+ if rdep in rpm_to_srpm_map: + sdep = rpm_to_srpm_map[rdep] + if rdep != name and sdep != name and not rdep in checked: + if (not first_iteration) and (sdep in no_dep_list): + continue + checked.append(rdep) + if sdep in unbuilt_pkg_names: + if not sdep in unbuilt: + unbuilt.append(sdep) + if depth > 0: + child_unbuilt = unbuilt_dep_list(rdep, unbuilt_pkg_names, depth-1, checked) + for sub_sdep in child_unbuilt: + if sub_sdep != name: + if not sub_sdep in unbuilt: + unbuilt.append(sub_sdep) + + return unbuilt + +def can_build_at_idx(build_idx, name, opts): + global pkg_to_name + global name_to_pkg + global big_pkgs + global big_pkg_names + global slow_pkgs + global slow_pkg_names + global build_env + + fs_size_gb = 0 + size_gb = 0 + speed = 0 + pkg = name_to_pkg[name] + if name in big_pkg_names: + size_gb=big_pkg_names[name] + if pkg in big_pkgs: + size_gb=big_pkgs[pkg] + if name in slow_pkg_names: + speed=slow_pkg_names[name] + if pkg in slow_pkgs: + speed=slow_pkgs[pkg] + fs_size_gb = build_env[build_idx]['fs_size_gb'] + return fs_size_gb == 0 or fs_size_gb >= size_gb + +def schedule(build_idx, pkgs, opts): + global worker_data + global pkg_to_name + global name_to_pkg + global big_pkgs + global big_pkg_names + global slow_pkgs + global slow_pkg_names + + unbuilt_pkg_names=[] + building_pkg_names=[] + unprioritized_pkg_names=[] + + for pkg in pkgs: + name = pkg_to_name[pkg] + unbuilt_pkg_names.append(name) + unprioritized_pkg_names.append(name) + + prioritized_pkg_names=[] + + for wd in worker_data: + pkg = wd['pkg'] + if not pkg is None: + name = pkg_to_name[pkg] + building_pkg_names.append(name) + + # log(opts.logfile, "schedule: build_idx=%d start" % build_idx) + if len(big_pkg_names) or len(big_pkgs): + next_unprioritized_pkg_names = unprioritized_pkg_names[:] + for name in unprioritized_pkg_names: + pkg = name_to_pkg[name] + if name in big_pkg_names or pkg in big_pkgs: + prioritized_pkg_names.append(name) + next_unprioritized_pkg_names.remove(name) + 
unprioritized_pkg_names = next_unprioritized_pkg_names[:] + + if len(slow_pkg_names) or len(slow_pkgs): + next_unprioritized_pkg_names = unprioritized_pkg_names[:] + for name in unprioritized_pkg_names: + pkg = name_to_pkg[name] + if name in slow_pkg_names or pkg in slow_pkgs: + if can_build_at_idx(build_idx, name, opts): + prioritized_pkg_names.append(name) + next_unprioritized_pkg_names.remove(name) + unprioritized_pkg_names = next_unprioritized_pkg_names[:] + + for name in unprioritized_pkg_names: + if can_build_at_idx(build_idx, name, opts): + prioritized_pkg_names.append(name) + + name_out = schedule2(build_idx, prioritized_pkg_names, unbuilt_pkg_names, building_pkg_names, opts) + if not name_out is None: + pkg_out = name_to_pkg[name_out] + else: + pkg_out = None + # log(opts.logfile, "schedule: failed to translate '%s' to a pkg" % name_out) + # log(opts.logfile, "schedule: build_idx=%d end: out = %s -> %s" % (build_idx, str(name_out), str(pkg_out))) + return pkg_out + + +def schedule2(build_idx, pkg_names, unbuilt_pkg_names, building_pkg_names, opts): + global pkg_to_name + global name_to_pkg + global no_dep_list + + max_depth = 3 + + if len(pkg_names) == 0: + return None + + unbuilt_deps={} + building_deps={} + for depth in range(max_depth,-1,-1): + unbuilt_deps[depth]={} + building_deps[depth]={} + + for depth in range(max_depth,-1,-1): + checked=[] + reordered_pkg_names = pkg_names[:] + # for name in reordered_pkg_names: + while len(reordered_pkg_names): + name = reordered_pkg_names.pop(0) + if name in checked: + continue + + # log(opts.logfile, "checked.append(%s)" % name) + checked.append(name) + + pkg = name_to_pkg[name] + # log(opts.logfile, "schedule2: check '%s', depth %d" % (name, depth)) + if not name in unbuilt_deps[depth]: + unbuilt_deps[depth][name] = unbuilt_dep_list(name, unbuilt_pkg_names, depth) + if not name in building_deps[depth]: + building_deps[depth][name] = unbuilt_dep_list(name, building_pkg_names, depth) + # log(opts.logfile, 
"schedule2: unbuilt deps for pkg=%s, depth=%d: %s" % (name, depth, unbuilt_deps[depth][name])) + # log(opts.logfile, "schedule2: building deps for pkg=%s, depth=%d: %s" % (name, depth, building_deps[depth][name])) + if len(unbuilt_deps[depth][name]) == 0 and len(building_deps[depth][name]) == 0: + if can_build_at_idx(build_idx, name, opts): + log(opts.logfile, "schedule2: no unbuilt deps for '%s', searching at depth %d" % (name, depth)) + return name + else: + # log(opts.logfile, "schedule2: Can't build '%s' on 'b%d'" % (name, build_idx)) + continue + + if not name in unbuilt_deps[0]: + unbuilt_deps[0][name] = unbuilt_dep_list(name, unbuilt_pkg_names, 0) + if not name in building_deps[0]: + building_deps[0][name] = unbuilt_dep_list(name, building_pkg_names, 0) + # log(opts.logfile, "schedule2: unbuilt deps for pkg=%s, depth=%d: %s" % (name, 0, unbuilt_deps[0][name])) + # log(opts.logfile, "schedule2: building deps for pkg=%s, depth=%d: %s" % (name, 0, building_deps[0][name])) + if (len(building_deps[depth][name]) == 0 and len(unbuilt_deps[depth][name]) == 1 and unbuilt_deps[depth][name][0] in no_dep_list) or (len(unbuilt_deps[depth][name]) == 0 and len(building_deps[depth][name]) == 1 and building_deps[depth][name][0] in no_dep_list): + if len(unbuilt_deps[0][name]) == 0 and len(building_deps[0][name]) == 0: + if can_build_at_idx(build_idx, name, opts): + log(opts.logfile, "schedule2: no unbuilt deps for '%s' except for indirect kernel dep, searching at depth %d" % (name, depth)) + return name + else: + # log(opts.logfile, "schedule2: Can't build '%s' on 'b%d'" % (name, build_idx)) + continue + + loop = False + for dep_name in unbuilt_deps[depth][name]: + if name == dep_name: + continue + + # log(opts.logfile, "name=%s depends on dep_name=%s, depth=%d" % (name, dep_name, depth)) + if dep_name in checked: + continue + + # log(opts.logfile, "schedule2: check '%s' indirect" % dep_name) + if not dep_name in unbuilt_deps[depth]: + unbuilt_deps[depth][dep_name] = 
unbuilt_dep_list(dep_name, unbuilt_pkg_names, depth) + if not dep_name in building_deps[depth]: + building_deps[depth][dep_name] = unbuilt_dep_list(dep_name, building_pkg_names, depth) + # log(opts.logfile, "schedule2: deps: unbuilt deps for %s -> %s, depth=%d: %s" % (name, dep_name, depth, unbuilt_deps[depth][dep_name])) + # log(opts.logfile, "schedule2: deps: building deps for %s -> %s, depth=%d: %s" % (name, dep_name, depth, building_deps[depth][dep_name])) + if len(unbuilt_deps[depth][dep_name]) == 0 and len(building_deps[depth][dep_name]) == 0: + if can_build_at_idx(build_idx, dep_name, opts): + log(opts.logfile, "schedule2: deps: no unbuilt deps for '%s', working towards '%s', searching at depth %d" % (dep_name, name, depth)) + return dep_name + + if not dep_name in unbuilt_deps[0]: + unbuilt_deps[0][dep_name] = unbuilt_dep_list(dep_name, unbuilt_pkg_names, 0) + if not dep_name in building_deps[0]: + building_deps[0][dep_name] = unbuilt_dep_list(dep_name, building_pkg_names, 0) + # log(opts.logfile, "schedule2: deps: unbuilt deps for %s -> %s, depth=%d: %s" % (name, dep_name, 0, unbuilt_deps[0][dep_name])) + # log(opts.logfile, "schedule2: deps: building deps for %s -> %s, depth=%d: %s" % (name, dep_name, 0, building_deps[0][dep_name])) + if (len(building_deps[depth][dep_name]) == 0 and len(unbuilt_deps[depth][dep_name]) == 1 and unbuilt_deps[depth][dep_name][0] in no_dep_list) or (len(unbuilt_deps[depth][dep_name]) == 0 and len(building_deps[depth][dep_name]) == 1 and building_deps[depth][dep_name][0] in no_dep_list): + if len(unbuilt_deps[0][dep_name]) == 0 and len(building_deps[0][dep_name]) == 0: + if can_build_at_idx(build_idx, dep_name, opts): + log(opts.logfile, "schedule2: no unbuilt deps for '%s' except for indirect kernel dep, working towards '%s', searching at depth %d" % (dep_name, name, depth)) + return dep_name + + if name in unbuilt_deps[0][dep_name]: + loop = True + # log(opts.logfile, "schedule2: loop detected: %s <-> %s" % (name, dep_name)) 
+ + if loop and len(building_deps[depth][name]) == 0: + log(opts.logfile, "schedule2: loop detected, try to build '%s'" % name) + return name + + for dep_name in unbuilt_deps[depth][name]: + if dep_name in reordered_pkg_names: + # log(opts.logfile, "schedule2: promote %s to work toward %s" % (dep_name, name)) + reordered_pkg_names.remove(dep_name) + reordered_pkg_names.insert(0,dep_name) + + # log(opts.logfile, "schedule2: Nothing buildable at this time") + return None + + +def read_deps(opts): + read_srpm_deps(opts) + read_rpm_deps(opts) + read_map_deps(opts) + +def read_srpm_deps(opts): + global srpm_dependencies_direct + + if opts.srpm_dependency_file == None: + return + + if not os.path.exists(opts.srpm_dependency_file): + log(opts.logfile, "File not found: %s" % opts.srpm_dependency_file) + sys.exit(1) + + with open(opts.srpm_dependency_file) as f: + lines = f.readlines() + for line in lines: + (name,deps) = line.rstrip().split(';') + srpm_dependencies_direct[name]=deps.split(',') + +def read_rpm_deps(opts): + global rpm_dependencies_direct + + if opts.rpm_dependency_file == None: + return + + if not os.path.exists(opts.rpm_dependency_file): + log(opts.logfile, "File not found: %s" % opts.rpm_dependency_file) + sys.exit(1) + + with open(opts.rpm_dependency_file) as f: + lines = f.readlines() + for line in lines: + (name,deps) = line.rstrip().split(';') + rpm_dependencies_direct[name]=deps.split(',') + +def read_map_deps(opts): + global rpm_to_srpm_map + + if opts.rpm_to_srpm_map_file == None: + return + + if not os.path.exists(opts.rpm_to_srpm_map_file): + log(opts.logfile, "File not found: %s" % opts.rpm_to_srpm_map_file) + sys.exit(1) + + with open(opts.rpm_to_srpm_map_file) as f: + lines = f.readlines() + for line in lines: + (rpm,srpm) = line.rstrip().split(';') + rpm_to_srpm_map[rpm]=srpm + + +def reaper(opts): + global built_pkgs + global failed + global worker_data + global workers + + reaped = 0 + need_createrepo = False + last_reaped = -1 + while 
reaped > last_reaped: + last_reaped = reaped + for wd in worker_data: + p = wd['proc'] + ret = p.exitcode + if ret is not None: + pkg = wd['pkg'] + b = int(wd['build_index']) + p.join() + worker_data.remove(wd) + workers = workers - 1 + reaped = reaped + 1 + release_build_env(b) + + log(opts.logfile, "End build on 'b%d': %s" % (b, pkg)) + + if ret == 0: + failed.append(pkg) + log(opts.logfile, "Error building %s on 'b%d'." % (os.path.basename(pkg), b)) + if opts.recurse and not stop_signal: + log(opts.logfile, "Will try to build again (if some other package will succeed).") + else: + log(opts.logfile, "See logs/results in %s" % opts.local_repo_dir) + elif ret == 1: + log(opts.logfile, "Success building %s on 'b%d'" % (os.path.basename(pkg), b)) + built_pkgs.append(pkg) + need_createrepo = True + elif ret == 2: + log(opts.logfile, "Skipping already built pkg %s" % os.path.basename(pkg)) + + if need_createrepo: + # createrepo with the new pkgs + err = createrepo(opts.local_repo_dir)[1] + if err.strip(): + log(opts.logfile, "Error making local repo: %s" % opts.local_repo_dir) + log(opts.logfile, "Err: %s" % err) + + return reaped + +stop_signal = False + +def on_terminate(proc): + print("process {} terminated with exit code {}".format(proc, proc.returncode)) + +def kill_proc_and_descentents(parent, need_stop=False, verbose=False): + global g_opts + + if need_stop: + if verbose: + log(g_opts.logfile, "Stop %d" % parent.pid) + + try: + parent.send_signal(signal.SIGSTOP) + except: + # perhaps mock still running as root, give it a sec to drop pivledges and try again + time.sleep(1) + parent.send_signal(signal.SIGSTOP) + + try: + children = parent.children(recursive=False) + except: + children = [] + + for p in children: + kill_proc_and_descentents(p, need_stop=True, verbose=verbose) + + if verbose: + log(g_opts.logfile, "Terminate %d" % parent.pid) + + # parent.send_signal(signal.SIGTERM) + try: + parent.terminate() + except: + # perhaps mock still running as root, give 
it a sec to drop pivledges and try again + time.sleep(1) + parent.terminate() + + if need_stop: + if verbose: + log(g_opts.logfile, "Continue %d" % parent.pid) + + parent.send_signal(signal.SIGCONT) + + +def child_signal_handler(signum, frame): + global g_opts + my_pid = os.getpid() + # log(g_opts.logfile, "--------- child %d recieved signal %d" % (my_pid, signum)) + p = psutil.Process(my_pid) + kill_proc_and_descentents(p) + try: + sys.exit(0) + except SystemExit as e: + os._exit(0) + +def signal_handler(signum, frame): + global g_opts + global stop_signal + global workers + global worker_data + stop_signal = True + + # Signal processes to complete + log(g_opts.logfile, "recieved signal %d, Terminating children" % signum) + for wd in worker_data: + p = wd['proc'] + ret = p.exitcode + if ret is None: + # log(g_opts.logfile, "terminate child %d" % p.pid) + p.terminate() + else: + log(g_opts.logfile, "child return code was %d" % ret) + + # Wait for remaining processes to complete + log(g_opts.logfile, "===== wait for signaled jobs to complete =====") + while len(worker_data) > 0: + log(g_opts.logfile, " remaining workers: %d" % workers) + reaped = reaper(g_opts) + if reaped == 0: + time.sleep(0.1) + + try: + sys.exit(1) + except SystemExit as e: + os._exit(1) + +def main(args): + opts, args = parse_args(args) + # take mock config + list of pkgs + + global g_opts + global stop_signal + global build_env + global worker_data + global workers + global max_workers + + global slow_pkg_names + global slow_pkgs + global big_pkg_names + global big_pkgs + max_workers = int(opts.max_workers) + + global failed + global built_pkgs + + cfg = opts.chroot + pkgs = args[1:] + + # transform slow/big package options into dictionaries + for line in opts.slow_pkg_names_raw: + speed,name = line.split(":") + if speed != "": + slow_pkg_names[name]=int(speed) + for line in opts.slow_pkgs_raw: + speed,pkg = line.split(":") + if speed != "": + slow_pkgs[pkg]=int(speed) + for line in 
opts.big_pkg_names_raw: + size_gb,name = line.split(":") + if size_gb != "": + big_pkg_names[name]=int(size_gb) + for line in opts.big_pkgs_raw: + size_gb,pkg = line.split(":") + if size_gb != "": + big_pkgs[pkg]=int(size_gb) + + # Set up a mapping between pkg path and pkg name + global pkg_to_name + global name_to_pkg + for pkg in pkgs: + if not pkg.endswith('.rpm'): + log(opts.logfile, "%s doesn't appear to be an rpm - skipping" % pkg) + continue + + try: + name = rpmName(pkg) + except OSError as e: + print("Could not parse rpm %s" % pkg) + sys.exit(1) + + pkg_to_name[pkg] = name + name_to_pkg[name] = pkg + + read_deps(opts) + + global config_opts + config_opts = mock_config.load_config(mockconfig_path, cfg, None, __VERSION__, PKGPYTHONDIR) + + if not opts.tmp_prefix: + try: + opts.tmp_prefix = os.getlogin() + except OSError as e: + print("Could not find login name for tmp dir prefix add --tmp_prefix") + sys.exit(1) + pid = os.getpid() + opts.uniqueext = '%s-%s' % (opts.tmp_prefix, pid) + + if opts.basedir != "/var/lib/mock": + opts.uniqueext = '' + + # create a tempdir for our local info + if opts.localrepo: + local_tmp_dir = os.path.abspath(opts.localrepo) + if not os.path.exists(local_tmp_dir): + os.makedirs(local_tmp_dir) + os.chmod(local_tmp_dir, 0o755) + else: + pre = 'mock-chain-%s-' % opts.uniqueext + local_tmp_dir = tempfile.mkdtemp(prefix=pre, dir='/var/tmp') + os.chmod(local_tmp_dir, 0o755) + + if opts.logfile: + opts.logfile = os.path.join(local_tmp_dir, opts.logfile) + if os.path.exists(opts.logfile): + os.unlink(opts.logfile) + + log(opts.logfile, "starting logfile: %s" % opts.logfile) + + opts.local_repo_dir = os.path.normpath(local_tmp_dir + '/results/' + config_opts['chroot_name'] + '/') + + if not os.path.exists(opts.local_repo_dir): + os.makedirs(opts.local_repo_dir, mode=0o755) + + local_baseurl = "file://%s" % opts.local_repo_dir + log(opts.logfile, "results dir: %s" % opts.local_repo_dir) + opts.config_path = os.path.normpath(local_tmp_dir + 
'/configs/' + config_opts['chroot_name'] + '/') + + if not os.path.exists(opts.config_path): + os.makedirs(opts.config_path, mode=0o755) + + log(opts.logfile, "config dir: %s" % opts.config_path) + + my_mock_config = os.path.join(opts.config_path, "{0}.cfg".format(config_opts['chroot_name'])) + + # modify with localrepo + res, msg = add_local_repo(config_opts['config_file'], my_mock_config, local_baseurl, 'local_build_repo') + if not res: + log(opts.logfile, "Error: Could not write out local config: %s" % msg) + sys.exit(1) + + for baseurl in opts.repos: + res, msg = add_local_repo(my_mock_config, my_mock_config, baseurl) + if not res: + log(opts.logfile, "Error: Could not add: %s to yum config in mock chroot: %s" % (baseurl, msg)) + sys.exit(1) + + res, msg = set_basedir(my_mock_config, my_mock_config, opts.basedir, opts) + if not res: + log(opts.logfile, "Error: Could not write out local config: %s" % msg) + sys.exit(1) + + # these files needed from the mock.config dir to make mock run + for fn in ['site-defaults.cfg', 'logging.ini']: + pth = mockconfig_path + '/' + fn + shutil.copyfile(pth, opts.config_path + '/' + fn) + + # createrepo on it + err = createrepo(opts.local_repo_dir)[1] + if err.strip(): + log(opts.logfile, "Error making local repo: %s" % opts.local_repo_dir) + log(opts.logfile, "Err: %s" % err) + sys.exit(1) + + init_build_env(max_workers, opts, config_opts) + + download_dir = tempfile.mkdtemp() + downloaded_pkgs = {} + built_pkgs = [] + try_again = True + to_be_built = pkgs + return_code = 0 + num_of_tries = 0 + + g_opts = opts + signal.signal(signal.SIGTERM, signal_handler) + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGHUP, signal_handler) + signal.signal(signal.SIGABRT, signal_handler) + + while try_again and not stop_signal: + num_of_tries += 1 + failed = [] + + log(opts.logfile, "===== iteration %d start =====" % num_of_tries) + + to_be_built_scheduled = to_be_built[:] + + need_reap = False + while 
len(to_be_built_scheduled) > 0: + # Free up a worker + while need_reap or workers >= max_workers: + need_reap = False + reaped = reaper(opts) + if reaped == 0: + time.sleep(0.1) + + if workers < max_workers: + workers = workers + 1 + + b = get_idle_build_env(max_workers) + if b < 0: + log(opts.logfile, "Failed to find idle build env for: %s" % pkg) + workers = workers - 1 + need_reap = True + continue + + pkg = schedule(b, to_be_built_scheduled, opts) + if pkg is None: + if workers <= 1: + # Remember we have one build environmnet reserved, so can't test for zero workers + log(opts.logfile, "failed to schedule from: %s" % to_be_built_scheduled) + pkg = to_be_built_scheduled[0] + log(opts.logfile, "All workers idle, forcing build of pkg=%s" % pkg) + else: + release_build_env(b) + workers = workers - 1 + need_reap = True + continue + + to_be_built_scheduled.remove(pkg) + + if not pkg.endswith('.rpm'): + log(opts.logfile, "%s doesn't appear to be an rpm - skipping" % pkg) + failed.append(pkg) + release_build_env(b) + need_reap = True + continue + + elif pkg.startswith('http://') or pkg.startswith('https://') or pkg.startswith('ftp://'): + url = pkg + try: + log(opts.logfile, 'Fetching %s' % url) + r = requests.get(url) + # pylint: disable=no-member + if r.status_code == requests.codes.ok: + fn = urlsplit(r.url).path.rsplit('/', 1)[1] + if 'content-disposition' in r.headers: + _, params = cgi.parse_header(r.headers['content-disposition']) + if 'filename' in params and params['filename']: + fn = params['filename'] + pkg = download_dir + '/' + fn + with open(pkg, 'wb') as fd: + for chunk in r.iter_content(4096): + fd.write(chunk) + except Exception as e: + log(opts.logfile, 'Error Downloading %s: %s' % (url, str(e))) + failed.append(url) + release_build_env(b) + need_reap = True + continue + else: + downloaded_pkgs[pkg] = url + + log(opts.logfile, "Start build on 'b%d': %s" % (b, pkg)) + # ret = do_build(opts, config_opts['chroot_name'], pkg)[0] + p = 
multiprocessing.Process(target=do_build, args=(opts, build_env[b]['cfg'], pkg)) + worker_data.append({'proc': p, 'pkg': pkg, 'build_index': int(b)}) + p.start() + + # Wait for remaining processes to complete + log(opts.logfile, "===== wait for last jobs in iteration %d to complete =====" % num_of_tries) + while workers > 0: + reaped = reaper(opts) + if reaped == 0: + time.sleep(0.1) + log(opts.logfile, "===== iteration %d complete =====" % num_of_tries) + + if failed and opts.recurse: + log(opts.logfile, "failed=%s" % failed) + log(opts.logfile, "to_be_built=%s" % to_be_built) + if len(failed) != len(to_be_built): + to_be_built = failed + try_again = True + log(opts.logfile, 'Some package succeeded, some failed.') + log(opts.logfile, 'Trying to rebuild %s failed pkgs, because --recurse is set.' % len(failed)) + else: + if max_workers > 1: + max_workers = 1 + to_be_built = failed + try_again = True + log(opts.logfile, 'Some package failed under parallel build.') + log(opts.logfile, 'Trying to rebuild %s failed pkgs with single thread, because --recurse is set.' 
% len(failed)) + else: + log(opts.logfile, "") + log(opts.logfile, "*** Build Failed ***") + log(opts.logfile, "Tried %s times - following pkgs could not be successfully built:" % num_of_tries) + log(opts.logfile, "*** Build Failed ***") + for pkg in failed: + msg = pkg + if pkg in downloaded_pkgs: + msg = downloaded_pkgs[pkg] + log(opts.logfile, msg) + log(opts.logfile, "") + try_again = False + else: + try_again = False + if failed: + return_code = 2 + + # cleaning up our download dir + shutil.rmtree(download_dir, ignore_errors=True) + + log(opts.logfile, "") + log(opts.logfile, "Results out to: %s" % opts.local_repo_dir) + log(opts.logfile, "") + log(opts.logfile, "Pkgs built: %s" % len(built_pkgs)) + if built_pkgs: + if failed: + if len(built_pkgs): + log(opts.logfile, "Some packages successfully built in this order:") + else: + log(opts.logfile, "Packages successfully built in this order:") + for pkg in built_pkgs: + log(opts.logfile, pkg) + return return_code + + +if __name__ == "__main__": + sys.exit(main(sys.argv)) From 0972ffe2464293657b717ae84bc1fef890f7ffc1 Mon Sep 17 00:00:00 2001 From: Scott Little Date: Fri, 19 Feb 2021 11:40:12 -0500 Subject: [PATCH 18/37] mock 2.7 requires additional config options Mock 2.7 defaults to using dnf. The centos 7 mock config must provide additional arguments to ensure a sane yum environment. Without them it will try to use dnf. 
Closes-Bug: 1916275 Signed-off-by: Scott Little Change-Id: I4801accf05b68e1e0bdf8975d1b4d989fd0a303f --- build-tools/repo_files/mock.cfg.centos7.all.proto | 3 +++ build-tools/repo_files/mock.cfg.centos7.distro.proto | 3 +++ build-tools/repo_files/mock.cfg.centos7.proto | 3 +++ 3 files changed, 9 insertions(+) diff --git a/build-tools/repo_files/mock.cfg.centos7.all.proto b/build-tools/repo_files/mock.cfg.centos7.all.proto index 6ecaa2d2..6d9eb63e 100644 --- a/build-tools/repo_files/mock.cfg.centos7.all.proto +++ b/build-tools/repo_files/mock.cfg.centos7.all.proto @@ -4,6 +4,9 @@ config_opts['legal_host_arches'] = ('x86_64',) config_opts['chroot_setup_cmd'] = 'install @buildsys-build' config_opts['dist'] = 'el7' # only useful for --resultdir variable subst config_opts['releasever'] = '7' +config_opts['package_manager'] = 'yum' +config_opts['use_bootstrap'] = False +config_opts['use_bootstrap_image'] = False config_opts['rpmbuild_networking'] = False diff --git a/build-tools/repo_files/mock.cfg.centos7.distro.proto b/build-tools/repo_files/mock.cfg.centos7.distro.proto index 6ecaa2d2..6d9eb63e 100644 --- a/build-tools/repo_files/mock.cfg.centos7.distro.proto +++ b/build-tools/repo_files/mock.cfg.centos7.distro.proto @@ -4,6 +4,9 @@ config_opts['legal_host_arches'] = ('x86_64',) config_opts['chroot_setup_cmd'] = 'install @buildsys-build' config_opts['dist'] = 'el7' # only useful for --resultdir variable subst config_opts['releasever'] = '7' +config_opts['package_manager'] = 'yum' +config_opts['use_bootstrap'] = False +config_opts['use_bootstrap_image'] = False config_opts['rpmbuild_networking'] = False diff --git a/build-tools/repo_files/mock.cfg.centos7.proto b/build-tools/repo_files/mock.cfg.centos7.proto index beeea32f..779c5b59 100644 --- a/build-tools/repo_files/mock.cfg.centos7.proto +++ b/build-tools/repo_files/mock.cfg.centos7.proto @@ -4,6 +4,9 @@ config_opts['legal_host_arches'] = ('x86_64',) config_opts['chroot_setup_cmd'] = 'install @buildsys-build' 
config_opts['dist'] = 'el7' # only useful for --resultdir variable subst config_opts['releasever'] = '7' +config_opts['package_manager'] = 'yum' +config_opts['use_bootstrap'] = False +config_opts['use_bootstrap_image'] = False config_opts['rpmbuild_networking'] = False From d4409383d4fd1d5fb9656f61548547c2784836d4 Mon Sep 17 00:00:00 2001 From: Babak Sarashki Date: Fri, 12 Feb 2021 00:18:46 +0000 Subject: [PATCH 19/37] build-tools: include ICE driver for E810 in the installer This commit adds ice driver (version 1.2.1) to iso. This is needed for network boot. Story: 2008436 Task: 41398 Depends-On: https://review.opendev.org/c/starlingx/kernel/+/776262 Signed-off-by: Babak Sarashki Change-Id: I233174df1da5b2c86e6684289663bec775e049eb --- build-tools/update-pxe-network-installer | 1 + 1 file changed, 1 insertion(+) diff --git a/build-tools/update-pxe-network-installer b/build-tools/update-pxe-network-installer index a636cc65..83ccd82b 100755 --- a/build-tools/update-pxe-network-installer +++ b/build-tools/update-pxe-network-installer @@ -100,6 +100,7 @@ find_and_copy_rpm 'e1000e kernel module' 'kmod-e1000e-[0-9]*.x86_64. find_and_copy_rpm 'i40e kernel module' 'kmod-i40e-[0-9]*.x86_64.rpm' std "$kernel_rpms_std" find_and_copy_rpm 'ixgbe kernel module' 'kmod-ixgbe-[0-9]*.x86_64.rpm' std "$kernel_rpms_std" find_and_copy_rpm 'mlnx-ofa kernel module' 'mlnx-ofa_kernel-modules-[0-9]*.x86_64.rpm' std "$kernel_rpms_std" +find_and_copy_rpm 'ice kernel module' 'kmod-ice-[0-9]*.x86_64.rpm' std "$kernel_rpms_std" echo " -------- successfully found standard kernel rpm and related kernel modules --------" echo "" From 78be59c758945cb10c1e5569669f8764603eccc8 Mon Sep 17 00:00:00 2001 From: Scott Little Date: Wed, 3 Mar 2021 11:02:43 -0500 Subject: [PATCH 20/37] Fix memory overcommit that caused OOM killer Parallel package builds use large ramdisks. It's important not to commit too much memory to these ram disks, or we may push the system into memory exhaustion. 
At that stage the Kernel will invoke the OOM killer, It will likely select our build, or worse someone else's build, to sacrifice. The current algorithm only considers free memory at the instant the parallel build starts. It does not consider how many other builds are in flight, but might not have allocated their ramdisk yet. The other build intends to use the memory, we see the memory as free and try to use the same memory. Solution is to consider total memory, and number of builds already running or which might foreseeably start in the near future (share factor) to derive an alternate estimate of memory available. We then allocate the lesser amount. Also fixed some issues with cleaning up of child processes when a newer mockchain-parallel is in use. Closes-Bug: 1917525 Signed-off-by: Scott Little Change-Id: Iab178c6f9acbd5a209d66d0da21f367911f34905 --- build-tools/build-rpms-parallel | 54 +++++++++++++++++++++++++-------- build-tools/build-rpms-serial | 35 ++++++++++++++++++--- 2 files changed, 72 insertions(+), 17 deletions(-) diff --git a/build-tools/build-rpms-parallel b/build-tools/build-rpms-parallel index e2830c24..d3eea32b 100755 --- a/build-tools/build-rpms-parallel +++ b/build-tools/build-rpms-parallel @@ -122,6 +122,10 @@ number_of_users () { users | tr ' ' '\n' | sort --uniq | wc -l } +total_mem_gb () { + free -g | grep 'Mem:' | awk '{ print $2 }' +} + available_mem_gb () { free -g | grep 'Mem:' | awk '{ print $7 }' } @@ -238,26 +242,41 @@ compute_resources () { local users=$(number_of_users) if [ $users -lt 1 ]; then users=1; fi local mem=$(available_mem_gb) + local total_mem=$(total_mem_gb) local disk=$(available_disk_gb) local cpus=$(number_of_cpus) local num_users=$(sqrt $users) local num_build=$(number_of_builds_in_progress) num_build=$((num_build+1)) - echo "compute_resources: total: cpus=$cpus, mem=$mem, disk=$disk, weight=$weight, num_build=$num_build" + echo "compute_resources: total: cpus=$cpus, total_mem=$total_mem, avail_mem=$mem, 
disk=$disk, weight=$weight, num_build=$num_build" # What fraction of the machine will we use local share_factor=$num_users if [ $share_factor -gt $((MAX_SHARE_FACTOR+num_build-1)) ]; then share_factor=$((MAX_SHARE_FACTOR+num_build-1)); fi if [ $share_factor -lt $num_build ]; then share_factor=$num_build; fi - local mem_share_factor=$((share_factor-num_build)) + + # What fraction of free memory can we use. + # e.g. + # We intend to support 4 concurrent builds (share_factor) + # Two builds (excluding ours) are already underway (num_build-1) + # So we should be able to support 2 more builds (mem_share_factor) + local mem_share_factor=$((share_factor-(num_build-1))) if [ $mem_share_factor -lt 1 ]; then mem_share_factor=1; fi + echo "compute_resources: share_factor=$share_factor mem_share_factor=$mem_share_factor" # What resources are we permitted to use + # Continuing the example from above ... memory share is the lesser of + # - Half the available memory (mem/mem_share_factor) + # - A quarter of the total memory (total_mem/share_factor) local mem_share=$(((mem-MEMORY_RESERVE)/mem_share_factor)) if [ $mem_share -lt 0 ]; then mem_share=0; fi + local total_mem_share=$(((total_mem-MEMORY_RESERVE)/share_factor)) + if [ $total_mem_share -lt 0 ]; then total_mem_share=0; fi + if [ $mem_share -gt $total_mem_share ]; then mem_share=$total_mem_share; fi local disk_share=$((disk/share_factor)) local cpus_share=$((cpus/share_factor)) + echo "compute_resources: our share: cpus=$cpus_share, mem=$mem_share, disk=$disk_share" # How many build jobs, how many jobs will use tmpfs, and how much mem for each tmpfs @@ -293,7 +312,7 @@ compute_resources () { fi done - # Our output is saved in environmnet variables + # Our output is saved in environment variables MOCKCHAIN_RESOURCE_ALLOCATION=$(echo $x | sed 's#^:##') MAX_WORKERS=$workers echo "compute_resources: MAX_WORKERS=$MAX_WORKERS, MOCKCHAIN_RESOURCE_ALLOCATION=$MOCKCHAIN_RESOURCE_ALLOCATION" @@ -654,7 +673,7 @@ kill_descendents () 
local relevant_recursive_children="$ME" local relevant_recursive_promote_children="mock" - local relevant_other_children="mockchain-parallel mockchain-parallel-1.3.4 mockchain-parallel-1.4.16" + local relevant_other_children="mockchain-parallel mockchain-parallel-1.3.4 mockchain-parallel-1.4.16 mockchain-parallel-2.6 mockchain-parallel-2.7" local recursive_promote_children=$(for relevant_child in $relevant_recursive_promote_children; do pgrep -P $kill_pid $relevant_child; done) local recursive_children=$(for relevant_child in $relevant_recursive_children; do pgrep -P $kill_pid $relevant_child; done) @@ -1181,14 +1200,24 @@ mock_clean_metadata_cfg () { return 1 fi - CMD=$((cat $CFG; \ - grep config_opts\\[\'yum.conf\'\\\] $CFG | \ - sed 's#\\n#\n#g') | \ - grep '^[[]' | \ - grep -v main | \ - sed -e 's/[][]//g' -e "s#^#${PKG_MANAGER} --enablerepo=#" -e 's#$# clean metadata#' | \ - sort -u | \ - tr '\n' ';') + # + # From mock config, extract the embedded yum/dnf config. + # Then extract the repo definitions, + # and convert to a series of yum commands to clean the + # metadata one repo at a time. e.g. + # CMD="yum --disablerepo=* --enablerepo=StxCentos7Distro clean metadata; \ + # yum --disablerepo=* --enablerepo=StxCentos7Distro-rt clean metadata; + # ... + # " + # + CMD=$((grep -e config_opts\\[\'yum.conf\'\\\] $CFG \ + -e config_opts\\[\'dnf.conf\'\\\] $CFG | \ + sed 's#\\n#\n#g') | \ + grep '^[[]' | \ + grep -v main | \ + sed -e 's/[][]//g' -e "s#^#${PKG_MANAGER} --disablerepo=* --enablerepo=#" -e 's#$# clean metadata#' | \ + sort -u | \ + tr '\n' ';') echo "$MOCK --root $CFG --configdir $(dirname $CFG) --chroot bash -c $CMD" &> $TMP trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --chroot "bash -c '($CMD)'" &>>$TMP RC=$? 
@@ -2338,6 +2367,7 @@ if [ $CAREFUL -eq 1 ]; then CMD_OPTIONS="$MOCK_PASSTHROUGH --no-cleanup-after" fi +CMD_OPTIONS+=" $MOCK_PASSTHROUGH --enable-plugin=package_state" CMD_OPTIONS+=" --log=$MOCKCHAIN_LOG" echo "CAREFUL=$CAREFUL" diff --git a/build-tools/build-rpms-serial b/build-tools/build-rpms-serial index 0a6afbf7..60a91d2b 100755 --- a/build-tools/build-rpms-serial +++ b/build-tools/build-rpms-serial @@ -25,7 +25,14 @@ export ME=$(basename "$0") CMDLINE="$ME $@" +BUILD_RPMS_PARALLEL_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )" +# Set PKG_MANAGER for our build environment. +source "${BUILD_RPMS_PARALLEL_DIR}/pkg-manager-utils.sh" + + +# Build for distribution. Currently 'centos' is only supported value. +export DISTRO="centos" CREATEREPO=$(which createrepo_c) if [ $? -ne 0 ]; then @@ -42,6 +49,7 @@ if [ ! -d ${LOCAL_REPO} ]; then fi fi +# Make sure we have a dependency cache DEPENDANCY_DIR="${LOCAL_REPO}/dependancy-cache" SRPM_DIRECT_REQUIRES_FILE="$DEPENDANCY_DIR/SRPM-direct-requires" SRPM_TRANSITIVE_REQUIRES_FILE="$DEPENDANCY_DIR/SRPM-transitive-requires" @@ -118,7 +126,7 @@ create-no-clean-list () { local g for g in $install_groups; do - # Find manditory packages in the group. + # Find mandatory packages in the group. # Discard anything before (and including) 'Mandatory Packages:' # and anything after (and including) 'Optional Packages:'. # Also discard leading spaces or '+' characters. @@ -135,7 +143,7 @@ create-no-clean-list () { while [ $noclean_list_len -gt $noclean_last_list_len ]; do noclean_last_list_len=$noclean_list_len - noclean_list=$( (yum -c $MY_YUM_CONF deplist $noclean_list 2>> /dev/null | grep provider: | awk '{ print $2 }' | awk -F . '{ print $1 }'; for p in $noclean_list; do echo $p; done) | sort --uniq) + noclean_list=$( (${PKG_MANAGER} -c $MY_YUM_CONF deplist $noclean_list 2>> /dev/null | grep provider: | awk '{ print $2 }' | awk -F . 
'{ print $1 }'; for p in $noclean_list; do echo $p; done) | sort --uniq) noclean_list_len=$(echo $noclean_list | wc -w) done @@ -475,7 +483,7 @@ kill_descendents () local relevant_recursive_children="$ME" local relevant_recursive_promote_children="mock" - local relevant_other_children="mockchain-parallel" + local relevant_other_children="mockchain-parallel mockchain-parallel-1.3.4 mockchain-parallel-1.4.16 mockchain-parallel-2.6 mockchain-parallel-2.7" local recursive_promote_children=$(for relevant_child in $relevant_recursive_promote_children; do pgrep -P $kill_pid $relevant_child; done) local recursive_children=$(for relevant_child in $relevant_recursive_children; do pgrep -P $kill_pid $relevant_child; done) @@ -964,7 +972,24 @@ mock_clean_metadata_cfg () { return 1 fi - CMD=$((cat $CFG; grep config_opts\\[\'yum.conf\'\\\] $CFG | sed 's#\\n#\n#g') | grep '^[[]' | grep -v main | sed 's/[][]//g' | sed 's#^#yum --enablerepo=#' | sed 's#$# clean metadata#' | sort -u | tr '\n' ';') + # + # From mock config, extract the embedded yum/dnf config. + # Then extract the repo definitions, + # and convert to a series of yum commands to clean the + # metadata one repo at a time. e.g. + # CMD="yum --disablerepo=* --enablerepo=StxCentos7Distro clean metadata; \ + # yum --disablerepo=* --enablerepo=StxCentos7Distro-rt clean metadata; + # ... + # " + # + CMD=$((grep -e config_opts\\[\'yum.conf\'\\\] $CFG \ + -e config_opts\\[\'dnf.conf\'\\\] $CFG | \ + sed 's#\\n#\n#g') | \ + grep '^[[]' | \ + grep -v main | \ + sed -e 's/[][]//g' -e "s#^#${PKG_MANAGER} --disablerepo=* --enablerepo=#" -e 's#$# clean metadata#' | \ + sort -u | \ + tr '\n' ';') echo "$MOCK --root $CFG --configdir $(dirname $CFG) --chroot bash -c $CMD" &> $TMP trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --chroot "bash -c '($CMD)'" &>>$TMP RC=$? 
@@ -1129,6 +1154,7 @@ clean_yum_cache_cfg () { return $RC } + clean_yum_cache () { echo "${FUNCNAME[0]}: in" clean_yum_cache_cfg $BUILD_CFG @@ -1249,7 +1275,6 @@ while true ; do esac done - # Reset variables if [ -n "$MY_WORKSPACE" ]; then export MY_WORKSPACE_TOP=${MY_WORKSPACE_TOP:-$MY_WORKSPACE} From fcba6679ab63cf2b384489e10e9ca244952ec6c4 Mon Sep 17 00:00:00 2001 From: Cole Walker Date: Mon, 15 Mar 2021 13:10:59 -0400 Subject: [PATCH 21/37] Update notificationclient-base image tag Update the notificationclient-base image tag to stx.5.0-v1.0.3 Story: 2008529 Task: 42054 Signed-off-by: Cole Walker Change-Id: I6312ecddf82445b8ecb7a16e059a29064eaa289d --- .../build-docker-images/tag-management/image-tags.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/build-tools/build-docker-images/tag-management/image-tags.yaml b/build-tools/build-docker-images/tag-management/image-tags.yaml index cfac3a51..914d44cb 100644 --- a/build-tools/build-docker-images/tag-management/image-tags.yaml +++ b/build-tools/build-docker-images/tag-management/image-tags.yaml @@ -89,6 +89,6 @@ images: src_ref: https://opendev.org/starlingx/ptp-notification-armada-app/commit/545e6b6bb093235c2f8dab8d171f30c6ae8682d3 tag: stx.5.0-v1.0.1 - name: docker.io/starlingx/notificationclient-base - src_build_tag: master-centos-stable-20210217T173034Z.0 - src_ref: https://opendev.org/starlingx/ptp-notification-armada-app/commit/435d2df181b4bc5dcdf65690858027faa742968c - tag: stx.5.0-v1.0.2 + src_build_tag: master-centos-stable-20210314T171252Z.0 + src_ref: https://opendev.org/starlingx/ptp-notification-armada-app/commit/5eb6e432dde2993e5e99025ba7f4be8b899cef12 + tag: stx.5.0-v1.0.3 From 8cea0ccceb90de4005e556ca2b6929b259f572c0 Mon Sep 17 00:00:00 2001 From: Jose Infanzon Date: Tue, 16 Mar 2021 14:57:38 -0400 Subject: [PATCH 22/37] Update stx-fm-trap-subagent tag Update yaml file with a new tag for stx-fm-trap-subagent, due to a new commit delivered to support IPv6 Story: 2008132 
Task: 42063 Signed-off-by: Jose Infanzon Change-Id: Id8a12b4c68b887ff153490af5e8084d7ee5224fe --- .../build-docker-images/tag-management/image-tags.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/build-tools/build-docker-images/tag-management/image-tags.yaml b/build-tools/build-docker-images/tag-management/image-tags.yaml index 914d44cb..3f338279 100644 --- a/build-tools/build-docker-images/tag-management/image-tags.yaml +++ b/build-tools/build-docker-images/tag-management/image-tags.yaml @@ -77,9 +77,9 @@ images: src_ref: https://opendev.org/starlingx/snmp-armada-app/commit/2b370655a7f9a506ea139cfffa6c466d1a82cce4 tag: stx.5.0-v1.0.0 - name: docker.io/starlingx/stx-fm-trap-subagent - src_build_tag: master-centos-stable-20210105T023146Z.0 - src_ref: https://opendev.org/starlingx/snmp-armada-app/commit/2b370655a7f9a506ea139cfffa6c466d1a82cce4 - tag: stx.5.0-v1.0.0 + src_build_tag: master-centos-stable-20210314T171252Z.0 + src_ref: https://opendev.org/starlingx/snmp-armada-app/commit/5aca0dd1661bc87a7927c00cf95e0c8aa6f2e2a0 + tag: stx.5.0-v1.0.1 - name: docker.io/starlingx/notificationservice-base src_build_tag: master-centos-stable-20210217T173034Z.0 src_ref: https://opendev.org/starlingx/ptp-notification-armada-app/commit/b618223d037b0d720fbaa38e032145a72f2a7359 From 90aa450e427e21e6dbe3fc230c089e909ad19f29 Mon Sep 17 00:00:00 2001 From: Davlet Panech Date: Mon, 22 Mar 2021 16:13:06 -0400 Subject: [PATCH 23/37] centos7: delete yum.lock after yum-builddep Pre-dnf yum-builddep leaves a stale yum.pid file behind with its own process ID. If that PID happens to be reused and match an existing process, a subsequent yum & co invocation hangs. Solution: create a wrapper script that deletes the pid file if necessary. 
Change-Id: Iee5c6a092835103a5dc52979345fa84e8a36c084 Closes-Bug: 1920805 Signed-off-by: Davlet Panech --- build-tools/create-yum-conf | 4 +- build-tools/modify-build-cfg | 4 +- .../repo_files/mock.cfg.centos7.all.proto | 1 + .../repo_files/mock.cfg.centos7.distro.proto | 1 + build-tools/repo_files/mock.cfg.centos7.proto | 1 + build-tools/yum-builddep-wrapper | 66 +++++++++++++++++++ 6 files changed, 73 insertions(+), 4 deletions(-) create mode 100755 build-tools/yum-builddep-wrapper diff --git a/build-tools/create-yum-conf b/build-tools/create-yum-conf index 80b4ecf7..c7b7e259 100755 --- a/build-tools/create-yum-conf +++ b/build-tools/create-yum-conf @@ -66,8 +66,8 @@ if [ ! -f "$MY_YUM_CONF" ]; then sed -i "s%LOCAL_BASE%file://%g" "$MY_YUM_CONF" sed -i "s%MIRROR_BASE%file:///import/mirrors%g" "$MY_YUM_CONF" sed -i "s%BUILD_ENV%$MY_BUILD_ENVIRONMENT%g" "$MY_YUM_CONF" - sed -i "s%/MY_BUILD_DIR%$MY_BUILD_DIR%g" "$MY_YUM_CONF" - sed -i "s%/MY_REPO_DIR%$MY_REPO%g" "$MY_YUM_CONF" + sed -i "s%/*MY_BUILD_DIR%$MY_BUILD_DIR%g" "$MY_YUM_CONF" + sed -i "s%/*MY_REPO_DIR%$MY_REPO%g" "$MY_YUM_CONF" else echo "ERROR: Could not find yum.conf or MOCK_CFG_PROTO" exit 1 diff --git a/build-tools/modify-build-cfg b/build-tools/modify-build-cfg index 78658623..e8665ed7 100755 --- a/build-tools/modify-build-cfg +++ b/build-tools/modify-build-cfg @@ -86,8 +86,8 @@ if [ ! 
-f $FILE ]; then sed -i "s%LOCAL_BASE%http://127.0.0.1:8088%g" "$FILE" sed -i "s%MIRROR_BASE%http://127.0.0.1:8088%g" "$FILE" sed -i "s%BUILD_ENV%$MY_BUILD_ENVIRONMENT%g" "$FILE" - sed -i "s%/MY_BUILD_DIR%$MY_BUILD_DIR_TOP%g" "$FILE" - sed -i "s%/MY_REPO_DIR%$MY_REPO%g" "$FILE" + sed -i "s%/*MY_BUILD_DIR%$MY_BUILD_DIR_TOP%g" "$FILE" + sed -i "s%/*MY_REPO_DIR%$MY_REPO%g" "$FILE" # Disable all local-* repos for the build-types other than the current one for bt in std rt; do diff --git a/build-tools/repo_files/mock.cfg.centos7.all.proto b/build-tools/repo_files/mock.cfg.centos7.all.proto index 6d9eb63e..95ed980c 100644 --- a/build-tools/repo_files/mock.cfg.centos7.all.proto +++ b/build-tools/repo_files/mock.cfg.centos7.all.proto @@ -5,6 +5,7 @@ config_opts['chroot_setup_cmd'] = 'install @buildsys-build' config_opts['dist'] = 'el7' # only useful for --resultdir variable subst config_opts['releasever'] = '7' config_opts['package_manager'] = 'yum' +config_opts['yum_builddep_command'] = 'MY_REPO_DIR/build-tools/yum-builddep-wrapper' config_opts['use_bootstrap'] = False config_opts['use_bootstrap_image'] = False config_opts['rpmbuild_networking'] = False diff --git a/build-tools/repo_files/mock.cfg.centos7.distro.proto b/build-tools/repo_files/mock.cfg.centos7.distro.proto index 6d9eb63e..95ed980c 100644 --- a/build-tools/repo_files/mock.cfg.centos7.distro.proto +++ b/build-tools/repo_files/mock.cfg.centos7.distro.proto @@ -5,6 +5,7 @@ config_opts['chroot_setup_cmd'] = 'install @buildsys-build' config_opts['dist'] = 'el7' # only useful for --resultdir variable subst config_opts['releasever'] = '7' config_opts['package_manager'] = 'yum' +config_opts['yum_builddep_command'] = 'MY_REPO_DIR/build-tools/yum-builddep-wrapper' config_opts['use_bootstrap'] = False config_opts['use_bootstrap_image'] = False config_opts['rpmbuild_networking'] = False diff --git a/build-tools/repo_files/mock.cfg.centos7.proto b/build-tools/repo_files/mock.cfg.centos7.proto index 779c5b59..2ca56fdc 
100644 --- a/build-tools/repo_files/mock.cfg.centos7.proto +++ b/build-tools/repo_files/mock.cfg.centos7.proto @@ -5,6 +5,7 @@ config_opts['chroot_setup_cmd'] = 'install @buildsys-build' config_opts['dist'] = 'el7' # only useful for --resultdir variable subst config_opts['releasever'] = '7' config_opts['package_manager'] = 'yum' +config_opts['yum_builddep_command'] = 'MY_REPO_DIR/build-tools/yum-builddep-wrapper' config_opts['use_bootstrap'] = False config_opts['use_bootstrap_image'] = False config_opts['rpmbuild_networking'] = False diff --git a/build-tools/yum-builddep-wrapper b/build-tools/yum-builddep-wrapper new file mode 100755 index 00000000..4f82d2d2 --- /dev/null +++ b/build-tools/yum-builddep-wrapper @@ -0,0 +1,66 @@ +#!/bin/bash + +# Old versions of yum-builddep leave a stale yum.pid file behind. +# Remove that file if necessary after yum-builddep exits + +# find yum-builddep +YUM_BUILDDEP=$(which yum-builddep 2>/dev/null) + +# dnf: call it directly +if [[ -z $YUM_BUILDDEP ]] || grep -q -F dnf.cli "$YUM_BUILDDEP" ; then + yum-builddep "$@" + exit $? +fi + + +# old yum: scan command line for --installroot +ROOT_PREFIX= +YUM_CONF=/etc/yum.conf +find_root_prefix() { + while [[ "$#" -gt 0 ]] ; do + case "$1" in + --installroot) + ROOT_PREFIX="$2" + shift + ;; + --installroot=*) + ROOT_PREFIX="${1#*=}" + ;; + -c|--config) + YUM_CONF="$2" + shift + ;; + --config=*) + YUM_CONF="${1#*=}" + ;; + esac + shift + done + if [[ -z "$ROOT_PREFIX" ]] && [[ -f "$YUM_CONF" ]] ; then + ROOT_PREFIX=$(sed -rn 's/^\s*installroot\s*=\s*(\S+)\s*$/\1/p' $YUM_CONF) + fi +} +find_root_prefix "$@" + +# ignore signals -- always wait for yum-builddep +trap "" INT TERM HUP PIPE + +# run it in the background to get its PID +"$YUM_BUILDDEP" "$@" & +pid="$!" + +# wait for it +wait "$pid" +res="$?" 
+ +# if yum.pid remains and contains yum-builddep's PID, delete it +if [[ -f "${ROOT_PREFIX}/run/yum.pid" ]] ; then + lock_owner= + read lock_owner <"${ROOT_PREFIX}/run/yum.pid" || : + if [[ -n $lock_owner && $lock_owner == $pid ]] ; then + rm -f "${ROOT_PREFIX}/run/yum.pid" + fi +fi + +# done +exit $res From 290b3ea8d2b91a3eb4845e9df856119a13ecb2a9 Mon Sep 17 00:00:00 2001 From: Davlet Panech Date: Thu, 25 Mar 2021 09:57:30 -0400 Subject: [PATCH 24/37] Revert "centos7: delete yum.lock after yum-builddep" This reverts commit 90aa450e427e21e6dbe3fc230c089e909ad19f29. The original commit breaks the build-iso script. Change-Id: I73c9bc8e502c8401922d107b0b78cd88511a2a4b Signed-off-by: Davlet Panech --- build-tools/create-yum-conf | 4 +- build-tools/modify-build-cfg | 4 +- .../repo_files/mock.cfg.centos7.all.proto | 1 - .../repo_files/mock.cfg.centos7.distro.proto | 1 - build-tools/repo_files/mock.cfg.centos7.proto | 1 - build-tools/yum-builddep-wrapper | 66 ------------------- 6 files changed, 4 insertions(+), 73 deletions(-) delete mode 100755 build-tools/yum-builddep-wrapper diff --git a/build-tools/create-yum-conf b/build-tools/create-yum-conf index c7b7e259..80b4ecf7 100755 --- a/build-tools/create-yum-conf +++ b/build-tools/create-yum-conf @@ -66,8 +66,8 @@ if [ ! -f "$MY_YUM_CONF" ]; then sed -i "s%LOCAL_BASE%file://%g" "$MY_YUM_CONF" sed -i "s%MIRROR_BASE%file:///import/mirrors%g" "$MY_YUM_CONF" sed -i "s%BUILD_ENV%$MY_BUILD_ENVIRONMENT%g" "$MY_YUM_CONF" - sed -i "s%/*MY_BUILD_DIR%$MY_BUILD_DIR%g" "$MY_YUM_CONF" - sed -i "s%/*MY_REPO_DIR%$MY_REPO%g" "$MY_YUM_CONF" + sed -i "s%/MY_BUILD_DIR%$MY_BUILD_DIR%g" "$MY_YUM_CONF" + sed -i "s%/MY_REPO_DIR%$MY_REPO%g" "$MY_YUM_CONF" else echo "ERROR: Could not find yum.conf or MOCK_CFG_PROTO" exit 1 diff --git a/build-tools/modify-build-cfg b/build-tools/modify-build-cfg index e8665ed7..78658623 100755 --- a/build-tools/modify-build-cfg +++ b/build-tools/modify-build-cfg @@ -86,8 +86,8 @@ if [ ! 
-f $FILE ]; then sed -i "s%LOCAL_BASE%http://127.0.0.1:8088%g" "$FILE" sed -i "s%MIRROR_BASE%http://127.0.0.1:8088%g" "$FILE" sed -i "s%BUILD_ENV%$MY_BUILD_ENVIRONMENT%g" "$FILE" - sed -i "s%/*MY_BUILD_DIR%$MY_BUILD_DIR_TOP%g" "$FILE" - sed -i "s%/*MY_REPO_DIR%$MY_REPO%g" "$FILE" + sed -i "s%/MY_BUILD_DIR%$MY_BUILD_DIR_TOP%g" "$FILE" + sed -i "s%/MY_REPO_DIR%$MY_REPO%g" "$FILE" # Disable all local-* repos for the build-types other than the current one for bt in std rt; do diff --git a/build-tools/repo_files/mock.cfg.centos7.all.proto b/build-tools/repo_files/mock.cfg.centos7.all.proto index 95ed980c..6d9eb63e 100644 --- a/build-tools/repo_files/mock.cfg.centos7.all.proto +++ b/build-tools/repo_files/mock.cfg.centos7.all.proto @@ -5,7 +5,6 @@ config_opts['chroot_setup_cmd'] = 'install @buildsys-build' config_opts['dist'] = 'el7' # only useful for --resultdir variable subst config_opts['releasever'] = '7' config_opts['package_manager'] = 'yum' -config_opts['yum_builddep_command'] = 'MY_REPO_DIR/build-tools/yum-builddep-wrapper' config_opts['use_bootstrap'] = False config_opts['use_bootstrap_image'] = False config_opts['rpmbuild_networking'] = False diff --git a/build-tools/repo_files/mock.cfg.centos7.distro.proto b/build-tools/repo_files/mock.cfg.centos7.distro.proto index 95ed980c..6d9eb63e 100644 --- a/build-tools/repo_files/mock.cfg.centos7.distro.proto +++ b/build-tools/repo_files/mock.cfg.centos7.distro.proto @@ -5,7 +5,6 @@ config_opts['chroot_setup_cmd'] = 'install @buildsys-build' config_opts['dist'] = 'el7' # only useful for --resultdir variable subst config_opts['releasever'] = '7' config_opts['package_manager'] = 'yum' -config_opts['yum_builddep_command'] = 'MY_REPO_DIR/build-tools/yum-builddep-wrapper' config_opts['use_bootstrap'] = False config_opts['use_bootstrap_image'] = False config_opts['rpmbuild_networking'] = False diff --git a/build-tools/repo_files/mock.cfg.centos7.proto b/build-tools/repo_files/mock.cfg.centos7.proto index 2ca56fdc..779c5b59 
100644 --- a/build-tools/repo_files/mock.cfg.centos7.proto +++ b/build-tools/repo_files/mock.cfg.centos7.proto @@ -5,7 +5,6 @@ config_opts['chroot_setup_cmd'] = 'install @buildsys-build' config_opts['dist'] = 'el7' # only useful for --resultdir variable subst config_opts['releasever'] = '7' config_opts['package_manager'] = 'yum' -config_opts['yum_builddep_command'] = 'MY_REPO_DIR/build-tools/yum-builddep-wrapper' config_opts['use_bootstrap'] = False config_opts['use_bootstrap_image'] = False config_opts['rpmbuild_networking'] = False diff --git a/build-tools/yum-builddep-wrapper b/build-tools/yum-builddep-wrapper deleted file mode 100755 index 4f82d2d2..00000000 --- a/build-tools/yum-builddep-wrapper +++ /dev/null @@ -1,66 +0,0 @@ -#!/bin/bash - -# Old versions of yum-builddep leave a stale yum.pid file behind. -# Remove that file if necessary after yum-builddep exits - -# find yum-builddep -YUM_BUILDDEP=$(which yum-builddep 2>/dev/null) - -# dnf: call it directly -if [[ -z $YUM_BUILDDEP ]] || grep -q -F dnf.cli "$YUM_BUILDDEP" ; then - yum-builddep "$@" - exit $? -fi - - -# old yum: scan command line for --installroot -ROOT_PREFIX= -YUM_CONF=/etc/yum.conf -find_root_prefix() { - while [[ "$#" -gt 0 ]] ; do - case "$1" in - --installroot) - ROOT_PREFIX="$2" - shift - ;; - --installroot=*) - ROOT_PREFIX="${1#*=}" - ;; - -c|--config) - YUM_CONF="$2" - shift - ;; - --config=*) - YUM_CONF="${1#*=}" - ;; - esac - shift - done - if [[ -z "$ROOT_PREFIX" ]] && [[ -f "$YUM_CONF" ]] ; then - ROOT_PREFIX=$(sed -rn 's/^\s*installroot\s*=\s*(\S+)\s*$/\1/p' $YUM_CONF) - fi -} -find_root_prefix "$@" - -# ignore signals -- always wait for yum-builddep -trap "" INT TERM HUP PIPE - -# run it in the background to get its PID -"$YUM_BUILDDEP" "$@" & -pid="$!" - -# wait for it -wait "$pid" -res="$?" 
- -# if yum.pid remains and contains yum-builddep's PID, delete it -if [[ -f "${ROOT_PREFIX}/run/yum.pid" ]] ; then - lock_owner= - read lock_owner <"${ROOT_PREFIX}/run/yum.pid" || : - if [[ -n $lock_owner && $lock_owner == $pid ]] ; then - rm -f "${ROOT_PREFIX}/run/yum.pid" - fi -fi - -# done -exit $res From 3ef97d279c527d1c1a79b95aeebcb6c7e00a863d Mon Sep 17 00:00:00 2001 From: Davlet Panech Date: Thu, 25 Mar 2021 18:21:51 -0400 Subject: [PATCH 25/37] centos7: delete yum.pid after yum-builddep Pre-dnf yum-builddep leaves a stale yum.pid file behind with its own process ID. If that PID happens to be reused and match an existing process, a subsequent yum & co invocation hangs. Solution: create a wrapper script that deletes the pid file if necessary. Change-Id: I821643f576645d78ab1c29cdccefa12740bbc12f Closes-Bug: 1920805 Signed-off-by: Davlet Panech --- build-tools/create-yum-conf | 5 ++ build-tools/modify-build-cfg | 5 ++ .../repo_files/mock.cfg.centos7.all.proto | 1 + .../repo_files/mock.cfg.centos7.distro.proto | 1 + build-tools/repo_files/mock.cfg.centos7.proto | 1 + build-tools/yum-builddep-wrapper | 66 +++++++++++++++++++ 6 files changed, 79 insertions(+) create mode 100755 build-tools/yum-builddep-wrapper diff --git a/build-tools/create-yum-conf b/build-tools/create-yum-conf index 80b4ecf7..67a8486f 100755 --- a/build-tools/create-yum-conf +++ b/build-tools/create-yum-conf @@ -63,11 +63,16 @@ if [ ! -f "$MY_YUM_CONF" ]; then mock_cfg_to_yum_conf.py "$MOCK_CFG_PROTO" > "$MY_YUM_CONF" sed -i "s%\[main\]%&\ncachedir=$YUM_CACHE%" "$MY_YUM_CONF" sed -i "s%logfile=.*%logfile=$YUM_DIR/yum.log%" "$MY_YUM_CONF" + # eg: LOCAL_BASE/MY_BUILD_DIR => file:///MY_BUILD_DIR sed -i "s%LOCAL_BASE%file://%g" "$MY_YUM_CONF" sed -i "s%MIRROR_BASE%file:///import/mirrors%g" "$MY_YUM_CONF" sed -i "s%BUILD_ENV%$MY_BUILD_ENVIRONMENT%g" "$MY_YUM_CONF" + # eg: file:///MY_BUILD_DIR => file:///localdisk/loadbuild/... 
sed -i "s%/MY_BUILD_DIR%$MY_BUILD_DIR%g" "$MY_YUM_CONF" sed -i "s%/MY_REPO_DIR%$MY_REPO%g" "$MY_YUM_CONF" + # eg = MY_BUILD_DIR/xyz => /localdisk/loadbuild/.../xyz + sed -i "s%MY_BUILD_DIR%$MY_BUILD_DIR%g" "$MY_YUM_CONF" + sed -i "s%MY_REPO_DIR%$MY_REPO%g" "$MY_YUM_CONF" else echo "ERROR: Could not find yum.conf or MOCK_CFG_PROTO" exit 1 diff --git a/build-tools/modify-build-cfg b/build-tools/modify-build-cfg index 78658623..6c273f79 100755 --- a/build-tools/modify-build-cfg +++ b/build-tools/modify-build-cfg @@ -83,11 +83,16 @@ if [ ! -f $FILE ]; then exit 1 fi + # eg: LOCAL_BASE/MY_BUILD_DIR => http://127.0.0.1:8088/MY_BUILD_DIR sed -i "s%LOCAL_BASE%http://127.0.0.1:8088%g" "$FILE" sed -i "s%MIRROR_BASE%http://127.0.0.1:8088%g" "$FILE" sed -i "s%BUILD_ENV%$MY_BUILD_ENVIRONMENT%g" "$FILE" + # eg http://127.0.0.1:8088/MY_BUILD_DIR => http://12.0.0.1:8088/localdisk/loadbuild/... sed -i "s%/MY_BUILD_DIR%$MY_BUILD_DIR_TOP%g" "$FILE" sed -i "s%/MY_REPO_DIR%$MY_REPO%g" "$FILE" + # eg = MY_BUILD_DIR/xyz => /localdisk/loadbuild/.../xyz + sed -i "s%MY_BUILD_DIR%$MY_BUILD_DIR_TOP%g" "$FILE" + sed -i "s%MY_REPO_DIR%$MY_REPO%g" "$FILE" # Disable all local-* repos for the build-types other than the current one for bt in std rt; do diff --git a/build-tools/repo_files/mock.cfg.centos7.all.proto b/build-tools/repo_files/mock.cfg.centos7.all.proto index 6d9eb63e..95ed980c 100644 --- a/build-tools/repo_files/mock.cfg.centos7.all.proto +++ b/build-tools/repo_files/mock.cfg.centos7.all.proto @@ -5,6 +5,7 @@ config_opts['chroot_setup_cmd'] = 'install @buildsys-build' config_opts['dist'] = 'el7' # only useful for --resultdir variable subst config_opts['releasever'] = '7' config_opts['package_manager'] = 'yum' +config_opts['yum_builddep_command'] = 'MY_REPO_DIR/build-tools/yum-builddep-wrapper' config_opts['use_bootstrap'] = False config_opts['use_bootstrap_image'] = False config_opts['rpmbuild_networking'] = False diff --git a/build-tools/repo_files/mock.cfg.centos7.distro.proto 
b/build-tools/repo_files/mock.cfg.centos7.distro.proto index 6d9eb63e..95ed980c 100644 --- a/build-tools/repo_files/mock.cfg.centos7.distro.proto +++ b/build-tools/repo_files/mock.cfg.centos7.distro.proto @@ -5,6 +5,7 @@ config_opts['chroot_setup_cmd'] = 'install @buildsys-build' config_opts['dist'] = 'el7' # only useful for --resultdir variable subst config_opts['releasever'] = '7' config_opts['package_manager'] = 'yum' +config_opts['yum_builddep_command'] = 'MY_REPO_DIR/build-tools/yum-builddep-wrapper' config_opts['use_bootstrap'] = False config_opts['use_bootstrap_image'] = False config_opts['rpmbuild_networking'] = False diff --git a/build-tools/repo_files/mock.cfg.centos7.proto b/build-tools/repo_files/mock.cfg.centos7.proto index 779c5b59..2ca56fdc 100644 --- a/build-tools/repo_files/mock.cfg.centos7.proto +++ b/build-tools/repo_files/mock.cfg.centos7.proto @@ -5,6 +5,7 @@ config_opts['chroot_setup_cmd'] = 'install @buildsys-build' config_opts['dist'] = 'el7' # only useful for --resultdir variable subst config_opts['releasever'] = '7' config_opts['package_manager'] = 'yum' +config_opts['yum_builddep_command'] = 'MY_REPO_DIR/build-tools/yum-builddep-wrapper' config_opts['use_bootstrap'] = False config_opts['use_bootstrap_image'] = False config_opts['rpmbuild_networking'] = False diff --git a/build-tools/yum-builddep-wrapper b/build-tools/yum-builddep-wrapper new file mode 100755 index 00000000..4f82d2d2 --- /dev/null +++ b/build-tools/yum-builddep-wrapper @@ -0,0 +1,66 @@ +#!/bin/bash + +# Old versions of yum-builddep leave a stale yum.pid file behind. +# Remove that file if necessary after yum-builddep exits + +# find yum-builddep +YUM_BUILDDEP=$(which yum-builddep 2>/dev/null) + +# dnf: call it directly +if [[ -z $YUM_BUILDDEP ]] || grep -q -F dnf.cli "$YUM_BUILDDEP" ; then + yum-builddep "$@" + exit $? 
+fi + + +# old yum: scan command line for --installroot +ROOT_PREFIX= +YUM_CONF=/etc/yum.conf +find_root_prefix() { + while [[ "$#" -gt 0 ]] ; do + case "$1" in + --installroot) + ROOT_PREFIX="$2" + shift + ;; + --installroot=*) + ROOT_PREFIX="${1#*=}" + ;; + -c|--config) + YUM_CONF="$2" + shift + ;; + --config=*) + YUM_CONF="${1#*=}" + ;; + esac + shift + done + if [[ -z "$ROOT_PREFIX" ]] && [[ -f "$YUM_CONF" ]] ; then + ROOT_PREFIX=$(sed -rn 's/^\s*installroot\s*=\s*(\S+)\s*$/\1/p' $YUM_CONF) + fi +} +find_root_prefix "$@" + +# ignore signals -- always wait for yum-builddep +trap "" INT TERM HUP PIPE + +# run it in the background to get its PID +"$YUM_BUILDDEP" "$@" & +pid="$!" + +# wait for it +wait "$pid" +res="$?" + +# if yum.pid remains and contains yum-builddep's PID, delete it +if [[ -f "${ROOT_PREFIX}/run/yum.pid" ]] ; then + lock_owner= + read lock_owner <"${ROOT_PREFIX}/run/yum.pid" || : + if [[ -n $lock_owner && $lock_owner == $pid ]] ; then + rm -f "${ROOT_PREFIX}/run/yum.pid" + fi +fi + +# done +exit $res From 986e68b992314f1e1e9fdad9b2c3bc0a85747c5f Mon Sep 17 00:00:00 2001 From: Carmen Rata Date: Wed, 7 Apr 2021 22:28:27 -0400 Subject: [PATCH 26/37] Add openscap rpms to image.inc In order to be up-to-date with openscap tools used to scan for security violations we need to update the openscap rpms from: openscap-scanner-1.2.17-2.el7.x86_64 openscap-1.2.17-2.el7.x86_64 to: openscap-1.2.17-13.el7_9.x86_64.rpm openscap-scanner-1.2.17-13.el7_9.x86_64.rpm Added the 2 openscap rpms to image.inc to include them in the iso. 
Story: 2008668 Task: 42232 Depends-On: https://review.opendev.org/c/starlingx/tools/+/784801 Signed-off-by: Carmen Rata Change-Id: Id775dd299b35c085ef7f0762b778b8832c1fa68f --- build-tools/build_iso/image.inc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/build-tools/build_iso/image.inc b/build-tools/build_iso/image.inc index 663cbbfa..4c36213e 100644 --- a/build-tools/build_iso/image.inc +++ b/build-tools/build_iso/image.inc @@ -79,3 +79,7 @@ kata-runtime # For nvme disk firmware update nvme-cli + +# Add openscap tools +openscap +openscap-scanner From 2bdac9ac3b09332604919c8e7d39c1676c106ae5 Mon Sep 17 00:00:00 2001 From: Davlet Panech Date: Thu, 8 Apr 2021 15:17:44 -0400 Subject: [PATCH 27/37] Build and use py2 wheels with py2 projects Current build process can't handle python2-based projects because we only build wheels based on Ussuri (python3). This patch fixes this. - build-wheels/docker/stable-wheels-py2.cfg - build-wheels/docker/dev-wheels-py2.cfg Resurrected original cfg files from commit aa317323716a7f5bb844bb8867eeb8b01313b51e (before upgrade to Ussuri) and saved with -py2 suffix - build-wheels/docker/centos-dockerfile Install python2 packages in addition to python3 - build-wheels/docker/docker-build-wheel.sh * Build with python3 or python2 depending on the environment * Suppress status message at the end because it's better handled by the parent script (build-base-wheels.sh) - build-wheels/build-base-wheels.sh * Generate 2 wheel directories, "base" and "base-py2" at each invocation * Use the same builder image and docker script (with different environment) to generate wheels based on different cfg files. 
Save python3 wheels in "base" and python2 wheels in "base-py2" - build-wheels/build-wheel-tarball.sh * New command-line parameter --python2: generate *-wheels-py2.tar from "base-py2" * Use ussuri or train constraints depending on the presence of --python2 - build-docker-images/build-stx-images.sh * Use *-wheels-py2.tar for python2/loci projects * Fail early on missing --wheels/--wheels-py2 * Fail if multiple projects use the same LABEL * Remove support for config files - build-docker-images/docker-image-build-centos-dev.cfg - build-docker-images/docker-image-build-centos-stable.cfg Removed Change-Id: I2ca444f258a537ed2ba6f68206d32cf59e1802b4 Partial-Bug: 1891416 Signed-off-by: Davlet Panech --- .../build-docker-images/build-stx-images.sh | 251 +++++++----------- .../docker-image-build-centos-dev.cfg | 2 - .../docker-image-build-centos-stable.cfg | 2 - build-tools/build-wheels/build-base-wheels.sh | 200 +++++++++++--- .../build-wheels/build-wheel-tarball.sh | 49 +++- .../build-wheels/docker/centos-dockerfile | 7 + .../build-wheels/docker/dev-wheels-py2.cfg | 18 ++ .../build-wheels/docker/docker-build-wheel.sh | 32 ++- .../build-wheels/docker/stable-wheels-py2.cfg | 173 ++++++++++++ build-tools/build-wheels/openstack.cfg | 13 + 10 files changed, 526 insertions(+), 221 deletions(-) delete mode 100644 build-tools/build-docker-images/docker-image-build-centos-dev.cfg delete mode 100644 build-tools/build-docker-images/docker-image-build-centos-stable.cfg create mode 100644 build-tools/build-wheels/docker/dev-wheels-py2.cfg create mode 100644 build-tools/build-wheels/docker/stable-wheels-py2.cfg create mode 100644 build-tools/build-wheels/openstack.cfg diff --git a/build-tools/build-docker-images/build-stx-images.sh b/build-tools/build-docker-images/build-stx-images.sh index 34494b2e..d80d61f0 100755 --- a/build-tools/build-docker-images/build-stx-images.sh +++ b/build-tools/build-docker-images/build-stx-images.sh @@ -26,7 +26,6 @@ IMAGE_VERSION=$(date --utc 
'+%Y.%m.%d.%H.%M') # Default version, using timestamp PREFIX=dev LATEST_PREFIX="" PUSH=no -CONFIG_FILE="" HTTP_PROXY="" HTTPS_PROXY="" NO_PROXY="" @@ -34,16 +33,13 @@ DOCKER_USER=${USER} DOCKER_REGISTRY= BASE= WHEELS= -WHEELS_ALTERNATE= -DEFAULT_CONFIG_FILE_DIR="${MY_REPO}/build-tools/build-docker-images" -DEFAULT_CONFIG_FILE_PREFIX="docker-image-build" +WHEELS_PY2= CLEAN=no TAG_LATEST=no TAG_LIST_FILE= TAG_LIST_LATEST_FILE= declare -a ONLY declare -a SKIP -declare -a SERVICES_ALTERNATE declare -i MAX_ATTEMPTS=1 function usage { @@ -52,30 +48,32 @@ Usage: $(basename $0) Options: - --os: Specify base OS (valid options: ${SUPPORTED_OS_ARGS[@]}) - --version: Specify version for output image - --stream: Build stream, stable or dev (default: stable) - --base: Specify base docker image (required option) - --wheels: Specify path to wheels tarball or image, URL or docker tag (required option) - --wheels-alternate: Specify path to alternate wheels tarball or image, URL or docker tag - --push: Push to docker repo - --http_proxy: Set proxy :, urls splitted with "," - --https_proxy: Set proxy :, urls splitted with "," - --no_proxy: Set proxy , urls splitted with "," - --user: Docker repo userid - --registry: Docker registry - --prefix: Prefix on the image tag (default: dev) - --latest: Add a 'latest' tag when pushing + --os: Specify base OS (valid options: ${SUPPORTED_OS_ARGS[@]}) + --version: Specify version for output image + --stream: Build stream, stable or dev (default: stable) + --base: Specify base docker image (required option) + --wheels: Specify path to wheels tarball or image, URL or docker tag + (required when building loci projects) + --wheels-py2: Use this wheels tarball for Python2 projects + (default: work out from --wheels) + --wheels-alternate: same as --wheels-py2 + --push: Push to docker repo + --http_proxy: Set proxy :, urls splitted with "," + --https_proxy: Set proxy :, urls splitted with "," + --no_proxy: Set proxy , urls splitted with "," + --user: 
Docker repo userid + --registry: Docker registry + --prefix: Prefix on the image tag (default: dev) + --latest: Add a 'latest' tag when pushing --latest-prefix: Alternative prefix on the latest image tag - --clean: Remove image(s) from local registry + --clean: Remove image(s) from local registry --only : Only build the specified image(s). Multiple images can be specified with a comma-separated list, or with multiple --only arguments. --skip : Skip building the specified image(s). Multiple images can be specified with a comma-separated list, or with multiple --skip arguments. - --attempts: Max attempts, in case of failure (default: 1) - --config-file:Specify a path to a config file which will specify additional arguments to be passed into the the command + --attempts: Max attempts, in case of failure (default: 1) EOF @@ -97,63 +95,6 @@ function is_empty { test $# -eq 0 } -function get_args_from_file { - # get additional build args from specified file. - local -a config_items - - echo "Get args from file: $1" - for i in $(cat $1) - do - config_items=($(echo $i | sed s/=/\ /g)) - echo "--${config_items[0]} ${config_items[1]}" - case ${config_items[0]} in - base) - if [ -z "${BASE}" ]; then - BASE=${config_items[1]} - fi - ;; - user) - if [ -z "${DOCKER_USER}" ]; then - DOCKER_USER=${config_items[1]} - fi - ;; - proxy) - if [ -z "${PROXY}" ]; then - PROXY=${config_items[1]} - fi - ;; - registry) - if [ -z "${DOCKER_REGISTRY}" ]; then - # Add a trailing / if needed - DOCKER_REGISTRY="${config_items[1]%/}/" - fi - ;; - only) - # Read comma-separated values into array - if [ -z "${ONLY}" ]; then - # Read comma-separated values into array - ONLY=(`echo ${config_items[1]} | sed s/,/\ /g`) - fi - ;; - wheels) - if [ -z "${WHEELS}" ]; then - WHEELS=${config_items[1]} - fi - ;; - wheels_alternate) - if [ -z "${WHEELS_ALTERNATE}" ]; then - WHEELS_ALTERNATE=${config_items[1]} - echo "WHEELS_ALTERNATE: ${WHEELS_ALTERNATE}" >&2 - fi - ;; - services_alternate) - 
SERVICES_ALTERNATE=(`echo ${config_items[1]} | sed s/,/\ /g`) - echo "SERVICES_ALTERNATE: ${SERVICES_ALTERNATE[@]}" >&2 - ;; - esac - done -} - # # get_git: Clones a git into a subdirectory of ${WORKDIR}, and # leaves you in that directory. On error the directory @@ -394,16 +335,6 @@ function build_image_loci { local PYTHON3 PYTHON3=$(source ${image_build_file} && echo ${PYTHON3}) - if is_in ${PROJECT} ${SKIP[@]} || is_in ${LABEL} ${SKIP[@]}; then - echo "Skipping ${LABEL}" - return 0 - fi - - if ! is_empty ${ONLY[@]} && ! is_in ${PROJECT} ${ONLY[@]} && ! is_in ${LABEL} ${ONLY[@]}; then - echo "Skipping ${LABEL}" - return 0 - fi - echo "Building ${LABEL}" local -a BUILD_ARGS= @@ -411,9 +342,9 @@ function build_image_loci { BUILD_ARGS+=(--build-arg PROJECT_REPO=${PROJECT_REPO}) BUILD_ARGS+=(--build-arg FROM=${BASE}) - if is_in ${LABEL} ${SERVICES_ALTERNATE[@]}; then + if [ "${PYTHON3}" != "yes" ] ; then echo "Python2 service ${LABEL}" - BUILD_ARGS+=(--build-arg WHEELS=${WHEELS_ALTERNATE}) + BUILD_ARGS+=(--build-arg WHEELS=${WHEELS_PY2}) else echo "Python3 service ${LABEL}" BUILD_ARGS+=(--build-arg WHEELS=${WHEELS}) @@ -518,16 +449,6 @@ function build_image_docker { local DOCKER_PATCHES DOCKER_PATCHES=$(source ${image_build_file} && for p in ${DOCKER_PATCHES}; do echo $(dirname ${image_build_file})/${p}; done) - if is_in ${PROJECT} ${SKIP[@]} || is_in ${LABEL} ${SKIP[@]}; then - echo "Skipping ${LABEL}" - return 0 - fi - - if ! is_empty ${ONLY[@]} && ! is_in ${PROJECT} ${ONLY[@]} && ! is_in ${LABEL} ${ONLY[@]}; then - echo "Skipping ${LABEL}" - return 0 - fi - echo "Building ${LABEL}" local real_docker_context @@ -625,16 +546,6 @@ function build_image_script { local SOURCE_PATCHES SOURCE_PATCHES=$(source ${image_build_file} && for p in ${SOURCE_PATCHES}; do echo $(dirname ${image_build_file})/${p}; done) - if is_in ${PROJECT} ${SKIP[@]} || is_in ${LABEL} ${SKIP[@]}; then - echo "Skipping ${LABEL}" - return 0 - fi - - if ! is_empty ${ONLY[@]} && ! 
is_in ${PROJECT} ${ONLY[@]} && ! is_in ${LABEL} ${ONLY[@]}; then - echo "Skipping ${LABEL}" - return 0 - fi - # Validate the COMMAND option SUPPORTED_COMMAND_ARGS=('bash') local VALID_COMMAND=1 @@ -715,7 +626,7 @@ function build_image { esac } -OPTS=$(getopt -o h -l help,os:,version:,release:,stream:,push,http_proxy:,https_proxy:,no_proxy:,user:,registry:,base:,wheels:,wheels-alternate:,only:,skip:,prefix:,latest,latest-prefix:,clean,attempts:,config-file: -- "$@") +OPTS=$(getopt -o h -l help,os:,version:,release:,stream:,push,http_proxy:,https_proxy:,no_proxy:,user:,registry:,base:,wheels:,wheels-alternate:,wheels-py2:,only:,skip:,prefix:,latest,latest-prefix:,clean,attempts: -- "$@") if [ $? -ne 0 ]; then usage exit 1 @@ -742,8 +653,8 @@ while true; do WHEELS=$2 shift 2 ;; - --wheels-alternate) - WHEELS_ALTERNATE=$2 + --wheels-alternate|--wheels-py2) + WHEELS_PY2=$2 shift 2 ;; --version) @@ -813,10 +724,6 @@ while true; do MAX_ATTEMPTS=$2 shift 2 ;; - --config-file) - CONFIG_FILE=$2 - shift 2 - ;; -h | --help ) usage exit 1 @@ -842,37 +749,80 @@ if [ ${VALID_OS} -ne 0 ]; then exit 1 fi -DEFAULT_CONFIG_FILE="${DEFAULT_CONFIG_FILE_DIR}/${DEFAULT_CONFIG_FILE_PREFIX}-${OS}-${BUILD_STREAM}.cfg" - -# Read additional arguments from config file if it exists. -if [[ -z "$CONFIG_FILE" ]] && [[ -f ${DEFAULT_CONFIG_FILE} ]]; then - CONFIG_FILE=${DEFAULT_CONFIG_FILE} -fi -if [[ ! -z ${CONFIG_FILE} ]]; then - if [[ -f ${CONFIG_FILE} ]]; then - get_args_from_file ${CONFIG_FILE} - else - echo "Config file not found: ${CONFIG_FILE}" - exit 1 - fi -fi - -if [ -z "${WHEELS}" ]; then - echo "Path to wheels tarball must be specified with --wheels option." >&2 - exit 1 -fi - -if [ ${#SERVICES_ALTERNATE[@]} -ne 0 ] && [ -z "${WHEELS_ALTERNATE}" ]; then - echo "Path to wheels-alternate tarball must be specified with --wheels-alternate option"\ - "if python2 based services need to be build!" 
>&2 - exit 1 -fi - if [ -z "${BASE}" ]; then echo "Base image must be specified with --base option." >&2 exit 1 fi +# Guess WHEELS_PY2 if missing +if [[ -z "$WHEELS_PY2" && -n "$WHEELS" ]]; then + # http://foo/bar.tar?xxx#yyy => http://foo/bar-py2.tar?xxx#yyy + WHEELS_PY2="$(echo "$WHEELS" | sed -r 's,^([^#?]*)(\.tar)(\.gz|\.bz2|\.xz)?([#?].*)?$,\1-py2\2\3\4,i')" + if [[ "$WHEELS" == "$WHEELS_PY2" ]]; then + echo "Unable to guess --wheels-py2, please specify it explicitly" >&2 + exit 1 + fi +fi + +# Find the directives files +IMAGE_BUILD_FILES=() +function find_image_build_files { + local image_build_inc_file image_build_dir image_build_file + local -A all_labels + + for image_build_inc_file in $(find ${GIT_LIST} -maxdepth 1 -name "${OS}_${BUILD_STREAM}_docker_images.inc"); do + basedir=$(dirname ${image_build_inc_file}) + for image_build_dir in $(sed -e 's/#.*//' ${image_build_inc_file} | sort -u); do + for image_build_file in ${basedir}/${image_build_dir}/${OS}/*.${BUILD_STREAM}_docker_image; do + + # reset & read image build directive vars + local BUILDER= + local PROJECT= + local LABEL= + local PYTHON3= + PROJECT="$(source ${image_build_file} && echo ${PROJECT})" + BUILDER="$(source ${image_build_file} && echo ${BUILDER})" + LABEL="$(source ${image_build_file} && echo ${LABEL})" + PYTHON3="$(source ${image_build_file} && echo ${PYTHON3})" + + # make sure labels are unique + if [[ -n "${all_labels["$LABEL"]}" ]] ; then + echo "The following files define the same LABEL $LABEL" >&2 + echo " ${all_labels["$LABEL"]}" >&2 + echo " ${image_build_file}" >&2 + exit 1 + fi + all_labels["$LABEL"]="$image_build_file" + + # skip images we don't want to build + if is_in ${PROJECT} ${SKIP[@]} || is_in ${LABEL} ${SKIP[@]}; then + continue + fi + if ! is_empty ${ONLY[@]} && ! is_in ${PROJECT} ${ONLY[@]} && ! 
is_in ${LABEL} ${ONLY[@]}; then + continue + fi + + # loci builders require a wheels tarball + if [[ "${BUILDER}" == "loci" ]] ; then + # python3 projects require $WHEELS + if [[ "${PYTHON3}" == "yes" && -z "${WHEELS}" ]] ; then + echo "You are building python3 services with loci, but you didn't specify --wheels!" >&2 + exit 1 + # python2 projects require WHEELS_PY2 + elif [[ "${PYTHON3}" != "yes" && -z "${WHEELS_PY2}" ]] ; then + echo "You are building python2 services with loci, but you didn't specify --wheels-py2!" >&2 + exit 1 + fi + fi + + # Save image build file in the global list + IMAGE_BUILD_FILES+=("$image_build_file") + done + done + done +} +find_image_build_files + IMAGE_TAG="${OS}-${BUILD_STREAM}" IMAGE_TAG_LATEST="${IMAGE_TAG}-latest" @@ -933,15 +883,10 @@ if ! (grep -q rh-python36-mod_wsgi ${WORKDIR}/loci/bindep.txt); then echo 'rh-python36-mod_wsgi [platform:rpm !platform:suse (apache python3)]' >> ${WORKDIR}/loci/bindep.txt fi -# Find the directives files -for image_build_inc_file in $(find ${GIT_LIST} -maxdepth 1 -name "${OS}_${BUILD_STREAM}_docker_images.inc"); do - basedir=$(dirname ${image_build_inc_file}) - for image_build_dir in $(sed -e 's/#.*//' ${image_build_inc_file} | sort -u); do - for image_build_file in ${basedir}/${image_build_dir}/${OS}/*.${BUILD_STREAM}_docker_image; do - # Failures are reported by the build functions - build_image ${image_build_file} - done - done +# Build everything +for image_build_file in "${IMAGE_BUILD_FILES[@]}" ; do + # Failures are reported by the build functions + build_image ${image_build_file} done if [ "${CLEAN}" = "yes" -a ${#RESULTS_BUILT[@]} -gt 0 ]; then diff --git a/build-tools/build-docker-images/docker-image-build-centos-dev.cfg b/build-tools/build-docker-images/docker-image-build-centos-dev.cfg deleted file mode 100644 index 80e5ace3..00000000 --- a/build-tools/build-docker-images/docker-image-build-centos-dev.cfg +++ /dev/null @@ -1,2 +0,0 @@ 
-services_alternate=stx-fm-rest-api,stx-keystone-api-proxy,stx-nova-api-proxy,stx-platformclients -wheels_alternate=http://mirror.starlingx.cengn.ca/mirror/starlingx/master/centos/stx-centos-py2_dev-wheels.tar diff --git a/build-tools/build-docker-images/docker-image-build-centos-stable.cfg b/build-tools/build-docker-images/docker-image-build-centos-stable.cfg deleted file mode 100644 index 871d2271..00000000 --- a/build-tools/build-docker-images/docker-image-build-centos-stable.cfg +++ /dev/null @@ -1,2 +0,0 @@ -services_alternate=stx-fm-rest-api,stx-keystone-api-proxy,stx-nova-api-proxy,stx-platformclients -wheels_alternate=http://mirror.starlingx.cengn.ca/mirror/starlingx/master/centos/stx-centos-py2_stable-wheels.tar diff --git a/build-tools/build-wheels/build-base-wheels.sh b/build-tools/build-wheels/build-base-wheels.sh index 4ab913c1..b0274f22 100755 --- a/build-tools/build-wheels/build-base-wheels.sh +++ b/build-tools/build-wheels/build-base-wheels.sh @@ -114,8 +114,6 @@ while true; do esac done -BUILD_OUTPUT_PATH=${MY_WORKSPACE}/std/build-wheels-${OS}-${BUILD_STREAM}/base - BUILD_IMAGE_NAME="${USER}-$(basename ${MY_WORKSPACE})-wheelbuilder:${OS}-${BUILD_STREAM}" # BUILD_IMAGE_NAME can't have caps if it's passed to docker build -t $BUILD_IMAGE_NAME. @@ -123,7 +121,6 @@ BUILD_IMAGE_NAME="${USER}-$(basename ${MY_WORKSPACE})-wheelbuilder:${OS}-${BUILD BUILD_IMAGE_NAME="${BUILD_IMAGE_NAME,,}" DOCKER_FILE=${DOCKER_PATH}/${OS}-dockerfile -WHEELS_CFG=${DOCKER_PATH}/${BUILD_STREAM}-wheels.cfg function supported_os_list { for f in ${DOCKER_PATH}/*-dockerfile; do @@ -137,40 +134,75 @@ if [ ! -f ${DOCKER_FILE} ]; then exit 1 fi -if [ ! 
-f ${WHEELS_CFG} ]; then - echo "Required file does not exist: ${WHEELS_CFG}" >&2 - exit 1 -fi +# Print a loud message +function notice { + ( + set +x + echo + echo ====================================== + for s in "$@" ; do + echo "$s" + done + echo ====================================== + echo + ) 2>&1 +} -# -# Check build output directory for unexpected files, -# ie. wheels from old builds that are no longer in wheels.cfg -# -if [ -d ${BUILD_OUTPUT_PATH} ]; then +# prefix each line of a command's output +# also redirects command's STDERR to STDOUT +log_prefix() { + local prefix="$1" ; shift + "$@" 2>&1 | awk -v prefix="$prefix" '{print prefix $0}' + # return false if the command (rather than awk) failed + [ ${PIPESTATUS[0]} -eq 0 ] +} - for f in ${BUILD_OUTPUT_PATH}/*; do - grep -q "^$(basename $f)|" ${WHEELS_CFG} - if [ $? -ne 0 ]; then - echo "Deleting stale file: $f" - rm -f $f - fi - done -else - mkdir -p ${BUILD_OUTPUT_PATH} - if [ $? -ne 0 ]; then - echo "Failed to create directory: ${BUILD_OUTPUT_PATH}" >&2 + +# Make sure a file exists, exit otherwise +function require_file { + if [ ! -f "${1}" ]; then + echo "Required file does not exist: ${1}" >&2 exit 1 fi -fi +} -# Check to see if we need to build anything -BUILD_NEEDED=no -for wheel in $(cat ${WHEELS_CFG} | sed 's/#.*//' | awk -F '|' '{print $1}'); do - if [[ "${wheel}" =~ \* || ! -f ${BUILD_OUTPUT_PATH}/${wheel} ]]; then - BUILD_NEEDED=yes - break +# Check build output directory for unexpected files, +# ie. wheels from old builds that are no longer in wheels.cfg +function prepare_output_dir { + local output_dir="$1" + local wheels_cfg="$2" + if [ -d ${output_dir} ]; then + local f + for f in ${output_dir}/*; do + if [ -f $f ] ; then + grep -q "^$(basename $f)|" ${wheels_cfg} + if [ $? -ne 0 ]; then + echo "Deleting stale file: $f" + rm -f $f + fi + fi + done + else + mkdir -p ${output_dir} + if [ $? 
-ne 0 ]; then + echo "Failed to create directory: ${output_dir}" >&2 + exit 1 + fi fi -done +} + +BUILD_OUTPUT_PATH=${MY_WORKSPACE}/std/build-wheels-${OS}-${BUILD_STREAM}/base +BUILD_OUTPUT_PATH_PY2=${MY_WORKSPACE}/std/build-wheels-${OS}-${BUILD_STREAM}/base-py2 +WHEELS_CFG=${DOCKER_PATH}/${BUILD_STREAM}-wheels.cfg +WHEELS_CFG_PY2=${DOCKER_PATH}/${BUILD_STREAM}-wheels-py2.cfg + +# make sure .cfg files exist +require_file "${WHEELS_CFG}" +require_file "${WHEELS_CFG_PY2}" + +# prepare output directories +prepare_output_dir "${BUILD_OUTPUT_PATH}" "${WHEELS_CFG}" +prepare_output_dir "${BUILD_OUTPUT_PATH_PY2}" "${WHEELS_CFG_PY2}" if [ "${BUILD_STREAM}" = "dev" -o "${BUILD_STREAM}" = "master" ]; then # Download the master wheel from loci, so we're only building pieces not covered by it @@ -194,16 +226,30 @@ if [ "${BUILD_STREAM}" = "dev" -o "${BUILD_STREAM}" = "master" ]; then docker run --name ${USER}_inspect_wheels ${MASTER_WHEELS_IMAGE} noop 2>/dev/null echo "Extracting wheels from ${MASTER_WHEELS_IMAGE}" - docker export ${USER}_inspect_wheels | tar x -C ${BUILD_OUTPUT_PATH} '*.whl' + rm -rf "${BUILD_OUTPUT_PATH}-loci" + mkdir -p "$BUILD_OUTPUT_PATH-loci" + docker export ${USER}_inspect_wheels | tar x -C "${BUILD_OUTPUT_PATH}-loci" '*.whl' if [ ${PIPESTATUS[0]} -ne 0 -o ${PIPESTATUS[1]} -ne 0 ]; then echo "Failed to extract wheels from ${MASTER_WHEELS_IMAGE}" >&2 docker rm ${USER}_inspect_wheels if [ ${MASTER_WHEELS_PRESENT} -ne 0 ]; then docker image rm ${MASTER_WHEELS_IMAGE} fi + rm -rf "${BUILD_OUTPUT_PATH}-loci" exit 1 fi + # copy loci wheels in base and base-py2 directories + if ! cp "${BUILD_OUTPUT_PATH}-loci"/*.whl "${BUILD_OUTPUT_PATH}"/ ; then + echo "Failed to copy wheels to ${BUILD_OPUTPUT_PATH}" >&2 + exit 1 + fi + if ! 
cp "${BUILD_OUTPUT_PATH}-loci"/*.whl "${BUILD_OUTPUT_PATH_PY2}"/ ; then + echo "Failed to copy wheels to ${BUILD_OPUTPUT_PATH_PY2}" >&2 + exit 1 + fi + rm -rf "${BUILD_OUTPUT_PATH}-loci" + docker rm ${USER}_inspect_wheels if [ ${MASTER_WHEELS_PRESENT} -ne 0 ]; then @@ -211,7 +257,21 @@ if [ "${BUILD_STREAM}" = "dev" -o "${BUILD_STREAM}" = "master" ]; then fi fi -if [ "${BUILD_NEEDED}" = "no" ]; then +# check if there are any wheels missing +function all_wheels_exist { + local output_dir="$1" + local wheels_cfg="$2" + local wheel + for wheel in $(cat "${wheels_cfg}" | sed 's/#.*//' | awk -F '|' '{print $1}'); do + if [[ "${wheel}" =~ \* || ! -f ${output_dir}/${wheel} ]]; then + return 1 + fi + done + return 0 +} + +if all_wheels_exist "${BUILD_OUTPUT_PATH}" "${WHEELS_CFG}" && \ + all_wheels_exist "${BUILD_OUTPUT_PATH_PY2}" "${WHEELS_CFG_PY2}" ; then echo "All base wheels are already present. Skipping build." exit 0 fi @@ -247,12 +307,10 @@ if [ $? -ne 0 ]; then fi # Run the image, executing the build-wheel.sh script -RM_OPT= -if [ "${KEEP_CONTAINER}" = "no" ]; then - RM_OPT="--rm" -fi - declare -a RUN_ARGS +if [ "${KEEP_CONTAINER}" = "no" ]; then + RUN_ARGS+=(--rm) +fi if [ ! -z "$HTTP_PROXY" ]; then RUN_ARGS+=(--env http_proxy=$HTTP_PROXY) fi @@ -262,11 +320,23 @@ fi if [ ! -z "$NO_PROXY" ]; then RUN_ARGS+=(--env no_proxy=$NO_PROXY) fi - -RUN_ARGS+=(${RM_OPT} -v ${BUILD_OUTPUT_PATH}:/wheels ${BUILD_IMAGE_NAME} /docker-build-wheel.sh) +RUN_ARGS+=(--env DISPLAY_RESULT=no) # Run container to build wheels -with_retries ${MAX_ATTEMPTS} docker run ${RUN_ARGS[@]} +rm -f ${BUILD_OUTPUT_PATH}/failed.lst +rm -f ${BUILD_OUTPUT_PATH_PY2}/failed.lst + +notice "building python3 wheels" +log_prefix "[python3] " \ + with_retries ${MAX_ATTEMPTS} \ + docker run ${RUN_ARGS[@]} -v ${BUILD_OUTPUT_PATH}:/wheels ${BUILD_IMAGE_NAME} /docker-build-wheel.sh +BUILD_STATUS=$? 
+ +notice "building python2 wheels" +log_prefix "[python2] " \ + with_retries ${MAX_ATTEMPTS} \ + docker run ${RUN_ARGS[@]} -v ${BUILD_OUTPUT_PATH_PY2}:/wheels --env PYTHON=python2 ${BUILD_IMAGE_NAME} /docker-build-wheel.sh +BUILD_STATUS_PY2=$? if [ "${KEEP_IMAGE}" = "no" ]; then # Delete the builder image @@ -287,8 +357,52 @@ if [ "${KEEP_IMAGE}" = "no" ]; then fi # Check for failures -if [ -f ${BUILD_OUTPUT_PATH}/failed.lst ]; then - # Failures would already have been reported +check_result() { + local python="$1" + local status="$2" + local dir="$3" + + # There's a failed images list + if [ -f "${dir}/failed.lst" ]; then + let failures=$(cat "${dir}/failed.lst" | wc -l) + + cat <&2 <&2 exit 1 fi -with_retries ${MAX_ATTEMPTS} wget https://raw.githubusercontent.com/openstack/requirements/${OPENSTACK_BRANCH}/upper-constraints.txt +with_retries ${MAX_ATTEMPTS} wget "${OPENSTACK_REQ_URL}/upper-constraints.txt" if [ $? -ne 0 ]; then echo "Failed to download upper-constraints.txt" >&2 exit 1 @@ -230,7 +259,7 @@ done shopt -s nullglob # Copy the base and stx wheels, updating upper-constraints.txt as necessary -for wheel in ../base/*.whl ../stx/wheels/*.whl; do +for wheel in ../base${PY_SUFFIX}/*.whl ../stx/wheels/*.whl; do # Get the wheel name and version from the METADATA METADATA=$(unzip -p ${wheel} '*/METADATA') name=$(echo "${METADATA}" | grep '^Name:' | awk '{print $2}') diff --git a/build-tools/build-wheels/docker/centos-dockerfile b/build-tools/build-wheels/docker/centos-dockerfile index c2dd825f..170a71a9 100644 --- a/build-tools/build-wheels/docker/centos-dockerfile +++ b/build-tools/build-wheels/docker/centos-dockerfile @@ -20,3 +20,10 @@ RUN set -ex ;\ COPY docker-build-wheel.sh / COPY ${BUILD_STREAM}-wheels.cfg /wheels.cfg +# Python2 packages +RUN set -ex; \ + yum -y install python python-devel ;\ + wget https://bootstrap.pypa.io/pip/2.7/get-pip.py ;\ + python get-pip.py +COPY ${BUILD_STREAM}-wheels-py2.cfg /wheels-py2.cfg + diff --git 
a/build-tools/build-wheels/docker/dev-wheels-py2.cfg b/build-tools/build-wheels/docker/dev-wheels-py2.cfg new file mode 100644 index 00000000..0bf19816 --- /dev/null +++ b/build-tools/build-wheels/docker/dev-wheels-py2.cfg @@ -0,0 +1,18 @@ +# +# git: wheelname|git|git-source|basedir|branch +# tar: wheelname|tar|wget-source|basedir +# pypi: wheelname|pypi|wget-source +# zip: wheelname|zip|wget-source|basedir +# +# If fix_setup must be called, add |fix_setup at the end of the line +# +amqplib-1.0.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/75/b7/8c2429bf8d92354a0118614f9a4d15e53bc69ebedce534284111de5a0102/amqplib-1.0.2.tgz|amqplib-1.0.2 +lz4-0.9.0-cp27-none-linux_x86_64.whl|git|https://github.com/python-lz4/python-lz4|python-lz4|v0.9.0 +panko-5.0.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/a9/89/d666e0889d869e41c9b7f87a0a34858b2520782b82e025da84c98e0db8f6/panko-5.0.0.tar.gz|panko-5.0.0 +google_api_python_client-1.7.7-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/d7/47/940908e52487440f61fb93ad55cbbe3a28235d3bb143b26affb17b37dd28/google_api_python_client-1.7.7-py2.py3-none-any.whl +neutron_lib-*.whl|git|https://github.com/openstack/neutron-lib|neutron-lib|master +python_openstackclient-*.whl|git|https://github.com/openstack/python-openstackclient|python-openstackclient|master +openstacksdk-*.whl|git|https://github.com/openstack/openstacksdk|openstacksdk|master +networking_sfc-8.0.0.0b2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/6a/a8/0e9bdd1f87dfb50682f23a01f590530ec8fa715e51127cf9f58d1905886c/networking_sfc-8.0.0.0b2-py2.py3-none-any.whl +croniter-0.3.29-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/a9/c9/11182a2507798c661b04a7914739ea8ca73a738e6869a23742029f51bc1a/croniter-0.3.29-py2.py3-none-any.whl 
+pecan-1.3.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/93/98/889d7615595e894f4f7e4c17d4008c822c8e39e650c8ab390cc6c39b99c4/pecan-1.3.3.tar.gz|pecan-1.3.3 diff --git a/build-tools/build-wheels/docker/docker-build-wheel.sh b/build-tools/build-wheels/docker/docker-build-wheel.sh index 9da8b84f..59e752a4 100755 --- a/build-tools/build-wheels/docker/docker-build-wheel.sh +++ b/build-tools/build-wheels/docker/docker-build-wheel.sh @@ -10,8 +10,14 @@ CFGFILE=/wheels.cfg OUTPUTDIR=/wheels -FAILED_LOG=$OUTPUTDIR/failed.lst +FAILED_LOG="${OUTPUTDIR}/failed.lst" +: ${DISPLAY_RESULT=yes} declare -i MAX_ATTEMPTS=5 +: ${PYTHON=python3} +if [[ "${PYTHON}" == "python2" ]] ; then + CFGFILE=/wheels-py2.cfg + FAILED_LOG="${OUTPUTDIR}/failed-py2.lst" +fi # # Function to log the start of a build @@ -184,7 +190,7 @@ function from_git { fi # Build the wheel - python3 setup.py bdist_wheel + ${PYTHON} setup.py bdist_wheel if [ -f dist/$wheelname ]; then cp dist/$wheelname $OUTPUTDIR || echo $wheelname >> $FAILED_LOG else @@ -244,7 +250,7 @@ function from_tar { fi # Build the wheel - python3 setup.py bdist_wheel + ${PYTHON} setup.py bdist_wheel if [ -f dist/$wheelname ]; then cp dist/$wheelname $OUTPUTDIR || echo $wheelname >> $FAILED_LOG else @@ -295,7 +301,7 @@ function from_zip { fi # Build the wheel - python3 setup.py bdist_wheel + ${PYTHON} setup.py bdist_wheel if [ -f dist/$wheelname ]; then cp dist/$wheelname $OUTPUTDIR || echo $wheelname >> $FAILED_LOG else @@ -339,24 +345,28 @@ from_tar from_zip from_pypi -if [ -f $FAILED_LOG ]; then - let failures=$(cat $FAILED_LOG | wc -l) +if [ -f "${FAILED_LOG}" ]; then + if [ "${DISPLAY_RESULT}" = yes ] ; then + let failures=$(cat "${FAILED_LOG}" | wc -l) - cat < Date: Thu, 15 Apr 2021 15:07:56 -0400 Subject: [PATCH 28/37] Update stx-platformclients tag to stx.5.0-v1.4.1 This commit updates the tag to pick up the patch that builds this image in python2, rather than python3 environment because stx-platformclients currently 
doesn't support python3. Change-Id: Ic8785d271f9ea7311708464a8cb75cc306ef209c Partial-Bug: 1891416 Signed-off-by: Davlet Panech --- .../build-docker-images/tag-management/image-tags.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/build-tools/build-docker-images/tag-management/image-tags.yaml b/build-tools/build-docker-images/tag-management/image-tags.yaml index 3f338279..9041fd4e 100644 --- a/build-tools/build-docker-images/tag-management/image-tags.yaml +++ b/build-tools/build-docker-images/tag-management/image-tags.yaml @@ -55,9 +55,9 @@ images: src_ref: https://opendev.org/starlingx/metal/commit/d46c9c55a9a9b7ea09e8d0fe66c8cfbeeb9ac75f tag: stx.5.0-v1.0.0 - name: docker.io/starlingx/stx-platformclients - src_build_tag: master-centos-stable-20200803T230630Z.0 - src_ref: https://opendev.org/starlingx/distcloud-client/commit/7036f1fd11cd3bbae743aee89908e8195e4ded40 - tag: stx.5.0-v1.4.0 + src_build_tag: master-centos-stable-20210415T134403Z.0 + src_ref: https://opendev.org/starlingx/root/commit/2bdac9ac3b09332604919c8e7d39c1676c106ae5 + tag: stx.5.0-v1.4.1 - name: docker.io/starlingx/stx-vault-manager src_build_tag: master-centos-stable-20200722T035334Z.0 src_ref: https://opendev.org/starlingx/vault-armada-app/commit/2cd206d6703cc2733e39ecad4539c0d5f1600550 From 26aba37d117600a4f4553d833f58df438548ae01 Mon Sep 17 00:00:00 2001 From: Thiago Brito Date: Fri, 16 Apr 2021 11:07:32 -0400 Subject: [PATCH 29/37] Fixing dev stream py3 wheels Some wheels on the py3 files were still pointing towards the py2 versions, some were outdated with the wheels used on the stable build, so fixing this so we can get a proper build from the dev stream. 
Signed-off-by: Thiago Brito Change-Id: I2b69d9afa06d22441d8270c25dbb0a3cd08a4b99 --- build-tools/build-wheels/docker/dev-wheels-py2.cfg | 2 +- build-tools/build-wheels/docker/dev-wheels.cfg | 13 +++++++------ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/build-tools/build-wheels/docker/dev-wheels-py2.cfg b/build-tools/build-wheels/docker/dev-wheels-py2.cfg index 0bf19816..244178d8 100644 --- a/build-tools/build-wheels/docker/dev-wheels-py2.cfg +++ b/build-tools/build-wheels/docker/dev-wheels-py2.cfg @@ -7,7 +7,7 @@ # If fix_setup must be called, add |fix_setup at the end of the line # amqplib-1.0.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/75/b7/8c2429bf8d92354a0118614f9a4d15e53bc69ebedce534284111de5a0102/amqplib-1.0.2.tgz|amqplib-1.0.2 -lz4-0.9.0-cp27-none-linux_x86_64.whl|git|https://github.com/python-lz4/python-lz4|python-lz4|v0.9.0 +lz4-0.9.0-cp27-cp27mu-linux_x86_64.whl|git|https://github.com/python-lz4/python-lz4|python-lz4|v0.9.0 panko-5.0.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/a9/89/d666e0889d869e41c9b7f87a0a34858b2520782b82e025da84c98e0db8f6/panko-5.0.0.tar.gz|panko-5.0.0 google_api_python_client-1.7.7-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/d7/47/940908e52487440f61fb93ad55cbbe3a28235d3bb143b26affb17b37dd28/google_api_python_client-1.7.7-py2.py3-none-any.whl neutron_lib-*.whl|git|https://github.com/openstack/neutron-lib|neutron-lib|master diff --git a/build-tools/build-wheels/docker/dev-wheels.cfg b/build-tools/build-wheels/docker/dev-wheels.cfg index 0bf19816..2e0d7dad 100644 --- a/build-tools/build-wheels/docker/dev-wheels.cfg +++ b/build-tools/build-wheels/docker/dev-wheels.cfg @@ -6,13 +6,14 @@ # # If fix_setup must be called, add |fix_setup at the end of the line # -amqplib-1.0.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/75/b7/8c2429bf8d92354a0118614f9a4d15e53bc69ebedce534284111de5a0102/amqplib-1.0.2.tgz|amqplib-1.0.2 
-lz4-0.9.0-cp27-none-linux_x86_64.whl|git|https://github.com/python-lz4/python-lz4|python-lz4|v0.9.0 -panko-5.0.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/a9/89/d666e0889d869e41c9b7f87a0a34858b2520782b82e025da84c98e0db8f6/panko-5.0.0.tar.gz|panko-5.0.0 +amqplib-1.0.2-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/75/b7/8c2429bf8d92354a0118614f9a4d15e53bc69ebedce534284111de5a0102/amqplib-1.0.2.tgz|amqplib-1.0.2 +croniter-0.3.29-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/a9/c9/11182a2507798c661b04a7914739ea8ca73a738e6869a23742029f51bc1a/croniter-0.3.29-py2.py3-none-any.whl google_api_python_client-1.7.7-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/d7/47/940908e52487440f61fb93ad55cbbe3a28235d3bb143b26affb17b37dd28/google_api_python_client-1.7.7-py2.py3-none-any.whl +lz4-0.9.0-cp36-cp36m-linux_x86_64.whl|git|https://github.com/python-lz4/python-lz4|python-lz4|v0.9.0 +networking_sfc-8.0.0.0b2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/6a/a8/0e9bdd1f87dfb50682f23a01f590530ec8fa715e51127cf9f58d1905886c/networking_sfc-8.0.0.0b2-py2.py3-none-any.whl neutron_lib-*.whl|git|https://github.com/openstack/neutron-lib|neutron-lib|master python_openstackclient-*.whl|git|https://github.com/openstack/python-openstackclient|python-openstackclient|master openstacksdk-*.whl|git|https://github.com/openstack/openstacksdk|openstacksdk|master -networking_sfc-8.0.0.0b2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/6a/a8/0e9bdd1f87dfb50682f23a01f590530ec8fa715e51127cf9f58d1905886c/networking_sfc-8.0.0.0b2-py2.py3-none-any.whl -croniter-0.3.29-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/a9/c9/11182a2507798c661b04a7914739ea8ca73a738e6869a23742029f51bc1a/croniter-0.3.29-py2.py3-none-any.whl 
-pecan-1.3.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/93/98/889d7615595e894f4f7e4c17d4008c822c8e39e650c8ab390cc6c39b99c4/pecan-1.3.3.tar.gz|pecan-1.3.3 +panko-5.0.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/a9/89/d666e0889d869e41c9b7f87a0a34858b2520782b82e025da84c98e0db8f6/panko-5.0.0.tar.gz|panko-5.0.0 +pecan-1.3.3-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/93/98/889d7615595e894f4f7e4c17d4008c822c8e39e650c8ab390cc6c39b99c4/pecan-1.3.3.tar.gz|pecan-1.3.3 + From 0178b539ed092f2b78bb09c6c35056676419c7e7 Mon Sep 17 00:00:00 2001 From: Giana Francisco Date: Wed, 21 Apr 2021 09:33:40 -0300 Subject: [PATCH 30/37] build-img: build PATCH.img-addon as REL and non-RR patch Added parameters --status REL and --reboot-required N when PATCH.img-addon is created from build-img script. In order to allow to sw-patch commits the PATCH.img-addon.patch Story: 2007858 Task: 42316 Signed-off-by: Giana Francisco Change-Id: Ief73924e52f364e70f7ad2713480536c953c055b --- build-tools/build-img | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build-tools/build-img b/build-tools/build-img index fb71bd1d..e0f22f86 100755 --- a/build-tools/build-img +++ b/build-tools/build-img @@ -566,7 +566,7 @@ if [ ${#RPM_ADDON_LIST[@]} -gt 0 ] ; then pushd $MY_WORKSPACE patch_file="PATCH.img-addon" patched_iso="$TEMPFILES_DIR/bootimage_${AUTO_MODE}${GRAPHICAL_SUFFIX}_patched.iso" - cmd=("$PATCH_BUILD" --id "${patch_file}" --summary "additional packages for qcow2 image" --desc "Adds customizations to qcow2 image") + cmd=("$PATCH_BUILD" --id "${patch_file}" --summary "additional packages for qcow2 image" --desc "Adds customizations to qcow2 image" --status "REL" --reboot-required "N") for rpm_addon in "${RPM_ADDON_LIST[@]}"; do cmd+=(--all-nodes "${rpm_addon}") done From e16bd7d01b14862b08f8c58751102afddbe0eb82 Mon Sep 17 00:00:00 2001 From: Rafael Jardim Date: Tue, 20 Apr 2021 13:52:13 -0300 Subject: [PATCH 31/37] Update 
stx-platformclients tag to stx.5.0-v1.4.2 This commit updates the image with the updated clients cgts-client and distributed-cloud-client to execute both in python2 and python3. Story: 2007106 Task: 42314 Signed-off-by: Rafael Jardim Change-Id: Ia037a8e437ffd371bd20558eff716b2678a7d171 --- .../build-docker-images/tag-management/image-tags.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/build-tools/build-docker-images/tag-management/image-tags.yaml b/build-tools/build-docker-images/tag-management/image-tags.yaml index 9041fd4e..be7aa333 100644 --- a/build-tools/build-docker-images/tag-management/image-tags.yaml +++ b/build-tools/build-docker-images/tag-management/image-tags.yaml @@ -55,9 +55,9 @@ images: src_ref: https://opendev.org/starlingx/metal/commit/d46c9c55a9a9b7ea09e8d0fe66c8cfbeeb9ac75f tag: stx.5.0-v1.0.0 - name: docker.io/starlingx/stx-platformclients - src_build_tag: master-centos-stable-20210415T134403Z.0 - src_ref: https://opendev.org/starlingx/root/commit/2bdac9ac3b09332604919c8e7d39c1676c106ae5 - tag: stx.5.0-v1.4.1 + src_build_tag: master-centos-stable-20210420T000040Z.0 + src_ref: https://opendev.org/starlingx/distcloud-client/commit/859864c21dadf0fc1888f5df94853a3c6d5472ac + tag: stx.5.0-v1.4.2 - name: docker.io/starlingx/stx-vault-manager src_build_tag: master-centos-stable-20200722T035334Z.0 src_ref: https://opendev.org/starlingx/vault-armada-app/commit/2cd206d6703cc2733e39ecad4539c0d5f1600550 From 33895c29224792ad1845d8f9bfa37ee484899114 Mon Sep 17 00:00:00 2001 From: Don Penney Date: Tue, 20 Apr 2021 19:31:53 -0400 Subject: [PATCH 32/37] Add MIRROR_LOCAL support to build-stx-images.sh As the loci build runs in a container, it is unable to directly use a PROJECT_REPO ref that requires ssh keys to be setup. In order to support this, a MIRROR_LOCAL option is added to loci image directives files. 
If the image directives file has MIRROR_LOCAL=yes, the build utility will first setup a bare clone of the PROJECT_REPO in the local workspace and instead pass a reference to that to the loci build. This functionality can be used by developers to test an image build using a private forked repo that requires an ssh key to access, for example. This should not be used in the formal build, as CENGN and other developers are unlikely to have the required access. Note: This requires that the build server in use has lighttpd server setup with external access, as the container cannot access localhost. Change-Id: Ibd260fbb47d2bece4dc27e1cf1c026fb5cd5ff0f Story: 2003907 Task: 42364 Co-Authored-By: Scott Little Signed-off-by: Don Penney --- .../build-docker-images/build-stx-images.sh | 37 +++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/build-tools/build-docker-images/build-stx-images.sh b/build-tools/build-docker-images/build-stx-images.sh index d80d61f0..e45a9a4a 100755 --- a/build-tools/build-docker-images/build-stx-images.sh +++ b/build-tools/build-docker-images/build-stx-images.sh @@ -334,9 +334,46 @@ function build_image_loci { PROFILES=$(source ${image_build_file} && echo ${PROFILES}) local PYTHON3 PYTHON3=$(source ${image_build_file} && echo ${PYTHON3}) + local MIRROR_LOCAL + MIRROR_LOCAL=$(source ${image_build_file} && echo ${MIRROR_LOCAL}) echo "Building ${LABEL}" + local ORIGWD=${PWD} + + if [ "${MIRROR_LOCAL}" = "yes" ]; then + # Setup a local mirror of PROJECT_REPO + + local BARE_CLONES=${WORKDIR}/bare_clones + mkdir -p ${BARE_CLONES} + if [ $? -ne 0 ]; then + echo "Failed to create ${BARE_CLONES}" >&2 + RESULTS_FAILED+=(${LABEL}) + return 1 + fi + + local CLONE_DIR=${BARE_CLONES}/${PROJECT}.git + + # Remove prior clone dir, if it exists + \rm -rf ${CLONE_DIR} + + echo "Creating bare clone of ${PROJECT_REPO} for ${LABEL} build..." 
+ git clone --bare ${PROJECT_REPO} ${CLONE_DIR} \ + && mv ${CLONE_DIR}/hooks/post-update.sample ${CLONE_DIR}/hooks/post-update \ + && chmod a+x ${CLONE_DIR}/hooks/post-update \ + && cd ${CLONE_DIR} \ + && git update-server-info \ + && cd ${ORIGWD} + if [ $? -ne 0 ]; then + echo "Failed to clone ${PROJECT_REPO}... Aborting ${LABEL} build" + RESULTS_FAILED+=(${LABEL}) + cd ${ORIGWD} + return 1 + fi + + PROJECT_REPO=http://${HOSTNAME}:8088/${CLONE_DIR} + fi + local -a BUILD_ARGS= BUILD_ARGS=(--build-arg PROJECT=${PROJECT}) BUILD_ARGS+=(--build-arg PROJECT_REPO=${PROJECT_REPO}) From 062353b6de7d4b0e017203c3e6086891bd6b9213 Mon Sep 17 00:00:00 2001 From: Cole Walker Date: Mon, 3 May 2021 09:48:54 -0400 Subject: [PATCH 33/37] Update image tag for notificationclient-base Update image tag to stx.5.0-v1.0.4 for notificationclient-base Closes-Bug: 1924198 Closes-Bug: 1926532 Signed-off-by: Cole Walker Change-Id: Ie09ae4f28864aa6ee8a549bd15be5645519df7b9 --- .../build-docker-images/tag-management/image-tags.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/build-tools/build-docker-images/tag-management/image-tags.yaml b/build-tools/build-docker-images/tag-management/image-tags.yaml index be7aa333..13cfd363 100644 --- a/build-tools/build-docker-images/tag-management/image-tags.yaml +++ b/build-tools/build-docker-images/tag-management/image-tags.yaml @@ -89,6 +89,6 @@ images: src_ref: https://opendev.org/starlingx/ptp-notification-armada-app/commit/545e6b6bb093235c2f8dab8d171f30c6ae8682d3 tag: stx.5.0-v1.0.1 - name: docker.io/starlingx/notificationclient-base - src_build_tag: master-centos-stable-20210314T171252Z.0 - src_ref: https://opendev.org/starlingx/ptp-notification-armada-app/commit/5eb6e432dde2993e5e99025ba7f4be8b899cef12 - tag: stx.5.0-v1.0.3 + src_build_tag: master-centos-stable-20210503T050004Z.0 + src_ref: https://opendev.org/starlingx/ptp-notification-armada-app/commit/6e87c185baf927b28d0bcff6e2763a1e62c8145e + tag: stx.5.0-v1.0.4 From 
84c45f5e3a241237887af9db89d8c0aa1f8923e0 Mon Sep 17 00:00:00 2001 From: Charles Short Date: Sun, 2 May 2021 12:04:04 -0400 Subject: [PATCH 34/37] Fix wheels tarball generation In d7c5a54ab94bce6635b83d91a807d28f97836a81, django was dropped in favor of rfc3986. However the wheel was not added to the build-wheels generation which breaks the docker images. Also add the migrate wheel since it was mising as well. Add the required wheels in order to build the docker image properly. Test: - Build new centos-stable-wheels tarball - Build stx-keystone-api-proxy container Closes-Bug: 1926795 Signed-off-by: Charles Short Change-Id: Ib6f0abfdcc82ca14f92ebc5b45fe8df961e804ee --- build-tools/build-wheels/docker/stable-wheels-py2.cfg | 2 ++ build-tools/build-wheels/docker/stable-wheels.cfg | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/build-tools/build-wheels/docker/stable-wheels-py2.cfg b/build-tools/build-wheels/docker/stable-wheels-py2.cfg index 17905d42..47f30a38 100644 --- a/build-tools/build-wheels/docker/stable-wheels-py2.cfg +++ b/build-tools/build-wheels/docker/stable-wheels-py2.cfg @@ -45,6 +45,7 @@ Mako-1.1.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/b0/3c/8d marathon-0.11.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/97/e3/f036af0d94f98d199233faa71b5bcbef8b8e8e634551940d98c95d276e4f/marathon-0.11.0-py2.py3-none-any.whl MarkupSafe-1.1.1-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/b9/2e/64db92e53b86efccfaea71321f597fa2e1b2bd3853d8ce658568f7a13094/MarkupSafe-1.1.1.tar.gz|MarkupSafe-1.1.1 mox-0.5.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/0c/a1/64740c638cc5fae807022368f4141700518ee343b53eb3e90bf3cc15a4d4/mox-0.5.3.tar.gz|mox-0.5.3|fix_setup +migrate-0.3.8-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/ce/31/1a4cbf8dc0536c55f41072e8ea37b3df1e412262dc731c57e5bb099eb9b2/migrate-0.3.8.tar.gz|migrate-0.3.8 
mpmath-1.1.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/ca/63/3384ebb3b51af9610086b23ea976e6d27d6d97bf140a76a365bd77a3eb32/mpmath-1.1.0.tar.gz|mpmath-1.1.0|fix_setup msgpack_python-0.4.8-cp27-cp27mu-linux_x86_64.whl|git|https://github.com/msgpack/msgpack-python.git|msgpack-python|0.4.8 munch-2.3.2-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/68/f4/260ec98ea840757a0da09e0ed8135333d59b8dfebe9752a365b04857660a/munch-2.3.2.tar.gz|munch-2.3.2 @@ -110,6 +111,7 @@ repoze.lru-0.7-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/12/b requests_aws-0.1.8-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/5e/2f/4da17752036c04cf4c9af7a2da0d41ef2205043f1c61008006475aa24b8b/requests-aws-0.1.8.tar.gz|requests-aws-0.1.8 restructuredtext_lint-1.3.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/62/76/bd8760de759fb74d7863e6935200af101cb128a7de008741a4e22341d03c/restructuredtext_lint-1.3.0.tar.gz|restructuredtext_lint-1.3.0 retrying-1.3.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/44/ef/beae4b4ef80902f22e3af073397f079c96969c69b2c7d52a57ea9ae61c9d/retrying-1.3.3.tar.gz|retrying-1.3.3 +rfc3986-1.4.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/78/be/7b8b99fd74ff5684225f50dd0e865393d2265656ef3b4ba9eaaaffe622b8/rfc3986-1.4.0-py2.py3-none-any.whl rjsmin-1.1.0-cp27-cp27mu-manylinux1_x86_64.whl|pypi|https://files.pythonhosted.org/packages/c3/8e/079b7cc3a0fc9934ab05d868a00183c7aafd90b5d6138313d98ac2b9f666/rjsmin-1.1.0-cp27-cp27mu-manylinux1_x86_64.whl rtslib_fb-2.1.69-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/fc/1a/77a26207bdad13cc39b93d874b3a1b04e5a0b0332fb716e4d654537bacdb/rtslib-fb-2.1.69.tar.gz|rtslib-fb-2.1.69 scandir-1.10.0-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/df/f5/9c052db7bd54d0cbf1bc0bb6554362bba1012d03e5888950a4f5c5dadc4e/scandir-1.10.0.tar.gz|scandir-1.10.0 diff --git 
a/build-tools/build-wheels/docker/stable-wheels.cfg b/build-tools/build-wheels/docker/stable-wheels.cfg index b547ccdb..fbe83628 100644 --- a/build-tools/build-wheels/docker/stable-wheels.cfg +++ b/build-tools/build-wheels/docker/stable-wheels.cfg @@ -45,6 +45,7 @@ lz4-0.9.0-cp36-cp36m-linux_x86_64.whl|git|https://github.com/python-lz4/python-l Mako-1.1.2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/50/78/f6ade1e18aebda570eed33b7c534378d9659351cadce2fcbc7b31be5f615/Mako-1.1.2-py2.py3-none-any.whl marathon-0.12.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/41/66/814432693297dfb076958ae5ac781e3a88fd70d335473a57f4f2c6329515/marathon-0.12.0-py2.py3-none-any.whl MarkupSafe-1.1.1-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/b9/2e/64db92e53b86efccfaea71321f597fa2e1b2bd3853d8ce658568f7a13094/MarkupSafe-1.1.1.tar.gz|MarkupSafe-1.1.1 +migrate-0.3.8-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/ce/31/1a4cbf8dc0536c55f41072e8ea37b3df1e412262dc731c57e5bb099eb9b2/migrate-0.3.8.tar.gz|migrate-0.3.8 mox-0.5.3-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/0c/a1/64740c638cc5fae807022368f4141700518ee343b53eb3e90bf3cc15a4d4/mox-0.5.3.tar.gz|mox-0.5.3|fix_setup mpmath-1.1.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/ca/63/3384ebb3b51af9610086b23ea976e6d27d6d97bf140a76a365bd77a3eb32/mpmath-1.1.0.tar.gz|mpmath-1.1.0|fix_setup msgpack_python-0.4.8-cp36-cp36m-linux_x86_64.whl|git|https://github.com/msgpack/msgpack-python.git|msgpack-python|0.4.8 @@ -112,6 +113,7 @@ repoze.lru-0.7-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/12/b requests_aws-0.1.8-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/5e/2f/4da17752036c04cf4c9af7a2da0d41ef2205043f1c61008006475aa24b8b/requests-aws-0.1.8.tar.gz|requests-aws-0.1.8 
restructuredtext_lint-1.3.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/62/76/bd8760de759fb74d7863e6935200af101cb128a7de008741a4e22341d03c/restructuredtext_lint-1.3.0.tar.gz|restructuredtext_lint-1.3.0 retrying-1.3.3-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/44/ef/beae4b4ef80902f22e3af073397f079c96969c69b2c7d52a57ea9ae61c9d/retrying-1.3.3.tar.gz|retrying-1.3.3 +rfc3986-1.4.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/78/be/7b8b99fd74ff5684225f50dd0e865393d2265656ef3b4ba9eaaaffe622b8/rfc3986-1.4.0-py2.py3-none-any.whl rjsmin-1.1.0-cp36-cp36m-manylinux1_x86_64.whl|pypi|https://files.pythonhosted.org/packages/62/ee/574b170bbe7a059314e7239305cb829379232a408901585019e012e71170/rjsmin-1.1.0-cp36-cp36m-manylinux1_x86_64.whl rtslib_fb-2.1.71-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/9e/1b/c26bc038888b1e6042d35ec97599cef05181fb6a7a7ecdbb0c041c3f50ea/rtslib-fb-2.1.71.tar.gz|rtslib-fb-2.1.71| scandir-1.10.0-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/df/f5/9c052db7bd54d0cbf1bc0bb6554362bba1012d03e5888950a4f5c5dadc4e/scandir-1.10.0.tar.gz|scandir-1.10.0 @@ -176,4 +178,4 @@ XStatic_Font_Awesome-4.7.0.0-py2.py3-none-any.whl|pypi|https://files.pythonhoste xvfbwrapper-0.2.9-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/57/b6/4920eabda9b49630dea58745e79f9919aba6408d460afe758bf6e9b21a04/xvfbwrapper-0.2.9.tar.gz|xvfbwrapper-0.2.9 yappi-1.2.3-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/37/dc/86bbe1822cdc6dbf46c644061bd24217f6a0f056f00162a3697c9bea7575/yappi-1.2.3.tar.gz|yappi-1.2.3 yaql-1.1.3-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/77/89/cfee017cf4f2d6f5e7159bbf13fe4131c7dbf20d675b78c9928ae9aa9df8/yaql-1.1.3.tar.gz|yaql-1.1.3 
-zVMCloudConnector-1.4.1-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/11/92/9f704de9759816e7b9897b9fb41285b421498b4642551b6fbcccd2850008/zVMCloudConnector-1.4.1.tar.gz|zVMCloudConnector-1.4.1 \ No newline at end of file +zVMCloudConnector-1.4.1-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/11/92/9f704de9759816e7b9897b9fb41285b421498b4642551b6fbcccd2850008/zVMCloudConnector-1.4.1.tar.gz|zVMCloudConnector-1.4.1 From a163d7723e659e89c37ec933d1b0f9aa638a6a73 Mon Sep 17 00:00:00 2001 From: Cole Walker Date: Wed, 5 May 2021 09:39:17 -0400 Subject: [PATCH 35/37] Update image tag for notificationservice-base Update image tag to stx.5.0-v1.0.4 for notificationservice-base Closes-Bug: 1924201 Closes-Bug: 1924197 Signed-off-by: Cole Walker Change-Id: Id863c4f154cc0b39e30ee8986fc07f9856a22826 --- .../build-docker-images/tag-management/image-tags.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/build-tools/build-docker-images/tag-management/image-tags.yaml b/build-tools/build-docker-images/tag-management/image-tags.yaml index 13cfd363..46d5bde1 100644 --- a/build-tools/build-docker-images/tag-management/image-tags.yaml +++ b/build-tools/build-docker-images/tag-management/image-tags.yaml @@ -81,9 +81,9 @@ images: src_ref: https://opendev.org/starlingx/snmp-armada-app/commit/5aca0dd1661bc87a7927c00cf95e0c8aa6f2e2a0 tag: stx.5.0-v1.0.1 - name: docker.io/starlingx/notificationservice-base - src_build_tag: master-centos-stable-20210217T173034Z.0 - src_ref: https://opendev.org/starlingx/ptp-notification-armada-app/commit/b618223d037b0d720fbaa38e032145a72f2a7359 - tag: stx.5.0-v1.0.3 + src_build_tag: master-centos-stable-20210504T193232Z.0 + src_ref: https://opendev.org/starlingx/ptp-notification-armada-app/commit/eb4458e37ebe170f4c1289362f9cbc55fb1f32aa + tag: stx.5.0-v1.0.4 - name: docker.io/starlingx/locationservice-base src_build_tag: master-centos-stable-20210204T224209Z.0 src_ref: 
https://opendev.org/starlingx/ptp-notification-armada-app/commit/545e6b6bb093235c2f8dab8d171f30c6ae8682d3 From 0babd33b6d851dd11492c26e92c8a6ac2c2557de Mon Sep 17 00:00:00 2001 From: Rafael Jardim Date: Wed, 12 May 2021 11:25:54 -0300 Subject: [PATCH 36/37] Update stx-platformclients tag to stx.5.0-v1.4.3 This commit updates the image with the updated clients. Test: Some normal commands Commands related with https dcmanager that wasn't working System application-upload that wasn't working when executed from remote cli Closes-Bug: 1928233 Closes-Bug: 1928231 Signed-off-by: Rafael Jardim Change-Id: I8a0d12f699336a4412be5ff3c73cfb8d59038780 --- .../build-docker-images/tag-management/image-tags.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/build-tools/build-docker-images/tag-management/image-tags.yaml b/build-tools/build-docker-images/tag-management/image-tags.yaml index 46d5bde1..c5d93d1a 100644 --- a/build-tools/build-docker-images/tag-management/image-tags.yaml +++ b/build-tools/build-docker-images/tag-management/image-tags.yaml @@ -55,9 +55,9 @@ images: src_ref: https://opendev.org/starlingx/metal/commit/d46c9c55a9a9b7ea09e8d0fe66c8cfbeeb9ac75f tag: stx.5.0-v1.0.0 - name: docker.io/starlingx/stx-platformclients - src_build_tag: master-centos-stable-20210420T000040Z.0 - src_ref: https://opendev.org/starlingx/distcloud-client/commit/859864c21dadf0fc1888f5df94853a3c6d5472ac - tag: stx.5.0-v1.4.2 + src_build_tag: master-centos-stable-20210512T053357Z.0 + src_ref: https://opendev.org/starlingx/distcloud-client/commit/d52a9080082db5fda2e77fb9e342f812ea8c17e1 + tag: stx.5.0-v1.4.3 - name: docker.io/starlingx/stx-vault-manager src_build_tag: master-centos-stable-20200722T035334Z.0 src_ref: https://opendev.org/starlingx/vault-armada-app/commit/2cd206d6703cc2733e39ecad4539c0d5f1600550 From f457bd15b9ce0be512ec96abcb9b858b0bab2ea8 Mon Sep 17 00:00:00 2001 From: Scott Little Date: Fri, 16 Apr 2021 01:39:17 -0400 Subject: [PATCH 37/37] Improved 
branching tools create_branches_and_tags.sh: - Update the .gitreview files in branched git repos. - When updating a manifest, add the ability to update and use the default revision field. - Create two levels of manifest lockdown, soft and hard. Soft lockdown only sets sha revisions on unbranched projects that lack a revision, or set the revision to master. Hard lockdown applies to all unbranched projects. push_branches_tags.sh: - opendev no longer accepts 'git push' for the delivery of new branches with updates. Instead we must now use separate commands to deliver the tag, the branch, and any updates. Closes-Bug: 1924762 Signed-off-by: Scott Little Change-Id: I6d669ddc80cc9b3cb9e72d65a64589dbccf43ae3 --- .../branching/create_branches_and_tags.sh | 76 ++++- build-tools/branching/push_branches_tags.sh | 78 ++++- build-tools/repo-utils.sh | 130 ++++++++- build-tools/url_utils.sh | 267 ++++++++++++++++++ 4 files changed, 515 insertions(+), 36 deletions(-) create mode 100755 build-tools/url_utils.sh diff --git a/build-tools/branching/create_branches_and_tags.sh b/build-tools/branching/create_branches_and_tags.sh index d87e0987..27c820c4 100755 --- a/build-tools/branching/create_branches_and_tags.sh +++ b/build-tools/branching/create_branches_and_tags.sh @@ -24,7 +24,7 @@ CREATE_BRANCHES_AND_TAGS_SH_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" source "${CREATE_BRANCHES_AND_TAGS_SH_DIR}/../git-repo-utils.sh" usage () { - echo "create_branches_and_tags.sh --branch= [--tag=] [ --remotes= ] [ --projects= ] [ --manifest [ --lock-down ]]" + echo "create_branches_and_tags.sh --branch= [--tag=] [ --remotes= ] [ --projects= ] [ --gitreview-default ] [ --manifest [ --lock-down | --soft-lock-down ] [ --default-revision ]]" echo "" echo "Create a branch and a tag in all listed projects, and all" echo "projects hosted by all listed remotes. Lists are comma separated." 
@@ -33,13 +33,19 @@ usage () { echo "If the tag is omitted, one is automativally generate by adding the" echo "prefix 'v' to the branch name." echo "" - echo "If a manifest is requested, it will recieve the name '.xml' and" - echo "it will specify the branch as the revision for all tagged projects." + echo "If a manifest is requested, the current manifest is modified." + echo "to specify the new branch for all select remotes and projects." echo "If lockdown is requested, all other projects get the current" echo "HEAD's sha set as the revision." + echo "If default-revision is selected, then the manifest default revision" + wcho "will be set." + echo "" + echo "If a gitreview-default is selected, then all branched projects" + echo "with a .gitreview file will have a defaultbranch entry added" + echo "or updated." } -TEMP=$(getopt -o h --long remotes:,projects:,branch:,tag:,manifest,lock-down,help -n 'create_branches_and_tags.sh' -- "$@") +TEMP=$(getopt -o h --long remotes:,projects:,branch:,tag:,manifest,lock-down,hard-lock-down,soft-lock-down,default-revision,gitreview-default,help -n 'create_branches_and_tags.sh' -- "$@") if [ $? 
-ne 0 ]; then usage exit 1 @@ -49,6 +55,8 @@ eval set -- "$TEMP" HELP=0 MANIFEST=0 LOCK_DOWN=0 +GITREVIEW_DEFAULT=0 +SET_DEFAULT_REVISION=0 remotes="" projects="" branch="" @@ -59,15 +67,19 @@ repo_root_dir="" while true ; do case "$1" in - -h|--help) HELP=1 ; shift ;; - --remotes) remotes+=$(echo "$2 " | tr ',' ' '); shift 2;; - --projects) projects+=$(echo "$2 " | tr ',' ' '); shift 2;; - --branch) branch=$2; shift 2;; - --tag) tag=$2; shift 2;; - --manifest) MANIFEST=1 ; shift ;; - --lock-down) LOCK_DOWN=1 ; shift ;; - --) shift ; break ;; - *) usage; exit 1 ;; + -h|--help) HELP=1 ; shift ;; + --remotes) remotes+=$(echo "$2 " | tr ',' ' '); shift 2;; + --projects) projects+=$(echo "$2 " | tr ',' ' '); shift 2;; + --branch) branch=$2; shift 2;; + --tag) tag=$2; shift 2;; + --manifest) MANIFEST=1 ; shift ;; + --lock-down) LOCK_DOWN=2 ; shift ;; + --hard-lock-down) LOCK_DOWN=2 ; shift ;; + --soft-lock-down) LOCK_DOWN=1 ; shift ;; + --default-revision) SET_DEFAULT_REVISION=1 ; shift ;; + --gitreview-default) GITREVIEW_DEFAULT=1 ; shift ;; + --) shift ; break ;; + *) usage; exit 1 ;; esac done @@ -88,6 +100,37 @@ if [ $? -ne 0 ]; then exit 1 fi +update_gitreview () { + local DIR=$1 + ( + cd $DIR || exit 1 + if [ $GITREVIEW_DEFAULT -eq 1 ] && [ -f .gitreview ]; then + if ! grep -q "^defaultbranch=$branch$" .gitreview; then + echo "Updating defaultbranch in ${DIR}/.gitreview" + if grep -q defaultbranch= .gitreview; then + sed "s#\(defaultbranch=\).*#\1$branch#" -i .gitreview + else + echo "defaultbranch=$branch" >> .gitreview + fi + + git add .gitreview + if [ $? != 0 ] ; then + echo_stderr "ERROR: failed to add .gitreview in ${DIR}" + exit 1 + fi + + git commit -s -m "Update .gitreview for $branch" + if [ $? != 0 ] ; then + echo_stderr "ERROR: failed to commit .gitreview in ${DIR}" + exit 1 + fi + else + echo "defaultbranch in ${DIR}/.gitreview already set" + fi + fi + ) +} + if [ $MANIFEST -eq 1 ]; then manifest=$(repo_manifest $repo_root_dir) if [ $? 
-ne 0 ]; then @@ -205,6 +248,7 @@ for subgit in $SUBGITS; do git checkout $branch fi + # check if destination tag already exists tag_check=$(git tag -l $tag) if [ -z "$tag_check" ]; then echo "Creating tag $tag in ${subgit}" @@ -216,6 +260,8 @@ for subgit in $SUBGITS; do else echo "Tag '$tag' already exists in ${subgit}" fi + + update_gitreview ${subgit} || exit 1 ) || exit 1 done ) || exit 1 @@ -276,8 +322,10 @@ if [ $MANIFEST -eq 1 ]; then exit 1 fi + update_gitreview ${manifest_dir} || exit 1 + echo "Creating manifest ${new_manifest_name}" - manifest_set_revision "${manifest}" "${new_manifest}" "$branch" ${LOCK_DOWN} $projects || exit 1 + manifest_set_revision "${manifest}" "${new_manifest}" "$branch" ${LOCK_DOWN} ${SET_DEFAULT_REVISION} $projects || exit 1 echo "Move manifest ${new_manifest_name}, overwriting ${manifest_name}" \cp -f "${manifest}" "${manifest}.save" diff --git a/build-tools/branching/push_branches_tags.sh b/build-tools/branching/push_branches_tags.sh index 1f94cc42..baa73bb6 100755 --- a/build-tools/branching/push_branches_tags.sh +++ b/build-tools/branching/push_branches_tags.sh @@ -17,6 +17,7 @@ PUSH_BRANCHES_TAGS_SH_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )" source "${PUSH_BRANCHES_TAGS_SH_DIR}/../git-repo-utils.sh" +source "${PUSH_BRANCHES_TAGS_SH_DIR}/../url_utils.sh" usage () { echo "push_branches_tags.sh --branch= [--tag=] [ --remotes= ] [ --projects= ] [ --manifest ]" @@ -151,6 +152,8 @@ for subgit in $SUBGITS; do ( cd $subgit + git fetch --all + branch_check=$(git branch -a --list $branch) if [ -z "$branch_check" ]; then echo_stderr "ERROR: Expected branch '$branch' to exist in ${subgit}" @@ -169,21 +172,49 @@ for subgit in $SUBGITS; do exit 1 fi - if [ "${review_method}" == "gerrit" ]; then - remote=$(git_repo_review_remote) - else - remote=$(git_repo_remote) + remote=$(git_remote) + if [ "${remote}" == "" ]; then + echo_stderr "ERROR: Failed to determine remote in ${manifest_dir}" + exit 1 fi - if [ "${remote}" == "" ]; 
then - echo_stderr "ERROR: Failed to determine remote in ${subgit}" + if [ "${review_method}" == "gerrit" ]; then + review_remote=$(git_repo_review_remote) + else + review_remote=${remote} + fi + + if [ "${review_remote}" == "" ]; then + echo_stderr "ERROR: Failed to determine review_remote in ${subgit}" exit 1 fi + branch_check=$(git branch -a --list $remote/$branch) + if [ "${branch_check}" != "" ]; then + echo "Branch $branch already exists in ${subgit}" + exit 0 + fi + echo "Pushing branch $branch in ${subgit}" if [ "${review_method}" == "gerrit" ]; then - echo "git push --tags ${remote} ${branch}" - git push --tags ${remote} ${branch} + url=$(git_repo_review_url) + if [ "${review_remote}" == "" ]; then + echo_stderr "ERROR: Failed to determine review_url in ${subgit}" + exit 1 + fi + + host=$(url_server "${url}") + port=$(url_port "${url}") + path=$(url_path "${url}") + if [ "${host}" == "review.opendev.org" ]; then + git push ${review_remote} ${tag} && \ + ssh -p ${port} ${host} gerrit create-branch ${path} ${branch} ${tag} && \ + git config --local --replace-all "branch.${branch}.merge" refs/heads/${branch} && \ + git review --topic="${branch}" + else + echo "git push --tags ${remote} ${branch}" + git push --tags ${remote} ${branch} + fi else echo "git push --tags --set-upstream ${remote} ${branch}" git push --tags --set-upstream ${remote} ${branch} @@ -232,23 +263,44 @@ if [ $MANIFEST -eq 1 ]; then exit 1 fi - - remote=$(git_review_remote) + remote=$(git_remote) if [ "${remote}" == "" ]; then echo_stderr "ERROR: Failed to determine remote in ${manifest_dir}" exit 1 fi + review_remote=$(git_review_remote) + if [ "${review_remote}" == "" ]; then + echo_stderr "ERROR: Failed to determine review_remote in ${manifest_dir}" + exit 1 + fi + echo "Pushing branch $branch in ${manifest_dir}" if [ "${review_method}" == "gerrit" ]; then # Is a reviewless push possible as part of creating a new branch in gerrit? 
- git push --tags ${remote} ${branch} + url=$(git_review_url) + if [ "${review_remote}" == "" ]; then + echo_stderr "ERROR: Failed to determine review_url in ${subgit}" + exit 1 + fi + + host=$(url_server "${url}") + port=$(url_port "${url}") + path=$(url_path "${url}") + if [ "${host}" == "review.opendev.org" ]; then + git push ${review_remote} ${tag} && \ + ssh -p ${port} ${host} gerrit create-branch ${path} ${branch} ${tag} && \ + git config --local --replace-all "branch.${branch}.merge" refs/heads/${branch} && \ + git review --yes --topic="${branch}" + else + git push --tags ${review_remote} ${branch} + fi else - git push --tags --set-upstream ${remote} ${branch} + git push --tags --set-upstream ${review_remote} ${branch} fi if [ $? != 0 ] ; then - echo_stderr "ERROR: Failed to push tag '${tag}' to remote '${remote}' in ${manifest_dir}" + echo_stderr "ERROR: Failed to push tag '${tag}' to remote '${review_remote}' in ${manifest_dir}" exit 1 fi ) || exit 1 diff --git a/build-tools/repo-utils.sh b/build-tools/repo-utils.sh index 9514b0e9..49f3d8eb 100644 --- a/build-tools/repo-utils.sh +++ b/build-tools/repo-utils.sh @@ -140,6 +140,50 @@ repo_is_project () { } +# +# manifest_get_revision_of_project +# +# Extract the revision of a project within the manifest. +# The default revision is supplied in the absence +# of an explicit project revision. +# +# manifest = Path to manifest. +# project-name = name of project. +# +manifest_get_revision_of_project () { + local manifest="${1}" + local project="${2}" + + local default_revision="" + local revision="" + + default_revision=$(manifest_get_default_revision "${manifest}") + revision=$(grep ' +# +# Extract the default revision of the manifest, if any. +# +# manifest = Path to manifest. +# +manifest_get_default_revision () { + local manifest="${1}" + + grep ' # @@ -149,8 +193,10 @@ repo_is_project () { # revision = A branch, tag ,or sha. 
Branch and SHA can be used # directly, but repo requires that a tag be in the form # "refs/tags/". -# lock_down = 0 or 1. If 1, set a revision on all other non-listed +# lock_down = 0,1 or 2. If 2, set a revision on all other non-listed # projects to equal the SHA of the current git head. +# If 1, similar to 2, but only if the project doesn't have +# some other form of revision specified. # project-list = A space seperated list of projects. Listed projects # will have their revision set to the provided revision # value. @@ -160,9 +206,11 @@ manifest_set_revision () { local new_manifest="${2}" local revision="${3}" local lock_down="${4}" - shift 4 + local set_default="${5}" + shift 5 local projects="${@}" + local old_default_revision="" local repo_root_dir="" local line="" local FOUND=0 @@ -192,11 +240,32 @@ manifest_set_revision () { return 1 fi + old_default_revision=$(manifest_get_default_revision "${old_manifest}") + if [ ${set_default} -eq 1 ] && [ "${old_default_revision}" == "" ]; then + # We only know how to alter an existing default revision, not set a + # new one, so continue without setting a default. + set_default=0 + fi + while IFS= read -r line; do echo "${line}" | grep -q '&2 echo "Error: $FUNCNAME (${LINENO}): empty argument" + return 1 + fi + + if echo "$URL" | grep -q '[:][/][/]' ;then + echo "$URL" | sed 's#^\(.*\)://.*$#\1#' + else + echo "http" + fi + return 0 +} + +url_login () { + local URL="$1" + + if [ "$URL" == "" ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): empty argument" + return 1 + fi + + echo "$URL" | sed 's#^.*://\([^/]*\)/.*$#\1#' + return 0 +} + +url_user () { + local URL="$1" + local LOGIN + + if [ "$URL" == "" ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): empty argument" + return 1 + fi + + url_login "$URL" | sed -e '/@/! 
s#.*## ; s#\([^@]*\)@.*#\1#' + if [ ${PIPESTATUS[0]} -ne 0 ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): url_login failed" + return 1 + fi + + return 0 +} + +url_port () { + local URL="$1" + local LOGIN + + if [ "$URL" == "" ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): empty argument" + return 1 + fi + + url_login "$URL" | sed -e '/:/! s#.*## ; s#[^:]*:\([^:]*\)#\1#' + if [ ${PIPESTATUS[0]} -ne 0 ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): url_login failed" + return 1 + fi + + return 0 +} + +url_server () { + local URL="$1" + local LOGIN + + if [ "$URL" == "" ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): empty argument" + return 1 + fi + + url_login "$URL" | sed 's#^.*@## ; s#:.*$##' + if [ ${PIPESTATUS[0]} -ne 0 ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): url_login failed" + return 1 + fi + + return 0 +} + +url_path () { + local URL="$1" + + if [ "$URL" == "" ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): empty argument" + return 1 + fi + + echo "$URL" | sed 's#^.*://[^/]*/\(.*\)$#\1#' + return 0 +} + +# +# url_path_to_fs_path: +# +# Convert url format path to file system format. +# e.g. replace %20 with ' '. +# +# Note: Does NOT test the output path to ensure there are +# no illegal file system characters. +# +url_path_to_fs_path () { + local INPUT_PATH="$1" + local TEMP + + if [ "$INPUT_PATH" == "" ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): empty argument" + return 1 + fi + + # Deviate from URI spec by not substituding '+' with ' '. + # It would alias '%20' and we need unique mappings. + # TEMP="${INPUT_PATH//+/ }" + + TEMP="$INPUT_PATH" + printf '%b' "${TEMP//%/\\x}" + return 0 +} + +# +# fs_path_to_url_path: +# +# Convert file system format path to url format. +# e.g. replace ' ' with %20. 
+# +fs_path_to_url_path () { + local INPUT_PATH="$1" + local LENGTH + local POS + local CHAR + + if [ "$INPUT_PATH" == "" ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): empty argument" + return 1 + fi + + LENGTH="${#INPUT_PATH}" + for (( POS = 0; POS < LENGTH; POS++ )); do + CHAR="${1:POS:1}" + case $CHAR in + [/a-zA-Z0-9.~_-]) + # Reference https://metacpan.org/pod/URI::Escape + printf "$CHAR" + ;; + *) + printf '%%%02X' "'$CHAR" + ;; + esac + done + + return 0 +} + +# +# normalize_path: +# +# 1) replace // with / +# 2) replace /./ with / +# 3) Remove trailing / +# 4) Remove leading ./ +# + +normalize_path () { + local INPUT_PATH="$1" + + if [ "$INPUT_PATH" == "" ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): empty argument" + return 1 + fi + + echo "$INPUT_PATH" | sed 's#[/]\+#/#g ; s#[/][.][/]#/#g ; s#/$## ; s#^[.]/##' + return 0 +} + + +# +# repo_url_to_sub_path: +# +repo_url_to_sub_path () { + local URL="$1" + local FAMILY="" + local SERVER="" + local URL_PATH="" + local FS_PATH="" + + if [ "$URL" == "" ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): empty argument" + return 1 + fi + + # set FAMILY from URL + echo $URL | grep -q 'centos[.]org' && FAMILY=centos + echo $URL | grep -q 'fedoraproject[.]org[/]pub[/]epel' && FAMILY=epel + + SERVER=$(url_server "$URL") + if [ $? -ne 0 ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): url_server '$URL'" + return 1 + fi + + URL_PATH="$(url_path "$URL")" + if [ $? -ne 0 ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): url_path '$URL'" + return 1 + fi + + FS_PATH="$(url_path_to_fs_path "$URL_PATH")" + if [ $? -ne 0 ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): url_path_to_fs_path '$URL_PATH'" + return 1 + fi + + FS_PATH="$(normalize_path "$FS_PATH")" + if [ $? -ne 0 ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): normalize_path '$FS_PATH'" + return 1 + fi + + normalize_path "./$FAMILY/$SERVER/$FS_PATH" + if [ $? 
-ne 0 ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): normalize_path './$FAMILY/$SERVER/$FS_PATH'" + return 1 + fi + + return 0 +} + +CENGN_PROTOCOL="http" +CENGN_HOST="mirror.starlingx.cengn.ca" +CENGN_PORT="80" +CENGN_URL_ROOT="mirror" + +url_to_stx_mirror_url () { + local URL="$1" + local DISTRO="$2" + local URL_PATH="" + local FS_PATH="" + + if [ "$URL" == "" ] || [ "$DISTRO" == "" ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): empty argument" + return 1 + fi + + FS_PATH="$(repo_url_to_sub_path "$URL")" + if [ $? -ne 0 ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): repo_url_to_sub_path '$URL'" + return 1 + fi + + URL_PATH=$(fs_path_to_url_path "$FS_PATH") + if [ $? -ne 0 ]; then + >&2 echo "Error: $FUNCNAME (${LINENO}): fs_path_to_url_path '$FS_PATH'" + return 1 + fi + + echo "$CENGN_PROTOCOL://$CENGN_HOST:$CENGN_PORT/$CENGN_URL_ROOT/$DISTRO/$URL_PATH" + return 0 +}