From cfe45dadae1ae1cdb5f72259f2baaa7abc136422 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 1 Jun 2018 07:45:16 -0700 Subject: [PATCH] StarlingX open source release updates Signed-off-by: Dean Troyer --- .gitignore | 4 + LICENSE | 202 ++ addons/.gitignore | 3 + build-data/unbuilt_rpm_patterns | 19 + build-tools/Cached_Data.txt | 78 + build-tools/audit-pkgs | 70 + build-tools/branching/branch_and_tag.sh | 86 + .../branching/create_branches_and_tags.sh | 56 + build-tools/branching/create_tags.sh | 36 + build-tools/branching/push_branches_tags.sh | 42 + build-tools/branching/push_tags.sh | 29 + build-tools/build-guest | 338 ++ build-tools/build-img | 94 + build-tools/build-iso | 702 ++++ build-tools/build-pkg-srpm | 63 + build-tools/build-pkgs | 34 + build-tools/build-pkgs-parallel | 370 ++ build-tools/build-pkgs-serial | 379 ++ build-tools/build-pkgs4 | 1 + build-tools/build-rpms | 34 + build-tools/build-rpms-parallel | 2343 ++++++++++++ build-tools/build-rpms-serial | 1328 +++++++ build-tools/build-rpms4 | 1 + build-tools/build-sdk | 70 + build-tools/build-srpms | 34 + build-tools/build-srpms-parallel | 1438 ++++++++ build-tools/build-srpms-serial | 1161 ++++++ build-tools/build-srpms4 | 1 + build-tools/build_guest/build-guest-image.py | 123 + build-tools/build_guest/image-rt.inc | 39 + build-tools/build_guest/image.inc | 44 + build-tools/build_guest/rootfs-exclude.txt | 13 + .../build_guest/rootfs-rt/boot/extlinux.conf | 7 + build-tools/build_guest/rootfs-setup.sh | 90 + .../build_guest/rootfs-std/boot/extlinux.conf | 7 + .../cloud/cloud.cfg.d/99_wrs-datasources.cfg | 18 + .../build_guest/rootfs/etc/dhcp/dhclient.conf | 21 + .../build_guest/rootfs/etc/iptables.rules | 12 + .../rootfs/etc/modprobe.d/floppy.conf | 1 + .../rootfs/etc/modprobe.d/wrs_avp.conf | 1 + .../rootfs/etc/modules-load.d/wrs_avp.conf | 1 + .../etc/sysconfig/network-scripts/ifcfg-eth0 | 8 + .../etc/udev/rules.d/65-renumber-net.rules | 4 + .../rootfs/usr/lib/udev/renumber_device | 12 + 
.../build_guest/rpm-install-list-rt.txt | 305 ++ build-tools/build_guest/rpm-install-list.txt | 303 ++ build-tools/build_guest/rpm-remove-list.txt | 7 + build-tools/build_iso/anaconda-ks.cfg | 40 + build-tools/build_iso/cgts_deps.sh | 296 ++ build-tools/build_iso/comps.xml.gz | Bin 0 -> 160726 bytes build-tools/build_iso/gather_packages.pl | 122 + build-tools/build_iso/image-dev.inc | 4 + build-tools/build_iso/image.inc | 364 ++ build-tools/build_iso/isolinux.cfg | 125 + build-tools/build_iso/ks.cfg | 36 + build-tools/build_iso/minimal_rpm_list.txt | 258 ++ build-tools/build_iso/openstack_kilo.txt | 2 + build-tools/build_minimal_iso/README | 112 + build-tools/build_minimal_iso/README.2 | 5 + build-tools/build_minimal_iso/build.cfg | 108 + build-tools/build_minimal_iso/build.sh | 45 + build-tools/build_minimal_iso/build_centos.sh | 62 + build-tools/build_minimal_iso/cgts_deps.sh | 222 ++ build-tools/build_minimal_iso/yum.conf | 22 + build-tools/certificates/TiBoot.crt | Bin 0 -> 830 bytes build-tools/classify | 67 + build-tools/create-cgcs-centos-repo | 54 + build-tools/create-cgcs-tis-repo | 55 + build-tools/create-yum-conf | 59 + build-tools/create_dependancy_cache.py | 674 ++++ build-tools/default_build_srpm | 264 ++ build-tools/find_klm | 59 + .../find_patched_srpms_needing_upgrade | 54 + build-tools/ip_report.py | 517 +++ build-tools/make-installer-images.sh | 244 ++ .../copy_external_mirror_to_tis_mirror | 229 ++ .../copy_external_mirror_to_tis_mirror.old | 216 ++ .../mirror_rebase/link_cgcs_centos_repo | 225 ++ .../mirror_rebase/link_cgcs_centos_repo_2 | 82 + .../mirror_rebase/link_cgcs_centos_repo_3 | 40 + .../mirror_rebase/link_cgcs_centos_repo_4 | 176 + .../mirror_rebase/link_cgcs_centos_repo_5 | 94 + .../mirror_rebase/link_cgcs_centos_repo_6 | 91 + .../mirror_rebase/link_cgcs_centos_repo_7 | 84 + .../mirror_rebase/link_cgcs_centos_repo_8 | 65 + .../mirror_rebase/link_cgcs_centos_repo_9 | 346 ++ build-tools/mirror_rebase/tarball_upgrade | 330 ++ 
build-tools/mk/_sign_pkgs.mk | 31 + build-tools/mock_cfg_to_yum_conf.py | 9 + build-tools/mockchain-parallel | 1207 +++++++ build-tools/modify-build-cfg | 130 + build-tools/patch-iso | 320 ++ build-tools/patch_rebase_1 | 130 + build-tools/patch_rebase_2 | 148 + build-tools/patch_rebase_3 | 119 + build-tools/patch_rebase_4 | 403 +++ build-tools/sign-build | 506 +++ build-tools/sign-rpms | 258 ++ build-tools/sign_iso_formal.sh | 62 + build-tools/sign_patch_formal.sh | 51 + .../signing/bootimage_sig_validation_key.pem | 9 + build-tools/signing/dev-private-key.pem | 27 + build-tools/signing/ima_signing_key.priv | 28 + build-tools/source_lookup.txt | 87 + build-tools/spec-utils | 686 ++++ build-tools/srpm-utils | 3144 +++++++++++++++++ build-tools/sync-jenkins | 154 + build-tools/sync_jenkins.sh | 144 + build-tools/tis.macros | 11 + build-tools/update-efiboot-image | 163 + build-tools/update-pxe-network-installer | 396 +++ build-tools/wrs_orig.txt | 58 + 112 files changed, 24161 insertions(+) create mode 100644 .gitignore create mode 100644 LICENSE create mode 100644 addons/.gitignore create mode 100644 build-data/unbuilt_rpm_patterns create mode 100644 build-tools/Cached_Data.txt create mode 100755 build-tools/audit-pkgs create mode 100755 build-tools/branching/branch_and_tag.sh create mode 100755 build-tools/branching/create_branches_and_tags.sh create mode 100755 build-tools/branching/create_tags.sh create mode 100755 build-tools/branching/push_branches_tags.sh create mode 100755 build-tools/branching/push_tags.sh create mode 100755 build-tools/build-guest create mode 100755 build-tools/build-img create mode 100755 build-tools/build-iso create mode 100644 build-tools/build-pkg-srpm create mode 100755 build-tools/build-pkgs create mode 100755 build-tools/build-pkgs-parallel create mode 100755 build-tools/build-pkgs-serial create mode 120000 build-tools/build-pkgs4 create mode 100755 build-tools/build-rpms create mode 100755 build-tools/build-rpms-parallel create mode 
100755 build-tools/build-rpms-serial create mode 120000 build-tools/build-rpms4 create mode 100755 build-tools/build-sdk create mode 100755 build-tools/build-srpms create mode 100755 build-tools/build-srpms-parallel create mode 100755 build-tools/build-srpms-serial create mode 120000 build-tools/build-srpms4 create mode 100755 build-tools/build_guest/build-guest-image.py create mode 100644 build-tools/build_guest/image-rt.inc create mode 100644 build-tools/build_guest/image.inc create mode 100644 build-tools/build_guest/rootfs-exclude.txt create mode 100644 build-tools/build_guest/rootfs-rt/boot/extlinux.conf create mode 100755 build-tools/build_guest/rootfs-setup.sh create mode 100644 build-tools/build_guest/rootfs-std/boot/extlinux.conf create mode 100644 build-tools/build_guest/rootfs/etc/cloud/cloud.cfg.d/99_wrs-datasources.cfg create mode 100644 build-tools/build_guest/rootfs/etc/dhcp/dhclient.conf create mode 100644 build-tools/build_guest/rootfs/etc/iptables.rules create mode 100644 build-tools/build_guest/rootfs/etc/modprobe.d/floppy.conf create mode 100644 build-tools/build_guest/rootfs/etc/modprobe.d/wrs_avp.conf create mode 100644 build-tools/build_guest/rootfs/etc/modules-load.d/wrs_avp.conf create mode 100644 build-tools/build_guest/rootfs/etc/sysconfig/network-scripts/ifcfg-eth0 create mode 100644 build-tools/build_guest/rootfs/etc/udev/rules.d/65-renumber-net.rules create mode 100755 build-tools/build_guest/rootfs/usr/lib/udev/renumber_device create mode 100644 build-tools/build_guest/rpm-install-list-rt.txt create mode 100644 build-tools/build_guest/rpm-install-list.txt create mode 100644 build-tools/build_guest/rpm-remove-list.txt create mode 100644 build-tools/build_iso/anaconda-ks.cfg create mode 100755 build-tools/build_iso/cgts_deps.sh create mode 100644 build-tools/build_iso/comps.xml.gz create mode 100755 build-tools/build_iso/gather_packages.pl create mode 100644 build-tools/build_iso/image-dev.inc create mode 100644 
build-tools/build_iso/image.inc create mode 100644 build-tools/build_iso/isolinux.cfg create mode 100644 build-tools/build_iso/ks.cfg create mode 100644 build-tools/build_iso/minimal_rpm_list.txt create mode 100644 build-tools/build_iso/openstack_kilo.txt create mode 100644 build-tools/build_minimal_iso/README create mode 100644 build-tools/build_minimal_iso/README.2 create mode 100644 build-tools/build_minimal_iso/build.cfg create mode 100755 build-tools/build_minimal_iso/build.sh create mode 100755 build-tools/build_minimal_iso/build_centos.sh create mode 100755 build-tools/build_minimal_iso/cgts_deps.sh create mode 100644 build-tools/build_minimal_iso/yum.conf create mode 100644 build-tools/certificates/TiBoot.crt create mode 100644 build-tools/classify create mode 100755 build-tools/create-cgcs-centos-repo create mode 100755 build-tools/create-cgcs-tis-repo create mode 100755 build-tools/create-yum-conf create mode 100755 build-tools/create_dependancy_cache.py create mode 100755 build-tools/default_build_srpm create mode 100755 build-tools/find_klm create mode 100755 build-tools/find_patched_srpms_needing_upgrade create mode 100755 build-tools/ip_report.py create mode 100755 build-tools/make-installer-images.sh create mode 100644 build-tools/mirror_rebase/copy_external_mirror_to_tis_mirror create mode 100755 build-tools/mirror_rebase/copy_external_mirror_to_tis_mirror.old create mode 100755 build-tools/mirror_rebase/link_cgcs_centos_repo create mode 100755 build-tools/mirror_rebase/link_cgcs_centos_repo_2 create mode 100755 build-tools/mirror_rebase/link_cgcs_centos_repo_3 create mode 100755 build-tools/mirror_rebase/link_cgcs_centos_repo_4 create mode 100755 build-tools/mirror_rebase/link_cgcs_centos_repo_5 create mode 100755 build-tools/mirror_rebase/link_cgcs_centos_repo_6 create mode 100755 build-tools/mirror_rebase/link_cgcs_centos_repo_7 create mode 100755 build-tools/mirror_rebase/link_cgcs_centos_repo_8 create mode 100755 
build-tools/mirror_rebase/link_cgcs_centos_repo_9 create mode 100755 build-tools/mirror_rebase/tarball_upgrade create mode 100644 build-tools/mk/_sign_pkgs.mk create mode 100755 build-tools/mock_cfg_to_yum_conf.py create mode 100755 build-tools/mockchain-parallel create mode 100755 build-tools/modify-build-cfg create mode 100755 build-tools/patch-iso create mode 100755 build-tools/patch_rebase_1 create mode 100755 build-tools/patch_rebase_2 create mode 100755 build-tools/patch_rebase_3 create mode 100755 build-tools/patch_rebase_4 create mode 100755 build-tools/sign-build create mode 100755 build-tools/sign-rpms create mode 100755 build-tools/sign_iso_formal.sh create mode 100755 build-tools/sign_patch_formal.sh create mode 100644 build-tools/signing/bootimage_sig_validation_key.pem create mode 100644 build-tools/signing/dev-private-key.pem create mode 100755 build-tools/signing/ima_signing_key.priv create mode 100644 build-tools/source_lookup.txt create mode 100644 build-tools/spec-utils create mode 100644 build-tools/srpm-utils create mode 100755 build-tools/sync-jenkins create mode 100755 build-tools/sync_jenkins.sh create mode 100644 build-tools/tis.macros create mode 100755 build-tools/update-efiboot-image create mode 100755 build-tools/update-pxe-network-installer create mode 100644 build-tools/wrs_orig.txt diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..7770dd90 --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +*.swp +/cgcs-centos-repo +/cgcs-tis-repo +/cgcs-3rd-party-repo diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/addons/.gitignore b/addons/.gitignore new file mode 100644 index 00000000..785e60d3 --- /dev/null +++ b/addons/.gitignore @@ -0,0 +1,3 @@ +/wr-avs +/wr-cgcs + diff --git a/build-data/unbuilt_rpm_patterns b/build-data/unbuilt_rpm_patterns new file mode 100644 index 00000000..b71e4252 --- /dev/null +++ b/build-data/unbuilt_rpm_patterns @@ -0,0 +1,19 @@ +[-]locale[-] +[-]doc[-] +[-]dbg[-] +vswitch-staticdev +guest-scale-agent-staticdev +vim-spell +openssh-server-sysvinit +openstack-neutron-linuxbridge +^libcacard- +^kernel-bootwrapper +^kernel-doc- +^kernel-abi-whitelists +^kernel-debug- +^kernel-kdump +^kernel-rt-bootwrapper +^kernel-rt-doc- +^kernel-rt-abi-whitelists +^kernel-rt-debug- +^kernel-rt-kdump diff --git a/build-tools/Cached_Data.txt b/build-tools/Cached_Data.txt new file mode 100644 index 00000000..e3a7d1fe --- /dev/null +++ b/build-tools/Cached_Data.txt @@ -0,0 +1,78 @@ +Data on an source rpm: + + location: + ${MY_WORKSPACE}/${BUILD_TYPE}/rpmbuild/SPECS/${SRPM_FILE_NAME}/ + + files: + *.spec # spec file found in the source rpm + + subdirectories: + NAMES/ # Directory contains an emtpy file, where the file name + # is the name of the source rpm. + + SERVICES/ # Directory contains zero or more emtpy files, where the + # file name is the name of the service provided by one + # or more of the rpms. + + BUILDS/ # Directory contains emtpy files, where the file name is + # the name of a binary rpm built from the source rpm. + + BUILDS_VR/ # Directory contains emtpy files, where the file name is + # the name-verion-release of a binary rpm built from the + # source rpm. + + location: + ${MY_WORKSPACE}/${BUILD_TYPE}/rpmbuild/SOURCES/${SRPM_FILE_NAME}/ + + files: + BIG # if it exists, it contains one line, the numeric value + # extracted from build_srpms.data if the line + # BUILD_IS_BIG=### if present. + # This is the estimated filesystem size (GB) required to + # host a mock build of the package. 
+ # Note: not all parallel build environments are the same + # size. The smallest build environmnet is 3 GB and this + # is sufficient for most packages. Don't bother adding a + # BUILD_IS_BIG=### directive unless 3 gb is proven to be + # insufficient. + + SLOW # if it exists, it contains one line, the numeric value i + # extracted from build_srpms.data if the line + # BUILD_IS_SLOW=### if present. + # This is the estimated build time (minutes) required to + # host perform a mock build of the package. + # Note: Currently we only use this value as a boolean. + # Non-zero and we try to start the build of this package + # earlier rather than later. Build times >= 3 minutes are + # worth anotating. Else don't bother adding a + # BUILD_IS_SLOW=### directive +e.g. + +cd $MY_WORKSPACE/std/rpmbuild/SPECS/openstack-cinder-9.1.1-0.tis.40.src.rpm +find . +./BUILDS +./BUILDS/openstack-cinder +./BUILDS/python-cinder +./BUILDS/python-cinder-tests +./NAMES +./NAMES/openstack-cinder +./SERVICES +./SERVICES/cinder +./BUILDS_VR +./BUILDS_VR/openstack-cinder-9.1.1-0.tis.40 +./BUILDS_VR/python-cinder-9.1.1-0.tis.40 +./BUILDS_VR/python-cinder-tests-9.1.1-0.tis.40 +./openstack-cinder.spec + + +e.g. +cd $MY_WORKSPACE/std/rpmbuild/SOURCES/kernel-3.10.0-514.16.1.el7.29.tis.src.rpm +find . 
+./BIG +./SLOW + +cat ./BIG +8 + +cat ./SLOW +12 diff --git a/build-tools/audit-pkgs b/build-tools/audit-pkgs new file mode 100755 index 00000000..6a33bb6e --- /dev/null +++ b/build-tools/audit-pkgs @@ -0,0 +1,70 @@ +#!/bin/bash + +rpm_compare () { + local r="$1" + local r2="$2" + local line + local f=$(basename $r) + local f2=$(basename $r2) + + rpm -q --dump --nosignature -p $r | awk ' { print $1 "\n" $1 " " $5 " " $6 " " $7 " " $8 " " $9 " " $10 " " $11 } ' > /tmp/dump.new + rpm -q --dump --nosignature -p $r2 | awk ' { print $1 "\n" $1 " " $5 " " $6 " " $7 " " $8 " " $9 " " $10 " " $11 } ' > /tmp/dump.old + first_line=1 + diff -y -W 200 --suppress-common-lines /tmp/dump.new /tmp/dump.old | grep '|' | + while read -r line; do + left=$(echo "$line" | awk -F '|' '{ print $1 }') + right=$(echo "$line" | awk -F '|' '{ print $2 }') + left_f=$(echo "$left" | awk '{ print $1 }') + right_f=$(echo "$right" | awk '{ print $1 }') + if [ "$left_f" != "$right_f" ];then + continue + fi + if [ $first_line -eq 1 ]; then + echo "" + echo "$f vs $f2" + first_line=0 + fi + echo "$line" + done +} + +echo "" +echo "======================================================" +echo "Auditing built packages vs unpatched upstream packages" +echo "======================================================" +for r in $(find $MY_WORKSPACE/*/rpmbuild/RPMS -name '*.rpm' | grep -v '.src.rpm' | grep -v debuginfo); do + f=$(basename $r) + f2=$(echo $f | sed 's#[.]tis[.][0-9]*[.]#.#' | sed 's#[.]tis[.]#.#') + r2=$(find $MY_REPO/cgcs-centos-repo/Binary/ -name $f2) + if [ "$r2" == "" ]; then + # Probably one of our own + # echo "Couldn't find '$f2'" + continue + fi + rpm_compare "$r" "$r2" +done + +echo "" +echo "============================" +echo "Auditing built for conflicts" +echo "============================" +grep 'conflicts with file from package' -r --binary-files=without-match $MY_WORKSPACE/*/results/ | +while read -r line; do + w=$(echo "$line" | awk '{ print $8 }')".rpm" + w2=$(echo "$line" | 
awk '{ print $14 }')".rpm" + echo "$w $w2" +done | sort --unique | sed 's#bash-completion-1:#bash-completion-#' | +while read -r line2; do + f=$(echo "$line2" | awk '{ print $1 }') + f2=$(echo "$line2" | awk '{ print $2 }') + r=$(find $MY_REPO/cgcs-centos-repo/Binary/ $MY_WORKSPACE/*/rpmbuild/RPMS -name $f) + r2=$(find $MY_REPO/cgcs-centos-repo/Binary/ $MY_WORKSPACE/*/rpmbuild/RPMS -name $f2) + # echo "" + # echo "$f vs $f2" + # echo "$r vs $r2" + if [ "$r" != "" ] && [ "$r2" != "" ]; then + rpm_compare "$r" "$r2" + fi +done + + diff --git a/build-tools/branching/branch_and_tag.sh b/build-tools/branching/branch_and_tag.sh new file mode 100755 index 00000000..cfe10449 --- /dev/null +++ b/build-tools/branching/branch_and_tag.sh @@ -0,0 +1,86 @@ +#!/bin/bash + +# The purpose of this script is to create branches and tags that follow a convention +# If the desired branch already exists, it is skipped. +# If the desired tag already exists, it is skipped. + +OLD_TAG=vCGCS_DEV_0018 +NEW_TAG=vCGCS_DEV_0019 + +OLD_BRANCH=CGCS_DEV_0018 +NEW_BRANCH=CGCS_DEV_0019 + +if [ -z "$MY_REPO" ]; then + echo "MY_REPO is unset" + exit 1 +else + echo "MY_REPO is set to '$MY_REPO'" +fi + +if [ -d "$MY_REPO" ]; then + cd $MY_REPO + echo "checking out and pulling old branch" + wrgit checkout $OLD_BRANCH + if [ $? -ne 0 ]; then + echo "ERROR: wrgit checkout $OLD_BRANCH" + exit 1 + fi + + wrgit pull + if [ $? -ne 0 ]; then + echo "ERROR: wrgit pull" + exit 1 + fi +else + echo "Could not change to diectory '$MY_REPO'" + exit 1 +fi + +echo "Finding subgits" +SUBGITS=`find . 
-type d -name ".git" | sed "s%/\.git$%%"` + +# Go through all subgits and create the NEW_BRANCH if it does not already exist +# Go through all subgits and create the NEW_TAG if it does not already exist +for subgit in $SUBGITS; do + echo "" + echo "" + pushd $subgit > /dev/null + git fetch + git fetch --tags + # check if destination branch already exists + echo "$subgit" + branch_check=`git branch -a --list $NEW_BRANCH` + if [ -z "$branch_check" ] + then + echo "Creating $NEW_BRANCH" + git checkout $OLD_BRANCH + git checkout -b $NEW_BRANCH + git push origin $NEW_BRANCH:$NEW_BRANCH + else + echo "$NEW_BRANCH already exists" + fi + tag_check=`git tag -l $NEW_TAG` + if [ -z "$tag_check" ] + then + echo "Creating $NEW_TAG" + # create tag + git checkout $NEW_BRANCH + git pull origin + git tag $NEW_TAG + git push origin $NEW_TAG + else + echo "$NEW_TAG already exists" + fi + + popd > /dev/null +done + +echo "All done. branches and tags are pushed" + + + + + + + + diff --git a/build-tools/branching/create_branches_and_tags.sh b/build-tools/branching/create_branches_and_tags.sh new file mode 100755 index 00000000..8a34dcf8 --- /dev/null +++ b/build-tools/branching/create_branches_and_tags.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +if [ x"$1" = x ] ; then + echo "ERROR: You must specify a name to create branches and tags" + exit 1 +fi +branch=$1 +tag="v$branch" + + + +echo "Finding subgits" +SUBGITS=`find . -type d -name ".git" | sed "s%/\.git$%%"` + +# Go through all subgits and create the branch and tag if they does not already exist +for subgit in $SUBGITS; do + echo "" + echo "" + pushd $subgit > /dev/null + + # check if destination branch already exists + echo "$subgit" + branch_check=`git branch -a --list $branch` + if [ -z "$branch_check" ] + then + echo "Creating branch $branch" + git checkout -b $branch + if [ $? 
!= 0 ] ; then + echo "ERROR: Could not exec: git checkout -b $branch" + popd > /dev/null + exit 1 + fi + # git push origin $branch:$branch + else + echo "Branch $branch already exists" + git checkout $branch + fi + + tag_check=`git tag -l $tag` + if [ -z "$tag_check" ] + then + echo "Creating tag $tag" + git tag $tag + if [ $? != 0 ] ; then + echo "ERROR: Could not exec: git tag $tag" + popd > /dev/null + exit 1 + fi + # git push origin $tag + else + echo "Tag $tag already exists" + fi + + popd > /dev/null +done + diff --git a/build-tools/branching/create_tags.sh b/build-tools/branching/create_tags.sh new file mode 100755 index 00000000..b68df8e8 --- /dev/null +++ b/build-tools/branching/create_tags.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +if [ x"$1" = x ] ; then + echo "ERROR: You must specify a name to create tags" + exit 1 +fi +tag=$1 + + +echo "Finding subgits" +SUBGITS=`find . -type d -name ".git" | sed "s%/\.git$%%"` + +# Go through all subgits and create the tag if it does not already exist +for subgit in $SUBGITS; do + echo "" + echo "" + pushd $subgit > /dev/null + + tag_check=`git tag -l $tag` + if [ -z "$tag_check" ] + then + echo "Creating tag $tag" + git tag $tag + if [ $? != 0 ] ; then + echo "ERROR: Could not exec: git tag $tag" + popd > /dev/null + exit 1 + fi + # git push origin $tag + else + echo "Tag $tag already exists" + fi + + popd > /dev/null +done + diff --git a/build-tools/branching/push_branches_tags.sh b/build-tools/branching/push_branches_tags.sh new file mode 100755 index 00000000..480b4751 --- /dev/null +++ b/build-tools/branching/push_branches_tags.sh @@ -0,0 +1,42 @@ + +#!/bin/bash + +if [ x"$1" = x ] ; then + echo "ERROR: You must specify a name to create branches and tags" + exit 1 +fi +branch=$1 +tag="v$branch" + + + +echo "Finding subgits" +SUBGITS=`find . 
-type d -name ".git" | sed "s%/\.git$%%"` + +# Go through all subgits and create the branch and tag if they does not already exist +for subgit in $SUBGITS; do + echo "" + echo "" + pushd $subgit > /dev/null + + # check if destination branch already exists + echo "$subgit" + echo "Pushing branch $branch" + git push origin $branch:$branch + if [ $? != 0 ] ; then + echo "ERROR: Could not exec: git push origin $branch:$branch" + popd > /dev/null + exit 1 + fi + + echo "Pushing tag $tag" + git push origin $tag + if [ $? != 0 ] ; then + echo "ERROR: Could not exec: git push origin $tag" + popd > /dev/null + exit 1 + fi + + popd > /dev/null +done + diff --git a/build-tools/branching/push_tags.sh b/build-tools/branching/push_tags.sh new file mode 100755 index 00000000..b32c00ae --- /dev/null +++ b/build-tools/branching/push_tags.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +if [ x"$1" = x ] ; then + echo "ERROR: You must specify a name to push tags" + exit 1 +fi +tag=$1 + + +echo "Finding subgits" +SUBGITS=`find . -type d -name ".git" | sed "s%/\.git$%%"` + +# Go through all subgits and create the tag if it does not already exist +for subgit in $SUBGITS; do + echo "" + echo "" + pushd $subgit > /dev/null + + echo "Creating tag $tag" + git push origin $tag + if [ $? != 0 ] ; then + echo "ERROR: Could not exec: git push origin $tag" + popd > /dev/null + exit 1 + fi + + popd > /dev/null +done + diff --git a/build-tools/build-guest b/build-tools/build-guest new file mode 100755 index 00000000..6c26f914 --- /dev/null +++ b/build-tools/build-guest @@ -0,0 +1,338 @@ +#!/bin/env bash + +# +# Build the tis-centos-image.img or tis-centos-image-rt.img file +# + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +# NOTE: TMP_DIR must end in '/' +# NOTE: /tmp/ is now tmpfs like. 
Can't be trusted across multiple mock commands +# TMP_DIR=/tmp/ +TMP_DIR=/ + +# Use RPMs from the std build only, for now +export BUILD_TYPE=std +export MY_BUILD_DIR_TOP=$MY_BUILD_DIR + +function init_vars { + # Output path (current dir unless MY_WORKSPACE defined) + OUTPUT_DIR="$PWD/export" + if [ ! -z "$MY_WORKSPACE" ] && [ -d "$MY_WORKSPACE" ] ; then + OUTPUT_DIR="$MY_WORKSPACE/export" + CGCS_REPO_DIR="$MY_WORKSPACE/rpmbuild/RPMS" + fi + + if [ -n "$MY_GUEST_DIR" ]; then + GUEST_DIR=$MY_GUEST_DIR + else + GUEST_DIR=$MY_WORKSPACE/guest + fi + + MOCK=/usr/bin/mock + if [ $VERBOSE -eq 0 ]; then + MOCK="$MOCK -q" + fi + + # Path to guest configuration + GUEST_BUILD_DIR=$DIR/build_guest + GUEST_BUILD_CMD=$GUEST_BUILD_DIR/build-guest-image.py + if [ $VERBOSE -eq 1 ]; then + GUEST_BUILD_CMD="$GUEST_BUILD_CMD -x" + fi + + if [ $BUILD_MODE == 'std' ]; then + OUTPUT_FILE=$OUTPUT_DIR/tis-centos-guest.img + elif [ $BUILD_MODE == 'rt' ]; then + OUTPUT_FILE=$OUTPUT_DIR/tis-centos-guest-rt.img + else + printf " Error -- unknown BUILD_MODE '$BUILD_MODE'\n"; + exit 1 + fi +} + + +function check_vars { + # Where to store data + printf "Finding cgcs-root\n" + printf " Checking \$MY_REPO (value \"$MY_REPO\")\n" + + if [ ! -z "$MY_REPO" ] && [ -d "$MY_REPO" ] ; then + INTERNAL_REPO_ROOT=$MY_REPO + printf " Found!\n" + fi + + if [ -z "$INTERNAL_REPO_ROOT" ] ; then + printf " No joy -- checking \$MY_REPO_ROOT_DIR (value \"$MY_REPO_ROOT_DIR\")\n" + if [ ! 
-z "$MY_REPO_ROOT_DIR" ] && [ -d "$MY_REPO_ROOT_DIR/cgcs-root" ] ; then + INTERNAL_REPO_ROOT=$MY_REPO_ROOT_DIR/cgcs-root + printf " Found!\n" + fi + fi + + if [ -z "$INTERNAL_REPO_ROOT" ] ; then + printf " No joy -- checking for \$MY_WORKSPACE/cgcs-root\n" + if [ -d "$MY_WORKSPACE/cgcs-root" ] ; then + INTERNAL_REPO_ROOT=$MY_WORKSPACE/cgcs-root + printf " Found!\n" + fi + fi + + if [ -z "$INTERNAL_REPO_ROOT" ] ; then + printf " Error -- could not locate cgcs-root repo.\n" + exit 1 + fi + + if [ "x$MY_BUILD_CFG" == "x" ];then + printf " Error -- reqiure MY_BUILD_CFG to be defined.\n" + exit 1 + fi + + RELEASE_INFO=$INTERNAL_REPO_ROOT/addons/wr-cgcs/layers/cgcs/middleware/recipes-common/build-info/release-info.inc + export PLATFORM_RELEASE=$(source $RELEASE_INFO && echo $PLATFORM_RELEASE) +} + + +function create_rootfs { + printf "\nCreating guest file system\n" + + mkdir -p $GUEST_DIR + if [ $? -ne 0 ]; then + printf " Error -- Could not create $GUEST_DIR\n"; + exit 1 + fi + + # Place build-time environment variables in mock configuration + GUEST_ENV="${MY_BUILD_ENVIRONMENT}-guest" + GUEST_CFG=$GUEST_DIR/$MY_BUILD_ENVIRONMENT_FILE + + MY_BUILD_ENVIRONMENT=$GUEST_ENV ${DIR}/modify-build-cfg $GUEST_CFG + if [ $? -ne 0 ]; then + printf " Error -- Could not update $GUEST_CFG\n"; + exit 1 + fi + + # Setup mock directories for the guest + if [ -d /localdisk/loadbuild/mock ]; then + LNK=/localdisk/loadbuild/mock/$GUEST_ENV + if [ ! -L $LNK ]; then + ln -s $GUEST_DIR $LNK + fi + fi + + if [ -d /localdisk/loadbuild/mock-cache ]; then + mkdir -p $GUEST_DIR/cache + LNK=/localdisk/loadbuild/mock-cache/$GUEST_ENV + if [ ! -L $LNK ]; then + ln -s $GUEST_DIR/cache $LNK + fi + fi + + # Setup mock chroot environment + $MOCK -r $GUEST_CFG --clean && $MOCK -r $GUEST_CFG --init + if [ $? 
-ne 0 ]; then + printf " Error -- Failed to setup guest mock chroot\n"; + exit 1 + fi + + # Install the RPMs to the root filesystem + + # Note that the "rt" build needs access to both local-std and local-rt repos + local EXTRA_REPOS="" + + if [ $BUILD_MODE == 'std' ]; then + INC_RPM_LIST=$(grep -v '^#' ${GUEST_BUILD_DIR}/rpm-install-list.txt) + TIS_RPM_LIST=$(grep -v '^#' ${GUEST_BUILD_DIR}/image.inc) + elif [ $BUILD_MODE == 'rt' ]; then + INC_RPM_LIST=$(grep -v '^#' ${GUEST_BUILD_DIR}/rpm-install-list-rt.txt) + TIS_RPM_LIST=$(grep -v '^#' ${GUEST_BUILD_DIR}/image-rt.inc) + EXTRA_REPOS="--enablerepo local-rt" + else + printf " Error -- unknown BUILD_MODE '$BUILD_MODE'\n"; + exit 1 + fi + + $MOCK -r $GUEST_CFG --install ${INC_RPM_LIST} ${TIS_RPM_LIST} ${EXTRA_REPOS} + if [ $? -ne 0 ]; then + printf " Error -- Failed to install RPM packages\n"; + exit 1 + fi + + # Remove RPMs that are not required in image (pruned package list) + # NOTE: these are automatically installed from the mock init not + # through dependencies. + EXC_RPM_LIST=$(grep -v '^#' ${GUEST_BUILD_DIR}/rpm-remove-list.txt) + + $MOCK -r $GUEST_CFG --remove ${EXC_RPM_LIST} + if [ $? -ne 0 ]; then + printf " Error -- Failed to remove RPM packages\n"; + exit 1 + fi + + printf " Done\n" +} + + +function update_rootfs { + printf "\nCustomizing guest file system\n" + + # Copy over skeleton configuration files + for GUEST_ROOTFS in $GUEST_BUILD_DIR/rootfs $GUEST_BUILD_DIR/rootfs-$BUILD_MODE; + do + for f in $(cd $GUEST_ROOTFS && find . -type f | cut -c3-); + do + echo "$MOCK -r $GUEST_CFG --copyin $GUEST_ROOTFS/$f $f" + $MOCK -r $GUEST_CFG --copyin $GUEST_ROOTFS/$f $f + if [ $? 
-ne 0 ]; then + printf " Error -- Failed to copyin file $f\n"; + exit 1 + fi + done + done + + # Run the root file system setup script inside the chroot + ROOTFS_SETUP=rootfs-setup.sh + $MOCK -r $GUEST_CFG --copyin $GUEST_BUILD_DIR/$ROOTFS_SETUP $TMP_DIR && \ + if [ $BUILD_MODE == 'rt' ]; then + ROOTFS_SETUP_CMD="$TMP_DIR$ROOTFS_SETUP --rt" + elif [ $BUILD_MODE == 'std' ]; then + ROOTFS_SETUP_CMD="$TMP_DIR$ROOTFS_SETUP --std" + else + ROOTFS_SETUP_CMD="$TMP_DIR$ROOTFS_SETUP" + fi + $MOCK -r $GUEST_CFG --chroot "$ROOTFS_SETUP_CMD" + if [ $? -ne 0 ]; then + printf " Error -- Failed to run guest $ROOTFS_SETUP\n"; + exit 1 + fi + $MOCK -r $GUEST_CFG --chroot "rm -f $TMP_DIR$ROOTFS_SETUP" + if [ $? -ne 0 ]; then + printf " Error -- Failed to delete $ROOTFS_SETUP from guest\n"; + exit 1 + fi + + printf " Done\n" +} + + +function build_image { + # Build the image + printf "\nBuilding guest image $OUTPUT_FILE\n" + + mkdir -p $OUTPUT_DIR + if [ $? -ne 0 ]; then + printf " Error -- Could not create $OUTPUT_DIR\n"; + exit 1 + fi + + # Build guest rootfs archive + ROOTFS_SPACE=$((500*1024*1024)) + ROOTFS_TAR=rootfs.tar + ROOTFS_EXCLUDE=rootfs-exclude.txt + + $MOCK -r $GUEST_CFG --copyin $GUEST_BUILD_DIR/$ROOTFS_EXCLUDE $TMP_DIR + $MOCK -r $GUEST_CFG --chroot -- tar -cf $TMP_DIR$ROOTFS_TAR -X $TMP_DIR$ROOTFS_EXCLUDE --numeric-owner / + $MOCK -r $GUEST_CFG --copyout $TMP_DIR$ROOTFS_TAR $GUEST_DIR + $MOCK -r $GUEST_CFG --chroot -- rm -f $TMP_DIR$ROOTFS_TAR + + $GUEST_BUILD_CMD -i $GUEST_DIR/$ROOTFS_TAR -o $OUTPUT_FILE -s $ROOTFS_SPACE + if [ $? -ne 0 ]; then + printf " Error -- Failed to build guest image\n"; + exit 1 + fi + + printf " Done\n" +} + + +function clean_guest { + printf "\nCleaning the guest $GUEST_DIR\n" + + if [ ! -e $GUEST_DIR ]; then + printf " Done...nothing to do\n"; + exit 0 + fi + + # Place build-time environment variables in mock configuration + GUEST_ENV="${MY_BUILD_ENVIRONMENT}-guest" + GUEST_CFG=$GUEST_DIR/$MY_BUILD_ENVIRONMENT_FILE + + if [ ! 
-e $GUEST_CFG ]; then + MY_BUILD_ENVIRONMENT=$GUEST_ENV ${DIR}/modify-build-cfg $GUEST_CFG + if [ $? -ne 0 ]; then + printf " Error -- Could not update $GUEST_CFG\n"; + exit 1 + fi + fi + + $MOCK -r $GUEST_CFG --clean + $MOCK -r $GUEST_CFG --scrub=cache + + rm -rf $GUEST_DIR + if [ $? -ne 0 ]; then + printf " Error -- Failed to remove guest $GUEST_DIR\n"; + exit 1 + fi + + printf " Done\n" +} + +############################################# +# Main code +############################################# + +usage () { + echo "" + echo "Usage: " + echo " build-guest [--rt | --std] [--verbose]" + echo " build-guest [--help]" + echo " build-guest [--clean]" + echo "" +} + +# Default argument values +HELP=0 +CLEAN=0 +VERBOSE=0 +BUILD_MODE='std' + +# read the options +TEMP=`getopt -o h --long clean --long rt --long std --long verbose -n 'test.sh' -- "$@"` +eval set -- "$TEMP" + +# extract options and their arguments into variables. +while true ; do + case "$1" in + -h|--help) HELP=1 ; shift ;; + --clean) CLEAN=1 ; shift ;; + --verbose) VERBOSE=1 ; shift ;; + --rt) BUILD_MODE='rt' ; shift ;; + --std) BUILD_MODE='std' ; shift ;; + --) shift ; break ;; + *) echo "Internal error!" 
; exit 1 ;; + esac +done + +if [ $HELP -eq 1 ]; then + usage + exit 0 +fi + +( +printf "\n*****************************\n" +printf "Create Titanium Cloud/CentOS Guest Image\n" +printf "*****************************\n\n" + +init_vars +check_vars + +if [ $CLEAN -eq 1 ]; then + clean_guest + exit 0 +fi + +create_rootfs +update_rootfs +build_image + +) 2>&1 | stdbuf -o0 awk '{ print strftime("%H:%M:%S"), $0; fflush(); }' ; exit ${PIPESTATUS[0]} diff --git a/build-tools/build-img b/build-tools/build-img new file mode 100755 index 00000000..84f3f2a7 --- /dev/null +++ b/build-tools/build-img @@ -0,0 +1,94 @@ +#!/bin/bash + +# Build an IMG file capable of being booted in a virtual environment +# The default settings are vda device which the Cumulus environment expects +# and controller mode + +usage () { + echo "" + echo "Usage: " + echo " build-img [--cpe] [--dest ] [--part [1 | 2]]" + echo " --dest " + echo " --cpe Boots in CPE mode. Default is controller mode." + echo "" +} + +DEST_ISO=bootimage_auto.iso +DEST_IMG=tis.img +AUTO_MODE=controller +HELP=0 +PART=0 + +# read the options +TEMP=`getopt -o hp:d: --long help,cpe,part:,dest: -n 'test.sh' -- "$@"` +eval set -- "$TEMP" + +# extract options and their arguments into variables. +while true ; do + case "$1" in + -h|--help) HELP=1 ; shift ;; + --cpe) AUTO_MODE=cpe; shift ;; + -d | --dest) DEST_IMG="$2"; shift; shift ;; + -p | --part) PART="$2"; shift; shift ;; + --) shift ; break ;; + *) echo "Internal error!" ; exit 1 ;; + esac +done + +if [ $HELP -eq 1 ]; then + usage + exit 0 +fi + +echo PART=$PART + +# Step 1: Build an ISO that autoboots + +# Cumulus default device is vda +if [ $PART -ne 2 ]; then + build-iso --file bootimage_auto.iso --auto $AUTO_MODE --device vda --cumulus +fi + +# Step 2: Convert the ISO to IMG +if [ $PART -ne 1 ]; then + if [ ! -e "/dev/loop-control" -o ! 
-e "/dev/kvm" ]; then + CMD="cd $MY_WORKSPACE/export; \ + $MY_REPO/addons/wr-cgcs/layers/cgcs/extras.ND/scripts/install_iso_to_disk_image.sh bootimage_auto.iso $DEST_IMG" + + if [ "$HOSTNAME" == "yow-cgts3-centos7" ]; then + echo "Attempting to run kvm_iso_to_img on yow-cgts3-lx" + ssh -o StrictHostKeyChecking=no yow-cgts3-lx "$CMD" + if [ $? -ne 0 ]; then + echo "Failed to run update-efiboot-image on yow-cgts3-lx" + fi + fi + + if [ "$HOSTNAME" == "yow-cgts2-centos7" ]; then + echo "Attempting to run kvm_iso_to_img on yow-cgts2-lx" + ssh -o StrictHostKeyChecking=no yow-cgts2-lx "$CMD" + if [ $? -ne 0 ]; then + echo "Failed to run update-efiboot-image on yow-cgts2-lx" + fi + fi + + if [ ! -f "$MY_WORKSPACE/export/$DEST_IMG" ]; then + printf "\n" + printf "****************************************************************** \n" + printf "No kvm and/or loop device on this machine. To complete the build \n" + printf "please copy '$MY_WORKSPACE/export/bootimage_auto.iso' to a machine \n" + printf "that supports kvm and loop devices and run ... \n" + printf " $MY_REPO/addons/wr-cgcs/layers/cgcs/extras.ND/scripts/install_iso_to_disk_image.sh bootimage_auto.iso $DEST_IMG\n" + printf "****************************************************************** \n" + exit 1 + fi + fi + + if [ ! -f "$MY_WORKSPACE/export/$DEST_IMG" ]; then + ( + cd $MY_WORKSPACE/export + $MY_REPO/addons/wr-cgcs/layers/cgcs/extras.ND/scripts/install_iso_to_disk_image.sh bootimage_auto.iso $DEST_IMG + exit $? + ) + fi +fi + diff --git a/build-tools/build-iso b/build-tools/build-iso new file mode 100755 index 00000000..f01f7dcd --- /dev/null +++ b/build-tools/build-iso @@ -0,0 +1,702 @@ +#!/bin/bash + +# Build the export/bootimage.iso file +# +# This script uses environment variables to determine the source of +# packages, and bundles the packages into a bootable .iso +# +# It starts by building a basic "vanilla CentOS" ISO, and then adds our +# packages to it. 
+ +usage () { + echo "" + echo "Usage: " + echo " build-iso [--auto ] [--file ] [--device ] [--skip-sign]" + echo " --file destination ISO file" + echo " --auto Modify kickstart to auto-install controller or cpe mode" + echo " --device Use a different boot/rootds device (default is sda)" + echo " --skip-sign do not add file signature to RPMs" + echo "" +} + +MY_YUM_CONF="" + +NPROCS=$(nproc) + +export MOCK=/usr/bin/mock + +CREATEREPO=$(which createrepo_c) +if [ $? -ne 0 ]; then + CREATEREPO="createrepo" +fi + +# TEMPORARY: Check for isohybrid now to give a warning about installing pkg +if [ ! -f /usr/bin/isohybrid ]; then + echo "Missing required utility: /usr/bin/isohybrid" + echo "Installation of syslinux is required:" + echo " sudo yum install -y syslinux" + exit 1 +fi + +function install_pkg_list { + local PKGLIST=$1 + if [ "x$PKGLIST" == "x" ]; then + return 1 + fi + + OLD_PWD=$PWD + + echo "Installing packages listed in $PKGLIST and dependancies" + \rm -f $OUTPUT_DIR/dist/report_deps.txt + $CREATEREPO $CGCS_REPO_DIR + $CREATEREPO $CGCS_RT_REPO_DIR + + \cp -v $MY_YUM_CONF $OUTPUT_DIR + + \cd $OUTPUT_DIST_DIR/isolinux/Packages + $INTERNAL_REPO_ROOT/build-tools/build_iso/cgts_deps.sh --deps=$PKGLIST + + if [ $? 
-ne 0 ] + then + echo "Could not install dependencies" + exit 1 + fi + + # clean up + echo "Removing local-std yum repo $CGCS_REPO_DIR/repodata" + echo "Removing local-rt yum repo $CGCS_RT_REPO_DIR/repodata" + + \cd $OLD_PWD +} + +# Generate the report of where all packages come from +function make_report { + local PKGLISTFILES=$@ + if [ "x$PKGLISTFILES" == "x" ]; then + return 1 + fi + echo "MAKING $REPORT_FILE" + echo "-----------------" >> $REPORT_FILE + + echo "ISO REPORT" > $REPORT_FILE + date >> $REPORT_FILE + echo "-----------------" >> $REPORT_FILE + + echo " " >> $REPORT_FILE + echo "-----------------" >> $REPORT_FILE + echo "EXPLICIT INCLUDES" >> $REPORT_FILE + echo "-----------------" >> $REPORT_FILE + for PKGLIST in $PKGLISTFILES; do + while read PKG; do + PKG=`echo $PKG | sed "s/#.*//"`; + if [ "${PKG}x" != "x" ]; then + echo $PKG >> $REPORT_FILE + fi + done < $PKGLIST + done + + echo " " >> $REPORT_FILE + echo "-----------------" >> $REPORT_FILE + echo " PACKAGES " >> $REPORT_FILE + echo "-----------------" >> $REPORT_FILE + cat $BUILT_REPORT | sort | uniq >> $REPORT_FILE + + echo " " >> $REPORT_FILE + echo "-----------------" >> $REPORT_FILE + echo " WARNINGS " >> $REPORT_FILE + echo "-----------------" >> $REPORT_FILE + + # Note that the warnings file may have multiple lines for the same + # missing dependency. A sort | uniq solves this so we don't duplicate + # warnings + cat $WARNINGS_REPORT | sort | uniq >> $REPORT_FILE + + echo "ISO REPORT: $REPORT_FILE" +} + +function init_vars { + ##################################### + # Input definitions + + # Where all CentOS packages live + # Where essential CentOS (minimal install) packages live + INTERNAL_REPO_ROOT= + # Where our own packages live + CGCS_REPO_DIR=$MY_WORKSPACE/std/rpmbuild/RPMS + CGCS_RT_REPO_DIR=$MY_WORKSPACE/rt/rpmbuild/RPMS + + MY_YUM_CONF=$(create-yum-conf) + if [ $? 
-ne 0 ]; then + echo "ERROR: create-yum-conf failed" + exit 1 + fi + + DISTRO_REPO_DIR=$(for d in $(grep baseurl $MY_YUM_CONF | grep file: | awk -F : '{print $2}' | sed 's:///:/:g'); do if [ -d $d/images ]; then echo $d ;fi; done) + + ##################################### + # Output definitons + + # where to put stuff (curent dir unless MY_WORKSPACE defined) + OUTPUT_DIR="$PWD/export" + if [ ! -z "$MY_WORKSPACE" ] && [ -d "$MY_WORKSPACE" ] ; then + OUTPUT_DIR="$MY_WORKSPACE/export" + CGCS_REPO_DIR="$MY_WORKSPACE/std/rpmbuild/RPMS" + CGCS_RT_REPO_DIR="$MY_WORKSPACE/rt/rpmbuild/RPMS" + fi + + # Directory in which to populate files to be distributed + if [ $CUMULUS -eq 0 ]; then + OUTPUT_DIST_DIR=$OUTPUT_DIR/dist + else + OUTPUT_DIST_DIR=$OUTPUT_DIR/dist-cumulus + fi + + # Package disc image + OUTPUT_FILE=$OUTPUT_DIR/$DEST_FILE + + # Generate an error if the output file is below this threshold + MINIMUM_EXPECTED_SIZE=500000000 + + + # report variables + REPORT_FILE=$OUTPUT_DIR/report.txt + BUILT_REPORT=$OUTPUT_DIR/local.txt + CLOUD_REPORT=$OUTPUT_DIR/cloud.txt + CLOUD_COMMON_REPORT=$OUTPUT_DIR/cloudcommon.txt + CENTOS_REPORT=$OUTPUT_DIR/centos.txt + EPEL_REPORT=$OUTPUT_DIR/epel.txt + WARNINGS_REPORT=$OUTPUT_DIR/warnings.txt + + \rm -f $REPORT_FILE + \rm -f $BUILT_REPORT + \rm -f $CLOUD_REPORT + \rm -f $CLOUD_COMMON_REPORT + \rm -f $CENTOS_REPORT + \rm -f $WARNINGS_REPORT +} + +# check input variables +function check_vars { + # Where to store data + printf "Finding cgcs-root\n" + printf " Checking \$MY_REPO (value \"$MY_REPO\")\n" + + if [ ! -z "$MY_REPO" ] && [ -d "$MY_REPO" ] ; then + INTERNAL_REPO_ROOT=$MY_REPO + printf " Found!\n" + fi + + if [ -z "$INTERNAL_REPO_ROOT" ] ; then + printf " No joy -- checking \$MY_REPO_ROOT_DIR (value \"$MY_REPO_ROOT_DIR\")\n" + if [ ! 
-z "$MY_REPO_ROOT_DIR" ] && [ -d "$MY_REPO_ROOT_DIR/cgcs-root" ] ; then + INTERNAL_REPO_ROOT=$MY_REPO_ROOT_DIR/cgcs-root + printf " Found!\n" + fi + fi + + if [ -z "$INTERNAL_REPO_ROOT" ] ; then + printf " No joy -- checking for \$MY_WORKSPACE/cgcs-root\n" + if [ -d "$MY_WORKSPACE/cgcs-root" ] ; then + INTERNAL_REPO_ROOT=$MY_WORKSPACE/cgcs-root + printf " Found!\n" + fi + fi + + if [ -z "$INTERNAL_REPO_ROOT" ] ; then + printf " Error -- could not locate cgcs-root repo.\n" + exit 1 + fi + + printf "\nChecking that we can access $DISTRO_REPO_DIR\n" + if [ ! -d "$DISTRO_REPO_DIR" ] ; then + printf " Error -- could not access $DISTRO_REPO_DIR\n" + exit 1 + fi + + if [ ! -e "$DISTRO_REPO_DIR/repodata" ] ; then + printf " Error -- $DISTRO_REPO_DIR is there, but does not seem sane\n" + fi + + printf "\nOkay, input looks fine...\n\n" + printf "Creating output directory $OUTPUT_DIST_DIR\n" + if [ $CLEAN_FLAG -eq 1 ]; then + echo " Cleaning..." + if [ -e $OUTPUT_DIST_DIR ] ; then + chmod -R a+w $OUTPUT_DIST_DIR + \rm -rf $OUTPUT_DIST_DIR + fi + if [ -e $OUTPUT_DIST_DIR ] ; then + printf "Error: could not remove old $OUTPUT_DIST_DIR\n" + exit 1 + fi + fi + + \mkdir -p $OUTPUT_DIST_DIR + if [ ! 
-d $OUTPUT_DIST_DIR ] ; then + printf "Error: could not create $OUTPUT_DIST_DIR\n" + exit 1 + fi + + RELEASE_INFO=$INTERNAL_REPO_ROOT/addons/wr-cgcs/layers/cgcs/middleware/recipes-common/build-info/release-info.inc + export PLATFORM_RELEASE=$(source $RELEASE_INFO && echo $PLATFORM_RELEASE) + + echo " Done" + echo "" +} + +function init_output_dir { + echo "Creating base output directory in $OUTPUT_DIST_DIR" + \mkdir -p $OUTPUT_DIST_DIR/isolinux/images + \mkdir -p $OUTPUT_DIST_DIR/isolinux/ks + \mkdir -p $OUTPUT_DIST_DIR/isolinux/LiveOS + \mkdir -p $OUTPUT_DIST_DIR/isolinux/Packages + \mkdir -p $OUTPUT_DIST_DIR/utils + + \mkdir -p $OUTPUT_DIST_DIR/isolinux/EFI + # This directory will contains files required for the PXE network installer + \mkdir -p $OUTPUT_DIST_DIR/isolinux/pxeboot + + echo " Copying base files" + + # Generate .discinfo file + date +%s.%N > $OUTPUT_DIST_DIR/isolinux/.discinfo + echo $PLATFORM_RELEASE >> $OUTPUT_DIST_DIR/isolinux/.discinfo + echo "x86_64" >> $OUTPUT_DIST_DIR/isolinux/.discinfo + + \cp -L -ru $DISTRO_REPO_DIR/isolinux/* $OUTPUT_DIST_DIR/isolinux/ + \cp -L -ru $DISTRO_REPO_DIR/images/pxeboot $OUTPUT_DIST_DIR/isolinux/images/ + + echo " Installing startup files" + + \cp -L $INTERNAL_REPO_ROOT/addons/wr-cgcs/layers/cgcs/mwa-beas/bsp-files/centos.syslinux.cfg $OUTPUT_DIST_DIR/isolinux/syslinux.cfg + \cp -L $INTERNAL_REPO_ROOT/addons/wr-cgcs/layers/cgcs/mwa-beas/bsp-files/centos.syslinux.cfg $OUTPUT_DIST_DIR/isolinux/isolinux.cfg + sed -i 's/wr_usb_boot/oe_iso_boot/' $OUTPUT_DIST_DIR/isolinux/isolinux.cfg + + # Modify the isolinux.cfg to auto install if requested + # Option 0 is Controller(serial). Option 2 is CPE serial. 
+ if [ "$AUTO_INSTALL" == "controller" ] ; then + echo "Modifying ISO to auto-install controller load" + perl -p -i -e 's/timeout 0/timeout 1\ndefault 0/' $OUTPUT_DIST_DIR/isolinux/isolinux.cfg + elif [ "$AUTO_INSTALL" == "cpe" ] ; then + echo "Modifying ISO to auto-install CPE (combined load)" + perl -p -i -e 's/timeout 0/timeout 1\ndefault 2/' $OUTPUT_DIST_DIR/isolinux/isolinux.cfg + fi + + # Modify the device if requested + if [ ! -z "$DEVICE" ] ; then + echo "Modifying ISO to use device $DEVICE" + perl -p -i -e "s/device=sda/device=${DEVICE}/g" $OUTPUT_DIST_DIR/isolinux/isolinux.cfg + fi + + + + # Copy UEFI files + \cp -L -ru $DISTRO_REPO_DIR/EFI/* $OUTPUT_DIST_DIR/isolinux/EFI/ + \cp -L $INTERNAL_REPO_ROOT/addons/wr-cgcs/layers/cgcs/mwa-beas/bsp-files/grub.cfg $OUTPUT_DIST_DIR/isolinux/EFI/BOOT/grub.cfg + \cp -L $INTERNAL_REPO_ROOT/addons/wr-cgcs/layers/cgcs/mwa-beas/bsp-files/pxeboot_grub.cfg $OUTPUT_DIST_DIR/isolinux/pxeboot/pxeboot_grub.cfg + + # Update the efiboot.img (See https://wiki.archlinux.org/index.php/Remastering_the_Install_ISO) + # We need to mount the image file, replace the grub.cfg file with the Titanium Cloud one, and unmount. + # Script update-efiboot-image will do this. If there is not loop device on the build machine + # then this script must be executed manually prior. + + if [ ! -e "/dev/loop-control" -a ! 
-f "$OUTPUT_DIR/efiboot.img" ]; then + CMD="export PROJECT=$PROJECT; \ + export SRC_BUILD_ENVIRONMENT=$SRC_BUILD_ENVIRONMENT; \ + export MY_BUILD_ENVIRONMENT=$MY_BUILD_ENVIRONMENT; \ + export MY_BUILD_ENVIRONMENT_FILE=$MY_BUILD_ENVIRONMENT_FILE; \ + export MY_BUILD_DIR=$MY_BUILD_DIR; \ + export MY_WORKSPACE=$MY_WORKSPACE; \ + export MY_REPO=$MY_REPO; \ + export MY_BUILD_CFG=$MY_BUILD_CFG; \ + export MY_MOCK_ROOT=$MY_MOCK_ROOT; \ + export PATH=$MY_REPO/build-tools:\$PATH; \ + update-efiboot-image" + + if [ "$HOSTNAME" == "yow-cgts3-centos7" ]; then + echo "Attempting to run update-efiboot-image on yow-cgts3-lx" + ssh -o StrictHostKeyChecking=no yow-cgts3-lx "$CMD" + if [ $? -ne 0 ]; then + echo "Failed to run update-efiboot-image on yow-cgts3-lx" + fi + fi + + if [ "$HOSTNAME" == "yow-cgts2-centos7" ]; then + echo "Attempting to run update-efiboot-image on yow-cgts2-lx" + ssh -o StrictHostKeyChecking=no yow-cgts2-lx "$CMD" + if [ $? -ne 0 ]; then + echo "Failed to run update-efiboot-image on yow-cgts2-lx" + fi + fi + fi + + if [ ! -e "/dev/loop-control" -a ! -f "$OUTPUT_DIR/efiboot.img" ]; then + printf "\n**************************************************************************************************** \n" + printf "No loop device on this machine. Please ensure $OUTPUT_DIR/efiboot.img \n" + printf "exist prior to executing build-iso by. It can be created by running \n" + printf " $INTERNAL_REPO_ROOT/build-tools/update-efiboot-image \n" + printf "on a machine that does support a loop device. Please ensure all standard \n" + printf "build environment variables are defined (e.g. MY_REPO, MY_WORKSPACE, etc.). \n" + printf " \n" + printf "e.g. 
If building on yow-cgts3-centos7, you'll want to run the script on \n" + printf " yow-cgts3-lx which shares the same file system, but supports loop devices \n" + printf "****************************************************************************************************** \n" + exit 1 + fi + + if [ -f "$OUTPUT_DIR/efiboot.img" ]; then + + # The script update-efiboot-image was run outside the build-iso script, do nothing. + printf " The image file $OUTPUT_DIR/efiboot.img already exist\n" + else + printf " The image file $OUTPUT_DIR/efiboot.img does not exist \n" + if [ ! -f "$INTERNAL_REPO_ROOT/build-tools/update-efiboot-image" ]; then + printf "*** Error: script update-efiboot-image does not exist *** \n" + exit 1 + fi + + # Run the script + $INTERNAL_REPO_ROOT/build-tools/update-efiboot-image + RET=$? + if [ $RET != 0 ]; then + printf "*** Error: update-efiboot-image script returned failure $RET *** \n" + exit 1 + fi + + fi + + \cp -L $OUTPUT_DIR/efiboot.img $OUTPUT_DIST_DIR/isolinux/images/ + \rm -f $OUTPUT_DIR/efiboot.img + + # Copy and set up pxeboot setup files + \cp $INTERNAL_REPO_ROOT/addons/wr-cgcs/layers/cgcs/mwa-beas/bsp-files/pxeboot_setup.sh $OUTPUT_DIST_DIR/isolinux/pxeboot_setup.sh + \cp $INTERNAL_REPO_ROOT/addons/wr-cgcs/layers/cgcs/mwa-beas/bsp-files/pxeboot.cfg $OUTPUT_DIST_DIR/isolinux/pxeboot/pxeboot.cfg + chmod +x $OUTPUT_DIST_DIR/isolinux/pxeboot_setup.sh + + \rm -f $OUTPUT_DIST_DIR/comps.xml + \cp -L $INTERNAL_REPO_ROOT/build-tools/build_iso/comps.xml.gz $OUTPUT_DIST_DIR/ + gunzip $OUTPUT_DIST_DIR/comps.xml.gz + + TMP_DIR=$MY_WORKSPACE/tmp + \mkdir -p $TMP_DIR + TMPDIR=$TMP_DIR yum clean all -c $MY_YUM_CONF + \rm -rf $TMP_DIR/yum-$USER-* + echo " Done" + echo "" +} + +function final_touches { + OLD_PWD=$PWD + + # Update the comps.xml + if [ ! 
-f $OUTPUT_DIST_DIR/comps.xml.bak ]; then + \cp $OUTPUT_DIST_DIR/comps.xml $OUTPUT_DIST_DIR/comps.xml.bak + fi + + python $INTERNAL_REPO_ROOT/addons/wr-cgcs/layers/cgcs/mwa-beas/bsp-files/platform_comps.py \ + --pkgdir $OUTPUT_DIST_DIR/isolinux/Packages \ + --groups $OUTPUT_DIST_DIR/comps.xml + if [ $? -ne 0 ]; then + echo "Failed to update comps.xml" + exit 1 + fi + + # create the repo + \cd $OUTPUT_DIST_DIR/isolinux + $CREATEREPO -q -g ../comps.xml . + + # build the ISO + printf "Building image $OUTPUT_FILE\n" + \cd $OUTPUT_DIST_DIR + chmod 664 isolinux/isolinux.bin + mkisofs -o $OUTPUT_FILE \ + -R -D -A 'oe_iso_boot' -V 'oe_iso_boot' \ + -quiet \ + -b isolinux.bin -c boot.cat -no-emul-boot \ + -boot-load-size 4 -boot-info-table \ + -eltorito-alt-boot \ + -e images/efiboot.img \ + -no-emul-boot \ + isolinux/ + + isohybrid --uefi $OUTPUT_FILE + implantisomd5 $OUTPUT_FILE + + \cd $OLD_PWD +} + +function extract_pkg_from_local_repo { + local repodir=$1 + local pkgname=$2 + + local pkgfile=$(repoquery --repofrompath local,${repodir} --location -q ${pkgname}) + if [ -z "${pkgfile}" ]; then + echo "Could not find package $pkgname in $repodir" + exit 1 + fi + + rpm2cpio ${pkgfile/file://} | cpio -idmv + if [ $? -ne 0 ]; then + echo "Failed to extract files from ${pkgfile/file://}" + exit 1 + fi +} + +function extract_installer_files { + # Changes to copied files here must also be reflected in patch-iso + + CGCSDIR=$INTERNAL_REPO_ROOT/addons/wr-cgcs/layers/cgcs + PKGDIR=$OUTPUT_DIST_DIR/isolinux/Packages + + ( + \cd $OUTPUT_DIR + \rm -rf kickstarts extra_cfgs kickstart.work + \mkdir kickstarts extra_cfgs kickstart.work + + echo "Retrieving kickstarts..." 
+ + \cd kickstart.work + + extract_pkg_from_local_repo ${CGCS_REPO_DIR} platform-kickstarts + extract_pkg_from_local_repo ${CGCS_REPO_DIR} platform-kickstarts-pxeboot + extract_pkg_from_local_repo ${CGCS_REPO_DIR} platform-kickstarts-extracfgs + + \cp --preserve=all www/pages/feed/rel-*/*.cfg pxeboot/*.cfg ../kickstarts/ && + \cp --preserve=all extra_cfgs/*.cfg ../extra_cfgs/ + if [ $? -ne 0 ]; then + echo "Failed to copy extracted kickstarts" + exit 1 + fi + + \cd .. + + # Copy kickstarts to ISO + \cp --preserve=all kickstarts/controller_ks.cfg $OUTPUT_DIST_DIR/isolinux/ks.cfg + # Modify the kickstart to shutdown instead of reboot if doing an auto install + if [ ! -z "$AUTO_INSTALL" ] ; then + sed -i 's/^reboot --eject/shutdown/' $OUTPUT_DIST_DIR/isolinux/ks.cfg + fi + + \mv kickstarts/pxeboot* $OUTPUT_DIST_DIR/isolinux/pxeboot/ + \cp --preserve=all kickstarts/* $OUTPUT_DIST_DIR/isolinux + + # Update OAM interface for cumulus auto install + if [ $CUMULUS -eq 1 ]; then + # Cumulus wants tty1 + perl -p -i -e 's/console=tty0/console=tty1/' $OUTPUT_DIST_DIR/isolinux/isolinux.cfg + + # CUMULUS setup scripts specify ens3 for OAM + OAM_IFNAME=ens3 + + cat <> $OUTPUT_DIST_DIR/isolinux/ks.cfg +%post +#For cumulus tis on tis automated install +cat << EOF > /etc/sysconfig/network-scripts/ifcfg-${OAM_IFNAME} +IPADDR=10.10.10.3 +NETMASK=255.255.255.0 +BOOTPROTO=static +ONBOOT=yes +DEVICE=${OAM_IFNAME} +MTU=1500 +GATEWAY=10.10.10.1 +EOF +%end +EOM + fi + + # For PXE boot network installer + + echo ${OUTPUT_DIST_DIR}/isolinux/Packages + + local WORKDIR=pxe-network-installer.content + local ORIG_PWD=$PWD + + \rm -rf $WORKDIR + \mkdir $WORKDIR + \cd $WORKDIR + + extract_pkg_from_local_repo ${CGCS_REPO_DIR} pxe-network-installer + extract_pkg_from_local_repo ${CGCS_REPO_DIR} grub2-efi-pxeboot + + \mkdir -p $OUTPUT_DIST_DIR/isolinux/pxeboot/EFI/centos/x86_64-efi + + \cp --preserve=all pxeboot/pxelinux.0 pxeboot/menu.c32 pxeboot/chain.c32 $OUTPUT_DIST_DIR/isolinux/pxeboot && + \cp 
--preserve=all pxeboot/EFI/centos/x86_64-efi/* $OUTPUT_DIST_DIR/isolinux/pxeboot/EFI/centos/x86_64-efi/ && + \cp --preserve=all pxeboot/EFI/grubx64.efi $OUTPUT_DIST_DIR/isolinux/pxeboot/EFI/ + if [ $? -ne 0 ]; then + echo "Error: Could not copy all files from installer" + exit 1 + fi + + \cp --preserve=all www/pages/feed/rel-*/LiveOS/squashfs.img $OUTPUT_DIST_DIR/isolinux/LiveOS + if [ $? -ne 0 ]; then + echo "Error: Could not copy squashfs from LiveOS" + exit 1 + fi + + + # Replace vmlinuz and initrd.img with our own pre-built ones + \rm -f \ + $OUTPUT_DIST_DIR/isolinux/vmlinuz \ + $OUTPUT_DIST_DIR/isolinux/images/pxeboot/vmlinuz \ + $OUTPUT_DIST_DIR/isolinux/initrd.img \ + $OUTPUT_DIST_DIR/isolinux/images/pxeboot/initrd.img + \cp --preserve=all pxeboot/rel-*/installer-bzImage_1.0 \ + $OUTPUT_DIST_DIR/isolinux/vmlinuz && + \cp --preserve=all pxeboot/rel-*/installer-bzImage_1.0 \ + $OUTPUT_DIST_DIR/isolinux/images/pxeboot/vmlinuz && + \cp --preserve=all pxeboot/rel-*/installer-intel-x86-64-initrd_1.0 \ + $OUTPUT_DIST_DIR/isolinux/initrd.img && + \cp --preserve=all pxeboot/rel-*/installer-intel-x86-64-initrd_1.0 \ + $OUTPUT_DIST_DIR/isolinux/images/pxeboot/initrd.img + + if [ $? -ne 0 ]; then + echo "Error: Failed to copy installer images" + exit 1 + fi + + \cd $ORIG_PWD + \rm -rf $WORKDIR + ) + if [ $? 
-ne 0 ]; then + exit 1 + fi +} + +function setup_upgrades_files { + # Changes to copied files here must also be reflected in patch-iso + + # Copy the upgrade files + UPGRADES_DIR="$OUTPUT_DIST_DIR/isolinux/upgrades" + \rm -rf $UPGRADES_DIR + \mkdir -p $UPGRADES_DIR + \cp $INTERNAL_REPO_ROOT/addons/wr-cgcs/layers/cgcs/mwa-beas/bsp-files/upgrades/* $UPGRADES_DIR + sed -i "s/xxxSW_VERSIONxxx/${PLATFORM_RELEASE}/g" $UPGRADES_DIR/metadata.xml + chmod +x $UPGRADES_DIR/*.sh + # Write the version out (used in upgrade scripts - this is the same as SW_VERSION) + echo "VERSION=$PLATFORM_RELEASE" > $UPGRADES_DIR/version +} + +function sign_iso { + # Sign the .iso with the developer private key + # Sigining with the formal key is only to be done for customer release + # builds + local isofilename=$(basename $OUTPUT_DIR/$DEST_FILE) + local isofilenoext="${isofilename%.*}" + openssl dgst -sha256 -sign ${MY_REPO}/build-tools/signing/dev-private-key.pem -binary -out $OUTPUT_DIR/$isofilenoext.sig $OUTPUT_DIR/$DEST_FILE +} + +############################################# +# Main code +############################################# + +# Check args +HELP=0 +CLEAN_FLAG=1 # TODO -- doesn't yet work without --clean +DEST_FILE=bootimage.iso +AUTO_FLAG=0 +AUTO_INSTALL="" +CUMULUS=0 +SIGN_RPM_FILES=1 +DEVICE="" + +# read the options +TEMP=`getopt -o hf:a:d: --long help,file:,auto:,device:,cumulus,clean,skip-sign -n 'test.sh' -- "$@"` +eval set -- "$TEMP" + +# extract options and their arguments into variables. +while true ; do + case "$1" in + -h|--help) HELP=1 ; shift ;; + --clean) CLEAN_FLAG=1 ; shift ;; + --skip-sign) SIGN_RPM_FILES=0 ; shift ;; + --cumulus) CUMULUS=1 ; shift ;; + -f | --file) DEST_FILE="$2"; shift; shift ;; + -d | --device) DEVICE="$2"; shift; shift ;; + -a | --auto) AUTO_FLAG=1; AUTO_INSTALL="$2"; shift; shift ;; + --) shift ; break ;; + *) echo "Internal error!" 
; exit 1 ;; + esac +done + +if [ $AUTO_FLAG -eq 1 ]; then + if [[ "$AUTO_INSTALL" != "controller" && "$AUTO_INSTALL" != "cpe" ]] ; then + echo "Unsupported --auto value: $AUTO_INSTALL" + exit 1 + fi +fi + +if [ $HELP -eq 1 ]; then + usage + exit 0 +fi + + +( +printf "\n*************************\n" +printf "Create Titanium Cloud/CentOS Boot CD\n" +printf "*************************\n\n" + +# Init variables +init_vars +check_vars +PKGLIST_MINIMAL=$INTERNAL_REPO_ROOT/build-tools/build_iso/minimal_rpm_list.txt +PKGLIST_TI=$INTERNAL_REPO_ROOT/build-tools/build_iso/image.inc +PKGLIST_DEV=$INTERNAL_REPO_ROOT/build-tools/build_iso/image-dev.inc + +# Create skeleton build dir +init_output_dir + +# Create the vanilla DVD +echo "Copying vanilla CentOS RPMs" +install_pkg_list $PKGLIST_MINIMAL + +# Find all CGCS packages +# SAL exit 0 +echo "Installing Titanium Cloud packages" +install_pkg_list $PKGLIST_TI +if [ $? -eq 2 ]; then + exit 1 +fi +if [ "x${RELEASE_BUILD}" == "x" ]; then + echo "Installing Titanium Cloud developer packages" + install_pkg_list ${PKGLIST_DEV} + if [ $? -eq 2 ]; then + exit 1 + fi +fi + +# Extract installer files +extract_installer_files + +# Upgrades files +setup_upgrades_files + +# add file signatures to all rpms +if [ $SIGN_RPM_FILES -ne 0 ]; then + sign-rpms -d $OUTPUT_DIST_DIR/isolinux/Packages + if [ $? -ne 0 ] ; then + echo "failed to add file signatures to RPMs" + exit 1 + fi +fi + +# Finalize and build ISO +final_touches + +# Sign the ISO +sign_iso + +make_report $PKGLIST_MINIMAL $PKGLIST_TI + +# Check sanity +FILESIZE=$(wc -c <"$OUTPUT_FILE") +if [ $FILESIZE -ge $MINIMUM_EXPECTED_SIZE ]; then + printf "Done." 
+ printf "Output file: $OUTPUT_FILE\n\n" +else + printf "Output file $OUTPUT_FILE smaller than expected -- probable error\n\n" + exit 1 +fi + +) 2>&1 | stdbuf -o0 awk '{ print strftime("%H:%M:%S"), $0; fflush(); }' ; exit ${PIPESTATUS[0]} diff --git a/build-tools/build-pkg-srpm b/build-tools/build-pkg-srpm new file mode 100644 index 00000000..ae348728 --- /dev/null +++ b/build-tools/build-pkg-srpm @@ -0,0 +1,63 @@ +#!/bin/bash + +# Available environment +# SRC_BASE = absolute path to cgcs-root +# AVS_BASE = absolute path to AVS source +# CGCS_BASE = absolute path to CGCS source +# RPM_BUILD_BASE = Directory where the package .distro directory can be found +# SRPM_OUT = Directory into which SRC RPMS are copied in preparation for mock build +# RPM_DIR = Directory into which binary RPMs are delivered by mock + +SRC_DIR="/sources" +VERSION=$(grep '^Version:' PKG-INFO | awk -F ': ' '{print $2}' | sed -e 's/^[[:space:]]*//') +TAR_NAME=$(grep '^Name:' PKG-INFO | awk -F ': ' '{print $2}' | sed -e 's/^[[:space:]]*//') +CUR_DIR=`pwd` +BUILD_DIR=".distro/centos7/rpmbuild" + +mkdir -p $BUILD_DIR/SRPMS + +TAR="$TAR_NAME-$VERSION.tar.gz" +TAR_PATH="$BUILD_DIR/SOURCES/$TAR" + +TAR_NEEDED=0 +if [ -f $TAR_PATH ]; then + n=`find . -cnewer $TAR_PATH -and ! -path './.git*' \ + -and ! -path './build/*' \ + -and ! -path './.pc/*' \ + -and ! -path './patches/*' \ + -and ! -path './.distro/*' \ + -and ! 
-path './pbr-*.egg/*' \ + | wc -l` + if [ $n -gt 0 ]; then + TAR_NEEDED=1 + fi +else + TAR_NEEDED=1 +fi + +if [ $TAR_NEEDED -gt 0 ]; then + tar czvf $TAR_PATH .$SRC_DIR --exclude '.git*' --exclude 'build' --exclude='.pc' --exclude='patches' --exclude='.distro' --exclude='pbr-*.egg' --transform "s,^\.$SRC_DIR,$TAR_NAME-$VERSION," +fi + +for SPEC in `ls $BUILD_DIR/SPECS`; do + SPEC_PATH="$BUILD_DIR/SPECS/$SPEC" + RELEASE=$(grep '^Release:' $SPEC_PATH | awk -F ': ' '{print $2}' | sed -e 's/^[[:space:]]*//') + NAME=`echo $SPEC | sed 's/.spec$//'` + SRPM="$NAME-$VERSION-$RELEASE.src.rpm" + SRPM_PATH="$BUILD_DIR/SRPMS/$SRPM" + + BUILD_NEEDED=0 + if [ -f $SRPM_PATH ]; then + n=`find . -cnewer $SRPM_PATH | wc -l` + if [ $n -gt 0 ]; then + BUILD_NEEDED=1 + fi + else + BUILD_NEEDED=1 + fi + + if [ $BUILD_NEEDED -gt 0 ]; then + rpmbuild -bs $SPEC_PATH --define="%_topdir $CUR_DIR/$BUILD_DIR" --define="_tis_dist .tis" + fi +done + diff --git a/build-tools/build-pkgs b/build-tools/build-pkgs new file mode 100755 index 00000000..0c77b129 --- /dev/null +++ b/build-tools/build-pkgs @@ -0,0 +1,34 @@ +#!/bin/bash + +# This program is a wrapper around build-pkgs-parallel and build-pkgs-serial + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +usage () { + echo "" + echo "Usage: " + echo " Create source and binary rpms:" + echo " build-pkgs [--serial] [args]" +} + +SERIAL_FLAG=0 + +for arg in "$@"; do + case "$arg" in + --serial) SERIAL_FLAG=1 ;; + esac +done + +which mock_tmpfs_umount >> /dev/null +if [ $?
-ne 0 ]; then + SERIAL_FLAG=1 +fi + +if [ $SERIAL_FLAG -eq 1 ]; then + echo "build-pkgs-serial $@" + build-pkgs-serial "$@" +else + echo "build-pkgs-parallel $@" + build-pkgs-parallel "$@" +fi + diff --git a/build-tools/build-pkgs-parallel b/build-tools/build-pkgs-parallel new file mode 100755 index 00000000..af9e5f94 --- /dev/null +++ b/build-tools/build-pkgs-parallel @@ -0,0 +1,370 @@ +#!/bin/bash + +# This program is a wrapper around build-srpms-parallel and build-rpms-parallel + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +usage () { + echo "" + echo "Usage: " + echo " Create source and Binary rpms:" + echo " Build optimizations (--no-descendants, --no-required, --no-build-info," + echo " --no-autoclean) are not recommended for the first build after a clone/pull," + echo " nor the final build prior to creating an iso or patch, but can be used" + echo " for intermediate builds. i.e. while debugging compilation failures." + echo " build-pkgs-parallel [--no-descendants] [--no-required] [--no-build-info] [--no-autoclean] [--careful] [--formal] [ list of package names ]" + echo "" + echo " Delete source rpms, and the directories associated with it's creation:" + echo " Note: does not clean an edit environment" + echo " build-pkgs-parallel --clean [ list of package names ]" + echo "" + echo " Extract an src.rpm into a pair of git trees to aid in editing it's contents," + echo " one for source code and one for metadata such as the spec file." + echo " If --no-meta-patch is specified, then WRS patches are omitted." 
+ echo " build-pkgs-parallel --edit [--no-meta-patch] [ list of package names ]" + echo "" + echo " Delete an edit environment" + echo " build-pkgs-parallel --edit --clean [ list of package names ]" + echo "" + echo " This help page" + echo " build-pkgs-parallel [--help]" + echo "" +} + + +HELP=0 +CLEAN_FLAG=0 +EDIT_FLAG=0 +STD_BUILD=1 +RT_BUILD=1 +INSTALLER_BUILD=0 + +# read the options +TEMP=$(getopt -o h --long rt,std,installer,edit,no-meta-patch,no-descendants,no-required,no-build-info,no-autoclean,formal,careful,help,clean -n 'build-pkgs-parallel' -- "$@") +if [ $? -ne 0 ]; then + usage + exit 0 +fi +eval set -- "$TEMP" + +# extract options and their arguments into variables. +EXTRA_ARGS_COMMON="" +EXTRA_ARGS_SRPM="" +EXTRA_ARGS_RPM="" +while true ; do + case "$1" in + --no-descendants) EXTRA_ARGS_COMMON+=" --no-descendants" ; shift ;; + --formal) EXTRA_ARGS_COMMON+=" --formal" ; shift ;; + --careful) EXTRA_ARGS_RPM+=" --careful" ; shift ;; + --no-required) EXTRA_ARGS_RPM+=" --no-required" ; shift ;; + --no-build-info) EXTRA_ARGS_COMMON+=" --no-build-info" ; shift ;; + --no-autoclean) EXTRA_ARGS_RPM+=" --no-autoclean" ; shift ;; + --no-meta-patch) EXTRA_ARGS_SRPM+=" --no-meta-patch" ; shift ;; + -h|--help) HELP=1 ; shift ;; + --clean) CLEAN_FLAG=1 ; shift ;; + --edit) EDIT_FLAG=1 ; EXTRA_ARGS_SRPM+=" --edit"; shift ;; + --rt) STD_BUILD=0 ; shift ;; + --std) RT_BUILD=0 ; shift ;; + --installer) INSTALLER_BUILD=1 ; STD_BUILD=0 ; RT_BUILD=0 ; shift ;; + --) shift ; break ;; + *) usage; exit 1 ;; + esac +done + +if [ $HELP -eq 1 ]; then + usage + exit 0 +fi + +function my_exit() { + build-rpms-parallel --std --tmpfs-clean + build-rpms-parallel --rt --tmpfs-clean +} + +function my_sigint() { + echo "build-pkgs-parallel sigint" + pkill -SIGABRT -P $BASHPID &> /dev/null + echo "build-pkgs-parallel waiting" + wait + echo "build-pkgs-parallel wait complete" + +} + +function my_sighup() { + echo "build-pkgs-parallel sighup" + pkill -SIGABRT -P $BASHPID &> /dev/null + 
echo "build-pkgs-parallel waiting" + wait + echo "build-pkgs-parallel wait complete" +} + +function my_sigabrt() { + echo "build-pkgs-parallel sigabrt" + pkill -SIGABRT -P $BASHPID &> /dev/null + echo "build-pkgs-parallel waiting" + wait + echo "build-pkgs-parallel wait complete" +} + +function my_sigterm() { + echo "build-pkgs-parallel sigterm" + pkill -SIGABRT -P $BASHPID &> /dev/null + echo "build-pkgs-parallel waiting" + wait + echo "build-pkgs-parallel wait complete" +} + +trap my_exit EXIT +trap my_sigint INT +trap my_sighup HUP +trap my_sigabrt ABRT +trap my_sigterm TERM + +TARGETS=" $@ " +TARGETS_STD=" " +TARGETS_RT=" " +TARGETS_INSTALLER=" " +TARGETS_MISC=" " + +find_targets () { + local centos_pkg_dirs=$1 + local d="" + local d2="" + local g="" + local x="" + local name="" + local path="" + local RESULT=" " + local FOUND=0 + + for g in $(find $MY_REPO -type d -name .git); do + d=$(dirname $g) + if [ -f $d/$centos_pkg_dirs ]; then + for d2 in $(grep -v '^#' $d/$centos_pkg_dirs); do + name="" + if [ -f $d/$d2/centos/srpm_path ]; then + path=$(cat $d/$d2/centos/srpm_path | head -n 1 | sed "s#^mirror:CentOS/tis-r3-CentOS/mitaka#$MY_REPO/cgcs-centos-repo#" | sed "s#^mirror:#$MY_REPO/cgcs-centos-repo/#" | sed "s#^repo:#$MY_REPO/#" | sed "s#^3rd_party:#$MY_REPO/cgcs-3rd-party-repo/#" | sed "s#^Source/#$MY_REPO/cgcs-centos-repo/Source/#") + name=$(rpm -q --qf='%{NAME}' --nosignature -p $path) + else + path=$(find $d/$d2/centos/ -name '*.spec' | head -n 1) + if [[ ( -z "$path" ) && ( -f $d/$d2/centos/spec_path ) ]]; then + path=$(find $MY_REPO/$(cat $d/$d2/centos/spec_path) -maxdepth 1 -name '*.spec' | head -n 1) + fi + if [ "$path" != "" ]; then + name=$(spec_find_tag Name "$path" 2>> /dev/null) + fi + fi + if [ "$name" != "" ]; then + if [ "$BUILD_TYPE" == "rt" ]; then + FOUND=0 + for x in $TARGETS; do + if [ "${x: -3}" == "-rt" ]; then + if [ "${name}" == "$x" ] || [ "${name}-rt" == "${x}" ]; then + RESULT+="$x " + FOUND=1 + break + fi + fi + done + if [ $FOUND 
-eq 0 ]; then + for x in $TARGETS; do + if [ "${name}" == "${x}-rt" ]; then + RESULT+="$x-rt " + FOUND=1 + break + else + if [ "${name}" == "$x" ] || [ "${name}-rt" == "${x}" ]; then + RESULT+="$x " + FOUND=1 + break + fi + fi + done + fi + else + for x in $TARGETS; do + if [ "${name}" == "$x" ]; then + RESULT+="$x " + FOUND=1 + break + fi + done + fi + fi + done + fi + done + + echo "$RESULT" + return 0 +} + +echo "CLEAN_FLAG=$CLEAN_FLAG" +echo "EDIT_FLAG=$EDIT_FLAG" + +if [ "x$TARGETS" != "x " ]; then + source $MY_REPO/build-tools/spec-utils + TARGETS_STD="$(find_targets centos_pkg_dirs)" + + BUILD_TYPE_SAVE="$BUILD_TYPE" + BUILD_TYPE="rt" + TARGETS_RT="$(find_targets centos_pkg_dirs_rt)" + BUILD_TYPE="$BUILD_TYPE_SAVE" + + echo "TARGETS_STD=$TARGETS_STD" + echo "TARGETS_RT=$TARGETS_RT" + + for x in $TARGETS; do + if [[ $TARGETS_STD == *" $x "* ]] + then + echo "found $x" >> /dev/null; + else + if [[ $TARGETS_RT == *" $x "* ]] + then + echo "found $x" >> /dev/null; + else + TARGETS_MISC+="$x " + fi + fi + done +fi + +echo "EXTRA_ARGS_COMMON='$EXTRA_ARGS_COMMON'" +echo "EXTRA_ARGS_SRPM='$EXTRA_ARGS_SRPM'" +echo "EXTRA_ARGS_RPM='$EXTRA_ARGS_RPM'" +echo "TARGETS='$TARGETS'" +echo "TARGETS_STD='$TARGETS_STD'" +echo "TARGETS_RT='$TARGETS_RT'" +echo "TARGETS_MISC='$TARGETS_MISC'" + +if [ $CLEAN_FLAG -eq 1 ]; then + if [ $STD_BUILD -eq 1 ]; then + if [ "x$TARGETS" == "x " ] || [ "$TARGETS_STD" != " " ] || [ "$TARGETS_MISC" != " " ]; then + if [ $EDIT_FLAG -ne 1 ]; then + echo "$DIR/build-rpms-parallel --std --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_STD $TARGETS_MISC" + $DIR/build-rpms-parallel --std --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_STD $TARGETS_MISC || exit 1 + fi + fi + if [ "x$TARGETS" == "x " ] || [ "$TARGETS_STD" != " " ] || [ "$TARGETS_MISC" != " " ]; then + echo "$DIR/build-srpms-parallel --std --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_STD $TARGETS_MISC" + $DIR/build-srpms-parallel --std --clean $EXTRA_ARGS_COMMON 
$EXTRA_ARGS_SRPM $TARGETS_STD $TARGETS_MISC || exit 1 + fi + fi + if [ $RT_BUILD -eq 1 ]; then + if [ "x$TARGETS" == "x " ] || [ "$TARGETS_RT" != " " ] || [ "$TARGETS_MISC" != " " ]; then + if [ $EDIT_FLAG -ne 1 ]; then + echo "$DIR/build-rpms-parallel --rt --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_RT $TARGETS_MISC" + $DIR/build-rpms-parallel --rt --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_RT $TARGETS_MISC || exit 1 + fi + fi + if [ "x$TARGETS" == "x " ] || [ "$TARGETS_RT" != " " ] || [ "$TARGETS_MISC" != " " ]; then + echo "$DIR/build-srpms-parallel --rt --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_RT $TARGETS_MISC" + $DIR/build-srpms-parallel --rt --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_RT $TARGETS_MISC || exit 1 + fi + fi + exit $? +fi + +function launch_build() +{ + local build_type=$1 + shift + + local logfile=$MY_WORKSPACE/build-$build_type.log + local rc + local targets + + if [ "$build_type" == "std" ]; then + targets="$TARGETS_STD $TARGETS_MISC" + else + if [ "$build_type" == "rt" ]; then + targets="$TARGETS_RT $TARGETS_MISC" + else + if [ "$build_type" == "installer" ]; then + targets="$TARGETS_INSTALLER $TARGETS_MISC" + else + targets="$TARGETS" + fi + fi + fi + + echo "Launching $build_type build, logging to $logfile" + \rm $logfile + + echo -e "\n######## $(date): Launching build-srpms-parallel --$build_type $EXTRA_ARGS $@\n" | tee --append $logfile + # No clean flag, call build-srpms-parallel followed by build-rpms-parallel + echo "$DIR/build-srpms-parallel --$build_type $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $targets" | tee --append $logfile + $DIR/build-srpms-parallel --$build_type $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $targets 2>&1 | tee --append $logfile + rc=${PIPESTATUS[0]} + if [ $rc -eq 0 ]; then + echo -e "\n######## $(date): build-srpm-parallel --$build_type was successful" | tee --append $logfile + else + echo -e "\n######## $(date): build-srpm-parallel --$build_type failed with rc=$rc" | tee --append 
$logfile + echo -e "\n$(date): build-srpm-parallel --$build_type failed with rc=$rc" + exit $rc + fi + + if [ $EDIT_FLAG -ne 1 ]; then + echo -e "\n######## $(date): Launching build-rpms-parallel --$build_type $EXTRA_ARGS $@\n" | tee --append $logfile + echo "$DIR/build-rpms-parallel --$build_type $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $targets" | tee --append $logfile + $DIR/build-rpms-parallel --$build_type $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $targets 2>&1 | tee --append $logfile + rc=${PIPESTATUS[0]} + if [ $rc -eq 0 ]; then + echo -e "\n######## $(date): build-rpm-parallel --$build_type was successful" | tee --append $logfile + else + echo -e "\n######## $(date): build-rpm-parallel --$build_type failed with rc=$rc" | tee --append $logfile + echo -e "\n$(date): build-rpm-parallel --$build_type failed with rc=$rc" + exit $rc + fi + fi + + echo -e "\n$(date): $build_type complete\n" + #exit $rc +} + +function progbar() +{ + while :; do + for s in / - \\ \|; do + printf "\r$s" + sleep .5 + done + done +} + +# Create $MY_WORKSPACE if it doesn't exist already +mkdir -p $MY_WORKSPACE + +if [ $STD_BUILD -eq 1 ]; then + if [ "x$TARGETS" == "x " ] || [ "$TARGETS_STD" != " " ] || [ "$TARGETS_MISC" != " " ]; then + launch_build std + else + echo "Skipping 'std' build, no valid targets in list: $TARGETS" + fi +else + echo "Skipping 'std' build" +fi +if [ $RT_BUILD -eq 1 ]; then + if [ "x$TARGETS" == "x " ] || [ "$TARGETS_RT" != " " ] || [ "$TARGETS_MISC" != " " ]; then + launch_build rt + else + echo "Skipping 'rt' build, no valid targets in list: $TARGETS" + fi +else + echo "Skipping 'rt' build" +fi +if [ $INSTALLER_BUILD -eq 1 ]; then + if [ "x$TARGETS" == "x " ] || [ "$TARGETS_INSTALLER" != " " ] || [ "$TARGETS_MISC" != " " ]; then + launch_build installer + else + echo "Skipping 'installer' build, no valid targets in list: $TARGETS" + fi +else + echo "Skipping 'installer' build" +fi + + +echo "All builds were successful" + +exit 0 + diff --git a/build-tools/build-pkgs-serial 
b/build-tools/build-pkgs-serial new file mode 100755 index 00000000..6935d106 --- /dev/null +++ b/build-tools/build-pkgs-serial @@ -0,0 +1,379 @@ +#!/bin/bash + +# This program is a wrapper around build-srpms and build-rpms + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +usage () { + echo "" + echo "Usage: " + echo " Create source and Binary rpms:" + echo " Build optimizations (--no-descendants, --no-required, --no-build-info," + echo " --no-autoclean) are not recommended for the first build after a clone/pull," + echo " nor the final build prior to creating an iso or patch, but can be used" + echo " for intermediate builds. i.e. while debugging compilation failures." + echo " build-pkgs [--no-descendants] [--no-required] [--no-build-info] [--no-autoclean] [--careful] [--formal] [ list of package names ]" + echo "" + echo " Delete source rpms, and the directories associated with it's creation:" + echo " Note: does not clean an edit environment" + echo " build-pkgs --clean [ list of package names ]" + echo "" + echo " Extract an src.rpm into a pair of git trees to aid in editing it's contents," + echo " one for source code and one for metadata such as the spec file." + echo " If --no-meta-patch is specified, then WRS patches are omitted." + echo " build-pkgs --edit [--no-meta-patch] [ list of package names ]" + echo "" + echo " Delete an edit environment" + echo " build-pkgs --edit --clean [ list of package names ]" + echo "" + echo " This help page" + echo " build-pkgs [--help]" + echo "" +} + + +HELP=0 +CLEAN_FLAG=0 +EDIT_FLAG=0 +STD_BUILD=1 +RT_BUILD=1 +INSTALLER_BUILD=0 + +# read the options +TEMP=`getopt -o h --long serial,rt,std,installer,edit,no-meta-patch,no-descendants,no-required,no-build-info,no-autoclean,formal,careful,help,clean -n 'build-pkgs' -- "$@"` +if [ $? -ne 0 ]; then + usage + exit 0 +fi +eval set -- "$TEMP" + +# extract options and their arguments into variables.
+EXTRA_ARGS_COMMON="" +EXTRA_ARGS_SRPM="" +EXTRA_ARGS_RPM="" +while true ; do + case "$1" in + --no-descendants) EXTRA_ARGS_COMMON+=" --no-descendants" ; shift ;; + --formal) EXTRA_ARGS_COMMON+=" --formal" ; shift ;; + --careful) EXTRA_ARGS_RPM+=" --careful" ; shift ;; + --no-required) EXTRA_ARGS_RPM+=" --no-required" ; shift ;; + --no-build-info) EXTRA_ARGS_COMMON+=" --no-build-info" ; shift ;; + --no-autoclean) EXTRA_ARGS_RPM+=" --no-autoclean" ; shift ;; + --no-meta-patch) EXTRA_ARGS_SRPM+=" --no-meta-patch" ; shift ;; + -h|--help) HELP=1 ; shift ;; + --clean) CLEAN_FLAG=1 ; shift ;; + --edit) EDIT_FLAG=1 ; EXTRA_ARGS_SRPM+=" --edit"; shift ;; + --rt) STD_BUILD=0 ; shift ;; + --std) RT_BUILD=0 ; shift ;; + --installer) INSTALLER_BUILD=1 ; STD_BUILD=0 ; RT_BUILD=0 ; shift ;; + --serial) shift ;; + --) shift ; break ;; + *) usage; exit 1 ;; + esac +done + +if [ $HELP -eq 1 ]; then + usage + exit 0 +fi + +TARGETS=" $@ " +TARGETS_STD=" " +TARGETS_RT=" " +TARGETS_INSTALLER=" " +TARGETS_MISC=" " + +find_targets () { + local centos_pkg_dirs=$1 + local d="" + local d2="" + local g="" + local x="" + local name="" + local path="" + local RESULT=" " + local FOUND=0 + + for g in $(find $MY_REPO -type d -name .git); do + d=$(dirname $g) + if [ -f $d/$centos_pkg_dirs ]; then + for d2 in $(grep -v '^#' $d/$centos_pkg_dirs); do + name="" + if [ -f $d/$d2/centos/srpm_path ]; then + path=$(cat $d/$d2/centos/srpm_path | head -n 1 | sed "s#^mirror:CentOS/tis-r3-CentOS/mitaka#$MY_REPO/cgcs-centos-repo#" | sed "s#^mirror:#$MY_REPO/cgcs-centos-repo/#" | sed "s#^repo:#$MY_REPO/#" | sed "s#^3rd_party:#$MY_REPO/cgcs-3rd-party-repo/#" | sed "s#^Source/#$MY_REPO/cgcs-centos-repo/Source/#") + name=$(rpm -q --qf='%{NAME}' --nosignature -p $path) + else + path=$(find $d/$d2/centos/ -name '*.spec' | head -n 1) + if [[ ( -z "$path" ) && ( -f $d/$d2/centos/spec_path ) ]]; then + path=$(find $MY_REPO/$(cat $d/$d2/centos/spec_path) -maxdepth 1 -name '*.spec' | head -n 1) + fi + if [ "$path" != "" 
]; then + name=$(spec_find_tag Name "$path" 2>> /dev/null) + fi + fi + if [ "$name" != "" ]; then + if [ "$BUILD_TYPE" == "rt" ]; then + FOUND=0 + for x in $TARGETS; do + if [ "${x: -3}" == "-rt" ]; then + if [ "${name}" == "$x" ] || [ "${name}-rt" == "${x}" ]; then + RESULT+="$x " + FOUND=1 + break + fi + fi + done + if [ $FOUND -eq 0 ]; then + for x in $TARGETS; do + if [ "${name}" == "${x}-rt" ]; then + RESULT+="$x-rt " + FOUND=1 + break + else + if [ "${name}" == "$x" ] || [ "${name}-rt" == "${x}" ]; then + RESULT+="$x " + FOUND=1 + break + fi + fi + done + fi + else + for x in $TARGETS; do + if [ "${name}" == "$x" ]; then + RESULT+="$x " + FOUND=1 + break + fi + done + fi + fi + done + fi + done + + echo "$RESULT" + return 0 +} + +echo "CLEAN_FLAG=$CLEAN_FLAG" +echo "EDIT_FLAG=$EDIT_FLAG" + +if [ "x$TARGETS" != "x " ]; then + source $MY_REPO/build-tools/spec-utils + TARGETS_STD="$(find_targets centos_pkg_dirs)" + + BUILD_TYPE_SAVE="$BUILD_TYPE" + BUILD_TYPE="rt" + TARGETS_RT="$(find_targets centos_pkg_dirs_rt)" + BUILD_TYPE="installer" + TARGETS_INSTALLER="$(find_targets centos_pkg_dirs_installer)" + BUILD_TYPE="$BUILD_TYPE_SAVE" + + echo "TARGETS_STD=$TARGETS_STD" + echo "TARGETS_RT=$TARGETS_RT" + echo "TARGETS_INSTALLER=$TARGETS_INSTALLER" + + for x in $TARGETS; do + if [[ $TARGETS_STD == *" $x "* ]] + then + echo "found $x" >> /dev/null; + else + if [[ $TARGETS_RT == *" $x "* ]] + then + echo "found $x" >> /dev/null; + else + if [[ $TARGETS_INSTALLER == *" $x "* ]] + then + echo "found $x" >> /dev/null; + INSTALLER_BUILD=1 + else + TARGETS_MISC+="$x " + fi + fi + fi + done +fi + +echo "EXTRA_ARGS_COMMON='$EXTRA_ARGS_COMMON'" +echo "EXTRA_ARGS_SRPM='$EXTRA_ARGS_SRPM'" +echo "EXTRA_ARGS_RPM='$EXTRA_ARGS_RPM'" +echo "TARGETS='$TARGETS'" +echo "TARGETS_STD='$TARGETS_STD'" +echo "TARGETS_RT='$TARGETS_RT'" +echo "TARGETS_INSTALLER='$TARGETS_INSTALLER'" +echo "TARGETS_MISC='$TARGETS_MISC'" + +if [ $CLEAN_FLAG -eq 1 ]; then + if [ $STD_BUILD -eq 1 ]; then + if [ 
"x$TARGETS" == "x " ] || [ "$TARGETS_STD" != " " ]; then + if [ $EDIT_FLAG -ne 1 ]; then + echo "$DIR/build-rpms-serial --std --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_STD $TARGETS_MISC" + $DIR/build-rpms-serial --std --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_STD $TARGETS_MISC || exit 1 + fi + fi + if [ "x$TARGETS" == "x " ] || [ "$TARGETS_STD" != " " ] || [ "$TARGETS_MISC" != " " ]; then + echo "$DIR/build-srpms-serial --std --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_STD $TARGETS_MISC" + $DIR/build-srpms-serial --std --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_STD $TARGETS_MISC || exit 1 + fi + fi + if [ $RT_BUILD -eq 1 ]; then + if [ "x$TARGETS" == "x " ] || [ "$TARGETS_RT" != " " ] || [ "$TARGETS_MISC" != " " ]; then + if [ $EDIT_FLAG -ne 1 ]; then + echo "$DIR/build-rpms-serial --rt --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_RT $TARGETS_MISC" + $DIR/build-rpms-serial --rt --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_RT $TARGETS_MISC || exit 1 + fi + fi + if [ "x$TARGETS" == "x " ] || [ "$TARGETS_RT" != " " ] || [ "$TARGETS_MISC" != " " ]; then + echo "$DIR/build-srpms-serial --rt --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_RT $TARGETS_MISC" + $DIR/build-srpms-serial --rt --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_RT $TARGETS_MISC || exit 1 + fi + fi + if [ $INSTALLER_BUILD -eq 1 ]; then + if [ "x$TARGETS" == "x " ] || [ "$TARGETS_INSTALLER" != " " ]; then + if [ $EDIT_FLAG -ne 1 ]; then + echo "$DIR/build-rpms-serial --installer --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_INSTALLER" + $DIR/build-rpms-serial --installer --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_INSTALLER || exit 1 + fi + fi + if [ "x$TARGETS" == "x " ] || [ "$TARGETS_INSTALLER" != " " ]; then + echo "$DIR/build-srpms-serial --installer --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_INSTALLER" + $DIR/build-srpms-serial --installer --clean $EXTRA_ARGS_COMMON
$EXTRA_ARGS_SRPM $TARGETS_INSTALLER || exit 1 + fi + fi + exit $? +fi + +function launch_build() +{ + local build_type=$1 + shift + + local logfile=$MY_WORKSPACE/build-$build_type.log + local rc + local targets + + if [ "$build_type" == "std" ]; then + targets="$TARGETS_STD $TARGETS_MISC" + else + if [ "$build_type" == "rt" ]; then + targets="$TARGETS_RT $TARGETS_MISC" + else + if [ "$build_type" == "installer" ]; then + targets="$TARGETS_INSTALLER" + else + targets="$TARGETS" + fi + fi + fi + + echo "Launching $build_type build, logging to $logfile" + + echo -e "\n######## $(date): Launching build-srpms-serial --$build_type $EXTRA_ARGS $@\n" | tee --append $logfile + # No clean flag, call build-srpms-serial followed by build-rpms-serial + echo "$DIR/build-srpms-serial --$build_type $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $targets" | tee --append $logfile + $DIR/build-srpms-serial --$build_type $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $targets 2>&1 | tee --append $logfile + rc=${PIPESTATUS[0]} + if [ $rc -eq 0 ]; then + echo -e "\n######## $(date): build-srpm-serial --$build_type was successful" | tee --append $logfile + else + echo -e "\n######## $(date): build-srpm-serial --$build_type failed with rc=$rc" | tee --append $logfile + echo -e "\n$(date): build-srpm-serial --$build_type failed with rc=$rc" + exit $rc + fi + + if [ $EDIT_FLAG -ne 1 ]; then + echo -e "\n######## $(date): Launching build-rpms-serial --$build_type $EXTRA_ARGS $@\n" | tee --append $logfile + echo "$DIR/build-rpms-serial --$build_type $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $targets" | tee --append $logfile + $DIR/build-rpms-serial --$build_type $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $targets 2>&1 | tee --append $logfile + rc=${PIPESTATUS[0]} + if [ $rc -eq 0 ]; then + echo -e "\n######## $(date): build-rpm-serial --$build_type was successful" | tee --append $logfile + else + echo -e "\n######## $(date): build-rpm-serial --$build_type failed with rc=$rc" | tee --append $logfile + echo -e "\n$(date):
build-rpm-serial --$build_type failed with rc=$rc" + exit $rc + fi + fi + + echo -e "\n$(date): $build_type complete\n" + #exit $rc +} + +function progbar() +{ + while :; do + for s in / - \\ \|; do + printf "\r$s" + sleep .5 + done + done +} + +# Create $MY_WORKSPACE if it doesn't exist already +mkdir -p $MY_WORKSPACE + +if [ $STD_BUILD -eq 1 ]; then + if [ "x$TARGETS" == "x " ] || [ "$TARGETS_STD" != " " ] || [ "$TARGETS_MISC" != " " ]; then + launch_build std + else + echo "Skipping 'std' build, no valid targets in list: $TARGETS" + fi +else + echo "Skipping 'std' build" +fi +if [ $RT_BUILD -eq 1 ]; then + if [ "x$TARGETS" == "x " ] || [ "$TARGETS_RT" != " " ] || [ "$TARGETS_MISC" != " " ]; then + launch_build rt + else + echo "Skipping 'rt' build, no valid targets in list: $TARGETS" + fi +else + echo "Skipping 'rt' build" +fi +if [ $INSTALLER_BUILD -eq 1 ]; then + if [ "x$TARGETS" == "x " ] || [ "$TARGETS_INSTALLER" != " " ]; then + launch_build installer + else + echo "Skipping 'installer' build, no valid targets in list: $TARGETS" + fi +# else + # echo "Skipping 'installer' build" +fi + + +#progbar & + +#function killemall() { +# for pid in $(jobs -p) +# do +# pgid=$(echo -n $(ps -o pgid= $pid)) +# if [ -n "$pgid" ]; then +# echo "Killing child progress group: kill -TERM -$pgid" +# kill -TERM -$pgid +# fi +# done +# echo "Bad mojo when you ctrl-C, so don't" +#} + +#trap killemall INT + +## Wait for builds to complete +#wait %1 +#STD_BUILD_RC=$? + +#wait %2 +#RT_BUILD_RC=$? 
+ +# Kill the progress bar +#kill %3 + +#if [ $STD_BUILD_RC -ne 0 -o $RT_BUILD_RC -ne 0 ]; then +# echo "One or more builds failed" +# exit 1 +#fi + +echo "All builds were successful" + +exit 0 + diff --git a/build-tools/build-pkgs4 b/build-tools/build-pkgs4 new file mode 120000 index 00000000..a8849bfe --- /dev/null +++ b/build-tools/build-pkgs4 @@ -0,0 +1 @@ +build-pkgs-parallel \ No newline at end of file diff --git a/build-tools/build-rpms b/build-tools/build-rpms new file mode 100755 index 00000000..937c406a --- /dev/null +++ b/build-tools/build-rpms @@ -0,0 +1,34 @@ +#!/bin/bash + +# This program is a wrapper around build-rpms-parallel and build-rpms-serial + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +usage () { + echo "" + echo "Usage: " + echo " Create binary rpms:" + echo " build-rpms [--serial] [args]" +} + +SERIAL_FLAG=0 + +for arg in "$@"; do + case "$arg" in + --serial) SERIAL_FLAG=1 ;; + esac +done + +which mock_tmpfs_umount >> /dev/null +if [ $? -ne 0 ]; then + SERIAL_FLAG=1 +fi + +if [ $SERIAL_FLAG -eq 1 ]; then + echo "build-rpms-serial $@" + build-rpms-serial "$@" +else + echo "build-rpms-parallel $@" + build-rpms-parallel "$@" +fi + diff --git a/build-tools/build-rpms-parallel b/build-tools/build-rpms-parallel new file mode 100755 index 00000000..4f922476 --- /dev/null +++ b/build-tools/build-rpms-parallel @@ -0,0 +1,2343 @@ +#!/bin/bash +# set -x + +export ME=$(basename "$0") +CMDLINE="$ME $@" + + +# Maximum number of parallel build environments +ABSOLUTE_MAX_WORKERS=4 + +# Maximum space in gb for each tmpfs based parallel build environment.
+# Note: currently 10 gb is sufficient to build everything except ceph +MAX_MEM_PER_WORKER=10 + +# Minimum space in gb for each tmpfs based parallel build environment +# Note: tmpfs is typically 2.5 gb when compiling many small jobs +MIN_MEM_PER_WORKER=3 + +# Maximum number of disk based parallel build environments +MAX_DISK_BASED_WORKERS=2 + +# Minimum space in gb for each disk based parallel build environment +MIN_DISK_PER_WORKER=20 + +# How many srpms to build before we add another parallel build environment +MIN_TASKS_PER_CORE=3 + +# Max number of new concurrent builds to allow for +MAX_SHARE_FACTOR=4 + +# Always leave at least MEMORY_RESERVE gb of available mem for the system +MEMORY_RESERVE=1 + +# These two values will be reassigned in the 'compute_resources' subroutine +MOCKCHAIN_RESOURCE_ALLOCATION="" +MAX_WORKERS=$ABSOLUTE_MAX_WORKERS + + +CREATEREPO=$(which createrepo_c) +if [ $? -ne 0 ]; then + CREATEREPO="createrepo" +fi + +DEPENDANCY_DIR="$MY_REPO/cgcs-tis-repo/dependancy-cache" +SRPM_DIRECT_REQUIRES_FILE="$DEPENDANCY_DIR/SRPM-direct-requires" +SRPM_TRANSITIVE_REQUIRES_FILE="$DEPENDANCY_DIR/SRPM-transitive-requires" +SRPM_TRANSITIVE_DESCENDANTS_FILE="$DEPENDANCY_DIR/SRPM-transitive-descendants" +SRPM_DIRECT_DESCENDANTS_FILE="$DEPENDANCY_DIR/SRPM-direct-descendants" +SRPM_RPM_DIRECT_REQUIRES_FILE="$DEPENDANCY_DIR/SRPM-direct-requires-rpm" +RPM_DIRECT_REQUIRES_FILE="$DEPENDANCY_DIR/RPM-direct-requires" +RPM_TO_SRPM_MAP_FILE="$DEPENDANCY_DIR/rpm-to-srpm" +SRPM_TO_RPM_MAP_FILE="$DEPENDANCY_DIR/srpm-to-rpm" + +UNBUILT_PATTERN_FILE="$MY_REPO/build-data/unbuilt_rpm_patterns" +IMAGE_INC_FILE="$MY_REPO/build-tools/build_iso/image.inc" + +export MOCK=/usr/bin/mock + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source $DIR/spec-utils +source $DIR/srpm-utils + +HOME=$(pwd) + +usage () { + echo "" + echo "Usage: " + echo " $ME [ [--rt] [--no-required] [--no-descendants] [--no-build-info] [--no-autoclean] [--formal] ]" + echo " $ME --clean [ 
[--no-descendants] ]" + echo " $ME --help" + echo "" +} + + +number_of_users () { + users | tr ' ' '\n' | sort --uniq | wc -l +} + +available_mem_gb () { + free -g | grep 'Mem:' | awk '{ print $7 }' +} + +available_disk_gb () { + df -BG $MY_WORKSPACE | grep -v '^Filesystem' | awk '{ print $4 }' | sed 's#G$##' +} + +number_of_cpus () { + /usr/bin/nproc +} + +number_of_builds_in_progress () { + local x + x=$(ps -ef | grep build-pkgs-parallel | wc -l) + x=$((x-1)) + echo $x +} + +sqrt () { + echo -e "sqrt($1)" | bc -q -i | head -2 | tail -1 +} + +join_by () { local IFS="$1"; shift; echo "$*"; } + +create-no-clean-list () { + local MY_YUM_CONF=$(create-yum-conf) + local NO_CLEAN_LIST_FILE=$MY_WORKSPACE/no_clean_list.txt + local NEED_REBUILD=0 + + if [ ! -f $NO_CLEAN_LIST_FILE ]; then + NEED_REBUILD=1 + else + if [ -f $MY_BUILD_CFG ]; then + if [ -f $MY_BUILD_CFG ]; then + find "$MY_BUILD_CFG" -not -newer "$NO_CLEAN_LIST_FILE" | grep $(basename $MY_BUILD_CFG) >> /dev/null + if [ $? -eq 0 ]; then + NEED_REBUILD=1 + fi + fi + fi + fi + + if [ $NEED_REBUILD -eq 1 ]; then + local install_groups="" + local install_packages="" + local p + + for p in $(grep "config_opts\['chroot_setup_cmd'\]" $MY_BUILD_CFG | tail -n1 | cut -d '=' -f 2 | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e "s/^'//" -e "s/'$//" -e 's/^install //'); do + if [[ $p == @* ]] ; then + install_groups=$(join_by ' ' $install_groups $(echo $p | cut -c 2-)) + else + install_packages=$(join_by ' ' $install_packages $p) + fi + done + + local noclean_last_list_len=0 + local noclean_list="" + local tmp_list="" + local g + + for g in $install_groups; do + tmp_list=$(yum -c $MY_YUM_CONF groupinfo $g 2>> /dev/null | awk 'f;/Mandatory Packages:/{f=1}' | cut -c 5-) + noclean_list=$(join_by ' ' $noclean_list $tmp_list) + done + + noclean_list=$(join_by ' ' $noclean_list $install_packages) + noclean_list=$(echo $noclean_list | tr ' ' '\n' | sort --uniq) + noclean_list_len=$(echo $noclean_list | wc -w) + + while [
$noclean_list_len -gt $noclean_last_list_len ]; do
+ noclean_last_list_len=$noclean_list_len
+ noclean_list=$( (yum -c $MY_YUM_CONF deplist $noclean_list 2>> /dev/null | grep provider: | awk '{ print $2 }' | awk -F . '{ print $1 }'; for p in $noclean_list; do echo $p; done) | sort --uniq)
+ noclean_list_len=$(echo $noclean_list | wc -w)
+ done
+
+ echo $noclean_list > $NO_CLEAN_LIST_FILE
+ fi
+
+ cat $NO_CLEAN_LIST_FILE
+}
+
+str_lst_contains() {
+ TARGET="$1"
+ LST="$2"
+ if [[ $LST =~ (^|[[:space:]])$TARGET($|[[:space:]]) ]] ; then
+ return 0
+ else
+ return 1
+ fi
+}
+
+compute_resources () {
+ local weight=0
+ local b
+
+ echo ""
+ for f in $@; do
+ b=$(basename $f)
+ if [ -f $SOURCES_DIR/$b/BIG ]; then
+ weight=$((weight+MIN_TASKS_PER_CORE))
+ else
+ weight=$((weight+1))
+ fi
+ done
+ weight=$((weight/MIN_TASKS_PER_CORE))
+
+ # gather data about the build machines resources
+ local users=$(number_of_users)
+ if [ $users -lt 1 ]; then users=1; fi
+ local mem=$(available_mem_gb)
+ local disk=$(available_disk_gb)
+ local cpus=$(number_of_cpus)
+ local num_users=$(sqrt $users)
+ local num_build=$(number_of_builds_in_progress)
+ num_build=$((num_build+1))
+ echo "compute_resources: total: cpus=$cpus, mem=$mem, disk=$disk, weight=$weight, num_build=$num_build"
+
+ # What fraction of the machine will we use
+ local share_factor=$num_users
+ if [ $share_factor -gt $((MAX_SHARE_FACTOR+num_build-1)) ]; then share_factor=$((MAX_SHARE_FACTOR+num_build-1)); fi
+ if [ $share_factor -lt $num_build ]; then share_factor=$num_build; fi
+ local mem_share_factor=$((share_factor-num_build))
+ if [ $mem_share_factor -lt 1 ]; then mem_share_factor=1; fi
+ echo "compute_resources: share_factor=$share_factor mem_share_factor=$mem_share_factor"
+
+ # What resources are we permitted to use
+ local mem_share=$(((mem-MEMORY_RESERVE)/mem_share_factor))
+ if [ $mem_share -lt 0 ]; then mem_share=0; fi
+ local disk_share=$((disk/share_factor))
+ local cpus_share=$((cpus/share_factor))
+ echo "compute_resources: our share: cpus=$cpus_share, mem=$mem_share, disk=$disk_share" + + # How many build jobs, how many jobs will use tmpfs, and how much mem for each tmpfs + local workers=$cpus_share + if [ $workers -gt $MAX_WORKERS ]; then workers=$MAX_WORKERS; fi + if [ $workers -gt $weight ]; then workers=$weight; fi + if [ $workers -lt 1 ]; then workers=1; fi + local max_mem_based_workers=$((mem_share/MIN_MEM_PER_WORKER)) + if [ $max_mem_based_workers -lt 0 ]; then max_mem_based_workers=0; fi + local max_disk_based_workers=$((disk_share/MIN_DISK_PER_WORKER)) + if [ $max_disk_based_workers -gt $MAX_DISK_BASED_WORKERS ]; then max_disk_based_workers=$MAX_DISK_BASED_WORKERS; fi + if [ $max_disk_based_workers -lt 1 ]; then max_disk_based_workers=1; fi + echo "max_disk_based_workers=$max_disk_based_workers, max_mem_based_workers=$max_mem_based_workers" + local mem_based_workers=$max_mem_based_workers + if [ $mem_based_workers -ge $workers ]; then mem_based_workers=$((workers-1)); fi + local disk_based_workers=$((workers-mem_based_workers)) + if [ $disk_based_workers -gt $max_disk_based_workers ]; then disk_based_workers=$max_disk_based_workers; fi + if [ $disk_based_workers -lt 1 ]; then disk_based_workers=1; fi + echo "disk_based_workers=$disk_based_workers, mem_based_workers=$mem_based_workers" + if [ $workers -gt $((disk_based_workers+mem_based_workers)) ]; then workers=$((disk_based_workers+mem_based_workers)); fi + local mem_spoken_for=$((mem_based_workers*MIN_MEM_PER_WORKER)) + local avail_mem=$((mem_share-mem_spoken_for)) + local x="" + for i in $(seq 0 $((workers-1))); do + if [ $i -lt $disk_based_workers ]; then + x="$x:0" + else + extra_mem=$(($MAX_MEM_PER_WORKER-$MIN_MEM_PER_WORKER)) + if [ $extra_mem -gt $avail_mem ]; then extra_mem=$avail_mem; fi + avail_mem=$((avail_mem-extra_mem)) + mem_for_worker=$((MIN_MEM_PER_WORKER+extra_mem)) + x="$x:$mem_for_worker" + fi + done + + # Our output is saved in environmnet variables + 
MOCKCHAIN_RESOURCE_ALLOCATION=$(echo $x | sed 's#^:##') + MAX_WORKERS=$workers + echo "compute_resources: MAX_WORKERS=$MAX_WORKERS, MOCKCHAIN_RESOURCE_ALLOCATION=$MOCKCHAIN_RESOURCE_ALLOCATION" + echo "" +} + + +# +# Delete old repodata and reate a new one +# +recreate_repodata () { + local DIR=${1} + + (cd $DIR + if [ -f repodata/*comps*xml ]; then + \mv repodata/*comps*xml comps.xml + fi + \rm -rf repodata + if [ -f comps.xml ]; then + $CREATEREPO -g comps.xml --workers $(number_of_cpus) $(pwd) + else + $CREATEREPO --workers $(number_of_cpus) $(pwd) + fi + ) +} + +# +# Update existing repodata +# +update_repodata () { + local DIR=${1} + + (cd $DIR + TMP=$(mktemp /tmp/update_repodata_XXXXXX) + RC=0 + if [ -f comps.xml ]; then + $CREATEREPO --update -g comps.xml --workers $(number_of_cpus) $(pwd) &> $TMP + RC=$? + else + $CREATEREPO --update --workers $(number_of_cpus) $(pwd) &> $TMP + RC=$? + fi + if [ $RC -ne 0 ]; then + cat $TMP + fi + \rm -f $TMP + ) +} + +# +# return array that is the intersection of two other arrays +# +# NEW_ARRAY=( $( intersection ARRAY1 ARRAY2 ) ) +# +intersection () { + local Aname=$1[@] + local Bname=$2[@] + local A=("${!Aname}") + local B=("${!Bname}") + + # echo "${A[@]}" + # echo "${B[@]}" + for a in "${A[@]}"; do + # echo "a=$a" + for b in "${B[@]}"; do + # echo "b=$b" + if [ "$a" == "$b" ]; then + echo "$a" + break + fi + done + done +} + +# +# return array that is the union of two other arrays +# +# NEW_ARRAY=( $( union ARRAY1 ARRAY2 ) ) +# +union () { + local Aname=$1[@] + local Bname=$2[@] + local A=("${!Aname}") + local B=("${!Bname}") + local a + local b + + for a in "${A[@]}"; do + echo "$a" + done + + for b in "${B[@]}"; do + local found=0 + for a in "${A[@]}"; do + if [ "$a" == "$b" ]; then + found=1 + break + fi + done + if [ $found -eq 0 ]; then + echo $b + fi + done +} + +# +# returns 0 if element is in the array +# +# e.g. 
contains ARRAY $SEEKING && echo "$SEEKING is in 'ARRAY'" +# +contains () { + local Aname=$1[@] + local A=("${!Aname}") + local seeking=$2 + local in=1 + + for a in "${A[@]}"; do + if [[ $a == $seeking ]]; then + in=0 + break + fi + done + return $in +} + +# +# Append element to array if not present +# +# ARRAY=( $( put ARRAY $ELEMENT ) ) +# +put () { + local Aname=$1[@] + local A=("${!Aname}") + local element="$2" + for a in "${A[@]}"; do + echo "$a" + done + contains A "$element" || echo "$element" +} + +build_order_recursive () { + local target=$1 + local idx + local remainder_list + local needs + local needs_list + + for((idx=0;idx<${#UNORDERED_LIST[@]};idx++)); do + if [ ${UNORDERED_LIST[idx]} == $target ]; then + remainder_list=( ${UNORDERED_LIST[@]:0:$idx} ${UNORDERED_LIST[@]:$((idx + 1))} ) + UNORDERED_LIST=( ${remainder_list[@]} ) + needs=( $(grep "^$target;" "$SRPM_DIRECT_REQUIRES_FILE" | sed "s/$target;//" | sed 's/,/ /g') ) + needs_list=( $(intersection needs remainder_list) ) + for((idx=0;idx<${#needs_list[@]};idx++)); do + build_order_recursive ${needs_list[idx]} + done + echo $target + break + fi + done +} + +build_order () { + local Aname=$1[@] + local original_list=("${!Aname}") + local needs + local needs_list + local remainder_list + local idx + local element + local next_start=0 + local old_next_start=0 + local progress=1 + + while [ ${#original_list[@]} -gt 0 ] && [ $progress -gt 0 ]; do + progress=0 + old_next_start=$next_start + for((idx=$next_start;idx<${#original_list[@]};idx++)); do + element=${original_list[idx]} + next_start=$idx + remainder_list=( ${original_list[@]:0:$idx} ${original_list[@]:$((idx + 1))} ) + needs=( $(grep "^$element;" "$SRPM_DIRECT_REQUIRES_FILE" | sed "s/$element;//" | sed 's/,/ /g') ) + needs_list=( $(intersection needs remainder_list) ) + if [ ${#needs_list[@]} -eq 0 ]; then + echo "$element" + original_list=( "${remainder_list[@]}" ) + if [ $next_start -ge ${#original_list[@]} ]; then + next_start=0 + fi + 
progress=1 + break + fi + done + if [ $old_next_start -ne 0 ]; then + progress=1 + next_start=0 + fi + done + + if [ ${#original_list[@]} -gt 0 ]; then + # Had trouble calculating a build order for these remaining packages, so stick them at the end + UNORDERED_LIST=( ${original_list[@]} ) + while [ ${#UNORDERED_LIST[@]} -gt 0 ]; do + element=${UNORDERED_LIST[0]} + build_order_recursive $element + done + fi +} + +set_mock_symlinks () { + local LNK + local DEST + local CFG=$1 + if [ -d /localdisk/loadbuild/mock ]; then + mkdir -p $MY_WORKSPACE + LNK=$(echo "/localdisk/loadbuild/mock/$(basename $CFG)" | sed 's/.cfg$//') + if [ ! -L $LNK ] && [ -d $LNK ]; then + echo "WARNING: Found directory at '$LNK' when symlink was expected. Fixing..." + \rm -rf $LNK + if [ -d $LNK ]; then + \mv $LNK $LNK.clean_me + fi + fi + if [ -L $LNK ]; then + DEST=$(readlink $LNK) + if [ "$DEST" != "$MY_WORKSPACE" ] || [ ! -d "$MY_WORKSPACE" ]; then + echo "WARNING: Found broken symlink at '$LNK'. Fixing..." + \rm -f $LNK + fi + fi + if [ ! -L $LNK ]; then + if [ ! -d "$MY_WORKSPACE" ]; then + echo "ERROR: Can't create symlink from $LNK to $MY_WORKSPACE as destination does not exist." + exit 1 + fi + ln -s $MY_WORKSPACE $LNK + fi + fi + + if [ -d /localdisk/loadbuild/mock-cache ]; then + mkdir -p $MY_WORKSPACE/cache + LNK=$(echo "/localdisk/loadbuild/mock-cache/$(basename $CFG)" | sed 's/.cfg$//') + if [ ! -L $LNK ] && [ -d $LNK ]; then + echo "WARNING: Found directory at '$LNK' when symlink was expected. Fixing..." + \rm -rf $LNK + if [ -d $LNK ]; then + \mv $LNK $LNK.clean_me + fi + fi + if [ -L $LNK ]; then + DEST=$(readlink $LNK) + if [ "$DEST" != "$MY_WORKSPACE/cache" ] || [ ! -d "$MY_WORKSPACE/cache" ]; then + echo "WARNING: Found broken symlink at '$LNK'. Fixing..." + \rm -f $LNK + fi + fi + if [ ! -L $LNK ]; then + if [ ! -d "$MY_WORKSPACE/cache" ]; then + echo "ERROR: Can't create symlink from $LNK to $MY_WORKSPACE/cache as destination does not exist." 
+ exit 1 + fi + ln -s $MY_WORKSPACE/cache $LNK + fi + fi +} + +remove_mock_symlinks () { + local LNK + local CFG=$1 + if [ -d /localdisk/loadbuild/mock ]; then + LNK=$(echo "/localdisk/loadbuild/mock/$(basename $CFG)" | sed 's/.cfg$//') + if [ -L $LNK ]; then + \rm -f $LNK + fi + if [ -d $LNK ]; then + \rm -rf $LNK + if [ $? -ne 0 ]; then + \mv -f $LNK $LNK.clean_me + fi + fi + fi + + if [ -d /localdisk/loadbuild/mock-cache ]; then + LNK=$(echo "/localdisk/loadbuild/mock-cache/$(basename $MY_BUILD_CFG)" | sed 's/.cfg$//') + if [ -L $LNK ]; then + \rm -f $MY_WORKSPACE/cache $LNK + fi + if [ -d $LNK ]; then + \rm -rf $LNK + if [ $? -ne 0 ]; then + \mv -f $LNK $LNK.clean_me + fi + fi + fi +} + +umount_mock_root_as_tmpfs_all () { + for i in $(seq 0 $((ABSOLUTE_MAX_WORKERS-1))); do + umount_mock_root_as_tmpfs $i + done +} + +umount_mock_root_as_tmpfs_cfg () { + local CFG=$1 + local build_idx=$(basename $CFG | sed 's#.*[.]b\([0-9]*\)[.]cfg#\1#') + if [ "$build_idx" != "" ]; then + umount_mock_root_as_tmpfs $build_idx + else + echo "umount_mock_root_as_tmpfs_cfg: Failed to map '$CFG' to a build_idx" + fi +} + +umount_mock_root_as_tmpfs () { + local build_idx=$1 + local mount_dir=$(readlink -f $MY_WORKSPACE/mock)/b${build_idx}/root + local rc + + mount | grep tmpfs | grep $mount_dir &> /dev/null + if [ $? -ne 0 ]; then + return 0 + fi + mock_tmpfs_umount $mount_dir &> /dev/null + + rc=$? 
+ if [ $rc -ne 0 ]; then + echo "FAILED: mock_tmpfs_umount $mount_dir" + fi + return $rc +} + +kill_descendents () +{ + local kill_pid=$1 + local kill_all=$2 + local need_stop=$3 + local iteration=$4 + local ret=0 + local rc=0 + + # echo "kill_descendents pid=$kill_pid, all=$kill_all stop=$need_stop, iteration=$iteration" + + local relevant_recursive_children="$ME" + local relevant_recursive_promote_children="mock" + local relevant_other_children="mockchain-parallel" + + local recursive_promote_children=$(for relevant_child in $relevant_recursive_promote_children; do pgrep -P $kill_pid $relevant_child; done) + local recursive_children=$(for relevant_child in $relevant_recursive_children; do pgrep -P $kill_pid $relevant_child; done) + local other_children="" + + if [ $kill_all -eq 1 ]; then + recursive_promote_children="" + recursive_children=$(pgrep -P $kill_pid) + fi + + if [ $iteration -eq 0 ]; then + other_children=$(for relevant_child in $relevant_other_children; do pgrep -P $kill_pid $relevant_child; done) + if [ "$other_children" != "" ]; then + ret=1 + fi + fi + + if [ $need_stop -eq 1 ]; then + for pid in $recursive_children $recursive_promote_children; do + kill -SIGSTOP $pid &> /dev/null + done + fi + + for pid in $recursive_children; do + kill_descendents "$pid" $kill_all $need_stop $((iteration + 1)) + done + for pid in $recursive_promote_children; do + kill_descendents "$pid" 1 1 $((iteration + 1)) + done + + # echo "kill: $recursive_children $recursive_promote_children" + for pid in $recursive_children $recursive_promote_children; do + kill $pid &> /dev/null + rc=$? + if [ $need_stop -eq 1 ]; then + kill -SIGCONT $pid &> /dev/null + fi + if [ $rc -eq 0 ] && [ $iteration -eq 0 ]; then + wait $pid + fi + done + + # echo "kill: $other_children" + for pid in $other_children; do + kill $pid &> /dev/null + rc=$? 
+ if [ $rc -eq 0 ] && [ $iteration -eq 0 ]; then + wait $pid + fi + done + + return $ret +} + +function my_exit_n() { + local need_mock_cleanup + # echo "$BASHPID: $ME: my_exit: killing children" + local need_mock_cleanup + kill_descendents $BASHPID 0 0 0 + need_mock_cleanup=$? + # echo "$BASHPID: $ME: my_exit: waiting" + wait + # echo "$BASHPID: $ME: my_exit: wait complete" + # echo "$BASHPID: $ME: my_exit: need_mock_cleanup=$need_mock_cleanup" + if [ $need_mock_cleanup -ne 0 ]; then + umount_mock_root_as_tmpfs_all + fi +} + +function my_exit() { + local need_mock_cleanup + # echo "$BASHPID: $ME: my_exit: killing children" + local need_mock_cleanup + kill_descendents $BASHPID 0 0 0 + need_mock_cleanup=$? + # echo "$BASHPID: $ME: my_exit: waiting" + wait + # echo "$BASHPID: $ME: my_exit: wait complete" + # echo "$BASHPID: $ME: my_exit: need_mock_cleanup=$need_mock_cleanup" + if [ $need_mock_cleanup -ne 0 ]; then + sleep 1 + fi + umount_mock_root_as_tmpfs_all +} + +function my_sigint_n() { + local ARG=$1 + echo "$BASHPID: $ME: my_sigint_n: ARG=$ARG" + echo "$BASHPID: $ME: my_sigint_n: killing children" + local need_mock_cleanup + kill_descendents $BASHPID 0 0 0 + need_mock_cleanup=$? + echo "$BASHPID: $ME: my_sigint_n: waiting" + wait + echo "$BASHPID: $ME: my_sigint_n: wait complete" + if [ $need_mock_cleanup -ne 0 ]; then + umount_mock_root_as_tmpfs_cfg $ARG + fi + exit 1 +} + +function my_sighup_n() { + local ARG=$1 + echo "$BASHPID: $ME: my_sighup_n: ARG=$ARG" + echo "$BASHPID: $ME: my_sighup_n: killing children" + local need_mock_cleanup + kill_descendents $BASHPID 0 0 0 + need_mock_cleanup=$? 
+ echo "$BASHPID: $ME: my_sighup_n: waiting" + wait + echo "$BASHPID: $ME: my_sighup_n: wait complete" + if [ $need_mock_cleanup -ne 0 ]; then + umount_mock_root_as_tmpfs_cfg $ARG + fi + exit 1 +} + +function my_sigabrt_n() { + local ARG=$1 + echo "$BASHPID: $ME: my_sigabrt_n: ARG=$ARG" + echo "$BASHPID: $ME: my_sigabrt_n: killing children" + local need_mock_cleanup + kill_descendents $BASHPID 0 0 0 + need_mock_cleanup=$? + echo "$BASHPID: $ME: my_sigabrt_n: waiting" + wait + echo "$BASHPID: $ME: my_sigabrt_n: wait complete" + if [ $need_mock_cleanup -ne 0 ]; then + umount_mock_root_as_tmpfs_cfg $ARG + fi + exit 1 +} + +function my_sigterm_n() { + local ARG=$1 + echo "$BASHPID: $ME: my_sigterm_n: ARG=$ARG" + echo "$BASHPID: $ME: my_sigterm_n: killing children" + local need_mock_cleanup + kill_descendents $BASHPID 0 0 0 + need_mock_cleanup=$? + echo "$BASHPID: $ME: my_sigterm_n: waiting" + wait + echo "$BASHPID: $ME: my_sigterm_n: wait complete" + echo "$BASHPID: $ME: my_sigterm_n: need_mock_cleanup=$need_mock_cleanup" + if [ $need_mock_cleanup -ne 0 ]; then + umount_mock_root_as_tmpfs_cfg $ARG + fi + exit 1 +} + +function my_sigint() { + echo "$BASHPID: $ME: my_sigint: killing children" + local need_mock_cleanup + kill_descendents $BASHPID 0 0 0 + need_mock_cleanup=$? + echo "$BASHPID: $ME: my_sigterm_n: waiting" + wait + echo "$BASHPID: $ME: my_sigterm_n: wait complete" + if [ $need_mock_cleanup -ne 0 ]; then + umount_mock_root_as_tmpfs_all + fi + exit 1 +} + +function my_sighup() { + echo "$BASHPID: $ME: my_sighup: killing children" + local need_mock_cleanup + kill_descendents $BASHPID 0 0 0 + need_mock_cleanup=$? + echo "$BASHPID: $ME: my_sighup: waiting" + wait + echo "$BASHPID: $ME: my_sighup: wait complete" + if [ $need_mock_cleanup -ne 0 ]; then + umount_mock_root_as_tmpfs_all + fi + exit 1 +} + +function my_sigabrt() { + echo "$BASHPID: $ME: my_sigabrt: killing children" + local need_mock_cleanup + kill_descendents $BASHPID 0 0 0 + need_mock_cleanup=$? 
+ echo "$BASHPID: $ME: my_sigabrt: waiting" + wait + echo "$BASHPID: $ME: my_sigabrt: wait complete" + if [ $need_mock_cleanup -ne 0 ]; then + umount_mock_root_as_tmpfs_all + fi + exit 1 +} + +function my_sigterm() { + echo "$BASHPID: $ME: my_sigterm: killing children" + local need_mock_cleanup + kill_descendents $BASHPID 0 0 0 + need_mock_cleanup=$? + echo "$BASHPID: $ME: my_sigterm: waiting" + wait + echo "$BASHPID: $ME: my_sigterm: wait complete" + echo "$BASHPID: $ME: my_sigterm: need_mock_cleanup=$need_mock_cleanup" + if [ $need_mock_cleanup -ne 0 ]; then + umount_mock_root_as_tmpfs_all + fi + exit 1 +} + +trapwrap() { + local WCMD=$1 + shift + declare -i pid status=255 + # set the trap for the foreground process + trap my_sigint INT + trap my_sighup HUP + trap my_sigabrt ABRT + trap my_sigterm TERM + # run the command in background + ### "$@" & pid=$! + WARGS=() + x=0 + for i in "$@"; do + WARGS[$x]="$i" + x=$((x+1)) + done + echo "$WCMD ${WARGS[@]/#/}" + $WCMD "${WARGS[@]/#/}" & pid=$! + # wait until bg command finishes, handling interruptions by trapped signals + while (( status > 128 )); do + wait $pid + status=$? + done + # restore the trap + trap - INT + trap - HUP + trap - ABRT + trap - TERM + # return the command exit status + return $status +} + +trapwrap_n() { + local ARG=$1 + shift + local WCMD=$1 + shift + declare -i pid status=255 + # set the trap for the foreground process + trap my_exit_n EXIT + trap "my_sigint_n $ARG" INT + trap "my_sighup_n $ARG" HUP + trap "my_sigabrt_n $ARG" ABRT + trap "my_sigterm_n $ARG" TERM + # run the command in background + WARGS=() + x=0 + for i in "$@"; do + WARGS[$x]="$i" + x=$((x+1)) + done + echo "$WCMD ${WARGS[@]/#/}" + $WCMD "${WARGS[@]/#/}" & pid=$! + # wait until bg command finishes, handling interruptions by trapped signals + while (( status > 128 )); do + wait $pid + status=$? 
+ done + # restore the trap + trap - INT + trap - HUP + trap - ABRT + trap - TERM + # return the command exit status + return $status +} + +trap my_exit EXIT + +mock_get_cache_dir () { + local CFG=$1 + local CACHE_DIR="$MY_WORKSPACE/cache" + local CACHE_LINE=$(grep "config_opts[[][']cache_topdir['][]]" $CFG) + if [ $? -eq 0 ]; then + CACHE_DIR=$(echo "$CACHE_LINE" | awk -F \' '{ print $4 }') + fi + echo "$CACHE_DIR" +} + +mock_get_root_dir () { + local CFG=$1 + local ROOT_DIR="$MY_WORKSPACE/mock" + local ROOT_LINE=$(grep "config_opts[[][']root['][]]" $CFG) + if [ $? -eq 0 ]; then + ROOT_DIR="$MY_WORKSPACE/"$(echo "$ROOT_LINE" | awk -F \' '{ print $4 }') + fi + echo "$ROOT_DIR" +} + +mock_clean_cfg () { + local CFG=$1 + echo "${FUNCNAME[0]}: $CFG" + echo "==================================" + mock_clean_cache_cfg $CFG + echo "==================================" + echo "$MOCK -r $CFG --configdir $(dirname $CFG) --scrub=all" + trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --scrub=all + echo "==================================" + echo "$MOCK -r $CFG --configdir $(dirname $CFG) --clean" + trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --clean + ### Note: this sometimes leaves behind a $MY_WORKSPACE/cache/mock/yum_cache/yumcache.lock + echo "==================================" + mock_clean_cache_all_cfg $CFG + echo "==================================" +} + +mock_sub_configs () { + find $MY_WORKSPACE/configs/$MY_BUILD_ENVIRONMENT* -name "$MY_BUILD_ENVIRONMENT*b[0-9]*.cfg" +} + +mock_clean () { + echo "${FUNCNAME[0]}: in" + echo "==================================" + remove_mock_symlinks $MY_BUILD_CFG + set_mock_symlinks $MY_BUILD_CFG + echo "==================================" + for SUB_CFG in $(mock_sub_configs); do + local PREFIX=$(echo $SUB_CFG | rev | cut -d . 
-f 2 | rev)
+ ( mock_clean_cfg $SUB_CFG 2>&1 | sed "s#^#${PREFIX}: #" ; exit ${PIPESTATUS[0]} ) &
+ done
+ wait
+ # mock_clean_cfg $BUILD_CFG
+ echo "=================================="
+ remove_mock_symlinks $MY_BUILD_CFG
+ echo "${FUNCNAME[0]}: out"
+}
+
+mock_partial_clean_cfg () {
+ local CFG=$1
+ local SRPMS_LIST="$2"
+ local RPMS_LIST="$3"
+ local CMD
+ local TMP
+ local RC
+
+ echo "${FUNCNAME[0]}: CFG=$CFG SRPMS_LIST='$SRPMS_LIST' RPMS_LIST='$RPMS_LIST'"
+
+ TMP=$(mktemp /tmp/mock_partial_clean_cfg_XXXXXX)
+ if [ $? -ne 0 ]; then
+ echo "${FUNCNAME[0]}: mktemp failed"
+ return 1
+ fi
+
+ local ROOT_DIR=$(mock_get_root_dir $CFG)
+
+ if [ -d $ROOT_DIR/root/builddir/build/SOURCES ]; then
+ echo "rm -rf $ROOT_DIR/root/builddir/build/SOURCES/*"
+ \rm -rf $ROOT_DIR/root/builddir/build/SOURCES/* 2>> /dev/null
+ fi
+
+ if [ -d $ROOT_DIR/root/builddir/build/SPECS ]; then
+ echo "rm -rf $ROOT_DIR/root/builddir/build/SPECS/*"
+ \rm -rf $ROOT_DIR/root/builddir/build/SPECS/* 2>> /dev/null
+ fi
+
+ for s in $SRPMS_LIST; do
+ f=$(basename $s)
+ if [ -f $ROOT_DIR/root/builddir/build/SRPMS/$f ]; then
+ \rm -f -v $ROOT_DIR/root/builddir/build/SRPMS/$f 2>> /dev/null
+ fi
+ if [ -f $ROOT_DIR/root/builddir/build/originals/$f ]; then
+ \rm -f -v $ROOT_DIR/root/builddir/build/originals/$f 2>> /dev/null
+ fi
+ done
+
+ for r in $RPMS_LIST; do
+ for d in $(find $ROOT_DIR/root/builddir/build/BUILD/ -maxdepth 1 -name "$r*" 2>> /dev/null); do
+ echo "rm -rf $d"
+ \rm -rf $d 2>> /dev/null
+ done
+ if [ -d $ROOT_DIR/root/builddir/build/RPMS ]; then
+ for f in $(find $ROOT_DIR/root/builddir/build/RPMS -maxdepth 1 -name "$r*rpm" 2>> /dev/null); do
+ \rm -f -v $f 2>> /dev/null
+ done
+ fi
+ done
+
+
+ local NO_CLEAN_LIST=$(create-no-clean-list)
+ echo "NO_CLEAN_LIST=$NO_CLEAN_LIST"
+
+ local RPMS_CLEAN_LIST=""
+ local NEED_FULL_MOCK_CLEAN=0
+ for r in $RPMS_LIST; do
+ if !
str_lst_contains $r "$NO_CLEAN_LIST" ; then + RPMS_CLEAN_LIST=$(join_by ' ' $RPMS_CLEAN_LIST $r) + else + echo "Can't remove '$r' from mock environment without a wipe"; + NEED_FULL_MOCK_CLEAN=1 + fi + done + + if [ $NEED_FULL_MOCK_CLEAN -eq 1 ]; then + echo "Wipe the mock environment" + mock_clean_cfg $CFG + RC=$? + else + # Intent of following is for $RPMS_LIST to be expand now while the remaining $ varaibles are for bash inside mock to expand + echo "Try to uninstall from the mock environment these packages: $RPMS_CLEAN_LIST" + CMD='LST="'$RPMS_CLEAN_LIST'"; + DELETE_LIST=""; + for r in $LST; do + FOUND=$(rpm -q $r) ; + if [ $? -eq 0 ]; then + DELETE_LIST="$DELETE_LIST $FOUND"; + fi; + done; + echo "uninstalling these packages: $DELETE_LIST"; + if [ "$DELETE_LIST" != "" ]; then + rpm -e --nodeps $DELETE_LIST; + fi' + echo "$MOCK -r $CFG --configdir $(dirname $CFG) --chroot bash -c $CMD" &> $TMP + trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --chroot "bash -c '$CMD'" &>> $TMP + RC=$? + if [ $RC -ne 0 ]; then + cat $TMP + \rm -f $TMP + return $RC + fi + + mock_clean_cache_cfg $CFG + RC=$? + \rm -f $TMP + fi + + return $RC +} + +mock_partial_clean () { + local SRPMS_LIST="$1" + local RPMS_LIST="$2" + echo "${FUNCNAME[0]}: in" + echo "${FUNCNAME[0]}: '$SRPMS_LIST' '$RPMS_LIST'" + echo "==================================" + local NO_CLEAN_LIST=$(create-no-clean-list) + echo "==================================" + for SUB_CFG in $(mock_sub_configs); do + local PREFIX=$(echo $SUB_CFG | rev | cut -d . 
-f 2 | rev)
+ ( mock_partial_clean_cfg $SUB_CFG "$SRPMS_LIST" "$RPMS_LIST" 2>&1 | sed "s#^#${PREFIX}: #" ; exit ${PIPESTATUS[0]} ) &
+ done
+ wait
+ # mock_partial_clean_cfg $BUILD_CFG "$SRPMS_LIST" "$RPMS_LIST"
+ echo "=================================="
+ echo "${FUNCNAME[0]}: out"
+}
+
+mock_clean_cache_cfg () {
+ local CFG=$1
+ local TMP
+ local RC
+
+ echo "${FUNCNAME[0]}: $CFG '$SRPMS_LIST' '$RPMS_LIST'"
+
+ TMP=$(mktemp /tmp/mock_clean_cache_cfg_XXXXXX)
+ if [ $? -ne 0 ]; then
+ echo "${FUNCNAME[0]}: mktemp failed"
+ return 1
+ fi
+
+ echo "${FUNCNAME[0]}: $CFG"
+
+ clean_yum_cache_cfg $CFG
+
+ echo "$MOCK -r $CFG --configdir $(dirname $CFG) --scrub=root-cache --scrub=yum-cache --scrub=cache" &> $TMP
+ trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --scrub=root-cache --scrub=yum-cache --scrub=cache &>> $TMP
+ RC=$?
+ if [ $RC -ne 0 ]; then
+ cat $TMP
+ fi
+
+ \rm -f $TMP
+ return $RC
+}
+
+mock_clean_cache () {
+ echo "${FUNCNAME[0]}: in"
+ for SUB_CFG in $(mock_sub_configs); do
+ local PREFIX=$(echo $SUB_CFG | rev | cut -d . -f 2 | rev)
+ ( mock_clean_cache_cfg $SUB_CFG 2>&1 | sed "s#^#${PREFIX}: #" ; exit ${PIPESTATUS[0]} ) &
+ done
+ wait
+ # mock_clean_cache_cfg $BUILD_CFG
+ echo "${FUNCNAME[0]}: out"
+}
+
+mock_clean_cache_all_cfg () {
+ local CFG=$1
+
+ echo "${FUNCNAME[0]}: $CFG"
+ echo "=================================="
+ clean_yum_cache_cfg $CFG
+ echo "=================================="
+ echo "$MOCK -r $CFG --configdir $(dirname $CFG) --scrub=all"
+ trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --scrub=all
+ echo "=================================="
+}
+
+mock_clean_cache_all () {
+ echo "${FUNCNAME[0]}: in"
+ for SUB_CFG in $(mock_sub_configs); do
+ local PREFIX=$(echo $SUB_CFG | rev | cut -d .
-f 2 | rev)
+ ( mock_clean_cache_all_cfg $SUB_CFG 2>&1 | sed "s#^#${PREFIX}: #" ; exit ${PIPESTATUS[0]} ) &
+ done
+ wait
+ # mock_clean_cache_all_cfg $BUILD_CFG
+ echo "${FUNCNAME[0]}: out"
+}
+
+mock_clean_metadata_cfg () {
+ local CFG=$1
+ local TMP
+ local RC
+
+ echo "${FUNCNAME[0]}: $CFG"
+
+ TMP=$(mktemp /tmp/mock_partial_clean_cfg_XXXXXX)
+ if [ $? -ne 0 ]; then
+ echo "${FUNCNAME[0]}: mktemp failed"
+ return 1
+ fi
+
+ CMD=$((cat $CFG; grep config_opts\\[\'yum.conf\'\\\] $CFG | sed 's#\\n#\n#g') | grep '^[[]' | grep -v main | sed 's/[][]//g' | sed 's#^#yum --enablerepo=#' | sed 's#$# clean metadata#' | sort -u | tr '\n' ';')
+ echo "$MOCK -r $CFG --configdir $(dirname $CFG) --chroot bash -c $CMD" &> $TMP
+ trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --chroot "bash -c '($CMD)'" &>>$TMP
+ RC=$?
+ if [ $RC -ne 0 ]; then
+ cat $TMP
+ fi
+ \rm -f $TMP
+ return $RC
+}
+
+mock_clean_metadata () {
+ echo "${FUNCNAME[0]}: in"
+ for SUB_CFG in $(mock_sub_configs); do
+ local PREFIX=$(echo $SUB_CFG | rev | cut -d . -f 2 | rev)
+ ( mock_clean_metadata_cfg $SUB_CFG 2>&1 | sed "s#^#${PREFIX}: #" ; exit ${PIPESTATUS[0]} ) &
+ done
+ wait
+ echo "${FUNCNAME[0]}: out"
+}
+
+update_cgcs_repo () {
+ local REPO_NAME=$1
+ (
+ cd $MY_REPO/$REPO_NAME/
+
+ local CURR_HEAD=$(git rev-parse HEAD)
+ local LAST_HEAD_FILE="$MY_REPO/$REPO_NAME/.last_head"
+ local LAST_HEAD_FILE_OLD="$MY_WORKSPACE/$REPO_NAME.last_head"
+ local CHANGED
+ local NEW_UNTRACKED
+ local NEED_REBUILD
+ local NEED_MOCK_CLEAN=0
+ local d
+
+ if [ -f $LAST_HEAD_FILE_OLD -a ! -f $LAST_HEAD_FILE ]; then
+ \cp $LAST_HEAD_FILE_OLD $LAST_HEAD_FILE
+ fi
+
+ local LAST_HEAD=$(cat $LAST_HEAD_FILE | head -n 1)
+
+ for d in "Binary" "Source"; do
+ NEED_REBUILD=0
+ if [ ! -d $d/repodata ]; then
+ NEED_REBUILD=1
+ fi
+ if [ "$CURR_HEAD" != "$LAST_HEAD" ]; then
+ NEED_REBUILD=1
+ fi
+
+ CHANGED=$(git diff --name-only | grep $d)
+ if [ "x$CHANGED" != "x" ]; then
+ NEED_REBUILD=1
+ fi
+
+ NEW_UNTRACKED=$(git ls-files .
--exclude-standard --others | grep $d) + if [ "x$NEW_UNTRACKED" != "x" ]; then + NEED_REBUILD=1 + fi + + if [ $NEED_REBUILD -eq 1 ]; then + NEED_MOCK_CLEAN=1 + echo "" + echo "Need to recreate $REPO_NAME/$d/repodata" + mkdir -p $d + if [ -d $d/repodata ]; then + update_repodata "$d" + else + recreate_repodata "$d" + fi + fi + done + echo "$CURR_HEAD" > $LAST_HEAD_FILE + \cp $LAST_HEAD_FILE $LAST_HEAD_FILE_OLD + if [ $NEED_MOCK_CLEAN -eq 1 ]; then + echo "" + echo "Need to clean mock" + mock_clean + set_mock_symlinks $MY_BUILD_CFG + fi + ) +} + +mock_clean_mounts_dir () { + local MOUNT=$1 + local RC + + if [ "$MOUNT" == "" ]; then + return 1 + fi + mount | grep "$MOUNT" >> /dev/null + if [ $? -eq 0 ]; then + RC=1 + which mock_cache_umount >> /dev/null + if [ $? -eq 0 ]; then + echo "umount '$MOUNT'" + mock_cache_umount "$MOUNT" + if [ $? -eq 0 ]; then + RC=0 + fi + fi + if [ $RC -eq 1 ]; then + echo "ERROR: Directory '$MOUNT' is already mounted and will cause a build failure within mock." + echo "Ask your system administrator to umount '$MOUNT'." + exit 1 + fi + fi + return 0 +} + +mock_clean_mounts_cfg () { + local CFG=$1 + local ROOT_DIR=$(mock_get_root_dir $CFG) + local YUM_CACHE_MOUNT=$(readlink -f "$ROOT_DIR/root/var/cache/yum") + local PROC_MOUNT=$(readlink -f "$ROOT_DIR/root/proc") + local SYS_MOUNT=$(readlink -f "$ROOT_DIR/root/sys") + local SHM_MOUNT=$(readlink -f "$ROOT_DIR/root/dev/shm") + local PTS_MOUNT=$(readlink -f "$ROOT_DIR/root/dev/pts") + local MOUNT + + echo "${FUNCNAME[0]}: $CFG" + for MOUNT in "$YUM_CACHE_MOUNT" "$PROC_MOUNT" "$SYS_MOUNT" "$SHM_MOUNT" "$PTS_MOUNT"; do + mock_clean_mounts_dir "$MOUNT" + done +} + +mock_clean_mounts () { + echo "${FUNCNAME[0]}: in" + for SUB_CFG in $(mock_sub_configs); do + local PREFIX=$(echo $SUB_CFG | rev | cut -d . 
-f 2 | rev) + ( mock_clean_mounts_cfg $SUB_CFG 2>&1 | sed "s#^#${PREFIX}: #" ; exit ${PIPESTATUS[0]} ) & + done + wait + # mock_clean_mounts_cfg $BUILD_CFG + echo "${FUNCNAME[0]}: out" +} + +clean_yum_cache_cfg () { + local CFG=$1 + local CACHE_DIR=$(mock_get_cache_dir $CFG) + local ROOT_DIR=$(mock_get_root_dir $CFG) + local RC=0 + + echo "${FUNCNAME[0]}: $CFG" + + local YUM_CACHE_MOUNT=$(readlink -f "$ROOT_DIR/root/var/cache/yum") + local YUM_CACHE_LOCK="$CACHE_DIR/mock/yum_cache/yumcache.lock" + # echo "clean_yum_cache YUM_CACHE_MOUNT='$YUM_CACHE_MOUNT' YUM_CACHE_LOCK='$YUM_CACHE_LOCK'" + + if [ "$YUM_CACHE_MOUNT" != "" ]; then + mock_clean_mounts_dir "$YUM_CACHE_MOUNT" + fi + + if [ -f "$YUM_CACHE_LOCK" ]; then + RC=1 + which mock_cache_unlock >> /dev/null + if [ $? -eq 0 ]; then + mock_cache_unlock "$YUM_CACHE_LOCK" + if [ $? -eq 0 ]; then + RC=0 + fi + fi + if [ $RC -eq 1 ]; then + echo "ERROR: File '$YUM_CACHE_LOCK' exists and will cause a build failure within mock." + echo "Ask your system administrator to delete '$YUM_CACHE_LOCK'." + exit 1 + fi + fi + return $RC +} + + +clean_yum_cache () { + echo "${FUNCNAME[0]}: in" + for SUB_CFG in $(mock_sub_configs); do + local PREFIX=$(echo $SUB_CFG | rev | cut -d . 
-f 2 | rev) + ( clean_yum_cache_cfg $SUB_CFG 2>&1 | sed "s#^#${PREFIX}: #" ; exit ${PIPESTATUS[0]} ) & + done + wait + # clean_yum_cache_cfg $BUILD_CFG + echo "${FUNCNAME[0]}: out" +} + +mock_update_cfg () { + local CFG=$1 + echo "${FUNCNAME[0]}: $CFG" + echo "==================================" + set_mock_symlinks $CFG + echo "$MOCK -r $CFG --configdir $(dirname $CFG) --update" + trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --update + echo "==================================" +} + +mock_init_cfg () { + local CFG=$1 + echo "${FUNCNAME[0]}: $CFG" + echo "==================================" + set_mock_symlinks $CFG + echo "$MOCK -r $CFG --configdir $(dirname $CFG) --init" + trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --init + echo "==================================" +} + +mock_update_or_init_cfg () { + local CFG=$1 + local TMP + local RC + echo "${FUNCNAME[0]}: $CFG" + local ROOT_DIR=$(mock_get_root_dir $CFG) + + TMP=$(mktemp /tmp/mock_update_or_init_cfg_XXXXXX) + if [ $? -ne 0 ]; then + echo "${FUNCNAME[0]}: mktemp failed" + return 1 + fi + if [ -d $ROOT_DIR/root ]; then + echo "Updating the mock environment" + set_mock_symlinks $CFG + echo "$MOCK -r $CFG --configdir $(dirname $CFG) --update" + trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --update &> $TMP + RC=$? + else + echo "Init the mock environment" + set_mock_symlinks $CFG + echo "$MOCK -r $CFG --configdir $(dirname $CFG) --init" + trapwrap_n $CFG $MOCK -r $CFG --configdir $(dirname $CFG) --init &> $TMP + RC=$? + fi + if [ $RC -ne 0 ]; then + cat $TMP + fi + \rm -f $TMP + return $RC +} + +mock_update_or_init () { + echo "${FUNCNAME[0]}: in" + for SUB_CFG in $(mock_sub_configs); do + local PREFIX=$(echo $SUB_CFG | rev | cut -d . 
-f 2 | rev)
+        ( mock_update_or_init_cfg $SUB_CFG 2>&1 | sed "s#^#${PREFIX}: #" ; exit ${PIPESTATUS[0]} ) &
+    done
+    wait
+    # mock_update_or_init_cfg $BUILD_CFG
+    echo "${FUNCNAME[0]}: out"
+}
+
+# Mandatory environment: PROJECT and SRC_BUILD_ENVIRONMENT must be set
+# by the caller's build environment before running this script.
+if [ "x$PROJECT" == "x" ]; then
+    echo "PROJECT environment variable is not defined."
+    exit 1
+fi
+
+if [ "x$SRC_BUILD_ENVIRONMENT" == "x" ]; then
+    echo "SRC_BUILD_ENVIRONMENT environment variable is not defined."
+    exit 1
+fi
+
+# Command-line flag defaults
+NO_DESCENDANTS=0
+NO_REQUIRED=0
+NO_AUTOCLEAN=0
+NO_BUILD_INFO=0
+HELP=0
+CLEAN_FLAG=0
+FORMAL_FLAG=0
+CAREFUL=0
+
+# read the options
+TEMP=$(getopt -o ha::bc: --long std,rt,installer,no-required,no-descendants,no-autoclean,no-build-info,clean,tmpfs-clean,formal,careful,help,arga::,argb,argc: -n "$ME" -- "$@")
+
+if [ $? -ne 0 ]; then
+    usage
+    exit 1
+fi
+
+eval set -- "$TEMP"
+
+export BUILD_TYPE=std
+trap my_exit EXIT
+
+# extract options and their arguments into variables.
+while true ; do
+    case "$1" in
+        -a|--arga)
+            case "$2" in
+                "") ARG_A='some default value' ; shift 2 ;;
+                *) ARG_A=$2 ; shift 2 ;;
+            esac ;;
+        -b|--argb) ARG_B=1 ; shift ;;
+        --careful) CAREFUL=1 ; shift ;;
+        --no-descendants) NO_DESCENDANTS=1 ; shift ;;
+        --no-required) NO_REQUIRED=1 ; shift ;;
+        --no-autoclean) NO_AUTOCLEAN=1; shift ;;
+        --no-build-info) NO_BUILD_INFO=1; shift ;;
+        --formal) FORMAL_FLAG=1; shift ;;
+        --std) BUILD_TYPE=std; shift ;;
+        --rt) BUILD_TYPE=rt; shift ;;
+        --installer) BUILD_TYPE=installer; shift ;;
+        -h|--help) HELP=1 ; shift ;;
+        --clean) CLEAN_FLAG=1 ; shift ;;
+        # BUGFIX: 'shift' was missing here; with MY_WORKSPACE unset the
+        # branch consumed nothing and the 'while true' loop spun forever.
+        # When MY_WORKSPACE is set the branch exits the script, so the
+        # added shift does not change that path.
+        --tmpfs-clean) if [ -n "$MY_WORKSPACE" ]; then export MY_WORKSPACE=$MY_WORKSPACE/$BUILD_TYPE; exit 0; fi ; shift ;;
+        -c|--argc)
+            case "$2" in
+                "") shift 2 ;;
+                *) ARG_C=$2 ; shift 2 ;;
+            esac ;;
+        --) shift ; break ;;
+        *) echo "Internal error!" 
; exit 1 ;; + esac +done + + +# Reset variables +if [ -n "$MY_WORKSPACE" ]; then + export MY_WORKSPACE_TOP=${MY_WORKSPACE_TOP:-$MY_WORKSPACE} + export MY_WORKSPACE=$MY_WORKSPACE_TOP/$BUILD_TYPE +else + export MY_PATCH_WORKSPACE_TOP=${MY_PATCH_WORKSPACE_TOP:-$MY_PATCH_WORKSPACE} + export MY_PATCH_WORKSPACE=$MY_PATCH_WORKSPACE_TOP/$BUILD_TYPE +fi + +export MY_BUILD_DIR_TOP=${MY_BUILD_DIR_TOP:-$MY_BUILD_DIR} +export MY_BUILD_DIR=$MY_BUILD_DIR_TOP/$BUILD_TYPE + +export MY_BUILD_ENVIRONMENT_TOP=${MY_BUILD_ENVIRONMENT_TOP:-$MY_BUILD_ENVIRONMENT} +export MY_BUILD_ENVIRONMENT=$MY_BUILD_ENVIRONMENT_TOP-$BUILD_TYPE + +export MY_SRC_RPM_BUILD_DIR=$MY_BUILD_DIR/rpmbuild +export MY_BUILD_ENVIRONMENT_FILE=$MY_BUILD_ENVIRONMENT.cfg +export MY_BUILD_CFG=$MY_WORKSPACE/$MY_BUILD_ENVIRONMENT_FILE +export MY_MOCK_ROOT=$MY_WORKSPACE/mock/root + +LAST_PLATFORM_RELEASE_FILE="$MY_BUILD_DIR/.platform_release" + +if [ "$BUILD_TYPE" != "std" ]; then + PKG_DIRS_FILE=centos_pkg_dirs_$BUILD_TYPE +fi + +TARGETS=$@ + +if [ $HELP -eq 1 ]; then + usage + exit 0 +fi + +if [ $FORMAL_FLAG -eq 1 ]; then + export FORMAL_BUILD=1 +fi + +SRC_ROOT="$MY_REPO" +if [ "x$MY_REPO" == "x" ]; then + SRC_ROOT=$HOME +fi + +BUILD_ROOT="$MY_WORKSPACE" +if [ "x$MY_WORKSPACE" == "x" ]; then + BUILD_ROOT="$MY_PATCH_WORKSPACE" + + if [ "x$MY_PATCH_WORKSPACE" == "x" ]; then + echo "ERROR: reqiure one of MY_WORKSPACE or MY_PATCH_WORKSPACE be defined" + exit 1 + fi +fi + +export BUILD_BASE="$BUILD_ROOT" +export CCACHE_DIR="$BUILD_ROOT/.ccache" +export RESULT_DIR="$BUILD_BASE/results" +export SRC_BASE="$SRC_ROOT" + +if [ "x$MY_SRC_RPM_BUILD_DIR" != "x" ]; then + RPM_BUILD_ROOT=$MY_SRC_RPM_BUILD_DIR +else + RPM_BUILD_ROOT=$BUILD_BASE/rpmbuild +fi + +RELEASE_INFO_FILE=$SRC_BASE/addons/wr-cgcs/layers/cgcs/middleware/recipes-common/build-info/release-info.inc +if [ -f $RELEASE_INFO_FILE ]; then + source $MY_REPO/addons/wr-cgcs/layers/cgcs/middleware/recipes-common/build-info/release-info.inc +else + echo "ERROR: failed to find 
RELEASE_INFO_FILE=$RELEASE_INFO_FILE" + exit 1 +fi + +if [ "x$PLATFORM_RELEASE" == "x" ]; then + echo "ERROR: PLATFORM_RELEASE is not defined in $RELEASE_INFO_FILE" + exit 1 +fi + +export RPM_BUILD_BASE="$RPM_BUILD_ROOT" +export SRPM_OUT="$RPM_BUILD_BASE/SRPMS" +export RPM_DIR="$RPM_BUILD_BASE/RPMS" +export SPECS_DIR="$RPM_BUILD_BASE/SPECS" +export SOURCES_DIR="$RPM_BUILD_BASE/SOURCES" +export PLATFORM_RELEASE + +if [ ! -d $BUILD_BASE ]; then + echo "ERROR: expected to find directory at '$BUILD_BASE'" + exit 1 +fi + + +mkdir -p $RPM_BUILD_BASE +if [ $? -ne 0 ]; then + echo "ERROR: Failed to create directory '$RPM_BUILD_BASE'" + exit 1 +fi + +mkdir -p $SRPM_OUT/repodata +if [ $? -ne 0 ]; then + echo "ERROR: Failed to create directory '$SRPM_OUT/repodata'" + exit 1 +fi + +mkdir -p $RPM_DIR/repodata +if [ $? -ne 0 ]; then + echo "ERROR: Failed to create directory '$RPM_DIR/repodata'" + exit 1 +fi + +if [ "x$MY_BUILD_CFG" == "x" ];then + echo "ERROR: reqiure MY_BUILD_CFG to be defined" + exit 1 +fi + +export BUILD_CFG="$MY_BUILD_CFG" + +# Place build-time environement variables in mock environment +echo "FORMAL_BUILD=$FORMAL_BUILD" +echo "modify-build-cfg $BUILD_CFG" +${DIR}/modify-build-cfg $BUILD_CFG +if [ $? -ne 0 ]; then + echo "Could not modifiy $BUILD_CFG"; + exit 1 +fi + +if [ ! -f $BUILD_CFG ]; then + echo "ERROR: Mock config file not found at '$BUILD_CFG'" + exit 1 +fi + +# Create symlinks from /var/... to /localdisk/loadbuild/... if on a build server + +set_mock_symlinks $MY_BUILD_CFG + +if [ $CLEAN_FLAG -eq 1 ]; then + umount_mock_root_as_tmpfs_all +fi + +if [ $CLEAN_FLAG -eq 0 ]; then + ls $SRPM_OUT/*.src.rpm &>> /dev/null + if [ $? -ne 0 ]; then + echo "Nothing to build in '$SRPM_OUT'" + exit 0 + fi +fi + +ALL=0 +UNRESOLVED_TARGETS=" " +if [ "x$TARGETS" == "x" ]; then + echo "make: all" + ALL=1 +else + echo "make: $TARGETS" + UNRESOLVED_TARGETS="$TARGETS" +fi + +if [ "$BUILD_TYPE" != "std" ]; then + # This defines ... 
+ # STD_SRPM_PKG_NAME_TO_PATH + # STD_SRPM_PKG_NAMES + srpm_build_std_dictionary $MY_WORKSPACE_TOP/std/rpmbuild/SRPMS +fi + +# This defines ... +# SRPM_PKG_NAME_TO_PATH +# SRPM_PKG_NAMES +srpm_build_dictionary $SRPM_OUT + +SRPMS_TO_COMPILE=() +SRPMS_LIST="" +RPMS_LIST="" + +clean_list () { + local SRPMS_LIST="$1" + local RPMS_LIST="$2" + local ALL=$3 + local TARGET + local b + local d + local f + local n + local p + local r + local s + local sn + local t + local SPEC_DIR + + echo "${FUNCNAME[0]}: '$SRPMS_LIST' '$RPMS_LIST' '$ALL'" + if [ $ALL -eq 1 ]; then + for r in $(find $RPM_DIR -name "*.rpm"); do + \rm -f -v $r + done + + if [ $CLEAN_FLAG -eq 1 ]; then + for d in $(find $SPECS_DIR -type d); do + echo "rm -rf $d" + \rm -rf "$d" 2>> /dev/null + done + fi + + for d in $(find $RESULT_DIR/$USER-* -maxdepth 1 -type d 2>> /dev/null); do + echo "rm -rf $d" + \rm -rf "$d" 2>> /dev/null + done + else + for s in $SRPMS_LIST; do + ( + SPEC_DIR=$(spec_cache_dir_from_srpm $s) + sn=$(rpm_get_name $s) + update_spec_cache $s + + TARGET=$(rpm -qp --qf '%{NAME}-%{VERSION}\n' "$s") + for d in $(find $RESULT_DIR/$USER-* -maxdepth 1 -name "$TARGET*" 2>> /dev/null); do + echo "rm -rf $d" + \rm -rf "$d" 2>> /dev/null + done + + for p in $(ls -1 $SPEC_DIR/BUILDS); do + for r in $(find $RESULT_DIR/$USER-* $RPM_DIR -name "$p-*.rpm" 2>> /dev/null); do + if [ -f $r ]; then + n=$(rpm_get_name $r) + if [ "$n" == "$p" ]; then + if [[ "$r" == *.src.rpm ]]; then + if [ "$n" != "$sn" ]; then + continue + fi + + TARGET=$(rpm -qp --qf '%{NAME}-%{VERSION}\n' "$r") + for d in $(find $RESULT_DIR/$USER-* -maxdepth 1 -name "$TARGET*" 2>> /dev/null); do + echo "rm -rf $d" + \rm -rf "$d" 2>> /dev/null + done + + else + rs=$(rpm_get_srpm $r) + if [[ "$rs" != "$sn"-[0-9]* ]]; then + continue + fi + fi + + \rm -f -v $r + fi + fi + done + done + + TARGET=$(rpm -qp --qf '%{NAME}-%{VERSION}\n' "$s") + + if [ $CLEAN_FLAG -eq 1 ]; then + for d in $(find $SPECS_DIR -type d -name "$TARGET*" 2>> /dev/null); do + 
echo "rm -rf $d" + \rm -rf "$d" 2>> /dev/null + done + fi + + for d in $(find $RESULT_DIR/$USER-* -maxdepth 1 -name "$TARGET*" 2>> /dev/null); do + echo "rm -rf $d" + \rm -rf "$d" 2>> /dev/null + done + ) & + done + echo "waiting on file deletion" + wait + echo "wait complete" + fi + + echo "" + echo "Cleaning repodata" + for d in $(find -L $MY_WORKSPACE/rpmbuild $MY_WORKSPACE/results -type d -name repodata); do + recreate_repodata $(dirname $d) + done + + echo "" + echo "Cleaning mock environment" + echo "" + + if [ $ALL -eq 1 ]; then + # Wipe everything + if [ "x$RPM_DIR" != "x" ]; then + \rm -rf -v $RPM_DIR/* 2>> /dev/null + fi + + \rm -f -v $RESULT_DIR/mockchain.log 2>> /dev/null + mock_clean + else + # Wipe only traces of what we built + mock_partial_clean "$SRPMS_LIST" "$RPMS_LIST" + fi +} + +echo "ALL=$ALL" +( +trap my_exit EXIT +trap my_sigint INT +trap my_sighup HUP +echo "$CMDLINE" +echo "ALL=$ALL" + +if [ $CLEAN_FLAG -eq 0 ]; then + if [ -d $RESULT_DIR ]; then + # in case previous build recieved a ctrl-C and didn't get a change to copy it's successful work into RPM_DIR + for d in $(find $RESULT_DIR -name '*.rpm' | grep -v '[.]src[.]rpm' | xargs --no-run-if-empty --max-args=1 dirname | sort -u); do + rsync -u $d/*.rpm $RPM_DIR + done + for d in $(find -L $RESULT_DIR -type d -name repodata); do + update_repodata $(dirname $d) + done + fi +fi + +spec_cache_dir_from_srpm () { + local SRPM=${1} + local SPEC_DIR=$(echo $SRPM | sed 's#/SRPMS/#/SPECS/#') + echo "$SPEC_DIR" +} + +update_spec_cache () { + local SRPM=${1} + local SPEC_DIR=$(spec_cache_dir_from_srpm $SRPM) + local NEED_UPDATE=0 + + if [ ! -d $SPEC_DIR ]; then + mkdir -p $SPEC_DIR + NEED_UPDATE=1 + else + find "$SPEC_DIR" -not -newer "$SRPM" -name '*.spec' | grep 'spec' >> /dev/null + if [ $? 
-eq 0 ]; then + NEED_UPDATE=1 + fi + fi + + if [ $NEED_UPDATE -ne 0 ]; then + ( + cd $SPEC_DIR + \rm -rf BUILDS BUILDS_VR *.spec 2>> /dev/null + mkdir -p BUILDS + mkdir -p NAMES + mkdir -p SERVICES + mkdir -p BUILDS_VR + rpm2cpio $SRPM | pax -r '*.spec' + if [ $? -ne 0 ]; then + echo "ERROR: no spec file found in '$SRPM'" + fi + for f in $(find . -name '*.spec' | sort -V); do + touch $f + for p in $(spec_list_ver_rel_packages $f); do + touch "BUILDS_VR/$p" + done + for p in $(spec_list_packages $f); do + touch "BUILDS/$p" + done + for p in $(spec_find_tag Name $f 2>> /dev/null); do + touch "NAMES/$p" + done + for p in $(spec_find_global service $f 2>> /dev/null); do + touch "SERVICES/$p" + done + done + ) + fi +} + +# Find the list of packages we must compile + +echo "Find the list of packages we must compile" + +NEED_BUILD_DIR=$(mktemp -d $MY_WORKSPACE/tmp/$USER-$ME-need-build-XXXXXX) +if [ $? -ne 0 ] || [ "x$NEED_BUILD_DIR" == "x" ]; then + echo "Failed to create temp directory under $MY_WORKSPACE/tmp" + exit 1 +fi + +UNRESOLVED_TARGETS_DIR=$(mktemp -d $MY_WORKSPACE/tmp/$USER-$ME-unresolved-XXXXXX) +if [ $? -ne 0 ] || [ "x$UNRESOLVED_TARGETS_DIR" == "x" ]; then + echo "Failed to create temp directory under $MY_WORKSPACE/tmp" + exit 1 +fi + +for n in ${UNRESOLVED_TARGETS}; do + touch $UNRESOLVED_TARGETS_DIR/$n +done + +PLATFORM_RELEASE_CHANGED=0 +if [ -f $LAST_PLATFORM_RELEASE_FILE ]; then + LAST_PLATFORM_RELEASE=$(cat $LAST_PLATFORM_RELEASE_FILE) + if [ "$LAST_PLATFORM_RELEASE" != "$PLATFORM_RELEASE" ]; then + PLATFORM_RELEASE_CHANGED=1 + fi +else + PLATFORM_RELEASE_CHANGED=1 +fi + +for n in "${SRPM_PKG_NAMES[@]}"; do + ( + s=${SRPM_PKG_NAME_TO_PATH[$n]} + SPEC_DIR=$(spec_cache_dir_from_srpm $s) + update_spec_cache $s + # echo "$BASHPID: considering $n: $s, SPEC_DIR=$SPEC_DIR" + NEED_BUILD=0 + + if [ "x$TARGETS" == "x" ]; then + # We weren't given a list of build targets. + # Build anything missing or out of date. 
+ NEED_BUILD=0 + + if [ -f $RESULT_DIR/$USER-$PROJECT-$SRC_BUILD_ENVIRONMENT-$BUILD_TYPE/$(basename ${s//.src.rpm/})/fail ]; then + echo "Previous build of $(basename ${s//.src.rpm/}) failed" + NEED_BUILD=1 + elif [ ! -f $RESULT_DIR/$USER-$PROJECT-$SRC_BUILD_ENVIRONMENT-$BUILD_TYPE/$(basename ${s//.src.rpm/})/success ]; then + echo "No previous build of $(basename ${s//.src.rpm/})" + NEED_BUILD=1 + else + LOCAL_RPMS_VRA_LIST=$(ls -1 $SPEC_DIR/BUILDS_VR | tr '\n' ' ') + + for f in $LOCAL_RPMS_VRA_LIST; do + m=$(find $RPM_DIR/$f*rpm 2>> /dev/null | wc -l) + if [ $m -eq 0 ] && [ -f "$UNBUILT_PATTERN_FILE" ]; then + echo $f | grep -f "$UNBUILT_PATTERN_FILE" >> /dev/null && m=1 + if [ $m -eq 1 ]; then + echo "Excluding '$f' due to match in UNBUILT_PATTERN_FILE '$UNBUILT_PATTERN_FILE'" + if [ -f "$IMAGE_INC_FILE" ] ; then + for t in $(grep -v '^#' "$IMAGE_INC_FILE"); do + ii=$(echo $f | grep "^$t-[0-9]" | wc -l) + if [ $ii -gt 0 ]; then + echo "Including '$f' due to match in IMAGE_INC_FILE '$IMAGE_INC_FILE' due to pattern '^$t-[0-9]'" + m=0 + break + fi + done + fi + fi + fi + + newer=$(find $RPM_DIR/$f*rpm -type f -not -cnewer $s 2>> /dev/null | wc -l) + # echo "$m $newer=find $RPM_DIR/$f*rpm -type f -not -cnewer $s 2>> /dev/null | wc -l" + if [ $m -eq 0 ] || [ $newer -gt 0 ] || [ $CLEAN_FLAG -eq 1 ]; then + if [ $newer -gt 0 ]; then + echo "Including '$f' due to newer code" + find $RPM_DIR/$f*rpm -type f -not -cnewer $s + else + if [ $m -eq 0 ]; then + echo "Including '$f' due to m=0" + else + if [ $CLEAN_FLAG -eq 1 ]; then + echo "Including '$f' due to CLEAN_FLAG=1" + fi + fi + fi + NEED_BUILD=1 + break + fi + done + fi + else + # We were given a list of build targets, + # try to find packages matching that list. 
+ NEED_BUILD=0 + for f in $(find $SPEC_DIR/NAMES $SPEC_DIR/SERVICES $SPEC_DIR/BUILDS -type f 2>> /dev/null); do + b=$(basename $f) + for t in $TARGETS; do + if [[ ( "$b" == "$t" ) || ( ( "$BUILD_TYPE" == "rt" ) && ( "$b" == "$t-rt" ) ) ]]; then + echo "Including named target '$f'" + TARGET_FOUND=$t + NEED_BUILD=1 + # UNRESOLVED_TARGETS=$(echo "$UNRESOLVED_TARGETS" | sed "s/\(^\|[[:space:]]\)$TARGET_FOUND\([[:space:]]\|$\)/ /g") + if [ -f $UNRESOLVED_TARGETS_DIR/$TARGET_FOUND ]; then + \rm -f $UNRESOLVED_TARGETS_DIR/$TARGET_FOUND + fi + break + fi + done + done + fi + + if [ $NO_BUILD_INFO -eq 0 ]; then + if [ "$n" == "build-info" ]; then + echo "Including '$n' by default" + NEED_BUILD=1 + fi + fi + + if [ $PLATFORM_RELEASE_CHANGED -eq 1 ]; then + grep '%{platform_release}' $SPEC_DIR/*.spec >> /dev/null + if [ $? -eq 0 ]; then + echo "Including '$n' due to changed platform_release" + NEED_BUILD=1 + fi + fi + + if [ $NEED_BUILD -eq 1 ]; then + echo "found $n: $s" + touch "$NEED_BUILD_DIR/$n" + # SRPMS_TO_COMPILE+=("$n") + fi + ) & +done +echo "waiting" +wait +for n in $(ls -1 $NEED_BUILD_DIR); do + SRPMS_TO_COMPILE+=("$n") +done +UNRESOLVED_TARGETS=" " +for n in $(ls -1 $UNRESOLVED_TARGETS_DIR); do + UNRESOLVED_TARGETS="$UNRESOLVED_TARGETS $n" +done +\rm -rf $NEED_BUILD_DIR +\rm -rf $UNRESOLVED_TARGETS_DIR + +ORIG_SRPMS_TO_COMPILE=( ${SRPMS_TO_COMPILE[@]} ) + +echo "SRPMS_TO_COMPILE = ${SRPMS_TO_COMPILE[@]}" + + +# adding dependant packages +if [ $CLEAN_FLAG -eq 0 ] && [ $NO_DESCENDANTS -eq 0 ] && [ -f $SRPM_DIRECT_DESCENDANTS_FILE ]; then + echo + echo "adding dependant packages" + + # This array will accumulate a list of secondary build targets. 
+ TRANSITIVE_SRPMS_TO_COMPILE=() + + # Add packages that directly depend on the primary build targets in ORIG_SRPMS_TO_COMPILE + for n in ${ORIG_SRPMS_TO_COMPILE[@]}; do + needs=( $(grep "^$n;" "$SRPM_DIRECT_DESCENDANTS_FILE" | sed "s/$n;//" | sed 's/,/ /g'; alt_n=$(echo "$n" | sed 's#-rt$##'); if [ "$alt_n" != "$n" ]; then grep "^$alt_n;" "$SRPM_DIRECT_DESCENDANTS_FILE" | sed "s/$alt_n;//" | sed 's/,/ /g' | sed 's#\([^[:space:]]*\)#\1-rt#g'; fi ) ) + + # intersection of 'needs' and 'SRPM_PKG_NAMES' ... i.e. what should be compiled that we have source for + compilable_needs=( $(intersection needs SRPM_PKG_NAMES) ) + TRANSITIVE_SRPMS_TO_COMPILE=( $(union compilable_needs TRANSITIVE_SRPMS_TO_COMPILE) ) + done + + # For non-std build, and if non specific build targets are named, then search all + # packages that we might build and check if they require a package that DID build + # in the std build. If so build the package as a secondary target, even though the + # primary target was from a different build_type. + if [ "$BUILD_TYPE" != "std" ] && [ $ALL -eq 1 ] && [ -f $SRPM_TO_RPM_MAP_FILE ] && [ -f $SRPM_RPM_DIRECT_REQUIRES_FILE ]; then + # Test all that we can build ... + for n in ${SRPM_PKG_NAMES[@]}; do + contains ORIG_SRPMS_TO_COMPILE $n + if [ $? -eq 0 ]; then + # Already on the primary build list, skip it. 
+ echo "skip $n" + continue + fi + + STD_NEEDS_BUILD=0 + + # Iterate over all binary rpms names produce by the candidate package + for b in $(grep "^$n;" "$SRPM_TO_RPM_MAP_FILE" | sed "s/$n;//" | sed 's/,/ /g'); do + # find an rpm file with the rpm name we seek + for bp in $(find $RPM_DIR -name "$b-[0-9]*.rpm" | grep -v '.src.rpm'); do + if [ "$b" != "$(rpm_get_name $bp)" ]; then + # rpm name doesn't match + continue + fi + + # Iterate over binary rpms names required by the candidate package + for r in $(grep "^$n;" "$SRPM_RPM_DIRECT_REQUIRES_FILE" | sed "s/$n;//" | sed 's/,/ /g'); do + # find a required rpm file with the rpm name we seek, AND is newer than the produced rpm file + for rp in $(find $(echo $RPM_DIR | sed "s#/$BUILD_TYPE/#/std/#") -name "$r-[0-9]*.rpm" -cnewer $bp | grep -v '.src.rpm'); do + if [ "$r" != "$(rpm_get_name $rp)" ]; then + # rpm name doesn't match + continue + fi + + # Ok, a required rpm is newer than a built rpm, we should rebuild! + echo "rebuild '$n' due to newer '$r'" + STD_NEEDS_BUILD=1 + break + done + done + done + + # Avoid pointless processing if we already have a positive result. + if [ $STD_NEEDS_BUILD -eq 1 ]; then + break + fi + done + + if [ $STD_NEEDS_BUILD -eq 1 ]; then + # Compile is requires due to an updated required package in the std build. + # Add 'n' to array TRANSITIVE_SRPMS_TO_COMPILE. + TRANSITIVE_SRPMS_TO_COMPILE=( $(put TRANSITIVE_SRPMS_TO_COMPILE $n) ) + fi + done + fi + + # If the kernel or kernel-rt packages where absent from the primary build targets, but + # added as a secondary target, then make sure all out-of-tree kernel modules are also + # added. 
+ for n in kernel kernel-rt; do + KERNEL_IN_ORIG=0 + KERNEL_IN_TRANSITIVE=0 + contains ORIG_SRPMS_TO_COMPILE "$n" && KERNEL_IN_ORIG=1 + contains TRANSITIVE_SRPMS_TO_COMPILE "$n" && KERNEL_IN_TRANSITIVE=1 + if [ $KERNEL_IN_TRANSITIVE -eq 1 ] && [ $KERNEL_IN_ORIG -eq 0 ]; then + needs=( $(grep "^$n;" "$SRPM_DIRECT_DESCENDANTS_FILE" | sed "s/$n;//" | sed 's/,/ /g'; alt_n=$(echo "$n" | sed 's#-rt$##'); if [ "$alt_n" != "$n" ]; then grep "^$alt_n;" "$SRPM_DIRECT_DESCENDANTS_FILE" | sed "s/$alt_n;//" | sed 's/,/ /g' | sed 's#\([^[:space:]]*\)#\1-rt#g'; fi ) ) + + # intersection of 'needs' and 'SRPM_PKG_NAMES' ... i.e. what should be compiled that we have source for + compilable_needs=( $(intersection needs SRPM_PKG_NAMES) ) + TRANSITIVE_SRPMS_TO_COMPILE=( $(union compilable_needs TRANSITIVE_SRPMS_TO_COMPILE) ) + fi + done + + # Append the secondary targetc list to the primary list + SRPMS_TO_COMPILE=( $(union SRPMS_TO_COMPILE TRANSITIVE_SRPMS_TO_COMPILE) ) + echo "SRPMS_TO_COMPILE = ${SRPMS_TO_COMPILE[@]}" +fi + + +MUST_SRPMS_TO_COMPILE=( ${SRPMS_TO_COMPILE[@]} ) + +# adding required packages +if [ $CLEAN_FLAG -eq 0 ] && [ "x$TARGETS" != "x" ] && [ $NO_REQUIRED -eq 0 ] && [ -f $SRPM_TRANSITIVE_REQUIRES_FILE ]; then + echo + echo "adding required packages" + TRANSITIVE_SRPMS_TO_COMPILE=() + for n in ${MUST_SRPMS_TO_COMPILE[@]}; do + needs=( $(grep "^$n;" "$SRPM_TRANSITIVE_REQUIRES_FILE" | sed "s/$n;//" | sed 's/,/ /g') ) + + # intersection of 'needs' and 'SRPM_PKG_NAMES' ... i.e. what should be compiled that we have source for + compilable_needs=( $(intersection needs SRPM_PKG_NAMES) ) + TRANSITIVE_SRPMS_TO_COMPILE=( $(union compilable_needs TRANSITIVE_SRPMS_TO_COMPILE) ) + + for b in "${un[@]}"; do + echo $b + done + done + + SRPMS_TO_COMPILE=( $(union TRANSITIVE_SRPMS_TO_COMPILE SRPMS_TO_COMPILE) ) + echo "SRPMS_TO_COMPILE = ${SRPMS_TO_COMPILE[@]}" +fi + + +# Determine build order ... 
now done in mockchain4 +SRPMS_TO_COMPILE=( $(echo ${SRPMS_TO_COMPILE[@]} | sed 's/ /\n/g' | sort -u) ) + + +# convert pkg names to paths, clean work dirs if needed +echo +echo "Mapping packages to src rpm paths" +for n in ${SRPMS_TO_COMPILE[@]}; do + s=${SRPM_PKG_NAME_TO_PATH[$n]} + SPEC_DIR=$(spec_cache_dir_from_srpm $s) + update_spec_cache $s + + SRPMS_LIST="$SRPMS_LIST $s" + # echo "SRPMS_LIST = $SRPMS_LIST" + + TMP_RPMS_LIST=$(ls -1 $SPEC_DIR/BUILDS | tr '\n' ' ') + RPMS_LIST="$RPMS_LIST $TMP_RPMS_LIST" +done +echo + +if [ $CLEAN_FLAG -eq 0 ]; then + update_cgcs_repo cgcs-centos-repo + if [ -d $MY_REPO/cgcs-3rd-party-repo ]; then + update_cgcs_repo cgcs-3rd-party-repo + fi +fi + +mock_clean_mounts + +# clean work dirs if needed +CLEAN_BEFORE_BUILD_SRPM_LIST="" +CLEAN_BEFORE_BUILD_RPM_LIST="" +if [ $CLEAN_FLAG -eq 0 ]; then + echo + echo "Calculating minimal clean list" + for nm in ${SRPMS_TO_COMPILE[@]}; do + MUST_CLEAN=0 + contains MUST_SRPMS_TO_COMPILE $nm && MUST_CLEAN=1 + + s=${SRPM_PKG_NAME_TO_PATH[$nm]} + SPEC_DIR=$(spec_cache_dir_from_srpm $s) + update_spec_cache $s + + LOCAL_RPMS_LIST=$(ls -1 $SPEC_DIR/BUILDS | tr '\n' ' ') + LOCAL_RPMS_VRA_LIST=$(ls -1 $SPEC_DIR/BUILDS_VR | tr '\n' ' ') + + for f in $LOCAL_RPMS_VRA_LIST; do + m=$(find $RPM_DIR/$f*rpm 2>> /dev/null | wc -l) + if [ -f "$UNBUILT_PATTERN_FILE" ]; then + echo $f | grep -f "$UNBUILT_PATTERN_FILE" >> /dev/null && m=1 + fi + + n=$(find $RPM_DIR/$f*rpm -type f -not -cnewer $s 2>> /dev/null | wc -l) + # echo "$n=find $RPM_DIR/$f*rpm -type f -not -cnewer $s 2>> /dev/null | wc -l" + if [ $m -eq 0 ] || [ $n -gt 0 ] || [ $MUST_CLEAN -eq 1 ]; then + CLEAN_BEFORE_BUILD_SRPM_LIST="$CLEAN_BEFORE_BUILD_SRPM_LIST $s" + CLEAN_BEFORE_BUILD_RPM_LIST="$CLEAN_BEFORE_BUILD_RPM_LIST $LOCAL_RPMS_LIST" + break + fi + done + done +fi + + +if [ "$UNRESOLVED_TARGETS" != " " ]; then + if [ $CLEAN_FLAG -eq 0 ]; then + echo "" + echo "ERROR: failed to resolve build targets: $UNRESOLVED_TARGETS" + exit 1 + fi +fi + +echo 
"SRPMS_LIST = $SRPMS_LIST" +echo "RPMS_LIST = $RPMS_LIST" + + +echo +if [ $CLEAN_FLAG -eq 0 ]; then + # pre-create these directories as $USER, + # else mock will create them as root and fails to clean them. + # Note: keep these in sync with mockchain-parallel! + for i in $(seq 0 $((MAX_WORKERS-1))); do + mkdir -p $MY_WORKSPACE/mock/b$i + mkdir -p $MY_WORKSPACE/cache/b$i/mock + done + + mock_update_or_init +fi +set_mock_symlinks $MY_BUILD_CFG + +echo +echo "Cleaning" +if [ $CLEAN_FLAG -eq 1 ]; then + # Clean what the user asked for + echo "========= clean_list '$SRPMS_LIST' '$RPMS_LIST' $ALL" + \rm -r -f -v $MY_WORKSPACE/mock-$USER-* + clean_list "$SRPMS_LIST" "$RPMS_LIST" "$ALL" + + exit 0 +else + # Clean what we intend to build + if [ $NO_AUTOCLEAN -eq 1 ]; then + echo "no-autoclean was requested" + else + if [ "$CLEAN_BEFORE_BUILD_SRPM_LIST" != "" ]; then + echo "========= clean_list '$CLEAN_BEFORE_BUILD_SRPM_LIST' '$CLEAN_BEFORE_BUILD_RPM_LIST' 0" + clean_list "$CLEAN_BEFORE_BUILD_SRPM_LIST" "$CLEAN_BEFORE_BUILD_RPM_LIST" 0 + fi + fi +fi + +echo +echo "Cleaning repodata" + +BUILD_ENVIRONMENT_DIR=$(basename $BUILD_CFG) +BUILD_ENVIRONMENT_DIR=${BUILD_ENVIRONMENT_DIR%.*} +LOCAL_URL=http://127.0.0.1:8088$BUILD_BASE/results/$BUILD_ENVIRONMENT_DIR/ +LOCAL_SRC_URL=http://127.0.0.1:8088$BUILD_BASE/rpmbuild/SRPMS/ + +for d in $(find -L $RESULT_DIR -type d -name repodata); do +(cd $d/.. 
+ if [ -f repodata/*comps*xml ]; then + \mv repodata/*comps*xml comps.xml + fi + \rm -rf repodata +) +done + +echo +echo "Cleaning Metadata" + +MOCKCHAIN_LOG="$RESULT_DIR/mockchain.log" +mkdir -p $RESULT_DIR +touch $RESULT_DIR/build_start +\rm -rf $MOCKCHAIN_LOG + +mock_clean_metadata + +echo +echo "Building" + +CMD_PREFIX="" +if [ -x /bin/ionice ]; then + CMD_PREFIX="nice -n 20 ionice -c Idle /bin/ionice " +fi + +CMD_OPTIONS="-m --no-clean -m --no-cleanup-after" +if [ $CAREFUL -eq 1 ]; then + CMD_OPTIONS="-m --no-cleanup-after" +fi +echo "CAREFUL=$CAREFUL" + +# Sets WORKERS and MOCKCHAIN_RESOURCE_ALLOCATION +compute_resources $SRPMS_LIST + + +if [ -f $SRPM_RPM_DIRECT_REQUIRES_FILE ]; then + CMD_OPTIONS="$CMD_OPTIONS --srpm-dependency-file $SRPM_RPM_DIRECT_REQUIRES_FILE" +fi +if [ -f "$RPM_DIRECT_REQUIRES_FILE" ]; then + CMD_OPTIONS="$CMD_OPTIONS --rpm-dependency-file $RPM_DIRECT_REQUIRES_FILE" +fi +if [ -f "$RPM_TO_SRPM_MAP_FILE" ]; then + CMD_OPTIONS="$CMD_OPTIONS --rpm-to-srpm-map-file $RPM_TO_SRPM_MAP_FILE" +fi + + +for s in $SRPMS_LIST; do + d=$(echo "$s" | sed 's#/SRPMS/#/SOURCES/#') + if [ -f $d/BIG ]; then + BUILD_SIZE=$(cat $d/BIG | { read first rest ; echo $first ; }) + CMD_OPTIONS="$CMD_OPTIONS --mark-big-path $BUILD_SIZE:$s" + fi + if [ -f $d/SLOW ]; then + BUILD_SPEED=$(cat $d/SLOW | { read first rest ; echo $first ; }) + CMD_OPTIONS="$CMD_OPTIONS --mark-slow-path $BUILD_SPEED:$s" + fi +done +echo "CMD_OPTIONS=$CMD_OPTIONS" + +echo "MAX_WORKERS=$MAX_WORKERS" +echo "MOCKCHAIN_RESOURCE_ALLOCATION=$MOCKCHAIN_RESOURCE_ALLOCATION" + +CMD="$CMD_PREFIX mockchain-parallel -r $BUILD_CFG -l $BUILD_BASE --recurse --workers=$MAX_WORKERS --worker-resources=$MOCKCHAIN_RESOURCE_ALLOCATION --basedir=$MY_WORKSPACE --log=$MOCKCHAIN_LOG --tmp_prefix=$USER --addrepo=$LOCAL_URL --addrepo=$LOCAL_SRC_URL $CMD_OPTIONS -m --rebuild $SRPMS_LIST" +echo "" +echo "$CMD -m --define='_tis_dist .tis' -m --define='platform_release $PLATFORM_RELEASE'" +echo "" + +trapwrap stdbuf -o0 
$CMD -m --define="_tis_dist .tis" -m --define="platform_release $PLATFORM_RELEASE" +MOCKCHAIN_RC=$? + +echo $PLATFORM_RELEASE > $LAST_PLATFORM_RELEASE_FILE + +if [ $CLEAN_FLAG -eq 0 ]; then + umount_mock_root_as_tmpfs_all +fi + +for d in $(find $RESULT_DIR -name '*.rpm' | grep -v '[.]src[.]rpm' | xargs --max-args=1 dirname | sort -u); do + rsync -u $d/*.rpm $RPM_DIR +done + +if [ $ALL -eq 1 ]; then + echo + echo "Auditing for obsolete srpms" + for r in $(find $RESULT_DIR $RPM_DIR -name '*.src.rpm'); do + ( + f=$(basename $r) + if [ ! -f "$SRPM_OUT/$f" ]; then + \rm -fv $r + fi + ) & + done + echo "waiting for srpm audit to complete" + wait + echo "Auditing for obsolete rpms" + for r in $(find $RESULT_DIR $RPM_DIR -name '*.rpm' | grep -v 'src.rpm'); do + ( + s=$(rpm_get_srpm $r) + if [ ! -f "$SRPM_OUT/$s" ]; then + echo "Failed to find '$SRPM_OUT/$s'" + \rm -fv $r + fi + ) & + done + echo "waiting for rpm audit to complete" + wait + echo "Audit complete" + echo "" +fi + +if [ $MOCKCHAIN_RC -ne 0 ]; then + echo "ERROR: Failed to build rpms using '$CMD'" + exit 1 +fi + +echo "Recreate repodata" +for d in $(find -L $MY_WORKSPACE/rpmbuild $MY_WORKSPACE/results -type d -name repodata); do + update_repodata $(dirname "$d") +done + + +if [ -f $MOCKCHAIN_LOG ]; then + grep 'following pkgs could not be successfully built' $MOCKCHAIN_LOG >> /dev/null + if [ $? -eq 0 ]; then + FAILED_PKGS="" + for p in $(sed -n '/following pkgs could not be successfully built:/,/Results out to/p' $MOCKCHAIN_LOG | grep -v '*** Build Failed ***' | sed 1d | sed '$ d' | cut -d ':' -f2-); do + PKG=$(basename $p) + FAILED_PKGS="$PKG $FAILED_PKGS" + done + echo + echo "Failed to build packages: $FAILED_PKGS" + exit 1 + fi +fi + +# If we're doing a nightly or formal build (i.e. not a developer build) then we +# want to sign certain packages. Note that only certain users (i.e. jenkins) +# have the authority to requiest that packages be signed. 
+# +# Signing is not actually done on this server (the keys are kept safe on a +# different server with very limited access) but we can invoke a script to +# make calls to the signing server. Note that this will NOT work if you are +# not Jenkins and don't have access to the Jenkins cross server login keys. +# +# Note that both std and rt builds must be complete before invoking the signing +# script +if [ 0$FORMAL_BUILD -eq 1 ] && [ "$USER" == "jenkins" ]; then + if [ -e $MY_WORKSPACE_TOP/std ] && [ -e $MY_WORKSPACE_TOP/rt ]; then + # Create dir for log, if it doesn't exit + mkdir -p $MY_WORKSPACE_TOP/export + echo "We are jenkins, and we are trying to do a formal build -- calling signing server" + echo " to sign boot RPMs with secure boot keys" + + MY_WORKSPACE=$MY_WORKSPACE_TOP sign-build > $MY_WORKSPACE_TOP/export/sign-build.log 2>&1 + if [ $? -ne 0 ]; then + echo "Signing of packages failed -- see $MY_WORKSPACE_TOP/export/sign-build.log" + exit 1 + fi + fi +fi + +exit 0 +) 2>&1 | stdbuf -o0 awk '{ print strftime("%H:%M:%S"), $0; fflush(); }' | tee $(date "+$MY_WORKSPACE/build-rpms-parallel_%Y-%m-%d_%H-%M-%S.log") ; exit ${PIPESTATUS[0]} diff --git a/build-tools/build-rpms-serial b/build-tools/build-rpms-serial new file mode 100755 index 00000000..b303ea05 --- /dev/null +++ b/build-tools/build-rpms-serial @@ -0,0 +1,1328 @@ +#!/bin/bash +# set -x + +export ME=$(basename "$0") +CMDLINE="$ME $@" + + +CREATEREPO=$(which createrepo_c) +if [ $? 
-ne 0 ]; then
+    CREATEREPO="createrepo"
+fi
+
+# Dependency-cache files produced by create_dependancy_cache.py; these
+# drive build-order and rebuild decisions later in this script.
+DEPENDANCY_DIR="$MY_REPO/cgcs-tis-repo/dependancy-cache"
+SRPM_DIRECT_REQUIRES_FILE="$DEPENDANCY_DIR/SRPM-direct-requires"
+SRPM_TRANSITIVE_REQUIRES_FILE="$DEPENDANCY_DIR/SRPM-transitive-requires"
+SRPM_TRANSITIVE_DESCENDANTS_FILE="$DEPENDANCY_DIR/SRPM-transitive-descendants"
+SRPM_DIRECT_DESCENDANTS_FILE="$DEPENDANCY_DIR/SRPM-direct-descendants"
+SRPM_RPM_DIRECT_REQUIRES_FILE="$DEPENDANCY_DIR/SRPM-direct-requires-rpm"
+RPM_DIRECT_REQUIRES_FILE="$DEPENDANCY_DIR/RPM-direct-requires"
+RPM_TO_SRPM_MAP_FILE="$DEPENDANCY_DIR/rpm-to-srpm"
+SRPM_TO_RPM_MAP_FILE="$DEPENDANCY_DIR/srpm-to-rpm"
+
+UNBUILT_PATTERN_FILE="$MY_REPO/build-data/unbuilt_rpm_patterns"
+IMAGE_INC_FILE="$MY_REPO/build-tools/build_iso/image.inc"
+
+export MOCK=/usr/bin/mock
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source $DIR/spec-utils
+source $DIR/srpm-utils
+
+# NOTE(review): deliberately overrides HOME to the current directory for
+# the rest of the build -- confirm downstream tools (mock) rely on this.
+HOME=$(pwd)
+
+usage () {
+    echo ""
+    echo "Usage: "
+    echo "   $ME [ [--rt] [--no-required] [--no-descendants] [--no-build-info] [--no-autoclean] [--formal] ]"
+    echo "   $ME --clean [ [--no-descendants] ]"
+    echo "   $ME --help"
+    echo ""
+}
+
+number_of_cpus () {
+    /usr/bin/nproc
+}
+
+
+#
+# Delete old repodata and create a new one
+#
+recreate_repodata () {
+    local DIR=${1}
+
+    (cd $DIR
+     # Preserve the comps (package groups) file before wiping repodata.
+     # NOTE(review): '[ -f glob ]' assumes the glob expands to at most
+     # one file -- verify.
+     if [ -f repodata/*comps*xml ]; then
+         \mv repodata/*comps*xml comps.xml
+     fi
+     \rm -rf repodata
+     if [ -f comps.xml ]; then
+         $CREATEREPO -g comps.xml --workers $(number_of_cpus) $(pwd)
+     else
+         $CREATEREPO --workers $(number_of_cpus) $(pwd)
+     fi
+    )
+}
+
+#
+# Update existing repodata
+#
+update_repodata () {
+    local DIR=${1}
+
+    (cd $DIR
+     # Capture createrepo output; it is only shown on failure.
+     TMP=$(mktemp /tmp/update_repodata_XXXXXX)
+     RC=0
+     if [ -f comps.xml ]; then
+         $CREATEREPO --update -g comps.xml --workers $(number_of_cpus) $(pwd) &> $TMP
+         RC=$?
+     else
+         $CREATEREPO --update --workers $(number_of_cpus) $(pwd) &> $TMP
+         RC=$?
+     fi
+     if [ $RC -ne 0 ]; then
+         cat $TMP
+     fi
+     \rm -f $TMP
+    )
+}
+
+#
+# return array that is the intersection of two other arrays
+#
+# Arguments are array *names*, dereferenced via ${!name} indirect
+# expansion; elements are emitted one per line for capture with $( ).
+#
+# NEW_ARRAY=( $( intersection ARRAY1 ARRAY2 ) )
+#
+intersection () {
+    local Aname=$1[@]
+    local Bname=$2[@]
+    local A=("${!Aname}")
+    local B=("${!Bname}")
+
+    # echo "${A[@]}"
+    # echo "${B[@]}"
+    for a in "${A[@]}"; do
+        # echo "a=$a"
+        for b in "${B[@]}"; do
+            # echo "b=$b"
+            if [ "$a" == "$b" ]; then
+                echo "$a"
+                break
+            fi
+        done
+    done
+}
+
+#
+# return array that is the union of two other arrays
+#
+# All of ARRAY1 is emitted as-is, then elements of ARRAY2 not already
+# present in ARRAY1.
+#
+# NEW_ARRAY=( $( union ARRAY1 ARRAY2 ) )
+#
+union () {
+    local Aname=$1[@]
+    local Bname=$2[@]
+    local A=("${!Aname}")
+    local B=("${!Bname}")
+    local a
+    local b
+
+    for a in "${A[@]}"; do
+        echo "$a"
+    done
+
+    for b in "${B[@]}"; do
+        local found=0
+        for a in "${A[@]}"; do
+            if [ "$a" == "$b" ]; then
+                found=1
+                break
+            fi
+        done
+        if [ $found -eq 0 ]; then
+            echo $b
+        fi
+    done
+}
+
+#
+# returns 0 if element is in the array
+#
+# Note: uses [[ == ]], so the seek value is treated as a glob pattern.
+#
+# e.g. contains ARRAY $SEEKING && echo "$SEEKING is in 'ARRAY'"
+#
+contains () {
+    local Aname=$1[@]
+    local A=("${!Aname}")
+    local seeking=$2
+    local in=1
+
+    for a in "${A[@]}"; do
+        if [[ $a == $seeking ]]; then
+            in=0
+            break
+        fi
+    done
+    return $in
+}
+
+#
+# Append element to array if not present
+#
+# ARRAY=( $( put ARRAY $ELEMENT ) )
+#
+put () {
+    local Aname=$1[@]
+    local A=("${!Aname}")
+    local element="$2"
+    for a in "${A[@]}"; do
+        echo "$a"
+    done
+    contains A "$element" || echo "$element"
+}
+
+# Emit 'target' and (recursively) anything it requires, in dependency
+# order, consuming entries from the global UNORDERED_LIST.  Direct
+# requirements come from SRPM_DIRECT_REQUIRES_FILE ("name;dep,dep,...").
+build_order_recursive () {
+    local target=$1
+    local idx
+    local remainder_list
+    local needs
+    local needs_list
+
+    for((idx=0;idx<${#UNORDERED_LIST[@]};idx++)); do
+        if [ ${UNORDERED_LIST[idx]} == $target ]; then
+            remainder_list=( ${UNORDERED_LIST[@]:0:$idx} ${UNORDERED_LIST[@]:$((idx + 1))} )
+            UNORDERED_LIST=( ${remainder_list[@]} )
+            needs=( $(grep "^$target;" "$SRPM_DIRECT_REQUIRES_FILE" | sed "s/$target;//" | sed 's/,/ /g') )
+            needs_list=( $(intersection needs remainder_list) )
+ 
for((idx=0;idx<${#needs_list[@]};idx++)); do + build_order_recursive ${needs_list[idx]} + done + echo $target + break + fi + done +} + +build_order () { + local Aname=$1[@] + local original_list=("${!Aname}") + local needs + local needs_list + local remainder_list + local idx + local element + local next_start=0 + local old_next_start=0 + local progress=1 + + while [ ${#original_list[@]} -gt 0 ] && [ $progress -gt 0 ]; do + progress=0 + old_next_start=$next_start + for((idx=$next_start;idx<${#original_list[@]};idx++)); do + element=${original_list[idx]} + next_start=$idx + remainder_list=( ${original_list[@]:0:$idx} ${original_list[@]:$((idx + 1))} ) + needs=( $(grep "^$element;" "$SRPM_DIRECT_REQUIRES_FILE" | sed "s/$element;//" | sed 's/,/ /g') ) + needs_list=( $(intersection needs remainder_list) ) + if [ ${#needs_list[@]} -eq 0 ]; then + echo "$element" + original_list=( "${remainder_list[@]}" ) + if [ $next_start -ge ${#original_list[@]} ]; then + next_start=0 + fi + progress=1 + break + fi + done + if [ $old_next_start -ne 0 ]; then + progress=1 + next_start=0 + fi + done + + if [ ${#original_list[@]} -gt 0 ]; then + # Had trouble calculating a build order for these remaining packages, so stick them at the end + UNORDERED_LIST=( ${original_list[@]} ) + while [ ${#UNORDERED_LIST[@]} -gt 0 ]; do + element=${UNORDERED_LIST[0]} + build_order_recursive $element + done + fi +} + +set_mock_symlinks () { + local LNK + local DEST + if [ -d /localdisk/loadbuild/mock ]; then + mkdir -p $MY_WORKSPACE + LNK=$(echo "/localdisk/loadbuild/mock/$(basename $MY_BUILD_CFG)" | sed 's/.cfg$//') + if [ ! -L $LNK ] && [ -d $LNK ]; then + echo "WARNING: Found directory at '$LNK' when symlink was expected. Fixing..." + \rm -rf $LNK + if [ -d $LNK ]; then + \mv $LNK $LNK.clean_me + fi + fi + if [ -L $LNK ]; then + DEST=$(readlink $LNK) + if [ "$DEST" != "$MY_WORKSPACE" ] || [ ! -d "$MY_WORKSPACE" ]; then + echo "WARNING: Found broken symlink at '$LNK'. Fixing..." 
+ \rm -f $LNK + fi + fi + if [ ! -L $LNK ]; then + if [ ! -d "$MY_WORKSPACE" ]; then + echo "ERROR: Can't create symlink from $LNK to $MY_WORKSPACE as destination does not exist." + exit 1 + fi + ln -s $MY_WORKSPACE $LNK + fi + fi + + if [ -d /localdisk/loadbuild/mock-cache ]; then + mkdir -p $MY_WORKSPACE/cache + LNK=$(echo "/localdisk/loadbuild/mock-cache/$(basename $MY_BUILD_CFG)" | sed 's/.cfg$//') + if [ ! -L $LNK ] && [ -d $LNK ]; then + echo "WARNING: Found directory at '$LNK' when symlink was expected. Fixing..." + \rm -rf $LNK + if [ -d $LNK ]; then + \mv $LNK $LNK.clean_me + fi + fi + if [ -L $LNK ]; then + DEST=$(readlink $LNK) + if [ "$DEST" != "$MY_WORKSPACE/cache" ] || [ ! -d "$MY_WORKSPACE/cache" ]; then + echo "WARNING: Found broken symlink at '$LNK'. Fixing..." + \rm -f $LNK + fi + fi + if [ ! -L $LNK ]; then + if [ ! -d "$MY_WORKSPACE/cache" ]; then + echo "ERROR: Can't create symlink from $LNK to $MY_WORKSPACE/cache as destination does not exist." + exit 1 + fi + ln -s $MY_WORKSPACE/cache $LNK + fi + fi +} + +remove_mock_symlinks () { + local LNK + if [ -d /localdisk/loadbuild/mock ]; then + LNK=$(echo "/localdisk/loadbuild/mock/$(basename $MY_BUILD_CFG)" | sed 's/.cfg$//') + if [ -L $LNK ]; then + \rm -f $LNK + fi + if [ -d $LNK ]; then + \rm -rf $LNK + if [ $? -ne 0 ]; then + \mv -f $LNK $LNK.clean_me + fi + fi + fi + + if [ -d /localdisk/loadbuild/mock-cache ]; then + LNK=$(echo "/localdisk/loadbuild/mock-cache/$(basename $MY_BUILD_CFG)" | sed 's/.cfg$//') + if [ -L $LNK ]; then + \rm -f $MY_WORKSPACE/cache $LNK + fi + if [ -d $LNK ]; then + \rm -rf $LNK + if [ $? 
-ne 0 ]; then
            \mv -f $LNK $LNK.clean_me
         fi
      fi
   fi
}


# Fully scrub the mock build root and all of its caches.
mock_clean () {
   echo "=================================="
   remove_mock_symlinks
   set_mock_symlinks
   echo "=================================="
   clean_yum_cache
   echo "=================================="
   echo "$MOCK -r $BUILD_CFG --scrub=root-cache --scrub=yum-cache --scrub=cache"
   $MOCK -r $BUILD_CFG --scrub=root-cache --scrub=yum-cache --scrub=cache
   echo "=================================="
   echo "$MOCK -r $BUILD_CFG --scrub=all"
   $MOCK -r $BUILD_CFG --scrub=all
   echo "=================================="
   echo "$MOCK -r $BUILD_CFG --clean"
   $MOCK -r $BUILD_CFG --clean
   ### Note: this sometimes leaves behind a $MY_WORKSPACE/cache/mock/yum_cache/yumcache.lock
   echo "=================================="
   clean_yum_cache
   echo "=================================="
   echo "$MOCK -r $BUILD_CFG --scrub=all"
   $MOCK -r $BUILD_CFG --scrub=all
   echo "=================================="
   remove_mock_symlinks
}

# Rebuild the Binary/Source repodata of the named repo when its git content
# has changed since the last recorded HEAD; scrub mock if anything changed.
update_cgcs_repo () {
   local REPO_NAME=$1
   (
   cd $MY_REPO/$REPO_NAME/

   local CURR_HEAD=$(git rev-parse HEAD)
   local LAST_HEAD_FILE="$MY_REPO/$REPO_NAME/.last_head"
   local LAST_HEAD_FILE_OLD="$MY_WORKSPACE/$REPO_NAME.last_head"
   local CHANGED
   local NEW_UNTRACKED
   local NEED_REBUILD
   local NEED_MOCK_CLEAN=0
   local d

   # Fix: these variables were previously referenced without '$'
   # (literal strings 'LAST_HEAD_FILE_OLD' etc.), so the migration of
   # the legacy .last_head file could never happen.
   if [ -f "$LAST_HEAD_FILE_OLD" -a ! -f "$LAST_HEAD_FILE" ]; then
      \cp "$LAST_HEAD_FILE_OLD" "$LAST_HEAD_FILE"
   fi

   local LAST_HEAD=$(cat $LAST_HEAD_FILE | head -n 1)

   for d in "Binary" "Source"; do
      NEED_REBUILD=0
      if [ ! -d $d/repodata ]; then
         NEED_REBUILD=1
      fi
      if [ "$CURR_HEAD" != "$LAST_HEAD" ]; then
         NEED_REBUILD=1
      fi

      CHANGED=$(git diff --name-only | grep $d)
      if [ "x$CHANGED" != "x" ]; then
         NEED_REBUILD=1
      fi

      NEW_UNTRACKED=$(git ls-files .
--exclude-standard --others | grep $d)
      if [ "x$NEW_UNTRACKED" != "x" ]; then
         NEED_REBUILD=1
      fi

      if [ $NEED_REBUILD -eq 1 ]; then
         NEED_MOCK_CLEAN=1
         echo ""
         echo "Need to recreate $REPO_NAME/$d/repodata"
         # Fix: dropped a redundant duplicate NEED_MOCK_CLEAN=1 here.
         mkdir -p $d
         if [ -d $d/repodata ]; then
            update_repodata "$d"
         else
            recreate_repodata "$d"
         fi
      fi
   done
   echo "$CURR_HEAD" > $LAST_HEAD_FILE
   \cp $LAST_HEAD_FILE $LAST_HEAD_FILE_OLD
   if [ $NEED_MOCK_CLEAN -eq 1 ]; then
      echo ""
      echo "Need to clean mock"
      mock_clean
      set_mock_symlinks
   fi
   )
}

# Unmount/unlock any yum cache state left inside the mock root; exits the
# build if it cannot be cleared (mock would fail later otherwise).
clean_yum_cache () {
   local RC=0
   YUM_CACHE_MOUNT="$MY_WORKSPACE/mock/root/var/cache/yum"
   YUM_CACHE_LOCK="$MY_WORKSPACE/cache/mock/yum_cache/yumcache.lock"
   echo "clean_yum_cache $YUM_CACHE_MOUNT $YUM_CACHE_LOCK"
   mount | grep "$YUM_CACHE_MOUNT" >> /dev/null
   if [ $? -eq 0 ]; then
      RC=1
      which mock_cache_umount >> /dev/null
      if [ $? -eq 0 ]; then
         mock_cache_umount "$YUM_CACHE_MOUNT"
         if [ $? -eq 0 ]; then
            RC=0
         fi
      fi
      if [ $RC -eq 1 ]; then
         echo "ERROR: Directory '$YUM_CACHE_MOUNT' is already mounted and will cause a build failure within mock."
         echo "Ask your system administrator to umount '$YUM_CACHE_MOUNT'."
         exit 1
      fi
   fi

   if [ -f "$YUM_CACHE_LOCK" ]; then
      RC=1
      which mock_cache_unlock >> /dev/null
      if [ $? -eq 0 ]; then
         mock_cache_unlock "$YUM_CACHE_LOCK"
         if [ $? -eq 0 ]; then
            RC=0
         fi
      fi
      if [ $RC -eq 1 ]; then
         echo "ERROR: File '$YUM_CACHE_LOCK' exists and will cause a build failure within mock."
         echo "Ask your system administrator to delete '$YUM_CACHE_LOCK'."
         exit 1
      fi
   fi
   return $RC
}

if [ "x$PROJECT" == "x" ]; then
   # Fix: 'environmnet' -> 'environment' in the user-visible message.
   echo "PROJECT environment variable is not defined."
   exit 1
fi

if [ "x$SRC_BUILD_ENVIRONMENT" == "x" ]; then
   echo "SRC_BUILD_ENVIRONMENT environment variable is not defined."
+ exit 1 +fi + +NO_DESCENDANTS=0 +NO_REQUIRED=0 +NO_AUTOCLEAN=0 +NO_BUILD_INFO=0 +HELP=0 +CLEAN_FLAG=0 +FORMAL_FLAG=0 +CAREFUL=0 + +# read the options +TEMP=$(getopt -o ha::bc: --long serial,std,rt,installer,no-required,no-descendants,no-autoclean,no-build-info,clean,formal,careful,help,arga::,argb,argc: -n "$ME" -- "$@") + +if [ $? -ne 0 ]; then + usage + exit 1 +fi + +eval set -- "$TEMP" + +export BUILD_TYPE=std + +# extract options and their arguments into variables. +while true ; do + case "$1" in + -a|--arga) + case "$2" in + "") ARG_A='some default value' ; shift 2 ;; + *) ARG_A=$2 ; shift 2 ;; + esac ;; + -b|--argb) ARG_B=1 ; shift ;; + --careful) CAREFUL=1 ; shift ;; + --no-descendants) NO_DESCENDANTS=1 ; shift ;; + --no-required) NO_REQUIRED=1 ; shift ;; + --no-autoclean) NO_AUTOCLEAN=1; shift ;; + --no-build-info) NO_BUILD_INFO=1; shift ;; + --formal) FORMAL_FLAG=1; shift ;; + --std) BUILD_TYPE=std; shift ;; + --rt) BUILD_TYPE=rt; shift ;; + --installer) BUILD_TYPE=installer; shift ;; + -h|--help) HELP=1 ; shift ;; + --clean) CLEAN_FLAG=1 ; shift ;; + -c|--argc) + case "$2" in + "") shift 2 ;; + *) ARG_C=$2 ; shift 2 ;; + esac ;; + --serial) shift ;; + --) shift ; break ;; + *) echo "Internal error!" 
; exit 1 ;;
   esac
done


# Reset variables: nest the per-BUILD_TYPE workspace under the top-level one.
if [ -n "$MY_WORKSPACE" ]; then
   export MY_WORKSPACE_TOP=${MY_WORKSPACE_TOP:-$MY_WORKSPACE}
   export MY_WORKSPACE=$MY_WORKSPACE_TOP/$BUILD_TYPE
else
   export MY_PATCH_WORKSPACE_TOP=${MY_PATCH_WORKSPACE_TOP:-$MY_PATCH_WORKSPACE}
   export MY_PATCH_WORKSPACE=$MY_PATCH_WORKSPACE_TOP/$BUILD_TYPE
fi

export MY_BUILD_DIR_TOP=${MY_BUILD_DIR_TOP:-$MY_BUILD_DIR}
export MY_BUILD_DIR=$MY_BUILD_DIR_TOP/$BUILD_TYPE

export MY_BUILD_ENVIRONMENT_TOP=${MY_BUILD_ENVIRONMENT_TOP:-$MY_BUILD_ENVIRONMENT}
export MY_BUILD_ENVIRONMENT=$MY_BUILD_ENVIRONMENT_TOP-$BUILD_TYPE

export MY_SRC_RPM_BUILD_DIR=$MY_BUILD_DIR/rpmbuild
export MY_BUILD_ENVIRONMENT_FILE=$MY_BUILD_ENVIRONMENT.cfg
export MY_BUILD_CFG=$MY_WORKSPACE/$MY_BUILD_ENVIRONMENT_FILE
export MY_MOCK_ROOT=$MY_WORKSPACE/mock/root


if [ "$BUILD_TYPE" != "std" ]; then
   PKG_DIRS_FILE=centos_pkg_dirs_$BUILD_TYPE
fi

TARGETS=$@

if [ $HELP -eq 1 ]; then
   usage
   exit 0
fi

if [ $FORMAL_FLAG -eq 1 ]; then
   export FORMAL_BUILD=1
fi

SRC_ROOT="$MY_REPO"
if [ "x$MY_REPO" == "x" ]; then
   SRC_ROOT=$HOME
fi

BUILD_ROOT="$MY_WORKSPACE"
if [ "x$MY_WORKSPACE" == "x" ]; then
   BUILD_ROOT="$MY_PATCH_WORKSPACE"

   if [ "x$MY_PATCH_WORKSPACE" == "x" ]; then
      # Fix: 'reqiure' -> 'require' in the user-visible message.
      echo "ERROR: require one of MY_WORKSPACE or MY_PATCH_WORKSPACE be defined"
      exit 1
   fi
fi

export BUILD_BASE="$BUILD_ROOT"
export CCACHE_DIR="$BUILD_ROOT/.ccache"
export RESULT_DIR="$BUILD_BASE/results"
export SRC_BASE="$SRC_ROOT"

if [ "x$MY_SRC_RPM_BUILD_DIR" != "x" ]; then
   RPM_BUILD_ROOT=$MY_SRC_RPM_BUILD_DIR
else
   RPM_BUILD_ROOT=$BUILD_BASE/rpmbuild
fi

RELEASE_INFO_FILE=$SRC_BASE/addons/wr-cgcs/layers/cgcs/middleware/recipes-common/build-info/release-info.inc
if [ -f $RELEASE_INFO_FILE ]; then
   source $MY_REPO/addons/wr-cgcs/layers/cgcs/middleware/recipes-common/build-info/release-info.inc
else
   echo "ERROR: failed to find RELEASE_INFO_FILE=$RELEASE_INFO_FILE"
   exit 1
fi

if [
"x$PLATFORM_RELEASE" == "x" ]; then
   echo "ERROR: PLATFORM_RELEASE is not defined in $RELEASE_INFO_FILE"
   exit 1
fi

export RPM_BUILD_BASE="$RPM_BUILD_ROOT"
export SRPM_OUT="$RPM_BUILD_BASE/SRPMS"
export RPM_DIR="$RPM_BUILD_BASE/RPMS"
export SPECS_DIR="$RPM_BUILD_BASE/SPECS"
export SOURCES_DIR="$RPM_BUILD_BASE/SOURCES"
export PLATFORM_RELEASE

if [ ! -d $BUILD_BASE ]; then
   echo "ERROR: expected to find directory at '$BUILD_BASE'"
   exit 1
fi


mkdir -p $RPM_BUILD_BASE
if [ $? -ne 0 ]; then
   echo "ERROR: Failed to create directory '$RPM_BUILD_BASE'"
   exit 1
fi

mkdir -p $SRPM_OUT/repodata
if [ $? -ne 0 ]; then
   echo "ERROR: Failed to create directory '$SRPM_OUT/repodata'"
   exit 1
fi

mkdir -p $RPM_DIR/repodata
if [ $? -ne 0 ]; then
   echo "ERROR: Failed to create directory '$RPM_DIR/repodata'"
   exit 1
fi

if [ "x$MY_BUILD_CFG" == "x" ];then
   # Fix: 'reqiure' -> 'require' in the user-visible message.
   echo "ERROR: require MY_BUILD_CFG to be defined"
   exit 1
fi

export BUILD_CFG="$MY_BUILD_CFG"

# Place build-time environment variables in mock environment
echo "FORMAL_BUILD=$FORMAL_BUILD"
echo "modify-build-cfg $BUILD_CFG"
${DIR}/modify-build-cfg $BUILD_CFG
if [ $? -ne 0 ]; then
   # Fix: 'modifiy' -> 'modify' in the user-visible message.
   echo "Could not modify $BUILD_CFG";
   exit 1
fi

if [ ! -f $BUILD_CFG ]; then
   echo "ERROR: Mock config file not found at '$BUILD_CFG'"
   exit 1
fi

# Create symlinks from /var/... to /localdisk/loadbuild/... if on a build server

set_mock_symlinks

ls $SRPM_OUT/*.src.rpm &>> /dev/null
if [ $? -ne 0 ] && [ $CLEAN_FLAG -eq 0 ] ; then
   echo "Nothing to build in '$SRPM_OUT'"
   exit 0
fi

ALL=0
UNRESOLVED_TARGETS=" "
if [ "x$TARGETS" == "x" ]; then
   echo "make: all"
   ALL=1
else
   echo "make: $TARGETS"
   UNRESOLVED_TARGETS="$TARGETS"
fi

if [ "$BUILD_TYPE" != "std" ]; then
   # This defines ...
   # STD_SRPM_PKG_NAME_TO_PATH
   # STD_SRPM_PKG_NAMES
   srpm_build_std_dictionary $MY_WORKSPACE_TOP/std/rpmbuild/SRPMS
fi

# This defines ...
+# SRPM_PKG_NAME_TO_PATH +# SRPM_PKG_NAMES +srpm_build_dictionary $SRPM_OUT + +SRPMS_TO_COMPILE=() +SRPMS_LIST="" +RPMS_LIST="" + +clean_list () { + local SRPMS_LIST="$1" + local RPMS_LIST="$2" + local ALL=$3 + local TARGET + local b + local d + local f + local n + local p + local r + local s + local t + + if [ $ALL -eq 1 ]; then + for r in $(find $RPM_DIR -name "*.rpm"); do + \rm -f -v $r + done + for d in $(find $RESULT_DIR/$USER-* -maxdepth 1 -type d); do + echo "rm -rf $d" + \rm -rf "$d" 2>> /dev/null + done + else + for s in $SRPMS_LIST; do + for p in $(srpm_list_packages $s); do + for r in $(find $RESULT_DIR/$USER-* $RPM_DIR -name "$p-*.rpm"); do + if [ -f $r ]; then + n=$(rpm_get_name $r) + if [ "$n" == "$p" ]; then + TARGET=$(rpm -qp --qf '%{NAME}-%{VERSION}\n' "$r") + for d in $(find $RESULT_DIR/$USER-* -maxdepth 1 -name "$TARGET*" 2>> /dev/null); do + echo "rm -rf $d" + \rm -rf "$d" 2>> /dev/null + done + + \rm -f -v $r + fi + fi + done + done + + TARGET=$(rpm -qp --qf '%{NAME}-%{VERSION}\n' "$s") + for d in $(find $RESULT_DIR/$USER-* -maxdepth 1 -name "$TARGET*" 2>> /dev/null); do + echo "rm -rf $d" + \rm -rf "$d" 2>> /dev/null + done + done + fi + + + echo "Cleaning mock environment" + + if [ $ALL -eq 1 ]; then + if [ "x$RPM_DIR" != "x" ]; then + \rm -rf -v $RPM_DIR/* 2>> /dev/null + fi + + \rm -f -v $RESULT_DIR/mockchain.log 2>> /dev/null + mock_clean + else + echo "rm -rf $BUILD_BASE/mock/root/builddir/build/SOURCES/*" + \rm -rf $BUILD_BASE/mock/root/builddir/build/SOURCES/* 2>> /dev/null + + echo "rm -rf $BUILD_BASE/mock/root/builddir/build/SOURCES/*" + \rm -rf $BUILD_BASE/mock/root/builddir/build/SPECS/* 2>> /dev/null + + for s in $SRPMS_LIST; do + f=$(basename $s) + if [ -f $BUILD_BASE/mock/root/builddir/build/SRPMS/$f ]; then + \rm -f -v $BUILD_BASE/mock/root/builddir/build/SRPMS/$f 2>> /dev/null + fi + if [ -f $BUILD_BASE/mock/root/builddir/build/originals/$f ]; then + \rm -f -v $BUILD_BASE/mock/root/builddir/build/originals/$f 2>> /dev/null + fi 
+ done + + for r in $RPMS_LIST; do + for d in $(find $BUILD_BASE/mock/root/builddir/build/BUILD/ -maxdepth 1 -name '$r*' 2>> /dev/null); do + echo "rm -rf $d" + \rm -rf $d 2>> /dev/null + done + if [ -d $BUILD_BASE/mock/root/builddir/build/RPMS ]; then + for f in $(find $BUILD_BASE/mock/root/builddir/build/RPMS -maxdepth 1 -name "$r*rpm" 2>> /dev/null); do + \rm -f -v $f 2>> /dev/null + done + fi + done + + echo "==================================" + echo "'rpm -e $RPMS_LIST; exit' | $MOCK -r $BUILD_CFG --shell" + echo "LIST='$RPMS_LIST'; " \ + 'DELETE_LIST=""; ' \ + 'for r in $LIST; do ' \ + ' if [ "$r" != "kernel-headers" ] && [ "$r" != "bash" ] && [ "$r" != "centos-release" ] && [ "$r" != "openldap" ] && [ "$r" != "setup" ] && [ "$r" != "shadow-utils" ]; then ' \ + ' FOUND=$(rpm -q $r) ; ' \ + ' if [ $? -eq 0 ]; then ' \ + ' DELETE_LIST="$DELETE_LIST $FOUND"; ' \ + ' fi; ' \ + ' else ' \ + ' echo "Skipping $r"; ' \ + ' fi; ' \ + 'done; ' \ + 'echo "uninstalling these packages: $DELETE_LIST"; ' \ + 'rpm -e --nodeps $DELETE_LIST; ' \ + 'exit' | $MOCK -r $BUILD_CFG --shell + echo "====== $?" 
+ + echo "==================================" + clean_yum_cache + echo "==================================" + echo "$MOCK -r $BUILD_CFG --scrub=root-cache --scrub=yum-cache --scrub=cache" + $MOCK -r $BUILD_CFG --scrub=root-cache --scrub=yum-cache --scrub=cache + echo "==================================" + fi + + echo "Cleaning repodata" + for d in $(find -L $MY_WORKSPACE/rpmbuild $MY_WORKSPACE/results -type d -name repodata); do + recreate_repodata $(dirname $d) + done +} + +( +echo "$CMDLINE" + +if [ $CLEAN_FLAG -eq 0 ]; then + if [ -d $RESULT_DIR ]; then + # in case previous build recieved a ctrl-C and didn't get a change to copy it's successful work into RPM_DIR + for d in $(find $RESULT_DIR -name '*.rpm' | grep -v '[.]src[.]rpm' | xargs --no-run-if-empty --max-args=1 dirname | sort -u); do + rsync -u $d/*.rpm $RPM_DIR + done + for d in $(find -L $RESULT_DIR -type d -name repodata); do + update_repodata $(dirname $d) + done + fi +fi + + +# Find the list of packages we must compile + +for n in "${SRPM_PKG_NAMES[@]}"; do + r=${SRPM_PKG_NAME_TO_PATH[$n]} + # echo "considering $n: $r" + NEED_BUILD=0 + + TMPDIR=$(mktemp -d /tmp/build-rpms-serial-XXXXXX) + cd $TMPDIR + rpm2cpio $r 2>> /dev/null | pax -r '*.spec' + if [ $? -ne 0 ]; then + echo "ERROR: no spec file found in '$r'" + fi + cd - >> /dev/null + + if [ "x$TARGETS" == "x" ]; then + # We weren't given a list of build targets. + # Build anything missing or out of date. + NEED_BUILD=0 + + if [ -f $RESULT_DIR/$USER-$PROJECT-$SRC_BUILD_ENVIRONMENT-$BUILD_TYPE/$(basename ${r//.src.rpm/})/fail ]; then + echo "Previous build of $(basename ${r//.src.rpm/}) failed" + NEED_BUILD=1 + elif [ ! 
-f $RESULT_DIR/$USER-$PROJECT-$SRC_BUILD_ENVIRONMENT-$BUILD_TYPE/$(basename ${r//.src.rpm/})/success ]; then
         echo "No previous build of $(basename ${r//.src.rpm/})"
         NEED_BUILD=1
      else
         # Collect name-version-release tuples for every package the specs produce.
         LOCAL_RPMS_VRA_LIST=""
         for f in $(find $TMPDIR -name '*.spec' | sort -V); do
            for p in $(spec_list_ver_rel_packages $f); do
               LOCAL_RPMS_VRA_LIST="$LOCAL_RPMS_VRA_LIST $p"
            done
         done

         for f in $LOCAL_RPMS_VRA_LIST; do
            m=$(find $RPM_DIR/$f*rpm 2>> /dev/null | wc -l)
            if [ $m -eq 0 ] && [ -f "$UNBUILT_PATTERN_FILE" ]; then
               echo $f | grep -f "$UNBUILT_PATTERN_FILE" >> /dev/null && m=1
               if [ $m -eq 1 ]; then
                  echo "Excluding '$f' due to match in UNBUILT_PATTERN_FILE '$UNBUILT_PATTERN_FILE'"
                  if [ -f "$IMAGE_INC_FILE" ] ; then
                     for t in $(grep -v '^#' "$IMAGE_INC_FILE"); do
                        ii=$(echo $f | grep "^$t-[0-9]" | wc -l)
                        if [ $ii -gt 0 ]; then
                           echo "Including '$f' due to match in IMAGE_INC_FILE '$IMAGE_INC_FILE' due to pattern '^$t-[0-9]'"
                           m=0
                           break
                        fi
                     done
                  fi
               fi
            fi

            # Rebuild when no rpm exists, or an existing rpm is older than the src.rpm.
            newer=$(find $RPM_DIR/$f*rpm -type f -not -cnewer $r 2>> /dev/null | wc -l)
            if [ $m -eq 0 ] || [ $newer -gt 0 ] || [ $CLEAN_FLAG -eq 1 ]; then
               NEED_BUILD=1
               break
            fi
         done
      fi
   else
      # We were given a list of build targets,
      # try to find packages matching that list.
      NEED_BUILD=0
      for f in $(find $TMPDIR -name '*.spec' | sort -V); do
         TARGET_LIST=( $TARGETS )
         TARGET_FOUND=$(spec_match_target_list TARGET_LIST "$f" 2>> /dev/null )
         if [ $?
-eq 0 ]; then
            NEED_BUILD=1
            UNRESOLVED_TARGETS=$(echo "$UNRESOLVED_TARGETS" | sed "s/\(^\|[[:space:]]\)$TARGET_FOUND\([[:space:]]\|$\)/ /g")
            break
         fi
      done
   fi

   # 'build-info' is always rebuilt unless explicitly disabled.
   if [ $NO_BUILD_INFO -eq 0 ]; then
      if [ "$n" == "build-info" ]; then
         NEED_BUILD=1
      fi
   fi

   if [ $NEED_BUILD -eq 1 ]; then
      echo "found $n: $r"
      SRPMS_TO_COMPILE+=("$n")
   fi

   \rm -rf $TMPDIR
done

ORIG_SRPMS_TO_COMPILE=( ${SRPMS_TO_COMPILE[@]} )

echo "SRPMS_TO_COMPILE = ${SRPMS_TO_COMPILE[@]}"


# adding dependent packages
if [ $CLEAN_FLAG -eq 0 ] && [ $NO_DESCENDANTS -eq 0 ] && [ -f $SRPM_DIRECT_DESCENDANTS_FILE ]; then
   echo
   echo "adding dependant packages"

   # This array will accumulate a list of secondary build targets.
   TRANSITIVE_SRPMS_TO_COMPILE=()

   # Add packages that directly depend on the primary build targets in ORIG_SRPMS_TO_COMPILE
   for n in ${ORIG_SRPMS_TO_COMPILE[@]}; do
      needs=( $(grep "^$n;" "$SRPM_DIRECT_DESCENDANTS_FILE" | sed "s/$n;//" | sed 's/,/ /g'; alt_n=$(echo "$n" | sed 's#-rt$##'); if [ "$alt_n" != "$n" ]; then grep "^$alt_n;" "$SRPM_DIRECT_DESCENDANTS_FILE" | sed "s/$alt_n;//" | sed 's/,/ /g' | sed 's#\([^[:space:]]*\)#\1-rt#g'; fi ) )

      # intersection of 'needs' and 'SRPM_PKG_NAMES' ... i.e. what should be compiled that we have source for
      compilable_needs=( $(intersection needs SRPM_PKG_NAMES) )
      TRANSITIVE_SRPMS_TO_COMPILE=( $(union compilable_needs TRANSITIVE_SRPMS_TO_COMPILE) )
   done

   # For non-std build, and if no specific build targets are named, then search all
   # packages that we might build and check if they require a package that DID build
   # in the std build. If so build the package as a secondary target, even though the
   # primary target was from a different build_type.
   if [ "$BUILD_TYPE" != "std" ] && [ $ALL -eq 1 ] && [ -f $SRPM_TO_RPM_MAP_FILE ] && [ -f $SRPM_RPM_DIRECT_REQUIRES_FILE ]; then
      # Test all that we can build ...
+ for n in ${SRPM_PKG_NAMES[@]}; do + contains ORIG_SRPMS_TO_COMPILE $n + if [ $? -eq 0 ]; then + # Already on the primary build list, skip it. + echo "skip $n" + continue + fi + + STD_NEEDS_BUILD=0 + + # Iterate over all binary rpms names produce by the candidate package + for b in $(grep "^$n;" "$SRPM_TO_RPM_MAP_FILE" | sed "s/$n;//" | sed 's/,/ /g'); do + # find an rpm file with the rpm name we seek + for bp in $(find $RPM_DIR -name "$b-[0-9]*.rpm" | grep -v '.src.rpm'); do + if [ "$b" != "$(rpm_get_name $bp)" ]; then + # rpm name doesn't match + continue + fi + + # Iterate over binary rpms names required by the candidate package + for r in $(grep "^$n;" "$SRPM_RPM_DIRECT_REQUIRES_FILE" | sed "s/$n;//" | sed 's/,/ /g'); do + # find a required rpm file with the rpm name we seek, AND is newer than the produced rpm file + for rp in $(find $(echo $RPM_DIR | sed "s#/$BUILD_TYPE/#/std/#") -name "$r-[0-9]*.rpm" -cnewer $bp | grep -v '.src.rpm'); do + if [ "$r" != "$(rpm_get_name $rp)" ]; then + # rpm name doesn't match + continue + fi + + # Ok, a required rpm is newer than a built rpm, we should rebuild! + echo "rebuild '$n' due to newer '$r'" + STD_NEEDS_BUILD=1 + break + done + done + done + + # Avoid pointless processing if we already have a positive result. + if [ $STD_NEEDS_BUILD -eq 1 ]; then + break + fi + done + + if [ $STD_NEEDS_BUILD -eq 1 ]; then + # Compile is requires due to an updated required package in the std build. + # Add 'n' to array TRANSITIVE_SRPMS_TO_COMPILE. + TRANSITIVE_SRPMS_TO_COMPILE=( $(put TRANSITIVE_SRPMS_TO_COMPILE $n) ) + fi + done + fi + + # If the kernel or kernel-rt packages where absent from the primary build targets, but + # added as a secondary target, then make sure all out-of-tree kernel modules are also + # added. 
+ for n in kernel kernel-rt; do + KERNEL_IN_ORIG=0 + KERNEL_IN_TRANSITIVE=0 + contains ORIG_SRPMS_TO_COMPILE "$n" && KERNEL_IN_ORIG=1 + contains TRANSITIVE_SRPMS_TO_COMPILE "$n" && KERNEL_IN_TRANSITIVE=1 + if [ $KERNEL_IN_TRANSITIVE -eq 1 ] && [ $KERNEL_IN_ORIG -eq 0 ]; then + needs=( $(grep "^$n;" "$SRPM_DIRECT_DESCENDANTS_FILE" | sed "s/$n;//" | sed 's/,/ /g'; alt_n=$(echo "$n" | sed 's#-rt$##'); if [ "$alt_n" != "$n" ]; then grep "^$alt_n;" "$SRPM_DIRECT_DESCENDANTS_FILE" | sed "s/$alt_n;//" | sed 's/,/ /g' | sed 's#\([^[:space:]]*\)#\1-rt#g'; fi ) ) + + # intersection of 'needs' and 'SRPM_PKG_NAMES' ... i.e. what should be compiled that we have source for + compilable_needs=( $(intersection needs SRPM_PKG_NAMES) ) + TRANSITIVE_SRPMS_TO_COMPILE=( $(union compilable_needs TRANSITIVE_SRPMS_TO_COMPILE) ) + fi + done + + # Append the secondary targetc list to the primary list + SRPMS_TO_COMPILE=( $(union SRPMS_TO_COMPILE TRANSITIVE_SRPMS_TO_COMPILE) ) + echo "SRPMS_TO_COMPILE = ${SRPMS_TO_COMPILE[@]}" +fi + + +MUST_SRPMS_TO_COMPILE=( ${SRPMS_TO_COMPILE[@]} ) + +# adding required packages +if [ $CLEAN_FLAG -eq 0 ] && [ "x$TARGETS" != "x" ] && [ $NO_REQUIRED -eq 0 ] && [ -f $SRPM_TRANSITIVE_REQUIRES_FILE ]; then + echo + echo "adding required packages" + TRANSITIVE_SRPMS_TO_COMPILE=() + for n in ${MUST_SRPMS_TO_COMPILE[@]}; do + needs=( $(grep "^$n;" "$SRPM_TRANSITIVE_REQUIRES_FILE" | sed "s/$n;//" | sed 's/,/ /g') ) + + # intersection of 'needs' and 'SRPM_PKG_NAMES' ... i.e. 
what should be compiled that we have source for + compilable_needs=( $(intersection needs SRPM_PKG_NAMES) ) + TRANSITIVE_SRPMS_TO_COMPILE=( $(union compilable_needs TRANSITIVE_SRPMS_TO_COMPILE) ) + + for b in "${un[@]}"; do + echo $b + done + done + + SRPMS_TO_COMPILE=( $(union TRANSITIVE_SRPMS_TO_COMPILE SRPMS_TO_COMPILE) ) + echo "SRPMS_TO_COMPILE = ${SRPMS_TO_COMPILE[@]}" +fi + + +# Determine build order +SRPMS_TO_COMPILE=( $(echo ${SRPMS_TO_COMPILE[@]} | sed 's/ /\n/g' | sort -u) ) +if [ $CLEAN_FLAG -eq 0 ]; then + echo + echo "Calculate optimal build order" + SRPMS_TO_COMPILE=( $(build_order SRPMS_TO_COMPILE) ) + echo "SRPMS_TO_COMPILE = ${SRPMS_TO_COMPILE[@]}" +fi + + +# convert pkg names to paths, clean work dirs if needed +echo +echo "Mapping packages to src rpm paths" +for n in ${SRPMS_TO_COMPILE[@]}; do + r=${SRPM_PKG_NAME_TO_PATH[$n]} + + TMPDIR=$(mktemp -d /tmp/build-rpms-serial-XXXXXX) + cd $TMPDIR + rpm2cpio $r | pax -r '*.spec' + if [ $? -ne 0 ]; then + echo "ERROR: no spec file found in '$r'" + fi + cd - >> /dev/null + + SRPMS_LIST="$SRPMS_LIST $r" + # echo "SRPMS_LIST = $SRPMS_LIST" + + for f in $(find $TMPDIR -name '*.spec' | sort -V); do + for p in $(spec_list_packages $f); do + RPMS_LIST="$RPMS_LIST $p" + done + done + + # echo "RPMS_LIST = $RPMS_LIST" + + \rm -rf $TMPDIR +done + +if [ $CLEAN_FLAG -eq 0 ]; then + update_cgcs_repo cgcs-centos-repo + if [ -d $MY_REPO/cgcs-3rd-party-repo ]; then + update_cgcs_repo cgcs-3rd-party-repo + fi +fi + +# clean work dirs if needed +CLEAN_BEFORE_BUILD_SRPM_LIST="" +CLEAN_BEFORE_BUILD_RPM_LIST="" +if [ $CLEAN_FLAG -eq 0 ]; then + echo + echo "Calculating minimal clean list" + for nm in ${SRPMS_TO_COMPILE[@]}; do + MUST_CLEAN=0 + contains MUST_SRPMS_TO_COMPILE $nm && MUST_CLEAN=1 + + r=${SRPM_PKG_NAME_TO_PATH[$nm]} + + TMPDIR=$(mktemp -d /tmp/build-rpms-serial-XXXXXX) + cd $TMPDIR + rpm2cpio $r | pax -r '*.spec' + if [ $? 
-ne 0 ]; then
         echo "ERROR: no spec file found in '$r'"
      fi
      cd - >> /dev/null

      LOCAL_RPMS_LIST=""
      for f in $(find $TMPDIR -name '*.spec' | sort -V); do
         for p in $(spec_list_packages $f); do
            LOCAL_RPMS_LIST="$LOCAL_RPMS_LIST $p"
         done
      done

      LOCAL_RPMS_VRA_LIST=""
      for f in $(find $TMPDIR -name '*.spec' | sort -V); do
         for p in $(spec_list_ver_rel_packages $f); do
            LOCAL_RPMS_VRA_LIST="$LOCAL_RPMS_VRA_LIST $p"
         done
      done

      for f in $LOCAL_RPMS_VRA_LIST; do
         m=$(find $RPM_DIR/$f*rpm 2>> /dev/null | wc -l)
         if [ -f "$UNBUILT_PATTERN_FILE" ]; then
            echo $f | grep -f "$UNBUILT_PATTERN_FILE" >> /dev/null && m=1
         fi

         # Clean when no rpm exists, an rpm is stale, or a rebuild is forced.
         n=$(find $RPM_DIR/$f*rpm -type f -not -cnewer $r 2>> /dev/null | wc -l)
         if [ $m -eq 0 ] || [ $n -gt 0 ] || [ $MUST_CLEAN -eq 1 ]; then
            CLEAN_BEFORE_BUILD_SRPM_LIST="$CLEAN_BEFORE_BUILD_SRPM_LIST $r"
            CLEAN_BEFORE_BUILD_RPM_LIST="$CLEAN_BEFORE_BUILD_RPM_LIST $LOCAL_RPMS_LIST"
            break
         fi
      done

      \rm -rf $TMPDIR
   done
fi


if [ "$UNRESOLVED_TARGETS" != " " ]; then
   if [ $CLEAN_FLAG -eq 0 ]; then
      echo ""
      echo "ERROR: failed to resolve build targets: $UNRESOLVED_TARGETS"
      exit 1
   fi
fi

echo "SRPMS_LIST = $SRPMS_LIST"
echo "RPMS_LIST = $RPMS_LIST"


echo
if [ -d $MY_WORKSPACE/mock ]; then
   echo "Updating the mock environment"
   echo "=================================="
   set_mock_symlinks
   echo "$MOCK -r $BUILD_CFG --update"
   $MOCK -r $BUILD_CFG --update
   echo "=================================="
else
   echo "Init the mock environment"
   echo "=================================="
   set_mock_symlinks
   echo "$MOCK -r $BUILD_CFG --init"
   $MOCK -r $BUILD_CFG --init
   echo "=================================="
fi
set_mock_symlinks

echo
echo "Cleaning"
if [ $CLEAN_FLAG -eq 1 ]; then
   # Clean what the user asked for
   echo "========= clean_list '$SRPMS_LIST' '$RPMS_LIST' $ALL"
   \rm -r -f -v
$MY_WORKSPACE/mock-$USER-* + clean_list "$SRPMS_LIST" "$RPMS_LIST" "$ALL" + + exit 0 +else + # Clean what we intend to build + if [ $NO_AUTOCLEAN -eq 1 ]; then + echo "no-autoclean was requested" + else + if [ "$CLEAN_BEFORE_BUILD_SRPM_LIST" != "" ]; then + echo "========= clean_list '$CLEAN_BEFORE_BUILD_SRPM_LIST' '$CLEAN_BEFORE_BUILD_RPM_LIST' 0" + clean_list "$CLEAN_BEFORE_BUILD_SRPM_LIST" "$CLEAN_BEFORE_BUILD_RPM_LIST" 0 + fi + fi +fi + +echo +echo "Cleaning caches" +clean_yum_cache +$MOCK -r $BUILD_CFG --scrub=all + + +echo +echo "Cleaning repodata" + +BUILD_ENVIRONMENT_DIR=$(basename $BUILD_CFG) +BUILD_ENVIRONMENT_DIR=${BUILD_ENVIRONMENT_DIR%.*} +LOCAL_URL=http://127.0.0.1:8088$BUILD_BASE/results/$BUILD_ENVIRONMENT_DIR/ +LOCAL_SRC_URL=http://127.0.0.1:8088$BUILD_BASE/rpmbuild/SRPMS/ + +for d in $(find -L $RESULT_DIR -type d -name repodata); do +(cd $d/.. + if [ -f repodata/*comps*xml ]; then + \mv repodata/*comps*xml comps.xml + fi + \rm -rf repodata +) +done + +echo +echo "Building" + +MOCKCHAIN_LOG="$RESULT_DIR/mockchain.log" +mkdir -p $RESULT_DIR +touch $RESULT_DIR/build_start +\rm -rf $MOCKCHAIN_LOG + +(grep '^[[]' $BUILD_CFG | grep -v main | sed 's/[][]//g' | sed 's#^#yum --enablerepo=#' | sed 's#$# clean metadata#'; echo "exit") | $MOCK -r $BUILD_CFG --shell + +CMD_PREFIX="" +if [ -x /bin/ionice ]; then + CMD_PREFIX="nice -n 20 ionice -c Idle /bin/ionice " +fi + +CMD_OPTIONS="-m --no-clean -m --no-cleanup-after" +if [ $CAREFUL -eq 1 ]; then + CMD_OPTIONS="-m --no-cleanup-after" +fi +echo "CAREFUL=$CAREFUL" +echo "CMD_OPTIONS=$CMD_OPTIONS" + +CMD="$CMD_PREFIX mockchain -r $BUILD_CFG -l $BUILD_BASE --recurse --log=$MOCKCHAIN_LOG --tmp_prefix=$USER --addrepo=$LOCAL_URL --addrepo=$LOCAL_SRC_URL -m --rootdir=$BUILD_BASE/mock/root $CMD_OPTIONS -m --rebuild $SRPMS_LIST -m --define='_tis_dist .tis' -m --define='platform_release $PLATFORM_RELEASE'" +echo "$CMD" +eval stdbuf -o0 $CMD +MOCKCHAIN_RC=$? 

for d in $(find $RESULT_DIR -name '*.rpm' | grep -v '[.]src[.]rpm' | xargs --max-args=1 dirname | sort -u); do
   rsync -u $d/*.rpm $RPM_DIR
done

if [ $ALL -eq 1 ]; then
   echo
   echo "Auditing for obsolete srpms"
   for r in $(find $RESULT_DIR $RPM_DIR -name '*.src.rpm'); do
      (
      f=$(basename $r)
      if [ ! -f "$SRPM_OUT/$f" ]; then
         \rm -fv $r
      fi
      ) &
   done
   echo "waiting for srpm audit to complete"
   wait
   echo "Auditing for obsolete rpms"
   for r in $(find $RESULT_DIR $RPM_DIR -name '*.rpm' | grep -v 'src.rpm'); do
      (
      s=$(rpm_get_srpm $r)
      if [ ! -f "$SRPM_OUT/$s" ]; then
         echo "Failed to find '$SRPM_OUT/$s'"
         \rm -fv $r
      fi
      ) &
   done
   echo "waiting for rpm audit to complete"
   wait
   echo "Audit complete"
   echo ""
fi

if [ $MOCKCHAIN_RC -ne 0 ]; then
   echo "ERROR: Failed to build rpms using '$CMD'"
   exit 1
fi

echo "Recreate repodata"
for d in $(find -L $MY_WORKSPACE/rpmbuild $MY_WORKSPACE/results -type d -name repodata); do
   update_repodata $(dirname "$d")
done


if [ -f $MOCKCHAIN_LOG ]; then
   grep 'following pkgs could not be successfully built' $MOCKCHAIN_LOG >> /dev/null
   if [ $? -eq 0 ]; then
      FAILED_PKGS=""
      for p in $(sed -n '/following pkgs could not be successfully built:/,/Results out to/p' $MOCKCHAIN_LOG | sed 1d | sed '$ d'); do
         PKG=$(basename $p)
         FAILED_PKGS="$PKG $FAILED_PKGS"
      done
      echo
      echo "Failed to build packages: $FAILED_PKGS"
      exit 1
   fi
fi

# If we're doing a nightly or formal build (i.e. not a developer build) then we
# want to sign certain packages. Note that only certain users (i.e. jenkins)
# have the authority to request that packages be signed.
#
# Signing is not actually done on this server (the keys are kept safe on a
# different server with very limited access) but we can invoke a script to
# make calls to the signing server. Note that this will NOT work if you are
# not Jenkins and don't have access to the Jenkins cross server login keys.
+# +# Note that both std and rt builds must be complete before invoking the signing +# script +if [ 0$FORMAL_BUILD -eq 1 ] && [ "$USER" == "jenkins" ]; then + if [ -e $MY_WORKSPACE_TOP/std ] && [ -e $MY_WORKSPACE_TOP/rt ]; then + # Create dir for log, if it doesn't exit + mkdir -p $MY_WORKSPACE_TOP/export + echo "We are jenkins, and we are trying to do a formal build -- calling signing server" + echo " to sign boot RPMs with secure boot keys" + + MY_WORKSPACE=$MY_WORKSPACE_TOP sign-build > $MY_WORKSPACE_TOP/export/sign-build.log 2>&1 + if [ $? -ne 0 ]; then + echo "Signing of packages failed -- see $MY_WORKSPACE_TOP/export/sign-build.log" + exit 1 + fi + fi +fi + +exit 0 +) 2>&1 | stdbuf -o0 awk '{ print strftime("%H:%M:%S"), $0; fflush(); }' | tee $(date "+$MY_WORKSPACE/build-rpms-serial_%Y-%m-%d_%H-%M-%S.log") ; exit ${PIPESTATUS[0]} diff --git a/build-tools/build-rpms4 b/build-tools/build-rpms4 new file mode 120000 index 00000000..caca9cc7 --- /dev/null +++ b/build-tools/build-rpms4 @@ -0,0 +1 @@ +build-rpms-parallel \ No newline at end of file diff --git a/build-tools/build-sdk b/build-tools/build-sdk new file mode 100755 index 00000000..563bd520 --- /dev/null +++ b/build-tools/build-sdk @@ -0,0 +1,70 @@ +#!/bin/bash + +# We find SDK files in two ways -- one is that any package with an +# "-cgts-sdk" component in the filename is deemed to have SDK content +# (this works great for packages that produce installable content, as well +# as SDK content which is placed in a separate -cgts-sdk package) +# +# The second way is explicitly name packages that contain SDK content. This +# works well for packages which only contain SDK content. The other reason +# that calling out packages explcitly may be of use is that some packages +# (like restapi-doc) may not be built automatically because they get caught +# in the blacklist filter (i.e. we normally don't build restapi-doc because +# it contains "doc"). 
+
+EXPLICIT_PACKAGES=( \
+    wrs-branding \
+    wrs-heat-templates \
+    install-log-server \
+    restapi-doc \
+    remote-clients \
+    )
+
+extract_to_sdk () {
+    pushd $SDKTMP
+    rpm2cpio $1 | cpio -id
+    popd
+}
+
+SDKDIR=$MY_WORKSPACE/export/cgts-sdk
+SDKTMP=$MY_WORKSPACE/export/cgts-sdk_tmp
+SDKTMPFILE=$MY_WORKSPACE/export/sdk_tmp_file
+
+# These patterns match packages for which we find SDK tarballs
+PATTERNS=(wrs-branding*.x86_64.rpm \
+    wrs-heat-templates*.x86_64.rpm \
+    *cgts-sdk*.x86_64.rpm \
+    install-log-server*.x86_64.rpm \
+    restapi-doc*.x86_64.rpm \
+    remote-clients*.x86_64.rpm )
+
+rm -f $SDKTMPFILE
+
+for pkg in "${EXPLICIT_PACKAGES[@]}"; do
+    ls $MY_WORKSPACE/std/rpmbuild/RPMS/$pkg*.x86_64.rpm > /dev/null 2>&1
+    if [ $? -ne 0 ]; then # no package found -- build it
+        build-rpms --std $pkg --no-descendants
+    fi
+done
+
+for pat in "${PATTERNS[@]}"; do
+    find $MY_WORKSPACE/std/rpmbuild/RPMS/$pat >> $SDKTMPFILE
+done
+
+rm -rf $SDKDIR $SDKTMP
+mkdir -p $SDKDIR
+mkdir -p $SDKTMP
+
+# extract the files
+while read rpm; do
+    extract_to_sdk $rpm
+done < $SDKTMPFILE
+
+find $SDKTMP -name "*.tgz" -exec cp -v {} $SDKDIR \;
+
+rm -f $SDKTMPFILE
+rm -rf $SDKTMP
+
+echo ""
+echo "Done. SDK in $SDKDIR"
+echo ""
diff --git a/build-tools/build-srpms b/build-tools/build-srpms
new file mode 100755
index 00000000..aef8a681
--- /dev/null
+++ b/build-tools/build-srpms
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+# This program is a wrapper around build-srpms-parallel and build-srpms-serial
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+usage () {
+    echo ""
+    echo "Usage: "
+    echo "   Create source rpms:"
+    echo "      build-srpms [--serial] [args]"
+}
+
+SERIAL_FLAG=0
+
+for arg in "$@"; do
+    case "$arg" in
+        --serial) SERIAL_FLAG=1 ;;
+    esac
+done
+
+which mock_tmpfs_umount >> /dev/null
+if [ $? -ne 0 ]; then
+    SERIAL_FLAG=1
+fi
+
+if [ $SERIAL_FLAG -eq 1 ]; then
+    echo "build-srpms-serial $@"
+    build-srpms-serial "$@"
+else
+    echo "build-srpms-parallel $@"
+    build-srpms-parallel "$@"
+fi
+
diff --git a/build-tools/build-srpms-parallel b/build-tools/build-srpms-parallel
new file mode 100755
index 00000000..20f08662
--- /dev/null
+++ b/build-tools/build-srpms-parallel
@@ -0,0 +1,1438 @@
+#!/bin/bash
+# set -x
+
+export ME=$(basename "$0")
+CMDLINE="$ME $@"
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source $DIR/spec-utils
+source $DIR/srpm-utils
+source $DIR/classify
+
+INITIAL_DIR=$(pwd)
+export DISTRO="centos"
+SRPM_SCRIPT="build_srpm"
+SRPM_DATA="build_srpm.data"
+PKG_DIRS_FILE=centos_pkg_dirs
+
+DEFAULT_SRPM_SCRIPT="$DIR/default_$SRPM_SCRIPT"
+SCRIPT_PATH="$DISTRO"
+DATA_PATH="$DISTRO"
+FILES_PATH="$DISTRO/files"
+ORIG_SPECS_PATH="$DISTRO"
+SRPM_LIST_PATH="$DISTRO/srpm_path"
+MIRROR_ROOT="$MY_REPO/cgcs-centos-repo"
+THIRD_PARTY_ROOT="$MY_REPO/cgcs-3rd-party-repo"
+REPO_DOWNLOADS_ROOT="$MY_REPO"
+SRPM_REBUILT_LIST=""
+SRPM_FAILED_REBUILD_LIST=""
+
+STOP_SCHEDULING=0
+
+ABSOLUTE_MAX_WORKERS=8
+MAX_WORKERS=$(grep -c ^processor /proc/cpuinfo)
+if [ "$MAX_WORKERS" == "" ] || [ "$MAX_WORKERS" == "0" ]; then
+    MAX_WORKERS=1
+fi
+
+if [ $MAX_WORKERS -gt $ABSOLUTE_MAX_WORKERS ]; then
+    MAX_WORKERS=$ABSOLUTE_MAX_WORKERS
+fi
+
+echo "MAX_WORKERS=$MAX_WORKERS"
+
+CREATEREPO=$(which createrepo_c)
+if [ $?
-ne 0 ]; then + CREATEREPO="createrepo" +fi + +usage () { + echo "" + echo "Usage: " + echo " Create source rpms:" + echo " $ME [--rt | --std | --installer] [--no-descendants] [--formal] [ list of package names ]" + echo "" + echo " Delete source rpms, and the directories associated with it's creation:" + echo " Note: does not clean an edit environment" + echo " $ME --clean [--rt | --std | --installer] [optional list of package names]" + echo "" + echo " Extract an src.rpm into a pair of git trees to aid in editing it's contents," + echo " one for source code and one for metadata such as the spec file." + echo " If --no-meta-patch is specified, then WRS patches are omitted." + echo " $ME --edit [--rt | --std | --installer] [--no-meta-patch] [list of package names]" + echo "" + echo " Delete an edit environment" + echo " $ME --edit --clean [--rt | --std | --installer] [list of package names]" + echo "" + echo " This help page" + echo " $ME --help" + echo "" +} + +# This function creates a bunch of subdirs in $MY_WORKSPACE and makes sure +# that a $MY_BUILD_CFG file exists. +# +# The goal of this is to have a script do as much of the annoying +# grunt-work so that the "how to build it" instructions aren't 200 lines +create_output_dirs () { + # make sure variables are sane before continuing + # Note that $BUILD_ROOT contains either $MY_WORKSPACE or $MY_PATCH_WORKSPACE + if [ "x$BUILD_ROOT" == "x" ]; then + return + fi + if [ "x$MY_BUILD_CFG" == "x" ]; then + return + fi + if [ "x$MY_BUILD_DIR" == "x" ]; then + return + fi + if [ "x$MY_SRC_RPM_BUILD_DIR" == "x" ]; then + return + fi + + # create output dirs + mkdir -p $MY_BUILD_DIR + mkdir -p $MY_SRC_RPM_BUILD_DIR + mkdir -p $MY_SRC_RPM_BUILD_DIR/SOURCES + mkdir -p $MY_SRC_RPM_BUILD_DIR/SPECS + mkdir -p $MY_SRC_RPM_BUILD_DIR/BUILD + mkdir -p $MY_SRC_RPM_BUILD_DIR/RPMS + mkdir -p $MY_SRC_RPM_BUILD_DIR/SRPMS + + # create $MY_BUILD_CFG, if required + if [ ! 
-f $MY_BUILD_CFG ]; then + echo "FORMAL_BUILD=$FORMAL_BUILD" + echo "modify-build-cfg $MY_BUILD_CFG" + ${DIR}/modify-build-cfg $MY_BUILD_CFG + if [ $? -ne 0 ]; then + echo "Could not modifiy $MY_BUILD_CFG"; + exit 1 + fi + fi + +} + +NO_DESCENDANTS=0 +NO_BUILD_INFO=0 +HELP=0 +CLEAN_FLAG=0 +FORMAL_FLAG=0 +BUILD_TYPE_FLAG=0 +EDIT_FLAG=0 +NO_META_PATCH_FLAG=0 + +# read the options +TEMP=$(getopt -o ha::bc: --long std,rt,installer,no-descendants,no-meta-patch,no-build-info,help,formal,clean,edit,arga::,argb,argc: -n "$ME" -- "$@") + +if [ $? -ne 0 ]; then + usage + exit 1 +fi + +eval set -- "$TEMP" + +export BUILD_TYPE=std + +# extract options and their arguments into variables. +while true ; do + case "$1" in + -a|--arga) + case "$2" in + "") ARG_A='some default value' ; shift 2 ;; + *) ARG_A=$2 ; shift 2 ;; + esac ;; + -b|--argb) ARG_B=1 ; shift ;; + --no-descendants) NO_DESCENDANTS=1 ; shift ;; + --no-build-info) NO_BUILD_INFO=1 ; shift ;; + -h|--help) HELP=1 ; shift ;; + -c|--argc) + case "$2" in + "") shift 2 ;; + *) ARG_C=$2 ; shift 2 ;; + esac ;; + --clean) CLEAN_FLAG=1 ; shift ;; + --formal) FORMAL_FLAG=1 ; shift ;; + --std) BUILD_TYPE_FLAG=1; BUILD_TYPE=std; shift ;; + --rt) BUILD_TYPE_FLAG=1; BUILD_TYPE=rt; shift ;; + --installer) BUILD_TYPE=installer; shift ;; + --edit) EDIT_FLAG=1 ; shift ;; + --no-meta-patch) NO_META_PATCH_FLAG=1 ; shift ;; + --) shift ; break ;; + *) echo "Internal error!" 
; exit 1 ;; + esac +done + +# Reset variables +if [ -n "$MY_WORKSPACE" ]; then + export MY_WORKSPACE_TOP=${MY_WORKSPACE_TOP:-$MY_WORKSPACE} + export MY_WORKSPACE=$MY_WORKSPACE_TOP/$BUILD_TYPE +else + export MY_PATCH_WORKSPACE_TOP=${MY_PATCH_WORKSPACE_TOP:-$MY_PATCH_WORKSPACE} + export MY_PATCH_WORKSPACE=$MY_PATCH_WORKSPACE_TOP/$BUILD_TYPE +fi + +export MY_BUILD_DIR_TOP=${MY_BUILD_DIR_TOP:-$MY_BUILD_DIR} +export MY_BUILD_DIR=$MY_BUILD_DIR_TOP/$BUILD_TYPE + +export MY_BUILD_ENVIRONMENT_TOP=${MY_BUILD_ENVIRONMENT_TOP:-$MY_BUILD_ENVIRONMENT} +export MY_BUILD_ENVIRONMENT=$MY_BUILD_ENVIRONMENT_TOP-$BUILD_TYPE + +export MY_BUILD_ENVIRONMENT_FILE=$MY_BUILD_ENVIRONMENT.cfg +export MY_SRC_RPM_BUILD_DIR=$MY_BUILD_DIR/rpmbuild +export MY_BUILD_CFG=$MY_WORKSPACE/$MY_BUILD_ENVIRONMENT_FILE +export MY_MOCK_ROOT=$MY_WORKSPACE/mock/root + +if [ "$BUILD_TYPE" != "std" ]; then + PKG_DIRS_FILE=centos_pkg_dirs_$BUILD_TYPE +fi + +echo "CLEAN_FLAG=$CLEAN_FLAG" +TARGETS=$@ + +if [ $HELP -eq 1 ]; then + usage + exit 0 +fi + +if [ $FORMAL_FLAG -eq 1 ]; then + export FORMAL_BUILD="yes" +fi + +if [ "x$TARGETS" == "x" ] && [ $EDIT_FLAG -eq 1 ]; then + echo "ERROR: a package name is required when --edit is specified" + usage + exit 0 +fi + +SRC_ROOT="$MY_REPO" +if [ "x$MY_REPO" == "x" ]; then + SRC_ROOT=$INITIAL_DIR +fi + +BUILD_ROOT="$MY_WORKSPACE" +if [ "x$MY_WORKSPACE" == "x" ]; then + BUILD_ROOT="$MY_PATCH_WORKSPACE" + + if [ "x$MY_PATCH_WORKSPACE" == "x" ]; then + echo "ERROR: require one of MY_WORKSPACE or MY_PATCH_WORKSPACE be defined" + exit 1 + fi +fi + +export CCACHE_DIR="$BUILD_ROOT/.ccache" +export SRC_BASE="$SRC_ROOT" +export CGCS_BASE="$SRC_BASE/addons/wr-cgcs/layers/cgcs" +export SPECS_BASE="$ORIG_SPECS_PATH" +export FILES_BASE="$FILES_PATH" + +export BUILD_BASE="$BUILD_ROOT" +BUILD_INPUTS="$BUILD_BASE/inputs" +SRPM_ASSEMBLE="$BUILD_BASE/srpm_assemble" +SRPM_WORK="$BUILD_BASE/srpm_work" + +if [ "x$MY_SRC_RPM_BUILD_DIR" != "x" ]; then + RPM_BUILD_ROOT=$MY_SRC_RPM_BUILD_DIR +else + 
RPM_BUILD_ROOT=$BUILD_BASE/rpmbuild +fi + +create_output_dirs + +export RPM_BUILD_BASE="$RPM_BUILD_ROOT" +export SRPM_OUT="$RPM_BUILD_BASE/SRPMS" +export SOURCE_OUT="$RPM_BUILD_BASE/SOURCES" +export RPM_DIR="$RPM_BUILD_BASE/RPMS" + +if [ ! -d $CGCS_BASE ]; then + echo "ERROR: expected to find directory at '$CGCS_BASE'" + exit 1 +fi + +if [ ! -d $BUILD_BASE ]; then + echo "ERROR: expected to find directory at '$BUILD_BASE'" + exit 1 +fi + +RELEASE_INFO_FILE=$SRC_BASE/addons/wr-cgcs/layers/cgcs/middleware/recipes-common/build-info/release-info.inc +if [ -f $RELEASE_INFO_FILE ]; then + source $MY_REPO/addons/wr-cgcs/layers/cgcs/middleware/recipes-common/build-info/release-info.inc +else + echo "ERROR: failed to find RELEASE_INFO_FILE=$RELEASE_INFO_FILE" + exit 1 +fi + +if [ "x$PLATFORM_RELEASE" == "x" ]; then + echo "ERROR: PLATFORM_RELEASE is not defined in $RELEASE_INFO_FILE" + exit 1 +fi + +export PLATFORM_RELEASE + +mkdir -p $RPM_BUILD_BASE +if [ $? -ne 0 ]; then + echo "ERROR: Failed to create directory '$RPM_BUILD_BASE'" + exit 1 +fi + +mkdir -p $SRPM_OUT +if [ $? -ne 0 ]; then + echo "ERROR: Failed to create directory '$SRPM_OUT'" + exit 1 +fi + +mkdir -p $RPM_DIR +if [ $? -ne 0 ]; then + echo "ERROR: Failed to create directory '$RPM_DIR'" + exit 1 +fi + +build_dir () { + local build_idx=$1 + local d=$2 + local w=$3 + export PKG_BASE=$d + export WORK_BASE=$w + export SPECS_BASE="$PKG_BASE/$ORIG_SPECS_PATH" + local RC + + local ORIG_DIR=$(pwd) + # echo "build_dir: PKG_BASE=$PKG_BASE" + + cd "$PKG_BASE" + if [ $? -ne 0 ]; then + echo "ERROR: failed to cd into '$PKG_BASE'" + return 1 + fi + + if [ ! 
-d $ORIG_SPECS_PATH ]; then + # nothing to do + echo "WARNING: '$ORIG_SPECS_PATH' not found in '$PKG_BASE'" + cd "$ORIG_DIR" + return 0 + fi + + SRPM_COUNT=0 + ORIG_SRPM_PATH="" + if [ -f $SRPM_LIST_PATH ]; then + # we've found a file (ex centos/srpm_path) which lists a path to a source + # RPM file + # + # The specified file can be of the form + # + # repo:path/to/file.src.rpm + # mirror:path/to/file.src.rpm + # /path/to/file.rpm + # path/to/file.rpm + # + # If "repo:" is specified, then we search for the file relative to + # $REPO_DOWNLOADS_ROOT (i.e. a path to the file in a "downloads subgit) + # + # If "mirror:" is specified, then we search for the file relateive to + # $MIRROR_ROOT + # + # If "3rd_party:" is specified, then we search for the file relateive to + # $THIRD_PARTY_ROOT + # + # An absolute path is parsed as an absolute path (mainly intended for + # developer/experimental use without checking in files or messing with + # your git repos) + # + # A lack of prefix (relative path name) is interpretted as "mirror:" + # (legacy support for existing packages) + # + # Other prefixes (file:, http:, whatever:)are unsupported at this time + + for p in $(grep -v '^#' $SRPM_LIST_PATH | grep -v '^$'); do + # absolute path source rpms + echo "$p" | grep "^/" >/dev/null && ORIG_SRPM_PATH=$p + + if [ "${ORIG_SRPM_PATH}x" == "x" ]; then + # handle repo: definitions + echo "$p" | grep "^repo:" >/dev/null && ORIG_SRPM_PATH=$(echo $p | sed "s%^repo:%$REPO_DOWNLOADS_ROOT/%") + fi + + if [ "${ORIG_SRPM_PATH}x" == "x" ]; then + # handle mirror: definitions + echo "$p" | grep "^3rd_party:" >/dev/null && ORIG_SRPM_PATH=$(echo $p | sed "s%^3rd_party:%$THIRD_PARTY_ROOT/%") + fi + + if [ "${ORIG_SRPM_PATH}x" == "x" ]; then + # handle mirror: definitions + echo "$p" | grep "^mirror:" >/dev/null && ORIG_SRPM_PATH=$(echo $p | sed "s%^mirror:%$MIRROR_ROOT/%" | sed "s#CentOS/tis-r3-CentOS/kilo/##" | sed "s#CentOS/tis-r3-CentOS/mitaka/##") + fi + + if [ "${ORIG_SRPM_PATH}x" == "x" ]; 
then + # we haven't found a valid prefix yet, so assume it's a legacy + # file (mirror: interpretation) + ORIG_SRPM_PATH="$MIRROR_ROOT/$p" + fi + + # echo "ORIG_SRPM_PATH=$ORIG_SRPM_PATH" + if [ -f $ORIG_SRPM_PATH ]; then + SRPM_COUNT=$((SRPM_COUNT + 1)) + else + echo "ERROR: Invalid srpm path '$p', evaluated as '$ORIG_SRPM_PATH', found in '$PKG_BASE/$SRPM_LIST_PATH'" + ORIG_SRPM_PATH="" + return 3 + fi + done + fi + + # Clean up an tmp_spec_*.spec file left by a prior failed build + for f in $(find $ORIG_SPECS_PATH -name 'tmp_spec_*.spec'); do + \rm -f $f + done + + SPEC_COUNT=$(find $ORIG_SPECS_PATH -name '*.spec' | wc -l) + if [ $SPEC_COUNT -eq 0 ]; then + if [ -f $ORIG_SPECS_PATH/spec_path ]; then + SPECS_BASE=$SRC_BASE/$(cat $SPECS_BASE/spec_path) + SPEC_COUNT=$(find $SPECS_BASE -maxdepth 1 -name '*.spec' | wc -l) + fi + fi + + if [ $SPEC_COUNT -eq 0 ] && [ $SRPM_COUNT -eq 0 ]; then + # nothing to do + echo "ERROR: Neither srpm_path nor .spec file not found in '$PKG_BASE/$ORIG_SPECS_PATH'" + cd "$ORIG_DIR" + return 0 + fi + + + if [ $SPEC_COUNT -gt 0 ] && [ $SRPM_COUNT -gt 0 ]; then + # nothing to do + echo "ERROR: Please provide only one of srpm_path or .spec files, not both, in '$PKG_BASE/$ORIG_SPECS_PATH'" + cd $ORIG_DIR + return 0 + fi + + if [ $SPEC_COUNT -gt 0 ]; then + build_dir_spec $build_idx + RC=$? + cd "$ORIG_DIR" + return $RC + else + build_dir_srpm $build_idx $ORIG_SRPM_PATH + RC=$? 
+ cd "$ORIG_DIR" + return $RC + fi + + cd "$ORIG_DIR" + return 0 +} + + +clean_srpm_dir () { + local build_idx=$1 + local DIR=$2 + local SRPM_PATH + local SRPM_FILE + local SRPM_OUT_PATH + local SRPM_NAME + local SRPM_OUT_NAME + local INPUTS_TO_CLEAN="" + + echo "clean_srpm_dir build_idx=$build_idx DIR=$DIR" + for SRPM_PATH in $(find "$DIR" -name '*.src.rpm'); do + SRPM_FILE=$(basename $SRPM_PATH) + SRPM_NAME=$(rpm -q --queryformat '%{NAME}\n' --nosignature -p $SRPM_PATH 2>> /dev/null) + if [ $CLEAN_FLAG -eq 1 ]; then + sed -i "/^$SRPM_NAME$/d" $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS_${build_idx} + fi + \rm -fv $SRPM_PATH $SRPM_OUT/$SRPM_FILE + if [ -d $SRPM_ASSEMBLE/$SRPM_NAME ]; then + echo "rm -rf $SRPM_ASSEMBLE/$SRPM_NAME" + \rm -rf $SRPM_ASSEMBLE/$SRPM_NAME + fi + if [ -d $SOURCE_OUT/$SRPM_FILE ]; then + echo "rm -rf $SOURCE_OUT/$SRPM_FILE" + \rm -rf $SOURCE_OUT/$SRPM_FILE + fi + + INPUTS_TO_CLEAN=$(if [ "x$INPUTS_TO_CLEAN" != "x" ]; then echo $INPUTS_TO_CLEAN; fi; find $BUILD_INPUTS -type d -name $SRPM_NAME) + + # Look for older versions of the same src rpm that also need cleaning + for SRPM_OUT_PATH in $(ls -1 $SRPM_OUT/$SRPM_NAME*.src.rpm 2>> /dev/null); do + SRPM_OUT_FILE=$(basename $SRPM_OUT_PATH) + SRPM_OUT_NAME=$(rpm -q --queryformat '%{NAME}\n' -p $SRPM_OUT_PATH 2>> /dev/null) + if [ "$SRPM_NAME" == "$SRPM_OUT_NAME" ]; then + \rm -fv $SRPM_OUT_PATH + if [ -d $SOURCE_OUT/$SRPM_OUT_FILE ]; then + echo "rm -rf $SOURCE_OUT/$SRPM_OUT_FILE" + \rm -rf $SOURCE_OUT/$SRPM_OUT_FILE + fi + fi + done + done + + if [ "x$INPUTS_TO_CLEAN" != "x" ]; then + for d in $INPUTS_TO_CLEAN; do + if [ -d $d/rpmbuild ]; then + echo "rm -rf $d" + \rm -rf $d + fi + done + fi +} + +build_dir_srpm () { + local build_idx=$1 + local ORIG_SRPM_PATH=$2 + + local ORIG_SRPM=$(basename $ORIG_SRPM_PATH) + local NAME=$(rpm -q --queryformat '%{NAME}\n' --nosignature -p $ORIG_SRPM_PATH) + local PKG_NAME_VER=$(rpm -q --queryformat '%{NAME}-%{VERSION}-%{RELEASE}\n' --nosignature -p $ORIG_SRPM_PATH) 
+ local PKG_DIR="$NAME" + local TARGET_FOUND="" + local RC=0 + + local NEED_BUILD=0 + + if [ "x$TARGETS" == "x" ]; then + NEED_BUILD=1 + TARGET_FOUND=$NAME + else + TARGET_LIST=( $TARGETS ) + TARGET_FOUND=$(srpm_match_target_list TARGET_LIST "$ORIG_SRPM_PATH" 2>> /dev/null) + if [ $? -eq 0 ]; then + echo "found target '$TARGET_FOUND' in '$ORIG_SRPM'" + NEED_BUILD=1 + sed -i "/^$TARGET_FOUND$/d" $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS_${build_idx} + fi + fi + + if [ $NEED_BUILD -eq 0 ]; then + return 0 + fi + + local ROOT_DIR="$SRPM_ASSEMBLE" + if [ $EDIT_FLAG -eq 1 ]; then + mkdir -p $SRPM_WORK + ROOT_DIR="$SRPM_WORK" + fi + local PKG_ROOT_DIR="$ROOT_DIR/$PKG_DIR" + local BUILD_DIR="$PKG_DIR/rpmbuild" + local FULL_BUILD_DIR="$ROOT_DIR/$BUILD_DIR" + local SRPM_DIR="$FULL_BUILD_DIR/SRPMS" + # local SOURCES_DIR="$FULL_BUILD_DIR/SOURCES" + local SOURCES_DIR="$SOURCE_OUT" + + if [ $CLEAN_FLAG -eq 1 ]; then + # clean + echo "===== Cleaning '$TARGET_FOUND' =====" + + if [ -d $SRPM_DIR ] && [ $EDIT_FLAG -eq 0 ]; then + clean_srpm_dir $build_idx "$SRPM_DIR" + fi + + if [ -d $PKG_ROOT_DIR ]; then + echo "rm -rf $PKG_ROOT_DIR" + \rm -rf "$PKG_ROOT_DIR" + fi + else + #build + echo "===== Build SRPM for '$TARGET_FOUND' =====" + echo "PKG_BASE=$PKG_BASE" + echo "BUILD_DIR=$BUILD_DIR" + echo "SRPM_DIR=$SRPM_DIR" + + if [ ! -d $ROOT_DIR ]; then + mkdir -p "$ROOT_DIR" + if [ $? -ne 0 ]; then + echo "ERROR: build_dir_srpm: mkdir '$ROOT_DIR' failed" + return 1 + fi + fi + + export DATA="$DATA_PATH/$SRPM_DATA" + local COPY_LIST + local COPY_LIST_TO_TAR + local SRC_DIR + local TIS_PATCH_VER + + BUILD_IS_BIG=0 + BUILD_IS_SLOW=0 + srpm_source_build_data $DATA + if [ $? 
-ne 0 ]; then + echo "ERROR: build_dir_srpm: failed to source $DATA" + return 1 + fi + + local BUILD_NEEDED=2 + local SRPM_OUT_PATH2 + + for SRPM_PATH in $(find "$FULL_BUILD_DIR/SRPMS" -name '*.src.rpm' | sort -V); do + if [ $BUILD_NEEDED -eq 2 ]; then + BUILD_NEEDED=0 + fi + + b=$(basename $SRPM_PATH) + SRPM_OUT_PATH2=$(find $SRPM_OUT -name $b) + if [ "x$SRPM_OUT_PATH2" == "x" ]; then + BUILD_NEEDED=1 + fi + + n=$(find $PKG_BASE -type f -cnewer $SRPM_PATH | wc -l) + if [ $n -gt 0 ]; then + BUILD_NEEDED=1 + fi + + n=$(find $ORIG_SRPM_PATH -type f -cnewer $SRPM_PATH | wc -l) + if [ $n -gt 0 ]; then + BUILD_NEEDED=1 + fi + + if [ -f $PKG_BASE/$DATA ]; then + ( + cd $PKG_BASE + BUILD_NEEDED=0 + srpm_source_build_data $DATA + + # NOTE: SRC_DIR is not honored in this build path + + if [ "x$COPY_LIST" != "x" ]; then + n=$(find $COPY_LIST -type f -cnewer $SRPM_PATH | wc -l) + if [ $n -gt 0 ]; then + BUILD_NEEDED=1 + fi + fi + + # NOTE: COPY_LIST_TO_TAR is not honored in this build path + + exit $BUILD_NEEDED + ) + if [ $? 
-gt 0 ]; then + BUILD_NEEDED=1 + fi + fi + done + + if [ $BUILD_NEEDED -eq 0 ]; then + echo "SRPM build not required for '$PKG_BASE'" + echo "===== Build complete for '$TARGET_FOUND' =====" + echo + return 0 + fi + + if [ $EDIT_FLAG -eq 0 ]; then + clean_srpm_dir $build_idx "$FULL_BUILD_DIR/SRPMS" + + if [ -d $PKG_ROOT_DIR ]; then + echo "arf rm -rf $PKG_ROOT_DIR" + \rm -rf $PKG_ROOT_DIR + fi + fi + + if [ $EDIT_FLAG -eq 1 ]; then + PKG_CLASSIFICATION=$(classify $PKG_BASE) + echo "$PKG_CLASSIFICATION = classify $PKG_BASE" + if [ "$PKG_CLASSIFICATION" == "spec + tarball" ] || [ "$PKG_CLASSIFICATION" == "srpm + patches" ]; then + echo "OK to edit $PKG_BASE" + else + echo "Can't edit this package, it is of type '$PKG_CLASSIFICATION', it is not derived from SRPM or tarball and patches" + return 1 + fi + + echo "srpm_extract_to_git '$ORIG_SRPM_PATH' '$PKG_BASE' '$ROOT_DIR' '$BUILD_DIR' '$PKG_NAME_VER' '$NO_META_PATCH_FLAG' '$TIS_PATCH_VER'" + srpm_extract_to_git $ORIG_SRPM_PATH $PKG_BASE $ROOT_DIR $BUILD_DIR $PKG_NAME_VER $NO_META_PATCH_FLAG $TIS_PATCH_VER + RC=$? + if [ $RC -ne 0 ]; then + if [ $RC -eq 1 ]; then + echo "ERROR: srpm_extract_to_git: failed to extract srpm '$ORIG_SRPM_PATH'" + fi + return $RC + fi + + local LOC=$(for g in $(find $PKG_ROOT_DIR/gits -type d -name .git); do d=$(dirname $g); (cd $d; git tag | grep "pre_wrs_$PKG_NAME_VER" >> /dev/null; if [ $? -eq 0 ]; then echo $d; fi); done | head -n 1 ) + echo "===== '$TARGET_FOUND' has been extracted for editing. 
=====" + echo "===== Metadata can be found at: $PKG_ROOT_DIR/rpmbuild" + echo "===== Source code can be found at: $LOC" + return 0 + fi + + AGE=$(find $PKG_BASE $ORIG_SRPM_PATH -type f -exec stat --format '%Y' "{}" \; | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | sort -nr | head -n 1) + if [ -f $PKG_BASE/$DATA ]; then + AGE2=$( + cd $PKG_BASE + srpm_source_build_data $DATA + PATH_LIST="" + + # NOTE: SRC_DIR is not honored in this build path + + if [ "x$COPY_LIST" != "x" ]; then + PATH_LIST="$PATH_LIST $COPY_LIST" + fi + + # NOTE: COPY_LIST_TO_TAR is not honored in this build path + + + if [ "x$PATH_LIST" == "x" ]; then + echo "0" + else + AGE2=$(find $PATH_LIST -type f -exec stat --format '%Y' "{}" \; | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | sort -nr | head -n 1) + echo "$AGE2" + fi + ) + if [ $AGE2 -gt $AGE ]; then + AGE=$AGE2 + fi + fi + + srpm_extract $ORIG_SRPM_PATH $PKG_BASE $ROOT_DIR $BUILD_DIR $PKG_NAME_VER + if [ $? -ne 0 ]; then + echo "ERROR: build_dir_srpm: failed to extract srpm '$ORIG_SRPM_PATH'" + return 1 + fi + + if [ "x$COPY_LIST" != "x" ]; then + echo "COPY_LIST: $COPY_LIST" + for p in $COPY_LIST; do + # echo "COPY_LIST: $p" + \cp -L -r -f -v $p $FULL_BUILD_DIR/SOURCES + if [ $? -ne 0 ]; then + echo "ERROR: COPY_LIST: file not found: '$p'" + exit 1 + fi + done + fi + + srpm_assemble $FULL_BUILD_DIR $TIS_PATCH_VER + if [ $? 
-ne 0 ]; then + echo "ERROR: build_dir_srpm: failed to assemble srpm for '$PKG_NAME_VER'" + echo "$TARGET_FOUND" >> $MY_WORKSPACE/tmp/SRPM_FAILED_REBUILD_LIST_${build_idx} + # SRPM_FAILED_REBUILD_LIST="$SRPM_FAILED_REBUILD_LIST $TARGET_FOUND" + return 1 + fi + + TS=$(date -d @$AGE +%Y-%m-%dT%H:%M:%S) + for s in $(find $FULL_BUILD_DIR/SRPMS -name '*.src.rpm'); do + \cp -L -f -v $s $SRPM_OUT/ + ss=$(basename $s) + touch $SRPM_OUT/$ss --date=$TS + + mkdir -p $SOURCES_DIR/$ss + BIG_FLAG_FILE="$SOURCES_DIR/$ss/BIG" + SLOW_FLAG_FILE="$SOURCES_DIR/$ss/SLOW" +echo "BIG_FLAG_FILE=$BIG_FLAG_FILE" +echo "SLOW_FLAG_FILE=$SLOW_FLAG_FILE" + + if [ $BUILD_IS_BIG -gt 0 ]; then + echo "$BUILD_IS_BIG" > $BIG_FLAG_FILE + else + if [ -f $BIG_FLAG_FILE ]; then + \rm -f $BIG_FLAG_FILE + fi + fi + + if [ $BUILD_IS_SLOW -gt 0 ]; then + echo "$BUILD_IS_SLOW" > $SLOW_FLAG_FILE + else + if [ -f $SLOW_FLAG_FILE ]; then + \rm -f $SLOW_FLAG_FILE + fi + fi + + done + + echo "$TARGET_FOUND" >> $MY_WORKSPACE/tmp/SRPM_REBUILT_LIST_${build_idx} + # SRPM_REBUILT_LIST="$SRPM_REBUILT_LIST $TARGET_FOUND" + echo "SRPM build successful for '$PKG_NAME_VER'" + echo "===== Build complete for '$TARGET_FOUND' =====" + echo + fi + + return 0 +} + + +build_dir_spec () { + local build_idx=$1 + + local NEED_BUILD=0 + local TARGET_FOUND="" + + if [ "x$TARGETS" == "x" ]; then + NEED_BUILD=1 + for f in $(find $SPECS_BASE -maxdepth 1 -name '*.spec'); do + TARGET_FOUND=$(spec_find_global service "$f" 2>> /dev/null) + if [ $? -ne 0 ]; then + TARGET_FOUND=$(spec_find_tag Name "$f" 2>> /dev/null) + if [ $? -ne 0 ]; then + TARGET_FOUND="" + fi + fi + done + else + TARGET_LIST=( $TARGETS ) + for f in $(find $SPECS_BASE -maxdepth 1 -name '*.spec' 2>> /dev/null); do + TARGET_FOUND=$(spec_match_target_list TARGET_LIST "$f" 2>> /dev/null) + if [ $? 
-eq 0 ]; then + echo "found target '$TARGET_FOUND' in '$f'" + NEED_BUILD=1 + sed -i "/^$TARGET_FOUND$/d" $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS_${build_idx} + break + fi + done + fi + + if [ $NEED_BUILD -eq 1 ]; then + MAKE_SRPM="$SCRIPT_PATH/$SRPM_SCRIPT" + export DATA="$DATA_PATH/$SRPM_DATA" + + export RPMBUILD_BASE="$WORK_BASE/rpmbuild" + SRPM_PATH="$RPMBUILD_BASE/SRPMS" + SPEC_PATH="$RPMBUILD_BASE/SPECS" + SOURCES_PATH="$RPMBUILD_BASE/SOURCES" + local ROOT_DIR="$RPMBUILD_BASE" + local PKG_ROOT_DIR="$RPMBUILD_BASE" + local SPEC=$(find $SPECS_BASE -maxdepth 1 -name '*.spec' | head -n 1) + local NAME=$(spec_find_tag Name $SPEC) + local PKG_NAME_VER=$(spec_name_ver_rel $SPEC) + local PKG_DIR="$NAME" + local BUILD_DIR="$PKG_DIR/rpmbuild" + local FULL_BUILD_DIR="$ROOT_DIR/$BUILD_DIR" + local SRPM_DIR="$FULL_BUILD_DIR/SRPMS" + # local SOURCES_DIR="$FULL_BUILD_DIR/SOURCES" + local SOURCES_DIR="$SOURCE_OUT" + + if [ $EDIT_FLAG -eq 1 ]; then + mkdir -p $SRPM_WORK + ROOT_DIR="$SRPM_WORK" + PKG_ROOT_DIR="$ROOT_DIR/$PKG_DIR" + fi + + if [ $CLEAN_FLAG -eq 1 ]; then + # clean + echo "===== Cleaning '$TARGET_FOUND' =====" + if [ -d $SRPM_PATH ] && [ $EDIT_FLAG -eq 0 ]; then + clean_srpm_dir $build_idx $SRPM_PATH + fi + + if [ -d $PKG_ROOT_DIR ]; then + echo "rm -rf $PKG_ROOT_DIR" + \rm -rf "$PKG_ROOT_DIR" + fi + else + # build + echo "===== Build SRPM for '$TARGET_FOUND' =====" + echo "PKG_BASE=$PKG_BASE" + echo "WORK_BASE=$WORK_BASE" + echo "RPMBUILD_BASE=$RPMBUILD_BASE" + if [ ! -x $MAKE_SRPM ]; then + if [ ! 
-f $DATA ]; then + echo "expected to find an executable script at '$MAKE_SRPM' or data for the default script at '$DATA'" + cd $INITIAL_DIR + exit 1 + else + MAKE_SRPM="$DEFAULT_SRPM_SCRIPT" + fi + fi + + local BUILD_NEEDED=2 + local SRPM_OUT_PATH2 + + BUILD_IS_BIG=0 + BUILD_IS_SLOW=0 +# SAL source + srpm_source_build_data $DATA + + for SRPM_PATH2 in $(find "$RPMBUILD_BASE/SRPMS" -name '*.src.rpm' | sort -V); do + if [ $BUILD_NEEDED -eq 2 ]; then + BUILD_NEEDED=0 + fi + + b=$(basename $SRPM_PATH2) + SRPM_OUT_PATH2=$(find $SRPM_OUT -name $b) + if [ "x$SRPM_OUT_PATH2" == "x" ]; then + BUILD_NEEDED=1 + fi + + n=$(find $PKG_BASE -type f -cnewer $SRPM_PATH2 | wc -l) + if [ $n -gt 0 ]; then + BUILD_NEEDED=1 + fi + + if [ -f $PKG_BASE/$DATA ]; then + ( + cd $PKG_BASE + BUILD_NEEDED=0 + srpm_source_build_data $DATA + if [ "x$SRC_DIR" != "x" ]; then + if [ -d "$SRC_DIR" ]; then + n=$(find $SRC_DIR -type f -cnewer $SRPM_PATH2 | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | wc -l) + if [ $n -gt 0 ]; then + BUILD_NEEDED=1 + fi + fi + fi + + if [ "x$COPY_LIST" != "x" ]; then + n=$(find $COPY_LIST -type f -cnewer $SRPM_PATH2 | wc -l) + if [ $n -gt 0 ]; then + BUILD_NEEDED=1 + fi + fi + + if [ "x$COPY_LIST_TO_TAR" != "x" ]; then + n=$(find $COPY_LIST_TO_TAR -type f -cnewer $SRPM_PATH2 | wc -l) + if [ $n -gt 0 ]; then + BUILD_NEEDED=1 + fi + fi + + exit $BUILD_NEEDED + ) + if [ $? -gt 0 ]; then + BUILD_NEEDED=1 + fi + fi + done + + if [ $EDIT_FLAG -eq 1 ]; then + local COPY_LIST + local COPY_LIST_TO_TAR + local SRC_DIR + local TIS_PATCH_VER + + PKG_CLASSIFICATION=$(classify $PKG_BASE) + echo "$PKG_CLASSIFICATION = classify $PKG_BASE" + if [ "$PKG_CLASSIFICATION" == "spec + tarball" ] || [ "$PKG_CLASSIFICATION" == "srpm + patches" ]; then + echo "OK to edit $PKG_BASE" + else + echo "Can't edit this package, it is of type '$PKG_CLASSIFICATION', it is not derived from SRPM or tarball and patches" + return 1 + fi + + srpm_source_build_data $DATA + if [ $? 
-ne 0 ]; then + echo "ERROR: build_dir_srpm: failed to source $DATA" + return 1 + fi + + echo "tar_and_spec_extract_to_git '$SPEC' '$PKG_BASE' '$ROOT_DIR' '$BUILD_DIR' '$PKG_NAME_VER' '$NO_META_PATCH_FLAG' '$TIS_PATCH_VER'" + tar_and_spec_extract_to_git "$SPEC" "$PKG_BASE" "$ROOT_DIR" "$BUILD_DIR" "$PKG_NAME_VER" "$NO_META_PATCH_FLAG" "$TIS_PATCH_VER" + RC=$? + if [ $RC -ne 0 ]; then + if [ $RC -eq 1 ]; then + echo "ERROR: srpm_extract_to_git: failed to extract srpm '$ORIG_SRPM_PATH'" + fi + return $RC + fi + + local LOC=$(for g in $(find $PKG_ROOT_DIR/gits -type d -name .git); do d=$(dirname $g); (cd $d; git branch --all | grep "$PKG_NAME_VER" >> /dev/null; if [ $? -eq 0 ]; then echo $d; fi); done | head -n 1 ) + echo "===== '$TARGET_FOUND' has been extracted for editing. =====" + echo "===== Metadata can be found at: $PKG_ROOT_DIR/rpmbuild" + echo "===== Source code can be found at: $LOC" + return 0 + fi + + if [ $BUILD_NEEDED -eq 0 ]; then + echo "SRPM build not required for '$PKG_BASE'" + echo "===== Build complete for '$TARGET_FOUND' =====" + echo + return 0 + fi + + + echo "MAKE_SRPM=$MAKE_SRPM" + echo "DATA=$DATA" + + if [ -d "$RPMBUILD_BASE/SRPMS" ]; then + clean_srpm_dir $build_idx "$RPMBUILD_BASE/SRPMS" + fi + if [ -d $RPMBUILD_BASE ]; then + echo "rm -rf $RPMBUILD_BASE" + \rm -rf "$RPMBUILD_BASE" + fi + + echo "mkdir -p $WORK_BASE $SRPM_PATH $SPEC_PATH $SOURCES_PATH" + mkdir -p "$WORK_BASE" && \ + mkdir -p "$SRPM_PATH" && \ + mkdir -p "$SPEC_PATH" && \ + mkdir -p "$SOURCES_PATH" + if [ $? -ne 0 ]; then + echo "ERROR: Failed to create directories under: $WORK_BASE" + fi + + \cp -L -f -v $SPECS_BASE/*.spec $SPEC_PATH/ + if [ $? -ne 0 ]; then + echo "ERROR: Failed to copy spec files from '$SPECS_BASE' to '$SPEC_PATH'" + fi + + # build + $MAKE_SRPM + if [ $? 
-ne 0 ]; then + echo "ERROR: script failed '$MAKE_SRPM'" + echo "$TARGET_FOUND" >> $MY_WORKSPACE/tmp/SRPM_FAILED_REBUILD_LIST_${build_idx} + # SRPM_FAILED_REBUILD_LIST="$SRPM_FAILED_REBUILD_LIST $TARGET_FOUND" + exit 1 + fi + + + AGE=$(find $PKG_BASE -type f -exec stat --format '%Y' "{}" \; | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | sort -nr | head -n 1) + if [ -f $PKG_BASE/$DATA ]; then + AGE2=$( + cd $PKG_BASE + srpm_source_build_data $DATA + PATH_LIST="" + if [ "x$SRC_DIR" != "x" ]; then + if [ -d "$SRC_DIR" ]; then + PATH_LIST="$PATH_LIST $SRC_DIR" + fi + fi + + if [ "x$COPY_LIST" != "x" ]; then + PATH_LIST="$PATH_LIST $COPY_LIST" + fi + + if [ "x$COPY_LIST_TO_TAR" != "x" ]; then + PATH_LIST="$PATH_LIST $COPY_LIST_TO_TAR" + fi + + if [ "x$PATH_LIST" == "x" ]; then + echo "0" + else + AGE2=$(find $PATH_LIST -type f -exec stat --format '%Y' "{}" \; | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | sort -nr | head -n 1) + echo "$AGE2" + fi + ) + if [ $AGE2 -gt $AGE ]; then + AGE=$AGE2 + fi + fi + + TS=$(date -d @$AGE +%Y-%m-%dT%H:%M:%S) + for s in $(find $SRPM_PATH -name '*.src.rpm'); do + \cp -L -f $s $SRPM_OUT/ + ss=$(basename $s) + touch $SRPM_OUT/$ss --date=$TS + + mkdir -p $SOURCES_DIR/$ss + BIG_FLAG_FILE="$SOURCES_DIR/$ss/BIG" + SLOW_FLAG_FILE="$SOURCES_DIR/$ss/SLOW" +echo "BIG_FLAG_FILE=$BIG_FLAG_FILE" +echo "SLOW_FLAG_FILE=$SLOW_FLAG_FILE" + + if [ $BUILD_IS_BIG -gt 0 ]; then + echo $BUILD_IS_BIG > $BIG_FLAG_FILE + else + if [ -f $BIG_FLAG_FILE ]; then + \rm -f $BIG_FLAG_FILE + fi + fi + + if [ $BUILD_IS_SLOW -gt 0 ]; then + echo $BUILD_IS_SLOW > $SLOW_FLAG_FILE + else + if [ -f $SLOW_FLAG_FILE ]; then + \rm -f $SLOW_FLAG_FILE + fi + fi + done + + echo "$TARGET_FOUND" >> $MY_WORKSPACE/tmp/SRPM_REBUILT_LIST_${build_idx} + # SRPM_REBUILT_LIST="$SRPM_REBUILT_LIST $TARGET_FOUND" + echo "===== Build complete for '$TARGET_FOUND' =====" + echo + fi + fi + + return 0 +} + +( +echo "$CMDLINE" + +if [ -L $BUILD_ROOT/repo ]; then + REPO_DEST=$(readlink 
$BUILD_ROOT/repo) + if [ "$REPO_DEST" != "$SRC_ROOT" ]; then + echo "Error: MY_REPO changed since last build" + echo " old path: $REPO_DEST" + echo " new path: $SRC_ROOT" + echo "Please run '$ME --clean' if you want to compile from a new source tree" + exit 1 + fi +fi + +if [ ! -L $BUILD_ROOT/repo ]; then + ln -s $SRC_ROOT $BUILD_ROOT/repo +fi + +ALL=0 +UNRESOLVED_TARGETS="" +if [ "x$TARGETS" == "x" ]; then + echo "make: all" + ALL=1 +else + echo "make: $TARGETS" + UNRESOLVED_TARGETS="$TARGETS" +fi + +workers=0 +max_workers=$MAX_WORKERS +declare -A build_env + +init_build_env () { + local i=0 + local stop=$((max_workers-1)) + for i in $(seq 0 $stop); do + build_env[$i]='Idle' + done +} + +init_build_env + +get_idle_build_env () { + local i=0 + local stop=$((max_workers-1)) + if [ $stop -ge 255 ]; then + stop=254 + fi + for i in $(seq 0 $stop); do + if [ ${build_env[$i]} == 'Idle' ]; then + build_env[$i]='Busy' + return $i + fi + done + return 255 +} + +set_build_env_pid () { + local idx=$1 + local val=$2 + build_env[$idx]=$val +} + +release_build_env () { + local idx=$1 + build_env[$idx]='Idle' +} + +reaper () { + local reaped=0 + local last_reaped=-1 + local i=0 + local stop=$((max_workers-1)) + local p=0 + local ret=0 + + if [ $stop -ge 255 ]; then + stop=254 + fi + + while [ $reaped -gt $last_reaped ]; do + last_reaped=$reaped + for i in $(seq 0 $stop); do + p=${build_env[$i]} + if [ "$p" == "Idle" ] || [ "$p" == "Busy" ]; then + continue + fi + # echo "test $i $p" + kill -0 $p &> /dev/null + if [ $? -ne 0 ]; then + wait $p + ret=$? 
+ workers=$((workers-1)) + reaped=$((reaped+1)) + release_build_env $i + if [ $ret -ne 0 ]; then + # if [ $ret -eq 1 ]; then + VERB="build" + if [ $EDIT_FLAG ]; then + VERB="edit" + fi + if [ $CLEAN_FLAG ]; then + VERB="clean" + fi + sleep 1 + echo "ERROR: Failed to $VERB src.rpm from source at 'b$i'" + cat "$LOG_DIR/$i" >> $LOG_DIR/errors + echo "ERROR: Failed to $VERB src.rpm from source at 'b$i'" >> $LOG_DIR/errors + echo "" >> $LOG_DIR/errors + # fi + STOP_SCHEDULING=1 + fi + fi + done + done + return $reaped +} + + +# Set up files to collect parallel build results ... +mkdir -p $MY_WORKSPACE/tmp +fn="$MY_WORKSPACE/tmp/UNRESOLVED_TARGETS_merge" + +if [ -f $fn ]; then + \rm -f $fn +fi + +for n in $UNRESOLVED_TARGETS; do + echo $n >> $fn; +done + +if [ -f $fn ]; then + sort $fn > $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS +else + \rm -f -v $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS + touch $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS +fi + +for i in $(seq 0 $((max_workers-1))); do + for fn in $MY_WORKSPACE/tmp/SRPM_REBUILT_LIST_$i $MY_WORKSPACE/tmp/SRPM_FAILED_REBUILD_LIST_$i $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS_$i; do + if [ -f $fn ]; then + \rm -f -v $fn + fi + done + \cp $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS_$i +done + +# Build src.rpm's +LOG_DIR=$(mktemp -d $MY_WORKSPACE/tmp/$USER-$ME-log-XXXXXX) +if [ "x$LOG_DIR" == "x" ]; then + echo "failed to create temporary directory" + exit 1; +fi +for g in $(find $SRC_BASE -type d -name .git | sort -V); do + GIT_ROOT=$(dirname $g) + if [ $STOP_SCHEDULING -eq 1 ]; then + break; + fi + for p in $(cat $GIT_ROOT/$PKG_DIRS_FILE 2>> /dev/null); do + if [ $STOP_SCHEDULING -eq 1 ]; then + break; + fi + src_dir="$GIT_ROOT/$p" + if [ -d $src_dir ]; then + if [ -d $src_dir/centos ]; then + rel_dir=$(echo $src_dir | sed "s:^$SRC_BASE::") + work_dir="$BUILD_INPUTS$rel_dir" + + # Free up a worker + while [ $workers -ge $max_workers ]; do + reaper + reaped=$? 
+ if [ $reaped -eq 0 ]; then + sleep 0.1 + fi + done + + workers=$((workers+1)) + get_idle_build_env + b=$? + if [ $b -ge 255 ]; then + echo "get_idle_build_env failed to find a free slot" + exit 1 + fi + PREFIX="b$b" + ( build_dir $b $src_dir $work_dir 2>&1 | sed "s#^#${PREFIX}: #" | tee $LOG_DIR/$b; exit ${PIPESTATUS[0]} ) & + pp=$! + set_build_env_pid $b $pp + else + echo "ERROR: Failed to find 'centos' in '$p', found in file '$GIT_ROOT/$PKG_DIRS_FILE'" + fi + else + echo "ERROR: Bad path '$p' in file '$GIT_ROOT/$PKG_DIRS_FILE'" + fi + done +done + +# Wait for remaining workers to exit +while [ $workers -gt 0 ]; do + reaper + reaped=$? + if [ $reaped -eq 0 ]; then + sleep 0.1 + fi +done + +if [ $STOP_SCHEDULING -eq 1 ]; then + echo "============ Build failed =============" + if [ -f $LOG_DIR/errors ]; then + cat $LOG_DIR/errors + fi + \rm -rf $LOG_DIR + exit 1 +fi +\rm -rf $LOG_DIR + +# Transfer results from files back into variables +SRPM_REBUILT_LIST=$((for i in $(seq 0 $((max_workers-1))); do + fn=$MY_WORKSPACE/tmp/SRPM_REBUILT_LIST_$i + if [ -f $fn ]; then + cat $fn | tr '\n' ' ' + fi + done) | sed 's/ $//') + +SRPM_FAILED_REBUILD_LIST=$((for i in $(seq 0 $((max_workers-1))); do + fn=$MY_WORKSPACE/tmp/SRPM_FAILED_REBUILD_LIST_$i + if [ -f $fn ]; then + cat $fn | tr '\n' ' ' + fi + done) | sed 's/ $//') + +UNRESOLVED_TARGETS=$(for i in $(seq 0 $((max_workers-1))); do + if [ -f $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS_$i ]; then + comm -1 -2 $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS_$i > $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS_merge + \mv $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS_merge $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS + fi + done + cat $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS | tr '\n' ' ' | sed 's/ $//') + +\rm -rf $MY_WORKSPACE/tmp/SRPM_REBUILT_LIST_* $MY_WORKSPACE/tmp/SRPM_FAILED_REBUILD_LIST_* $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS* 2>> /dev/null + +# Try to find and clean orphaned and discontinued .src.rpm's +if [ $ALL -eq 1 ]; then + 
echo + echo "Auditing for obsolete srpms" + # PACKAGES_CONSIDERED="" + AUDIT_DIR=$(mktemp -d $MY_WORKSPACE/tmp/$USER-$ME-audit-XXXXXX) + if [ $? -eq 0 ] && [ "x$AUDIT_DIR" != "x" ]; then + for g in $(find $SRC_BASE -type d -name .git | sort -V); do + GIT_ROOT=$(dirname $g) + for p in $(cat $GIT_ROOT/$PKG_DIRS_FILE 2>> /dev/null); do + ( + src_dir="$GIT_ROOT/$p" + if [ -d $src_dir ]; then + if [ -d $src_dir/$DISTRO ]; then + + for f in $(find $src_dir/centos -name '*.spec' | sort -V); do + NAME=$(spec_find_tag Name "$f" 2>> /dev/null) + if [ $? -eq 0 ]; then + # PACKAGES_CONSIDERED="$PACKAGES_CONSIDERED $NAME" + touch "$AUDIT_DIR/$NAME" + fi + done + if [ -f $src_dir/$SRPM_LIST_PATH ]; then + + for p in $(grep -v '^#' $src_dir/$SRPM_LIST_PATH | grep -v '^$'); do + ORIG_SRPM_PATH="" + # absolute path source rpms + echo "$p" | grep "^/" >/dev/null && ORIG_SRPM_PATH=$p + + if [ "${ORIG_SRPM_PATH}x" == "x" ]; then + # handle repo: definitions + echo "$p" | grep "^repo:" >/dev/null && ORIG_SRPM_PATH=$(echo $p | sed "s%^repo:%$REPO_DOWNLOADS_ROOT/%") + fi + + if [ "${ORIG_SRPM_PATH}x" == "x" ]; then + # handle repo: definitions + echo "$p" | grep "^3rd_party:" >/dev/null && ORIG_SRPM_PATH=$(echo $p | sed "s%^3rd_party:%$THIRD_PARTY_ROOT/%") + fi + + if [ "${ORIG_SRPM_PATH}x" == "x" ]; then + # handle mirror: definitions + echo "$p" | grep "^mirror:" >/dev/null && ORIG_SRPM_PATH=$(echo $p | sed "s%^mirror:%$MIRROR_ROOT/%" | sed "s#CentOS/tis-r3-CentOS/kilo/##" | sed "s#CentOS/tis-r3-CentOS/mitaka/##") + fi + + if [ "${ORIG_SRPM_PATH}x" == "x" ]; then + # we haven't found a valid prefix yet, so assume it's a legacy + # file (mirror: interpretation) + ORIG_SRPM_PATH="$MIRROR_ROOT/$p" + fi + + if [ -f $ORIG_SRPM_PATH ]; then + NAME=$(rpm -q --queryformat '%{NAME}\n' -p $ORIG_SRPM_PATH 2>> /dev/null) + if [ $? 
-eq 0 ]; then + # PACKAGES_CONSIDERED="$PACKAGES_CONSIDERED $NAME" + touch "$AUDIT_DIR/$NAME" + fi + fi + done + fi + fi + fi + ) & + done + done + echo "waiting" + wait + + echo "Auditing for obsolete srpms Phase 2" + for r in $(find $SRPM_OUT -name '*.src.rpm' | sort -V); do + ( + NAME=$(rpm -q --queryformat '%{NAME}\n' -p $r 2>> /dev/null) + ALT_NAME=$(echo $NAME | sed "s#-$BUILD_TYPE\$##") + FOUND=0 + # for p in $PACKAGES_CONSIDERED; do + # if [[ "$NAME" == "$p" || ( "$BUILD_TYPE" != "std" && "$NAME" == "$p-$BUILD_TYPE" ) ]]; then + # FOUND=1 + # break + # fi + # done + if [[ -f "$AUDIT_DIR/$NAME" || ( "$BUILD_TYPE" != "std" && -f "$AUDIT_DIR/$ALT_NAME" ) ]]; then + FOUND=1 + fi + if [ $FOUND -eq 0 ]; then + for INPUT_DIR in $(find $BUILD_INPUTS -name $NAME | sort -V); do + if [ -d "$INPUT_DIR/rpmbuild/SRPMS" ]; then + clean_srpm_dir $build_idx "$INPUT_DIR/rpmbuild/SRPMS" + fi + if [ -d $INPUT_DIR ]; then + echo "rm -rf $r" + \rm -rf $r + fi + done + if [ -f $r ]; then + \rm -f -v $r + fi + fi + ) & + done + echo "waiting" + wait + \rm -rf "$AUDIT_DIR" + fi + echo "Auditing for obsolete srpms done" +fi + +if [ $CLEAN_FLAG -eq 1 ]; then + if [ $ALL -eq 1 ]; then + \rm -rf $BUILD_INPUTS + \rm -rf $SOURCE_OUT/*.src.rpm + fi +fi + +if [ $EDIT_FLAG -ne 1 ]; then + echo "==== Update repodata =====" + mkdir -p $SRPM_OUT/repodata + for d in $(find -L $SRPM_OUT -type d -name repodata); do + (cd $d/.. 
+ \rm -rf repodata + $CREATEREPO $(pwd) + ) + done + echo "==== Update repodata complete =====" +fi + +FINAL_RC=0 +if [ $CLEAN_FLAG -eq 0 ] && [ $EDIT_FLAG -eq 0 ]; then + echo "" + if [ "$SRPM_FAILED_REBUILD_LIST" != "" ]; then + N=$(echo "$SRPM_FAILED_REBUILD_LIST" | wc -w) + echo "Failed to build $N packages:" + echo " $SRPM_FAILED_REBUILD_LIST" + FINAL_RC=1 + fi + if [ "$SRPM_REBUILT_LIST" != "" ]; then + N=$(echo "$SRPM_REBUILT_LIST" | wc -w) + echo "Successfully built $N packages:" + echo " $SRPM_REBUILT_LIST" + echo "" + echo "Compiled src.rpm's can be found here: $SRPM_OUT" + fi + if [ "$SRPM_FAILED_REBUILD_LIST" == "" ] && [ "$SRPM_REBUILT_LIST" == "" ]; then + echo "No packages required a rebuild" + fi +fi + + +if [ "$UNRESOLVED_TARGETS" != "" ]; then + echo "" + echo "ERROR: failed to resolve build targets: $UNRESOLVED_TARGETS" + FINAL_RC=1 +fi + +exit $FINAL_RC +) 2>&1 | stdbuf -o0 awk '{ print strftime("%H:%M:%S"), $0; fflush(); }' | tee $(date "+$MY_WORKSPACE/build-srpms-parallel_%Y-%m-%d_%H-%M-%S.log") ; exit ${PIPESTATUS[0]} diff --git a/build-tools/build-srpms-serial b/build-tools/build-srpms-serial new file mode 100755 index 00000000..d86d0e4d --- /dev/null +++ b/build-tools/build-srpms-serial @@ -0,0 +1,1161 @@ +#!/bin/bash +# set -x + +CMDLINE="build-srpms-serial $@" + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source $DIR/spec-utils +source $DIR/srpm-utils +source $DIR/classify + +INITIAL_DIR=`pwd` +export DISTRO="centos" +SRPM_SCRIPT="build_srpm" +SRPM_DATA="build_srpm.data" +PKG_DIRS_FILE=centos_pkg_dirs + +DEFAULT_SRPM_SCRIPT="$DIR/default_$SRPM_SCRIPT" +SCRIPT_PATH="$DISTRO" +DATA_PATH="$DISTRO" +FILES_PATH="$DISTRO/files" +# ORIG_SPECS_PATH="$DISTRO/rpmbuild/SPECS" +ORIG_SPECS_PATH="$DISTRO" +SRPM_LIST_PATH="$DISTRO/srpm_path" +MIRROR_ROOT="$MY_REPO/cgcs-centos-repo" +THIRD_PARTY_ROOT="$MY_REPO/cgcs-3rd-party-repo" +REPO_DOWNLOADS_ROOT="$MY_REPO" +SRPM_REBUILT_LIST="" +SRPM_FAILED_REBUILD_LIST="" + +CREATEREPO=$(which 
createrepo_c) +if [ $? -ne 0 ]; then + CREATEREPO="createrepo" +fi + +usage () { + echo "" + echo "Usage: " + echo " Create source rpms:" + echo " build-srpms [--rt | --std | --installer] [--no-descendants] [--formal] [ list of package names ]" + echo "" + echo " Delete source rpms, and the directories associated with it's creation:" + echo " Note: does not clean an edit environment" + echo " build-srpms --clean [--rt | --std | --installer] [optional list of package names]" + echo "" + echo " Extract an src.rpm into a pair of git trees to aid in editing it's contents," + echo " one for source code and one for metadata such as the spec file." + echo " If --no-meta-patch is specified, then WRS patches are omitted." + echo " build-srpms --edit [--rt | --std | --installer] [--no-meta-patch] [list of package names]" + echo "" + echo " Delete an edit environment" + echo " build-srpms --edit --clean [--rt | --std | --installer] [list of package names]" + echo "" + echo " This help page" + echo " build-srpms --help" + echo "" +} + +# This function creates a bunch of subdirs in $MY_WORKSPACE and makes sure +# that a $MY_BUILD_CFG file exists. +# +# The goal of this is to have a script do as much of the annoying +# grunt-work so that the "how to build it" instructions aren't 200 lines +create_output_dirs () { + # make sure variables are sane before continuing + # Note that $BUILD_ROOT contains either $MY_WORKSPACE or $MY_PATCH_WORKSPACE + if [ "x$BUILD_ROOT" == "x" ]; then + return + fi + if [ "x$MY_BUILD_CFG" == "x" ]; then + return + fi + if [ "x$MY_BUILD_DIR" == "x" ]; then + return + fi + if [ "x$MY_SRC_RPM_BUILD_DIR" == "x" ]; then + return + fi + + # create output dirs + mkdir -p $MY_BUILD_DIR + mkdir -p $MY_SRC_RPM_BUILD_DIR + mkdir -p $MY_SRC_RPM_BUILD_DIR/SOURCES + mkdir -p $MY_SRC_RPM_BUILD_DIR/SPECS + mkdir -p $MY_SRC_RPM_BUILD_DIR/BUILD + mkdir -p $MY_SRC_RPM_BUILD_DIR/RPMS + mkdir -p $MY_SRC_RPM_BUILD_DIR/SRPMS + + # create $MY_BUILD_CFG, if required + if [ ! 
-f $MY_BUILD_CFG ]; then + echo "FORMAL_BUILD=$FORMAL_BUILD" + echo "modify-build-cfg $MY_BUILD_CFG" + ${DIR}/modify-build-cfg $MY_BUILD_CFG + if [ $? -ne 0 ]; then + echo "Could not modifiy $MY_BUILD_CFG"; + exit 1 + fi + fi + +} + +NO_DESCENDANTS=0 +NO_BUILD_INFO=0 +HELP=0 +CLEAN_FLAG=0 +FORMAL_FLAG=0 +BUILD_TYPE_FLAG=0 +EDIT_FLAG=0 +NO_META_PATCH_FLAG=0 + +# read the options +TEMP=`getopt -o ha::bc: --long serial,std,rt,installer,no-descendants,no-meta-patch,no-build-info,help,formal,clean,edit,arga::,argb,argc: -n 'build-srpms' -- "$@"` + +if [ $? -ne 0 ]; then + usage + exit 1 +fi + +eval set -- "$TEMP" + +export BUILD_TYPE=std + +# extract options and their arguments into variables. +while true ; do + case "$1" in + -a|--arga) + case "$2" in + "") ARG_A='some default value' ; shift 2 ;; + *) ARG_A=$2 ; shift 2 ;; + esac ;; + -b|--argb) ARG_B=1 ; shift ;; + --no-descendants) NO_DESCENDANTS=1 ; shift ;; + --no-build-info) NO_BUILD_INFO=1 ; shift ;; + -h|--help) HELP=1 ; shift ;; + -c|--argc) + case "$2" in + "") shift 2 ;; + *) ARG_C=$2 ; shift 2 ;; + esac ;; + --clean) CLEAN_FLAG=1 ; shift ;; + --formal) FORMAL_FLAG=1 ; shift ;; + --std) BUILD_TYPE_FLAG=1; BUILD_TYPE=std; shift ;; + --rt) BUILD_TYPE_FLAG=1; BUILD_TYPE=rt; shift ;; + --installer) BUILD_TYPE=installer; shift ;; + --edit) EDIT_FLAG=1 ; shift ;; + --no-meta-patch) NO_META_PATCH_FLAG=1 ; shift ;; + --serial) shift ;; + --) shift ; break ;; + *) echo "Internal error!" 
; exit 1 ;; + esac +done + +# Reset variables +if [ -n "$MY_WORKSPACE" ]; then + export MY_WORKSPACE_TOP=${MY_WORKSPACE_TOP:-$MY_WORKSPACE} + export MY_WORKSPACE=$MY_WORKSPACE_TOP/$BUILD_TYPE +else + export MY_PATCH_WORKSPACE_TOP=${MY_PATCH_WORKSPACE_TOP:-$MY_PATCH_WORKSPACE} + export MY_PATCH_WORKSPACE=$MY_PATCH_WORKSPACE_TOP/$BUILD_TYPE +fi + +export MY_BUILD_DIR_TOP=${MY_BUILD_DIR_TOP:-$MY_BUILD_DIR} +export MY_BUILD_DIR=$MY_BUILD_DIR_TOP/$BUILD_TYPE + +export MY_BUILD_ENVIRONMENT_TOP=${MY_BUILD_ENVIRONMENT_TOP:-$MY_BUILD_ENVIRONMENT} +export MY_BUILD_ENVIRONMENT=$MY_BUILD_ENVIRONMENT_TOP-$BUILD_TYPE + +export MY_BUILD_ENVIRONMENT_FILE=$MY_BUILD_ENVIRONMENT.cfg +export MY_SRC_RPM_BUILD_DIR=$MY_BUILD_DIR/rpmbuild +export MY_BUILD_CFG=$MY_WORKSPACE/$MY_BUILD_ENVIRONMENT_FILE +export MY_MOCK_ROOT=$MY_WORKSPACE/mock/root + +if [ "$BUILD_TYPE" != "std" ]; then + PKG_DIRS_FILE=centos_pkg_dirs_$BUILD_TYPE +fi + +echo "CLEAN_FLAG=$CLEAN_FLAG" +TARGETS=$@ + +if [ $HELP -eq 1 ]; then + usage + exit 0 +fi + +if [ $FORMAL_FLAG -eq 1 ]; then + export FORMAL_BUILD="yes" +fi + +if [ "x$TARGETS" == "x" ] && [ $EDIT_FLAG -eq 1 ]; then + echo "ERROR: a package name is required when --edit is specified" + usage + exit 0 +fi + +SRC_ROOT="$MY_REPO" +if [ "x$MY_REPO" == "x" ]; then + SRC_ROOT=$INITIAL_DIR +fi + +BUILD_ROOT="$MY_WORKSPACE" +if [ "x$MY_WORKSPACE" == "x" ]; then + BUILD_ROOT="$MY_PATCH_WORKSPACE" + + if [ "x$MY_PATCH_WORKSPACE" == "x" ]; then + echo "ERROR: require one of MY_WORKSPACE or MY_PATCH_WORKSPACE be defined" + exit 1 + fi +fi + +export CCACHE_DIR="$BUILD_ROOT/.ccache" +export SRC_BASE="$SRC_ROOT" +export CGCS_BASE="$SRC_BASE/addons/wr-cgcs/layers/cgcs" +export SPECS_BASE="$ORIG_SPECS_PATH" +export FILES_BASE="$FILES_PATH" + +export BUILD_BASE="$BUILD_ROOT" +BUILD_INPUTS="$BUILD_BASE/inputs" +SRPM_ASSEMBLE="$BUILD_BASE/srpm_assemble" +SRPM_WORK="$BUILD_BASE/srpm_work" + +if [ "x$MY_SRC_RPM_BUILD_DIR" != "x" ]; then + RPM_BUILD_ROOT=$MY_SRC_RPM_BUILD_DIR +else + 
RPM_BUILD_ROOT=$BUILD_BASE/rpmbuild +fi + +create_output_dirs + +export RPM_BUILD_BASE="$RPM_BUILD_ROOT" +export SRPM_OUT="$RPM_BUILD_BASE/SRPMS" +export RPM_DIR="$RPM_BUILD_BASE/RPMS" + +if [ ! -d $CGCS_BASE ]; then + echo "ERROR: expected to find directory at '$CGCS_BASE'" + exit 1 +fi + +if [ ! -d $BUILD_BASE ]; then + echo "ERROR: expected to find directory at '$BUILD_BASE'" + exit 1 +fi + +RELEASE_INFO_FILE=$SRC_BASE/addons/wr-cgcs/layers/cgcs/middleware/recipes-common/build-info/release-info.inc +if [ -f $RELEASE_INFO_FILE ]; then + source $MY_REPO/addons/wr-cgcs/layers/cgcs/middleware/recipes-common/build-info/release-info.inc +else + echo "ERROR: failed to find RELEASE_INFO_FILE=$RELEASE_INFO_FILE" + exit 1 +fi + +if [ "x$PLATFORM_RELEASE" == "x" ]; then + echo "ERROR: PLATFORM_RELEASE is not defined in $RELEASE_INFO_FILE" + exit 1 +fi + +export PLATFORM_RELEASE + +mkdir -p $RPM_BUILD_BASE +if [ $? -ne 0 ]; then + echo "ERROR: Failed to create directory '$RPM_BUILD_BASE'" + exit 1 +fi + +mkdir -p $SRPM_OUT +if [ $? -ne 0 ]; then + echo "ERROR: Failed to create directory '$SRPM_OUT'" + exit 1 +fi + +mkdir -p $RPM_DIR +if [ $? -ne 0 ]; then + echo "ERROR: Failed to create directory '$RPM_DIR'" + exit 1 +fi + +build_dir () { + local d=$1 + local w=$2 + export PKG_BASE=$d + export WORK_BASE=$w + export SPECS_BASE="$PKG_BASE/$ORIG_SPECS_PATH" + local RC + + local ORIG_DIR=`pwd` + # echo "build_dir: PKG_BASE=$PKG_BASE" + + cd "$PKG_BASE" + if [ $? -ne 0 ]; then + echo "ERROR: failed to cd into '$PKG_BASE'" + return 1 + fi + + if [ ! 
-d $ORIG_SPECS_PATH ]; then + # nothing to do + echo "WARNING: '$ORIG_SPECS_PATH' not found in '$PKG_BASE'" + cd "$ORIG_DIR" + return 0 + fi + + SRPM_COUNT=0 + ORIG_SRPM_PATH="" + if [ -f $SRPM_LIST_PATH ]; then + # we've found a file (ex centos/srpm_path) which lists a path to a source + # RPM file + # + # The specified file can be of the form + # + # repo:path/to/file.src.rpm + # mirror:path/to/file.src.rpm + # /path/to/file.rpm + # path/to/file.rpm + # + # If "repo:" is specified, then we search for the file relative to + # $REPO_DOWNLOADS_ROOT (i.e. a path to the file in a "downloads subgit) + # + # If "mirror:" is specified, then we search for the file relateive to + # $MIRROR_ROOT + # + # If "3rd_party:" is specified, then we search for the file relateive to + # $THIRD_PARTY_ROOT + # + # An absolute path is parsed as an absolute path (mainly intended for + # developer/experimental use without checking in files or messing with + # your git repos) + # + # A lack of prefix (relative path name) is interpretted as "mirror:" + # (legacy support for existing packages) + # + # Other prefixes (file:, http:, whatever:)are unsupported at this time + + for p in `grep -v '^#' $SRPM_LIST_PATH | grep -v '^$'`; do + # absolute path source rpms + echo "$p" | grep "^/" >/dev/null && ORIG_SRPM_PATH=$p + + if [ "${ORIG_SRPM_PATH}x" == "x" ]; then + # handle repo: definitions + echo "$p" | grep "^repo:" >/dev/null && ORIG_SRPM_PATH=`echo $p | sed "s%^repo:%$REPO_DOWNLOADS_ROOT/%"` + fi + + if [ "${ORIG_SRPM_PATH}x" == "x" ]; then + # handle mirror: definitions + echo "$p" | grep "^3rd_party:" >/dev/null && ORIG_SRPM_PATH=`echo $p | sed "s%^3rd_party:%$THIRD_PARTY_ROOT/%"` + fi + + if [ "${ORIG_SRPM_PATH}x" == "x" ]; then + # handle mirror: definitions + echo "$p" | grep "^mirror:" >/dev/null && ORIG_SRPM_PATH=`echo $p | sed "s%^mirror:%$MIRROR_ROOT/%" | sed "s#CentOS/tis-r3-CentOS/kilo/##" | sed "s#CentOS/tis-r3-CentOS/mitaka/##"` + fi + + if [ "${ORIG_SRPM_PATH}x" == "x" ]; then 
+ # we haven't found a valid prefix yet, so assume it's a legacy + # file (mirror: interpretation) + ORIG_SRPM_PATH="$MIRROR_ROOT/$p" + fi + + # echo "ORIG_SRPM_PATH=$ORIG_SRPM_PATH" + if [ -f $ORIG_SRPM_PATH ]; then + SRPM_COUNT=$((SRPM_COUNT + 1)) + else + echo "ERROR: Invalid srpm path '$p', evaluated as '$ORIG_SRPM_PATH', found in '$SRPM_LIST_PATH'" + ORIG_SRPM_PATH="" + exit 1 + fi + done + fi + + # Clean up an tmp_spec_*.spec file left by a prior failed build + for f in $(find $ORIG_SPECS_PATH -name 'tmp_spec_*.spec'); do + \rm -f $f + done + + SPEC_COUNT=`find $ORIG_SPECS_PATH -name '*.spec' | wc -l` + if [ $SPEC_COUNT -eq 0 ]; then + if [ -f $ORIG_SPECS_PATH/spec_path ]; then + SPECS_BASE=$SRC_BASE/$(cat $SPECS_BASE/spec_path) + SPEC_COUNT=`find $SPECS_BASE -maxdepth 1 -name '*.spec' | wc -l` + fi + fi + + if [ $SPEC_COUNT -eq 0 ] && [ $SRPM_COUNT -eq 0 ]; then + # nothing to do + echo "ERROR: Neither srpm_path nor .spec file not found in '$PKG_BASE/$ORIG_SPECS_PATH'" + cd "$ORIG_DIR" + return 0 + fi + + + if [ $SPEC_COUNT -gt 0 ] && [ $SRPM_COUNT -gt 0 ]; then + # nothing to do + echo "ERROR: Please provide only one of srpm_path or .spec files, not both, in '$PKG_BASE/$ORIG_SPECS_PATH'" + cd $ORIG_DIR + return 0 + fi + + if [ $SPEC_COUNT -gt 0 ]; then + build_dir_spec + RC=$? + cd "$ORIG_DIR" + return $RC + else + build_dir_srpm $ORIG_SRPM_PATH + RC=$? 
+ cd "$ORIG_DIR" + return $RC + fi + + cd "$ORIG_DIR" + return 0 +} + + +clean_srpm_dir () { + local DIR=$1 + local SRPM_PATH + local SRPM_FILE + local SRPM_OUT_PATH + local SRPM_NAME + local SRPM_OUT_NAME + local INPUTS_TO_CLEAN="" + + echo "clean_srpm_dir DIR=$DIR" + for SRPM_PATH in `find "$DIR" -name '*.src.rpm'`; do + SRPM_FILE=$(basename $SRPM_PATH) + SRPM_NAME=$(rpm -q --queryformat '%{NAME}\n' --nosignature -p $SRPM_PATH 2>> /dev/null) + rm -fv $SRPM_PATH $SRPM_OUT/$SRPM_FILE + if [ -d $SRPM_ASSEMBLE/$SRPM_NAME ]; then + echo "rm -rf $SRPM_ASSEMBLE/$SRPM_NAME" + rm -rf $SRPM_ASSEMBLE/$SRPM_NAME + fi + INPUTS_TO_CLEAN=$(if [ "x$INPUTS_TO_CLEAN" != "x" ]; then echo $INPUTS_TO_CLEAN; fi; find $BUILD_INPUTS -type d -name $SRPM_NAME) + for SRPM_OUT_PATH in `ls -1 $SRPM_OUT/$SRPM_NAME* 2>> /dev/null`; do + SRPM_OUT_NAME=$(rpm -q --queryformat '%{NAME}\n' -p $SRPM_OUT_PATH 2>> /dev/null) + if [ "$SRPM_NAME" == "$SRPM_OUT_NAME" ]; then + rm -fv $SRPM_OUT_PATH + fi + done + done + + if [ "x$INPUTS_TO_CLEAN" != "x" ]; then + for d in $INPUTS_TO_CLEAN; do + if [ -d $d ]; then + echo "rm -rf $d" + rm -rf $d + fi + done + fi +} + +build_dir_srpm () { + local ORIG_SRPM_PATH=$1 + + local ORIG_SRPM=$(basename $ORIG_SRPM_PATH) + local NAME=`rpm -q --queryformat '%{NAME}\n' --nosignature -p $ORIG_SRPM_PATH` + local PKG_NAME_VER=`rpm -q --queryformat '%{NAME}-%{VERSION}-%{RELEASE}\n' --nosignature -p $ORIG_SRPM_PATH` + local PKG_DIR="$NAME" + local TARGET_FOUND="" + local RC=0 + + local NEED_BUILD=0 + + if [ "x$TARGETS" == "x" ]; then + NEED_BUILD=1 + TARGET_FOUND=$NAME + else + TARGET_LIST=( $TARGETS ) + TARGET_FOUND=$(srpm_match_target_list TARGET_LIST "$ORIG_SRPM_PATH" 2>> /dev/null) + if [ $? 
-eq 0 ]; then + echo "found target '$TARGET_FOUND' in '$ORIG_SRPM'" + NEED_BUILD=1 + UNRESOLVED_TARGETS=$(echo "$UNRESOLVED_TARGETS" | sed "s/\(^\|[[:space:]]\)$TARGET_FOUND\([[:space:]]\|$\)/ /g") + fi + fi + + if [ $NEED_BUILD -eq 0 ]; then + return 0 + fi + + local ROOT_DIR="$SRPM_ASSEMBLE" + if [ $EDIT_FLAG -eq 1 ]; then + mkdir -p $SRPM_WORK + ROOT_DIR="$SRPM_WORK" + fi + local PKG_ROOT_DIR="$ROOT_DIR/$PKG_DIR" + local BUILD_DIR="$PKG_DIR/rpmbuild" + local FULL_BUILD_DIR="$ROOT_DIR/$BUILD_DIR" + local SRPM_DIR="$FULL_BUILD_DIR/SRPMS" + + if [ $CLEAN_FLAG -eq 1 ]; then + # clean + echo "===== Cleaning '$TARGET_FOUND' =====" + + if [ -d $SRPM_DIR ] && [ $EDIT_FLAG -eq 0 ]; then + clean_srpm_dir "$SRPM_DIR" + fi + + if [ -d $PKG_ROOT_DIR ]; then + echo "rm -rf $PKG_ROOT_DIR" + rm -rf "$PKG_ROOT_DIR" + fi + else + #build + echo "===== Build SRPM for '$TARGET_FOUND' =====" + echo "PKG_BASE=$PKG_BASE" + echo "BUILD_DIR=$BUILD_DIR" + echo "SRPM_DIR=$SRPM_DIR" + + if [ ! -d $ROOT_DIR ]; then + mkdir -p "$ROOT_DIR" + if [ $? -ne 0 ]; then + echo "ERROR: build_dir_srpm: mkdir '$ROOT_DIR' failed" + return 1 + fi + fi + + export DATA="$DATA_PATH/$SRPM_DATA" + local COPY_LIST + local COPY_LIST_TO_TAR + local SRC_DIR + local TIS_PATCH_VER + + srpm_source_build_data $DATA + if [ $? 
-ne 0 ]; then + echo "ERROR: build_dir_srpm: failed to source $DATA" + return 1 + fi + + local BUILD_NEEDED=2 + local SRPM_OUT_PATH2 + + for SRPM_PATH in `find "$FULL_BUILD_DIR/SRPMS" -name '*.src.rpm' | sort -V`; do + if [ $BUILD_NEEDED -eq 2 ]; then + BUILD_NEEDED=0 + fi + + b=$(basename $SRPM_PATH) + SRPM_OUT_PATH2=`find $SRPM_OUT -name $b` + if [ "x$SRPM_OUT_PATH2" == "x" ]; then + BUILD_NEEDED=1 + fi + + n=`find $PKG_BASE -type f -cnewer $SRPM_PATH | wc -l` + if [ $n -gt 0 ]; then + BUILD_NEEDED=1 + fi + + n=`find $ORIG_SRPM_PATH -type f -cnewer $SRPM_PATH | wc -l` + if [ $n -gt 0 ]; then + BUILD_NEEDED=1 + fi + + if [ -f $PKG_BASE/$DATA ]; then + ( + cd $PKG_BASE + BUILD_NEEDED=0 + srpm_source_build_data $DATA + + # NOTE: SRC_DIR is not honored in this build path + + if [ "x$COPY_LIST" != "x" ]; then + n=`find $COPY_LIST -type f -cnewer $SRPM_PATH | wc -l` + if [ $n -gt 0 ]; then + BUILD_NEEDED=1 + fi + fi + + # NOTE: COPY_LIST_TO_TAR is not honored in this build path + + exit $BUILD_NEEDED + ) + if [ $? 
-gt 0 ]; then + BUILD_NEEDED=1 + fi + fi + done + + if [ $BUILD_NEEDED -eq 0 ]; then + echo "SRPM build not required for '$PKG_BASE'" + echo "===== Build complete for '$TARGET_FOUND' =====" + echo + return 0 + fi + + if [ $EDIT_FLAG -eq 0 ]; then + clean_srpm_dir "$FULL_BUILD_DIR/SRPMS" + + if [ -d $PKG_ROOT_DIR ]; then + echo "arf rm -rf $PKG_ROOT_DIR" + rm -rf $PKG_ROOT_DIR + fi + fi + + if [ $EDIT_FLAG -eq 1 ]; then + PKG_CLASSIFICATION=$(classify $PKG_BASE) + echo "$PKG_CLASSIFICATION = classify $PKG_BASE" + if [ "$PKG_CLASSIFICATION" == "spec + tarball" ] || [ "$PKG_CLASSIFICATION" == "srpm + patches" ]; then + echo "OK to edit $PKG_BASE" + else + echo "Can't edit this package, it is of type '$PKG_CLASSIFICATION', it is not derived from SRPM or tarball and patches" + return 1 + fi + + echo "srpm_extract_to_git '$ORIG_SRPM_PATH' '$PKG_BASE' '$ROOT_DIR' '$BUILD_DIR' '$PKG_NAME_VER' '$NO_META_PATCH_FLAG' '$TIS_PATCH_VER'" + srpm_extract_to_git $ORIG_SRPM_PATH $PKG_BASE $ROOT_DIR $BUILD_DIR $PKG_NAME_VER $NO_META_PATCH_FLAG $TIS_PATCH_VER + RC=$? + if [ $RC -ne 0 ]; then + if [ $RC -eq 1 ]; then + echo "ERROR: srpm_extract_to_git: failed to extract srpm '$ORIG_SRPM_PATH'" + fi + return $RC + fi + + local LOC=$(for g in $(find $PKG_ROOT_DIR/gits -type d -name .git); do d=$(dirname $g); (cd $d; git tag | grep "pre_wrs_$PKG_NAME_VER" >> /dev/null; if [ $? -eq 0 ]; then echo $d; fi); done | head -n 1 ) + echo "===== '$TARGET_FOUND' has been extracted for editing. 
=====" + echo "===== Metadata can be found at: $PKG_ROOT_DIR/rpmbuild" + echo "===== Source code can be found at: $LOC" + return 0 + fi + + AGE=`find $PKG_BASE $ORIG_SRPM_PATH -type f -exec stat --format '%Y' "{}" \; | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | sort -nr | head -n 1` + if [ -f $PKG_BASE/$DATA ]; then + AGE2=$( + cd $PKG_BASE + srpm_source_build_data $DATA + PATH_LIST="" + + # NOTE: SRC_DIR is not honored in this build path + + if [ "x$COPY_LIST" != "x" ]; then + PATH_LIST="$PATH_LIST $COPY_LIST" + fi + + # NOTE: COPY_LIST_TO_TAR is not honored in this build path + + + if [ "x$PATH_LIST" == "x" ]; then + echo "0" + else + AGE2=`find $PATH_LIST -type f -exec stat --format '%Y' "{}" \; | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | sort -nr | head -n 1` + echo "$AGE2" + fi + ) + if [ $AGE2 -gt $AGE ]; then + AGE=$AGE2 + fi + fi + + srpm_extract $ORIG_SRPM_PATH $PKG_BASE $ROOT_DIR $BUILD_DIR $PKG_NAME_VER + if [ $? -ne 0 ]; then + echo "ERROR: build_dir_srpm: failed to extract srpm '$ORIG_SRPM_PATH'" + return 1 + fi + + if [ "x$COPY_LIST" != "x" ]; then + echo "COPY_LIST: $COPY_LIST" + for p in $COPY_LIST; do + # echo "COPY_LIST: $p" + \cp -L -r -f -v $p $FULL_BUILD_DIR/SOURCES + if [ $? -ne 0 ]; then + echo "ERROR: COPY_LIST: file not found: '$p'" + exit 1 + fi + done + fi + + srpm_assemble $FULL_BUILD_DIR $TIS_PATCH_VER + if [ $? 
-ne 0 ]; then + echo "ERROR: build_dir_srpm: failed to assemble srpm for '$PKG_NAME_VER'" + SRPM_FAILED_REBUILD_LIST="$SRPM_FAILED_REBUILD_LIST $TARGET_FOUND" + return 1 + fi + + TS=$(date -d @$AGE +%Y-%m-%dT%H:%M:%S) + for s in `find $FULL_BUILD_DIR/SRPMS -name '*.src.rpm'`; do + \cp -L -f -v $s $SRPM_OUT/ + ss=$(basename $s) + touch $SRPM_OUT/$ss --date=$TS + done + + SRPM_REBUILT_LIST="$SRPM_REBUILT_LIST $TARGET_FOUND" + echo "SRPM build successful for '$PKG_NAME_VER'" + echo "===== Build complete for '$TARGET_FOUND' =====" + echo + fi + + return 0 +} + + +build_dir_spec () { + local NEED_BUILD=0 + local TARGET_FOUND="" + + if [ "x$TARGETS" == "x" ]; then + NEED_BUILD=1 + for f in `find $SPECS_BASE -maxdepth 1 -name '*.spec'`; do + TARGET_FOUND=`spec_find_global service "$f" 2>> /dev/null` + if [ $? -ne 0 ]; then + TARGET_FOUND=`spec_find_tag Name "$f" 2>> /dev/null` + if [ $? -ne 0 ]; then + TARGET_FOUND="" + fi + fi + done + else + TARGET_LIST=( $TARGETS ) + for f in `find $SPECS_BASE -maxdepth 1 -name '*.spec' 2>> /dev/null`; do + TARGET_FOUND=$(spec_match_target_list TARGET_LIST "$f" 2>> /dev/null) + if [ $? 
-eq 0 ]; then + echo "found target '$TARGET_FOUND' in '$f'" + NEED_BUILD=1 + UNRESOLVED_TARGETS=$(echo "$UNRESOLVED_TARGETS" | sed "s/\(^\|[[:space:]]\)$TARGET_FOUND\([[:space:]]\|$\)/ /g") + break + fi + done + fi + + if [ $NEED_BUILD -eq 1 ]; then + MAKE_SRPM="$SCRIPT_PATH/$SRPM_SCRIPT" + export DATA="$DATA_PATH/$SRPM_DATA" + + export RPMBUILD_BASE="$WORK_BASE/rpmbuild" + SRPM_PATH="$RPMBUILD_BASE/SRPMS" + SPEC_PATH="$RPMBUILD_BASE/SPECS" + SOURCES_PATH="$RPMBUILD_BASE/SOURCES" + local ROOT_DIR="$RPMBUILD_BASE" + local PKG_ROOT_DIR="$RPMBUILD_BASE" + local SPEC=$(find $SPECS_BASE -maxdepth 1 -name '*.spec' | head -n 1) + local NAME=$(spec_find_tag Name $SPEC) + local PKG_NAME_VER=$(spec_name_ver_rel $SPEC) + local PKG_DIR="$NAME" + local BUILD_DIR="$PKG_DIR/rpmbuild" + local FULL_BUILD_DIR="$ROOT_DIR/$BUILD_DIR" + local SRPM_DIR="$FULL_BUILD_DIR/SRPMS" + + if [ $EDIT_FLAG -eq 1 ]; then + mkdir -p $SRPM_WORK + ROOT_DIR="$SRPM_WORK" + PKG_ROOT_DIR="$ROOT_DIR/$PKG_DIR" + fi + + if [ $CLEAN_FLAG -eq 1 ]; then + # clean + echo "===== Cleaning '$TARGET_FOUND' =====" + if [ -d $SRPM_PATH ] && [ $EDIT_FLAG -eq 0 ]; then + clean_srpm_dir $SRPM_PATH + fi + + if [ -d $PKG_ROOT_DIR ]; then + echo "rm -rf $PKG_ROOT_DIR" + rm -rf "$PKG_ROOT_DIR" + fi + else + # build + echo "===== Build SRPM for '$TARGET_FOUND' =====" + echo "PKG_BASE=$PKG_BASE" + echo "WORK_BASE=$WORK_BASE" + echo "RPMBUILD_BASE=$RPMBUILD_BASE" + if [ ! -x $MAKE_SRPM ]; then + if [ ! 
-f $DATA ]; then + echo "expected to find an executable script at '$MAKE_SRPM' or data for the default script at '$DATA'" + cd $INITIAL_DIR + exit 1 + else + MAKE_SRPM="$DEFAULT_SRPM_SCRIPT" + fi + fi + + local BUILD_NEEDED=2 + local SRPM_OUT_PATH2 + + for SRPM_PATH2 in `find "$RPMBUILD_BASE/SRPMS" -name '*.src.rpm' | sort -V`; do + if [ $BUILD_NEEDED -eq 2 ]; then + BUILD_NEEDED=0 + fi + + b=$(basename $SRPM_PATH2) + SRPM_OUT_PATH2=`find $SRPM_OUT -name $b` + if [ "x$SRPM_OUT_PATH2" == "x" ]; then + BUILD_NEEDED=1 + fi + + n=`find $PKG_BASE -type f -cnewer $SRPM_PATH2 | wc -l` + if [ $n -gt 0 ]; then + BUILD_NEEDED=1 + fi + + if [ -f $PKG_BASE/$DATA ]; then + ( + cd $PKG_BASE + BUILD_NEEDED=0 + srpm_source_build_data $DATA + if [ "x$SRC_DIR" != "x" ]; then + if [ -d "$SRC_DIR" ]; then + n=`find $SRC_DIR -type f -cnewer $SRPM_PATH2 | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | wc -l` + if [ $n -gt 0 ]; then + BUILD_NEEDED=1 + fi + fi + fi + + if [ "x$COPY_LIST" != "x" ]; then + n=`find $COPY_LIST -type f -cnewer $SRPM_PATH2 | wc -l` + if [ $n -gt 0 ]; then + BUILD_NEEDED=1 + fi + fi + + if [ "x$COPY_LIST_TO_TAR" != "x" ]; then + n=`find $COPY_LIST_TO_TAR -type f -cnewer $SRPM_PATH2 | wc -l` + if [ $n -gt 0 ]; then + BUILD_NEEDED=1 + fi + fi + + exit $BUILD_NEEDED + ) + if [ $? -gt 0 ]; then + BUILD_NEEDED=1 + fi + fi + done + + if [ $EDIT_FLAG -eq 1 ]; then + local COPY_LIST + local COPY_LIST_TO_TAR + local SRC_DIR + local TIS_PATCH_VER + + PKG_CLASSIFICATION=$(classify $PKG_BASE) + echo "$PKG_CLASSIFICATION = classify $PKG_BASE" + if [ "$PKG_CLASSIFICATION" == "spec + tarball" ] || [ "$PKG_CLASSIFICATION" == "srpm + patches" ]; then + echo "OK to edit $PKG_BASE" + else + echo "Can't edit this package, it is of type '$PKG_CLASSIFICATION', it is not derived from SRPM or tarball and patches" + return 1 + fi + + srpm_source_build_data $DATA + if [ $? 
-ne 0 ]; then + echo "ERROR: build_dir_srpm: failed to source $DATA" + return 1 + fi + + echo "tar_and_spec_extract_to_git '$SPEC' '$PKG_BASE' '$ROOT_DIR' '$BUILD_DIR' '$PKG_NAME_VER' '$NO_META_PATCH_FLAG' '$TIS_PATCH_VER'" + tar_and_spec_extract_to_git "$SPEC" "$PKG_BASE" "$ROOT_DIR" "$BUILD_DIR" "$PKG_NAME_VER" "$NO_META_PATCH_FLAG" "$TIS_PATCH_VER" + RC=$? + if [ $RC -ne 0 ]; then + if [ $RC -eq 1 ]; then + echo "ERROR: srpm_extract_to_git: failed to extract srpm '$ORIG_SRPM_PATH'" + fi + return $RC + fi + + local LOC=$(for g in $(find $PKG_ROOT_DIR/gits -type d -name .git); do d=$(dirname $g); (cd $d; git branch --all | grep "$PKG_NAME_VER" >> /dev/null; if [ $? -eq 0 ]; then echo $d; fi); done | head -n 1 ) + echo "===== '$TARGET_FOUND' has been extracted for editing. =====" + echo "===== Metadata can be found at: $PKG_ROOT_DIR/rpmbuild" + echo "===== Source code can be found at: $LOC" + return 0 + fi + + if [ $BUILD_NEEDED -eq 0 ]; then + echo "SRPM build not required for '$PKG_BASE'" + echo "===== Build complete for '$TARGET_FOUND' =====" + echo + return 0 + fi + + + echo "MAKE_SRPM=$MAKE_SRPM" + echo "DATA=$DATA" + + if [ -d "$RPMBUILD_BASE/SRPMS" ]; then + clean_srpm_dir "$RPMBUILD_BASE/SRPMS" + fi + if [ -d $RPMBUILD_BASE ]; then + echo "rm -rf $RPMBUILD_BASE" + rm -rf "$RPMBUILD_BASE" + fi + + mkdir -p "$WORK_BASE" && \ + mkdir -p "$SRPM_PATH" && \ + mkdir -p "$SPEC_PATH" && \ + mkdir -p "$SOURCES_PATH" + if [ $? -ne 0 ]; then + echo "ERROR: Failed to create directories under: $WORK_BASE" + fi + + \cp -L -f -v $SPECS_BASE/*.spec $SPEC_PATH/ + if [ $? -ne 0 ]; then + echo "ERROR: Failed to copy spec files from '$SPECS_BASE' to '$SPEC_PATH'" + fi + + # build + $MAKE_SRPM + if [ $? 
-ne 0 ]; then + echo "ERROR: script failed '$MAKE_SRPM'" + SRPM_FAILED_REBUILD_LIST="$SRPM_FAILED_REBUILD_LIST $TARGET_FOUND" + exit 1 + fi + + + AGE=`find $PKG_BASE -type f -exec stat --format '%Y' "{}" \; | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | sort -nr | head -n 1` + if [ -f $PKG_BASE/$DATA ]; then + AGE2=$( + cd $PKG_BASE + srpm_source_build_data $DATA + PATH_LIST="" + if [ "x$SRC_DIR" != "x" ]; then + if [ -d "$SRC_DIR" ]; then + PATH_LIST="$PATH_LIST $SRC_DIR" + fi + fi + + if [ "x$COPY_LIST" != "x" ]; then + PATH_LIST="$PATH_LIST $COPY_LIST" + fi + + if [ "x$COPY_LIST_TO_TAR" != "x" ]; then + PATH_LIST="$PATH_LIST $COPY_LIST_TO_TAR" + fi + + if [ "x$PATH_LIST" == "x" ]; then + echo "0" + else + AGE2=`find $PATH_LIST -type f -exec stat --format '%Y' "{}" \; | grep -v '[/][.]git$' | grep -v '[/][.]git[/]' | sort -nr | head -n 1` + echo "$AGE2" + fi + ) + if [ $AGE2 -gt $AGE ]; then + AGE=$AGE2 + fi + fi + + TS=$(date -d @$AGE +%Y-%m-%dT%H:%M:%S) + for s in `find $SRPM_PATH -name '*.src.rpm'`; do + \cp -L -f $s $SRPM_OUT/ + ss=$(basename $s) + touch $SRPM_OUT/$ss --date=$TS + done + + SRPM_REBUILT_LIST="$SRPM_REBUILT_LIST $TARGET_FOUND" + echo "===== Build complete for '$TARGET_FOUND' =====" + echo + fi + fi + + return 0 +} + +( +echo "$CMDLINE" + +if [ -L $BUILD_ROOT/repo ]; then + REPO_DEST=`readlink $BUILD_ROOT/repo` + if [ "$REPO_DEST" != "$SRC_ROOT" ]; then + echo "Error: MY_REPO changed since last build" + echo " old path: $REPO_DEST" + echo " new path: $SRC_ROOT" + echo "Please run 'build-srpms --clean' if you want to compile from a new source tree" + exit 1 + fi +fi + +if [ ! 
-L $BUILD_ROOT/repo ]; then + ln -s $SRC_ROOT $BUILD_ROOT/repo +fi + +ALL=0 +UNRESOLVED_TARGETS=" " +if [ "x$TARGETS" == "x" ]; then + echo "make: all" + ALL=1 +else + echo "make: $TARGETS" + UNRESOLVED_TARGETS="$TARGETS" +fi + +if [ $EDIT_FLAG -eq 0 ]; then + if [ $CLEAN_FLAG -eq 1 ]; then + EXTRA_RPM_FLAGS="" + + if [ $NO_BUILD_INFO -eq 1 ]; then + EXTRA_RPM_FLAGS+=" --no-build-info" + fi + + if [ $BUILD_TYPE_FLAG -eq 1 ]; then + EXTRA_RPM_FLAGS+=" --$BUILD_TYPE" + fi + + if [ $ALL -eq 1 ]; then + build-rpms-serial --clean $EXTRA_RPM_FLAGS + rm -f $BUILD_ROOT/repo + else + build-rpms-serial --clean $EXTRA_RPM_FLAGS $TARGETS + fi + fi +fi + +for g in `find $SRC_BASE -type d -name .git | sort -V`; do + GIT_ROOT=$(dirname $g) + for p in $(cat $GIT_ROOT/$PKG_DIRS_FILE 2>> /dev/null); do + src_dir="$GIT_ROOT/$p" + if [ -d $src_dir ]; then + if [ -d $src_dir/centos ]; then + rel_dir=$(echo $src_dir | sed "s:^$SRC_BASE::") + work_dir="$BUILD_INPUTS$rel_dir" + build_dir $src_dir $work_dir + RC=$? + if [ $RC -ne 0 ]; then + if [ $RC -eq 1 ]; then + VERB="build" + if [ $EDIT_FLAG ]; then + VERB="edit" + fi + if [ $CLEAN_FLAG ]; then + VERB="clean" + fi + echo "ERROR: Failed to $VERB src.rpm from source at '$p'" + fi + exit 1 + fi + else + echo "ERROR: Failed to find 'centos' in '$p', found in file '$GIT_ROOT/$PKG_DIRS_FILE'" + fi + else + echo "ERROR: Bad path '$p' in file '$GIT_ROOT/$PKG_DIRS_FILE'" + fi + done +done + +# Try to find and clean orphaned and discontinued .src.rpm's +if [ $ALL -eq 1 ]; then + echo + echo "Auditing for obsolete srpms" + PACKAGES_CONSIDERED="" + for g in `find $SRC_BASE -type d -name .git | sort -V`; do + GIT_ROOT=$(dirname $g) + for p in $(cat $GIT_ROOT/$PKG_DIRS_FILE 2>> /dev/null); do + src_dir="$GIT_ROOT/$p" + if [ -d $src_dir ]; then + if [ -d $src_dir/$DISTRO ]; then + + for f in `find $src_dir/centos -name '*.spec' | sort -V`; do + NAME=`spec_find_tag Name "$f" 2>> /dev/null` + if [ $? 
-eq 0 ]; then + PACKAGES_CONSIDERED="$PACKAGES_CONSIDERED $NAME" + fi + done + if [ -f $src_dir/$SRPM_LIST_PATH ]; then + + for p in `grep -v '^#' $src_dir/$SRPM_LIST_PATH | grep -v '^$'`; do + ORIG_SRPM_PATH="" + # absolute path source rpms + echo "$p" | grep "^/" >/dev/null && ORIG_SRPM_PATH=$p + + if [ "${ORIG_SRPM_PATH}x" == "x" ]; then + # handle repo: definitions + echo "$p" | grep "^repo:" >/dev/null && ORIG_SRPM_PATH=`echo $p | sed "s%^repo:%$REPO_DOWNLOADS_ROOT/%"` + fi + + if [ "${ORIG_SRPM_PATH}x" == "x" ]; then + # handle repo: definitions + echo "$p" | grep "^3rd_party:" >/dev/null && ORIG_SRPM_PATH=`echo $p | sed "s%^3rd_party:%$THIRD_PARTY_ROOT/%"` + fi + + if [ "${ORIG_SRPM_PATH}x" == "x" ]; then + # handle mirror: definitions + echo "$p" | grep "^mirror:" >/dev/null && ORIG_SRPM_PATH=`echo $p | sed "s%^mirror:%$MIRROR_ROOT/%" | sed "s#CentOS/tis-r3-CentOS/kilo/##" | sed "s#CentOS/tis-r3-CentOS/mitaka/##"` + fi + + if [ "${ORIG_SRPM_PATH}x" == "x" ]; then + # we haven't found a valid prefix yet, so assume it's a legacy + # file (mirror: interpretation) + ORIG_SRPM_PATH="$MIRROR_ROOT/$p" + fi + + if [ -f $ORIG_SRPM_PATH ]; then + NAME=`rpm -q --queryformat '%{NAME}\n' -p $ORIG_SRPM_PATH 2>> /dev/null` + if [ $? 
-eq 0 ]; then + PACKAGES_CONSIDERED="$PACKAGES_CONSIDERED $NAME" + fi + fi + done + fi + fi + fi + done + done + + for r in $(find $SRPM_OUT -name '*.src.rpm' | sort -V); do + NAME=$(rpm -q --queryformat '%{NAME}\n' -p $r 2>> /dev/null) + FOUND=0 + for p in $PACKAGES_CONSIDERED; do + if [[ "$NAME" == "$p" || ( "$BUILD_TYPE" != "std" && "$NAME" == "$p-$BUILD_TYPE" ) ]]; then + FOUND=1 + break + fi + done + if [ $FOUND -eq 0 ]; then + for INPUT_DIR in $(find $BUILD_INPUTS -name $NAME | sort -V); do + if [ -d "$INPUT_DIR/rpmbuild/SRPMS" ]; then + clean_srpm_dir "$INPUT_DIR/rpmbuild/SRPMS" + fi + if [ -d $INPUT_DIR ]; then + echo "rm -rf $r" + rm -rf $r + fi + done + if [ -f $r ]; then + rm -f -v $r + fi + fi + done +set +x +fi + +if [ $CLEAN_FLAG -eq 1 ]; then + if [ $ALL -eq 1 ]; then + rm -rf $BUILD_INPUTS + fi +fi + +if [ $EDIT_FLAG -ne 1 ]; then + echo "==== Update repodata =====" + mkdir -p $SRPM_OUT/repodata + for d in `find -L $SRPM_OUT -type d -name repodata`; do + (cd $d/.. + rm -rf repodata + $CREATEREPO `pwd` + ) + done + echo "==== Update repodata complete =====" +fi + +if [ $CLEAN_FLAG -eq 0 ] && [ $EDIT_FLAG -eq 0 ]; then + echo "" + if [ "$SRPM_FAILED_REBUILD_LIST" != "" ]; then + N=`echo "$SRPM_FAILED_REBUILD_LIST" | wc -w` + echo "Failed to build $N packages:" + echo " $SRPM_FAILED_REBUILD_LIST" + fi + if [ "$SRPM_REBUILT_LIST" != "" ]; then + N=`echo "$SRPM_REBUILT_LIST" | wc -w` + echo "Successfully built $N packages:" + echo " $SRPM_REBUILT_LIST" + echo "" + echo "Compiled src.rpm's can be found here: $SRPM_OUT" + fi + if [ "$SRPM_FAILED_REBUILD_LIST" == "" ] && [ "$SRPM_REBUILT_LIST" == "" ]; then + echo "No packages required a rebuild" + fi +fi + + +if [ "$UNRESOLVED_TARGETS" != " " ]; then + echo "" + echo "ERROR: failed to resolve build targets: $UNRESOLVED_TARGETS" + exit 1 +fi + +exit 0 +) 2>&1 | stdbuf -o0 awk '{ print strftime("%H:%M:%S"), $0; fflush(); }' | tee $(date "+$MY_WORKSPACE/build-srpms-serial_%Y-%m-%d_%H-%M-%S.log") ; exit 
${PIPESTATUS[0]} diff --git a/build-tools/build-srpms4 b/build-tools/build-srpms4 new file mode 120000 index 00000000..5301273b --- /dev/null +++ b/build-tools/build-srpms4 @@ -0,0 +1 @@ +build-srpms-parallel \ No newline at end of file diff --git a/build-tools/build_guest/build-guest-image.py b/build-tools/build_guest/build-guest-image.py new file mode 100755 index 00000000..ec26319a --- /dev/null +++ b/build-tools/build_guest/build-guest-image.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python + +# +# Build a bootable guest image from the supplied rootfs archive +# + +import getopt +import guestfs +import os +import sys + + +MBR_FILE='/usr/share/syslinux/mbr.bin' +MBR_SIZE=440 + +def build_image(inputfile, outputfile, extrasize, trace): + g = guestfs.GuestFS(python_return_dict=True) + + # Set the trace flag so that we can see each libguestfs call. + if trace: + g.set_trace(1) + + # Create a raw-format sparse disk image with padding of size + inputsize = os.path.getsize(inputfile) + g.disk_create(outputfile, "raw", inputsize + extrasize) + + # Attach the new disk image to libguestfs. + g.add_drive_opts(outputfile, format="raw", readonly=0) + + # Run the libguestfs back-end. + g.launch() + + # Get the list of devices. Because we only added one drive + # above, we expect that this list should contain a single + # element. + devices = g.list_devices() + assert(len(devices) == 1) + + # Partition the disk as one single MBR partition. + g.part_disk(devices[0], "mbr") + + # Get the list of partitions. We expect a single element, which + # is the partition we have just created. + partitions = g.list_partitions() + assert(len(partitions) == 1) + + # Create a filesystem on the partition. + # NOTE: extlinux does not support 64-bit file systems + g.mkfs("ext4", partitions[0], features="^64bit") + + # Now mount the filesystem so that we can add files. + g.mount(partitions[0], "/") + + # Upload file system files and directories. 
+ g.tar_in(inputfile, "/") + + # Install the boot loader + g.extlinux("/boot") + + # Unmount the file systems. + g.umount_all(); + + # Write the master boot record. + with open(MBR_FILE, mode='rb') as mbr: + mbr_data = mbr.read() + assert(len(mbr_data) == MBR_SIZE) + g.pwrite_device(devices[0], mbr_data, 0) + + # Mark the device as bootable. + g.part_set_bootable(devices[0], 1, 1) + + # Label the boot disk for root identification + g.set_label(partitions[0], "wrs_guest") + + # Shutdown and close guest image + g.shutdown() + g.close() + + +def exit_usage(result=0): + print('USAGE: -i -o [-s ]') + sys.exit(result) + + +def main(argv): + inputfile = None + outputfile = None + extrasize = None + trace = False + + try: + opts, args = getopt.getopt(argv,"hxi:o:s:", + ["input=", "output=", "size="]) + except getopt.GetoptError: + exit_usage(2) + for opt, arg in opts: + if opt == '-h': + exit_usage() + if opt == '-x': + trace = True + elif opt in ("-i", "--input"): + inputfile = arg + elif opt in ("-o", "--output"): + outputfile = arg + elif opt in ("-s", "--size"): + extrasize = int(arg) + + if not inputfile: + print(stderr, "ERROR: missing input file") + exit_usage(-1) + + if not outputfile: + print(stderr, "ERROR: missing output file") + exit_usage(-1) + + if not extrasize: + extrasize = 0 + + build_image(inputfile, outputfile, extrasize, trace) + + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/build-tools/build_guest/image-rt.inc b/build-tools/build_guest/image-rt.inc new file mode 100644 index 00000000..40ac0d56 --- /dev/null +++ b/build-tools/build_guest/image-rt.inc @@ -0,0 +1,39 @@ +# List of CGTS packages to be included/installed in guest image +# If these have dependencies, they will be pulled in automatically +# +build-info + +# custom cloud-init configuration + +# guest extensions +guest-scale-agent +guest-host-comm +guest-client + +# Latest version of the i40e drivers from Intel. 
+kmod-i40e-rt +kmod-i40evf-rt + +# Latest version of the ixgbe drivers from Intel. +kmod-ixgbe-rt +kmod-ixgbevf-rt + +# Configure connectx pro devices to ethernet mode. +# This is needed for PT devices +# mlx4-config + +# Intel QuickAssist +# qat16-guest +# qat16-guest-app +qat17-rt + +# TPM2 libs to enable vTPM on Guest +tss2 +tpm2-tools +kmod-tpm-rt + +# ima plugin for RPM +rpm-plugin-systemd-inhibit + +# Add cfn-push-stats for heat demos +heat-cfntools diff --git a/build-tools/build_guest/image.inc b/build-tools/build_guest/image.inc new file mode 100644 index 00000000..b0f9a418 --- /dev/null +++ b/build-tools/build_guest/image.inc @@ -0,0 +1,44 @@ +# List of CGTS packages to be included/installed in guest image +# If these have dependencies, they will be pulled in automatically +# +build-info + +# custom cloud-init configuration + +# guest extensions +guest-scale-agent +guest-host-comm +guest-client + +# Latest version of the i40e drivers from Intel. +kmod-i40e +kmod-i40evf + +# Latest version of the ixgbe drivers from Intel. +kmod-ixgbe +kmod-ixgbevf + +# Intel QuickAssist +# qat16-guest +# qat16-guest-app +qat17 + +# TPM2 libs to enable vTPM on Guest +tss2 +tpm2-tools +kmod-tpm + +# This will help us have our automation debug TC failures when pings to VMs fail. 
+qemu-guest-agent + +# ima plugin for rpm +rpm-plugin-systemd-inhibit + +# Add debugging tools +perf +zip +unzip +traceroute + +# Add cfn-push-stats for heat demos +heat-cfntools diff --git a/build-tools/build_guest/rootfs-exclude.txt b/build-tools/build_guest/rootfs-exclude.txt new file mode 100644 index 00000000..273a301f --- /dev/null +++ b/build-tools/build_guest/rootfs-exclude.txt @@ -0,0 +1,13 @@ +# exclude special filesystems +/builddir +/dev/* +/proc/* +/tmp/* +/sys/* +/root/rootfs.tar + +# exclude local repo yum configuration +/etc/yum/yum.conf + +# omit platform hooks to check install uuid +/etc/dhcp/dhclient-enter-hooks diff --git a/build-tools/build_guest/rootfs-rt/boot/extlinux.conf b/build-tools/build_guest/rootfs-rt/boot/extlinux.conf new file mode 100644 index 00000000..d57fd306 --- /dev/null +++ b/build-tools/build_guest/rootfs-rt/boot/extlinux.conf @@ -0,0 +1,7 @@ +SERIAL 0 115200 + +DEFAULT linux +LABEL linux + KERNEL vmlinuz + INITRD initramfs.img + APPEND rw root=LABEL=wrs_guest clocksource=pit console=tty0 console=ttyS0 biosdevname=0 net.ifnames=0 no_timer_check audit=0 cgroup_disable=memory isolcpus=1-3 irqaffinity=0 nmi_watchdog=0 softlockup_panic=0 intel_idle.max_cstate=0 processor.max_cstate=1 idle=poll diff --git a/build-tools/build_guest/rootfs-setup.sh b/build-tools/build_guest/rootfs-setup.sh new file mode 100755 index 00000000..1188244a --- /dev/null +++ b/build-tools/build_guest/rootfs-setup.sh @@ -0,0 +1,90 @@ +#!/bin/bash + +BUILD_MODE='' +if [ "$1" == "--rt" ]; then + BUILD_MODE="rt" +fi +if [ "$1" == "--std" ]; then + BUILD_MODE="std" +fi + +# Setup boot directory for syslinux configuration (/boot/extlinux.conf) +ln -s $(ls /boot/vmlinuz-*.x86_64 | head -1) /boot/vmlinuz +ln -s $(ls /boot/initramfs-*.x86_64.img | head -1) /boot/initramfs.img + +# Setup root and wrsroot users +usermod -p $(openssl passwd -1 root) root +useradd -p $(openssl passwd -1 wrsroot) wrsroot + +# Enable SUDO access for wrsroot +echo "wrsroot ALL=(ALL) 
NOPASSWD: ALL" >> /etc/sudoers + +# Enable remote root login to permit automated tools to run privileged commands +sed -i 's%^#\(PermitRootLogin \)%\1%' /etc/ssh/sshd_config +sed -i 's#^\(PermitRootLogin \).*#\1yes#' /etc/ssh/sshd_config + +# Enable password login to permit automated tools to run commands +sed -i 's%^#\(PasswordAuthentication \)%\1%' /etc/ssh/sshd_config +sed -i 's#^\(PasswordAuthentication \).*#\1yes#' /etc/ssh/sshd_config + +# Disable PAM authentication +sed -i 's#^\(UsePAM \).*#\1no#' /etc/ssh/sshd_config + +# Prevent cloud_init for reverting our changes +sed -i 's#^\(ssh_pwauth:\).*#\1 1#' /etc/cloud/cloud.cfg +sed -i 's#^\(disable_root:\).*#\1 0#' /etc/cloud/cloud.cfg + +# Setup SSHD to mark packets for QoS processing in the host (this seems to +# be broken in our version of SSHd so equivalent iptables rules are being +# added to compensate. +echo "IPQoS cs7" >> /etc/ssh/sshd_config + +# Disable reverse path filtering to permit traffic testing from +# foreign routes. +sed -i 's#^\(net.ipv4.conf.*.rp_filter=\).*#\10#' /etc/sysctl.conf + +# Change /etc/rc.local to touch a file to indicate that the init has +# completed. This is required by the AVS vbenchmark tool so that it knows +# that the VM is ready to run. This was added because VM instances take a +# long time (2-3 minutes) to resize their filesystem when run on a system with +# HDD instead of SSD. 
+chmod +x /etc/rc.d/rc.local +echo "touch /var/run/.init-complete" >> /etc/rc.local + +if [ "$BUILD_MODE" == "rt" ]; then + # Adjust system tuning knobs during init when using rt kernel (CGTS-7047) + echo "echo 1 > /sys/devices/virtual/workqueue/cpumask" >> /etc/rc.local + echo "echo 1 > /sys/bus/workqueue/devices/writeback/cpumask" >> /etc/rc.local + echo "echo -1 > /proc/sys/kernel/sched_rt_runtime_us" >> /etc/rc.local + echo "echo 0 > /proc/sys/kernel/timer_migration" >> /etc/rc.local + echo "echo 10 > /proc/sys/vm/stat_interval" >> /etc/rc.local +fi + +# Disable audit service by default +# With this enabled, it causes system delays when running at maximum +# capacity that impacts the traffic processing enough to cause unclean +# traffic runs when doing benchmark tests. +systemctl disable auditd + +if [ "$BUILD_MODE" == "rt" ]; then + # Additional services to disable on rt guest (CGTS-7047) + systemctl disable polkit.service + systemctl disable tuned.service +fi + +# Clean the yum cache. We don't want to maintain it on the guest file system. 
+yum clean all + +# update /etc/rsyslog.conf to have OmitLocalLogging off +sed -i 's#OmitLocalLogging on#OmitLocalLogging off#g' /etc/rsyslog.conf + +# select correct kernel and initrd +if [ "$BUILD_MODE" == "rt" ]; then + PATTERN=$(rpm -q --qf '%{VERSION}-%{RELEASE}' kernel-rt) +else + PATTERN=$(rpm -q --qf '%{VERSION}-%{RELEASE}' kernel) +fi +cd /boot +rm -f vmlinuz initramfs.img +ln -s $(ls -1 vmlinuz-$PATTERN*) vmlinuz +ln -s $(ls -1 initramfs-$PATTERN*img) initramfs.img diff --git a/build-tools/build_guest/rootfs-std/boot/extlinux.conf b/build-tools/build_guest/rootfs-std/boot/extlinux.conf new file mode 100644 index 00000000..fff8aadd --- /dev/null +++ b/build-tools/build_guest/rootfs-std/boot/extlinux.conf @@ -0,0 +1,7 @@ +SERIAL 0 115200 + +DEFAULT linux +LABEL linux + KERNEL vmlinuz + INITRD initramfs.img + APPEND rw root=LABEL=wrs_guest clocksource=pit console=tty0 console=ttyS0 biosdevname=0 net.ifnames=0 no_timer_check diff --git a/build-tools/build_guest/rootfs/etc/cloud/cloud.cfg.d/99_wrs-datasources.cfg b/build-tools/build_guest/rootfs/etc/cloud/cloud.cfg.d/99_wrs-datasources.cfg new file mode 100644 index 00000000..0fc57890 --- /dev/null +++ b/build-tools/build_guest/rootfs/etc/cloud/cloud.cfg.d/99_wrs-datasources.cfg @@ -0,0 +1,18 @@ +# Override the datasource list to use only those that are expected (and needed) +# to work in our lab environment. +# +datasource_list: + - NoCloud + - ConfigDrive + - Ec2 + - None + +# Adjust the Ec2 max_wait to be 30 seconds instead of the default 120 seconds, +# and set the list of URLs to be the only one that we expect to work in our lab +# environment so that we avoid DNS lookup failures for alternate choices. 
+# +datasource: + Ec2: + timeout: 10 + max_wait: 30 + metadata_urls: ['http://169.254.169.254'] diff --git a/build-tools/build_guest/rootfs/etc/dhcp/dhclient.conf b/build-tools/build_guest/rootfs/etc/dhcp/dhclient.conf new file mode 100644 index 00000000..356713a5 --- /dev/null +++ b/build-tools/build_guest/rootfs/etc/dhcp/dhclient.conf @@ -0,0 +1,21 @@ +## Use a CID based on the hardware address for both IPv4 and IPv6. This mostly +## useful for IPv6 to ensure that the client is not using a random DUID for the +## CID on each reboot. +send dhcp6.client-id = concat(00:03:00, hardware); +send dhcp-client-identifier = concat(00:03:00, hardware); + +## Defaults for all interfaces +request interface-mtu, subnet-mask, broadcast-address, time-offset, + classless-static-routes; + +interface "eth0" { + ## Override for eth0 to add requests for attributes that we only care to + ## configure for our primary network interface + request interface-mtu, subnet-mask, broadcast-address, time-offset, + domain-name, domain-name-servers, host-name, + classless-static-routes, routers; +} + +timeout 15; + +retry 5; diff --git a/build-tools/build_guest/rootfs/etc/iptables.rules b/build-tools/build_guest/rootfs/etc/iptables.rules new file mode 100644 index 00000000..293aee95 --- /dev/null +++ b/build-tools/build_guest/rootfs/etc/iptables.rules @@ -0,0 +1,12 @@ +*mangle +:PREROUTING ACCEPT [0:0] +:INPUT ACCEPT [0:0] +:FORWARD ACCEPT [0:0] +:OUTPUT ACCEPT [0:0] +:POSTROUTING ACCEPT [0:0] +-A OUTPUT -o eth0 -p tcp --sport 22 -j DSCP --set-dscp-class CS7 +-A OUTPUT -o eth0 -p tcp --dport 22 -j DSCP --set-dscp-class CS7 +-A OUTPUT -o eth0 -p udp --sport 67:68 -j DSCP --set-dscp-class CS7 +-A OUTPUT -o eth0 -p udp --dport 67:68 -j DSCP --set-dscp-class CS7 +-A OUTPUT -o eth0 -d 169.254.169.254 -j DSCP --set-dscp-class CS7 +COMMIT diff --git a/build-tools/build_guest/rootfs/etc/modprobe.d/floppy.conf b/build-tools/build_guest/rootfs/etc/modprobe.d/floppy.conf new file mode 100644 index 
00000000..81e9704e --- /dev/null +++ b/build-tools/build_guest/rootfs/etc/modprobe.d/floppy.conf @@ -0,0 +1 @@ +blacklist floppy diff --git a/build-tools/build_guest/rootfs/etc/modprobe.d/wrs_avp.conf b/build-tools/build_guest/rootfs/etc/modprobe.d/wrs_avp.conf new file mode 100644 index 00000000..cf8f9947 --- /dev/null +++ b/build-tools/build_guest/rootfs/etc/modprobe.d/wrs_avp.conf @@ -0,0 +1 @@ +options wrs_avp kthread_cpulist=0-7 kthread_policy=0 diff --git a/build-tools/build_guest/rootfs/etc/modules-load.d/wrs_avp.conf b/build-tools/build_guest/rootfs/etc/modules-load.d/wrs_avp.conf new file mode 100644 index 00000000..988b8bff --- /dev/null +++ b/build-tools/build_guest/rootfs/etc/modules-load.d/wrs_avp.conf @@ -0,0 +1 @@ +wrs_avp diff --git a/build-tools/build_guest/rootfs/etc/sysconfig/network-scripts/ifcfg-eth0 b/build-tools/build_guest/rootfs/etc/sysconfig/network-scripts/ifcfg-eth0 new file mode 100644 index 00000000..73ac446c --- /dev/null +++ b/build-tools/build_guest/rootfs/etc/sysconfig/network-scripts/ifcfg-eth0 @@ -0,0 +1,8 @@ +DEVICE=eth0 +BOOTPROTO=dhcp +ONBOOT=yes +TYPE=Ethernet +USERCTL=yes +PEERDNS=yes +IPV6INIT=no +PERSISTENT_DHCLIENT=1 diff --git a/build-tools/build_guest/rootfs/etc/udev/rules.d/65-renumber-net.rules b/build-tools/build_guest/rootfs/etc/udev/rules.d/65-renumber-net.rules new file mode 100644 index 00000000..f5c68e36 --- /dev/null +++ b/build-tools/build_guest/rootfs/etc/udev/rules.d/65-renumber-net.rules @@ -0,0 +1,4 @@ +# Renames interfaces to be sequential ethX interface names regardless of interface type +# This is required to avoid a kernel host patch that starts number at 1000 and to +# override slot specific naming for non-kernel interfaces. 
+ACTION=="add", SUBSYSTEM=="net", DRIVERS=="?*", ATTR{type}=="1", KERNEL=="eth?*" PROGRAM=="/usr/lib/udev/renumber_device", NAME="$result" diff --git a/build-tools/build_guest/rootfs/usr/lib/udev/renumber_device b/build-tools/build_guest/rootfs/usr/lib/udev/renumber_device new file mode 100755 index 00000000..c9d184b5 --- /dev/null +++ b/build-tools/build_guest/rootfs/usr/lib/udev/renumber_device @@ -0,0 +1,12 @@ +#!/bin/bash +# Renames interfaces to be sequential ethX interface names regardless of interface type +# This is required to avoid a kernel host patch that starts number at 1000 and to +# override slot specific naming for non-kernel interfaces. + +# The ifindex for the first interface that is not 'lo' will be 2. +# Therefore adjust the numbering to start at 0 for eth0..ethN naming + +INDEX=$(($IFINDEX-2)) +echo "eth$INDEX" + +exit 0 diff --git a/build-tools/build_guest/rpm-install-list-rt.txt b/build-tools/build_guest/rpm-install-list-rt.txt new file mode 100644 index 00000000..aaf4e2e5 --- /dev/null +++ b/build-tools/build_guest/rpm-install-list-rt.txt @@ -0,0 +1,305 @@ +# list of standard packages to include in the guest image +acl +acpid +audit +audit-libs +audit-libs-python +authconfig +basesystem +bash +bind-libs-lite +bind-license +binutils +bonnie++ +bridge-utils +btrfs-progs +bzip2-libs +ca-certificates +centos-logos +centos-release +checkpolicy +chkconfig +chrony +cloud-init +cloud-utils-growpart +coreutils +cpio +cracklib +cracklib-dicts +cronie +cronie-anacron +crontabs +cryptsetup-libs +curl +cyrus-sasl-lib +dbus +dbus-glib +dbus-libs +dbus-python +device-mapper +device-mapper-libs +dhclient +dhcp-common +dhcp-libs +diffutils +dmidecode +dnsmasq +dracut +dracut-config-generic +dracut-config-rescue +dracut-network +e2fsprogs +e2fsprogs-libs +elfutils-libelf +elfutils-libs +ethtool +expat +file +file-libs +filesystem +findutils +fipscheck +fipscheck-lib +freetype +gawk +gdbm +gettext +gettext-libs +glib2 +glibc +glibc-common +glib-networking +gmp 
+gnupg2 +gnutls +gobject-introspection +gpgme +grep +groff-base +grub2 +grub2-tools +grubby +gsettings-desktop-schemas +gssproxy +gzip +hardlink +hostname +info +initscripts +iperf3 +iproute +iptables +iputils +irqbalance +iwl7265-firmware +jansson +jbigkit-libs +json-c +kbd +kbd-legacy +kbd-misc +kernel-rt +kernel-rt-tools +kernel-rt-tools-libs +kexec-tools +keyutils +keyutils-libs +kmod +kmod-libs +kpartx +krb5-libs +less +libacl +libassuan +libattr +libbasicobjects +libblkid +libcap +libcap-ng +libcgroup +libcollection +libcom_err +libcroco +libcurl +libdaemon +libdb +libdb-utils +libedit +libestr +libevent +libffi +libgcc +libgcrypt +libgomp +libgpg-error +libgudev1 +libidn +libini_config +libjpeg-turbo +libmnl +libmodman +libmount +libndp +libnetfilter_conntrack +libnfnetlink +libnfsidmap +libnl3 +libnl3-cli +libpath_utils +libpcap +libpipeline +libproxy +libpwquality +libref_array +libselinux +libselinux-python +libselinux-utils +libsemanage +libsemanage-python +libsepol +libsoup +libss +libssh2 +libstdc++ +libsysfs +libtalloc +libtasn1 +libteam +libtevent +libtiff +libtirpc +libunistring +libuser +libutempter +libuuid +libverto +libverto-tevent +libwebp +libxml2 +libyaml +logrotate +lua +lzo +make +man-db +mariadb-libs +microcode_ctl +mozjs17 +ncurses +ncurses-base +ncurses-libs +nettle +net-tools +newt +newt-python +nfs-utils +nspr +nss +nss-softokn +nss-softokn-freebl +nss-sysinit +nss-tools +nss-util +numactl-libs +openssh +openssh-clients +openssh-server +openssl +openssl-libs +os-prober +p11-kit +p11-kit-trust +pam +parted +passwd +pciutils +pciutils-libs +pcre +pinentry +pkgconfig +policycoreutils +policycoreutils-python +polkit +polkit-pkla-compat +popt +postfix +ppp +procps-ng +pth +pygobject3-base +pygpgme +pyliblzma +python +python-backports +python-backports-ssl_match_hostname +python-chardet +python-cheetah +python-configobj +python-decorator +python-iniparse +python-IPy +python-jsonpatch +python-jsonpointer +python-kitchen +python-libs 
+python-markdown +python-perf +python-pillow +python-prettytable +python-pycurl +python-pygments +python-pyudev +python-requests +python2-six +python-urlgrabber +python-urllib3 +pyxattr +PyYAML +qrencode-libs +quota +quota-nls +rdma +readline +rootfiles +rpcbind +rpm +rpm-build-libs +rpm-libs +rpm-python +rsync +rsyslog +sed +rt-setup +rtctl +shadow-utils +shared-mime-info +slang +snappy +sqlite +sudo +systemd +systemd-libs +systemd-sysv +sysvinit-tools +tar +tcpdump +tcp_wrappers +tcp_wrappers-libs +teamd +trousers +tuned +tzdata +ustr +util-linux +vim-minimal +virt-what +wget +which +wpa_supplicant +xz +xz-libs +yum +yum-metadata-parser +yum-plugin-fastestmirror +yum-utils +zlib diff --git a/build-tools/build_guest/rpm-install-list.txt b/build-tools/build_guest/rpm-install-list.txt new file mode 100644 index 00000000..f5cac374 --- /dev/null +++ b/build-tools/build_guest/rpm-install-list.txt @@ -0,0 +1,303 @@ +# list of standard packages to include in the guest image +acl +acpid +audit +audit-libs +audit-libs-python +authconfig +basesystem +bash +bind-libs-lite +bind-license +binutils +bonnie++ +bridge-utils +btrfs-progs +bzip2-libs +ca-certificates +centos-logos +centos-release +checkpolicy +chkconfig +chrony +cloud-init +cloud-utils-growpart +coreutils +cpio +cracklib +cracklib-dicts +cronie +cronie-anacron +crontabs +cryptsetup-libs +curl +cyrus-sasl-lib +dbus +dbus-glib +dbus-libs +dbus-python +device-mapper +device-mapper-libs +dhclient +dhcp-common +dhcp-libs +diffutils +dmidecode +dnsmasq +dracut +dracut-config-generic +dracut-config-rescue +dracut-network +e2fsprogs +e2fsprogs-libs +elfutils-libelf +elfutils-libs +ethtool +expat +file +file-libs +filesystem +findutils +fipscheck +fipscheck-lib +freetype +gawk +gdbm +gettext +gettext-libs +glib2 +glibc +glibc-common +glib-networking +gmp +gnupg2 +gnutls +gobject-introspection +gpgme +grep +groff-base +grub2 +grub2-tools +grubby +gsettings-desktop-schemas +gssproxy +gzip +hardlink +hostname +info 
+initscripts +iperf3 +iproute +iptables +iputils +irqbalance +iwl7265-firmware +jansson +jbigkit-libs +json-c +kbd +kbd-legacy +kbd-misc +kernel +kernel-tools +kernel-tools-libs +kexec-tools +keyutils +keyutils-libs +kmod +kmod-libs +kpartx +krb5-libs +less +libacl +libassuan +libattr +libbasicobjects +libblkid +libcap +libcap-ng +libcgroup +libcollection +libcom_err +libcroco +libcurl +libdaemon +libdb +libdb-utils +libedit +libestr +libevent +libffi +libgcc +libgcrypt +libgomp +libgpg-error +libgudev1 +libidn +libini_config +libjpeg-turbo +libmnl +libmodman +libmount +libndp +libnetfilter_conntrack +libnfnetlink +libnfsidmap +libnl3 +libnl3-cli +libpath_utils +libpcap +libpipeline +libproxy +libpwquality +libref_array +libselinux +libselinux-python +libselinux-utils +libsemanage +libsemanage-python +libsepol +libsoup +libss +libssh2 +libstdc++ +libsysfs +libtalloc +libtasn1 +libteam +libtevent +libtiff +libtirpc +libunistring +libuser +libutempter +libuuid +libverto +libverto-tevent +libwebp +libxml2 +libyaml +logrotate +lua +lzo +make +man-db +mariadb-libs +microcode_ctl +mozjs17 +ncurses +ncurses-base +ncurses-libs +nettle +net-tools +newt +newt-python +nfs-utils +nspr +nss +nss-softokn +nss-softokn-freebl +nss-sysinit +nss-tools +nss-util +numactl-libs +openssh +openssh-clients +openssh-server +openssl +openssl-libs +os-prober +p11-kit +p11-kit-trust +pam +parted +passwd +pciutils +pciutils-libs +pcre +pinentry +pkgconfig +policycoreutils +policycoreutils-python +polkit +polkit-pkla-compat +popt +postfix +ppp +procps-ng +pth +pygobject3-base +pygpgme +pyliblzma +python +python-backports +python-backports-ssl_match_hostname +python-chardet +python-cheetah +python-configobj +python-decorator +python-iniparse +python-IPy +python-jsonpatch +python-jsonpointer +python-kitchen +python-libs +python-markdown +python-perf +python-pillow +python-prettytable +python-pycurl +python-pygments +python-pyudev +python-requests +python2-six +python-urlgrabber +python-urllib3 
+pyxattr +PyYAML +qrencode-libs +quota +quota-nls +rdma +readline +rootfiles +rpcbind +rpm +rpm-build-libs +rpm-libs +rpm-python +rsync +rsyslog +sed +setup +shadow-utils +shared-mime-info +slang +snappy +sqlite +sudo +systemd +systemd-libs +systemd-sysv +sysvinit-tools +tar +tcpdump +tcp_wrappers +tcp_wrappers-libs +teamd +trousers +tzdata +ustr +util-linux +vim-enhanced +virt-what +wget +which +wpa_supplicant +xz +xz-libs +yum +yum-metadata-parser +yum-plugin-fastestmirror +yum-utils +zlib diff --git a/build-tools/build_guest/rpm-remove-list.txt b/build-tools/build_guest/rpm-remove-list.txt new file mode 100644 index 00000000..4c355b2e --- /dev/null +++ b/build-tools/build_guest/rpm-remove-list.txt @@ -0,0 +1,7 @@ +# list of packages to be excluded from guest image +cpp +gcc +gcc-c++ +gdb +linux-firmware +rpm-build diff --git a/build-tools/build_iso/anaconda-ks.cfg b/build-tools/build_iso/anaconda-ks.cfg new file mode 100644 index 00000000..0f325b03 --- /dev/null +++ b/build-tools/build_iso/anaconda-ks.cfg @@ -0,0 +1,40 @@ +#version=DEVEL +# System authorization information +auth --enableshadow --passalgo=sha512 +# Use CDROM installation media +cdrom +# Use graphical install +graphical +# Run the Setup Agent on first boot +firstboot --enable +ignoredisk --only-use=sda +# Keyboard layouts +keyboard --vckeymap=us --xlayouts='us' +# System language +lang en_US.UTF-8 + +# Network information +network --bootproto=dhcp --device=enp0s3 --onboot=off --ipv6=auto +network --bootproto=static --device=enp0s8 --ip=10.10.10.10 --netmask=255.255.255.0 --ipv6=auto --activate +network --device=lo --hostname=localhost.localdomain + +#Root password +rootpw --lock +# System timezone +timezone America/New_York --isUtc +user --groups=wheel --name=wrsroot --password=$6$c3gaCcJlh.rp//Yx$/mIjNNoUDS1qZldBL29YSJdsA9ttPA/nXN1CPsIcCmionXC22APT3IoRSd9j5dPiZoviDdQf7YxLsOYdieOQr/ --iscrypted --gecos="wrsroot" +# System bootloader configuration +bootloader --location=mbr --boot-drive=sda 
+autopart --type=lvm +# Partition clearing information +clearpart --all --initlabel --drives=sda + +%packages +@^minimal +@core + +%end + +%addon com_redhat_kdump --disable --reserve-mb='auto' + +%end diff --git a/build-tools/build_iso/cgts_deps.sh b/build-tools/build_iso/cgts_deps.sh new file mode 100755 index 00000000..b39014a0 --- /dev/null +++ b/build-tools/build_iso/cgts_deps.sh @@ -0,0 +1,296 @@ +#!/bin/env bash + +# Here's the score, kids. There are a few different places from which we can +# get packages. In priority order, they are: +# +# The CGTS packages we've built ourselves +# The CGTS packages that Jenkins has built (coming soon to a script near you) +# The CentOS packages in various repos +# - Base OS +# - OpenStack Repos +# EPEL (Extra Packages for Enterprise Linux) +# +# This script can function in two ways: +# If you specify a filename, it assumes the file is a list of packages you +# want to install, or dependencies you want to meet. It installs whatever +# is in the list into current directory. Failure to find a dependency +# results in a return code of 1 +# +# If no file is specified, we generate a file ($DEPLISTFILE) of dependencies +# based on current directory +# +# We then continuously loop through generating new dependencies and installing +# them until either all dependencies are met, or we cannot install anymore +# +# We also log where dependencies were installed from into +# export/dist/report_deps.txt +# + +# This function generates a simple file of dependencies we're trying to resolve +function generate_dep_list { + TMP_RPM_DB=$(mktemp -d $(pwd)/tmp_rpm_db_XXXXXX) + mkdir -p $TMP_RPM_DB + rpm --initdb --dbpath $TMP_RPM_DB + rpm --dbpath $TMP_RPM_DB --test -Uvh --replacefiles '*.rpm' >> $DEPDETAILLISTFILE 2>&1 + rpm --dbpath $TMP_RPM_DB --test -Uvh --replacefiles '*.rpm' 2>&1 \ + | grep -v "error:" \ + | grep -v "warning:" \ + | grep -v "Preparing..." 
\ + | sed "s/ is needed by.*$//" | sed "s/ >=.*$//" | sort -u > $DEPLISTFILE + \rm -rf $TMP_RPM_DB +} + +# Takes a list of requirements (either explcit package name, or capabilities +# to provide) and install packages to meet those dependancies +# +# We take the list of requirements and first try to look them up based on +# package name. If we can't find a package with the name of the requirement, +# we use --whatprovides to complete the lookup. +# +# The reason for this initial name-based attempt is that a couple of funky +# packages (notably -devel packages) have "Provides:" capabilities which +# conflict with named packages. So if explictly say we want "xyz" then we'll +# install the "xyz" package, rather than "something-devel" which has "xyz" +# capabilities. +function install_deps { + local DEP_LIST="" + local DEP_LIST_FILE="$1" + + # Temporary files are used in a few different ways + # Here we essenitally create variable aliases to make it easier to read + # the script + local UNSORTED_PACKAGES=$TMPFILE + local SORTED_PACKAGES=$TMPFILE1 + local UNRESOLVED_PACKAGES=$TMPFILE2 + + rm -f $UNSORTED_PACKAGES + + while read DEP + do + DEP_LIST="${DEP_LIST} ${DEP}" + done < $DEP_LIST_FILE + + echo "Debug: List of deps to resolve: ${DEP_LIST}" + + if [ -z "${DEP_LIST}" ] + then + return 0 + fi + + # go through each repo and convert deps to packages based on package name + for REPOID in `grep '^[[].*[]]$' $YUM | grep -v '[[]main[]]' | awk -F '[][]' '{print $2 }'`; do + echo "TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch ${DEP_LIST} --qf='%{name}'" + TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --qf='%{name}' ${DEP_LIST} | sed "s/kernel-debug/kernel/g" >> $UNSORTED_PACKAGES + \rm -rf $TMP_DIR/yum-$USER-* + done + sort $UNSORTED_PACKAGES -u > $SORTED_PACKAGES + + # figure out any dependancies which could not be resolved based on + # package name. 
We use --whatpovides to deal with this + # + # First, we build a new DEP_LIST based on what was NOT found in + # search-by-name attempt + sort $DEP_LIST_FILE -u > $TMPFILE + comm -2 -3 $TMPFILE $SORTED_PACKAGES > $UNRESOLVED_PACKAGES + + # If there are any requirements not resolved, look up the packages with + # --whatprovides + if [ -s $UNRESOLVED_PACKAGES ]; then + DEP_LIST="" + \cp $SORTED_PACKAGES $UNSORTED_PACKAGES + while read DEP + do + DEP_LIST="${DEP_LIST} ${DEP}" + done < $UNRESOLVED_PACKAGES + + DEP_LIST=$(echo "$DEP_LIST" | sed 's/^ //g') + if [ "$DEP_LIST" != "" ]; then + + for REPOID in `grep '^[[].*[]]$' $YUM | grep -v '[[]main[]]' | awk -F '[][]' '{print $2 }'`; do + echo "TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --whatprovides ${DEP_LIST} --qf='%{name}'" + TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --qf='%{name}' --whatprovides ${DEP_LIST} | sed "s/kernel-debug/kernel/g" >> $UNSORTED_PACKAGES + \rm -rf $TMP_DIR/yum-$USER-* + done + fi + + sort -u $UNSORTED_PACKAGES > $SORTED_PACKAGES + fi + + # clean up + \rm -f $UNSORTED_PACKAGES $UNRESOLVED_PACKAGES + + # We now have, in SORTED_PACKAGES, a list of all packages that we need to install + # to meet our dependancies + DEP_LIST=" " + while read DEP + do + DEP_LIST="${DEP_LIST}${DEP} " + done < $SORTED_PACKAGES + rm $SORTED_PACKAGES + + # go through each repo and install packages + local TARGETS=${DEP_LIST} + echo "Debug: Resolved list of deps to install: ${TARGETS}" + local UNRESOLVED + for REPOID in `grep '^[[].*[]]$' $YUM | grep -v '[[]main[]]' | awk -F '[][]' '{print $2 }'`; do + UNRESOLVED="$TARGETS" + + if [[ ! 
-z "${TARGETS// }" ]]; then + REPO_PATH=$(cat $YUM | sed -n "/^\[$REPOID\]\$/,\$p" | grep '^baseurl=' | head -n 1 | awk -F 'file://' '{print $2}' | sed 's:/$::') + >&2 echo "TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --resolve $TARGETS --qf='%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}'" + TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --resolve $TARGETS --qf="%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}" | sort -r -V > $TMPFILE + \rm -rf $TMP_DIR/yum-$USER-* + + while read STR + do + >&2 echo "STR=$STR" + if [ "x$STR" == "x" ]; then + continue + fi + + PKG=`echo $STR | cut -d " " -f 1` + PKG_FILE=`echo $STR | cut -d " " -f 2` + PKG_REL_PATH=`echo $STR | cut -d " " -f 3` + PKG_PATH="${REPO_PATH}/${PKG_REL_PATH}" + + >&2 echo "Installing PKG=$PKG PKG_FILE=$PKG_FILE PKG_REL_PATH=$PKG_REL_PATH PKG_PATH=$PKG_PATH from repo $REPOID" + cp $PKG_PATH . + if [ $? -ne 0 ] + then + >&2 echo " Here's what I have to work with..." + >&2 echo " TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --resolve $PKG --qf=\"%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}\"" + >&2 echo " PKG=$PKG PKG_FILE=$PKG_FILE REPO_PATH=$REPO_PATH PKG_REL_PATH=$PKG_REL_PATH PKG_PATH=$PKG_PATH" + fi + + echo $UNRESOLVED | grep $PKG >> /dev/null + if [ $? 
-eq 0 ]; then + echo "$PKG found in $REPOID as $PKG" >> $BUILT_REPORT + echo "$PKG_PATH" >> $BUILT_REPORT + UNRESOLVED=$(echo "$UNRESOLVED" | sed "s# $PKG # #g") + else + echo "$PKG satisfies unknown target in $REPOID" >> $BUILT_REPORT + echo " but it doesn't match targets, $UNRESOLVED" >> $BUILT_REPORT + echo " path $PKG_PATH" >> $BUILT_REPORT + FOUND_UNKNOWN=1 + fi + done < $TMPFILE #<<< "$(TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --resolve $TARGETS --qf=\"%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}\" | sort -r -V)" + \rm -rf $TMP_DIR/yum-$USER-* + TARGETS="$UNRESOLVED" + fi + done + >&2 echo "Debug: Packages still unresolved: $UNRESOLVED" + echo "Debug: Packages still unresolved: $UNRESOLVED" >> $WARNINGS_REPORT + echo "Debug: Packages still unresolved: $UNRESOLVED" >> $BUILT_REPORT + >&2 echo "" +} + +function check_all_explicit_deps_installed +{ + + PKGS_TO_CHECK=" " + while read PKG_TO_ADD + do + PKGS_TO_CHECK="$PKGS_TO_CHECK ${PKG_TO_ADD}" + done < $DEPLISTFILE + rpm -qp $MY_WORKSPACE/export/dist/isolinux/Packages/*.rpm --qf="%{name}\n" --nosignature > $TMPFILE + + while read INSTALLED_PACKAGE + do + echo $PKGS_TO_CHECK | grep -q "${INSTALLED_PACKAGE}" + if [ $? -eq 0 ]; then + PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/^${INSTALLED_PACKAGE} //"` + PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/ ${INSTALLED_PACKAGE} / /"` + PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/ ${INSTALLED_PACKAGE}\$//"` + PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/^${INSTALLED_PACKAGE}\$//"` + fi + done < $TMPFILE + + if [ -z "$PKGS_TO_CHECK" ] + then + >&2 echo "All explicitly specified packages resolved!" 
+ else + >&2 echo "Could not resolve packages: $PKGS_TO_CHECK" + return 1 + fi + return 0 +} + +ATTEMPTED=0 +DISCOVERED=0 +OUTPUT_DIR=$MY_WORKSPACE/export +TMP_DIR=$MY_WORKSPACE/tmp +YUM=$OUTPUT_DIR/yum.conf +DEPLISTFILE=$OUTPUT_DIR/deps.txt +DEPDETAILLISTFILE=$OUTPUT_DIR/deps_detail.txt + +BUILT_REPORT=$OUTPUT_DIR/local.txt +WARNINGS_REPORT=$OUTPUT_DIR/warnings.txt +LAST_TEST=$OUTPUT_DIR/last_test.txt +TMPFILE=$OUTPUT_DIR/cgts_deps_tmp.txt +TMPFILE1=$OUTPUT_DIR/cgts_deps_tmp1.txt +TMPFILE2=$OUTPUT_DIR/cgts_deps_tmp2.txt + +touch "$BUILT_REPORT" +touch "$WARNINGS_REPORT" + +for i in "$@" +do +case $i in + -d=*|--deps=*) + DEPS="${i#*=}" + shift # past argument=value + ;; +esac +done + +mkdir -p $TMP_DIR + +rm -f "$DEPDETAILLISTFILE" +# FIRST PASS we are being given a list of REQUIRED dependencies +if [ "${DEPS}x" != "x" ]; then + cat $DEPS | grep -v "^#" | sed '/^\s*$/d' > $DEPLISTFILE + install_deps $DEPLISTFILE + if [ $? -ne 0 ]; then + exit 1 + fi +fi + +# check that we resolved them all +check_all_explicit_deps_installed +if [ $? -ne 0 ]; then + >&2 echo "Error -- could not install all explicitly listed packages" + exit 1 +fi + +ALL_RESOLVED=0 + +while [ $ALL_RESOLVED -eq 0 ]; do + cp $DEPLISTFILE $DEPLISTFILE.old + generate_dep_list + if [ ! -s $DEPLISTFILE ]; then + # no more dependencies! + ALL_RESOLVED=1 + else + DIFFLINES=`diff $DEPLISTFILE.old $DEPLISTFILE | wc -l` + if [ $DIFFLINES -eq 0 ]; then + >&2 echo "Warning: Infinite loop detected in dependency resolution. See $DEPLISTFILE for details -- exiting" + >&2 echo "These RPMS had problems (likely version conflicts)" + >&2 cat $DEPLISTFILE + + echo "Warning: Infinite loop detected in dependency resolution See $DEPLISTFILE for details -- exiting" >> $WARNINGS_REPORT + echo "These RPMS had problems (likely version conflicts)" >> $WARNINGS_REPORT + cat $DEPLISTFILE >> $WARNINGS_REPORT + + date > $LAST_TEST + + rm -f $DEPLISTFILE.old + exit 1 # nothing fixed + fi + install_deps $DEPLISTFILE + if [ $? 
-ne 0 ]; then + exit 1 + fi + fi +done + +exit 0 diff --git a/build-tools/build_iso/comps.xml.gz b/build-tools/build_iso/comps.xml.gz new file mode 100644 index 0000000000000000000000000000000000000000..da98749f35ae044ba71048c42d9f0281429e984a GIT binary patch literal 160726 zcmV(xK7M1LSA6N! zy(XQc$If)RU$RP4A(d6CD%V8@o{%sGn-JU+%h+xwn6?l?7NQW6E!$W;ki3h3aMlp) z#Nhuhd+(zXU}H&D>Y1lm3l-i!8~WwIZw7yH>YvVjdgj8PKRd@FdeX4iXP=+``1fbnK1mL7t!B`5e!0@+B9a`7B?W zffUB`*Lc1_5qQqSvy-T>c34#YO;HOJU*SuWJYV7YBF~p8EacC3mi*=KKl$Gh9)TV> zs~hSMQ;88~m^w>sJx}QuJF!INfx*8x)>h6QRh39wX6H2fvJy$Cjuz|0Cu32vC7GtK zsydURN`zikC!Qe_c(#`5Wg0TRPG&iVp>8c>SW-;66L-vkZY$P!afufjyy()iK+g=L zj7?t5QoPCEyW{#bbVU!!LGrT;{ zSJS+lQBOjz-_JHg#NcMIGt^0hRh_zHJ7xm)9F z(X7L z@h{K7Hu3YFE4$b4?{19k&i=Iv`Gr3L`NGU@?iQ^L8%M~avK28E1DB@58*B7nV`Xt! zih@SKBrVERC2UHjB3o=oH(A(GR8$R^Zpd25(j7A*GYQshB_g39>%^&(hYNVC$m4e? zJ+-yW5}Fd%Q`!iVSX`A-%J(xO9ZCu7rr5+R*|Mb+Brj-ND^ReiB&5jJijPt|ZE{xV zchqv`K*K|)m9VPlc3e)#8XI-Y!;~xqLO;bmI8S{L`pBTVU}3W6qdK^Ep3@>ySRR$4 zx^*x`p~FX5SX4odNES0BQzGq4Q6-a_9+8j~2%|EKY^_I?7)kgz#v`jT)*3eZV5-)-rS*6mn!h&`4oor1+?n*x|q7#UgC%A=t+G z!)e^;AI?CSd7Hb4uVNGfpa+zJ4zf!^7a#=zw1Ec0988+$4<}JocsRz6Q6Rk31MLCg zPl#oKN<-NbL^%ne3eikraiVx6gy$I4+(%o576O}8`mpj*r;y7Rr$rpLxdM?CTN$}k z%A2Dns6EI#`KH!#kX)hUS=Ot*1Mx?Eag3?}`H$OqkZ}`Ig-D=;t+nLUb&{TAJ{pe- z(8sXG$gsvR8L)$mQ+yPN6lTy=gjxT#f<$?0dqN;Rj8nxMdV>GZBITn-q+expPj}7|uM|$^4yCiaVJ#N_o69z61m>mZo2>UVC}70LoZfF%w$Z&iL(}@$sGU zF{DoycE*=?GP#}cTNnU2>7||Yot?2v@0Am(T$$9jEU9w25d3&0gHMcuM%SR|Bhc$6 zNS&J@1#gvC=6Ge@OMxeBcJL21KbMlq6b2QrBJz6I7%Po7`2tBxw+d^?!79y(2*{q| zm6BIim*li)eY_>oLT~3@-<)FmSHIf7o@4vj`>$_)wU;mLZ;b7KTiu_y&0f_?`;W5w z-(F$+_pZFo-ea%o?(1<+Z)LCNa(gp3*`7Pg_P*T!p1t~4d-Ej}@!iDU^wi#T&PR=e zs{74>I$M1Jf&U(x3Gr&3*QR;3N*NFaoux(Pf+|dTbG&+oQtt5Ty5Iw%9A(eo6Sd|w zdS)pFo@q}}E)eA=Mbm;Gp6p$Ucg;(x0|&&Pkj}k83U`_^)+qps3aIC`DPApkm0j~# zTNHl!hqU!?NF#?O9T&K1T8zX30}IY##arDf-pVwIs}L@HNWpV=DX=P5p%N`Wpw=(( z+yh#wvfi4O6BK@+0mD>`EVOInqj6op!x69m@ke}R%)=9pAp??Ti3+;BROTz`!_3!E 
zDn_7-N<|6*=R9AT=A}9OL-GY)+VBcNcv5K91F6eY?gQe+&?GOxOhiT!1+*Agq`Y;M zyqN2~z5M> zQ`%Y?M4~z~_0h}9ML=Ydr;lD#0xCT%*mefA!L21pjq5`DdK<+l5z^V^;}k^AvQ0Zxb-p_FZlKvp0hwza^u%1|53e$hWD1KHl{hGVhN{{s zV2v`$A~lE7P!tf}@ybUIKb=BI_#48&R}l=a!M5er`PwvhT>`)mo)O>}GSdi-TNydR z&JFHXL>8m?rk7o&aE7~giEW;{**47X4R$jk8RPDK%Fbd*#8JjFFlkl9A$f|sC1P}` z{4qi6qa)sd<6-=}F!aV@FwzKp{Pw7J0pG*8&oo__6oC6-Qoz%j6e9Ov)E|NWV#GI^ z>ogHIppXWN7GwmnIc;W-Gv@}XKF?kP?cJW~id9a_f=puYb2^y?SjUD4cdZ0v5#?tVSL3;OimH(pF$YZbqN#UGA}Q;nY%3P0ap`Dto$ck($hK7IJkN(@Pk zY7Y)cswE?Vh>A?dlFgK2!8$5?1c#FZks(DAQcPAAtHpMZX3CeCJz~h9-$)v47rHqz zFbHy_kg9~e&lv}_y|Ft{#YTQBpn$NVIkut(X4iGg4$3>K7#{@Wjf5mq4qEAmV+3X$ zF&!&pNtPN=Kr{^evEVOU{$R+$!`+|_kkz4n-glcFS^(joQb0#>aN30+MGE00lFgv9 zh7Bp`2plqHIR-Te$_~>%AZsjX1m)E9B$jF=Y~A=Uun@-%JO*N*Hp)^`Hs9}GfT21u zMYBR-DR@_=hJv;vo|GaHNsahtv~43Av`|O03|S4jfWUqw!FCh0$rq)R6ng)}znpk4 zgvjG!G|``+fBi=Z`epxu98Ix(b0wm3=(27mEFcOx1VKj3l#I9%3Hn+bB6m6 zpgVocT@>TdKgTE8$M+Q&XM zY^gyP%cNu~QZy)9PU0L7%4-Y-Z89ihAxjGk@iYf;i5+xlLnT^GDj~zvQwmZ!5;FeGU~=NH$48Zb%ok+K z4t=Ia`{NFs(P4WAQvYIf&@F>bT0;-492vT0$4uF}6!;Z7swLyGsNT=9s4)`L!x!a< z9rT3dvyngA{~Wk*JC0{#{V-dy?HIPMtN!QDB|DTHffo>jQICfG;~?NWc_)(>#BVFQv))W|LCNd?~~8PkONHIV#EJ`R~YT=N`{DKukB=olmwFEZfvC z;+064VCvQa^q9juq226wJi5@IWHT`;JF$-T0(cAgcX()&RPa(ST^Q-5B)mKPDSbNX ziCkRjo%Kw0vUI1nKzC;s#3kK2Wh_zZba%_0S-cvC_q(#<5-eTbYF$vh$8#H$=XSNf zu}ufB)+l&^w3l|^4bk@7-5I|^S4AIov%onR50z`YQl!h1F0VXj-+t_7bki2FH+$t( zt<;m*P2tdW!D(?Lkyqz?x_;Qz@&>Q#dEJlCbi4cO6kno?dz0O4ZVvhdZEPC6v_!Yp z-n`b<-NfdGceN}__s=qPJq)T-sPko)7xLWM-_=hRjD=aaG{cV8WpYE zTfe1w)S5kg>h{})BHPn#_7mMqV-9)|Z~wG!7j3pL1NCGFQ=r>9*Xh2_0>eu` zc(G^c;D*my$5Bd`XRaP=@zP5_w63DGo3i_Oad+Zz55_K?I1b$hIoVG7Kfe&-*m0=g zkP`EMp~5zYtPtG}@PDx(o(NcKByQ^3h<`RbK_`^r{|<$mQX+CFNvc#R2`@x3Bs=b3 zPSTVV{|Gy(TSHbbXcaZ3h+~IZAM1i&{_xK^q=a?dPR9HnM+`-DIYc|Re^$kmFH5TG zzmH=>)&~I#jhRmPgJ4`C=qCRkz)Ghoh+J-4oG^%U; zK^{CQ0LUFt74n2|6!Dy6m6pFy=FIx+$9@kT7H_8%Sy9mLQ=+991{zM!lsPNi8GiA$=)M!U*e%V3! 
zVcAOPh9ZT+@~CX;T|t*91k11_Cnd)uGung0gc2DjX32!H$ULh_Va2%WA+e4fC}3BK zFHVutKf!ZF=2;N7tpU3k+M~CGBfSJTH-fG23{nxQdF@u1m;hbny1 zZ2wmM_4SS>-~!H-j#z&k@|wWB)Cw7j`Ei~AuX5sDuX46mp9uxBI;sc&mm!m)tY_MU zR!1UIA#gVfNfhlUR{**a+4yA9zGr!j7odI^5hgx>?jWzT5a_jk7Xt<-m%>IRL$dVC zBawIyIL;fPr@Ia`f^YZfmVRw}tXUz;m?wA}AFW~t0CjVRaDFS7kj=Pk(=s6A1rn?K z+9WJliiGln)yiGII!R_Cn}%r!Y;qdNpN%W5r2Ok8{>=Lu0A zGex=|*9h;}m+#VU=h8m6;)Pv0UtF3(A3{r$;;=D;8|Kn9&(DcWpkJg7vb7W?0dp z=y)Wl*i2S5IoaBnLg&SDgDy*u^sHk>;zL^-ano~Ca@zOubKK{ENNYRvtW06ylT1s7 z_Bt9hUWw}FVWpRiu(%YFahU|wj$~MJ%rO;!w?5sIv_3J2Dzv@=AjB5dKUKD13V)c~gb1drnAd5NLnESQP$2?$HO11xl(Nf1{tN5Dm{ZSWQMY7_I-%}jt znP-Gw5cw@nA#X~n5KqCA@rHwvWAKC@k0PKIeQwK|rJxfXoV5poMN-9BwY{+BB%Nw7(ZNMZjpQ7WTOy|_6!!`Mx-MlL{{_&e|C1Jw+`n>fuX1I7?grbx z|JVJiGq1C^U#;I~BA1+>Ox=Ec>l!B4UC$HB8|btBP0^pXlTMJdj-MClOCot35uQUT zyf%60lca#I)dybEyhyrB4oQUPlN>r4q11O3)^=Iaa+>lr$w~WilyW?NhKy_E$t6pl zN+0|Wd)L<6#*wA}N+U1p0U}^06YS2u3?>&K2sVQR@I38H&?1{uhh#U?-IO9d>3*^0 z*p4H~k`>FcXo|W~v~0llqR_fWI_Sr5}+ud$TT^L^1Qt=V6V)RO+Mz5CLL_yrX`mVr9S&X zHZ9mIgRoVU+r$HQW1=CEQ5C?ee;9l`lIT-_&csw7&q{spi0V-c;&9d{Q6yFB>CNa` zFY-|~{47p<;)BjxxSW!#``k_b*rcseavyn`iU-^Q2)s9q1Jn+b!2$VD?!#|KUo&2Y z8f@NcOlCYh8j_9%*oJNB9r;lPfC2(oCYY(;?&#vt2upzg`-#A*Kst%WFT}CPSiNZ} zb+|(=n`KF|W@ZbPED>)yD*-`GldKbSNOW?0)5K1$Yor^(Oe^2e*Ab+MnJW6jHqTB? 
zIW#BD$xp)j^zyfm7NgKD{KmLe$W~&s~gmJqE`B~e+%}9+xh->!uR)gf4>GlJ8-BNI!hiG zg=}17Ly?=jeGvo7r3-HhDEB=3Q5ec?k}{5s<2K12wu9gbcGrO2nxWUkgWZN-KuCI> zZ<5Cn0B`#sO=tB%aa$OGd%jt|N*uxUrJuxsTVGNq46;4nJaH2Gc5PjTBMxNirY_n8 zYg^_PB0{z_(ryo_tqY5*5nr~A5;(Sx)cPW{SOBdX^n^jP=P|B`1I?aq@c(1LvtDN) z4m=ya{0P9a&7I!3cx=6+7z)NNx|hP|(U3q$9D8-#mB`qu<8ZXZUeD_(s|U0KEL}>F zr?eqw)jmb)1t>t(QicqqmkFz2DF0-}##={SFsU>IP!t8kU}HbkV`+w#I>1y3{sMUF zfMo~Nsx_p_pNz|Q84$MI1PCLK@HL8jK|8WP(nuUtU3JxE-~ethWDIr%hgGpl%lu#h zF}=F;Ej2ygwt&Byd%mS>i2XT7B6{M?0r@|8?%*vslg61bg)a^_5)LZaD|O31ERwIz z49buh)GG*9I(q^1@juv}OtMG<{`hg1L6-QBz5RE5C?hzN=)xw#Y6;H%Xj+J$nrujB z!F0SfOd#QTDST-Vp}hqc?Grt%CR8O$0mrPnFB2;qTVjgvty{MYrEX;s#u|$pE38sb zZ;a!EYh3MMAB7PvA(}|HP&{!CKf8iRKDW`q5~sKp0#-(q;*<*{Vk*1D^#~kJ@TF!r zW6i!ZVHejZa*2-A817%>Y|t}yhNPi;MhO`-bz?xTf8S9MS9Euz_(o)3A6Gz793U~N zWJwFFAiIZP3C;qlgiu7VZwj%$T}1s>5DFZ4KC}d|U^4*m*BhfRY8%Is(+ood356-J z9V|T`T!xl$r4J|=SD|_Z5`G#)=xqZ(6j7RhMF86X%faV6=<9C=VRDd6Yo?Dt8jUf` z5fux9`VTR+@AW&7UvM`FBT|%+gNe;09z+EbzN@|Yb{KHK3&l4VYOfbIPVZgu0)@q^ zr&F`i>GDCNo7k{>fX9i1fR#PIY+`E>feur@*&p*JstHh@swC<%+kDDH+~(HgH! 
z+^1>@6c@xTG^U#4aAG}-7xIei7zg)CtiSDkj_n-?!G%=MVa)*w>0cN_B&MU-C6DZc z5kk6zt#Or?eJ%yFz_(E7ZVo@sISXQ4aL8p1xRLWV#e6uB*mPYS5nC5&yg7*UM zi%})hCKy4e&WS+hFfQG9bPHNXsQB4M^+>2X#?=c-T+D#MiGiRX>3WR_p7F zqS0>=fV}c90&dyJM{&JUOL(sg(A`Q^;?f9%(3oo=tlQ-!>}kS>08*WV zHNgEykPTKNU?PhCed?2v1KiJbq(Hx*5$cQUBJz;Ewb!N;oRH1$vJGOba~FVud)NCxhjan>#1V z7abhy)%Od(u6_HD24C@%qG-XW)nSSkK<2C_^^an>6`XuMTxABL7;*{#6H^VQVadJq zhiBv0pN-#oHeP-3V8|>*&hMA~r_296`AZ;kN=hX@g<=%Uyqr-iHLy+$vRPrrMpI6s zjv{6tm`JYg&sTq5{CW20#c(CxS18cKMH`(QoYGQP?9u!HnjF^==!svTljb8 z+0d0|L)XaXaEz2CO~O!568gWs7g(5@1<#f|y(}`>tjE`Kfj2KF3ygXpYHaOAy zG7Zebq2LOA4>b(*{h;sh><>2{k4bqwXX(I<(EXpg;3W;>ATmO=EJe@sl=Ok3 zS)ZPlJ^|8uL9pIyDp6p)VlPkf`ePEomk_IE`0N7wAc~oL_tC+T;*fm+Lc9YTW8l<( zxBYp%aShR5V zaFGlr#L{AR2@bLE@bE<{u}cefp_aW{O1D|x)w2%k>QKtC57Hz?X7Zi zT#(lF4Y+6nK~T&;>D|(^Q7YcOfanv&!0sTnRj$Lz zYCa5#)z>|68VI6Kb%TqcOBICY?@7F*tcq1+WTZX7ueaiiFTG)J2>(>=QE=rm$F+6V)-PmCGm=!dZ0oyDc zu4HgNE2~Y|978$)$zXB9S8>kOl$sT%)XeH>L$_orhNY>eV#q;h9m&pEf=uxkdo_zZ zjk{wgAgS?YMBbDW3Grp(%L2Ix;@ql=Mb<%8jv+*00AY__h;iQwX6NTQoh;!!s+EXH z+6-ichogk+|{-%{&humpIym0LCKo&}3LOE7D5{Z07ChBI^RCOng zeIgr?a=6Y+*GxUD_>@!x`ynA(7wIRL&nj`Ovl$!&qGpDgObhWzyQZKQFK8n|PVO2B zQjGOZQzY0(?N)vjZjbtt0aGLx4h zFDs5wR!QcBX8;us7B@1KyiR5?l|Jvt$!azty|lLz6T~LSs+Z1*u6fa!#vK!Q>gLH$ zq9fF?>i`)#GV{fIh|X~C+GO=aWQWAHit1-O7MiSy-hIu=C&dpfuWBjLJ>|2#Ir4ZG zoe23{tfkqAaaKkuqYAGJhL#p_%2INcobNCSnx*DM_i82b$wE%_F>WPH(RpJja!&NYALD!q z8u5WdZOqO%qqtkmk%w=WtRgA#kFix`uhL`cL?0c7qGN^P$f5J7)$en73JLQD!A$YL# zoEp(7@fjxY_3GG$@lKeStCsNf^+zfod_{&NyJqe71z*mlD>J8>XVa4<8{{FDJe$r6 zWEzWip%uQ$TjHZbPxnL*oL#$I1~iq(x*So>%wrjA7;5CnW|Dx?QVik2sEJ<|J(>JM zkC<+Uz8HyyfYFgcoGoccis~B|Z`zYtxkT5p%eW*5tMe4@xr~8TIlQ z>B3{u{IVdMT`4o(0|T_aMQyBHKGrH!NTy;PW1*lXh27Bj^QK2nMpeLbjlTN*zeLaf zxMrSkK+dYN88!9Rw1)(V1Jeo_uWj@}?k!|K+vuTNG1QdsMfZ1LlC4=iA-vG`HA@LN zTO!oJ0nh?#rrK%9hN?7ZkAHx_6DKQ60ypUsw^{2qYH5uc6=0cR)mmJ3XN=JZEms*2 z6cAGhiIU^nqXYi_$$$F|yWq+&A@y;nmZGU$dbidq_2nPmu(ZP!DdRouuu83e_cT5% zT4tx(nN^`eApO6YH{*WS!tKOMd+Q4NFFwaM2D>#s`@G|^Y>?F-1wF@VwnC@|pi 
zgpA@Vms?FaKep`cajY?-OeA+1XGmnZbhxd%a6=j4W~)03{~g%H`SALj8q(HNo{SlL zy@>w-*~JzoyIA+|WRSo8ab(p;kf~w-St^ifzFefT>XeFp47n<*__`!;SJ>rIyBt`E z0q;oiiJMWSzO>mw2*?zLenb^00 z@g3i$tC3Ip^iz1z3NnW~1PGNPCl;qv2NnA2VG)TEwy}keks-XerN$~-?hf)73^#$4 zyGkxUC|(OS#u68{6%c@CpKpPbKU z*-I3r)LBromZWPE@mIQ4yhO-h^x^m4$2L=M9Blo zQ0T$hRvb#f%Uth_)q&u1c@C)sTWJ3=x1kB)WNSC#460-Bv`e1TE(M%+2~V4gw$)QG z;*}`XUx^V99b)4p!8({0YIgN5&p{J{#+Lri85Q&V|K(x+;`?&;aO3f%Os`~08R|RA zIhE?`>B+oq6#Nb(X_o(CR!Jth^uEIl(vr;FK85^y7WGO&-lvp0d}v7T!7`Fz@2tN~ z{3u0|3n?jO96szKQG*8Rd?tONy}rsxBEMKlmMlCy1tTd;bs6}uqfAONE^!IiJ@J4I zMNWd0nZ_44vQgG7;{WLe^>f=On8Ks6^p;LD<;nCcx}eztd`7B8nwv;=?pby7#^Flh za3u*6FYO#!^TI1KqoBBI7u|2_lMgySj3RC25curiqZg6&@*1_MB3&kIs5x4EQ;da^ z3G7XAuFQ(PcCiUPrY|rE2?^RQ{=)j4Rdg)3=ofG0m&<**N_F@s#U|wqui$5LfI#h5 z#=yfV@7YAch`NV zd^&UGba-BRy*v5(+WhM)^Ab>BF3$h$LGAU;GO}GVCA`+bYHtj3sfKljE3@X1Y$8`Cg#DCJ^MJRlIQ*EaJTtIB z755r51C6AaEZac2JR>F?+Ar1w_RLkgd|;Pv(6UU4>Pvu<9%|@KP)1FvvxmHyJPSd- zU%_9taMP4q(}^6NF0E#)9EWgWXw#&Jl+02$E`!XQeVv6QuyO+FW)E?x^2F6mYASpq z&$cgQ^5KjsNl8o`m`pLf9;T&s>n3;T4mW^cUCH6&_Het-$<*T>Sd0vfE5~=#Pqu$g z+U-y=z9+kH`w2Z6f9Do$yZz+;(M9tA(9r}XGNjUc1Hk)9zP3^5>J2CqC!SC;l=+kK zu1cM~>2y+-@`raMOHHb2pho3ByHQ^ldj@r}6)b!$v-}jwj8M+cVsnyD*tq2=>}|T_ zom2W@Q4O*fPUjij~!r_TV4eea%h9FsUb;rwpF?_c9pDtTo}c!KHy}bTbN)Y zX9Mze2mEJR#al$u(Q)#VS831bqv(~qqs*WeILjzy3aw)L4pMmw*x@?<-NL`HB`BLG zS}5Esxz{Xt=bJhXm+0H|B6l!3K(X=wd2hGr)FG2;+B0d$mo_GiD}DE3@9X3Fa`W=z zmvh4}b}t@JTq2uJ=GR};HcoC1AAi3GmBf4B5@VWRgs+%#bn=aHYL%hiN0pQ5hsPu1 z&MryUe!u@k?E=|$GB&~X{t))wn7eGZxS1@tXHwhDu*ziTyZ**dUN!l;YeaS5e0UWJ~8TYk7%h z9WV1%3n6r;ZwVd*&(;bVA?A%18LL-1mkGC^iAHLnMA`?W|M*eq&S3>Jwt^brN3tdN zW#o)>u2bn#3|$VifV3XnOT<&B;P(j97cF91t75M%K^p8F&x<+PE-EY&e8(v2y^8S> zOUB>=`mi?9?nZ(z!vl=jGbY@s;sN&1uqQ1QQ(<%)89v z=c0XYlpHWq7oo`x^5X*47zyqYQW`reuP;xYdoysKW+P6gYi};p0?h(|Z6~H5CGZ4Hr%34R}6RNh+FmOXcz;YJ=7bIGWIC=Onv(5|FCy% zO>HDu0{$x*ewls1vm5)m5gonLu{%4{*PZU2jo6osGAf{qZe5Zf%o7qbHi!q=uifUMx9xc{U;JJXZ=@L4qpF_zNWKmjPz2MKb<>}sQ`ndGCH)N7Q z|G^yZ*|r$YlLuE1v+jIlwo{N 
zq7sbDvJz}3-Ac5Bd?-`R?NYP@ZdiRB21&F-ChUQdE{F>oj0%S(q%S|9baTw#A8GVv zL;wifFVAS;EDLvMX}l4ibkD44PinSTT>BXv7)@rpQ`q z#6YCEg)uYs-T4|G>GZ3rfhTLpCSIovfJt~HzeQ_czePqx`!EM9(}*6JXSg~ zu70xILa6wr;{Haz;)7s{0Hzhg%bP;Fp3KOt9i}X#OXd^*@g4FpJ`Qn@nh}|N0dC&g z)mS(zlUSflibSI9p3IPRK#3BcteT&!)dS}RS-}MQkoYre6vW#h{B1`fQMp5yQvVyx zMWAINwpd<*AU|KnWWc&|&{!@*@Tt;O;KB6Z2F#Qz)1f^BY4gav){-Xh(jKGCvM+JgiYnMorNnP8;FUH*68*i4E% z`a=ZL^X$%49S5Q;4tNP}A%dF6{#Mij35z-Yv~y+z0Iz-mhq#9 z5Sg=8>qzvQ2cl2*lUC8eh=B#-Hz8hQBLAZA0XkV719BZ-CK%2U*)!nkHSob2&%Iih zKFs7WMdC#wX=zcdyVe)+4AY(TfRBAQFev1T4$k9O$ia1*sDxO2?nS&GK6?rPIZSkw z3*5SHA<))?Emo^rEXLvxN6gD+Jq&@gd6V)OsBM^I=hkrd6c0wZf-3ZV9kH7Myp@D!t>4jtr>d22ZJiF} z^)ViYGEqV6*6J^t2z^8NPCpBWnbeW@7>0N(;Tkxvvv3|vw_2emJQgI|p|0O$m|bV_ zmi~5f7tGi>skSE6Y*Q-n_`NJ@B?iqfZ+6nxa^n`uQZUsy&44}IDnJ)>kvAK&RE%ni zTiPNOOcysyi2FY7snqS;GV0%mN*7!!Y zrmZ!u7|}Ba_^mYG7qEn_WDT=K&z#j;DIOhjNXrz7mKj4sOxy^1Spbp`7n4ZtGh0aa zS!_(e>-2>WAvJ;PPiYHDQfs^+Jn|(Y@uT>~G{g0#tpJw3YV6&a@ZA8bq>%-Q`G?-} z;tKjJ355WO^hn(b1bPfhor)a2L@qp@@I4-Al}R^MCQ$lrr9ym9#bNa00kGu9EI$Kw z3Uo6uAV!#7N+dw0Zl;6Y@i=9Zla5Q0v3GVEF}4 zSFJ=(SjIO_fpD^pXF&*$hPlppcnJe2q^AMj`dGlA;&k>H^Im|p_=kQ3&mX=SDx+XM5PXSK@x^;C>AM z7O;Q>59yg;#fw7wIRNo|3eeertR6PF&=G${AO7^a9iaJi0$j#uwVcz_S#XV|GiJfw zZSyQ>;3xdHbuR2wId~VJ4Z4l?_3p&WV)pP+|6wV8aAklB{x6HyEOi;i+k?^ZmpeC^ zQ*mb6HR68KoLNZ;yef^V6 zmGEF^kiPPQVhq-0eN|M|WiA`E>b3~CG}Uc+)eO~THVa5~na7!?!ac4TuT?4XNmQTh zIg|;mx^2{nz-nc?H}2U64bRw{&2+Y;wm##Twu1S~*si>c3eYreRHl!3#uIsU+bhnn z6+~^eV|N4v?hJlu->NF=+WmNk=Ii zY~FMpc*QIkQ6VL%z&|QI`X~cw%&%FEx0vz+%FN?y3pyIAg=lBaueU02E^GQ zx30C|CeA%=oNH?_XrkS>fOakKu_n!JKyw>dhZzR)SwQ+I3;7YzTWv6ZM`N>ZRYiG4h@U{x#cze>{}n9nPZlAdj2B zQ>d_yX{-0?XSYQ7sDJ9sYX51$;3DQ=N?*bF?1lT9o(cTja(Xh2PuUhL%EF?38BV;n zh_}HblL+9Y6S&*qsn;T1R}-=k&vh}*+@oAKg*i~%BKIGoJCZpGu${rX=#E8*%D$Sq zdN49`Fp&{NvM;wELe|=Rrw3$NkH5WBv~}Kd?9vp4Po?p z2o(z^4($y>Xb&;J%$u_>ccu;>Oo;krZj|rzh_dP7zcsAv^df&h5woc$o1oMqXKwc4 z*@MkrR7i}ty z$|~ru8PrbKP&-|#DBh@P8)|`IN4RfM&63C??aWb!0#r z0tIFoHwMEOb53mjGfC~CxyJ%_N2W02_7?U%R;8DCpFvI}sc^l4 
zUl$p5H>25-0rag^1E5U{v-v79#*AbgJ7TVGM?f35I2_hXW4|?rOqkK;-l{H`t8~GD z2`0~Au3c>6u4%u-RbKiu=A1KXGIi5jRX4GQdFiHl0$;PI>T`B6vGrfb9g7weaqNI!7{Z{}nK+eDO`asu>E;#WK^vO|~ zn8jfF&Hu#ORi&+sNU=8Jqpv07ozz9&c)E-%2+l1cvrjezW|W?BRSOZxsHla2Y^?N1 z^z}-@A=R|D`Doik7B`cm?fVU_sGKp0+`91DLL#A97sQ>%Y9r+Z6zh?tn^sjhCbX=I zi|JXsn`iu4%WhOl?k$XxmoQDj_{{EZX%Bkw!!%aPNP8Mn+@vYy@OqBtF0o8mcA;gL zvBp)e(A%AWm_LN4tfhUj5)=rHPBrKz)PGIXH3~jsI~3V z6pNs|LAq7_F2~6yR|o~6@{jAiZ{`+{?p_B5O#eH&zfF!3i${|S$Eooj*V2Ts;ucb) zNFLoT9Q7BEM$)=U`R1l4XVW!GCdZ?1PqCy^dVPSotOn=SOpArTMUz|WJi^?Ssu~%l z&6*0|txJZr^@1>_b-P&U3kk!r8`TPK3%jw!Zr5L_@I#_{o#HIG#1A&C@?Ai1EA54# zis;e<9DR;yi#d)+EA+FWp50<#*gT}tbM=|;ZeaEz63q#`nKF~n63a?&E%JI70iiDS zF%G(gjUPG*2X(FLXMrvPDa|uDI;|}vXqMkrNN5%_Enb8N{zk1~N@+ z3qzP$55GIdu00`4_XdU*(n?c;%ouyJeW_c5omFa&++%$SmDjq3F=drqLSk>D^1V9J zQ@gB6{s4aq`XsB!CMvNBm^9Y)J^4DFd01oQ*z0Ry&Rdgg2R}g{uDg1%n>1tL={6Sr zG*3(0ep&>lu2^R8(>yJy#Ek<2KOoX|?t?$eYG?FPJC&(7R8pRec7_5G*n(NA`PG`psg$XiznVE#t%ks8}+>`_P!oog!B$9MNuOWMKqJ&Lq&mDG%A{>yCbC3={+&b ztJaB4tlFCTrM7b)d`#aRwrK+AKG+|=v!6`uk7V|Tll#dLvVVDQ|MHq`E-FJln}6Q_ zy#HC|`3+~7yu?nS=;xyx05SB~^Py*J&$54CwoNF*3Z<_SeYh$-v`ybm)8D0Q5!*ar zOEb7_7i^Ud2@UI}b6VOLf0TRLRmm1!(>L*Fu~vH&1g66BWY`}p?~tDIOs8M+*(Seq z?t_n1N%7gH^3j@quf~Ete|fIG9V~s0N6j5ZmrNh!H}}3B_%{Ac?wg%&R=!QxrVjq^ z?|*mBaae6m&kE1`ZBr@mOCz1K5`p~{_K@DG_3eIF zt!K8t93F+Sor)dwF?>Ju{lxb-zQ5%L)PoLdJ4!oLH>0$Jw(nD^-8Kmwbl>%U_u#wr z|Fi90MIZ3?9isSf7l!SuQ2e2DAN(%XF4^vHQRw*jq&(9ekkn54P}Mg3GU+h-LREsT zN?3BY>mczrThvce?f2c-?;GFmySLvrw%?bcpI!L?V3wt#Zgs+Q8cv%Ej#GuBr0pI8 zKh-_HkOERPLSI6UZw73iI(?bD97Tan%VN7@4@e4fF4?ar%%EeKRC3xZ#(7yjhy zk)PO3;BeH{lBSypT%=S(`ifw?FALRvm+$ikjN6SBZF2;SlN%4U%>@0dZDh2~ztBtD ztaK#*QSzy_EspqM(R51mHGQ1r2WqrKvY%!V6`cw6KHacS^&uR}1SjO`lafQqPo$`u zgk8D>;Jnj=>gY+P)H`qB7(it-%{AL4)MKfVYAX#yL0p^Bb;ANx`0TfzbkK^n;#!gZ zo6w36wBoE*T%nh?nP@+khvZI0^~s%%8$?_VwHPh*`M|T4zb`zqom*WByoL(4FCVVA z-YXvL_PN+H;vylPG@p0IE-9TIR5xk3kL1gMcWEWDS%z z+Wksv2kZ{>^w>bg{-nwkdZ0WLAz}IJm|R`~i!F`|L9qUXKL%w)m8f+@8RJYCmuHYh 
zXZXQw`Zqz$cL^RBPa(`+u5%Dvm|U-F>2V~*X)T>IPQ5LePPwHU9^e+F%AbJyf}lbt zFzX)0`#c0O_ERWJBG$9gM#+Ng_tjMIL4NdyiR*YMy80ZmD6<{l$BUPF%;7ATap&26 zhm*siFg4%%W@Pp_HARjJQ%8eW^nIE~hgOqWd>Vbrsra~%1 zhdJ(gxTF)l#pOB&C*gxQ*c)f$PFqNxH!h4FyGvT8V0Ylq1Ax022rBZ$NRI@I__kl^ zt}LGyCGkUlpUT0IYCX*dQ?@F`x zus+4c($~bsMZeN6Syn1h5S8PAXp$dc(+{levaQ7Q_8>x`)doevTHtyR{unsD2th5B*gj@+4F<*ldFIv>mLsr^*~B+ zbw3zge%-gj4s&@ry2VPln`Ec5&ddQUDdtmL8u3lJE=Um>c2vp%FIhXpGby2nPwoN< zT|948ZHt@_tD$z6)W(z-Py_CN6wj%K=o%=VEvC9eM5au>j#fYH4i(NQ|8rZ!TgSA) z20w2DpiNQbZn_xygmbpTX+hy!9id3n)4-u3Vw6o#@%~Y0x5lC#>I|#i#v5&=L{|YX z89i30e*ORKT}yM@R+j!Nx3ZZ*0%%h{yLHBM5q=;>7VWEQx{4Y^ZVMM{NIxx#wJ50DK6<1(cqysi{h2 z5Wu;(c-+T1_dDMgr^B(95WmTET>Raq=Zg_+FW|%0$+TX(?e# zL)R_r^u#T*;(eq+`&LyKucZm-gxrZ3drmP+GE#Dw_iD;kE+8DhYt=3#_0CwhS7YXq zy0{SzLE`sEAB?z7P7CoQ;bFw^r3jnml}rp{9+LQ!;6&Jgy(<^+YqjU5bOxy;K79AY zi5MN%Go83TBWA9eQdrw~TWHgyAtiEF3|M27RLr0=@tkN8O5Vzcy}Z(?zWs+kM6-@& z{$3R8ccNGyiB|O^e|RT~`8#jFAIbc~cca|Jjutctk%zzzJ>Nd%SC7CfZdG!6G?}x2{x0nx(eh|5s zoEbm*&e5Y0%Av!5oA|?k2~x;wW)$kzC-kKJMZyX=C_A?QDqA4+@<&O7_OH4?Y6Tu8c~W@G_P^6tesqny?L-~Lj# z=3kTCuS&@Wm$j{%t?;(a`-ruCAI);;lX5m^u+{v4TvK4QbtA5)=!euAu3AJY_Hwl} zfvM$vbVHm=s)#F(Wn{&@kE`etLflaj;6zQEJxcf7H(Q7DM|xr|HKb%KD#2FK67bo7 z5+$npkr+HMozx5_4X?x6HKZf2+N|4<9lRNuLF(1{^RJc$e%%-hOdVCMuJ%+{%A5n4 zCCy%B*}5#7meDRxh2enmyhz~twO^zl=hHOj_wB{JRG|Ew!|c*B&kmi6%=T179@#Q3 z5l8h$L<8*jNU0}iZ8&gdB!CBOmVHZCGN3UpV_!cH|}6E#BG19EiBc3BCF=s zT3Vw$iJ51Khk>}A?jtekFs`uP=lOp2nrj4try??QmT@KZj3<3oasf^>^ z8V+g~aVWdMh#AMYK+Te^TmwM2OH6L*fbpJjF0tKZHgXOjvr{jzrEhGFq+5JE<(fLc z!ZYoZfKvn;V|UBh%L|yHO`q}g*riQXXD59t!S3;@mqrjKyOd4x6<5@S57dx8gD^uQ-rKLH8{wfz5QC;aRV zo=p->W>iqxgUclT;4&~oCFp4709FLnQd~rDjV$*vd|sVfJ`cz&Xgbq+~_%(`yAO9F~1s7Wp_DJHKQJPL~dy$^qPUvhh_;MQD*F%fwN24P_B1B$7eVabBzl-fl@|=LpC>E&MKzAG2>t~?cGaE_QUawz%dHVS_Rz(Ya535 z6SP}}MlI=PTTV-0)B$)q61ri?3HB+S^!9)84WDk-{(cuHbeFG?@CX zff`hQi3>Gx^{2k^8uyT%-_whdPKBp^H_%nQYU8GR=l2U^FN>qpWPf<*+10aL|7n2h zLd}Mgd79?JMpdT1cx1#RkWd_<5_S&a~lBV>Jp$F^dIW<_0tJ 
z;Jlac1o~YA0Nl}CTELUg@2wSQs;faw-E)d$g9#F>Ox<9ef4Wwf9dswl{e={ zfqv_557Tb~;U>r5{&o2-c|EZB>hyK;s$6;f-4*hBcHq^>-0N=#$m=Tu05fMExiIZk zqwtZ-=j>0XWkb%Lle}6_j#RU)lSG~VHBOUN@LD&-2QV_gVC=VS)O!uL)^tDI(w z!tnM%PY(+{ChZ2#@ZOd6j0Hz233pad9nbAtn@1;B35U(Eb0MTnM@DbWE*;8UXDUc6 z&7+KeXB}CiK`j0f$RpWh7q}?Xw1dNUqaebl`U^IrR=Gr~<)Lc%k`o9i@L0)pCrIL< z+qyZ4d|4Ar&L~)DWek7!wNgqd&wDAs=4}u^oUzHSR>q>4T+=dAi>-UATvJ&vp~rh$ z1a?B?1WW+Ne{R}C!3D7YbL=-u0O&!YE@16Ci;P}uSHaB;@Q`|e;?RM?qd|w^ixGh> zAk|j^sle7)Js2BU1ygYsapOK-eYO4~rRt34gle=VvfZI;rl^; z1rYdm`OnG^>zH-10%9=qfDPhj@-R`@!rI(u9Dc7Pz+1CKn@G@W>=hdnmeq^^4_?%wcrX zdy*USB$#W$#gj1Oq{JD+*oh^!7p0l(bdy`mkclR(;jSev$~46|$65qm3)RKbc-ewe zdXr%6p%IJiT%dcF-F9KOGkWq}Y-zVKvt@HRRJE*%?Oi zu@x+hn(v$srHX`6;1|WwUn;lv@Ad(4*xbne?Xh3(T;Cr%{o>Z?XBTch>zRGgfA8hQ z{L5?KfDjSJfWxHHJU>78%jVGYzT5kg>v+jC)Ej|IUK)6I@)SJ^*SYs>Z{wA|(E~RKOAlwrLEU5yYri%-cGp0qSV4{#J z^NUQ-3@XRSekVau?~YwwQd5Q!^fkJm8vJZKq`U;MCQVMzrYKqj?bKrQf05i2k1=F@ zi6j+x2nuOuL1%ME-~XL37Rl*pL{jUT*}GX}QGrQj+S0QzAAk~`KwO{_ERBBVSq}{i zT!M8Zw$xKVLXT3D1*Pzdn6_95TlSf)&i1gyqNu2-Ae9za>pHlW^{OfXp;Z5n_48RV z%hG;E&(p1T9S)e*!;5kfm&r_3PU`usBAmsR##)L7x@aw2;PQNxu{!+}`!jBZ=Q>`y zdWADLkZsUO7iq7)QZ@4d7|%heeCt`PwClx#sH=!I5nf7<(^ZIDd!*@(+InAqq(D%);x_({Fmq zZ~DJ(9>EWcowGSx3S66Fgh#qdEKYt9x-M8~-m~q5)aUC0>xY2RYu#d6hsOmmeMJoa zGM1MD#-2+}!qJBpjgAHl9LB(Gfpf%^4k_jv0a-{CqQ_Mw(TzVm(N^;CvY{vSPQ^@< zlp(dzQ?#x`djKTjaTj_Bofn^0$)paVc-#r4MnotQ9@bfLG~+!H9N$nW!SD`hThl|! 
zTtRyi3is8}g}~`w(uR_c>qgS_L`}^Y#zdVcs0bvI=_Smg#QJ3Q)^O{HrlS=fA*dMv zX*tAWgG{M?(g{$ujs>=eKlX*kYKp}J?XVtxH>*D^?OdS$;{N)$-h@X79fO87O6U_o zs27W)FTcHJ13t$`U)(?YqBt5D`N^!Gk1yt~_47sDs*xST&6^wZD(g^707K|&i~ zpy41s%;^zFcCuYd)!AWjZ^(BN5BGcMNX!VJK|9~0m2VR4o(=~a`G4YW!1wwTho*Fc z#HHhfdEtq+uO)~=q?L{NArX&d@e+t}V(`PH3=)?v<}eD>5FQoNsr2FtYZGv&%N7|w zNK3>^0gO)Bv$AZvlJIj087|cW!3!k^iMo&YamYrk=a}@s=wtxId69I2h`581ZvzM6+-ls- zzHvI5KJ6WP+B^BQua_RhDu-5n{0(qw6Ao++Kx+6M91hOx&G~^>#VfzAS6*G6YwCx- zxp4h;f3F8Z1{defsThjvFpy$i9@9gCh2gXOEudkoaA4QZ9PG)uZZEMxTY%xRQDfZE z{f@fCc9#$h$JL0-YQQ-CrbsOh)CzCsu;EtC5mX|6@`0W=KratmcFL+154_awViQ5?ojA~+9Zhh!d zp?7xpxF(z$nrVsT0|1%P>MAR%uI!Hy9u;nxZL#wR%$E)=t7hYs#Ef`j1V zhX@bazkS{Y=iDCKA0K6Spy!tc@aY`_9LNj)f$}l7z|cM{;1KXye;3G4Eh@xWwD`@D zAudKDWeM|pU|mx|nPXhLxGH`vopxMo;gJd{6dAA2KT?VY;kTLMTRPVL@$GQ2yxTOy!_xLev?e=_{!^zLG7yum3a1++Rl**Xo` zcexz}6Nh(~cV~YdeKJ6vZ2mmDyRy6ZWH^9+ViCJP+SNh72Yqla_J|8RL$q_fio4kk z-m&_Y2$DVDG*7!d4YSVGODY)~vcM1by4e7Tf z4i1<^sYZ6pMi5N0rydB^@Q+d-`W^jQ>dxyO)T@_(X5Sa{K<7LkD^3{7l(oFB=CTrN ze*RdAf8V-Cud-Eqc+sp0+WFdbz4g&CgZ97VxMaoEu9QDo0)iyTypqNSoZp(ElfSJ> z2jHgsAr>D)!%w8u95w5d)7^5;pQi=|DGQH!x0kmUx9@DPZr|EoBy{uMcKK1yqk%_* z+nd|#kNOBbcAuWJ@u+|M?)Jv^?d|)Id{Ysx750k3qyJXXd4L zh#dKLcDBmp-0l+nyZGcJ^b>^sTzoP_yRE?|C+VCHdXL?upGSSmQD{%w-XW_Q+Pmv% zxm~fkOsk8|%q2C~QW&!aWrB|>E4TXkKrFL?B!zvudATKkf`u+uF?7j7l@&)5C^)hN z^ffPo!Y27?`lpLOm3}Jxo=~JkuVh0#}OEW}TIjC}e zB6p~=*=;@in*L31t##A)<7-1(>VIzj*z>sfcnbe+KCaMzSLoInc|3(b<{y9m zTpC1Dd~85I_o}hAOea5vwYBVjGHq>tvF{1;o`{)OXxEwby%&g%zPvE<{Nn9k*cns> zy^4~c-|qE~xk^^gP!LEfW0a3fS&85HCUZK=LG0|ivx%A zECf7o22wLO!Q2wxN9Pb{4Pm5o$&2|xfX?-=0&m86}yNRk@{(s$fk)Q}wu#R1D=|u~Q-z z{Rqjw(M_3d=~>xd;+KVO%RDx_SuL-TYWWmKQdu6W?|rhU=qjdpvcP906c)%1fYDL7 z3xbyZ_@0Il^f(_uTSE|=^OOk`07|Z(YqDwqHG@VwQEE>IGu3LLY1!Uw*$n^T8Br+qJGbKkPBQ}fPTk^TW9tCd1^V-#g_XW_IB-|0T6 z<9?x4lGwX*on+g$iWad9W$#YE@AzSdYWMHjHf`DzXr(D(WYT=~`ly3NAdRzRTZxtH2g6)h6kene z>hnSnNp9kCv;IU7L{w9x>+!<2;DZK7cz^Qtvn%E2mj<5pmY7q_AhDwOo^hKV3IROx z)#W~mnLdS1h$2qZDrxra)mGlm%O?7&96)Mgc+R15lurYyfO$$ 
zB7lQoX(QK>)M<_caV8y4@+*&XxynH^Q%%nn#?8zler}oYtdLdot~c=8E#cvlh@YFJ zXN++;4f}c*I7Vj9d!8^JW&&X_Ecs)Mpgiy>umz#JMH(Hil1m)~86{$UGjqmHBD91S z4jeqa)wyd3E!iZEZW<%yVH`Y+J!nc8a`Ks;G%cx9@02~M8hX7yCypukliOPD4!b5) z%KbDU2uaaal3ju6E8Vt5f_s@$c7Qu32>19{vq!S6=~POBu`K8M2^mH0*jf|Db&NB& zfI0|+T>7`#jFb+c)vIXX)C1#+8I+I1D@Ciy;<9|H zgt!JRwTUHu7m$U(YbRTld_r8(C-lxI4{WOBsCG38{tWW|hZ zCKk2?Y8uZeAtZ>w(o|BS)GzTaDd<<@qI=AQo)sk`Nmx=)(MMMbQE6+VLvg^b%r(;MrFmv&| zY?z8Lago^>w2iqYoc}}^VkXYD5|`p~z{l<}>J}!VNwgdEZ3XP$*EOG$PK+acXts7FI5EJ-;Z>NQeR>U>6^ zzA}@5Y|e{!&YmI;pbYyT_O7j|jU-F|m5hGr-H2wMc3&r=XFNOMm~pS8J(tIh0#!p< zqNGukaGNJxxcTnxMz+BhOe3(kNkZHN=JFu(3T9$xa6gHq4emDn51Z$l%u)%YDwSY* z=V8MRQTNQM%E~FuUire_JS^cVaLe{4-o)NGBB@rYz%}GPLTJ7TW|N~jR5@c{8g~r=3{jq4p2S#u z+?uIOTSxiPt`yn5={Ma#eS@m=yZB-JO)n!VT$XRdTlntHnk&C`h(s&4quV!8v?9kK zG{P<@5F%x-SJ3BPaK^bTkOZFqKryx_An%*VrIc%?^49HH2n+H7IM>_;i2Pz@8W`{j zZw&ht)~PU44X9rRb&XrIF5FS&qhIq@$r~lkUR7W&Gs^INr4?ss^(!U(l_QW2$jhHZ zfjHGXv@xKCf>%|yEQ=UV_i>ZaEEc#Q8ZcB;U&8}s=&CE5)BLC#j32lNywpNX>dBTS__qP`*C!aw zVTYOO*s`y}`>hU7{ko!EW0JFxqBx1a5Pz59?aCU?JKyH9r0yW_i#__mc9lf=lf z7?bZNr6x*2K)koSJ(mmxqN|6@3Q zp};%ciSCTbw^Tt0v`(;r5VoYX@HHg@``_OjJlw6{r|M-FT*d4u#sn{5Xww9fn~> zia8yzP*=*Stha5f_F>`fGH=m?cM=wO9-TpHoL84J#nke@X*KPklLY$;nO zc0XyvB!>jRF2wHEOQ`{mHwHx2i+ZyffL!6cHY`Y{f*R3XdIG6@2hioc(iE@TR5$6~ zr4hRvDoxMWX6+{t)*Qg)hcq#N33kIoewe&pk8&LVmA$1jFDl)QC?`nQr9sdRR)!>s znWPjXO9*#nOXIgnX~+2ufMfo5v#}A6@Dbn8)E3MI;`U-grnKq!fDD#~YA_r{H8-MD{5+4|u@==>0cFRAAC-x*YOc`KTiRd%k|T{WVS` zz6G=5=V%J@cH;Fqd%aAUTf81t>Ue2k8EB_KM(rk&Dl08~S6aNsw=&$&Ra#7x7W+#J zgQW%MEBbK==l5A2zQN{24O=pLozoE;f${~o3BY^D7M+ic!49=k>2RiY0$;T`642V& zW&RDcYfLXrvd_a;Rg+zCZxzU2MTd&JQ+?!2AsHWa1o*S;LdgNpFIZzRpHMhgK%vZS z57x_XtzzE*>>g?7tzb>*aI3OdHIM_KJL^nX6{3KUCuAM@t^C4(Qwu%7G|B}I(Tb%gATohuY!G&; z9%k(G*SWtxMuERYWEK>F9s39;h|VKJmVZ0-v>G6$7JJnWV5J5$_UWr3pro7Xt06~+ z0}=8;py6&OP%knsYH{*A<|_=Qu2N&$eozO4^zc^t?Y+gL)vu3UJ~`~q9^Q6*8d4qb zo6&azLq`kwcZ*p#>E2vTR%kgpw9ummYp_F8*kAwp>1Us?%a<=RzVG6d3!ix;h?dVM 
zQYc=7VO*t`f-G^A^t)A61+e#gHTy6|VXsj)Mj5PnH4$HvX4Srs0f|iYs+NNkNa9Re z6Lf#6G`zNm1Mf@QgDt2p9yGaO6xhhx-kh#B2fe;Kh%fw=_Zc|j6?5nQMGZG=m{6A$ zSDLwBV{01RNby=g_58WK^7JlS`LN`9R!bRsMuj?Cl_&!tRYnTUja888MD|x<)$iIE)nFskd@`BHvQgj%U+rUH2OQp7Wjg5EheA7=X z|FZl`q}IGVu}4mAB&SsPw)~*`cp;*_4&%P^b(vH(e^8q5B|dBtXGvPJ>i?vaqu%JT z_U|_-KgD_5hn%qyVJ9gK=o3_+9F}Q7X(HtMx#R$~RY&-W8T1t~hbGksblyx@C}n(Qf(pAWek&F=IjdT6PAnm1Uys7f|)MozdQ%! zH(!u##9wu{kgywOxeSFn)lOS~7l<@_?eS`5vV`HY#Zbra_kN1qJyFczBViHz6dgHQ9JGr0*T_*A2zMHto~e_U09&}E=&;}O z$s+JCPH$3thIevnsNRf!wsRsz^A`Y$7}qqjrLBo?u2Uh*c8zNtz6Pi%{596eO}6oU z44?iV{swcUX}Z=D^iF9BnVva*;ZPHQ7r`pbByY5iFeanBZl!2D$?p*R*>A=Ex@Z_< zz2xzWG+pT9GpP@s$0P1p^Cs&eHjo5Xue`<(&7!QlrXYXRb=ivfoacCDaNGU(sMsGb ze0tdp(*Fxcz-ldRx@I)FKVk5p=_oNVk6w=EEbbKf2w$Tgk8EmIJJoQ|F?vnUM$?w;-!UnT<1iMJ56EFCl`wO=4uyt}vq&_t%Ve>6fCO73JXf{Gec)-4Ln7)Z}H}T~t2~Ov6 zH{TKGJKUhIKhTHe=YwD%=&WKVtbSX&h1O^mXIh zH!jF`Vb`OL9_V|+RuD7%H(<; zkBAedP)D%-C8+P>Gue*umBi}>)Rd@vK-&+TACRzZj~-{)#UtL#1QU>7?OR_d1 zKyT?SRJSo2kesMVVXS;2W7ZQHtNBD~ni$qe_-Nz>tqi22bl>;zp`?VoC#C3tg&I1b zc4p<0tfIqXsx)pI)5ZAzhgBb@UVYJ`uJjhrjgcYs{}H6>uGrfM+BF7*iZ zh+#`as;rv@tOrvh0cXO;ZxMVZNM-L+kSrR}MF*w@D=jsTSdhGJOlHJOFOJI__bIvo z#Rw87Yb}n4fzYei3RhuoNDZ4oUVekwjjG$j1>7n?p)uS!CIKV?3dKtkgSaV=*Wup< zKu%daF<*l~D)6N^-+d3)QzAtYFiVppHBy-+sg`NpNVX&wW4$2sAEf181EI3ACrzQf z_kx1kC+TC%l7t}Lh@>qTK2iJaTA6!1^pVVnG2tk46mMhw#EM zVeiMHV92Ya!q-cm5HGP+HB8OqCV*lU+=&H4&L+z)@S2M0lp}-u&Xt42x0rZo@*v)K z5F0s&jU8o%kM8#z#F7W`+la88B8J8$kLG9I-tQ9-oJ`JC+cWYe~ZL7NqvSthTQIt_KkWF1m&fM@lfLj%%6PEm*g;+iBeRH%Y}67D&L_~r{dDpixb zo5phTgptNhVx(5QBaR^tk2fL%AY}B?h$d2FbrSjaRX6JdIJMi?spx!#(9;O=gI^6; zaTD+0X;piaGE}z&+i?Pi5603k)DEptH4voHwD`gr#wq&Z6H0 zEVD|P!Jk%}0*5LFYv(pRH|ND`F|QU=y5U7svZ}_CEs`{diLmEw(WAV@QO)6c1{Z9B zrV4o`zA(h>1ypcb?K!k5GgtXo)P?P{W$L;kBU&f1=DMO?HbGf$Do{>AumXb8^VPv< z%EFbCJCfp|)G{qDf0+ideo;E7jK^ULcMYs=&Le z#OoSEy=_6f?b!D1a!Q#hP&lUjvVCHh!uu2-_Y<~#4;bO_!7an)?ZkG!JwTYN5+L-B zg#%Q*eD^Rp$1UNveK}&KQ4sO)$(^@1hslsW8tfw8F96muxY#%}F3Rg3{jm^z8@+YZ 
zKSNPSDDu|8;f-jyR3r||QWu;`{e`o8`C0;X=AkNU_;BC7J*d|Wlb0oELW+fjdO9)a z^IZ)#H?{lQg1l4Iyp|hQl|Xx)T1sVw7{jRZ6q~R_xcNVuLVoVBdR8UCQz7&WN=I6~ z5;!)23ZD|X>hqk%OgCCQ-=%ICO(CVd35xW5axkRoaH41ko@oHQ;4k1=d6sNY+MAW8 zYnl$k>oc9)Xw9KPHTjw&jh$FSA)_%uf>f=jMM$_J_Me52Fla+MrfK_B~C@ zXXYVzye5R1>DhQ@Ah@}wG;b`T$=?`AhH9z0r9T2)weZ)}3kWn1wV+QmTVvO-RnXYUb z0y|NQ zIinB37zvOxf^FCu4Pb!}?2U%6EtfG2(P%!FZ3Y|;khwvqVhv6g`XkV8ddVH}h*umw zS>_(b+iz|k^`{T}7T;xi-u6vezDgsA#Nqf5U6HVlchT;5%acF8T(#Vp|69P}M4-Et zY;N7BV({7f6f(h6Y)Ed{id+jXuc09F86&v5Q7d-)*%kH~c(rweZom8p4sX&GULn<| zsk+PY?RY%fzm?wao8Rwy%J%zm`+d{z)Zd;~-P&2%ZH$A*OUi|Fd%$vTw8OE>ac#ewdS2+v0 zT!lWv^{iH^EblLFLgxd&BtguFrxm0dxCX`L&4R<)OZVGYB>WsMB%XaJh#`;`hJ# z$jjGlM-jxsZU!^Vtss+1(Y3nnFhWObovL0p;J_dG597QSnE4^QmxsShe1k4k1My*e zS6aIkQUVfe%4h7_5=#Qa6-qcSlAa4eY)l-C5pPf4Syr;emV4( z?S40K3=0!v=PX7);tk_s4i&El1_N2%%W6QmuITUuC~gf5#CV~TG_SgubGqtKxoMYb z=>a)z)-z1#!MZDD;Mc1(Ys^sCA61Au2V8Huj1~S0LpKo3p``*WA@dlbI2jDt0G2pa znsRK{A@R9Mrf$3ugpl{d*W9WeGAoDa&w}mlUE+!0O5Nv$gSHNf>5+C<+v^kx>2kcG z**a^~!W}FvMoWwLSZN`PLb=PRjl1l45%I~ktGZrXt;6mEE!>OzXv5E&jS0f8y5@JS z>fiXoXe&;Jo3HaH_q+D6VzjxQ6F%LU)M>((m2JzmEQ2Jk&MLoB#@5qG9oEOGyFWZr zHg(|i_FxbwS1UAB5B`hSf_P0YA-oi*@d*8*_Uu!PzZBSZH-`3BFgiE;dhPGCe51>o zGTV5UP)L>vGo`|d(o&*Sm@F;bC=~`vg^^NWuJpg$T}xLRNtXUAacWc5=itB4=TvpO zdpuq3?kQKFStpQ2i5{s^X5xYs5)X-&aoNV$m^M}+K)i)`Nj8XuVCy!$0ql(^7dBJi z|1kI7$VjCGND8^n)SO-*@(~#knHd>zBkp&1$L&VJi8=Sw#8a{=bpj$eiY)rztgo$!86?hSpINy&J{#Z}3b%RS@e zS3%G5H@{Eysy-zA{cj|vguYz8R$fQZ-~Wc`0cLvOAc-=`_}*`S_S^r^)KIggLg8MoYCpZy1wVfu{5UXgY!=y*}(C@BK(UT$DZQh{sqVp@|mYJ z+fd5!z}@4%Rooc*QKHJYwljA)chrBl$d0;?y4X?lFhw#=u-9|AaMS}$I(sPHc$T33bm(>~0?(LVd+VFte)X{g0;VLHko6C4i67emH*fO{{U z7;qo~d~In*5ES$H^d$_ZFIj>TT+5Rjj&aFylzjtqE{!D+RfD6907XE$zeNdWRs?A? 
zlp+O%AjK?H$AXzPbR_r^lF+Bjy;){QIow-AH7$55JYDiibW!UDM&}+d<5eUJm550@uEoJ?ce`E{T^wvRprs^mT?|=m5swa6ZHo1qeih zb7m8|QFPG^6JH{y-6L`H3`HUk133m8kf7IvHMY%?H8#5e$<#AY+E_T8Bqy58I8hil zEZRk;1dcXY7N{MY?D|3ra?vSFAQIKRHh0Cs9CNp!oS@-=O9q&z+k$k<5g?j>`x}LC z|1WI{Yg7EFVjonn!&fn`TMt@$aTECwydgLcXeHaYWNAgQ*H+|P5w3iG`JJU2CY6fT zcTj|z z4te*MIqi8!Z__(i(a5ywxJRT;RMgKyaEp8ELAa}?_CP?fSi4_=k+TT{5Y2oXhE&$% z(I^tIg|&#)T2-vpio1m8&@Mle(|RsU8|;A`A9fIjVkA><)>@c1p9zuvSlOblC^eX$ zvI|vZBJ2o%rI~00AP(@a*rx2Vz4;BOPJrD4^FT#-7AsvqcJ9}@4KEu^Oe&o52ePmehC`jN)?Z~wg^r;N z9Uey!IykrbYWWp;M7Vxk*)sOUMQr<4==2W$8=}okNU|bxAAGU<*D(fh-Tb;?iVqYT z57A8!pSs$5cJ&@NO;uqcJQapL9iR5hs%ugILUIjaz+2A&SA%FgCbD6 z^J)u12J=W-g!zO&pWqM6)R$H*N zK_DDoDGtqz@}s@HcChsn{>dOH(Xldf@Wa8#!8AHoMq%^G!8kcs4jvthu!Cm@_YM-m z_cDGkhCdHs@5#XwgHv-1tK7gY_0F(^9}mXa!F|k>!Bx*-KY8%RTvAy72!#LIC02rMI8 zn2JGFWNst$s~AVKNA(m$6*wk z^=SdPi+m3MNurGZ3yo*pc{iH#ZZst5H#m8x#$aG;4=E$oTMQWwhAugW6#Z!!IO4sW3^tU@9IrH$o&(@lxWbX((P2SAG8X zHTH2Z`7x3>pXp@3Y-Y|Tv!v2ODC>_SkIzRJ*!jTtd4J+#&p12p3ZIYOJMSK67kr)e z?VL?*vGcLD^T%Plcp$;fV~^SSv)Fn6`q@D2Y(U1cKo{xW^WAxacfx1`dvz0gmfbA{ z42%D?z;3WQ40AW!ygLs)gNmWzxK*6LX*A@?;IO%PYOMMImVlRn8l71hp2GiH$66Vv6MiGF z*VzSMk6me6OM(+T?Kh9g100n9*W&iFL(_F7Ar~@*Epz;T(owJ#>O5e4uEn_AP|lT4w6HeLslTAMStpCjOInw~ z^76oetYi3;H(A&pV`L14sr?R#7Mr=GUE0)%BgIDP@FY&!g1Q);u+9T_#)QW1I(!yP zL@q-~GI2rZ*#DE|4_dHGLwsxHAs)teLkZ9zAGGlR0UHnSX`?(L2FjOA0?eo2(AmPg zt?nfg?!o`W)e^4xyUeZEITx=P?+M0B`W$-pQF@cut3WE~62hXRh+XxnP}=G04rsBY z19G~ZJ|2M5Lv(G_Y_QZFdd1kqCF$a)Olrvyp%Q<_iJ;mc+)FTNRD;Y5ua;R%bylOw zmxS~n^Pn#ZpRnp@%^^k6q!xq-ckzGwYB_$xS~DCUTt zy^Qg$hsp_)L|DRoYbm7U!-UBqX&!7%ac(@>aHMDWrz zixg%tE~%G7lO}n@Qik*7QHcE#vjdq*AvM?Bm>Tgly7C(nGj{QBOOPrIW*j~lX@JUU8fYGkzDQA3J@<&mG>BEsjom{*EvrA{Py zd|C0D&x2x`p%6|f{pAabs(?tdFLcJAQ9!R_@?v9#D%OOB^k(IXO1Q?Ey2?lF(U(o7 z9uS9GNJ+N&*8Rf92IaDHMJ~>)tP)5Ie<7{-G&k1hJQSP~g}f*!7*nH_ zi3Qi#RERw&tL_D2t74}hY9e!8f1j8M;FUDKJKTlKNlcwc*`)gy(xA9f95oP#TBwIvx;Hxjz?61R&}s^#m{uV-U4}I z&MiuymDblZ!n3I@N)gdkV(hiqABPM}Cqmx-)b3hULdaXI1tV=Mx=Zr&?0b(E(gR!( 
z-v0Okt+?#~+0(Mr6)yi269Regr9I`6@#wyXPm#pt9S!*lGqcOw{1>Nb z`(0AC6^K1t6fId0#TRDnMin@&b+m=RdTDbOMR{{kI84vgYU+4h0{r)DmEXqAN4r=u zADJ&dWjfB(4JfB*z6Sm!zEmFf8Z9g(Ic+E-0?~dV{0tX~M~R_^A6gSTu25M*EbiD$ zrnSNw1ySj8%<1#6BFhftC=acjwc}@PN>hgum1s*zSyM7<_9A3*^m51~m%el=ms%1! zJ$8uf5XIC+1mTX&C<-b1@N_ZW)*&gK5bBvl!qJ#pV{_dVh--^z@@g$nFLgzh z5xW`tB0NY$rKug6XM*6M$FH{;^#jmP5aJU_Lg7Q-rY<4{hjQ@A|8tW@>J*OA`1P;B zp;hQ7)*6_d@@o93{Ty-eXTP&_IVVL~<8aPizkq&f8?%Ix;R4rVQ6`U{y&%6ZW}R#3_( zlpsahV}&f_{}3{fxFzVkzp`v^1oQ=smffXj0jrfF3gI92ECaV_=+)P`ymExi*Y5x6<3Q!++A+0 zrOPciyqZRyZ6cOR?Z=#$ZaNn(h4RXedI<1+JNPEYifG8UVf3lz(Cr;7h2yce_t6SZ8@H@!#Q=$6NeX5r_k`|bVOD~2HAo1XltpS9#yam;_-&S2)lf}Z?r zkTo8^hzDRF{t0rta;&kah8e=+p5vEMX2S0eQZsZLlzh58b-KO$^UIBs!JZEtsE^26%O4=*Qv&h($|+&hhP)4B}5%qvn)y!`)kJ~`+XKywbw%}CfkxfB zjMG&nvK}V2O-6Z8{dIouYOp&Z$g3wpaCiTYh`;V%JdUkbjhLT0>PPs^QP*L`&`iWL zS1lal)~hE9)Rc&@qSe#YQY{2VR!>w@PVF@$YOdue*zmLJ>1wGKMo3ppB!E~ybq>P# zaQDq@=XB&w$3euC&VT2Rjkve}B7y?li{hMZkCn{4IewR~%0m9|>CP%R&cQOh@J)KV>6 zII5ngmKJ2z(y}>|RE*}6Il&>DsiHoMQ?BYc*U|%7wX||eVuWt1V;%(6Y*P*%KmusZ;Ucy>i4D^Aj_|T!356K{aS}YdnTM>YBM6;Bm=FAx!X}C z7$21p1h$i9I-m_q#EmRaP}PnIpPISq3MS%GCh^({7r8qWGQnd`IPv>LJ?ys^sz047 z#;ROe{RI--LIf;d!Mut#s5 zVJdB6A6u{91{=Hu7hAk!gvakyy7rp=wS?}dP(r9t3Bebl1cHiT;5Pdfza!aig^Wp` zsA03pD~%hxv}XRH7C%wej!rWJr#}2EmBg-p{ds;C3bV23SXxO}V3_W*RMRdDOEV-( zL;KOy<`5X}V9pbT`7W-$I^t*ua8_Q_OC_{A2cFM|8|c#cn8;v;GFT>5c|?%(OmZF9 zsK|!!=B0V0iQYHI({d%)Ioh9w3nW3`uG>Ozkb{CtaB)B;=zT{y$fbcJ<2X8Lns6%9 zHt$EWqZ#^AFv@4!`YGY+dy)+>- zAVx@@G{KK9*sxo?cv*mB7m%wMVXxE0Oy>&e=N#V4@w^c_l%q5<%>(BMcr(sCfn8%C zpNyP8oBAcydDcILnx(7HT@=J;tDV23x0yIQ&OY`GAyl?M!_G(VojrW`u{$LTh8%T3 zfb7!A10!@~ultZ&i4;!XCC*|uRdK2=I%4-nJzSiMpqd4jHs*NOg+d}-@O6;pM{YKp{D5pUD-H z&zVTT_op#GS)30T21bV(>QH=v_6D{WQOr9!^Xtv+w>7VR2aiJM(~e!0sd^35s;kSh zr)+<@ICksB{2J`OHow@W`Wjq)U_K59WHBt`jim1a<9`OC3?=#p%Ot1w5T)BfeZiH@Rk&0Ud)R-*t?Jn z({`r}@AuGM|M9P1%X^xcWj40Aw15?*2#YG91zHS#0H10I9&(Es0y8U6*UK`Ml1~X` zC3l3jC6}0Vyt$6bWJKsr;`og|U+!O?6YR>>uIM9=GdY=;W3Yc$oSks|(BzlKing&@Fhh 
z7#v@#Oy9gM(>I+i(H4S9c*NEd!a4aSOxx?x?XxLFN(vdKrGW@@(SZyI4ZiXEbocUO zJ_76EJm1i*MmnjT$7(5&^PaXxxExGv%`vkd${nBa!26E%k|BR5QTJ!h6+qGA{2?hy(*h9 z8<7|9o9CE+m_L*#qb$-Sfh+r^$|X^1B_lR_B%*d!xOU$cvy+q;J@qNG|=#_-9prtYN8sn(|t;7xZ%)Wv8bXcKm`g`t_jXf zCrp458AG8Sw?vgX3QMA_j?)i_3vA0}M~ui=8+D5K5o;FA=J%$+_=vOo3$#hJy|}si zlvTJ9Fj=tvl75Fu8Cs&9+*HjD=*zsqmB8k$g*o0(%$@n%Nb#Ei2pOsSFDtSw1LLgiX{^UPv70v99Ij z8FwPoEf`od*n;rq)16+Y8424=wetIreI01$VTLf7g&g7cCT&xm^#A%d^bh$mkaJ@Q zIXBP>TqY}Uxq?wh%o*~W8I@myTP={kjP$smuvnnw_Zdl>QgV^LV)dv-XWh68fBtmv zV|d{*>r_*1=sE~-93FdmpR|pUah^+F0?_@oBv;Z z*IQu=Jh*@xk@aCouYbr2bk$|C2rH;3PxBRqOKn_6la_S;2e-{rw0YYgHZY7l*=w$X zJswR5mv;RJx64z~dAlI7ccd}Dh8e-gS6CSj)8AuK8WKxY(yGy;WR3GRdeRfPlc};h znXm!^$3tm7J^iMEI6V0V8iNB7oSW;I#oCgs5V~DRI?jWJ?0gG@-+3@9_K@NF`9@6BXbGIC==7Gnlfu#o63gr# zJ|rj4zCC$5b;cHo*9*r>+g3~&X`btF;48X+G(YLlsck2+E+DeS=x;3*8LWm7Y&*lA6TS@WFyjbMMfYUxq~)U3(I$0#tZ zR+@j3RfGpy5<2(|-EPY?`7Qr3z>yD+oi4xLc{B2QCz8XzA}nEnFyG&J_&CswkDQ3* zHzRL`ARq0gH$#D?AyKn}pjlT%Xe}_Ug`=rATNO2+5TQ6^l6;{S9eh+y(-hkb<{|`B z9`KUjHteqi#tneCEN`lvHq3+25W|&p4$4x~Rsz|>5Uu&Um*p30FLnVS8Bv!95Pum^ znft>c62El-^e{O2gxPcdteRcauJ6`&JJbyM8`j;7nhE3! 
zES0UKumH&ChNh+?1v$X7u5c?mFm3_Rus2_gznXpZRU;@Vj7P*!*_ zWnCcMam+cFpgu~hB;q6i95?7eU1STwF4&|4+Z^^ z!(;fcEz0;fQrY+1(niG44m<-0n3X~U zwaD+i#oEm(d?6Mr@UocX-&IBHq?(O?T`8IXPp9)oh4OF-IW899ZRHFic(B>-35S*m zDKeFfq)i36M8tlPp&2NkW>llVnp%pnSu~Ywixdq2pQmhpvTW6Mu%w&mV}T#k0$jcN zd^a;AsH$PxV6Rf}p(OchbJcwbpHJGs*YD1GL}?sfS22TU!52mo)Db+j$4&47u8JDc zRn$0(1rN4y!(hf0)8AN_IGfkQt4#ANx0n;`vox~q~xQF-C0fAoE&@BPBPqyDRy3$$`HumHgP10zS}N3E$pH!*m~%%MIkvjIT< z0=9qpsK1O+N59D=6HCN!;kugzw8|eiTf#3pZVYgP|LTh?x)#<#)6Qx|;BO}VoJHEl ziwt2Lat1%&@S*1;pgxIW)X75WPb_|zUL>cttEaa&Pw(`R4~x~)d*2AWd@nHw0P=tR zcItF=3bKi+tK@X*DrOD|6HzWIIfxMCTaC>*jz_+P$wE8A#1qizWfvyBiEO^=!#($+ zjpcKzZTa$#R`^eA7^S@LVzj$}>y6Dxfy^#|+{ zv#mtt6@l_1`)Wqig<$JM6HB>i%1eh@UE3b=oO(+8KlHF&T*DZvNT%u`W_@FS7vrs6 z&+qh>@=T+GXDn4_XiKScEk-}rorKlZOWiXJ0MgZN#kAP*b@mvuBx9;Rw}JPlVGWJ` zHB5op#;lWSYhKB!$cE}eQq!mG?_o&9f_#;+gPp_CgD1=wmf@-O8~RLx-8Kn3T1CgH zFgBU0W;4UmrW#>tU!D@HXx1O(i2FDKIb5 zYjEL%rRTaGd=3W>niJEbHt>%Pf36h2zzA_cHdj6Ata2QahL=RS}q$=RV}Rj$al8_-1R_Nz9hw|4n<5<#QY>vhie}55$|W z0Ac+E=HlUiZ<@jv2sQ7(1V(gBOtM0FYYz!=50Ees2lJVmb_M^+2bqtjD#zbkJ(;`l zer1U%Wyd$KzrQ;9%hck#>eBn#cgKSdL{8M?RqmIG6IwpGRe4t%dVgp9b~`DH z8FY#37YyAQpRTtY_araH~Woe^9}*69L^KZ%Gu)peSQw?ept0tG%K zjTREgVBN?i>4&Zqlid)*R7**Qq3Gycv?ECiY+6vYB=r@xE}U2b3Tz7Kv1;l$J*89K zDWtLnyMyUiLX4u4U{f)e-2q?)u__m79)4kcu<|+BU}JUx%pw$2wBz^0V)vhRlq^ft z)pWec2qJXHu;R@Fs0}=3PCj1sLONAY*bX1Bwyl00tD>EwYRZDMo~ePU9tuiC963&! 
z>0qjr?o_{4GO;9dYYA3C&sQaOd8TjVlz7_+2&qa z5oWQ?*`&!=NlD2$nFMn?n=>q%dt2C^3QXyWvjtxdQN^Jqkcr4CjsfE0nMBb9 zI+rC=VaK~slsn-A#9fY*tYVni%exUdcHAmGmm~~*7ndsQ((a{3V;rxLQ5bFD7k3F-Z&ZG;P^zy4p#qcNm70seiV9MoAmsf$WyDxZ9+tS=FX7_dG6w zX<1gxsl)|n_%weK3EYL0K!DNNsiqSRq@gFO4sN(4)tQQ!YzV({%tS3<3a*l@54LJ+ ziN+8G#GT4soX55y{ZlR`d5#<-Z^XSHLPU=08SvrQv4>R%Dk!kRAv;e=+O2|PBphH% zRSKp8zOGnO95bWZDMwe~Q>sVlh$qJv4b!P<8ZlLFopvgUh0RE?1?jX~)ug<}zPerz zpu=>mxDD`x6iSMrXS-sai%mJPx3gwY;H$X3aYfilj%`UX7h#d<7tDhFyN^ElD4qa| z(wKA8qJm8hOT0Ay&exsgObHs>V0sZ5ULm`qH(`mLcOHqRZoSolSf@~W=KUNo(uG~;qQ zr!z%8u8Q0$spy!hbt!GKXb-hAa^bp*WZ7CzR`%hRAX!PHSLaGpt(fN8xzaABuTG&D z`^>hP24hn_1y2;K%=U2NZYbSKA<;qS1zs;y>AYW}fP>h@i@?9X_&f^UcjsS0$yA4u zVcKp;*#ed&Z%0<&jx4>sR$;FZAV}t3ue=!~tYR1p3CNa*6NfQLThDt+YZZze*iQlcnt$D;eC-rEf-0i@#=~JWR|Vw?AL(O ztSbx##e*OY8Gy8q9S)riWk%<{bB79biUEnO$*d}m|#2V@ByL zwLh=CUWfOvrT`2JjAW5&-E4aa#GUa&u7-%sYSBj{EBYvdIuGbA^RFte7Q%$*R3SV% z_&jh8%PgE6Z$HzUtX@OQ)@QW(Mqv38m;dv#pC9~Z-_JF84MT^)v9!a-65wG34r^FR zASS*7(+X}84J|HKe7)9H-_Qf6i4O}lL}(XZ@FX%QzbOB9VCL<>t+xX|z8x5MKYSks zOM|oEVnEd}L>j0bj>C_rG~UWqtx#|p$+-zM%KYSM;sAz0+2AE&LjSilFAAbR0NZ@7 zfNi9Q%>cl+fwg0Ih3Oi)($<=#kap-=)bm6)A~(arWkWkuVudamo{c!xSbX#w%qQIY z4dw;z{f4I<4r2~tC~OY)TArF9q-{mpw?k)?@Zy+A7j;iJji`G-8-YPcdCOfiI|GfK z$;M7!V`q#swpZ9&WBW;Cdy>6|A@)Q&dx78jifthWFrxHL_-CLz@sfF zkw6EHmLZ{l^5-*8oGs55?3V>i$txOD7)g(kI@=(BYTU<~X7pIZ&MCwyS+-Ijwn1px zCKO)~$+)$qU&#w_V{^sK6w@|Z!2Szxvb6Gto+BvV*t z4mC;IC>Vg)WBCqmS!f_8Tw#mW!Bt$ZupJ@u*EFl5aP~z=7nY$U_Rl%SA98Oat+&U; z&6&M57FtV`j0g9BZR zHy(@82(U5DS!(eax&J*$Y2vCx9@GmaP-%fzMkuJEF)25ax^ApTHBh>clJ#PyI@ zl|=UWC$={O+D-vN$b5Co?KYE*ho{PrbnB=IoKZ1|pO8 z?;s}6(9yt;N9F6}tUU=7aOH`k^21-|r;*Nk?x=hNaC(6CdkAoS;NMyt&JTdmY2xW{ z3hxJKQ?77Sotp@$^ANs-Sf4J~&jg3+sUWP*DoWf!lpoIJz0Tzh3!w~&O?{~?s6CT@4 zt3dhj$=;H|QQ#0d3obU!ea^3gm^SNxX>*|s&DT8tav!&WinsWA*%faK@I&TQ*{|C! 
z8c(wb+iqk4%D8bl>muX`@EU?hHDbQDl6-IooegHcVj;m1&i-8CuS6i^02Iz5N7*?G zg2Tzjm+`V7K; zF`5T|7Zv+=A?~S<5#zil8-s@v%kPXvC{Xu?*;~{fE~HP!Ixm z%`mGAm6%zCs*d0AJ&@tF0aNvN^#?XEwcuESQj!H!8_OsV1r3Unw06d^Yi@F27;>?J zi*(wYmE^ulvR(5~Y}g;HMn%BZAZ876a*ML_q6R|K3?lFh;?fA18V}-T57B4<_eL6! zrt0JCo<>-j8^kraeVij4q-MRbHpB_KIYG%J7Fjt1F*C?dM6wog2A$$DL;UD=t0&tlDPP)Af$TO@+>->?N*)GD&VEO%W z^>}sYWaH_{)9H85rruRC^4Bz*-#yzqxfPFYn+dP1f#j{V_dh-2NHgpbC;j{qe|ZEY z-Bzbvl%~-5x>GSNn@EYg_ z;Lx)iOqfH@PJ#s=mC%A4x30WFAC7Ii;B6*2EModGu*K4}f-ssWHY~=8g;F2uJ&K-T zW?={|gt$bS0=yarkNpe=HY}#+AKmbFfyE}qq}3+#L9{M69tFatbIWN04Vh7H>3CL+ zr%0{fe-mM$CeeDp2vxzl2<-q^4ZH(K{XZeR=fY9{8ZrUy91YAJ4FpE-K=Bvd7uq-1 z(N`jK8}QnvcfVz-Gx_xvDAxbIQ~hu+aQ<-3dBW9CM>($Kf{RVLfBiD104Pe61PDo$ZxF0-y)oa1irO$D99Fpc z78U^!3$05DpA0SwvseM$0JR)padPblu+xxfxe4nRTGMX!5%h=^7lCd8Rf%aAnJyZ# z{sbQN>>wT$r@imM_Grjv@r1911MlMrJ&g+727(a+u3Kn_#Bs{|bo}HNJL^s=qsLFi zA~>CSrC)}xiAuX;7dw)fB5MOIJ~*5VT`LiG=4D9x`5>VQcvLCb;?`u#YX zIOP+mzC^lV)3)sWJ4%Lq!>h`=|5RNezgN_pLUQ$4r&G<6ZczO74CKd?wyj>${r9Dg z1B6)I7B<{F2lyBj>N6Grwkx$468j|`fS>A?QefHzxc%CW3j!Dwv;d5R{0dfmUXe!t zulg{%E1sW^I{|08hi=Pt;?+3BH;Hl*p|nx%7-Zl38w-!6y?l!=;~=|wPW5`0b`^*D|rK!SBh3h05vO>!Kf;$ zZEP-kt}5;?<467ip7)y`=JYl6oOxQno{K4cj&qB|Q6u6YN_*>N%Udt}(*pjn&Yjwv zj+#5o);uQ{iKv(0>m)~x=JeK+(k`$nI;rM_Lkq#1IaKtbhxfe5k)H9V$Bm_d#^NlA zuJUl-*_g|w%Q<%C-CTpH{D8Y^7O|ICi3jn_NpWXUSsLQt&Qq?Z>tXZPD7qE#oL$3+ z4L#0XXv^SJtH=?tfGYMM3NsITNV@m=9YF4P_IH6!x70jayHv9OgK1=p9@WZI>62CH zZ(kPJ8KiePf)bbd)j7pI0I6!hHkpy)CO}OO+!kvGHQ-zYaNNDa0dR;h9f`vhTtLIB|~Kc6D*O1tz&kggf1YQ;wu)x+utqjSN>G$NyY;G5k>1 zxHgJTqZxD>O)v$N{11Cq_tZ9)<^L7cyv*!_AExJdx2kJ)r*>+$XQpa)w)R!Fg{@#) zUP(5AC%z$p4B1Xh5<;4$FC)!01s@x!(+dBh7i&n@;~f7=iI9gBO_gruIZ`T zs+5DId#}9)1r^lEQpA)BPX}hU-_GWf#%*ayD{I(&S}yk~99rxV^x8%-(5DragAv{+ z|FeJk%iY<(OkT@fi76MG-`)Lg{JY8T#v=L7!ty1;8x%_Ae{=_e1Mc+J(h7FJPBfE9I*Ox#cw7t|tGGmVqK5BEJFvfp z7)2zm%zc3dK2!uibq{~{9R|!9JTM%Lbj8uu1(FzV)3TuA^k4T39Sy$x85|P9Zdw0t z5D8nuIH$wmDUXPy8*)o?rWTNHstRPql~r5IS-t&A@!KcI&u?k@!tolSS`2LD@s>;a 
z$;G_3kz7-;w5;iw*+8n9)CwhgKr?$xjco#;R#X0&=6B*x`HYq<6*7|Eb2_Pp#Fp&U zFw6q_5wJbm(hWt;*(y3!oxvDNztVHOm?8#DW3IHdGXaOpzjvCOqNVd zErDaBte15)2ke?zW)V>XK8zkoH@k3{&nUop3C{3M_San`!<=hx3@O47|FXS4k3iar zkBrmnf#2gOT=2D51rT>*620~J$aAj%-R8!aOXA5{JJ$sqmpkka3HM7JoUw4YK9@Uz z=r#y2yDof@tpkZr2q-7#enSCkTkEctE>VYzyR>#IIQwKwTM3o1=$cJ{xqy49IkO=irCkulc^qukI_CFXul# zd3gEt(B-q1^5I?`aJoaAA4mR2`S|4V4~>KPfrlpYSk?(4JBHhv?dF6CN6d@x!8RNU ziaJyT(Do38kA#N5;NWw8r)agC(}2Tmj!;J#!Mg95_4?cv&VTdZTG#5uem zq9#kuxqUW=WH5uzIe2X+D4JIlaf)Gj|L1Z0D}2pswC0RsOyj5{=1~#AUT0~kuHeQa zCqyE-mW<`W{`?1TwBC)^@#%|#5D2$oJrqg^Kvfi#B0a_XKJXW~XVHWA-D)UH(t z={ra)nl;?_92qwUeZ-nh)MW|-Efox))KM4!$yH|;2`(a|Cb4l63Fo?lNUjR@PS)X4 zT%a*s67nOnML*Sli>Xh5G#==`r49gra!$nnjNNPW6@c>54ZIjiv6L(7c1b?;!aq%G zfHn4fnw+nbn%6VOi)9V9q>@!SUIcaOrr#*X8dm-~_)s}%og4r|;Yka<`3-bMG!@`6 zJRQWkE)!R{oeD~f3)pi-;Fk}P zF7qBCoa-DI4S&ci|Bf$#X4yVWCLm{pi8KPx_Cuu4M0QMM!aRTiLd1o6I!kl%^pQI! zu?xV|v*vGDgV)dzNV7jrrx8NNdnbT%^W+5&Ox-zY%15DiWa#ns+t%RQ);g(jpvSEa zzimMf>fPl2`OMV$@&S-sE>3^{?3;@}Z-bf^BP`{d?0j(QeCy@A#}D4N?!LP-#m>Yx zMI4Nui4?-0N~lUrnzkFTkqjECcET#^pCwegH@0lbFeB1hc!kSqR+kU2sYX&wN{gjb zQ%@SDxN?b%-TPT0Z=_2(`3v>*@vGQ(lKPTv@So&OioWG3Yy(#n7w*I{{B!9Jjq%>bi9z3e?q*6TuHOp zZO+D(H*RZj&z(|psZtJh#qA)cT0qQbY3x6v5W5#6)f@X7jGSR6ph#sV#;Yd!DA;N$ zYH`ae;%PmFDMP0GiY7X+dJ~8(wqti;q)K3L#JvV6&Y39dT3_s&n9*z-$hs|CHEk^o zBZ@m1ojs6*oz?R4>vm@c38aX=`9Km@e?DpC^isAOY$lh^}$>8##!w; zI!Kg?>`?4yZ6Tclpo!vkaH9o-ODAsMv>TkUz?Z1y7CS$?k{I`} zl*+1RUX8IRlzXg%+7n|%BEh(4G*el<9AnCYa8D_Qje?RHLyaj|WXqKCSv=vq9rG?3 zN$qw|TwyCOZ@=E`1LKoUWJ>a8H&drmY9Xnari!-kO*{BioDPbnal4-_X9Kt7Ut=b2 zTIEcvfm>=pOJs|xE{_HV`kho9y)SRDcL|q0`D=}BRSLlbGQ-DS`JHYot&q0mZ`$w3 zZu^3?{scTa&FWJNi3}k>Wrvdo6QH1>8lM1jGRDWyFjMj?)F0^BzcGyrQa{-NAn;?_ zII}2k(%DTsNkt@LYA4iGim7I?77~1yz^-Of14yXZx5l)96@fh8$-{vN>&W#rEM^MES^++S5c{801QW3uFT&3Obq3f$;rR+ zx6>J}qv>oqWMcTD`?Xxe52ITGM9rxwFlS}u&@U>rl6Yjx88fN_+m%9g4QVylL-~1w z5u*n#QOuPxdO@CR)U~1#Nz>?qXG~qi=t=%biSzBWj zE>4pvX;#3pB$dcxs>Q6HVtSvNW*>5#H&=_)h(^li(1`U))OgB&Q0wcHZrUr40G>&8 
zW>+>mMP(X@YXvavJuG`##becO_*u3&#XG~=wd9IB;c*csVaRLrGy zyG!ZEyP~w8mBb$c6C-DMR|dSJu0+F6&W95miillXkDA-14UH}>>AO}Ou5H_*PRN> z6@rI^g&y)k8fragAP!F=EQ{!DxPCUW2FT3erL*CUvyqL9t;LJ`<7XqIh}hf+OCD-B z<`BwQ6;%@s#ot@)MkTO9=@M(i4%|T?WCewmBG*L10!WWPK0NrLRsT?bto-m|@WaZc z@?mE5hvsI~l%hI<{x&>SJzIqS8*9H<#O*B(Omqu{E^tYJI_?w5@vnpT9BgAnlwpvq zs|1ifK9>-P%<;yJz)4miR58^sV7peSFywIj*4_H3C!QMWv85Mr#b@EHh9T^8oLfCAy5a@92C)9rRtdrSS6qlniGYy7$9i{ z)g1WWC+PuQ^}HyET#Jx0^&4EIXpQo&a%JzD67PiBaos$;7*@&Q;rJL=+SA`Xp!s3ok56=(2p+)a}fBAfDKSEm8`TQ&s zI-j2HD(B0;a+E$!Y|ydNqo%0z7^Z@eSElyDAN`7I6}6OYK`ARy5LTM|Ku{b6vml7% zsh1+xiXX3{wpSskEl-{P2Bcg;y9Vhyeti_uu@-_))uD;fv1MK&5%izK5LHu}G$D{f zz{#Pa{Hy2qOF2(9O5I}T+9M6G?qPmt&=^T}UWjaWB`WZifLz49uOp%5IZf}0oUT3W zj}I(d`3zJMCG*$*|K@Z}WV>FQR`LZStKzHKYk#{~?I$Gz24`G_}%gXWdNy90tQ> z5?*@)4qwYdah>7f8$<+K=2Os85Qju{l2YPc zqU|&69-<0cGfFJ8NRj|jK&`)e;!n0#u=Jc}34;jVM(-pnFdND=EbYoP?3bN}$VbR+ zGOQ5GXBVU>xV#i(S?R!s;hBLNwC6+7aVR$R;l2i$-p8F3X}L69fbby~03qB(h`9VGS5u~Uiif!kdfH$3O+ zW_(w0Sp0N>*Et;1HvbXo6$a9G%sCl3DPK zQ`iM*WB|FA5ya!Zz)qDwJ`xF!#+Ay8@>LdnRS$i%A}f^j=Svf8i3Gn1hfibyx2KRw zgcnrgq$0s?l+M$1cdN)J1cvxoZs~$WBM()t2cXeddO$6(W_?U8HS$6yW?Ch08Y4r{ z>!ve_d<;$W8b^=6-rvca(<$Y2geWLZ8jzsH z3w!&BPYzC}usAkkfU!T0kou$v`Ce2tJ2v+SCJ!F9w_51YS@7o-t~qIfY~Fxxry{&T zyMf-IikDbK3G^RzoQ)?U%6OGN2}ud20N%yyy?aDZvW_`@6(^@pfmFkt5$`T?H2nf( z!2?A-dgT`I7xw={paaWGX5uoQT;~J}9=oJ#OqMg{il^+TqeT133#djK;4~xl6rK(X z$*yd^HvPUdg>BBB>|=H;_I1RBsN*JefxCUPUuJr9e;g2?y!ta~sl>N(et-0BYyI8q zOGpKr8|4l=Jmhu1#mPi;Pt)9C3buQW`PuW48SwAnp-88Rn1;yAi}+cHDWj0qdC*#6 zyGy#MS^xMmjy$I1Bm{Oa6F*?51SHpk+9=ss-brMjNGBjcJ}0BI2i_nuT;G*1LZU%G zQ2Av%o8i%LKjE!<1XhzbJpiA#f@Jkq}B*qbiprGYmcUFfM^%qzKBH#nnYyXb=n zn^n!;Ziu)Th>LC@3rqR6sofl1aj(77Qn-JTlm1d+ucd(6W0BAnKMtb88RSaGASVJG zvypHLpBg{$Hy@$jziXGj-8O--*Y)&7#V?bRp3yIjFEWc@n3DA%c_EVz-y-9fjRVLeF9v><=v`s zIr8jsbmvonB3e+@3AX1&xuhT_)Gj`s9|avD7#+IKs+*%DYUmdr+ERi)EOmMj;=&;| z+qET8#b<#k{trQ zh#I-^OU4G+&R{i120CzLgVJ-nXDX^~D{N=+TJTaZ(7`b_nH74+V}%}it=mrtdJuLa zF=sNXb6*&ggQ7IeQheycI5WHisNoHF8#q=Re+Y;8NXSGJcb^$*A$Q-8et5X>Zt}h) 
z`r&=;`TN>r2=w8PJSP*;4Jo0HLDAW~TF{G-FAqD69>rpxw49>nnd&DK%p}F_DaEXV z)ihr@SyB8I^ozQvL0gNf$;~0^fg;veO_nv1$0>A-ex-Bt54<7wO>w;H4UET9oy5>r z9<)~%JaKf{Yv5|%-}hx8!<(3`pjy~v!%UVGRXKiDG}UrR0rpbR9D(l5NU|k!p!eHt z$+*9Bh7KJPiq(GMeIr1r{3E;)Nj;a=Oj(XM_e#J`>S~$&&U@!*oqGOwF{1&Lphv$Y z&Gz>1Muw?RTFwc`yBeux!?y1|Pyd`=aCWNOF=*eg&@2|7oxu9B6SzI)tvGuqiq7&d zyIYR7Q?u=i!MCud4H0T@V)ru9YSMbW`p7&6cbZHed5VFbBT+Th6HD)^6%%V5FF@P$ zl!L4V1>##+{c%G?ftsjzZ_9_JKbx1wg^R> zpT)@WZKkEObHLejrAITR034vPfB)>syXWl7_WFDTqULW8=GiCiY4Xrgcp_+A2_^?ebYJbmGW@hr`jXt(sY7mWeO+j7B8|i*#4dOu+)D(5#c(+eJ65FXj=sX1Lehi$D zEPm1p6p@vqi*>k>{ZUoNzUj0rTTrKU$0^J@3`P!AblrD$JEVFNqX0wn4|GpE>`%pD zsvhh*t`^elHdwAgCLOn(5cOKuItsZiw6~s+Y3f90yIQWn)nF;+yMx=4SQ*xd!@AqM zqQk1_qv9ztyFKoflLcZwI(^KjEsuE~Il<5gGK49CA%v{WkzCYN0`+6@{eDq=M?cwsJ>WQ=Z=E z-;GV3AMEhnUoU*W@$e^8oi(FJjgRrckVt~bt5WsN{NmZj`S7k>$<_*3-cc$=+mY7` zy;e#sMhr(fVlgVLII^FjoP@u#9@8k;TI7n4j__@ij{dN{)dWnnPgdx+@QwR|StBXw z2zWuWi^+TfzS;p7DHZR8B=rP*Z0)NtPdKK0&X0u!I>$uX@-jJWTEfS*&|axTjqy3$ zLY%p1-b)}97KX!->S$$_b<-}XIeow=t3VBuH7r00gk&wN>{ehW;XyZQhA!`H>3Gd5 zYU!{D8{J4nFO+qgNrcDLw!k~FG2xKX18)w};?%f9Axgw-OepiqLuWAq$+G=1diy6K(q-tq_`3od1li_hKBvb)obeZ^0w%UzV4=9{)+0-Ix9IG)6 zxaqqsZ!&7CM9z-Cp#_1GbC+jD>6u&sV_C0W-I!8hWt@&V$2sI>) zl}M?Z0(}@ciZezm(Czv%@deBXwRBkE03(HF?u!co4RDt(_jKAJhxWqYBE(_dKbd_!^Kv4oNzcbjJ?fc>R^!Uc-g(=(bFKBQ^?l2|ij1w%;Xkpf<}QeuzG>Xlm^+5pB5h6zUn_&~4)rRXTt zfy2CLkjEBhsD?!vEakd)I|zHEscl{q&WzTxaWs(yZcxUfCY0U9BPJL?fe)-G8$X07 z_`)Ie!DWEF?v6SiBOM35nXwKSzg&&t+&ev973WI8@zXb z3LFqaRmhvE(UM~j4eVNNWTEnnFX9`Yt@)nM@i!RuO`%0hFu^9-$*Pc@TF^F+v%n1P zbOVp!Skl_?!VOB)+7Us2sw`UG&uuhJrF=_pk4d^Aa#-;6JWX_624>xB&&W z2gq^d2$g`H&QamDgYo+nA@e56tVp_zEh?p6iGxfZXt?3RCsdsGcI36;CVs$eP?2ug(-KU!X2NbuEjiGUvgI34 zr8NzMY7}UV3=S3)$+4tORf@4*Zn7HP@82Z#Dp3Rf`_O(9biT;?VrAH4%@-aZa-F z`q61e`2AdQ>YA$S=MP}llqf!5WZRaKvB1ls8Rf3f8^4IScXgfbi;RZ5G`ue&G+rr% zT}CG2%51vWKzhw_QSKDzhu5o%K);Kidv%p&?NB_IQ*JIK|B-wToalVR;Jjkz%Fs1L z>UrsR9pU4P2;EmvBEHywegl!;Cgc6da_wD6?#R9y5P)QFHK71iEBAb`0Q22UCxFbh 
z^4azr;J~`T1nxrBE7eNyfH}t3;gg-L^=${FU;s-uM&AphV4VX5aNIep;>A!ur$UsF zLCx!dGozwkr`+T{!VjWli2SplV<81q6X_iw3CwtxM~~W81aO6qv`1eJIKdZZ1~$YE z1ga1+eBodig2IX&<{&429hrFzh=E-)J7Zf2nS37lt2K%YCqOJ zUrLd1atOOo)qY{8S^byuDjSKGzc&KR;)&mxpwXjCn%&CLLNs3v@ zVu?B2c^D&=6yco5^~riB$Q(fv?*M)XTGDQngRGZ1e404KtEU^sgPTW_(}(>tP`*%U z(DtWs)6!oPkdXH6oWKlnRNKtV;ic*pGTOxmBfQs8#8272v%&tiv$tI~_}TpKX;Q!u zZxi>8)5PSvKqT^Lj!S4^j+aK{5tINRVoX@~$y#IzM38l?Xvc6fQ)m37b6Ghb##adyzHHOPSqvJI3{(~7;%+9dU zBTXdS?fKnE*dOk(qn%++DbX9!saPuqI-)f}4zyS*J|nP6xA4N{27<%V$9@hnpN5>A zhR)zQfDI}*$e^E>7Df13wb;Q8IsoTfb=OfXclcX_J@QWd0{qPgaAFKS)F}yHG;nD1 z*l0urM&^{i^?;G;zHmcqWaNFoM)n5{EVbAIK00v5bNkcE5JpUd1181&863BY13$7q ztq+#GdN%*6@PxbR=x$#mSn}%W;>qmY<6CRYv{Rk=za7ZLg?R|P;T>Qf4LXQ+**>ea z+kXMbNrzdElYC;gcNiabn|8o#<9v3WK@#FZNGU91;Kz?+%3)5p`OA$dz^Zjfd|N2L zq1Pdga9m)Y5L@`u)o8f0lr-3PDHiVvh3iM?Av*9$*w@i*MJ|_i+uO`Yr)Ae|fi8X^ zW*UAw+7%Aj@GSghL*(QWGaj?lHnGyf#-PbHPPvbM@Ho1@IUp1OS9h5l;Ktap80c873r7l^wuV^6=6wTvrMZV^LysDx5|C;| ztloLytK<$Iui=kP{43eN>!L#B6P)wzN-SYn@=Fs&AgQj4(tsuos2sPXp+$;qZVV)h zV-YbsKPVSo0RtD7*RV_&4BJL{!9T+*W#Sw4{VaQ>p5OhxK77G#%F4H-E=L^NhTrT@ zVEHXDAMdbFE!VHY@ zO$!0b!0*0~xgG2idWr1-1f7=Oh_bIK`$}p@{WU*#81Dw^q(=l5aIU6H#K&Y{Xn*ox zX|V<;(=rd358yF~XmYzcIp>H|e6Tc#l?O#kTu6g(l|F5g<`+|CKf((Z*LMSd4P3$= z7wcTMN`xe;5%9_##?2i?S}RwbR!N>j0=v3Tw3&dYG=+@(KYbU#(vfdiCws$8(u(qz z)&o|EnLni`f$;O9{X7}_{w|2P1>(<(ZC3$efyq1^SL~zGx+a|BI8C-N^Yb9OlPB~@OsK8rR6-HMf z7RA>jI5gA*m6gk;${s8ZQ0TNkg&XBY7T+SGq_>FW3W%&ONje&$<-A7=>lh_X67y>f zaj9k63OmE$Ax8ThuIi^?ttLt%$4rdItHp;*uU%!ZvLRzwY_Rh0pn>G;kjSWs*Hjqc z#^FF7Ye;S(a5Ho?{OEXoT%;$haYP0vsZt2aB!GIBZ+(bBPygZI16_bdrYyNY7uCqm z3oRv%q!bP|N3oh@L_#MnVya8n>7wMT8c;ACfPA*F{WK@C6~a)&Wwjz0ur>}h*B`N+46XCsrRi@TDl>R#dP z?V;0qyOdycy0Ct>c~cjs(Q#skNmnf?=|kwog@v>bf-@_0kr2UgLn&psTod?rlsJ0X z=IaVq)gC{ua$LsA>l&46NrJbEXn@_iO1$wx^Xyf1{WF>w2#Fz~q#Yq@G%AUH^Oq39 z*~3{QPgBU2B&U*ykEF?(z_o}5Rd8HU3HPePEG(PRngqv}nqa+Z&WR)lh6+^Hv3a4A z7^|G(Y8XKrw&MWyJ}0%5I7`+g++|UPf_4OQG(+IhI;bJRL6iLK^r4?T`mxJZ{8}Qy 
z8WrU$d5PX|qO402x?$_KBqf%?X}16j%1W{=&Z705z36XUt8nEj`1D;XUi|X!XB}vz~z>9_vLV_BZr_4zn*TRh;ZcuLKZbgjPl~M|f z+K3TkeriXRzydILU9tiom+H3yrr!vd?GZcFu8mo_;}43!QAG%%S}X!}lK`s9mvK{9 zA@aI}2g~8woQhB-cuHk8M2vdhz&%!!a}ktaZl14kIzIg6Tw$Wij1HJ1<0s2P^k@K4 zqGHYX3F1lh9Cv@V7<;_a<@xlNF&^45TQQq^~cN6_}C4X8DPUiJh~1?&3vhj~J& z-n-+{Q-WC0lO-N5nx8tlmOT1q@-Mq%$79nceZzm*9cMP^t0${R>vxU@0A4y;c=9^g zcd}7Do_^L0mZip~;CK_WCu5_R*wOfLhH~ssClBtuezw7mcKq$u@$y6i7>k6H{t75o zpgZ`BR$m|v6O1>>g5U~gp3RJOvNAk14K{dz80kKh?gRfe zzujhgyB+p)qXkO_#5Tqq9_>N)Z{l!Z?{IJh;u#>NvxNJMpqkDK{b&qLW~#DTW6CE4 z8gfDedJp3eMHzRXP{yP?*x|7N>uuuB>8*KUvo=mw(x($+Z-=s{%l*dL{Qb8R5NCKh zQMWEe1PTPlG`iZ!TC~uH@Y}N(9w5JayMB6Qt(vXI;>V3~)vBX!xbf(tq|60el@oga zkxXM0c1QTh=#>iYW|a}T57mUHKxi19VsfT4s5Cr-J9$H7ceo~RI;k=;8#1pjt@15o z(XE}PL_~6BbC}*ZLjG$8rFW4m8hq6SxlKdV#~&6i9Vc*(cEPq-+yl=02g?_aL`nLV zf9bfJ$X0yf;q9#|@WZTA$p3q~f1TN^LgDu1Hf*6(EHHr|%S@|6qf#eXC03nnn0%`T zir?~p`!>Q)At&b?Dn2`C`)6f>ok9gXWhg8J8L@#xezpl8X8yw@cp1za;X4&Q7Le7X z4F*yPsuJV*Y*Nif-sY#M*!oOV|9h$?d!1Z+^>pj?-LH?YFMF2uVU6Cb-hO@e0c%9b zWeo&=J~X1>ZC^Gp=ZpOn0~iAu%>5SM4Q&{CPO-aq`Iu3~8WS7pSYyo7gas))lOs5| zo`FVvI`wM=Fz#@yvnimopqO*(W%%W}O}#q8IZ8!bxmIEsewx7hkk*&YG5x00P<)=L zFdfAZ;6p-NH0NVGWEp?3><$ZMaU2mw)wm?Z96Ug)Gu!E)nSTu7SO%j~wL397=UAT3 zh(zbgVD+p6HFSzGCTCWCH zh-Is2<&cg}xgI3_rM0XtX7}oF@RO;lfBONVTh5|1kPI)-+6wAVzFYQ`Eh%Sq)atVP3QZX06eAUfzw8UK^H;t|IG_ce`|Beh! 
zbumm`1)HJy)7+4u+VTJdy3BdDQPHs>dTtpHCKeOAslCLY|d3qMOgy(shmnt@{|HMB9{Obnoy;2V79DL%iTZ^qraBZ0ZKF^ z0_xORm5a|55eGz?i5dzwGw84^;Hn$g2oAlVgC_}%8ls5H8t3t8q)ZJ;kxE<+ezE_I z;;sq8<~GpR(W|8gSLa`>Xw2*}Lc-&z?eM_#*keUZeQ@oMLCDm&66c=JWkvH;{gks0 zd06?BhnxA7qsMfQ7fAJi5>2ICC-aP&a-zU7ASStdQ!H{S?vx?as+QL^rutO1eXnZD zG1Z(fg_SOa2TI@`=YX0dOvICPn|I-=Ys;wWtl-Ykstw#7s6})^A{-J>jbtECp!E9A z=kql`B1HL8q3`w6Apm9WEs|fzp`h6`cNXV{$lYsRD~4RhSG!N3YwQ>N$+;mocD_;X zf0cSA31LLcD3QDA>Xrn}NQ^bpV0Lpr5X*1+TG8a!hEIb38nr^vuveWGr)AJ*1;SOp zAsw8>_Je5~yV@EwLnfqGqwHZd;ag_(7BPD_ zO^$h$D1j$%WdvDTk}m!fI54Mp*-+eXlBb;){&HFp(N*#28{sQ-BgjMUijK8U72XouMUn7^Wkz+&$n3aGvyKNQ-uvyVB< z1b&bM`kkY(#n&52sQ*;_w8p>&kylXSWlBhT@?1=EmUhtlVFSLNGsb^3 zf;2w;$oT1T-|5ntL4HwV@Zu);Dw4dRBCC=3CDJkp5g8*!6t;({iquE(yD;&KK7!Hz6) zw8{-&%Cc&V56qx(nB~;M++tiILy>ZfBug=~LmwDr=4oRhIpzLTBlbm$aeoL?fNlU% z;DtHv$?B`8&uHqAHl2rX|H;bK$@)08lQvt;SpRR>7>IC6IL8ct;6IpR(H0)k!a6d; zjK?})%=k$8yDJ<3op2qJ3Ni_*)qGLW&N4;AouGY7b$dYF#i=hN$bc%-xJJDQu4gHM z1IxmAaZ@=mkWfs4CEg9&+l$fC9C}c#`ia*r!I4|4n*B zx8c5Gj(<3;cNLesrkWEhchnK z%XTko1{PYx_I1Pp%-*s>x(s$`s5xeuPuvH9TiBc&kH*c29p>*HOx;jj1#enu9o{=c zg8B9Ps1Hq;;{R1Q>}tiKeC8a3myxS6tD?l+s}NU}aCh8|+PIeFJ`x!?guOK+;H}H((GU zo+z_g5=B*eq{vflzcy20C3AE*ryVn8TgpvVUPnc%Q%a0op4AotzDW}k#W&^3T%h0%`xaEiJTh_sMC5P~w) z0d&S5IaGAXg0BMpUSy}Hi>V;h-PyM{Teut10u@zfOZSYk`-!th!)Nz~jI-3&XNg(v zZM;o9Je?Rjok;264d{NTOmhjrR9AFR5CLS=s&OhvZv=OeMNL)6^BTJ{EtS5E%4f%Z zuy^kYgeaxTy4RM?KA4DTh@)k&H!8kblvA2ErsL+cBRmIC z76Nv_I;wzuc!?Ad;*H6gEo}Kf;yx+FeX7GZ3507%TxdyDcWcLTLd8N~>!%s;bxFF4Qv){E;b-8ENU8>hT8R*lC9YMt? 
zA+(Jg$cPk%A}g1?JEl0DS^=dsBda1 z4+Id7>g0AzYJh^SN_Z;TNHYO;(SI~Y_3E0_qoN>D}9s3@0snc%tnYxHsn;r7Ec6IuqKmYkTnl^{?IhWe%_o_QpwRJ2K2MR^MZWp2W zS3Uk}cwzl!(SX@yw}$L0r>q4uhA!J7yUh&wD}Y70#rPuL)fJA^#mr3Nrx7hg2rnz* zP<6wjK=4%Ac*$v8GrHvge7yLMAxidn*w1bW@eh5f+{5~);vYe|IT|bN#(E*Lg(O1W zS|h9&hNoarUX8bA5LWs;Vw0S+ufvSAV;*hb18>bC9HidAl@W~Q6g_~2YBO+pHiV?)csf&r5=AR-O(3qJoFy{Aq(SqloBo>r7*Wj(!cn3P5Aqi znwcbC{xu9o3}65w*AO#wPU~#B@9gf)#-O2_S%gv(;qRJov7IDH;yl?CZgvXY+`0u} zp$Qc+F>1hk(H;kYKpGB9$1LNbUpdMI7OPS~i2GEqB zB2%3V>m||@^-$3>{qivy#sy}JIya}$b#il4d8dwVXO1W99*~=lWo>Ret_jwC5xyMb zx#m&3J!JWj`!Py05|lvmN^BH-V%@bzY(FGUhAdyq?uPuGcoeq%;iJz!M03xtJbG+l z|MQ1`_}sW`wnc28F0Kc?l9JM%;0#O=5Tzc)0qf{k2TQA*GLQpzdu8HIHRYfd5&i=0 zrD=e`%D5&CULwuAOv;Lw4B}-om0Xfik_uK)>C7G-X%0^k^NB>QxHh*W-t#JVXDQ*n zJGa#PBJ-8j1PvlK;4mkc7z_Mr6x`4d)kGvdRS=m_3V1l9bXRd_s#q{(-4~quQBl{_ z3>5nR?@g^LEb+plq$yg1aq*HPE*gA3TUvi!>FZAh7msT(}s*8fV=mC+dv_1ayqtI-pW_;r~to!OO{ zvZY55N+cF1w1~}!VYWx&T~tFBHm<_NO7p7U1UWq;VqF3D0pKsPQ7S5C1Yc{Iv@(HJ6yohum0_EfL;EE1O4Twpxz{Omc848 znsWniTllv~%S4EobBHqCObrfbJdb|GCbgPJh&YL>9>`rB?}HXrs+7wS73hQ@>0D5Y z?y{AfpLf|WZ5)}fq1?HIAipE_PE4>{>~m`A%5CXtRd#Ds zp+mKF#nZS)X0sk+EJ_hhuQ|<%PINT!mV3J53)s{-eyVE8 zsAdKN+{T{@+0*PD<({3*_VGLQb~1fhm^@ut$4mswR(PAZb2hVPyzPH{y08YcuUPu| z)8?*0jIpP;=8dzN>`%EYw!uJ$S{M4*I2%g+v^jKolkGP$`SvzQ`%yiqUQPpz0HBRs z;0Qm&Jr;Rn#3jd!&SdZf;_h9(hL+IB_Og+w-^n-+Ii)Z4d)?0+46f0JG;KuRX)@!NW_z^$l53? 
zvaRyT9@b~Sr5gwehRY%g1Cl6QH&A9?@Y}eyig^@kP6h>X`lu-#MU1#*ouXSq)Z*r# z92b5ph_?hZMO51C`Nx0c(vVl!jz*laS*+(n>MajP%wB%C!$8jqjKU#P*LxFzTgRNU9WOI3 zY*5vZQ+-UlHnQTPXuyT5`IbC(2PT~bom)ZOv!o!gi`i%wF*KKsuuEr-t+ z5RZSXrrS`zjl?sR>>kR-**w@N$k;ueA?5hY)=LbM)W>@==1&;Fn41zTCCbhD>1m}mgW4b{DfWf0NR?R?z!z5q~&cR znL{43j@TSFDoXcTcrFr8bfTFn?@WbdU{S59aDN)aN_n=E+C=77ZU3*n+5OupHvnj% zBI)`lQceTUL{94X7TDB@X@NvEkcGuk9^PRQeesdF2r-)9aJ1?>C`cFw`Mu>WQG zg%x3f#e^(ONaTwOtu-jJK8EbDJ-Bml$~TbX`tM*U|9#C<*dK%GT^-k~jqZG{n?O1rPIk<7jyN^^?@=xuMt3zJ7ge>tyfSr(4aWeI2i-PM$8jS-t(L@bqMw^~-}F6ry*>BdXu<~UHoMIi)F#r#cI^Av<}n4T zYEzvIEO!SHf6#31tf^&_M~24U8lf5y&%H9(>Nrpv|L32zlSOzr`ms<}w(U9%?SB=r95Y0qmmjkv}#crn^JsaItU2JZPSyAny zeF+V98^O(N4f(Z`X`_*@8u`w8NwHn=SZmOVb%g!V%Dh;%(fCYktdkNO`Pr|2-2U;= zzYQ2aF8|xmkBdKMe%ydxjfr`%@*A=x`ic5xMbv;VS60Y3)-P|=n@_5v1j^E&M%Ecf1PB(lKn{SOa+nEm0-4~ZXEj31VNnE&BUW8$Gv48&7#eZdf6 z_+|FDZd-Q7zqTL{6fms7l~OWNN=Cz-jk1sF-KJ-SV>MRyS=oMU681x?Cz~7U?u%*i zp{NxL!2pbyrR-%ZQp(zO2P^>1cmx{?x$Q;=0i^e6c=71k-NV5o!l;19>LoO1x zW0ZC+Q>{l2*0u;&p=#P?#ANE;Hj!Qp*u)Az4e(viiMqMdk+ zrA>B`-SG9QDLV} z%>{UpSO|&1vw{Cvx9lO-!Oo?yd{B6NP`GnY$R8B$9_)-C6xI(4vj>F%@p}mO$sH7u z4RhF?$6b(PYJboRk?iGnyKAFm3^ybt3#4{tiz&j3Q$Ygkm(sR$(}Z1 z)>pA6D5StZrHoX0s0rbcOp0d`&vP~< zq({1loNy19vDEM~@N9CJLq9DOnO)iuWeL%)%ed%N?*E%EsfvH#O2oFl3dd|;XX%-* z!?pf#<@!$2{G;k?CH@}QCQ=g>TlusOYuiz)VzhQAK zA0DgOjAl$_4w&3e5S?4@3(;np8o@;16sG19-_$lw+oj}U$P@Q*)sQNbAuA6Od??STIdD^hrCChj(yItwpAK)Ngy z&tLlyx$;{voAv`6%qN@kRN4l_gJl1`G?q0}QjhLFdoy?U_1w^5KPl6#Ls}_heSX7z z7|IpxUpO4-#~RHGN((*64fC`}o)fvvlQSI-rwmd`NeJ;4kIQWC@jub11X1V1X$x^BGO-k(4|qTzGoWbry< z?jsszxB8W_0@|*F^dNIW)}km>E;1ygo>lULxZXvj!#O1~hRT&QE-*PrsII33`_DWk zz@(5LC_|nqTfUEK>HSrRg))>ngUc&C=>`*b$~DyAU1p8cLqsPu&vjBTw?z)3{XtSu z-^Sd(U81|9RFz#V9xIm%3*Y<)2Kl;2^ApU7V4WVV&p@Y``|*D^ zC!H@ZMB2Jus&^Kaq#E=v8i?uv&R?qYg1!E)2Mkh9?{LF zz?LzSO$ALiWDnI2S=w}7aEDsiSc^sMwq~08J+@NPFz}SGc0plyTR054P1;=m)^+^{_~Qb253j*jK!6hTTTdaW5zyq+bwR!=DPfG8}ZQg3hsNi(Hi z;r`}j(~WRw<>KhB{K1OVz55Wb;f){|V|%(Iq;s0W8s0rD&z<4|rUTy{4$dHx_aiJA z-aZ^$X1g^8lGVVVD;QgB|oxP$X4;p 
zg|mGV`G#{?Cq`Vv1yrj{6V-7eK=(tM60TTsq%k+z10qYWJM)n9-d}g=2m>2wk!$#& zz|9@x*@cYzX9%y=hflkm8H&dSJdtEQ5bmcaxj`+QvZ9=#sLSyTbP~htf^a37Q#c~s zL0QLt|mdJ-N+%E|95za>D}(`Fbk{ zf!(d#$#&*=beuQcy-^dC{5WXZffnFMiM1f(708N&q0a6yF-nKpjVO<^XoHfYtwK_eJH@#rc~e;qGv+VYR{#jMEs58zzmwPB8#W>ze4_ z$E}~?kp2zp4oQ*zr1sa_`i1^zcC#Tt6>216gd#oP*94N(*g+ zNP0kRW{wQB78hYq9C<$Zi{jDXz14SeNb{T8A8*-dWBf+* z`M9(KAL;W$g8NfgEc^sZn2TKOWC!^g_D9*ui55~iN2Lh>2Z7)L%9zK6Dq^E2dk*wMrdKZb8nD}9S%tw_7zGOVKf`Dnw18xjme*5YsR#~jfIw2;tA z)`0~xV=XnA5Aqy9B=&;L1d$=+K6f$EXPOfcxCsU7fQ&llUl(M=VYffc#?PR^X+~Vb6pGB!P1Lr))P>=){pFDVD*OykfrUkY!tRhO8`S&ojg1&A?C*N##T zK>$ZUxWA!fdF0J^UxP8SvC%ww`DQS$_DcokM>npWEKQ%Juf6G;J9+r%JwY-hkYIJ% z?YfAG1=XM(S9M!;hXZlA(q`MWT0OeOyiVH=E6~++*6G_4(1$Mkv2P`UGVr7 zOsW?Ivb06QJyAe;YTzrs7TeDQtw=I)=CzL>EZwC&&WEK)nrZ5j(w7_b=>b2{?-rFo zBH@kzFh5)0Psy-o-9iu-XyV}}Lk!?|X1CRDa{X|xXbEe?-e8cC_0*&RRw4<2iw7&r z>%Qkf6r>hJB1!#<$Sga5klJk^AP){NzZ|*;Ua%3Jkz5-V0b10D=z4B73x(u`MCcTx zo#yIB@ZBH?aBSY&J)6s(-LG3nrU!xuHp~2^sW8UP+^LdNkL$~^9QD_r|L(rdK`z906Rkxo;=IvSL(p&y|ArlNU&r#x`~2+7(8 zUv9%nuK-6|pp^3aBg_lY!8i<&h{sd#&WBbcXe|m5;*w2rvlnaMDtR|?D9}L)0Yu0(ZdE487N=KoqXetjWV9dWlaLF#% zPSK_9K2}yH5PZ#kszb~K7**%*!)Z%H$8f6{z@@_2RWz|=SX-1?akbTzjIoN2U*6w$ zTVc6SYSkH%#K`4BR|#o~TW*?$JG{}Y@Ev-COxb1DNLqrg(qBd`|k0+mki~7?KJK3@2H&PM}-D^mr8EBY`AnJs(Si# zmaH|z(s^0S?oyW1WZqH2!_Lcwk31(rK5`=_gBkdr)|-}%4-=-bRPF7aSMOQI|5p-_ zxh@_RIV|d+7RzaBls&MLEptXrOGQ)0jZ88>KLC0nUp?u+8jWdFSRuW*T8%OHXsgh zoWX!VRg~`UuOvy2O|XZ6djgK1m0yc2|B?7D3OZfml?nA*ncQ}gX8hi}mLxu8`&7GN%(Ok%zwtn2-zfE#pt96I~YI!k>+ zKipcY|#Owg};e9IX$=y`KZo zLCGKn8fGurZ(w9}UWix|`9*9~uBD~HfkZvb4Lc~;3`b5cEh-)YdUHlr zHzYxW-7MpDaV#35nRpfQA4~x3|Oq~hbJxXX%g$HpcilnmH-KK+Ck6hDCezK&NR5tpMxUjugpY__f+FS0; z7;@MD;NEsO372%-&fQ~2c+Lg=TSq)#ixX6VGhVsUMYqd=xcvDR01QFkG3@S1!vP8W zrz*qW-@7rEAZJ~PC2cQY3~bb>CdjBV`D8}V+Wjoy|I9>I9~m&RG1FSzNb@hb zAGg}dX?+HPe1y>}S=c^r#9sxrI|1%j!***FGYzn2@~ST4T6bAgN31V0WjbKFa`58$0e%bi}pZt92FKlm`U5X?Xk}J%sqlKYRkT z_wu_=>-F1%!rRu$`P?j5{ciQk&is1k-e%|4;=A?5pB{Wi%R``j!Tu0wO_%_&*ARd{ 
zrq@ciF1GaiSHl>CaoE58-QWN2Z;-TMIOdVF-yegO2Yxsxo8tB~|HAnT_U_3gE zafOeGPA6g-6dmX(Ht_GCd@5CE0OWs>dIGq3CT$;|*Y8SxV@mzP%S=J{d3wDayf0H5A-;|QbuQs?zF@7DwCl~`(awmP#=672UdqvqG6L^eYfq0| zQ#OxSPXoYJk84W&sGPs94=E$CKuO269P>B2-p-N%!#}5SYS^=xWUCCd>(#BKj@C^D zSi|%iY_PJa#Gj^3Eya8jCH7mj>m|?bL_Vvgpq4D@H;=Y78T|xLZlM*R5FO!^pR4rq7L4#W%TfXcF-!d%!(c^rVh-bDmi=yf zW!Ul*TqTSHAf0I2bo^AB)=z~#Rq(_v1-&WM{Rsa)wX5$2@+k1{iincKrBW8qy)TXeZ~$NQ2ap-p~DGNqK%CVF_KU?kx#(rgh43*a?6 zD(;r?+C<%LGlENl3+FJ+^-GK|QQqn@#hd25xv`(lDE^Kq#mj0$)y@9O#;WlCo!nvf z@1)3XP8)MfTjI`H?yTfJ6DbF+@$*-c=dYGJtrl5JhzE6hykR0xpt(88)H-xJUE*I& zpeZqEB#o>V(*xE*bvBJ@YBo1&WD`*~4oyp&DszD4`F@$6v_^Uo*;v3jr&qPV9uPk1 zY_GFdP`DXjbC=I%R8P83*}xXxz4eG~^$llT|BbIm>No%Ru=SPl@6Mp=FA_k@n6F68f81L7FaP>`e<}a&Xn9|e z>;B^gn#ustK)(QB(qF{CyK(=%#Qpmc4{#y>I|&4mej+(vkzxNEkO&~+|5M??#C{^c z=;$Mo0+YLPeEYa>@~cM(GC*{(pGexj_i58##_+!q`QjyO*BkgQX8pp8!GekbqKG~s zrvLMA`gh0t!dE2c|IFq5-(=7ihJ&(X7p`Lj;D_*as>nc_?tPb+I+p68KOix*4G@xHXcW!`h$h>`D~3 z--_6{h$Sl@p^Ammjb)3^Zd)jRdb+VM>RDLV94EA{lS6L;J30Y`d2r+*J6-=RQ5sJ5 zkRa~hl}mJx6x}C=L%YKjQ6p0n*<8nuG{h2$Rj8sVF@y86q)enn8fU2HrmNJchYp&X z8JJtlp^3N}Y{T$@{hRfDLBV~-k{EIuzaC=J%XrZJ&M@fVE1!cXq%*^fKM-X_X1MD51JSxMnl%7*QqoIBx?7`<~56B!iLG0IrIWUb;otlp5T2f0ZNsaX(pB^&V zq-PWCz{hQ2{n0R$PhoI6ZL;Q=g;R$OF?6OJ`jBRwQ-z(o0djdWuV!_F4f$xD-4K-- zt|Sm|GZDpQE5FEVWDv)IJz3mq_Q{6JN6;R!>$5v)veAK(E<`;$mC6ssyLZ_#1{apQ zzkI3Waep-vsts!nlTQ0+Xmf-s639!)~J%`@o zE=;+Z_l0EB8rHR*p%itu4ma3jMb(~IiFm0p;vKsR~*c{|_(^PR5y7vhscLZr~ zPayhXhwlLa`C;Rak^)2m0A&JHC@TPJ3(sSX;1!ar*!zK;c(i5_k2Y{i5H}T}F;SBi zQEB_MQl`yC*n?!B_QXyj>`RtWzKM*bplI1q0-}nIOV~&D^_eB)+`rrts=wrWnLP!X zsx%&U;y!JEmgv7#`j3XY$qOrcr$j_(GDpp z$}0A*L_1?`QDGX>B|JU?RyTuVTAST^0Rb!{idU8Zr@D;uQF7F__Eutz zbk!;a#pO#hoqLXvwDId94tiW>`&$tWYp8@dKIMHJVNr5p{0~X*1(R~L>9iDsgSyII zplB4xaI2nXLQg{dYfUVitaoQJY2eG#T0UV68201QOr)6AVKx!8>5jCTbUwuluc`HJ z+NX?U&frEvrYx<%p7oCRtWf`{dbjg(!2un~oD{Q?hH9BS1^ChSd4p*#Wk?<-z z>jO!+_3q}*`-8c&!tz;x&B5eZ;V=v~fdB^1RG=2m{lU!crpN^vcpi9+S1{RVU(gp%Tl%QzIn^I8_e*kWqJ~vG zwC2$%D^IFu6CLBngrq3ca@9Ak~@{`a&_|ZVO_cS 
z=BaYIynV5@&8}X$c!*c`#=1ZcFTN=QH}&PWvzPPDkdPM$1!E3aHy(wu$0*UPX61lA z;^-DAfM{osDN#Cmh&ARN#q6Ce+DJrQj3}Z!y_>-rfixBrri>De(`FHsbOU0%bO%Dx zK*O3NDXu^Xb5q#yCSrd8>9Z%WJ7o(36biWGQGK7_LRRXX1t^-vOulZQ5b3XB{r?hv z_5>F-`P_?(3Lq0m)SW|E7Ag<1a8}z^XBsR!f+!v52 z+pT0S(G&g^&s8vi*vId)9IQu9e{99!v+mOc+4**0=Z`Q~5m(D8Q6;q0*BK0i=CTqj z55l@4zrO0uxkMY z+Naw~?E4;^;gs!d{CWta3p;G4fzEvw72jds3Hr6d&*PUV7u`FYs35HD1s;4z5^DrL z3(yrB8VPRN+!0rdt!!T$!E4mHw)Gt$arbyD&``cY8CHpdhO1U(SDT$8_;dwD()G+6 zrOtwo&@79qBUl__;s6A!?8tTn9fE5F=bnHz1 zbZg;slPzmDmql&bDhJ(d?m9tx3y9Mrri_N-u5nwXYZ9QZO`kR_&|e$7vEu4b1=Dah zIZ1~{Qf`DWeQo@^Mu@^`B==x4n*maK@xrV>qWZ3z0c0Ns)S!~Q>`Q^2u_!+pPKe?j z>Lt|T9-anJVlb2<$=7Hm_rqih+b{lzGD@*x5ivDJ)ns~%FA8t4PqbpxhE0Sp<`Tvz zaEd55)KOi_s>9z;4USoHV$3OyP(+yTs2QVL1oqJ09B8>QE9)4zNZ10=0fmGv8A=SU zF&Jm-!8Zp~Up;gm0l6^It6bvH=US#=_n*K>y1eI6!8L{lZv zo8Bwq{~5oHr1vLt0OABP6$AmEOdhvRW{#UDh2xh9<79t$6=L9>INm>D3!>v)cJkH9 zS4^K%EkDpRCW8k%DGNDCJbZA6D#35ixXR!520r)YFUm-t63%@m4_n>P{ z<@g1#x-d!BB_=R|8XK$^079kfHwClH9+pa8fIRP$Y3ukC5SDJAub8MSUb+%;!!o{7BuElD^sA&Z@f@-2Z) zrjlm>Fm8*ZkA)Byby-Gy~NZO0KXoXKD&h(xrFk42ECJw!fy_;oy3IG8or8t`$RE~N;x+vIHIEX?kH46yPR;2GEpuIv*wFT$r3iV z4bL64F??j~-!j@5U|u>|#LUxa5J4`cmv}7!f+r>>B3&YDatCc&cVjO2lr?&W#s3!B zRi=pT38r8V1%DZgy|)d2cennwHU73$p%`~(ss7W{m*=l*o%@HK*WaDr-h4Ov;=BY! 
zA^!JZu`{*zwzbn)oO-vg&5pc#&^{Y4P{|1P;r!XF^H*!E;k$=3MAyWtN!>ukVs7>+gr$-EZRMs)eBVU|WLnuuxgP4gKu znn@sm7F0o(Y!2nD*^(;Baf)Huo4EnQkk76dilyY+jB4o-T^p5d$QCv^T}?(!Eti*n z1YND8LwWg!F~Qbo*?>H5uJj!>86sl_%0ZeMut?9M0HF-E?S+?Sjl9F(fk_0 zDTt-KD((9>F*$Z4+-L9&Kw8z4Aty7b4}4tKUMy{zA%4^j z4MSR}-@?b`{l&D5E^lI>8U38qSPY9z$twWppBaT{T1u671`a(@TnUb8 z(@2K+jb-!6Fb}n?em4)9m3boYVy@s?Ofu-uwI1;CICMXs}01hMMsEdX1!!WbWDBRO-2@ghD-Yo^b4B|FHN&M5#Dwm8E)h}8f7yFMLK>hXs1Z;=mA zplmv!#dOt;5bz!5v+B53;k4QQXaQwOO&Loa=0({<*@f-jH#=ZiR8vr0$u=(8hLR2V zKSJ5(^XJY|QwBILhpzY!`@!)&aBnu|{gy_IX5nry48scU)`h@Ms z{S^-r?|YT!KLeXo89xh{oxDOh%N7rOaKgbdW;@7?g))8?Fq2_L=6pB^q`eA+C1tz3 zVrtxzaZXYa4u65Eo!PzN0w!JF!Awj$zq)7DFr^Y!F$-=66W+=wxmVz6ewYX8pgH$L zT6nlH^m4LtxKcm7dKL4>I9Yk1s$t5bl4(pcoQfRQQMOV+%f*Cue@s~zp8J^deCq-!+mg^^*NWpUv>0CCS)Y!^8TkNl3 znZjKZ0hpptSM40(Lsa;?4cV&z?TILJY}185yb7i@R5e>bPyqvTfe9Yw-jF|C=RU z5{^mgc*Kis=PuH43D%+aoED_?tJ4fesSu$Gr)*j$!6v|C*V?qmngpL9!%Cbu=F2`} z)C^u&vH!`Zd)ZI1`oLo<4M~1TGwhU|>oDMS8<5HnQ@L+0w4H&2r*6yn*exsAlJf91 zRiB8;2Tj{~p_Rk4ZG%taA+t=S21l?dK4k79*V6Dk^@o?%aM%JMt#NGnzBa}I7tjdS zCE5!u^CjdtVG7k{BJgW6CaR%xpk&4oSBnT}SAvL|V-yf&fgn5p)JK%UP2y$e7c1nY={K@lMws6_zbcW7-;$$v_DPcu=w zB?-}y&X5Np5L>%*H;6ICg{;Iu4uTskBH79|aIg50HUOSCBy1VR9zJ#_w!qH{aeHt{ zUcpDPf#1EL5DvF|4|@XJ_Rquz!OU1RZ%yM|s!`~THx>l5#Gn@n^UtQym=X6ZB5&C_ zF97BbS~8|5bd58n_LyvpLB&7i=ysI8Knv9x8^{eHm;zE(XG&VtU2A0w=CRWrh0I1Xl4oR%1xTt+m=DkiTQrr3yV?3wR2uNp61>8%Ao+U352**@+<3v zb?Qa;0(16@^UvolACwA?s#;zXD|Ly#z{=N-nj^%wJ%`1X1vD}X9coNDlXl)Fdbc9V zJU2v%>6jPZ^WAFY@ZylY2ytZf@WQM&*i+>s*xYEGraHM!g?nSR&4tjMC`qF>UXYJq z2B1+fO;+KyD2CWak=boej}|HG+>9*p z+-*#l_CldBPj^hpdb#ETXDLrvWe&7+asCOoK>A?fI z@H^Lbrl1y~hWV`zFay-@B^H)oah}pK4~FKS4?psOOtBXGUrZh>UOgyGP{?8%8{ZC>2^s;x-4QvkPMj*#x0o4I`JB5iQChC0ACEXdRD$t;y&q3yv;s z=!7+b+;p97eQx18NwYQ-O7r>giV56a$wncSHjNKR{3_K4HA)%Iy2bul2G%3~ZURE1 zKy_7Mb6Hkagu_gcKhn?v;IPMTkp+?50JH30G}BbkEF)FOl%@D=+AKEojGJw?fc#Rw z$Lta4xvvRsgiiac=OblFJ-0XW0&BaFSvNyGN-id!1Fu9n+x2|<;bdXczNWz+EP$}wT5OehelmZ-)6ptcKEAgim2I) zXL;X*Yf7nPadiZVXD0}d`r?_dgDgK1gM-3%@(q8ffKFwFd} 
zYM%S*M(muf0{C8{ zehGl3VEzI&+JZmVMS)ZY-vu^Wh4SE?i&#z8Y*GgSu6;Tq*;a77lW)?DY*TCMydv~$ zEKet}tW2|KQ^Rj7SSn~$Z@Qr4c26hQ77Hg|iT#+j7aRtcM0RQ;axG-6v9n7h*8}=Y zA;glemYzQNnA{@rrJmg!A$N)}_5F?6{n3pu$hy^OZ5292M~mQfk50dDkUg16sL^!r z%xGxmGZmMg#ozs(-hsH`vxG*riR<0HnI0`2?>^%*XV|rA1TFbv#s#z59uW|}+i&?rZb`Y`qnX_UC;4%B&4N^?o&YMzE&H<^ksoJr6+@WXMq^+k$F^$a0 zjbc2_Zd;O$n0t<%O3b!<5mRG8%b~zsj3IpHbtvEg+HWR1jo!wB317S$lLuGby*mml zx8S!8@w*A%v*>!|a8;3*Y`tOnMdSk|;;bTAV$>hlcUi%ZHHE9dtn&id9cOl%`hx2N z!cycQnOanffhXk!x$g}x?d6t`QF0ULCb6%l4?a7%{5gA_dSmR1+!2v9g5Zec`RO_6 z4!4k*aho^@y}U&Ukr$z$g`N}ib!5!f31DY7 z`~`FovNr#qqlU6D)FJt2iY#>NJ{Io_54z@k6UmxhMB}!CpLf6(rp|zTDvM@t4*8*g zNXicg4wfyEFx823i1!D~MJ2Ro^C)uY6Yu`_9wWx#?sIHFodfsVJf}X+4JNo{swF%h zC2o#7pJir*stUe#nH3B@{7+BxyMU4RP2jEJ0eXRX?_`yoVO&~4OSB3GBw8msBRj*) zzqG&_dVVe+_17ww9h`TyR@?#RgS3`rX=Q{BnZAv=lfjVc?NFuF!V4&N_^xleQ+k*dFLdybvEQv4Egl;d<`pdg>?x1+l zT^@!e`!9s@2bN2~VO1oCK0|FC^FIk(EaJu_3Nw#Dc14_z{ZOSq2%ZD<84PUqFj;U< zb9lBG5@2VmknKV$84{Wwx54ICpkNds_U5bS?2me}K>IcqO{kRc>*j7cwYhX$sbxsY zmDsM^T)6AzZSDPd>Dot0=mjxIdDQTNi0YvKWHt)yOG>j0+D8f9Y0?H2CUIK0DgJ6= zZ#;~T#B@!0N4}u?+5bjNqs4EU-J0%Z68|aDqjiT-58-D`AWzh3FeAEySHe^bt}DX6 zf}IFK783+N$e>OMi#lM4T+E7)3VIPjG6 z-Nh@UDltnk7z(-&JjbylbTqv#kC`fcgSlQmuVptDFea7Mg%d>Dl*~tys(UA7^xXE z7KXCaC+LXFL9*vnYv8s#H)%DkS#J;+zi~xPbn7H?)2pEbuxjdbc?{e%Y8ZW*$)7AX zm;51#&IQg-$u9!EdH@kxYMHlSe zAvfHJWhq-Gh9;0j$X;PT#q!C$+pD!|kBa zv(7%JTvEp`GK7{RntZ8cKq0jiv1yeE`bQ!3R;|&uIo#Gx*gr*j0z{)zz z!}Xz;BbB3D*OkNhiKAQhfN%fygygwD%w0dF9A3V0^u?!^&wX@j1X%NdAfJ{gFRzZU zzL2ZG6g1N3+du!=HiO}G-ol(7W{BPSC(UWxXMG*nCSn8{sn;&*{ zLol~Hc2&ZX4iWil*PktHn8$Ni!hF9~+tDxK=Nf)m%WWq^K}L&DzKk8`j}vW_AjYh~ z2|#qGBoGYi#g0VqT$x9bqq9$nJ>qM8HYb9MJkq!&*yDxTk03VB<4c~?!E@ZibtbV$ z-Dx_xO__);I7@SL!OdO?HZKq)L%iR!oMD{vj7$FVJ1!wkDtWnteBfJ3dr)T)d)USp zDr>zi1E1RzO>L74DI4+5Z9A7R5pkFkQdE>jL1~s#;>#8$RGvUQbrGQ?0-EkXRF$Jl z<49gz!{y_+K^eKVbGs$x9|#t3MZ{evIB7WWUe72Z+M2^KEQD6e6p*b@xRpm}-H|M! zHm?vFxj3|7E(8Ex(uFw3kuM0xk||&es7YN_&MEOsH|it~wqRnqy-tX`7+N9|)Mn*? 
zSqH;jvsw0y`Nkc@!|kxW@DB)M&yeyKMe|9wHWwmF_RpRNJ5W)&{)^ZfQ3{TIIiuM?+6U>CK5E?b8-}C zv5aa#(h^g^dJ$JtL}VGl(ZmW(O{X zht+r(oTB8bllG|jP27-x*cq3O@^}P`wjP%7lURCZ1cxy{rb#D5a#>=!$Bwjbv`bFT zT9_}##IIS8;LbJi2vP{%4GX4Ne2vmL@TsovoTocPPpU<;n+G_EnUpl(x zR$&kKOxh>auGQuvo$W91<2u;attUgYg7Z3&^qh9NIj5C>V+Upo@T!9oP189o#ouXR zVRwGgy{_dXb9dw!b*?*kg?T-Xny>1yGJ8%(sIRcuJvRLu_O%}hL?Ct5ne-Dr<0qDp zwH@-bgJuA=!R}qercW$M=(JR(%OQuyB&CKomus}*4z$4E;0xC!g5$pQo}5L%n`C;>i!)Fta3)KYA4 zVi`@RrFPyO1fmMN%aiV<18vc)aH&tHrF>iklm91OB;j6RUX-7gOuO7DP3A_i*WUY0 zPo{UFaIjRV8ftUd6uGw80zXIJK2@rDwm#Xcsnb%Y2ay5v=3SEIf3PGf`^EHiA)Q%O z)+wTH;DlDB}TvA_CktLB^j>JqO48Emb2Rqi}39aFXPyc0NlSxLwm8Jv|n-?Urv|jhTtd^DXvH5Q%RK%Sg4ckHwx7)I`J z{%w$%&s*tS#BU-v7G;8&cV5|wdq!EwNo`UTiuD#CpFvKWra9>$0+bN4o`61HC z{@W*yxnkfya5k9@SpObV%2r$OagNASTA3}O?D@p_{^h{YuhtMGM$RN@$;X@`g|%M zw(H}!@BjWE|M8c?{D!-O1z4F_t7{w_lyK~E&FhUX!^LSR?TYAFkWXTo3+>jYkIu%^@DiL+aG3_ zz29xBF~dZUN;P{rGM+<-rE#9O(s(q5xP?1rXUL?o$=$^Q#S6+9@tB0AiyiL~VgFTx z(hG_}+lz?2R}pf_3-rTJ`j_tk2FS7RjS0aebhJV^YF)@DOd^_E60Ej>@p>j-6{Zib zOe=?#`r%CFXwqMr)gf7jRj1|=r_(~0_I-0DT>-AU04Pg z7w>B6a|4=rmMuQ3^_t=IMj;b|Pqw-tCujmX(`KWP#gf|O(E~$c*3S+R!n0orK*US53?&KkYaqBnHg>w0PXFP6I42d*m$>& zFE3T7Q^jF0!2O4ax*iv=F9Txcrj`yO+dwh$OMAn^$nd`c8SJ?g{Jwy40C!+j4p5@K zY#Cm;mp1k%#t!m#PCy=Yw>phBQ59MGbGy}fvYptsn`A7>-*X~=y;%~#kKILqY6^&B zpju|ayJ!UjjZBXeUtXvmeSYy}c1bzPEgjulIGj0uH1&xBssZ_XO0zwOVQ3T$c$r@~ zoSZnEECvZ|qQzIjc^UJNb*OpRRo}uwMv24spAX3q9HpDI#3ehL z!zUpUi}Ak+Wcr`MU}I&s#Ez{rCL)g%Mx3xhhRek6FRA>C7(c8EcK;KRk~Qs?xazRP zk?7ydM0+zzO4p*gF`)Gr$C8%iV1j%%I_F6>nbOl@o(%F{Y$H&QfdLnbA0h`$7Qcun zAhr)N0T2OLgGfLQdl^Aa_#l&&tUaJ`*aQW!79Alm9=EDkL8>;C|5>U)z#pJ*TN0!P z6WApNv>@D;wS~K?E{XtwBU>eOG-lAzsF9-~BS$l~K+s*u59<=4a`jm#NH#?++6n?V zd>;|Os%!_3HaW@vD-_|ec|$wHZT%r!p+GC#;-P}EVi;5FJJ*88d>(!J?BVo5E&F2p z^0S*Gc26q2((beM>hp~it5ch^^_q$9>V5qwHI@h%BRH#ReQI3q*8*4m_NvT#_-OEW zKxjhM=(XFM*X zjehXFQl7^TF4fzSci>8RpDg-DFjNFyX_%&FL^^_^;O$_~8QvsgnmRL?8w+tQnl_E3 z{$a+4lr6h3AYKN}2jQ3GzIq>YPoeGI5vT=A7;-Kbt{&amRE|cb533{Iz~zKU 
za$IT3Ijq}E*;)Yjvfago&-=a*%yCR=W1m&B&&b|mhFoX-5pA)F2^jHYFruZbHTW@<`>={OXr zn$dQu+)8ttDkOHmse)!~M{HK6Rn+#EKY5WWKYh4%Kw0;Iw2Q3EV zCv+Km%yl`6&)S`q%LJ$3@21m&_0BV!t29BE_vKP7WwWW?ggz&nmjBjH3yg$6C5*>g zk0&x9=xSshZ#9pgKj+M8xC%>+*aKcz4`6uXWn8F;v$Tn>7VI7}1eTn0+kdUPn83tAednF*bFtEyGy z!XD}j7T+WcwpTnse3|1Z4t5#V5}TtS}Fxbt*+6*Xz4B^6LimDH_^z2Vl%*gyBuX_G;%u`RtgonA;6n=tt}G;Y)!>* zO_H;?b15-f!);4Y?}nvmV^}LU9=0gWg+GEl4U=@?P!hJ%#p}!Hj5}C^7QkG=@`6Q0 z79m(t^s+#pBx>85An8$Dl|{s>2y7RiU5lLrEI#`!n~DHlW&@Xx+(B8P3B$uVbkj6Y zwMd_lO7odY>k9moVqF;ySm)EiE6fZtt(qzPu8Cbs1DQ{WY^SjIpa{6F9BeOpkxpPQ zy9IS#*%j)VrgB$1doNqs%TDcO=MENTo)14lH2e~jiRC8tvSWMMySR2M97Nuh;f1QQ zKI8B>`$5AFWN;B7Ut`m{tox$z*WS3t?m4ioxGC#8s>#Or9LF^)8tGQCKN$B!Vh;YLqNev zUsQ9*zM+>}M{l?Kq(W3aRfL zdQk@ih6B4x2Jqx?liP^)c2P|c+T0o#5P=<9!XujWj(HB%XH?C@M>T*JR!S8?5?wBZ zKs40DKpcWox=B?Tf++egD9Y8sDy@}FZ&KrlWi)}>2j3z z&^flpTX<-O$7rY$2qG@t1wf;s{HTltN)(s5$yNV?>o4DWQh9QZjoqt`F%LC68_3+X z29cI+UbFKt4S6W48CEEy0udUvb<^1PgoWr5?p*8Q6+8hS0FE5(k7l2j>-&{4BDQ+^ zX!>dW0d>9q*~PzXPEaAvUpCL<{cr`EI59yORcE&M(kd0-Jpb}8b@X&=4C-uh{4kzR zjCZkvuv@uJfYobKgA+6QwW#5fhE}$MH_92%KMvF?)d!`fDfUlK2Ih?(G&8A)o=}5R zN86gV(|_*V0l!9(#9-<}pAy-vjr)3z(^&7(jAXA7*8($Av&k``2EN=WEg7Q_Nw{{A zs7t+7z*t`LjHxEHh#HL|Ikq#P_l{Vvyvf&l#q=bcSBgMdCKssWM53T!|Yu%3x2ec(}2H6!4Fn%w4!zkqA8;#{uI@70|W!4!IZ|SMC03{}DFyrGD zPx4=R=OX`}>0wK~0+e`v4MkNFsu?vp-e>^l*yfE=N@?6y;y*FWn4VPAfWk$!WZG0a z#&akzN!S^tLoVLssgy`0W#@pZ;i7f#NM#xG9ZQgJ#nT_h336HlR}DZV_U~rLsHMs; z0bT=uY%*yG()&3hz%_7c*$q{jUVpFy-7okJ%jJ~X@$<% zRbDkd{A8PXRZug2>B;>kkC+Pv_U3)|d$>~{;2JeX(`SuDDh+%IDfWn9GE3(S2w5hQ zb$SgeB4Mk7?ns$@lTpkO!%fiz@d~`!?khL~TL4c$u)j8G?lJN$JcF)|%W{ud%;1kkhKV+UWXKQC{;m@4}Sv|5M1zkGfF^0l@x);}6XoETndvWAinIbQfX z3znJeMV>L)7J-XGI!@m1VNzKevW*eDZBZ>)QmhgSNt}vAmK}_Mxhzn)!i-tv1z2gC)~XW2XoGjzbr z(b7ra8$B0e8!=)kU^HB5}H;EE^w7hCxb4Jt1k)5ZUt#S zXY3U)6!t8Nc9WvYwnX%yE=Hjvd}`m2NImBRB@mzLzXt0kGoXu%=}^=FY@&6%m6 z6SQaZZD=>Iib99t`5Y{D~?o;doqA zOg&|oDGfrIg0WXnW3t`K2`F&oj;sB`(?U9eu3aN9D5D64|uk`|UI+Sp`@Q%tgji&8`>m+%Kh 
zEk#6>NGi|c?()b@c_%a_WwVX{@ZQTSrI!~+6^K)Q{t4Fk2)qAf{`z(O&sQa1($ncd^`5rfD6euHL-!zzKKDN|wAh>KYw)oR1+k`i~R+=vJ6D(GAZ zPYdfvlqc1l$(?Zsjvzf51V|q4Tx6ctB;XE0+qnKR|BXu`iX#iauWJLtqW;ltURU9?p zbd}rF{@Gapx5M)K*`9yNl1BG~bPPP(xlFE!nLm&xWYJ z!mA3ZUF_wa^0VWnQY_itNG2N#ny)UvMV40*lqrZ%VoNYX+zMKpB|9q?(59Qg&m}bc z>LIJOVq(boI3e)h#4+p*kE2_*y*GS=`)2_Px#XDuPl8OnluQaM$0_R)eIH(`9NoJ2 za-?!ty$vM$7mB{?yvk_)Z=(mX1>6FH4qic`fpJ@c;})#2oEeKgKSVuPfcxvWKumTC zVUAf5@*peQD)skYG$#8i6)<&f20xe3x~_+*AnR7o4YEO)1UuKq5OgUf`?Z=RW|_Ct zn7?PcrMS#To3#DpZRO;)YV)~gYIW{zx-4OHPaiH{TJ zhK4w_gM8)SQhEQul_1q*qA^$y*KX`Txa(a~_QziNDpKD6zrVBIPc&zof`T$dO{I*e z&ZYy9YQniUXw<{Li(u2|DXR)iO2(~V^de8smWYEt0N z2id6nFrxuqzlsAj7D%ng%k+Rjev)w~>o%a56I%Bqr5gpgNWh<>eb z(km==TEriD{G1LB%sY+@gq6oVY#v_2B$4?gtR`YB=jEb2ab5*wx8lZ<7H=__@aRC$ zAsX$~(rI13oPsJp+578kUF~H{FRp$0V(8A^@TbUjwTj!+L$X>qE^`Qj?3AO1M*`%W zAq(;DqEuR1!430>uTLYu9S+U5A9Pzw4~lWVyEk-ryQI9lSbCW&9?cI0uCB2ggAPmZ zV>0)U%PMcftv|L`(u6S=R}quWQ7I1r)Jp=mp2v1K@%v3;w6X)-Y+uthL>#&a2w68j zbUqDHHVBGiSbei?fBbTYJOcyPF`htoLzWX-wJ~H*S!FP^Y8_XtA)35_-)Dr{4mXMO zSQRsU0@$x=?NprP?pYw=P7ctR@1hTPQr3-&gAeKE2!i(*!uBC&_$J|E$eba2ot@IdL=`n-$rqES7MQ>1prpyp?CYbcBpNKtY zUY^fR7Wj*5h|-NBMHVF31JaCaMj&zlXyGQHeV}nsqY{&V2(aU(2lWu#D?PBEl?#Xf zSEOhQ!om{*CWh67125XP2nY8%PQj-ya;mv#LlXHe;pZF)u_!iiZ-Rqa7t_0|L*ejU zF9*jyoPo*;f!rt6|2K4F=Q8T7Ji>Ae<;i_fNMy;KZ0&r^M*>wSg^@L}U&&V2Y5($t z{n54O*{eWQ^=Rek!}a}(qhSzR`)*pDtmhNs&pxj|&#vsxPO*KwHUwB41(kGCL9_0u zb+?v`_J(cDUNkNca0n5fXABb!XxP0N*?=t)hRymd>!B$AEfYX73%|+Jqn1>Mt*Jq)a*? z%s0<#?qR7ALG_c`?*DdQ_P*UdP31f;t>W*?LhiC-$9}-yJmyJ|I3&nMdO^VdO5-vd z1Z;c$Ln6R8F@jqIuFDfdfGLE(ChWU~lYxIJd|pLOyd3e4dV;|0{C2MGT*dP{VGww~ z{N=&K#|(vX20*d9_50c*#?9vS_m3O980@bCH+IabuCVb5qsC7jw7LSX7i#R{Rs2;! 
z#xDNL4+9+AoT8oV&P5VJ80UJ~b&@&wYr&JH7xwhw?5l;8rOWo3L&`z5tATVT5@n{e z7G?cJj97P2>E?F^;RVt+nC>+K6T!sw?pU`TiFL=q(cd&5{s(Hm8nll-&5TFh`t@58 zJdtBU;_Qm(2`#3pEpWJPO$P*=P2c}D@y*&d^WV&Uv!VRg=r@bs-2Y~U{kPzUf@Y4S zYcHWsyp2n?(XHPm89_hvGR{6CK|7H}+sEq%unBR+KInMKqN>=mQS!LDQQ2!_!E00Y zM7^1a_Y9sf2a`Q&rXz;0+78xJ^4H%$(31c0&%gG)amx3W8NXYe(S2`e`o5~7WEvy^ zFNsp-vA@ zzFzuz`|G=3-}`#@>mvJG=opxY`Mt#8WBc2ZN{qh%w&UR}G9QmaDRJ)0j)4T$=-c7X zza75w?eNWShrj%G_&WZ)`t9%){Qij_QXcSnFM-Po9&H(Gg$wu{d|Ge%ouQYLmBW?# z;nk~0*Y0)NJl^m77D~r-P2?8YD{9NTwISiELde5gti(3(V6c_(vw*|TYv2)y?Pb9y zJ5OGc@TQ<7v@X4~GT(%IZXL-~M4=}*Ml-t$Ls;}V1P%{;cBDN5f5zXa;(iH#8kRk_ z#?Znvv-BSBTtt&{6P4HMPdaAAr{K|EU*Fvr+1Qmf(6MbC z${WAB)Msk3R9fv%8mW%a)6|E4&pg%dcS|YtCTK~a8T{T}|T?cN{StzU&N^^R|VG$_T78)xGg<@aV0Tro3yKOmR#XLd9T1qOgZ?wLjQmC(G!nDC75sQL zisD2KIPJy*BvYZI8&vw)qsp!f7Cd8PW+Ll>k@vTg{kjPjHq=j3-*Cwr3%JWfCW%BV z-5FD3e*^QU6n5{93Lca^ngo+5{ZMWqHYsx*2qaQrORrca!`;a)q(>RDFH)N5SnM*! zf!R?AVJ8xTJenQno8=yeW8fAM4&){G2@#)=U}{b9vI9NJr^i(ZLf;h0ctV=TM_-JF6fs^zMv!fR%Fnd2 zgE*K*xZrT@f{+@nan%DS;m8%s2&=Jc$hNZEWi|FF+`DPB+~f|3AO&_4n3Kf)fo_#6 zrl-0r9%8OyAfMeUO0%MZEg@W*hF3a0%Yk(e3W&JZNz@p14;F3c`!**SLFt0JFeOXG z@u;8_f<--o7A8m}v-R6uf~`bi`0Tis?sDsNnN;>HIpuKb1r2*ttxkesb{i@3fgYo6Z0c2)+{s@SQl2>T9gUlO%ck_;b^U83TIi ztkRc>_vz_Oa3KHf6x$E(hRw>20=<4|QpWc$CnE`Arl5`e2{XdLuHI4rs;#nbpl`e- zWsHY7NI#AMF5^ld`Z2{Ct(D_%s1y(s86_|h@7h>7W+J?Q}R z57`%yw6Uc8C_AK$L$K@@B-sX$YY1Q-;Bfal=jigTloZx4vXX!LzHS zXE#S)%&fim^3$&8^JDA}ad(6P>8aOepH=q19Nw?jW#0$+iLU)etNX*3_h%kFD}L;; zhwmGuXZ@&=zIdibPo^U5y^_-K1>_I2Ztf=l-Mp=7tUvZY%z;Q6_#gu}?qP2^XuX;Mt++*Orj&Vb*rjdjZ%fvNzsucgdguA%| zMojmqs~t6xF`WlN6t*~{n_7ywW#S^fVY@|dguLD1_TfGbg2Z8xS{gi_$Qhm82%?&9 zQ>O4PZK?c*ZOQc|eO4-9P9dfr0;8`ilFuu^QVY=!ONkXJeJK=T5!Z$+Vz72QcZKIS zT4nDoJFk20JZPbVEs-(RSbjW9bT&q&+c4=Xh*mCm^7Mzh?oDM6Qf*LEfIn96Cl)so z6yiGvEO^G+yT5tgJ6=%|DS#G&fan5G{;Rr(Sca~B5Sc%O8&uJA;2C`IqlhbuL%Wm#>N>kD0SfhuAS6szS%0!?TqDBMh496&Q$?3~v$>Eu>E0MLRo1H)f$0nA% zf;?Cayj}23Mwn-EpIx#ovA`xb<|7>O?9r#s9!?)z8-Mz6oz%X9)O9>tuRh;cIas`U 
zP?#XWF7LvQx07l&Fj_Y$#;4ON(478%@~*9^jcZH)m43?$s(G23_j8$Zx1P!t0)n44|jLGio!2UKAUj&aRjn7!88dv~|g zl6p5JbLyO`1WT>Gci(pJUTb~pTc53Q%LVIjGFwMk%Alo9f@B9!aHp65?#arM)F8Se zWx`vC9e5)&F+4~X{xDq6NW)R8Xlqb${Z+4v1{{#pg%}bCJ_~4dp~3v2qZDQRp^QIS zKg*bo;uZ5hgiRfl)mxqOO3l|K+BvVpKj&t{@1Nkb8+GSj&zU*EX3tyIBEYr3T|t978yV2;HX%8zFYS$XCm8A%YyW?Mla zThC7QL_80+z{gdT8*D37|H#Mo}H3xq&vspc@511C*Tbw;gtgg0t?wO2{MmcvKj)%XY zz>~=6s{09%a1U`U;MY4GDg@@vPw(z8pYP0HuUb2IIwT{9xfVMJ*3EFW0WRG>!DZVC zRYVu0jnI2YYZ6G1ydf#PzKwrZ8iW&egojhc0VV{Xd3CY&fERT6WbpFw^5yM&h$MQ_ z?4#O;ojK+U30?MLMHMVm^Nxak5U3kG;}jjP%Rb)FVIDWte7Itzy=xUN#9qR!rFJ?A zF|^pjT`}QzfriD==)Na*fjS93(S%<`JH3^Ei7i11!4poX zpi2qnWt??9}VwpU08|D3Z>7RbU|Hn6>cpT+-CCx`SJ;CXQ}< z^5~ZFHG2&(|0JK)2f!-v870b^2#?=9166}JN9^x7b8?|bJjkLW{`4FeNXPdj-CX`1T1YejLzR7DE4W`V~BAckly+6-Qy8OO@#jxS_o3`yq8&dy5(dFuCBRe zCZk#PvU`h}d`?DB=S?de2RIEl=<{6j54Ki)$f*=KK(3M?IE723kBb3$?{M{UL29~Y zo$~g9fQpnP7jg)V660$=auez)KVC^i$A1fvlyZTgXRM;b;?eye)9W^2y2w#HT0#1sezBh;wub zgCUM7RYKu6iG9i~MZRBONG!6DN2FNXuYC}{`r)KRoJ8*S$5z&b#Z9=M+atN1(h>i4mx|KhQ(9Dkr z^A4SkEUAc}_>talEy=6Mp9JF@B>4E~6{SpnSXjH<`X;gridqa~JVfOYf(m;)b(wFW zKJ#*v*lH=Q;tZ4tAX1KDCd$YIZK3$&ysQPJs;LqT5ghuX1Y_vz%UHp@)Js$nS!v~z z{~!{D=gGNN7F!9fk`nN}si3$*tKO$_)f3NUBzV_fwKw3-?rTtc0aekk62tKzBx!>1 z8v8WaiK6zIa8d*3=X6}n!#&h7k&}B3!Y-|y#VPU$w}pGT%u{nQ-F8nj3@y@%|$~PM78sk~cKn(f;_SKYXsSPJjIDi)#Xm3C^u@{dxrWNH|??Vm&6&jM;1` z1IUKMu`~n_DY0yv4iWLfLpV&0cJNe|?h7{tuM-=VC}>rfVGU;m4JR8;>>>ULjgR=b z(J6>?TZjJq+{Vzd6=#^Bot1j?gl?ZN=vR>|t&&Aq#VWWW)`L_KKacO;9v3kzy_&bG*B>KGWdO z9te}G?X|&>Mk0uK&TU8Q=a0E5QySqPtNtRHG3+kQ)gg%_X=iPI0E&C}ncxmkTk~?? 
zY*=oBLor3M*07&VD{1ER!q;r)23v_BQjEK1t)H0CtX1qmQ~rakN2MF6zyFidHhVsV0DmpLr0c1GkH6babTnw74#{2&I*F4&!+=_$={jBL(~xo2>5u;c0QD87_VhpMMT z%%$1FoYm$`*V)sNHQ2%$K(gzo@Od7BI|P(hzd}(gIxF44%`rfdH@E3c$vh))k;*5V z`?5}C4Lzlylg$||6u}m)05@L1zeV1N6Hs|s$>!RWfP8Pa;?S~VY3G^6tyT$V{bBff zI9)77S)03jG^l+nZeLD5)q+X8%$Hj%eZ2kT@}Ts~)+==6_D7j-_hFfRDUE(CmZWp1 z23Hf`^u8z}N9RTE&!AXbgM?qT6^ESXb!6+#P#I0F72Yli!>mo> zZIFBZh(csf!2nx)_D(_g=` zOyj0AfS5bkgs+F|TYZ|T6`a?@g|rWphqq#{$r>{IlnyRa=vKXgO@W6-E0H7d;#Dl( zko8sDfdeqPi74KW1;qns3|RmS&t>EfkB2}Ho^3xko}|QU9?Zb6Z;pUPiuFK&X^IKj z8Ud>>QA_?Aa!+eQKK=!Kq@!f5)Q%DYAaITHIvDUfc`co1W_qSxPN$i>YqC)oz>BRmS5@VDU7NJ43H-e%ORm;}yV{Gz z&z6q~6;8#+1W5Vt032XmzyWN6TB)0qC~$la8QM@^mpIoipsVlEqKBiI_t5xw9nY}G z3+dfK`gh|{{>P6U2B`#|_uoC)XW!kUSlR!+ zy7z8l`fP=-((mrfTnvuk6gZzBmEp8lEbu3LXGbGEpJ8hHY<>RX&Nz%XImd>U5jS=K zZx1K^2LOBj|4n-EAl}rCWKIec!zHUW? z_NXk>+=kwXxml)^1O0h+6& z8i-?o9s7=DrR}lQPF5b{_Y$^kTiP*FuEDQ<6cCR6tiI^Dc^goANYALH2|sS9377Uj{gTfxBkaA8Wzqy z#y8spe+#(w6)~U{fN%KY_P2cwLYy=o#C_yXxvq)ARY%etiVbx0H0N1Kjb_fusxPA% zQk^@(+Y4ydZ(+LoE6=|OSo@HutOjm_Si-@#XS>U1W4n>j?6(J{cgt%bo&2UWF7_PA zwtCET0aE8lw2oOG{R8kVX=if<=6;K1OjW$D6_iDfHlA1dk;h zK&sVEjTk_7JAQ6Ei=fD}524en?D4S8`HhF5xRGs#cmVc4;>k5zO1*sU>&#rzapu<# z1`0j`GgQn;-atdDzNQFS(<3%e$ZLNVyTMifQU4AeGRM$vvG0H2k6A)kVuqy%QwNMP z3_*&ATF-0!cwXiJ`v|MP11VN^K3iXO&7`hcYna<|2uO>4umu9h%ydSO6e(@Julc^% zd4E+<(!LZBG>8vO39NbJOwe1&C%vm%RA#kxkdIR)hO3SUGupzRJ0XlAii?{K|9Z`vC z{A6>tp*jB)te;$I={qO_$Codke|%W^I9dy!#E1otmM@9-Wz^yA}Li<2|9# zZnVygFmDInDF)07)fJ|2DH@WCP^g2&(OO!dYLKTaIL+HF)}SEgdC9+c;om0!-!}Mb z%BXEoV4+p?%oeDqRj5oU!}Mz|ujs+FRU|~0C4fsTXdgkEJD|!Ktj{A&c{Y{Zr(0}`$;}V` z6L379V3QE~3?*#II(N}wpOj9hti%`+=DD#E4FkQKZ#hUl9 zzH<#hB&ZIyE1JG~b98*~_&#{itZ@Snt5T{LTc1C(a1$s_jzle|<}Lq%wh^c;?#A`cq2l8mHaM#Gj3^yugsS&a!NB* zgMNl(&N`u*7z02uGi!7kH;n-483Z1S0>~Uj>N8ca_DWUSBHIe0}y^=>@@)=kuc!zK8=kHVpm#Z!a&#Rwy#kdEDQcPVFXhsX{g)o+K3=GiEol zrrCn3*w4!9xrCL}Bhq7KKgW*T$HT7le9pyEdNDfo>25M2Q#$~;Iz}?#=FN0E3@_`@ zJ8#F^t}iiFLHBw_}+-aV{#X_Pw+eILFdYr| 
z3O1svo0ijh6wSS%yPBRixI$T`Ut=qLs!QuGxLQ{4*DTxBx($upTGvQvrlmQM3W2jg}*XJ|Qa8$L{HY3Lj3v zgmo=AWB!7_XpIp}h#>7dCgSFMnn)F(0-p;!f?gDX_I1_ac~JjX+XL2}*R7e!*m=icj~YgRa^B);v|T$X&_kKy6jRxh6$2>ZQ^PO|NrTno58?5g zyJBnApRb{bw2fy`>(}4LGWCNpOMEX7rU^|Pzl(vGDXdF?io6h!DR)fQV3+-P1~2F*W}3p{+=ccJv-pU-DXkz4-6Z9ljy*3k zV0BoSJxfu|AYUQ?{N1IxOHI8ySX^T=S%Ap%GI5{+qpnID<`6OrLJw@!E9*9vuefN8vyH&3vnlT+urFqvD`%8Y`THuSeNfiOQsSh#j~N}zNFQpf zP!ML^cLl8br>JXPq6Cqarv8=FL>@@8?XW;%m&pEK!7nR_wC)JAZcqy0hWQU&o&z!F z*UfF|O!782Cqyef-Ir8#V*_P+vtGp zaM!K9wsufIJF@S$?$1e4^99={g~0gANg;b8In7&ia6y3FjRdY2Ra|bE$2dzV91SZD zbf53y4(2%ad@H-y}Wo@1dGAt55NYIQe_~3R{ky<~AT?e(!ZfjS- zZygJ!jZ|c*fDngP!vTgNg+$cpzs~%SaZCg8vOlWm4fD(n&sC=1dCo{zz*pXvB>)<{ z<|6smB^vZ>wIUJOheZ}h z`J{G~@VM;|eV9C{l`uc!P=p}Xsb5GGgU2trLIN11V9v&`VTK5#dNc+!t}P#AJ{ai0 ziAZ4(A8B1gIke+DZ0jIrutP0_A zt&hBYv3mA!D1QFO+4{YA#pmbKmG^_w%<2jRQ*dWMHuvq1!Yi`<-$Q~(FwujM9?mv)u@H}zY(%5@AM#ZEIfN4J!7CH!N+t$^1l{Fca?mYFpNu69(p z{(==z;5~eRypgl>Zlb$jW>Qx>ls>c9OymmP?0LO$RSrJ$G?5HrC7aOd&%K|bQt+cj zTV=3+@w{##b zvEE9VBr+M1U=P-;e~NViu0;XQ>Kds%s&nh5=-G;J4|lH4u!qy`bk>p`gs~aJafuex znPmSh23~%|SjuB$*zYSa?5RX!O^6CsFnB^=DSon#5subDkB%Vo=(7(K%aJ+vA~+D> ztw(jo%O{glb*fUt zQt!3P`xjr$5`cvsaYQ0-Jpb^9(}zsiCUbht4|o1AfQ zx2xEGt%gMd7*>L86jYQ4(ZlU{Yp@6~bn(3zy_tM2H7PS$U7>OYfg35AZTJOVL5%O3 z=to8^#Mb<>-$T^kKVVmM1cQ_VE2(_~MZUK23U9M;XD$SLdpf*&KKdeb;r;3Cg!gdo zlKX_NcBPCg)Y$VD<5+laq2aNl)~_Y)k8^khu_&TXp0Ax3s^Qe&2IRsWT(nExi_{WZ_) zMqH#tL^fW!UFW3(houlqi`uT(O5hb+F|Qbmec~iR%?oDh1^&M*7V!UU zAgA|&-azi?+0Ta{pwHb)_DD#n%F%wt=uhb)my&I#cB9ZtJ(3s!%I8DV^lP+t5+JZ# zRsN9CQi_`I|3LA;_VAY1IdFGa zcM#08=#y8t0)o|Xy&MxJOQeY4?a!CRs14HKw<{zRalb2k2W!RVdIv4@*X~~I4*L0~ zEtKG-keQ>nWcF?^C_WL#tjQvkt?Wj0SGi>oS*aEDU5oEp5y26~IPPv}{BA90J*6b= zF%GieqF1~@%i2bXx1g6GTnsj+E)GCowiX81fxI1CfHZ-3N!%g=b~`nSOK zyZh2Rh4HOH6g&iP%oAp1phtc z^sDu!!-$d;5G7&1I^+s$){Cn|{;%NnaZu+E0jw0uXiGaCM-hrcGJU6N67(Ju>Jj`! 
z29o|mcBtm3Ti`CI4ff7ro+5vv<`vU(?$O_Y$V8KOx&!xtPu3eXb@a^Pt>*N8qh$$- z?N_x|l~-F93WTEMj)v${s7Db3Ktd@11(hsFgM8;*WFt6)Dy%q1QTw^~kxHoi`YGw7 zW&U%;LQIilPQL%<7*i(Um9TE^0!{J=*LJ0wv zRYk)9+df4SglFe~cPMpHLtu?+C>9V%^D3aYMamX7#S4JqV$PLNh z@w?Hx*na)Me#3v>BohD<5o~lL zXUM91=mzRg$Y)5XIV}|*l;5Fc7|Z2ltGW&L#O~CSvS}gduq_oV3oX+rYhny&nb6n6 z$^ht2uor$`guH=tr?i#0ez6{1S`8!}p@Bj+(z#{3{`y}Cn@M*t$fvYKtcj^)_Czw4 z$)|F9pI7=sQn`^&L)J%7<^+xv)IdcD^7~Sazz5jPR>GPgJRZRk(BgI@!A=KVUI1v5 zLHP!srgB;)n>#wl4dqi^g!Bg4VT)4(u9_A&7Le2Sj1FBMQ%byB&2$CZ0J=%1zc!B0 zA>fQ*(h4}XS2Ynm`n=2{#-M}(z6{V44>~@oU+3e~0fPbG!jln=B)_Aoji9svx&YO6 z#B@eb)YDhv9UvIAxm+E-{Ptd`EP#R?nOGA@10fnW>)TI)dH%@P9Aa`hMTlqjAf$y* z83O1a`ku@vmV{7qIYgcI-#f68n~J|}m;!C~`{MdOdK|bMFy30gSZN!j&32*fS)duV zH_ILECMe(u8ZK1sA#$jg5yCiz3l);Y3*yb`!hzSlg3k2{<_yac-snv5iL$)#q%VPH>LLAA#_|C z58X)SZ*e4C zfm7u>?LI1kDm>ozxMNPab{cfc`C&1w9Hmn1pGe&Wc+ z_Y&W!;afvvZw*?-I_+(U?@YR8_m|r6`NaC!{1`MGAAO1yx)0zVKw^%aK3zB;eQ|uV z#ymkfR~)c~n(+t7W3D36xfg+*tRbaG#2#^fKs{Emm-o78EnX3Azuo=6`Fb?VemNaE zE9PBAnlc(M`0Lir+wmRc;=6;(@3t>$(-#Ztzzae@@LJ{K_VVT2J>_D07&t%jU%#up z9jm;(`w)1;+Y(P>lK?!dZ?EEC%3j@Yk!RKV&P08CT~KJ7oP6^x#~leK{emibJvc;% z<6RJ5_oi+766(xv2sX2|Ufdxc4xDX!WcA)I;r%^JbYFe>mUs{>;6NuoM@xto@9vNi zfHX;Ga$Cr=As1E^+}FC`=nBNiIS9NG*2xxaR35|R$w%avJoRPw77RacK|Iw>XFlq?*P}Hf$4B*$)xI-D$@t)7-nuRU4 zHOpnxfog>nTIGvat~Y>?R3Dz_OwRty$^O?MfPNeNl+)o==E%^2pUqCc`R<2Es_HoV zN#O~GlP3_rK48TrCq=WK08Tn)>oHJbze{3lk#MVaNjYq1+2x9Rd~ z@o3&{^l1rQy6hHh+pb+{k7Rh=7-BDGwaEn1MALxQ;Yl(<_0DIoEucJibrVsjb&uYZ zYc^=nmk2PSw676;JPX7CRK2Al8@M}mS5 zh0vTG#8TE9VV7VcX$N7FbqlYW{IyM1C^2<@VE zE5mRN1FOJ>$rLUn+4B7Cvk-()=;fRd?4}4-s?2fQjA2U9bNow_Dzm2zu!3#aC|@cbI<| zhEL*oNT&}D&UUM3rEi1>*_0}JPtEFaax&Qi!(JhwWQn|$=7q}j)oHU7jRz3oksVkP z=}a7m#*VDns@Im0MY5FVu z$dSZ}NsweeKFH)#iKN!0r4pLrm9+r%X?P*n{imh|*f_RHlJ5{%4Yoot)R=??q@@&V zNlB6$t^|n0AMC3UCZ~surhvYSG;vWzUD7S6;DUcF33kDH1vV`PbEH^ElMd_nRIgE4 zfCo3=Q={d~6p#e-&EyMv&5`Xo>I&)P$YA$ojt0~gQMJ+N@yG_w+O!nOmd_EnuBePc zne3hZ4F4g1J@KyhC@6B|gvg@twaUd7$0IJk-BT`SA2$^0@B|V$-2Ij)W1-rcWdz`h 
z9&oqtDjYhMel<9Dr=uZ7MH<u2~LRn;zeCwOO%CYpb>ix=p)xI5^Ha6tkFwicXdR z#+Jvb%YZk77xV)G)|L~bxLhRfk`H&bjl7sE#(=6AA`{1~u>c248Q?H=UiRJK z%GY7f&?{!qgpxpSj4tzgM~m#~ZAb#mUx8PKRSp#|V7VgPH?R5!*;6sQD=auP3K3i7 zOT}{d*8B<%+?heq5!gcx!g93K;WB#H04SJMG*z!G+Vlt)BBbTpE@Oh{2N(8FAFXo?6rI(R{re_bN{LO8v<7ir3tmNs)}B&NSx~`hw&l9orN?_?;So|7tQrOLb59W-W_4R$ZNKvG(JkBCKD`mm8bGn@gEu~Ii*7E1 zsk!ajK5LJ)XN;T?HXjBzvhH*PK=xsK=p<}5G(rxxWhI${Ep6D4 zf&F27AZYesd(0WmgiXChupwX%p6S>ACYXHe58`E;ru0YubD~s)NAl#@7A~H(Un~z74`@T*m#`sdDmbipLPvaQM0c7e}_tv(m8Kzx+7xt&=gb zxu3t$5=YMjUE{{yaA8R$9JAYng-Q$qF*v1HjuFg>l~AAG>%waryn&<+sysZ^}#FczY8L zm~C7jZB?hzOrgI0oLmmV=1QV%jG22n;>>zAEoGBX-3z{{sLVTB@TQ;R53ZGLIi+P7 z%Yim|Ar*3u!hvZektfrI%GJRLv#f2GwGYy zQ+~vY-Smgfq)r2zJND4%;4&cWKaj4~a+1?nu3rHpK>%jku&Huv`QdG|wyepriO^Ax z5lQ%sT*+`yTZtz{LdqgYb-;tl8Z;Xreerwvh49yS(ExB-*qYphwK=V@o*XFJ;`r(J zyHQXyn9Tz^^PlfGPbln`7A)va$GQ>`jM9ma-4C2rnkt<;$jsQUA)Ro=%=A80KI4wr zB@@x?`UbOKL^@C-(IKB%h?g^-jP+@K%*E}N&x3;rx*{HNMvEJXNW07$(Jn4q1DUkP zRgwY(lWGpMQDX^Ehi$+H`Bvo`Y&FuLs}^&^2kw^J2Ia1_rcE zXa;pS?TR(+IuGw)+inm-_#a0v`i&k%ocdrOM92o>NOtxEdb~Fo0wlbqWz|f^NG37p zf?cU7K^p`XUOReqxRmf4A zs+c9@+eZ}94{{bO=ZfsBFrp9qr@7_%(=4LnzJs6l(7|5=KOVjuVFtmiAIMLqx~~N+ z?INeVY)U4Q5ToynW-{N*yPikS925ts3d?uFfOGwcF*Km}s3~0wcb9TE`Wmz}>Nk@8 zrcAHW9_r4M@R^|vTjll0>oNA#Fo^aACkYnNXB8zm=&f>ZcG4ypcW6r@gxa7mT^LBB zjT5}&dzSFy2>Gnfu(*bKvZFR!^kpC@`j|PI&pE%lPvV8Vtjlrk{t1$T!`<6DnW>x{ z44=&SCfEvU7Kh_ae$W&w{X~NrWXMEIC<*J$q;zX)Ld%c>g{4Pb@O@s(6X0sD>m>XhhZz7c@S+IuO8{^*TK z>64h3z9LY~m$Y;DJ63@dLm0AXs$W==8BK*XqGU+AuEdODy8tBkKh=;09}=y#os{Tl zQNbP;l$_7=GR>X@hr%atT@apT>AEI(OfzUQ;-`2aotb$JxWfETcQls{MR!u|*MGXx zix_d%$ak<{1#!WC@=dJP0q|wSwTaE>`_i38cT!7f%KMi0l8t?cw+c#BI5aDU@)`R; zY&391WOHh%KVh0DbQy5`)JgUYGtL?yiXoZvc~$Mu;#$U(WEF1HprdXTylc4Q)r8P> zbtH&tDZ8nvXgyjypM#EvTGL5TQe&9t z>p5Y*S6_RAnjIT%PgH@8V6}+5opNY;_=$f=BL2?HfvK_zvgK9Mv-Z;3Sme2i2!3MK zJ#@hKqYLowh}T>WhuJ>GF1lru5Xl}^TmhsaRJ>oBRWMvIq^YKeBz4dxq&GBFd*R&$ zu-QOX`0H`DDD|$S4zeFeFc{Xh1SsIxGk|r@K`_c!{sEO35odEt_@u*PZJF4zEdl2wL 
zu{s*Kc5$eX368DcD6T`|?z_-j>_l3##C`!U=k8sOJyI^Gmfk*?ytw!9;`V*7tZ53! zps?!l(ZR*yBjwlK>C5kz-_C*o-}rWJ>Eh0P<)Sj`}cEcT&vd??SNi8~iDsX)kA3>L!7|_ZP zRj%jsj`^I%rU>x)iP^gVwNmJ@JM5P`j$hX758)c=rYNEy&ORrpwLGRIs$H}wagio& zw-Fp=EATCRSQO84*#9{8Re0G%`~~*(NRnf7UM@%~qFDVIby%3hnmypx6rZ@>Y=|)L zkb7xteXu6fTg;63UBm;H+{l1}Z54*asOM~hLoUWKSd88B;0POAEVz%UsXmP~h3LGN z#YrR*xqQsLkeZpS7oS+QjNTNZ*;8SKj2+mAWlT*NNJe;vzMBu=H&F zgl{4=Z|M4YEW$IN$;Z3ZK3zpMn*rVu zK#%NTa~uakf}>S6*=?A88tkx+6$=>5d9g~65ej>sZxRT;;5sQ`bQAQrd? z)fND?&DS>zP3dB;gCqJkc!P5aO+sQiu3if-1-wUd)@Z{#*UN#;a4>sFTrY}IRd11= zCYnC-Wg?GpxgWwZWUF(V3PxVNc>TabS4Z?=5cdTvbJu+VofvHQg&l~jDeeN3X1HY3 z`$4pky<0rLbMNF)5gIS--zD&peNx@|U>i2w1A(e3;7Os+-i9<^VMKE>yM{&Ne)+LtXH{pX#n z80*n(+1oGbg9-Kw%~`cf3lp?*+PQ30>pm^3#@G}ew6#3Jr!CJg5OIc7JfF#GQQxYm zY*b4lt)(C_tp)RJHW|@6t@cH>Om#(ltbP%7p?#5y`UcBriEcF)OJ+NyO}{iUU9m6W zVHV}BJH!@cvtyP@aWk^Hu8f9heXjI2+H&LLHszZ7KkQvwQyW>5{wwu1yAdWfqW?kn z^mgyW_G~-0ZxQ>lQAlMdBUGA_1Orc&Fc%vb+ufEu#&~7SO<<5@VX%yC@gVyh{sA#G z;BMppuqQLmshdiwItA{=8~b2fs&lJ4b?Q`RewklpR3I%;zF}f5usH@q!rCnbgj}?M zd4!H<1oq8A{B=4G6ZY!vn|sk&eb*I{rNPnhQpb)FiG~|&&J7y_%~xRlU^l7QBL%v^ zSv8TYFxjmp`?`Xs3d3bVPl%%r2GYWyO-6-`+u?=>&=p6@y1Ouv$>xd{wigUEa!l;% zobZ!tAiBtoA5gY*!ag@cMBO_3JN*nr=&T}JP8Bhqf=GNl&yE8F zb?xroO;=AIJbiz)`mXkn*yfd!tF!NFSA+cOoR|1?4&!L7WYZ0VBAn#yfSA-BK{R$i zO<;Vl`w7Fd?eK&-6wvWG8=hVv^mmabGxbO&Ft)e`e?vCQcqYOT zJ?x1hnc1(5W5EQsafb4^bM_7PHFUd!ziWcAdK&F?@Pm@5c6=Godgi>#-6zPnf(cNo zoMt-Qos0Mm`vQ*&)uZY1;r35Qv(tyy=ML|6^lcRp9d`S-8{$!j>)fUk_-Acj^r5+c9(igyZN$KpAg=I_w@i*0&sSIe%j@!b8@2q;gku`_MI&;H zX>jOLflK6FFjAY6!jLY7IY)CQT+Tq&Wn$w zt#2$`HF(y=xm4s@!74kQcr({r2hY3#J*{1Cu7fLHx0-0Kzn<=sNoWSb)8{TxtBZ@9 zE+GmX@!0=^>_q_v2DMMi$Pf{Np!Kf_eBJzIEf|zv10D~tcknM+4gAAtu8y2!ZBb(Y~u^It$$j`e(7NIM%CWpvc;wW$7Tf z+}bL$Dm083L$`*I$!;Pg+#cjgzubBB%a!?qkz2?hNv>ga$?$Z~kFZ0*1TKI(@8Uqukxs-B!NF>`zaxyn9$u-cOa@=NC?vW!winB6xH$ z{E6j5G}?i4vf+-HpY?bk$wtDrzN#VR`U^ZOll8i?zsr^*k?W0R-N**ZdoIgT41gb~ zy<%{|+nM1rNF@T}pT70^j$7X5e{Q8+EVR~x8vTyQzB)G;h*4gW5q 
zzsmx0VuGg&xzWgd+1qF>j*lK3>PpHO)L)MdgGWnB?b9`>+@xH)`P%c9<@!w=^&7~$ z^tE$pF3|R8WUSAg?;r7I|VIdm04cM?F%Ujj&8-Q5Om zaKPCmWBcI(`1Vc`ebV)Hhb&5vV5zscWvajrodWqqHt^rGq-3?X3mx1)D%XX-NvqpP z4k-gxoK=e=ztSo%rjhuCNYe>;ZVZSy$J?lm+-HNkja=~Y+o)r*q;>qegar$V2nDNP zmj&!{HnKxUkV@)#tI(kXiCJs^xN>jr20C=0`^Ph7_c?fL>^7Wap+E;pi;=P`?bag@ z8MvpkRoVUd#UglL+^-*wZoGX}`T51x;nZj(k^}cht!nYxiE%c}+g#;v@h%(Y+;~^m zR`0Gfx6{gz4A||%Tkjq5>2~v&k<6HSNc6zBhg2dw>uI|VhBCcjT@TuHk%^t=5+K4t zwz=6G?{4R45)A?AiY0W6;SUBv{>W>d1rSmYQGE>zv*W3s&jt%|_HCtK2r)tnDAI>F zd|FT>c-k=NEkRUZr@%z0iw_Tp6gaH_>B}Nt@)=+w1>vhtj_^UT>7Z#^_xvFck_r$0 zfv}>DFeIg+aFPO#`~d|qPgDe_LrC7|o>K72$({Q9>m5U|qd_GFL0M3-6XR~41ytgE z;5WjN5rl(E3XZ@ArVP!K&T|HsNkRD8Pk>i_sdJZXvj3gdXJq8Wu`NUgPG-;#eG?J_ zojGt)KxX_6A$qTz8ER4xzRD9k8R(Oa9iyN+1Fpq;xA8_l2zGKDL8`5|$YwyWX;z$!x6{k>q7qrWX4Z_TvI#lJPg;v+$y zQ*gy2n*VMni?0NKUV#=Lt>ByhE&gi7`9@qUjU|stOYy_wE z8{e%WMPZ#{!9Om5#?q)iI6!0R-sc~niH^*KXGykNUmW_9n?&RWMf{{Il^%*2sh&?{ zkqnGw)Vt!{@rVS=A{pDoo>O2jmm`o$XJQF<$-dZ+6a-`H$xJ)+1wfAKt zBoxK~JkFm6YC&s34%sA$OL6{`;L5jpy3YZu1feOPo7q?d_(bndv4;!kY{CL2f1oC5 z-RRhaNR{uSR=dj7-N;Z#J0-P13`6&kIf#{X1RR_Sp{}GeYJVz_LiupIdN}b+9@s!` zWMmQJR9^GZaEy}&^^@_dvVety0&B+!Kq#1CWlS(!Rl;yvMT0V&&m6+(CO&MUppx&*5o~4L zok2No21U!F#Xu7QZNMxXIYFpG@>(7_QgUa3GCbY8!h|9w7==I-j;{R#Z=eK*a5!;S zwCIc)7)FA4jZ@;&0m;~vgb`B{bWSrBR;((f)^A|SDIFL^(Gf%vI1r?|i^9C@G$DIG zLKJi%c#0MLn&h$8nh3{OU_JOZ_mA5k3Sa-~1w7U8VMbo#2JYo2Nc1~m(K6@2)D7P` z;ut;!1krI~mRcqTV;@Gbx^r%paBh}b=Vr-!vp&%jL`cbXr8A~+nexMBnDC?|;g#-d&Lxt~byW4!?1!0b z&aod!t=murhU2VMOEzEeVvx7kA-(8YtXC3V;TucyC3y3v6%@&|B=S9LdQ$6$%Td{Q ziUH)&bc%f{E@>v(CaAv2V_GCJmw(etQ|*T@k05dbIZf}^O`vejCRHx#L2h5#=nQt& z<&0~~g0$}$k~qxQ=xaqh%?XE?h*Zqkq{gpEg^wd%f~FW*UPyBeG0M({&4)}}W71kO zCS(04(X)=?#t>cxmW_bH!SV)wER!UMLA49gM6a^z-;&7aGg5WQi#$yS3Jn)u&b+B=W#>^=Kb1_sHQ9lcnFl2MTC8>5US)+ zaSAqLGSVZDV)aeLr6|Hjbkf>(zEd2Kr(aJ~7ya`!=muI;v1v4v>fPMz$%{2iYPnQ> zf3*rA)XDvuzt&d7x$AWi1$6hOLOR;J#g{yJHt{7Z?`~ZEbyGeS=8qWOlOs;xNgGXa 
zP$c^)cOoW;;Her{+CbO4Cpy@1_CXB8WgMc2HHxGlw$&V4;*NNS02YTS=THv9xykrM;`+M#*-_1=5${E(*(Hiuu|ix2B6pm4UGK^az=Ak}Zg zUXPO0nA9#~v1<0|N+R2>49DSQj{zT}K@D=BveK+PF!%)ujvl#@j1~%$PqY2j8HebY z^${Gg1%825S^i&d?kJm?)Utzc?p~ld-QKOT;wg55AVMVieFYS$(2atuzjbAA1Tmc# z$g4_Kok3tw(b2smwD`Sd_GN~8n5@6W~Oy$z$ z=7MSRlhURHGWGmOyuQTnyMacZ4a(Q5xa~lMG76Xwd4b6$G)mSX_=_l@;_Q@g!&*ys zSlAjiCeY2G(S1md3l4(fov3`mggEY37ZEKi5oL(dV#1oo;tHCq7Q{)aQ?ST<#}R~N zn;$oNvGPf>aQS!WWh$}yYHL69kP)*>TE>f}22t?MqLa;+Fid=FEGt{PcLqvaZw;WC z2)pwZwY@v6GyCqcN z!x>=aeYSWw`YbvZFc}Enj&HEBk5-EGlN}sQ+&TJt<(J&U
`X~m}nkNR5!pRsI8 zo(v8eb*1{cdNqgzlWxwGG`w$l>epT)ouRf};V;~RO=CvFFlCbo_hgw;i5A<)4WnFZ zZ%V%`+a{$-50gsvf06?5DhmFb?w1C_d_KS_W zdb0XiV9+8O)CYqONq#6{yzh)V+qr#1m~bUB1)s7v7kHfxoDv(nmM26Ek6wS{V_y8? z+l!J2wiLgiBfZ&3LO6)}pCrGa1VHWLH3$h%gR1l$*N)P`TLCFK5{kwSB}vVxiWINK!kl@O1OMV#PuT3!=w0Oj$o}g=ppQ3Km!yLYk1@7q3em zcMkoPQ}B1*dBqeGwu)`9IT2Skvgt-mXFYLc&v`W3lAOg${xf~fZU7{M@A{25jW-in z0BViwn?`eeK+Ukj*VE1QjQV<7xvcgX78onPdNFIS+27piN$9a&2zKvRzQK3ng2GfV z56G$vXO&CYWRK27LfOo_7d5kA&u9vmdUO~-g;y`Dno7**H`*Ypl8`Elp|RIFi|=={ z#5RlgAJiTl$6!C&0H@MmX-9N(K_rbWSV?Zn^k20t&bcG7(;}AfXt!0X31YH$txUJ8 zRDPfRCyk3Pv>5IeCuQMHN-SHp5*pemZEdi16Ycfv)Ci8bL#QfrAPIW@36dYyl&pP&>Q?;Ip;8vqf~N}Gu4FtwNYz+C zG;bOXP}@+f%%eX3vff;#>l9(9h`gay&IN*h>1-8YIKRxLqd0O7bRs_ z1pJPYc-9eAW7}`31UTAC)r+{f^dZXfIg6-W9-VrjPMO?$*-^=c<;svmw6tUuu^$bQ z6jTFw4@z+m^=s`gcL5j<~cc0ugv0bDa}^rH)#TaP>ib{33ml__l|W#^nnJ(HsJcw8PKSX8jW zi%~gcU5#PgngG9w&OksKe1(Vg0Wju>1>`Y<h9`%lI& z{*D7_{#i7w%$#M+$>~J5N)ZsO2fI~3nf7i&9?O>qW{tR3B*097R8{xJ_Wnkz5TRWK z0pl&UMxly#1)c_&8qK{e*AJ^>#~Uw>Ulf0SHT!dY+Y3p(+Bv>miULDH@1-Zu_2v`S z^>KdNqv?O#DAOs?o=b+&uV!>vghDif#FA|LkRA1-hl`{z48nm1weA)KgEKsK#x8oeA|!GNG0!eDi{9 zi5T!O6)}XnwW9M2@|l{}V+vJL*o&{IDW2gSJ@Zpw1(3E5m-m;aL<~?tbPmx%U@=@H zEAgrbe%%oySu^`fC5u_BDuP<%$UIt&pbhM$?75bupBy2#YiLEbIlEdfZOgi}=XSI% zSeqNt7S4-CJO!?D;L$$Zoa;-d@nmxyrrsE6uCVD(=zVHTQwEsUFg(N#=#nLEoL`#R zWIUnuK;lm2I{?(vndZyPa5mY)BT#hFv+{%1ttJOqO;#vr_qYWAR6)3Gk0dw%hpD@xj=S;iQSjs4YiJXo++#_A@LYORq8 zvmuO~6%n9ZCG+MOMruH@v_+D$^C0dEF&L{Nba|1i2Af#UE?{94O^rE+X`+G-#&^jG zQ5LoZXegUjgkEh!yzj&C;9L`{z1%Mdjrp^WCa+J0>v59|k8n?>tf&9bLwO)O>@9LQUx%KIxhbP|6=@#2a{N>bLmMxQn3*fwuS+L~@5wGP>iZV-bG_>_Wg z>Ef=a;HA2S-lp~4=km8>#lm}>kAe2cS~%ctTthq0D!D_iT9NU>Pr67pp)n_sQxtOy z`S&)syMefX>gN8FG2}~{c3M+5g-K?Col($Dc@i0xrqLpUoWJtc6~_YrE6SbSr+ayv z>X(k`$Jw{Mh!wQL)R4qT`Vj~dN^$#Wc1_&!@vB?E%xxZS-#)%wIxbBfZr?b%T{)V{ zMKYgY&&M-YtlL$>CUMDx=^s7B# zZDp;E<(Q~i+YjhwCJV9bJ}$g61}TczWy>VpmoZYD;VJDkPW>m*aC}a+IMGu&*wI$d zXP^rb<-Z`0rUH7ttVSe!vby#oRR|o$O+y{ZFZZ_?%;u!lQFXP*5muB`!ie<_1)1sS 
zA$9SG)D_|(dAO)>zhT&RZM1>kgDyvC&H2uVFu*96k{&Yr-IbKpt{Lhn`Ztdqa6GcbKz zB>$KIJ#gf1JKE!5Z(?tx_&IQ75@psuE0#|N`fJ!R;8lN)t$$pZvLe9)J&-BP z8;FUoFTBsKy&t(HPrlVQwt7w|dj)ZJQmAvQooR;)R@8Ga^OiSAIn+YqtXy~oss{T! zdjo{$P#>eH;{paEueN3EvN(K$EyLdM+&N_0ts|IE%)P4-IKBQ00%vui)r;xMoBO5_ zHwJ+)rpHt->9JuT;SJC8h1<`k{oVGDZ%wsZP4pTH?ubUmEzryTwKr)6uI}C9fvcQg zlz4H+Sv@?FLr&ZmyB*!b0agFP4m|BP%(&pf4dIFHiU+lj@G`>XMLHXb)Yb~{40hD^ ziN2gem5H$Jg21C2L>22#N=CJ1sL3FD(i`*$T(B0e>p1=7lg?_^LYVM*K;zuhFApBc z5SdXVBDFjIv3mGuB#_kXUw&H^#}2Ln4aba2$7J=zwWO9AG|axPJ~J*&E1FK}F|F&b zsgN3igO5~a=LeF~0}kj(wa3)XMA+rwa7?IX&H8;9O}V)HXR88|)qSNWvwsbed9jk> z!{}y-!cRO*a!#r@C(+m{NR3O45b`M>6U65hXK`{;FYhl;l9RgA%Up6a5KzAtLg}26 zPMOWQ0fJcqr^X?leh;}#5*Z6832X~e>ZkF3J;@%hsm5%6ES2~Y0y_Xv^-C>{HVu+a z*~1NN1u`n1S+{q!kJ!2?5R6SX?-e+72MefVxGf2xpzg=;=sbWS1~{2n z5nMr#w_<56SX``e3q2S9>b_?DwlZaza08KTVj3%r$#Avunc+^UI~E;{=W#D6!WFiH z&ht(-oz;NnZrH0ff=7kkzkOY z6J~rKXlG?{2y{6>c~Mh_fvIIQKCC8nrfemI6KFPg0dslrONcRv=_+jK|I6OHHMfmq z>7suH(Jws{VV)Cv?{i)!c2Dy$s~Zx z1i(aMt@W+%Tf#Q7=i)ec@;(0@Ag=88qU*{^yUd&fV32`RCs%=5mPUR((3?gj*2o4Lys7 z!i;11%d{_WU~Vwv8|xF8oWcGc;+6+NazmV_dKqSkh9&nLBCCz{ZHm2J(X3mzh#h44 zE#!$VGjL(5NhJT880}B$kT+tF#ewb;nmwEkw#k+s=e!ttv0$X*FYZ~>LUmGrsdP2m zA2-u!w!TH9Y~{-&wKm3+b8y3qr^5Zry{xYS`A(igMty36(e)9{|53yyI~ZUtPlgF) zZN%^+G`J%5?j!m1vElog%{LS|bq+j&oT{X04Q!-UFB^^i57t!)Z$ z+9s_mMn{v@R+x@Pt|0ZVpw7&5UPn-ql9FVQ;0HUlv4X8h`*AYC~VA~WjA=S=3pr}^8ON^jW}%k;)MY$yDZg`J0i`{84;gd$9OC| zcKS-wdnBkyUarZibz5Rz4!6{>f}KjH2^jF=(%zRA@p3@X^TXoKhg{)9?#{*1j`Orz z41Lf6{V-Yj@aY{YWv7-8#0O_{`pKtcyd$Ts>2sFyc}N>`U+j<(gu!Ck=MMoP?wwo&GDW({;EE7&oq&cG9$qkX!?JBrw-L}f*r;+t{ zB31>;Ose6btp7voX_XW?6|tG z*>s1`=3K6awe(0#o|CD%Sabib$zZTMdr6zK=Vq$uT4s$PRN58p4%aRmb*yq*eS4CI8D%(0E zO5p8@vUvfRcFt1@=)|(EGaRenJGN#0JlmtuR56ck7N&;b$jq;XeOv6WYGT-Q%Vcp; ze1z%J-gj=AGmvKX6ghdvuS)U~uymf{tl>Vl<3=~R-fwdNjQsE8`SYoR*VWzg`Kj~I zw&kGbU(Dl}K@!ZGN3}cdub<8FQRk=L%@~ij}sYX9-V02uO}nJx*Nvgt2u|0%(%}IcP$+-M|;SOs1pc9d;HmbdbHP!`kZGSyXfXjtY^KOUIr7x zM$(SLUP?FJoKe!yqcNX)x9wVYGd^F!y6S?jV_o$@i&-~aq@fg3SB-;?PD`T7ifT)1 
z=!u%KP#FHlB~`glEbkyT$QF#$P(ry-tgStN#=7Z&ox*?Xvi<+lMSF0!V#<@i)&cVt zd%JAU7QA2`^y%sTpc9DNN`od?C(J43Y|MXCQ4MeICe<763D`1brb7KaKBcakTMzdb zF;^QwEnz2R;)7_}SIwC^`~dB$IKV3>UBXKzq-NOyIOPaAqPmZi%6-^a?+ATTN_vSW zgF~NXOCZxJ5t(hQFA3)Kk!|db`D`)~{f8R@K(F6K5>d!EuRo0@Ue?T)Yv~vFyt8`5 zPjq$AFvI=c0@6Q=#`Lfer($^!n?bFmc;{?sK!pAwHOQkoY&PH&(vtN}KD>9jITf9| zkAKVfcS>S21h`~T5|Jp#N8WimC6U}aCSa8!At4kg-NjF9q)_ISnWHq30i954APKxT z5DX|&xSyi}901tC)a?c0@g5+0aGTfi@=m~cpMCOux%44dxR`vbLHhRe1EA?{jQPpf zu8d*edQ7C_6kCPvvW+LZq83%wZyNS}!kqB$luZGghnnZaFSz2JaJ_}G+%GYfy8;EB zAiA5SxNfzvlf}?3g?bMgI~~^wky;J()q9vaLkTpCls&CA3cDDwUBjGe_Mvlc>>mC@ zR8OTx!lQAhy*X-x`xC<>!;~hY`^=^O88e_q;hDro@|RKLllm?a$2@XkP(bSR^H&EV zBW4Pto6o5R(CPJ874IzPQlmJA%3ZTq73U#^ABi7I(qMZ|N~sk(^v-6c)MW_Nm3b&n zBq__-V3}fro~bQ#t+!ttVOq=S_~{foRYnnU#4hJK-^4O98IwZD?DfdFnY zGpI)rRw9RH*o14@0m*Q`Ih07LUISKv5btf!9TB(6%G=;mlVp|+Hdon5MpAhsc-_-v z+1kbwiW&C8?<9+C%eC0bH@3&Al&qwjRc-kjq@BDk52{Kc8tRy^kyZi}mgQU^@oKWw zKjh_O9k#?3dXzC4gLrCXp%5W^*t0S|sI=iDRrwN{Wa4KsM@Mz`W)O)LW)xQvE_$!{ z`+)g^SAaPYYnUam?bI*ki57y!c663KIiJ6c+Qh4P*zDQF4klAn113|@Erg_nH8g_S zYZr4xlLDnzd;w%OW-oy$mBq%C95Ie9WtxS>hU2}hQksN|ZKaqhfhYc9e*WVAu~T>W zX8z*#6Yb*5S*U;t#JSZS*b7R` zz-F@woVuzEXWL{>K-$E#s7h+(M@TkaQ#Ug0Va?q*R2EoqdHV3fio31o^6+V{%Ep(Q z!ghd9g%# zt<3grXNE)mfMV}yu}mZMKW7=>D9}PE{$qjY&$i7YAg$gL+**~kJ#rRH;_JA#sjnsg zBS?vsBrgH|Yc7Vrn01f^FiTrCS4CeSGhYh+-~5v`dlM~y{^=z9HIGoD2gGC?n$acI z-NWtuMymY z7`@tncS9F%lSuRxtjwaCoMo6;x^Q06NvUP%b#fS)gp;!?4thr>OPF=ML*+djnt@({ zM;uIu0Qv|uWKtp9wDSrL{zk~w zkxZTL`2#ko(s>;&iyAnU<-D57z+V>M@+ZxLZ(fl-x|3~W+B^e!t1VJ&UgyhSnK&%T zr}>EBkrY~M-wx6l^eA~|DJUNYg{9c*G-~!F^`yzR0so*k6A#nsZKg+*)q<%q*#@uo zw*F8PQL!1$yxKrq3I2|W#W+hbG(k#pEf>eD#X%!f>ZO8ld}Z#)Dzgm|OnumPN$akc zB&w%Ty}Fy?0@hdDi>kR*UxUk>6G6hFxURNDv@SSiAVka`PJA=r+`X%6ZsZ%kjmmvc?}X~%y!E>-u%3dspNSi(piHq84a1HUz4q4&f6N%zG4>3U zu-*hcOLS$M%EHD~eC6)(B5Fwf(u6b>=o)hb5n_QjYk(t zpK0&s$3Em%=*D)%;le;*t?^_7|3WNK!-(W8NjKxB$Gn!=lYRV9My{2;v{}}!@LaLQ z8l}i{q4DG{e!knjm%SGG2%FXEhr#90919Z9yp9if!R^`R(0B>xn~xTN&qZO*aS^24 
zz!t46FsI#XalDk{m{p8QZtxmtSSqb70+l;LO?n3Zj@j5&E@jhpSDkb(ZeM#)xq`(p zYiM=z%4Q@)y(8#XC5&lKQ!zBbr$Vw-3ELiES+!YgdIDig+u@we<*@Kw7+b-~SR?$M z7}d4#=!?54W?^Pt9zy^WLZ9O0JD{0lU|dsKKAVYG>`kArz3E%t?CkaHeElnk-@CJ{ zPUmPm&WZuCu+!@(753^ev+f_Guv-lyWlvSLthbQgl*c${#m0IfCj~;2cWv5g<2L;@ z!LM@EBg0KtHQT$H!!v3nPT4z9kBwfZXeRQT)*2-?1eXZe5rxW1jmKlSalC~EMY3d9-vHAA)Uxpmi8U1@kc!PE{pq{0 zhi^W8^loOK443nn=Wp{*&!29;o1LK%|HFuHC(-j&)HlY+qM7uWv)a^^rot%1+Y18E&gkYt)Om56~p9ia;w`*O2U46ru;kpwZaN`Z>rjWya?$W_K*NGmseem(wX?xIg#zj0ppa7Xd+J@^lW7 zWA!M@gTH_B(uL1w%x;Cwy^zm47{FDN-2q;u`j#$m*TR#j@&qoJ}%141r9j3zu229 zmno#lMDe?#h#vdB*i{+0ZhOfkI0yK?g5B-~3$6cWxLB}y{-wT_N+%Q73=FxmXNR62 zG=sDuX%Y?9pARNpETka@4+rA2V?-frFLx=1rj4)d!l3KXF77b^ji4@(G z`Wm|{?8}JVT7!io#cly#TSyd+-x^&pe*YWHw;)M3qqo-xr!!E-9m4iHoCuyre|-~f z6Gromw6(m!hAXfIj!!3bGmR|Z6u(3DLR1M)AbW8M4Z??LBjz!NpL4|UJK2*<*f4Jo z{yQY(2tL@rzUHntx|MmzS?Zv!j1hI%QLNAUu+JO1eoqUfn`W`ufJXipS>|vZB3W1NnU*fD$y%EALC~0gAMEfA7xyQ% ziSu4At`k$!ChM}najobNyo!!oW0dB;+P&|}diDA#$-qJ{hT`VgJhymVnLAf=O|yMzt7C|;A49>;Cqe%X4f z%7QXZ(1pzoxdZ;*71e1J>u{ns&HU(x=t}1u5KF6*eL{YA4k45+{C)!=7hV$-!JF(A zM=x6l&Xf?0DZ4dhA7IOph1aYiUZcho1DF0^KoIY4WEAA^658;{Kq8L0=ulvk8GEC~ z%SrL37@44l= z#-I-GP3zc)i6E9A#v9f!#rX+_+_6k1$8nCCMnBuV|1+on>F2Lc{~=qn7ZfXV?cK2yHu8xJ;-3 z^amq3kRE|B=|NH&0;vJ^qqE<1$VfADe*j`UeY)0TBRXqo1LM69f}stu>t<~ z@w|Oh+UPy1PmGTE5M39K($i60SzxEPH$5DrjY=)rrj{x2$&}Tg`r?_vXhK<6tS_Zh zxYueJ)nTV>4AyJSe|~MMWs3 zyC!3^hY+q6&@)D2DP@{6!+9MOXE@$lONj!@L579I?~bxFGSK}4jsfxIzc)q)jTZ~? 
zL@Euz;g+UigF1hTe5SlIS}fNy;G86Nx0O94Ap1$yynsqq-Mbf0liAmfby@c!ZT9rq8kuWw^~7v#+A0KuLth4oRu0Z3N3x zplScfMQ)ZyqP#JC%0exP5Yl1eNzFUh;|4PH2v+AcZgM7$8ZI4 z)qUtMRfT;1<3`~&#Sajk2S0z^3z)Zg@jdJzHw|>hQ|aN3Q^LjRW^)jqK|NmI=-AfX zS$@pEf>R5w>o`!qaaYGG$>5G&Wo{%aFzM6~Ii}uN{uVl8dEvsQFbFg;Mc z`fwY-@N6+kr8V{=01$wvgE9P$74@?!QX zVn>2V=Hy3pZ!4$+^WyT?n=k*S_k|NEz06r-Hpp3(7}m8V5e%pQPk8N}%qj%(1`=`h zC7Q86XjS1XI|)b zo&aJ%oxixz=zN)=%INwO!bhabm6zsdiR8LUh~HFc^9ouxXW7Zu zCu?tS70$9>yveV;D;xuMot^7wa4Ylj_6SBsLmNcBnsMoZNSi%DBYm2{?=3;ZYog%U zb;Nk5ffE3$ zp*->iQsjyPcb4e01sJ<~7Ls*W#BP=@0ZCpIwACd+edEt)MH5`N1Kd*)2reZH9}yZ% zCC#b`^NtD37nQq&)8gsMC~be00Zu`^WZSqOJ=IWG%Q-9^`Gjt*hpR|s}(6qYRrwW9~t4Y1nek^#_~8cgcL zybKvFKYBEtczG{uYNj@-XL?~zclfDzIioAr{C0-LRjh^tn95fte?zM>;aYq_aC?CH zmOY)bv*8doJz;-mWVx}^X}~wRvnZWLiI)PJ1Igb{aRW+w088q*b*Lix+2U>z4k}&) zYm*2We1Z_3>V02R>m@CYvb!1Y;UItyoN~~1D$;Rwq=>bI zVlO>LTpbGp_cz{W#==9YEz|LEZ@=<*>A_g&j~S>&9;nCa)7S4o z=+V*H#Fs?zJj-t5hwEy*Pg0(yrH3B?)U61(530@?O2K{=+dlc8&n_hul5?6WqJn_nU}Tt?ofRV@E$fZVPcL# zC9oF@=3ru|{yb)C18nN*F;!^CO~xW(8UWo48}SHi8)A)Y#@NsJgVAdas6HAkwh|Q5 zfPyM?MK<8Z#fVZwaGGr@)@k&ox;%G|-{?a|7@l`_LiGpQU?!|GjEVtP6#z|M04^tZ zPzl+Ml2i%%wq%)=W(dc!;sOXC`h552ZJeGhqBwH6Mv{8w+7K9akX0CZ7m>1v-kS> z37|IP+vmlafNU&-HZTWK#m-%C>`yexUm-Xnlc)p*5kVeOP^=&rvrd~F#<40{P}s_v zFqZFEFV^pC7ulVQ&Ldj zEj1^`L-u$9brSsTWjj`;GiS7WQOxqx5j5TfDb zBDmCA{J5WAc`%g4uZK2HB(b|TBA7Trq9<59W(#qeO@YHW@f8k|F3@DRa~a&QEMoK$+tOV5?yk3e7tda$xP za%;hu*uDNA1KkKG643#}C&@#$VrQ+`Vo;C$+u%Sr^ zI5{$yP7HKe$eZe0h4e_wjJM&XPz9`?!4W1D|8J zWg-hwYTrK@L$!C`d=r<7ky3U2#8wcToi+FvtIplhCvkN=ymHI@fCtUhRUehn+1SG! 
z1;=iyioyX+7)63hbFD_S_<7aF14{eCBgRMp-I>fKG&g(!434^r($GIZL;rx}&V5p1 zXjMrJ8(Q4T#Ihdl@W{1z$A{WIqsm%gWY*MG@5jYGL8`>;onuU7qI?oClg2DXh|@)H zSVD>@Y?3l}0l$!~OA6t+F32zyt^mM3aKrRbQ(_L=anzc1Q}fA^nQoQwgj5^6FG%aZ zEq9mg#-mLT+mGHQ)(fN+$~U8iY5=xvWViEv55cQ6)J0;(TW=<+nQ>9A)A=q@1}&6~ z2*>NZsVb5iC;@f`u^~3N0RWxw9@y4<#MQYRR?KDDCQUK@K-bU(!R4>}GQd*%{mUA& zyJ_1(1oVnHSL4tCrx1z6!`(6P~u)1xFG`3q*TrP6HBKDM;RRE9ys z0z9O!nMi5%r)j8E3#fwL-ttofD(G?6%_QjUC56Bva!sPm#$m)N53eri=wdHc8e2!G z(O41n;A>RW{W`)BMJ(#RWT%ukcLWe*^k-mN5{(Yjp-Ei~H;Ec8ifF^{L30RyO)$i1 zN?Latt1Qu?stPsG?^%C<{hC_bh(>j-&q$@4?OkPDp~d_i-+o;?IiKCcx=rh73KQi+ za6f{=32jTkw=tMGy`wA&zMp7<^ijftIAuhTJYZAVUco7_3}KVC4F#1hRvug|Zvw4@ zeflt0Wt%_DsQ_jj>~W9~oFXUzu?MuZt>adOw)+gyI@HZ@WjMgS>blD_g?9Re&?0St z^QavO_K`|y8KD6x)egdf$~b^e8y;&NNgKY{*quYmrRP7cA26 z!{$d>J>X?5Q)K@LTNRPfu_UXI9&rskL;`@_(FJE?FgZFB(bH^+7`DG$u9!M?ZGSF9ndI zvxb0U8$9ZtZ3hHEq1xuNqE<%1gUHb~nu7rltZgksJj#B0Yadq$)FDJTu3{WRS45ve z{%$M8AG!dUenoDl7Rg*LuG9sNw1Yb~yci?8?c}8=?u9h{%Eil8N0lxaOxo|MvEt6O}siX%1ZNPim zL6By{2N&djfX_9BRCq)BRz854U$JF29ySr1QNm#!BB>m*N`!iL#R)3ZUUdZAF{=sB zvtNg7ZL>2W&E0?5Lr!=ZJ&=cz8SaNH;?vpi1YeesUfdR0shxhRh^;81DsAzUKj=v_ zkx6OM#BeAIGN&5Ym&1Az1mDFXt@Q#b2Mkq}z^hF_loxs`ge)m4!QLqHLQ;GyAuT6u zMZ&z_sGaJ9QEe-vBm_cM+Fgu|2ytZRSTxpeqPmt<1FP+uwo|#QZ2>_b210#Wqo@Uf zn&eOas;>nskbiwy9F5o4w0M1YbRbcGnoJD6oJ|a|Ey=`aKj?AnIuI8zy%ph>KN!hW zxZgA)27ANy;X2e?KkhU84DBCzk2b3&^vfU*4A?vPJVL>uWSF#@FnG=_ANgpq&prCJ zgcdi4l z3TkI@;N{598^JPLGgJo4CADW;NQKQHbW;)SazUiLNDqkqvUBAL9eenuFNjxuUZji{ zu|A6<>6{9;6x*76Z0oiyF=P)1qt-b^>;`6t0R^LyI;ebw)0w1BWv{>&LhMG)6Fo{( zB$|Lu1@Gv+P$QbPx>-M$Lt=bX3J&J?f~HX95?> z4g$-&D0lfX&)GrS0!5Ie?Q&WG<|hfTd(>=OGl zr?D?hZk*5;tKpGPY^9YdAj$Ncq?W2|kf$ip1Rdc)&tFv(F)z9|aP8ayOtIGzXo(Tx z6|Fz&V=uY-qyGA%WQ4tfRG>Wa7c1x=lEfF0PXNHeTt3+J7-htGt?CfcUaUhZ7Hg$= zL!|!6qDgLkC*=t89)w7lr3@Av8>^sWN`E&{zd%+h0ywJ@l8J?wHCQBpup=keWgM}x z8QRhgT4QTNX%JAm{BDJ$g%{TQPJ&Z?EmMaPKWdB zbeehC?+rH(i(;pXD>t)N45z>K^o(kzv^>RK*!!QS9$}p5@ z7wUJ?~y!BU@Vk0JEdN^HjObxp(rB*yG2;Vc$)orK#RGDr=gpk@+d=}OfLh%hWr8Hk 
zl0#C(7~0cgn!T#47r=5AS5mxKy2GAK9=JK8f!EDJz3Lld@78XOUKqm&>Azz3&J^0_qFibT zY#y-9pV~Md$UpB)cLA&Wcg6sCJ=UTFPq{brl1D4}iOdQ^yLnugyIX=Y4n&zx;W(Zo zX4Ufah00I?%^cdEc{EUSRGgE2zfGB6o{$W`UCh3O3YV3Z8)df~{eUwYpR))4eERO| z=PQ0>x^QuLdssLxZnp(zekTjZ@b|AK-^lED30uF5GAiT$Exf+XtAwnVHNlm=?E$lN zV%G*hUAlYO;#mXQaN7f!aG~M`LstGu&ew%YtdI6g!_Z`v&mMZAPhT_maKHXDTZ z4wgw$`-OdNF;vC09DHOH^^nhm%oQ_+5WY&(!Old6xSr+!^&$51$VGS$6zvd{5H31LGNO zEy#6|sFN4cRx^$AN>f1+G&3zK%XlXei#t195yjv}>ieDQ#pKHSN5>koRWBwVf5_co zdt`AEB#;N;ulTZPz0KLYC}&c{E+4DPUU}3^RK_jZn#uOZ^D_h*nx9R20FRtHSj0_# zK#zEL4M@CNoa#+3ULDKt$NM090cQ9cNIwR4Je$3i0uVM9z}rn zpF5GB%La|3Y|s;n){<>;KWeXz+PELeMMVB!FCC(%(^_gEX%2yV<>I9%$NxQ?79y}z zFsaNTTGiyixJy)BQkjpoUe6XPNXfDvIah_M5|8+!9+gy~axdPY8nAC8EFk6y&YK#} zBA)q{b$DL2(?I05;XuD827K--yWcs)? zA7HDqseZ>r^Qg`~DYs@1Yg+wIVXIK!@qDw&!ijjqHfKQMfcdRMW<1Q+bNV+F z8R}A4ZOBZfGkVk<#TQ>F95r3IEPrqur2U*9t>HqM8FFQdi`wg&BXrs{B6iroY*|e=FhTUolWffD&rz^2A9bJ zNB?{lefk-wGm3t^CRD6xUzXArJ%OUq*u}*7hr745593eX-#@;%|K#F!^}~&hcon(W zX1k^;k`DA3ulLy_nPS!c^w=(3C{Y_piAc8qChRrr8<3pv|va6-KRrhlS&IBY%f z*Gxa?k~QzGMGBomPiScwYe1rDjzY5_BFn7x5^g>eKAjh&Bo{f#;__Y@$vfL?6sJKs zfc!GIHs8*m(ZZGM572PSI?mw1R%uA-Ytg5e!Gr zzAPyR`=9|qQNmmNK0>)daUU`1NqRk=Xw$p9%WvjZ*@AMu_tl%(!?)W-ADGj-yZ7GQ z+!2iqnj6{O7@8}DJ(=-TT8~CiNID)NS`d73+8%TmGiuZ+Ldu2)|AH@NNn}NQY8qKW z;|eSxOQp?OU}j^Yv7T>h+5|wc)a(!EwOOPa8o%m(^*22jISwT>XEhg>%XQj4!Jx4J zOcxE-A1%!$A`|r7Fmp`PCHrW|X&r|9#bZ<7RRq|?6FQnnY9G-PO3z`2J)=;-MvtaN z0h<=<5*x%vl_59&3Q36q$9REBZNwoS#;?ev6y_$oR1tm?SBdx=Ux^H?X2K1-ITao6JZG?kJS;i_zWy!iZHf-ILYO^lb6P)WFYxp4p82(ju;4-|nDYb!-MQf7Wr! 
zoYE!h|L`(Ja5h%4BJ>NhO@|l#0IjZ$W!D@r1)uW{iU$^``jA}k^0>Gi;@1a!WyG>W zKppEy9}`FA#FFcd*}Ay8!Kq~q7uZe=tBU$UBu#kY0?h6bbjy#cVPhbsEh5O9?L;%{ zSm4Or`|gf_X8Gk)4%Zy~a)8$v#lu&jo$c@`NE$`5TC*evIrt^|#PMY5 z@n>;hH9J#=0GMS-ubjj%;>xCzjwbBSYkJId1`E`I<~D|(#ZV*btp1gY-G?CtRoK}U zaR6CG4kk{>$~HhHLlRKcot$>Mz;+*^xW);z6^eWv!WW8=G@1JB#gCY@X*agr;RQiUlM*g9+K5}Qu>i=c7{syD!))ZV;8}#;MhHmao;uu4}rlLwK*}yL8mc(Y$(4k zo`@Ntp2Q~saC}&3LVPU>)>ui%uetG%X$<>qD~nWN%XC6nJ~L|e^ktZf)teF}rMhb$ zasj}*z+(H^a4-`RNe?{sA8iL-A|`tzwr+_wyJ8D#+_7*``|urv&W<}o@SKCL+Xr1? zUsb_Veh)`G-x^l{)!$vkJaSVU830EW!x)+5<%6ls;ie-I^0BHU^MkfJ-deR_&ym== zg`Nj#WUP2m>eAM|fE(EjMmn*GVT{i)fbkiIB@Wo<%R@?!aO;*83!sDWs{&-2yTg3+ z4-3cdOSiyTe=zogp{7T;Lsq$x?JK;2*mBu8YU@-G2X(G|<^iZ43IC2f!d>xog?;sg zRi_(;U38wRBA>TJ$+}0vks}wZ;(%NWoqNV)-3VJ~!n!iBYM=M&*y>GYSDj$d*fM$} zwbQYH7^?zYdgKtDkkan3arp5BnKq)T9J+ss8qjU5e$YpyoE$hG+h?R&pgkB%RdevyR;)-W4Z`ef zwT84hA+rsued$WQz0Pu!BRfP*y{&A@1I^W<}+jOZ1(r~v%6 zYCIL}QUe-VSOlvTHC3yu2;@B8b)gl`Qz;@(hFTSIvMQlt_YiAarLeMJ6K;=_^Pr!a zEJEo5(J8`RA2|>%K-6M7pDV2fAOFeBsG-P{TNyhqEq-_Rv$OF%n^Aa=qgvy8ZXj1c z&RkBQLWcx05lU5Sls9ZD*901Il*q4*IgNC?6I92Zq2f{P5G&x>siRBpZ#=>}PnDI6 z>3R0);{F}YW}MzZ(CJ1MnROgw+I&AZ`+hF3Os8A4VL=r{RRE&-6p@v2n_X%z$y#p0 zXNZ!wAut{i11xIYn8b4}h?577Rc`7=84;?A=Z`GmyzTk4;o!T1AiF^Ffoe8)5g*(= z`QXmj9$Yreb-}JZP&=J{<6dzsGwAff7He~ni6cj4cx(H`CI`Bl?NVur~B-eI^wLl zS@;PLHPqZcz~2vk&nXRlU;cxGY@;*2Z^IGVI9%${e|o{cU7oZe{?%u1R-O_b5UfBI z^mzW|9Cj)~{H)W>RRRD!YG3`>A^xb@1CO(Eko}KFEE5_Sim8UWMZ0!^@-D*MgUpcc zO(bI;LGItm!!9wzy~Xo3Vbdx~%aez=ciGG%Igm_A#o^X{4HqaCwu=$n0_oyc>Zl^+ z8b^m8&hwW8FA@&aaf|?AbZBfqV?|m{5K7V(PoIQMt0*KtC)#(suWC{BEuNPHn^sW= z`nqV@aTLg6#CvYE+)IBfrQe#YvG^gn3d1i!QsBe{dRe)%>?R4XUtSn~OH|CQ*|P1n zg&z9?$)+pB<8RHs|8o1oVn?8$t1$eQ$T6$geGU{YXTC^B$*~~OcR7Wo_!P2K$U9CU zkLdfBEum@^G`_j0cgH?GLhpNMXfJz(*u`5wJ;v@_T~uT2tDySJjF(9iA~^17%PO*% z2K}s$3X%B<5tiTQ$uvAfUlURKy~_^HZ}GeourXIf#>_U!O?U?9-R|tA@S2YH&lfvF zHZ7#4))sbC@3+as5J)3vI#89;MzR-zwZL!E!(k(p67;5UBA!kri0&lUP9vgWsz6E} zqMLCed0A8^_P(DI3UMF@=O8j|h$cCQj2;qKq>SH=EuOtW^^+vYsf-c#GR4{#(U7R& 
z2(s$Z`UG}0fy#7j%L9AY>l7p>nhe(M`60MZ*gfP6)UZqwT#XgHcduA@6ZgQkC8;zh z?icj*kl%#$rn*tY{Sow_n;I{L1gB&xLxnw9XXU~rTEv~JG^&j=#E0Lei(vb%V>(1x zoWX|UT907hABbD^e{QM@s`J}KYG5Rtxn?v~jv9PPkg!JR-%FQs9)9dtQ{WnbFo$_VnS@W984s`;6Ji{bg^WPQUWk6nGpks zUwpaLlUjeKzL^>{V2P#8M%;)bfMjF&0+EEul8O3@y}d~T1sAPF8_8hNpR$vv7gE3l zV^@#oQGXIOI3sMxOtaImOlSCFq;V%dw~W&A?cr!>2>2AyrzjI&>qwNQBAjVX(!DI= zSI$P>nn6seWIwQgT(BZi$@0A1l?RoqB2cF`)Ug?BL^xy{$F0idTS#VOCozs+py3+r zxySywWO^AC(_vQb5}Ld`zHL#)a)Qe+gQN=D^k@vt;`|Zz$NpQk8c@xyqC*_dsYxkl zW>fqaIA9xhkboWxl_#K$4WPh*LSafMvNcFX2Ai*Px|$@dDxMgFSLtHQ#h{tCJsixr z1I}0=+X8EKC6XYkYS^FFaM;v2H;s&(gvKlxGb)W72r!%0n3{p#(bTa!j%n4ZUN*gp zGRNt%lV{m|B$rK}P24yeuL9RBTRh7?g98Lf?R@s&ytdXvQWJRJ2|A!Ts0ap?zm7me zbMdp+wR_mS77PLO#ax@Rz&z4mcM{f`iDo%LAe%*av1HZyayBC9v5dQOoQ;N}osIn) zNIF}l+CXr6)snT?6U!ZL0l8quK#)kd^ zz?I~rzyEAS`@TF+(B#G4G40}6;ltGY#r-GmCub3=yj{7NMCg+9BFaxfyWNkE*`9WzpqkQNGT4$3!7<;Crd4mql04Rlm^Wb?3rym*=?u)+0@_) z7FuJ6J5W+jxltIXfZrwgEzj$yH7oJEw%=>QYG*78opp1zZ8h1dbTP#Rr^*=l=5GuZRa5db85V@o8-am+~uX&kcc-WD#R^uR2M72S-oMo zxZTwjz1D|bbi5@5e^+t;A`K+EfK~_r?@kDK_<@W!l~4q>Mw~!7!Xr-WyEGEt=DkLN zSrvsPSt#2<&dXt(Y87$d0@aYCj`9@8YCRH)$jy;tEGO^HSbGZFB;|vp>ezs=gMFxB zGV126GRpi5=0xbp@Tho$8`n}G9Ge*(Ne!68Ts$_FesOm&qnYSwu(vp8|H+6Nd>>&R zNnhgSJ)j7ZN5Wq3_5I-h60^*%Fu<&c>2?#GyZ~lWAPdm)0@x>J0fC|KH2Z^^HWX!R1Wp455JFFF$bimZAE4c*b3~FlepNc1MJSxJrm7kW*W!4u zj-W7vU{op61Am1w6=|(6SrB$$s-2bF`%aJF4292;JI(4W-{~;_$U4ALdX}cV8S^7FF`7-MDRZ4*5@E ziM(-sbjO}$?L4bq-?%NApzPZJ7j!x7(+swl#SUpahp4YJZGay^*#{u=d&#UtUUW~x zEP`o^spw=UHd79Bfpxb3b zq(`7n^Y&KZ&HYF3_D;TA+B^SZ{>`Tw%r~O&INNx>_Z8cCzISwfxA1m(^3A7@&KDmB z%ACN#UO%0FT^&0gn|u9i?z{V6y?*xSeD}`z;%(c&mUP(p(~Y;=ML1^hhP9~w)#q05 zc2Yw%2>)@o-=`vNb45jH9SJF&x_cKfc6ZINp6aR-fVOny5{L2zn#%bpqUkk#NDtaJ z6^|)XNC%B%G<0<&75angZSNv2h9}x|%!&CO9dlU;F@*MXs=ainX)*Yrj{p1>s&(^&C9il?qPgtk6-OmbxpKm9R2IrBp5{UB$h+$))C}xK}Ch%uH!hmgI7>Q?U4lQg%vf3%jvwReDVH z_C^8!YSYJiVs;_K6B6wGbX}mJ|NKMoQA}0p21xfxc3SE zEsHE~_FFR(8n!wLRAQ0Iw%~Y+TC2decd$hjKOCW9zK(GA`eoIpyttG9aBC8USgV{^ 
zs{u}3tX^!4aS>NUth=gIc@u$Eym8!gw=8x~L*J5M-9vkSgv)1%U0MMCBBwyO#x?S#Tk@wiPM;Axl(F1CnS7kruR8d{e_c z$Z_4g=PomPsl_TCQlj|ox5#`D zdcIE`5uCPh49bXuZFbZP#eG`@3$jCI5X`F~T1RYz*;cUal4@Q~Bb-D)QkjEY=n}em zj}<&aFsnsx7+vsq*;7?66r`b21$Ei2vK%9a~ZN6Q9_V(!s z`LA#8JUE|NBt6VasE+yFSoz)B{CD@hdV9D`?k)}RX6u&UF@6+?Na{^mx{qBbMg~MG z{zC~?4jVlH^$Zy;3Y8xlJ=#^irvp6E`x7{_$B3AaUefW<*iZiM+quHGbCqvrceQV4 zOYEO-uNT|YkXDg5ONPr!u7GIliXHuE@#h(2* zH~O3XZyx_;_M69S%x^lX9`+bWfhw^_^cj0<51^>+tEU2H6TB=IJ9g8@ zx_QW+@5fLt_XaARvA^DM%}r6SW`|sV8CBpB246T#SDmE5xneJ^xRFi`m>@?H&AhCE z+SRp=zzlm8!2~lSiKNEnp%;kr@y8!)DbEQR9{I`N{c6}S<0FxX#y)gJcyye+-km~t zf*lz0ejW@qb-vDhUHjA2pT@pE{L}c?l|N0ge<#1*`%_l?`shy+?BmIwCcdtG{p{=F z*H8X*{p;r)yNmzZum1igKA%O1amX9`apxOqq`~emdQ9(&H!^hbqeimFOf#F%?8qSR zWjFPIZL$aU@&77*Q-*&%AE90of8Efa9vxzfmM1`lw<6^+NIR7N@;$u^HjNoaYZ`MiY2w5B_p#Q@g4UW;#|0 zGy&u_dh4}o_5HXfa_H|YuF>kCon~)o7jgt z-az9D1~AIRjL%~1W7ZZRdB{F{p@|pwjFVm;Xkd`rkUw9sVA9T>;SHjHo`M$GxUa zya%UE`kI$L6gS%3&Rvg_iAR0Dw&DrDr;@@4PVX_bl+G@2u;XhJ<0!k28@n@&ogLEt zn!!YIA7E?+QldfAYX<>XuF>nM%gdhFVR&Lc{i-4E!w-HBPbT~>8yIcC$sY1M z*;DXf9gZ~)zi1phZ5-Ze9Bi|{TH|n{akz{-7PZDfk^KYv50=^Aj@=FTHI`CvYX_r7 zI?jIhG{84;bD(2`Ke7h>_3dQOh>`NT6RD)~W1mX?cMt`Q!)o5+=K_-d{nvWh|KUUA zt#}_vagrGfamIo*OXCW;0 zKldR*fOoWG(|$H=HP@&{$OZ^6&I(b;4iGW|Y}JO=W*I`j6~lef=5 zf3vc1eq-(YZpZFLbmO4>-GlMh$4{Y=M+^4Y|2`Hq28<{(VVNxyYBq=LQIDTRqlw{^ zmhLyS)JQ6A#I!WGt&jN+Hyj->k&aXdyJ9E_S_OqNoX zq3G-(Yxq5j5)dK8wvs$i51>h19SLl>284G^m_K_DYkt@=xfE~*c#r~3`;}Z97 zE!bNXBdzVI9_>%J@9nl8O4N&e@F^ha1b?!X;*P2VM|67U(GyPp^av= z-b}pD1Y8C6AYjxGeZp}=aPZFt@w5=9j%#`*HJa(IANMDf>jSgiV<6V8N41m*Cj=z1 zbOw20u>{@|;gCboU|hjKV}dHN!d9FQBmCqPb1z_YgDs;OTvzJ_5R?0<-e=x^y}vo=d{D(My;|VhL0% ziyV|nkW`Vpoo!1QA9Gq;AJa++VcS@{Eh%$Ob%?>w+mq!o3v?(~jHn84Zwxn05CknX zAF@J3f^G}{%EYqC zS43Mh1L_$6F~fG-CM{~8Kz0l0S7k8ch-R?*1V$}Ee4?BWbOZz=Vr)rE>&X;;#wW|N zTR>yNNEv+@Giku7@Y93*Y?GcTy0r$LnquM$uDKBFqBh$Yf{F#;4E5 zx4YAru*Bqr_B#Z1iY_t@K7v5lPmRi#*~^YWztNEq?`(# zze}3VvQTg=HYMoH$V-yC_j^J{qkBb(i@6mLiI`fsxc@}ExOYTBulH+^h;#Si%g;aL 
z?!4cmC;I#SV{wfT9)qNwwUzG=tM9)oT-=}3F6QzdZeNGv7uk;c+J~jti{(w#Jf1Hw zC^fEij$1t@)%(Vi6H=3$kqI_eXv1vd$su~L9+ooxwCha&1tF14oV*bl1MhPDgy zOSOI<$`IljPFke&?LD$Xr|GyOd42AE2atu>vZ*oDPz`ge-Ah6Q^Vq%2Ph*1E!B$Weru-ajJj-C z{RS@o(4^Af2-tDC&t3LGv&8;k>x^cTmH_d8DIC#J5`rWk#!-qQPqG#3c(6?yVi%So zsXsx#iuINMHn!6gq7l2RcqL2i3QjMJbS`V40Qpx{sTR>FFe9Q+fE21tQipJjS!$xZ zShi7El4=oqR;}kNHZcjhTI(ynT4&#%VMD(lWSuO?n4_C-60)rm*>fA6I!ep|bmJpYat-U&Y zRc3#moi4troMxR&@QQXic{=gx`KzZ$%Bf-Q&KMEOfkCdlDnkxX>2!v*r989>B$YJ> z%SgrIRrz%8)d`FRsRO({9*e1IRkj^8{6%saUKnepd}R<5{dN5jct7;RgZsoRPBsq` zG0i}$u17?pK#5|Yevrf%RS)xK`;vMLep?tFb}A1~fz}*Wo+fdL6HzRBlhhWPg&R6Y zWRaRg`!JIYg(PJ1+7xEGp^yYuvyf?Pmb`~e{T%!2^8jVJyanAY9`94a8frP|$Bol4}3yxC8aZyNk?qJZ+UPo<=<(*zCrwxT+ zm*@?Q%rX+Dk*)y|1Z?el^HMGd$mrVwF%`V!Y28`v_N_|~d}@B^hp)c*LoUsve1 zZwJoXoq77lk??4>thR)p(skU(wOM)$ON0b>#lwL(cQ!0BgS%@+Pkh%BPpYE5h5bxZ zm>KgjHB&eb z6m~O|c0CSLG0M&+F^~+^${l6j@8X7V$B}O1lh6Mgb-&nYAZiyM=QbMZjyy-q@3Ve) zPn2BSv|C~C=oqcHfhv$LjX$An#Qb98*6bXw`svD z9m`zUq)xJJj6d!GZ=BL0`089M;!$wmX8x$Rj-@7dxT)%cO%}li8X0p{P|N0wbEH717?%D-^c8o17O9vw|=6BTr z6k%TY!zo|22+|UM3lXA64WX#0>0nFFx8p9==2&HBf|n$AN{P41n%bFhre3DOHNFOu0#a15o&qV6?2)5HAu71jiNPfQL zc%oyrOC2Fgspb?e;Sb~V2bb$_jMiasQ-SUg4`^^_bo;S~s}HI1dKw39bbBd88qeRn zjtuop_MeWt{qa(`Y#!oCxNEo1e%urt6yp3sZkL@YCNBTtlS%_ZF5X5P<<~Kz#!0U! 
zBAW(fewe0G7rr_e)6vC6{^3vl=^PFN*8&{bPW>4;-1B9< zU=qV@CJQP5shp)lwuxigprJ!LpFN(e5f}!?76KX$oa@Ju{gG}~jHK+^KPDdPIPyA5 z4=iF;Q1STEJrwqcdJHhSa3c8HTW+>PozAS((+o=%uhv*Csvi$6n zi^rJ8P?O+@l0$KfIS)==L)7cZ$Xr0x4Vg@wsOz|24g$92XR!X@QLR7Dd;)6RLJ|Pg zb#Sr&CA4o{Ekz%=e!#=K;iEATl&Nr?yn2!RRacUNbR_R-Fn`R*{@p9@3)iU4J^X%Z z3jljp+Wx4R{TQOvtK>DLU0$d4z|x?(nwIg4>Y^%ll&)HM52v4hGPgXu98>Jlm}{$ZX{|1 zg?p(1UkwX4O2>pZjv=WAaV0nFT}zu@!}cz5zit-^d|X)^@>1akGS~lKAcKJIEt2oo zx3|d57@(9M6d?p?Z;{NU9>59@dAOo8hz06_w*A-!UF{lNVv6t`6|_6-?fzkqH%rgN zWpRRbx?4<6WK<36bL6(RkO0QvMgn&ifdtNe^id-e&?Nm$)e?G#?U?b%H=NC5pb}{+ z=@1MYO;UC6Ms;wtdSRh@ewBP!f)D2l)xkSFCXT)Saa9hnY1kxgFvMow~s!b3l#`)%D6+fFaQWJx}J4$4#Lj z2g(k*mGoJQf=5u<2LR%ZeVk^djR z*myZCz3hE?5ji~8Up#%WD7}~tLEa%S63F0T54W=$kHy1n-Yh*OdvXqtg&oq0+Ui3rH0!FSFL0^ zn@q|n#b{AWYyksHY|cT{NMI>QX`ao4f-*d;vPn7^8+?EH+`+Aadk3~R8f2Du2y1Pq{6i@{Jk-?h9DHITJdzNBISND} zO|f=(jJZ!S9$!ck9of6su{)ly%940@HSe%t2iP+b*hGZ-3#207n5{~il2D)LbtxxH zUn(t%sSt-)0bXS03aQhJGQWsnmkEry4F098P$zpOfo=?hs?(lWHw5~0@FwqZ`mq__AQ zr6RdjS?@!3lC>4&B)L#oD^%9zDR~KxmZWaJm1dde2;?MxNhumIUYYi7#pNc!?O?rE zrIQAR`P-Q(J;+gVV|!z#yKcFuGxxWd6RbvA(&ThA#Ix8XE)m*^tj?d6)-(`>3}^RR zboSdEtP_rA;t&l1QQ}rd`tcD{Ps5_Ck4;_DgL&FR?&oO?@yw7PxSS?+^N+0ZVP+s@ zG?}Or3&>(}E8I@FfP4j-X5_}OX9hQ6LHzar{*t}Hz5ZVYiA%gBxh^vE!nCVG_q!bj zYLebPxW0dRf`)-Yq&zf9zfFyjeP}?3$6wMU{TU`BNI3kv(n0!GKV06xPoo`qI@<>2 zz=GKj;Ufc*BOOw36i8--lNxF^f}h*$9APT*5o|yG_dE!A)?Xo+gpBZi!(|X8kZACT zV}AucII=E3wK}s}QV{t+t-}PcWr58G!&nsUH&j!B+M{@@Y$TN5g`VnTtHc=(;$~9pO^dxis~N_yS_#cMuLA6vMWg z(A@zzxd*Z23k*#95voAIyB;iAUlIhG>l~!%u7lfG1l1dwT#beq2RY7HR)+%J5XmXK zp0TlAfNduP(c0P?6Hpp`gsCQAcGEo21t%5sKKv{Sh;mJH`gsAN%=-YBiei|5X*Qqt zWRnhpnT;~h9kRfzXNf!P`wQceE94&lL~BqYR*yFgMd>Ekho`O5nhhKZLjd!qM>66mt4UVB{a&UI8IERB&z_ZI2Zyj?LBEWN)uMdOO@5 z1$lFCYHF)NLzh?nFo0V!eO9Dq&9i4lG38joP~op#30pAaX`n5g%jj9FHAd%g%xWc* zS-2ImO$BY^1c-cz1BYjIBN;pU(MOR_!fk+7QpcLe=je?@yAtm1XQ5v02)nDA8es_A zWy=ySRf9vRa+(aAl@*qDuG=}5%&MXdSStCxU5mRC31Yi@W=(r6rR17**$^%~t80YZ z2xiE+AJBr{MjYZ+L-)Idn8TmdX_HM8gh{#R 
z_27-_@V)B!uIk{e>d-@}dU2#WxKJIwSRH&=9lTpT&t%g_Er|!^_43T#g}t8gbol|d z$}X6ql8aSS68ycs@?v@E^>BH5j~T8<8HsBkBLPWBByO?oRFE!wtzW*MF}r5 ze6@%ZT8E+~h!?Fe+32ZU#LIq!nxDSdda?Xs{v|VpW`oLM?=Qvlj+w&Vmbisq%PHu$ z2?er@Ewj$>cB;C}WQ;}#d4}8v{SXvG5R<87$8^>b|?TO zc7~#~%-I%eFj)kMTwM6(tJzm0ukMonnOQY7b3c$)kca<uZc~fUz zF_eK=__=F0zdJ!*LnuI;@RASmP+Zz$lNpIc`1YinB#&>&*`6Z^Pv-e&x?!knnF!16 zEK(8j@Y*1bKql^rG7xuB@ZnCi_Xm7)rP_O&3PXfaf$*c;Jsa+;veC%oe!yHPuAhMTJZ<}QdC&^7_06xnDV-FLu@o?85Ml&AVYA`E-0p7213o#lTw$uBgLtbpsW2oL?7T`A zx-MH=wD6zH+nZdJ^Ko~jIA1B=trYKrRs%>hoU0TspqRr=9^CoYTr!)MGjdvH-uHeY zl{|!#SZ|wq=YE+bp3Dl{lL2V6`ea5+N{Q-|`&u#tuIZ(k3V~{R$)I?c=80xGCxawI zAe8y3mWV-s>_~D7#0j!7%*mLqFalNuQefT^9k^&U*ng;AVP*C6O|@NyMEO){)+OQ>te+;+0$l<_f&igv%1ApS+9LIR`jB5{5;4++7y_uYW|??IYj(*qxI>e) z>Y3##3D+_Ad5}{m5j>Al;M7c_g@d*dc%UsMh^^K+F!X9b)RMdx9lS;lgIozkvlQBn zh)^?iKalRG_x!KDQ|NxI;cJjjB33|r$-!4I#*l@)$1Nic;QR&$M5u^&b?o4$zJs1Q z;*=cRy&@fqu6(#Q{C;Zg;Az*vot1;W8-iG?qfk#hff1)TcGH8v*6G>vT?gSR*~=8* zkKsh4gh_=^EYiZYt*-R^NH`{B@k-P_#9SvSwQ#I5^9UR7jG4ZV!fuky?mDPQP$5O3tpn!yF0^>18A4`twLp} zC?1>6TC&z&n$!}qVY}H;=yAs}SMOG3VJlX%YdzBJEIU`Yj4TWcY>OA%(N17q2No1HuMe?VhllBrV_l)!4?@M0h+==o9HbqfZdu|)l!1DvXpqMM*5zPbr>q~ zJ(CTa+8=AsmKV_SLARP^Q?ViEMsO=!W#-pPO3`GtCq^+X@%l?XZY5e>3Gh>XXNcTG z7r|HMgGbTI7$a3Ip2iX<9yD~CU(W_Uz-fNwd95Ik2lIp|>7#g!Q0*TNdI_;vedQhx zZTTWfWgZV}3DHaHY6z9w9hQyaK_vXy1MtY#A?x3Ey9A2d+@duu*b!o|JOyWb+@h9? z$;T@4)!rNU`V&GFx2wHZ_z*?3g0#>@Xs5%}o;Bp_^TC8@rD5WgReG2kzOg=vAAc9E zdx})$6e5luViq1QHR6WEh3kpZVeYP4RS-X1T$k9$4-T(V`xtFrny|>E1ZXT`;RSQ~ zy*^y9!i(aDu^%9Q`_I)U1CrHReR4Ha{5EtwO1L@{rwS!7WcvxQ3EQlkC`6|z+A&da zPE&$N5H=lkMMEja<7Q=Q%8pdp#V9=x*F0Ws%@L%ZEVvZe3zezsc93+s>-ecKkMD3` zTYX!~r_yplE6o$<-o7k3m&8)AI^|okvmvJyyXeDhP&m~3iMuC;3Y>xmm_%Z6R&2YA=I19od))CavEclnA6fvMWjWsLc5mfhqHH{Cf8J~h|^3@G|HzF3@bVpMK zGDSFXG5{{YqHu(B8RD7V(h>3v#5`%~$VGfFSU`e&-xrbOZ~>_ZM>!QFH-Ik@G!^YQ zr(hAuhmqBTp}AUty>#$w^26o1_veN`Tqp|1I7cv8S#Svk1M5g`R9423jPS@&%@GWm z7Xr&k?0HhTFG5L5O_qhqd{1R=)D@BQsUbPB((mD? 
zbKX4aadf$&Mma|?l38#m&wcAo7JE>iYUC)POz(9W^|>l_*F-#S`SxCs8es39oO|EMA8{3{r9vK33K&4z5K3L|b9 z*NOj>R$@*pGI+~`(wt2u4+jLjn?RKS)E5MAU?_2@W8j`&t864XWJ8H%bPaTxna5SN z7FAQ5$-NZ34Uk^O+S0Psq8pju&C*aPYsV`Y!|^PAn$dKdV!#>8TAVRoP!))*Ni=~ea4T{ZEN;JyY;%s^ zmKldu)D|_)*v8gJ;+>+Ikqy!>EDAYNtU4iEGTc~)$}mpWe&7X+^JxE%n<`uafir{|WYT%ib@}0~uB)Ab|Lq#LESc;R0Pul#L8nY6VEU`1?v)FDn z24l;~e>Xhs>Y7%XWnT>H8sr_`dEx`i;rlzhoO87M^EM?j7;kgmM^flIX&VtF5v1s0WYPs$1Qq(>9;>ULp!omi2;kxR@MVrT9!gGpdC zIx{SU%*~`tS`230BpP~?sGxe&vHrrPZRQc09>>=?WDN$x4(QyuX`5)U;aO75xq#^a z%yV?iu#v*siwpZ3KZb~d{kA;tcJ$`{#>2o6*Xe)#QgT;s!N?Zu8)iX|5>9g3M!h!g zf-M{@a#wJPv?&-HB74L<3%g)I2-aiMUFZc5?=0x1cSRR$0~UE4$bf}jFy4br`K?r9 zksW8c!O4h)ai9Z-pART_A+BwKXTy7R2!?2&O~0p!SZL-Fjl-ZU#E2v8(1YJEx$&BO zWpyC@14uAd6L_WRt6+ErTXUaL^W$~TDAAY;Hs<%d5)QC<9$35GAQW!{5MG*EH)xhHr5>_;JV&V3w4iAO*K?lV$26hYi7RZcXs99-Oh z>k_u~aW$A5)T{-uAh9}no3VBA#Oesc&Urax=f-La!)$?oB zi=$zR=O@a`<@NHsRG!{Dx7WAVTYgYx8nKRRNi77xl}6BPU>4rnxk#YJ@nmOsr=P8- z=y;xDhp2g;V*8m;BAw6_sZDP;p65A&v5=PZDV?{vp>iwMzcS>w-VW$ln?YvP^P5Vd zG|x*^`idAqIU~WVUI$M�|2l|pT5+9wC+#`l*`$*r=QlvRyiFTU4To_{@DUbkz- zUk^*=#l6A3KJwkt>tS9p(>J-&T3cx?mncngYsWv%8j74f%S&Ynd756|oq~BIs025J zvTmW>cZ$)CptKT{KJXHp1STY8I-SaDDOol-dmo@d2JBXmpWia{gx;Z=DVY-Y z$2-}#yKN7LSxK$ai=?>j$*h>p<>-~CY7|EZUkD|R9yCwzL~br+o3Dal$wvRYM5 zqK3M7!kCY>C{?EVv8uP5W>b-0)**FjYQp`zgkiGFPK9d}HD><<|0p4a@D^#g{$SHI z%`4jRf-U3U>5ze@uOv&BqQxPb3@?}}7$tNse3o4JZ^(s@eIk6}8?0O@XHOo*l+tRu zDr>q(YiM(70%3!Q7@1cLGtpspKgmI3 zdJK;J3!CSh%2ElWtV+3eBQ_jS=#rHxXJuu6=k|SJy&FG(lrn^-sySu#!6(Cd-m3I) z3F~(AqI4G;+xY3K;7FcrChM&JM$IazJCt2Ck11#wCaQRC^k9u*GXJb18*&w6GTn1a z^bSKQ3&jH&c|V@TK{b)>0-ijIXRtOmj~^}^4W_Vix0!S{8BSysdkYnK@6wxjWKPbI z{4YaE-bs}9O`sFCRu@>=kLpF8@ABL1>`s=wkFTFT+y;;8RPp_og3s9v*cF9@?Ia*X zNPnZ!-9?YHG)_rx^IooHCQh;uUuG_7^ukGXg?m*q(7c7hwR}u~czKsYPExw%ZES>~w*-6htP4t;$fK)+QW=yEXHt~~zL)?dGjTU( zNO0E1HvE)9-d@e^TNm9jgF(VYTOVx!Z7N%UC>8E*HQWTdFHNmg3w1`ki^f$W43?^r zhqw_^X52>l4Zhg{^XZ`Q8_4$pbIHlSQ!O7B2$x0LI++y9guKY#@&t> zJOq_78MJpuv1J86l~#Rv>u!F@ZZY)G-y6B+7jfWtRuA_^La`prZ!Ur9^~ds#U#EST 
z9MBr->o7E_uLVeQziRI_y8I?4CUHcrT97KM!(?vj3dQ87AXo0U`#byDmjf?{UykgT z_BYx7b0p2(dO5Ve4H~WcyDz1#7&pEV=urdXkB_EinvqaQ>Usw5+>cD8&h-m~T-Hpj zM-Tg#Z`ZQszRtf+2MK?ChP1hSIf``YorcvXy&kYfKQ1reVD#vI!vUcXF}ZrNwRB9# zA|>S=;)jwtjFB}r_3Hks#aGE!a^hRGHVY2d4q$9~$7YZx2SOvC=+{DewBOqMmLI#y z8~x=C{|HIRrOKWAzVv;O{{?B(b-yUd$3~QfUGp7Ch?IP;hdcD{R{ma-6X8O*?YCvEpn^ZFKF^EH*=cvtvwgysV~0Uo73dasl$jHcq(>xi9PDT=KGU0MkLH*NFFMn zjq6&){J4UjyP`voZ{#0I34Wd6lB4Ms3(F@Q@ZWxAIZHmD(q@-cF7`>!(3P$QxNn2v z9f@@6BD;t-ZNOj~)sT{K-0HX<$@u_(ytB-w2H3Qh@ATXYxg^YhgLbYmenw zj1Av4q#6nedIqrIeysClGXyf4zOkcPzNYPiBEGjNk?25*JVp8c<`~T7jpx9Q3rC^? zSPP&l>W^U{{N%q0ARlfgU+hALoK?UK{v21ya0C3lpu!E%v=^tk-sX$7+)BF~8u(&8+F)b;nPuvWmi4S zR>vfBnG$BiH3K1p;H?FtZd`mfz~9o9=PFMhVT&0eRd51lwU_!dM}5O@HWi`!e{r={ z3$r}8cOQ9o!{xcCWk#;)Ok-VPqchSQ?o;L9buGqd%a=GIA;j0xYP5!oh}Eg9*93g3 zn?OCzXShb0{jS-^`k2$N-lnMr`#wXdE{OGsYb$5|9CL~XY78%B`iL6C>O5maO~j94 zdkL&CKw4gi5%S|eW$`=ZAYTn`!z1x8TWC9%x4I_cY(ouZu zXfSaU&pR=|p(%JVw5S>bbWI(wbGX67D)vnvx`2;z7$0m95TwvwRxo1Zu%G6le`G#^ z#>nZd;dh1X+5G`Vp}*7O#OZAI^l4T-`sZifkZr>R`EL>#eV^3kkvYLZyHZGCxdqYw zu3@aO8VOY9#h|EP0VnMiPUA(G=krd)nu!M>jZD9zP89IMY{#h%k3|!)zZz4+CCf-+ z5#f`XC?IiYJjUbQCTlp(*(yk#0l~eeH8DSm@r^=(M<$0g9y#O*z5!7`)nbo7XlDP5 z`|bUP#te6z?J}4{m1{(;K8OQ0Q3O(5v-vZ96_NV2KhN`Y=yrYEXNDbp~oj@T)1+&;m4!J)D zei?RR3tS8KmevzF|`6|R0q^sraj#ka=(&egbD2EZUGyrrxk+1>xqFo+m0%-lI z7PEBkIaYbWKO8ZJx)#$M{BmG?6SLuL0gTfmXW<`uxLHX}28R64OJg;xw0D9@H<3P! 
zUmt(v2qml`9!jI4g1iSlWd8||l!i~#i~IbM+I#R~G=TTE&Qs#=n7w^Cei+BPjpD%; zI~;QSvH)8qV0S7x%=x?Eq{BQHxwaR_9iNsrIv6)>LmlN*xFf`8rY?QL30sU{-RrLF zbtF`=TTRBV5_`>xq@xrE6@k2yDM7}X#{E9Ygo?pa!~t=9_jTgZT4CGdJw-;kwI}bes|wzXX$~n z2j8f=bX33qsx*m=ZcuXqN6iyuN6i+xTvkaD%;T?_M=uQcD@mAXmLETCvJkkq3HnR` z7^WLy2nHG3*)0GYK&$~DHQ|nt;f&SF!gwWfmjDN`oUVW?26D31m6|?ljqVM`Iy812 z)+07uo8F%4tt4RF%P#$o*UfI8|+n~dVNG{A>n*M~-5 zD|vbeQuR2Pdep!a;+Ckzu|^@9yC1N-?jsJ zBL4S&0wCOH?U7KJL$~s?P-L%U?9Ev(q{XxV*4#VLa-~N%+B)T|bmW@o*4p$i1qUMx z?r{`F9sL{>zl-+p6Oq>0inMj{yUonb6+S;WPn3TClKISspQ9y2+VeB+5pp}oY)qE8>(&N zkY}TkAFbF^PAJUZzi~Ra{?5+5Pi7G2jHTY+cnrdnX(_h_!5a=<2C!sp0CCN3ZRwfA z*gjZUUlxI?0QxdHyc-WaNM*zmVj{+Vq-e+cB~4&-mD=N#>>~suGnM6mN_H8E*i!^c z2GI>d)nc2KtaPm)Ad-VRu!RR0=OIbiT0cCK_(*;d(*LWx+$+y^MHKOnZ)fX=-*N;22}uQZID#Jh9aMjW z`gm!c8liw3GL=sY1n|V!_0gX(jfGV!2nQC09HZmz_-B7VM$RdIs_yiwZVeL?0*p59 z_tFc+eVzWVQwLi>GY$nzBQ6Vo^kg^Ou*EHx(8 zbW{14V7sTuvbcm!l2N|l*HMLW^LdK+ri3X#smfBEBLQz6zk5awTa=*EUo z8E{TS`QH$(B#va1;{5^mg8ze~q2$rv9A@z>)o9kl7mwl#M+0$D>c4d~xO+S?bripQ z6raUriKC$`vZ5B7^#9NhLblHwf}n!@WS6uOq@O3b`^5Y9+I}N;&ki|k_gOZ;oIR{ z4In#vG;%tgI2}(Z3+c=tIQzP?zJ%b+A#de_$UgwgR;6rhD6w^XKY=!)C;K&Od$lVx~S;tn=_S)cGO5SV`pgZHnHwy`VDu1VLRcf=$6f zkgE?q#T90kFfuoaC!N;`!V}^Q<9gx?2;xm5vW<)WQcNUAsb#4W$W00d8%YJ_kkSCK zuH#`l4|K(qXO;x(fn7&!2>o)9C6!Wk%6(oB9<%GL3lOxawRK+NZ>+v!7Tz zxWLr~)gY}$YiU&Mtzo{oMm}I6MF>94k#>&6Zk)P+dN3TasT6y+#4?RR#PKQsM@gX- z%uZTE)V@M~JdgH}$9?BUCHbdC7u%y>FDKr{{h_{^zfW)$R0W7gM18If@E5Z#x8%46 z8|fDz4KVl!krcr(C>Hrq-9uP=%g$R9m)OB3_yv(`Y#Us&!~AY;@w2c1s7Bt1zg4`i z!cq9(#z`qf_Q-#4PaKbI{8U^%o)~#Mo}~eKJ2m>#^9@o$YQ~LteCsd#H@vnB{EYVp z7Vo^BOOQyCvTw1DYw&|yjZxj~<&WkCX>#$g#I1PsSHJ$P-p;SZuOW()=iVfi-`vc+ zNvs4R2wwRi@jCr_HK0OyNrab|o!3FtW`IuMbsm@mpw8w8slNx=1ZI&<;Q8SYnEu=- zes(w|J*NXHh{yPhw(;nrW8;+jmqbK*nO+C-k*c1DvFm6kwpTJNJ#GZXR9W9^p)tLW>P;a)cz;;OD{)W4(xBfjPDm- z4)edm`^zt-?lqyl?*CNi(dtOa6?&l5pE@+8IciYr|0msSHLQN18~0^B{&9gnIcqJ4MyMRo{XTvk6_OPrK3d++HEA_=|YID!gp9f52<1qjqa?xWE7V7cR`GpFK 
zPE88P_Db4f{eDR6)50yzUuhA1>#u(OUqzZ}KNM8s!<5XaN?{uq*JRUl4=;$FBQqmOWO`1y!U>1Zr$X0&T2l zY<8iIBIVSC)D-eQ!I{Yh^sqd9QI9J;8 z&%9-4dC6?;)1z|EXjA1utk&ZPSQFfW{2*2LYJ@uT!Ctiw-Y`F*;wHB>1sRkm(ciqlw@zwVak zOZx!)5evuKVj=m!#FW-kEVUOVfHYMA3hn)OTfxFNGkH8VcYHrc%Y9qCxwkHzj%h|gD-1^!gu#%cK-8!RPc>VFPRBHPt_MZoxy?K%iTHO_a^eI?Hu?=H z0ylL_XT5xg6$3`csp+@rdk`5KnnXhA#L>_yMu1iYw*IbpjXbRJwlN9>HQqCr6a$Gc zMGV#yybRHb@tU){+0)r9I~`8FA1j_cn0O!0GJx1y!)IgaL6{N9jRB;arflB~22a-9 z7zt|W6+ECg>2g7{^Jv-0yZI3Pyo)m@BNTHB0@fy3jSb)=6YqM3Uz;rBUM8Yuj8p(! za|U7RY-M>()Fe$3WOeatGdaTr3}H;QxUU%G14H`G8pb3Fh;frt+&zl#OX8?rzx_oy zzqifTuo$Kf8!T$*d=a|dM8P&KL7>;?!eCg2zR;n@Nh!oY#7xgo!)E7j9R1ZCP=Np0 z7H`QFxTZ|VOjoJ1a5-f`&2`keR!+gH-4q=&Q7jh_3$9UIfijAp5HMu5K(Sd;<$_Z| z4aYRb6Clrsn2`%2){vwUX%|;EMVWj!#YcYUaEd`;JjMgi-|89hrb?f`89qr*p6q;k z@_fbxXifR}c}JO*lMVal%~j}VywTFz*pc6=wROdMTA~i1Y*_AWn0lKPG5dU5LOjjC zk>&rBMmuO^SbZ(sK>vuOGQQ_@YevYokpoG6+x3`6fS;t;VT;z@ZG>BpP3l8KA=F{@ ze|;e+Lea|Kdm1#x>uzwOPNG)Uq3{K`d@c(+!(Rl75!C6{1*RAhfebMJ-m1ZaU!+6y z|M!^Xxn9l9`to(#)3^s4R!rN1TW(D4m>ErpTD~N;m-Q$<(d{uehFq9A+#ipqntN&B8y)D_luuX*u8j>$|&__004E)fV464Mq zr2}c$79C`Got+xg%m(7EObXG81uCo}?Ft*|X(;y^(m8_o%Pu5HxS16>frPdyIjJwJ zQNdKrAV3wAub+wX6#So4!RLrs2X&fzq05DRj$H-PBiOsu-AGOCfDCe!;r+bBG3W{C zH|OS2+2ANu+WjtxA3Y{gLts8s7bUAok@c7n7R3!R=5CORKA%tAo5A{HCZoH>q0)6N zWW~ZAif*6ZYi1icdo0kjxrR@&)_8|iOnv?xs+4&J$vSFxG1!7tI&HWm7x2w0I)mto zdEA~C`Sy${(N)gE6|WPi8HKU{n>nURR%r@Xyq*C?HQV8N0B_4h=-YYSbYRqg)WmvI(`ggn2qP^&913 z=13~7Mk6=W6e17JA)ddt>~)Cm5Jh|TpSM8fiv90G(@xf~4p4mrWrEE5U7e5^vKR2H zYt=o9VCDXKgM@sP|Gmb>;S62v=X_M7$g@yIX3<>~#A-Mb(Nnl-Q_6tO5TUX};G zTw=dFL-{a<7Mxa$G8s-MmEsn#r)lo|hS7*=EAZ0770;LK+bkq(GQVkt)? 
z3^)+p$F8T1$*`a2{s5FL<|Oj+$LIp=xCQM1X?p+LufBcq?L+q8edW)-&9i??-#%df z-urfeeV+Yx1@`>8d+cwewLI1;?g%R@PZpRP9@YD!4S-a=_0gi+qm7W%yT4!mr_tXx zQDkcI_Zz=o|NYAE7yl{4{#|s7Lwdg@08lkrCmnA#pie(LI*9eDiR(veE*PJP^x)mN zq22(G$Stt>`eo&8Y`Z<}h}b6}YF2>Dzv;ut9`)iy~teb9EvEedAK)1?2C=a?( z;|Kvb#QgrK&f@MKC?1E>Lu6#fYQ(%ZnvKtm*O2pbH6;Hg)uYv;B`-=+K8^sys?pp7 zY&DR{Ln>0a)=s6hvnjNs2)_EVwX^O;MM{1ZJ0nh`eqQs4Pwh0P2hIwm23pn6`8a6R z$067K{DjD1WbWTMfLFo&D+yxPY7liP-FutoejxYZZndfl=;!T{y0a_5)g(3r;8yVC zYVwtL*PZhI-a%w5^KYV2^;IGD&9j7a4u#ldPiBq5AX=h|n$nJsHg_K1KAbclChdY#_UNf% zF+aPflQo+OhJoe_uXxdnVsG6YKXlz*O{=acX1&^6q@p8qcYseKfLPA#@pI5iVpnsY zU+fNM(47B}x^5K(XOPv6rGUh-=Gb&km@|BK3XNNu4~z@};yD?208LsB5jr`Z54a4R z<*?ScVhN=3-<+2~Q zcrk0<^9hypxP@;8>>Ugvc9>gyRa|Yiw!%&VGibwFc;g-dtrLm5O-+qkm`@bZ3_Fhx z%oKQfv$SnSs1w&XWDj=C zlPB#$5ELMKV0!t3#<6Kbj$<1M&uAT#;oe)>dV)n2egI>UNPiIDh_@X6;tHn%o1FjH z1FU$c#ta{RK7^e>vt$pQ%5U_+Cp52 zdVmOoSXm9hWn&x3Ggk$~F)!%ac`bs8Kqo*(QULU7_U zJ^>ey-UN!)C*BVjnech>^krY@BP(PCe0@4LcRE%+%|1ggF$>gm|FOh*$K-e}ki zCV^yQb53xl)?2kL#S1r$Uqs`hz3;a76|_HQpMSVN4IHQYqvwyGpXVMZL}U{n@*&KV zkYX`b#szI@jY}bOM4~kc;=)Q3AR;23!@B9Iyd$2+8W#~a$3-lr5Pc0&Z!4KaxMwzX((<+q|nS{nae34jbzyMPs z;thTk4Kn}H7JOM+s&Q5Rw!BVO+pwP(uwlaiyC;_9>fAI6DiS{HL11G8QFuE-+Q(=7 z1#G?^)InyW>*YGqN#bSk9f&bmg!WR$rY2=%b6FF7poc^k$_+>YS6(nKZRlTu04NQ+ z)2e!G+L^jqnbz+NV=mPb}W2^++wOp`O2@cFNzzoyA9md=Uv3tSspjLZu(KRvg_G zFxI`Eh1=6!n^0M1124A}sXRf)meloC z*gYYT&&*xr!ZHwa1O@#~TzJ~c7UUTdwgo4SHhaE91TO-G9ai>c&A zG^8Q|MRzdB&xWxgxWTH5Wfn6ypsbk1N>!2^Zek2(pT}14*abm1w$*9;J6x*RZzzUt zAxOqSFlGoucozGDnv7-ZK{Axe9X-s`#Hi091%kL7k0R823PR1tT=VKX3`FE-b66W#GJ-)O@&oISNGX8Vouw!c zSKA>^d`9O`vz8J1j1P&sKp`QnrkvEmY1+`IMfA6iLSNy&#DN#lPvq!r=@xq>o@Pqz zi4`b=&3^fIar!hfj&eJh(@X_LdbWl1&OQkC1PvS6nI=#Uo5ctc*eDfL(Yb?vGnkTs zoef?CD|Bq7vGyA`G#rnGdeWp*c`< z`w%}rltg*t-Yb?`+zCCxb)o3DW(k4$4sccQSG52?xn(HC9mgMEQ?4k_iw}k>Rs*d< z_KVCadNGNpEjbjzS5K+O3F+5ZXf00)IqRJukfA-15Ib_7Z~#2uwz%yx6F(h``pD8^%i)beQbR`Kt+g7GwF zTa+fyBBrY0Lsa(RAvWJpHL-wQRTbs~4Ub%l$54?+Rc@$#>Z_Y3)0wTw5!AIz4XX*N 
z9wyx${{c9DUIaMVFPyYX6Yo@Y(C5NQ)x=oRT1D7&jo>-YnQ1$NlG?{jEQ&p#U>x5l z%wR)&yP>dV&oHL*43)ND1kl$uRUl(rjQ-Y~!f*2&D#a;bbnifpr`^(dW1jJ?TepSy zPaRQIVmoxPrWZfciRv^0-!ZwlR&@!Bm}?Y^{m100C!;5`aEni}<|5{Xaj)5pe1XN!qS_hnsC1`KrYWw+?=<^Mb<)pCy6Gh6 zKbUtmxylx9@`u_px7)>=Y&8c3(aPl8rCgwR-u=?+!|`{cHP$aVPVFzJ-#*Ms{XV9! zVW-|B$)Tt^@;~E=J}vQIe)<=zjcJQp?5|f%TD5(oz2SJmVQ(@FmT%-rB{-0*SXgce&XV;0UOd*p;Ca9l^~Bzv%dTcA~+958Bt8=G{U9=WH|VLcwifh^!T zya}4PsE#E$k~WUg_ha+jbC1YQ2t9VeQ3NiWuCOY*g7fUNHRJCduN_B5qC+$kH!)sY zWR~IAxG4j0b8c07)GIOMU$|qY&o||Eorrx1T`FK4S`&`d8Y-S&v#dtde;=E?N`+|CT@1f6H!@i%trjo4f=TR=J| zbKU5conq1TaE(dJ6J{1(hY5Rv61EF3-Yky3tsI`s9{5qgfa8jH&z|g@txnl1V#r1o ze*r;+I?mogP}W78KzE^L-?ec!GjfB}K=rcGq~^00+^G&_ESQ1bvG)8#YFZwH@& zY<_|p=|SZeS}GA?JwxzX=xLbx=HeQDN(@bg1^VA6kqwKT!F4zMO3ZWX10&W~RA!g6 z-$pD9En_@#ZHRJMER?_!9rltO0n8bxlmO%3xS24I6_=p5WC;a~c+YbxJOLjwDS&KX ztgWPn-;m5@)Wv)P@O_8Scwoe&WN}64R+6xY+4urh$f$~HBmc=KDWr|Qh@+tf!Es4| zB>QE1D*yUm;WU#!%{)5IPF*I}5^F)30k^71HlNkfvn%50K2Xk%aqca~lfCONhHkIC(ydKw)Xyl= z&)*<@B{h5Z_`8j@?K|u)>$}L}E{-O-eeI55+vI_HgKhI-q+JHG`j4^OE5%%N0xr}B zDY&zSUnZ$&m<&YVF?W6r)Yg%GLaH}o=WxT*e)Pz@uc*3L-Ji=0 zzIzuD>Wd{mp3sojoWNz}3j*jGdC4(cRj;caD(R!ZR>Knp4%79B`Sv8ZGzfz6eZ7Bx z^vF;=nd(ny$=^l0m3i{d7|c!^!8!CK*(1T4ch8^@FpYzkRi36(3g@p`V}w=y1`hC? 
z!dVyWQ8jxcr7mk)=pvzGI*9>vtN@GE%8EcaE(J^Pd=^jl#sQ^xpr}eJlDq*|F{sXl zAHC&xK2cLzgz7{;NuBt|&>*{r0fhC_Oo`WGLNMW$zXVSnF#DL-v4Z0&++io-h}u#&NuCeW)91_!2lyH| z14hu+oKIcx!n1`&=+3dvYmpkp5 z+h>zsweM8@B$m#~_snv9u{WXgbv6CG5;T8!62 zP;^Se2EOIIYz&`}$?QQhNP}NY_!|6VsDdJvN@%`+EMz6kh-^{;lR#xDr^HOB$JT_9 zVN``owp7SuW08F4oak$>>rB!N#)b)l6zH9$ zi}qW991>b{_y~qBSaz6TGG1kyeGR3FPW zE+uPp7n4N=k?JW6bul6v)wRS-86IwFhHOmBkd67dgPEpTUX_6&W|x&C`mRwupUa)k zuPJ1gh76-Lq=1sy_*3BfSRL&;Ids=BPz}pen6I4a)WqP?DU;*T1`d%CoNYbd#GOl* zmIQMA{qdTmyeOk6QpR?@!>=!7va%*+@ zT{O9$V$%yr8pD1M*_-VllU=W8jHobUSEqaJD$_IBDy_6iTUaDKr{{2Z&c^TpLN>;~ z;TCM-wnc1hP{npjU&4Kb5V;Dxo+3ODALeBjR8XPxMTIgOI2LThAH@2L;0YkkaYF8e zSd6*Tp(X~^8?YF6wH9CWOLAB=VboVRIVyT)p@}(z=fswBG6VYpD83c34C1*cb-06D zGWhwHR7t_}IaVJiJmx z)ND{jiCGM<8}pDkLI;mo3FP2G702d-cDB@2Mv3eZb|&%xS?}Bje7OuKxLIT8@Rv^d zw;#2T!&9ooA&wp-GvyZl|3f6OK60k$ax*%A=o%CssgZzr4ufOJDZ%jzH5myjg5lXq zZlls#oz@vEO?;ohD^rXp#WM0cP}e#N)*zJjoyYheM~l~@Y99m(i2KrO662SV*cCNA zz#K1jaoi6vIsHIi)qAviCLV&;T410y{pC2$@Q*LhGr;-R2{96QZ!DKb%X;7SvN@G)@Wfw_9`w;PHelJ=5r z$YIU9Ep@QW>+nsKX_tXM&My3&2Mh;B}f@u$Vgi32IgFv#c2^D7L zKJ=bI_)WYonNo+DZ4bXo$R+sTmo^C1m0!63+{irV)zHteY&OdbrYU6j5&X=$N#>q6 z$e~RvTe}4UQBh6ttu7J+!mkk@e)rzQpV zjWGQ@E^j3u`&k{O^!mhw>;Ny-&_$tYHtB&&i|p%Rcbzx7pf8d}aDEZ@P~Aw0*M^8z z6Blv=#soHa-3UK)L!7UuqZZhLpFt5YnAHj%XsS^89%@SB$!jAiU`fK-QoR4yj@Aa^ ziYguf`Wo*|ssm{yrYfKEC&Ew3gsIRZfy_ve6tmo-Q9+i3r%nP#Af^fE-1J?XFcKS3 zqnA>BlHgBsq%Teda{L+Zi^Pr=*_7kTh{BBSqeUX=3z8&=U%8S{V<|Qney+6fQ=-ZJ zF0CPtHcXs-_MIG%Igd)M_0d*&%Sxbq3sk$fjnbt36XuixuVTUGyvHJvB9@4l$tX`D z>94X)DGD}bhh#6ICWVNTmTzI8lAB6A9WW=h?I8ZVrUTt6^2wp_#M_Nnb;GQpev7T; z%=c88(g%oY=3!BZN>tqPRGg|*jK@pGr%U6%G(R*WV9LFF3>Z*7ebAoD*k_C^mG~N2Aw|%ic4&A1$mK4XktC`q1esZ<%x$0EJ|!) 
z*pzvm!{AzIoW?R|sc88UYgXnyej&^G)BO3<3_@n!&pd>WP|b3)`u^swa{jXPA$RY* z@f53Bo-Ta1UB9Gi77PE03xWKa@u3*Qns6pw4l zr7|vWkWH+EgMya|_Or(`DexMyCRQ#ZIU+GV0loUDDUp%0-9Ldjk2_E)14#~EHKX8B zP0XYe@G~7+#B2$trD~9OA{eK524zLhy10;1b(cVCpc)=fPnl98Im;g6p(>!eV^r4& zO+ENB2m998r6{e?KSp`YC0Eg;`{K#eHxGwm5zM2&n|vgEBc@#iSQeQ~_$`&tUOfs2 zz-oYJ#N-t1n;^1kO(;*g+ZO zJK8=zR4B93K<=aci+djcZxWvC0E%>ah$jiFb>Od`9|^gI#4c46J|9o8xbj=UzKY<$ z%}^w7QW(Mxlf`&qTgq0AxsJa=4kn7t{A8lMGp zZEG@RJ<(yU;*JtAX3;?_AFZb65Itfj;sJH(nyI=6R1A-s?D@%_%pTX2lSverJTyOd zm~|l-op|C-X7%kxVn%@k&aC;#JDCkYu{Cq=_2DCCeVvVFAa`=R#6G-!@#WjhedeXN z$8+t<-X#?PVWH2v#|vkc7&*eW` zN*Gy`<^P5?ch4F5(2?KMqg|eINB;EN_Yk=1p?{Q;&}RyK+?xtqnMlfAQNk+F-s5(S zAQ%K~Y*pBu(?@@E^xUl8PEzY$G((-V)h8_4>RYYyf{3g*27LYLsaFWP1%w{3+p%W| zYS2ZBTo6uo%QP8E>0^%w+UTBJvIwKwtD=u%h@c5^2H3IXJ;$U=qJMbM0Qlf-$3lg! zWYG6e>cZ@$dy&2HCw;z!H?rb>BVB~f_fQmr5O$6~Q5RA33c6Vft>E^?Uer!KYl!EB zrN4?hw%BKP!3}0<6Djcl$w?W!wN4V^UKBWlX87yI%d>?S9`aKp5f07Q3LVo@zmCKr zgKAVUua~$g1mQ5t>J=DBSXNO2u9CqLO5)Ts4ATJQ#nedgcvw$;r+e^<9xX;u1f%0( z>nD}N?~#^JY!_jl37QUTvNCwI38(Q{rO{!jRt9OzL3qAs;m*Qm(5$E^U9IcBvmzx) zY^sET#tH>DHwCwk9*11AS?zshG&(K*`~v#SSez}4wyQIznaV}mpc^p>BD2~>!=a#b zYeo1C|JN7J$EOt=+ah>ye@2St*L5`qDm7r6p5VSG$|V~ozBL>280zNMhtiep5+Ewh zL(Ln=`P4+5%^iwSdoyza#HSx{9;dab9{r2r!yNJ*XXdZg*oe|^Zz=hYC0~5MlH>us zUwuc67ck`300V-=y9nCz0Cisiu|02A3IoG0ptw~aD9mfWjQ1W5cEnwGtGBU zHjhTxD27wZLLt<~kgMx;$2P{B-&pDb#xiG(E000~js~7CLeas_3lIy%H@e063li|8 zj@nPA-pt=5%aC`sR`3+k{P!O|M4dL6C5s|JayiVjW_e0pn!c zPP4|tgj>MHvD{-}_O`!OY|c~g%ifCj)0Nc>j|iDMw;-3Wv=v#J%`M1f5{u(jv|EJB zB>l$kfd`1Ry>)DrV{;E>+1t4Ga%yhjE3ofX{#A%Bedm-R==Y?XVtoL4^SDlSrMx-p$^m7X4SEB zi&6R~;Xr3u04sEvfvPetAjP=+p&^imlEw%E_2jQ_2&^ii>$jkZWOnLHxM?uzdY0k&g96 zwSFze{Exq89f>P@RP-AqVn_2)^r?Nk?8k3x=&uDUJPu(^R08>q3E36~0m60_qdr@{ zLp;{%AV~p{*e?u*W#VA2;4Z+N`3&!{%gAL!BYx)&>m)BRK-4KS)ZdOB1P=RH%d%x)@A!(p<-c@VvaBJ6HfgNeH6UVCgRT z>|QQyQt4H76c1EdW)EnLSz~4E(C~WNO>v1aqY8@I~W_P@v9R6 zrbREO_HBGE1&By^Io=d-_)EVL2xAe+hkWd|(1a#K>6Y9_a`RlVbi_>g;58C3J{}RS5sOv 
ziC+`(8v_ZInN5Rko;~kuFp_e+8m1zqFne%a+RJ4~>UAyb)*fscyg*!rX4}4+$IXey zF2H5y@c4jqs~#puN_s}|it=Z0?$-y+H}^|_?&hoGFfY`S?6tOUW39=GkV%1ZlCH$_ zu-rrGfypT6*^Tr0HFrmT0WAh9oVX`GxED(}`|HMy30BaI$Ah^*64*t*ba?Oi-F8zoG-JUTUoe8%?Qev)L8+nP# zLV&ByQRRVam9gJyvGr`KJATq#<#-!q5pD{=^zps~kv_S}obWxiG#?&2@&bqjx3Pkc zw~T%EQm1@x{cK@U9Qpm|-GykOQQ<5>NJ;_TsB~C^{#6?9`;f*9QIU0?Mx$7l?2n+F z7gDZ&b4N?XzImjt@KE|JFY)kQU*kW5(q2dy2wfFDGie< z@ReK7vrt+6J!%RTylM*KFrc|Uv?f5mpktY$_9IkY8?Hr2`IoCk*paaI`_d!?YTG$P zwh?RYjAHfCerT$NA;shcc2RzPEgnlH;?el97ac=yuH#2mI}D-!nI81H$X$~1Avm(Q zpK4;Oum{7gfaBYjcFDfrAA?*Xf5hlil{KpTX05X#Xufq>HQ9zU9#a0O#T6Y3vCXQA zRB+urZ6JSEwOCsK2ptwlL{MnDIh@B6xB3yV03xbYFYK`#H{r$7A|GMYb3<;y=6P0MS29vXC?%32`o(!-m;gpZLp&u`uiJZZO5;pcvXTuY_A?_ee5%K=$4tqMm-)T2!YmL z!om)3s<`qixPoqp*%)PmSejpL5o1UV9;MSwwLGJPQLs@VCIWx~5fW^Z_nx+&Fq;5hDtOcg~|f?|A5t zevLsfhB@ruW9J2~+4)d5@mKSScwc-Nbl=?XC--&rM?iBw1bx&e%qtj0Hvmb)j_dBn zrHpBT>$rseS_OSIKsdQ0l;_aJVxH6#3Xed+)uJv{LJkyqQip_aa!r~(2cjbo*+@rHk?SO) zoK8ZvG{F|1KDLj)(iQ2I?lU1OUc-}Eu8u|PJp1^J5~6b6bEUO9W}uG+GTC5#-Vku; zXINf;$b}ktUUSrj-noGd7wp#oB2LDNQ`{pSYGbC9oh@ONeg#KWl5{02 z1mo7polVed6KS-VHhkZcGOtxd1_y*M9maN6C|@l(9wDX`VL~Y}9_U zr*uKCYWkxEOQ|a7LPwOLR}YUiuYyG68e3&;)?2@5asFJV6fF{iRQ^Chda{8Qx)(@w zZ!ilpp#iWw5+2aRV$<{6!G7k?#`=9`O6jCp5}2AnmMRfLmPO52*VSlBgDv_uh=PF6xV&URd9E;tZH52)!BHQ>Xz;x{|W z&50TI|11L6LC4}Y%0bSz)-!tQbPwqYnv^P)38gXZiA{ER#*+x6dD0X3ri(m%itN$M zaLHXXTzLAFSB^n%n42vxp=jm0AJndmokK9`b&C0yg?J<_&t*Z@ny1_+XO^DCJzLmC zo>N8R{t9QJHu4Nq;m#X%T>Pmr{*e0N=@8^**kc{~%%9R-J1W6*q7L>(@Ie&s^ebu@ zbSriGY0d6o^_xh=6P{+a-aUTOZZu9a>u(-Cf4j1cp#Rm=u@y|E?qDJ`kJ^>nr(=^S zZn+O!)MFEvJKe(M>e%Vn9Ia-oA7rY&eVRE8k~kH{qLjA8lqJzgZxN5QB66QtRsac6 zp7Nx&RZ)p2c8HAT1}0W1m$`!bs*12kOPGM&qfU6Dlj;$$!*NWA@}h+X``|YT3iCRD zJZhvtziU*L^T~tnwx9BC=TGh_=X(nu9^U8046?1Oe0ccayV^nU1ZW)@o~x}on+cRT z)KN%f0@I1Bm>(?(hG(O;a1u@CEwrEWqU7OFlsu4KU4Yh@1~r|c(W*})A;nnh`HH#c ziX}zKju;i`%PpSeG`F~Dw^wi+OZvG@mcGyO3t{5#5g;Nb-UU4Ws35-u`~HL~A?clC zuuj)S$!e7+Crz)G;tGD5qgRywcgZ-7%V!Q#_#HVc=4hgHCZrjQA_YkWAQO5vae)hT 
zZak6GnpCP;#>6MD*)%AsH$>^k2KI2sbvJp}qpgjHdg|1g`W8IU z##2*7`m`ugshmM%58C=okJ6M8N3JsS2^Bv_uBtbn*ej}t-M}{Lfl#UKX ze93um&yX)oYmm#7mL`35QU@dbM~l}rw35=v^wA=iQ{Apn3EhZoFPhSz6QU3(k8IS3 zv~?V*PmpmM{*qWI9BRX1T}RJ+kvHV~@^k{7xtjHm#WPiq9k1l#<13g+rLG$I!O!a- zq5#rUwhIpvW-;qPDh+HmC>xu>XHR5JXGt!UlANslobgOz7wl=Gm&E9Qxa7p>4<&8toss%q zP=eKC;>}AQHb~}*A455zL{Bo^I~eavM>U7Jeav*PM{QbHhI4#i)S)3Sw~RH5M#7kD za>`z6{c3om=N~$^h+nx939}o)7JGM1SI@b;~%{d3F6`JN%fbbKEK5f>ajX5cWMawq~K74})? z+|*Ncw7ZP)k_LTs%iZ}N7{x5|h{9e(pMOFpVW>&iY6t^Z;sei6vC1RKk=?!H#2bg5 z$r#`c@^GPB0uWEYk-s(cf)~ga@65vhUw6^z<=(nmrV!6jDhqJ#LaVe7WDHzL79qYp z=$Doe)!QIEub(8MasNDI65*$Pw2I;Nzhl4bs5*e3z5Tiq=>ShlN?ZgENZm$`J6R4bOq5 z=sW_HyCk4CJ3tv`TL;+pa5N3&W2!Qw#*~Vb(bXJS{{5Dj`a0wDL5nB z$ZWb1Whg$(dQC@hXtHJFGacI9QqId93)J37H1=S^FN+6fv6ylRyO@GHQc*VPDS2wC zLM=-P*{5vx){lt*A!jtxD@x(al-w**M_h~67;sr+6@fE7WS^7_QKf2A64ad_&ctyn zZp{;`xsemLPi#&~MCgT(C5a52=ouV4l%DSB!xm{+tf#e7B00si6iF1 z!WWf`*J+@NII3!XX!Pn`xDUod!!zinsgp;s7nRUGs*<6Thlp%+dYwUH& zwva|M>xaK2rJQ^;9v(>3+wy2_kZPZ=#bYBOsrzg3!J&AJIXolmJv$hSv(@?f(E)1} zJ33%5VU@iwuc=|m@WaLNnccUawVv%CU?YU$9$XY6TA~;7nK(F5fmVLOGym~k_70C8 z6=55cM*oJU`iWQ%KEw5ZmIFWx2z0>bVmU}M%Y_cT9?_y&Oo?i244_@WE_&h`+rS3e z^YhrS7VBeMealV%H|DxH`!FyDs&eJ)+zlQdS0Oy!pEz1%H$)9dVRF-23A)j^AdUbg zLfmUW5$KSG5Uz*p{OnF9BTz@4(vlWb!-z7N4iBhFysu#_O=4|^;=HlH``gj=;s^&# zTGG?02#PX5r8uqoFe3xqV3}-$CY$1OjbV*A`%DlCv;^2}kDkNm@$oJVYP9ueEy|vC z2qEBk;511CT*0nyB$kZe!Uvthb-XO?$YBopH*9RI%?#XK7sE zoB-=kzXc;-rolhJD|;xe6Q5QSztTnEivquhI)o)}+#VcBL)@P_jVBPQC6C_ZBEn8? 
z`Hv$&LGBDy;7$sChIvJl`IpR9vV?t^qm7hXu3w|BceMVX{bJ=bQ+`|8Y**$_Gt=#vEo2%kpJuB-*f5qq%`Tm0*H6dRO?V_u+}-5Z zr0gng+(W8{EI^M&GpA!;AT7lDY4+wts1h{D?jke9`03cw)9lyKXLcL9g<~(Y&#vG| z8h9?GnqY&>texen?IIgFDBs{2w-74@2q?U6&@nrDj7{hnj&b*NY!%1;<=MT>xBHWv zz#?-qAX@4&Vs|0E2jXHap|B1$M<$BxOvoM*6~a^SBkKkOI_w8di+Kb~?Fo3*xZWRf zVQ@%Sk-pm_uIkvs0&XhU3~EmRyx6;^HX#vPWNBxO=rI)32g>vF5$!Bs!iw~sHz+ac zLNCb5zhO_30YjKHCIXW(uxLGag^@vnUx@}H(Dnph2Wz*YU!|hp#61+BS!a0%%o_Au zHtsKMVf$9`{?#Q8kQL;i_XAb4{lx+Y$9LOLziUh)UH#Mi`QzuHjr9KZ@`wC_^4)IZ z{gVeD?jOilw#SVRH>)4+K449O(qnN!`7m92fAe0*glr2b=Ti&tEw`YY=Wjyeg9pm{ z>WmrxcD``){OOkR{;Oxo`P|0)!Uj8g{{2%tJB;`||MbKC>Gvxe=li43M-EQQnitP! zpDX82Zk^w*2Sm<66dLI>x(P%pJ+$`_sXA)3F=Sto5+d4;rj@*(m#}a)k)n`@%3>#0 z_7J=0G1H{f`L#EgR|xsYk%`6><|G1_b?i)ZBW_y< zU}Sxay3L9m3wY22ys3Hru*43b)r$bd9>EJ$MShi%$YYSL2bRv9M9Z=+pwTrK>>~9^ zEmjO9q7H}L5+>2AQ1zG~EOx=w?$ZsdCD|UEmu%KO6U5S+@rS$7A~uf93BH=VqcMo< zz;aD8znj87+z~@+DBRfP0L><*f{5aVbJGL^YdYnKY`~E~3$umI2z8k!fTx)$1`v}x zmsG?}JDhW7bCiVCYS{5C38^U(CyhaML&sLRJjdpk&U`~H3WOi^FqAS+aUrr4&kN5- zr@0l4lVkde2G^9ps|_7nN@=nVoheA*(?nz|@cEd~tKKh{e0!>pqk{wIE|UCQxNgMb z1C2z_ele^bGJA4jFJywdPPR((nD+xwlq$0Kl<3ecM6@!urZk6pSl8=9;jZR{dnJLD z0`6X?b1FEjCVg7bakEVv#=L1jWyHs_F~HroL(7JUOsFX|Tq>l&cMIE;OwinpSlcFy zM^GGvOvmiugsN5~a6`xN;@N=cXb1Z(;-($WWwgehgwj2JN9cmVMc5IHNBr1Byh$|z z3i}raG+z86Wtd{cB4QdJCec8_c4UwpnSsAI)ZvJhP_KTI3yAjGQpN69adOBp*?}4G! 
zmE&C$F`9rOu?AzLcY~0GpIF0}&$3{PM{M>dZJ_E6r zf&m~9whquP5?d8vuPYL48;GrFhpxl4g=ei%Cpp1-RhJbhbsYG-Bwi^qUYR(oXtWuU zv8;;Tnq2?EgeS*o5rGyV#A#r8fk-!?;UQ{Olm}~^sEkIOp#a|-Ct}aR zlxXpcxWmL1)o(rAHF-3g?wcLv6LQ}LmIS!SHt-u@@OxU-am2)rAsByD!qp?Q7&Ky6 ziX;3?76R@Uh|`RHpTJ`ag5zu*k^c?7EPxjTl(DZ8^%8Oc>4!*4OrKFf zuQRxZBf*|v2S1IqaKNPifVk2Yarc5CX>0KFTKk$8k|uDacZAjniMG~p>P4gWfNqpt zo1hcF6%A<=>@+WGBj$KN&>F8I|F9B3JjE9(2p5ivylXZ!49IqHysMnt0=-@+p+Ql) zhMhdi4DHxeNY;iOd<)kD6Ny|1m1mC+6kMEORL@z%`=EWpI+?-4jfH0x*BHUOaCkCj zEsz+8CTza5$XR$u>mQKHk;pLHE>@FST-&S%^M=pZLECH`Y!vJ)IT*qrUV#l3NkRp-JaQcm%H=kpwfPH*gp80$}-;Mp+@5W?N7IJJFgp??djav)CSvf z_PBXEnxP5`_TkO;%Qr6z?7*|96R%$`w`XqCuJ*kz+FSeWFQ?mwx7x*dQnP9Azi1a% z-^|~{2crD~DnQ89?)XH5y*5$7TmXyI}<{!|K6`ZVUb0)uc5JSfQ&k-s^V zsC+^PAf5*BuYZ-$R3uUP+&JD{LVHqx{n+1v6|>nuSSgBntn464k(rZbMHDlLZ^v65 zi@@`35>612uqG-S6O_+(6i(<%Z(~9Ikdzu@&jzC1t{)v-k6%?(C3XGPR^ppGN1Mtw zcOtP^Jei8PRl)o+mKu(PuSL_b{vH8Ii}c5OJl+bxl(8e!L?RxI0&I)DPr0#L=4TxJU`LE^tDJ1oJU4I90^xlsX}u3W&kk z@gxpj*Ew4#*3N+(a#F)kMYYAm6!d_-TX~IiSJ-u+i>eSp;(8>S9vX_f)!Mw9oqF@| z#cAgDY4+A>W*gxG<63BeW8H$$f>M_$S_|g#KWG-rh z`R3K*J#d`e72PK(2G=QI*f9!LFmdZ!4>c*$=|MQXg%QJZXI{*;s+%ITF$Uobh=TA? 
zkTt^6z$ObKL{b!?t-NBP&NsqA5MtV;AW%1?jhykGQ!r4J0qzL#HDocYF^W+Kj2d{+ zzS|#d&wRmr&Nuf9XSeRPcb9>Le*K|`Vk*HVUwg|uSy(O5U4QxF&H5Dh%eFnq?y9^- zfhb-wekBF0*&#*nXNN>ZRWhWIXlRO<<#e;qwcCb}Xf+NqUu^$kva zLne&e6_)CTQS>7!oOG4F9i1uCq@m}@%Jp5GVh|@MFB-rdp0q2GYa>cb>y0Z{BQey- zqf!)~6(aI;eIuPvB+D6<@;=kg{x|yBe}!jP;(b~|`7}15MpM2kHhyVmP?NBW?Q|)@ zn1r;aVVC!5{8tv~^FwCT!6ruqy=c5Y5{_t5_8T5lQ%7rw2xKbhhn*bM*Z|UKyc@Q= zJ_tWoo(%a0V&vBa#J-N^IXu$92B~A+7fpNY3I<>c)-U7`R$tfcf3|m*>JF?vgGnvV z$y4wRp>GIW(>}MHz%rXafD%jqEHu4}d(pbn1{$Rhw#kfp=el?ejvrdiC>K4J3yOM9 zW}Q#yxcsXDl|A+_3Rk7`?-jt|aq+OE{8B`oW)Be0d<*f z7=RdtaPpzChG;M-M5m@?MchA|*gA`?>>I-K~y$=A>=w z-0^}f0wA3uK1p(_vIs^V7a#5@7o9a12h1mV z4Jq#*+(Wgzm!%K6d*_X(3b}IUk8gJN--2^WHW22xZUU+5T6qB1+|r#hK$Y&H_ojPm zi%xAjp)ead8nwnTxNz)4fzWJPK$_gc;}A@2q+IYvb+=KM&vSGSeZn~Lmp>__@${Ed z>cp>-=&uK#qCbz#*C!Usjt~%2HGWVv*~$6V%e;z#d-oVTI6MQ^$i|(#woo)<$b% z9lcUi*#meU%H8cr{_3J>j|5pnXE#LNZBvj`ZV+CQT;W~8OW6=W!i;j!O*XModL`os zAi3Az2s@J81FtxTT6qW$L+Dv)(Yi+h=AjQBBAgw^m^HxV287$3PCjY%)wwob2bbKx zGN0K;??Zj$;rP|G5;ytlAtgDKxbe-yR9cvf5ha@LRU)w%AjeV(4ZQduh_MuN;g6Qg zPQ07>7ANb;5)csf)6olGMqJQnfGN#9e%X5WtH+T*!)WsjmdaulW;xj+c; zynR`Cw|*Z$4HJ{}fdCQgtbmxvt+KOH5Xj3}O@M^|_>olu-N6SxNSuNP!b4vyn%@_g z!CPYdkA_aun-H9qi1sAXF&II&aEC;?cf`G~AB0Am)OuL!brrem63C(U^+jMO`^8}d zxN9s&I6gR}rh1~0-h`TnI003ph;w)HliHvfb~w+asZ)x$`43#9@>0boT~RWBLh_W} z06+LzV!up>76T3!Uc7YOer}#*Fh|(>5py1lUwl$Z3lGGAZm(xJ!QKo`4{(6@pLnB1 zHS2wFhd+TCHN;**Nq)efnnM0R=Hx}A|M^F1Iu&PDN}}iAB!8qpc1+r&#eVb?`=hQf zQ{qbeA9_UR($RcZrz86#gF`(7T69P{MBmo&AT%cWwG>?09$=M9X?;C?@o-Y^rDLN` z-43K<$ssKol{yidIvz4IG@!-g18_s9=I~O9{`4>81K1|^mhU(wSOGNtld`$G@@_CL xJ9Wcejy*%{^(bE;eOt$a*bl>e1a6?_Kl_RFB>n6suwbzd{~!5$+onim3jka-v04BC literal 0 HcmV?d00001 diff --git a/build-tools/build_iso/gather_packages.pl b/build-tools/build_iso/gather_packages.pl new file mode 100755 index 00000000..2a239930 --- /dev/null +++ b/build-tools/build_iso/gather_packages.pl @@ -0,0 +1,122 @@ +#!/usr/bin/perl + +# Copy/pasted from http://www.smorgasbork.com/content/gather_packages.txt +# As 
referenced by http://www.smorgasbork.com/2012/01/04/building-a-custom-centos-7-kickstart-disc-part-2/ + +use XML::Simple; + +my ($comps_file, $rpm_src_path, $rpm_dst_path, $arch, @extra_groups_and_packages) = @ARGV; + +if (!-e $comps_file) +{ + print_usage ("Can't find '$comps_file'"); +} +if (!-e $rpm_src_path) +{ + print_usage ("RPM source path '$rpm_src_path' does not exist"); +} +if (!-e $rpm_dst_path) +{ + print_usage ("RPM destination path '$rpm_dst_path' does not exist"); +} +if (!$arch) +{ + print_usage ("Architecture not specified"); +} + +#### we always gather core and base; note that for CentOS 7, we also need +#### to include the grub2 package, or installation will fail +@desired_groups = ('core', 'base', 'grub2'); +foreach (@extra_groups_and_packages) +{ + push (@desired_groups, $_); +} + +$regex = '^(' . join ('|', @desired_groups) . ')$'; + +print "reading $comps_file...\n"; +print "getting RPMs from $rpm_src_path...\n"; + +$xml = new XML::Simple; +$comps = $xml->XMLin($comps_file); + +$cmd = "rm $rpm_dst_path/*"; +print "$cmd\n"; +`$cmd`; + +%copied_groups = {}; +%copied_packages = {}; + +foreach $group (@{$comps->{group}}) +{ + $id = $group->{id}; + if ($id !~ m#$regex#) + { + next; + } + + print "#### group \@$id\n"; + $packagelist = $group->{packagelist}; + foreach $pr (@{$packagelist->{packagereq}}) + { + if ($pr->{type} eq 'optional' || $pr->{type} eq 'conditional') + { + next; + } + + $cmd = "cp $rpm_src_path/" . $pr->{content} . "-[0-9]*.$arch.rpm" + . " $rpm_src_path/" . $pr->{content} . "-[0-9]*.noarch.rpm $rpm_dst_path"; + print "$cmd\n"; + `$cmd 2>&1`; + + $copied_packages{$pr->{content}} = 1; + } + + $copied_groups{$group} = 1; +} + +#### assume that any strings that weren't matched in the comps file's group list +#### are actually packages + +foreach $group (@desired_groups) +{ + if ($copied_groups{$group}) + { + next; + } + + $cmd = "cp $rpm_src_path/" . $group . "-[0-9]*.$arch.rpm" + . " $rpm_src_path/" . $group . 
"-[0-9]*.noarch.rpm $rpm_dst_path"; + print "$cmd\n"; + `$cmd 2>&1`; +} + +sub print_usage +{ + my ($msg) = @_; + + ($msg) && print "$msg\n\n"; + + print <<__TEXT__; + +parse_comps.pl comps_file rpm_src_path arch [xtra_grps_and_pkgs] + + comps_file the full path to the comps.xml file (as provided + in the original distro + + rpm_src_path the full path to the directory of all RPMs from + the distro + + rpm_dst_path the full path to the directory where you want + to save the RPMs for your kickstart + + arch the target system architecture (e.g. x86_64) + + xtra_grps_and_pkgs a list of extra groups and packages, separated by spaces + + +__TEXT__ + + exit; +} + diff --git a/build-tools/build_iso/image-dev.inc b/build-tools/build_iso/image-dev.inc new file mode 100644 index 00000000..d5de593a --- /dev/null +++ b/build-tools/build_iso/image-dev.inc @@ -0,0 +1,4 @@ +# The following packages will not be included in the customer ISO +# +# They are exceptional packages only to be included in developer builds +enable-dev-patch diff --git a/build-tools/build_iso/image.inc b/build-tools/build_iso/image.inc new file mode 100644 index 00000000..d1fe0c3e --- /dev/null +++ b/build-tools/build_iso/image.inc @@ -0,0 +1,364 @@ +# List of CGTS packages to be included/installed in ISO +# If these have dependencies, they will be pulled in automatically +# +acpid +audit +build-info +bash +ceph-manager +cgcs-patch-agent +cgcs-patch-controller +cgts-mtce-compute +cgts-mtce-control +cgts-mtce-storage +computeconfig +computeconfig-standalone +computeconfig-subfunction +compute-huge +dnsmasq +dnsmasq-utils +dpkg +filesystem-scripts +fm-api +gdb +grub2 +grub2-efi +grub2-efi-modules +grub2-tools +haproxy +io-monitor +iperf3 +isomd5sum +iscsi-initiator-utils +iscsi-initiator-utils-iscsiuio +kmod-drbd +libevent +libtrap-handler +libvirt-docs +libvirt-python +logrotate +lshell +net-snmp-libs +net-snmp-python +net-snmp-utils +nfv +nfv-common +nfv-plugins +nfv-tools +nfv-vim +nfv-client +nova-utils 
+ntpdate +openldap +openldap-clients +openldap-servers +openssh +openssh-clients +openssh-server +openstack-ras +pam +parted +platform-util +platform-util-noncontroller +python-cephclient +python-keyring +qemu-kvm-tools-ev +rsync +sm-client +sm-tools +snmp-ext +storageconfig +sysinv +sysinv-agent +tsconfig +tboot +rabbitmq-server +cgts-client +python-django-horizon +libvirt +libvirt-client +libvirt-daemon +libvirt-daemon-config-network +libvirt-daemon-config-nwfilter +libvirt-daemon-driver-lxc +libvirt-daemon-driver-network +libvirt-daemon-driver-nodedev +libvirt-daemon-driver-nwfilter +libvirt-daemon-driver-qemu +libvirt-daemon-driver-secret +libvirt-daemon-driver-storage +nova-api-proxy +configutilities +controllerconfig +fm-common +fm-mgr +logmgmt +cgts-mtce-common +cgts-mtce-common-pmon +cgts-mtce-common-rmon +cgts-mtce-common-hostw +cgts-mtce-common-hwmon +cgts-mtce-common-guestAgent +cgts-mtce-common-guestServer +cgcs-patch +patch-alarm +# patch-boot-args is not ported to centos yet +wrs-ssl +tss2 +tpm2-tools +tpm2-openssl-engine +libtpms +swtpm +swtpm-tools +swtpm-cuse +monitor-tools +python2-aodhclient +openstack-aodh-api +openstack-aodh-evaluator +openstack-aodh-expirer +openstack-aodh-listener +openstack-aodh-notifier +python-ceilometer +#openstack-ceilometer-alarm provided in openstack-aodh-compat package +openstack-aodh-compat +openstack-ceilometer-api +openstack-ceilometer-collector +openstack-ceilometer-common +openstack-ceilometer-ipmi +openstack-ceilometer-notification +openstack-ceilometer-polling +openstack-cinder +openstack-glance +openstack-heat-api +openstack-heat-api-cfn +openstack-heat-api-cloudwatch +openstack-heat-common +openstack-heat-engine +openstack-ironic-api +openstack-ironic-common +openstack-ironic-conductor +python2-ironicclient +# doc generation turned off in Mitaka by default +#python-django-horizon-doc +openstack-keystone +openstack-murano-api +openstack-murano-cf-api +openstack-murano-common +openstack-murano-doc 
+openstack-murano-engine +openstack-murano-ui +openstack-murano-ui-doc +python2-muranoclient +python-muranoclient-doc +openstack-magnum-api +openstack-magnum-common +openstack-magnum-conductor +openstack-magnum-doc +openstack-magnum-ui +python2-magnumclient +python-magnum +python-magnumclient-doc +python-magnumclient-tests +python-magnum-tests +openstack-neutron +openstack-neutron-common +openstack-neutron-sriov-nic-agent +openstack-neutron-ml2 +openstack-neutron-openvswitch +python-neutron +openstack-nova-api +openstack-nova-cells +openstack-nova-cert +openstack-nova-common +openstack-nova-compute +openstack-nova-conductor +openstack-nova-console +openstack-nova-network +openstack-nova-novncproxy +# openstack-nova-objectstore removed in Mitaka +#openstack-nova-objectstore +openstack-nova-scheduler +openstack-nova-serialproxy +openstack-nova-spicehtml5proxy +openstack-nova-placement-api +python-nova +python2-novaclient +python2-openstackclient +python2-oslo-log +python2-django-openstack-auth +python2-six +#base-files is not yet ported to CENTOS +cgcs-users +namespace-utils +nss-pam-ldapd +# thin-provisioning-tools is not yet ported to CENTOS +# cluster-resource-agents is not yet ported to CENTOS +dhcp +nfs-utils +initscripts +# note -- the "systemd" package provides udev +systemd +# udev-extraconf is not yet ported to CENTOS +# cdrkit is only available as a src.rpm +python-d2to1 +# eucatools is not yet ported to CENTOS +facter +hiera +# libaugesas-ruby is not yet ported to CENTOS +nfscheck +python2-pecan +# pseudo not available +# ps-byte-compile is not yet ported to CENTOS +python +python-configobj +python-pep8 +# python-pyflakes is not yet ported to CENTOS +python2-rsa +# python-xmlrpclib is not yet ported to CENTOS +ruby-shadow +swig +syslinux +iotop +iptables +iptables-services +iptables-utils +ldapscripts +lighttpd +lighttpd-fastcgi +lighttpd-mod_geoip +lighttpd-mod_mysql_vhost +novnc +ntp +ntp-perl +procps-ng +python-daemon +python2-gunicorn +python-pyudev 
+python-smartpm +# shadow is not yet ported to CENTOS +shadow-utils +syslog-ng +syslog-ng-libdbi +drbd +drbd-bash-completion +# drbd-debuginfo +drbd-heartbeat +# drbd-kernel-debuginfo +drbd-pacemaker +drbd-udev +drbd-utils +# drbd-tools is not yet ported to CENTOS +# nfv-docs should not be included +# nfv-tests should not be included +curl +lvm2 +# tgt is ported, but not yet added here +sm +sm-common +sm-common-libs +sm-db +sm-api +sm-eru +time +puppet +puppet-manifests +openstack-dashboard +# openstack-dashboard-theme TODO: fix build-iso +dhclient +postgresql +postgresql-server +postgresql-contrib +python-psycopg2 +setup +targetcli +sudo +pxe-network-installer +strace +resource-agents +lldpd +wget +bind-utils +selinux-policy +pm-utils +centos-release +tcpdump +config-gate +config-gate-compute +sysstat +smartmontools +collector +io-scheduler +fm-doc +vm-topology +python2-wsme +ceph +ceph-common +ceph-fuse +ceph-radosgw +libcephfs1 +python-ceph-compat +python-cephfs +socat +irqbalance +kmod-e1000e +kmod-i40e +kmod-ixgbe +kmod-ixgbevf +kmod-tpm +kmod-integrity +vim-enhanced +wrs-heat-templates +# heat-contrib-nova-flavor is now included with openstack-heat +tis-extensions +tis-extensions-controller +qat17 +mlx4-config +host-guest-comm +guest-scale-helper +python-networking-odl +qemu-kvm-ev +qemu-img-ev +# for realtime kernel +kernel-rt +kernel-rt-kvm +kmod-e1000e-rt +kmod-i40e-rt +kmod-ixgbe-rt +kmod-tpm-rt +kmod-integrity-rt +rtctl +rt-setup +qat17-rt +kernel-rt-tools +# For low-latency compute +kmod-drbd-rt +snmp-audittrail +OVMF +# neutron bgp +python2-networking-bgpvpn +python-networking-bgpvpn-dashboard +python-networking-bgpvpn-heat +python2-neutron-dynamic-routing +python2-ryu +python-ryu-common +openvswitch +python2-networking-sfc +openstack-panko-api +openstack-panko-common +python-panko +python2-pankoclient +# distributed cloud +distributedcloud-dcmanager +distributedcloud-client-dcmanagerclient +distributedcloud-dcorch +# ima plugin for RPM 
+rpm-plugin-systemd-inhibit +platform-kickstarts +python-lefthandclient +python-3parclient + +# Add debugging tools +perf +zip +unzip +traceroute diff --git a/build-tools/build_iso/isolinux.cfg b/build-tools/build_iso/isolinux.cfg new file mode 100644 index 00000000..d6e00844 --- /dev/null +++ b/build-tools/build_iso/isolinux.cfg @@ -0,0 +1,125 @@ +default vesamenu.c32 +timeout 600 + +display boot.msg + +# Clear the screen when exiting the menu, instead of leaving the menu displayed. +# For vesamenu, this means the graphical background is still displayed without +# the menu itself for as long as the screen remains in graphics mode. +menu clear +menu background splash.png +menu title CentOS 7 +menu vshift 8 +menu rows 18 +menu margin 8 +#menu hidden +menu helpmsgrow 15 +menu tabmsgrow 13 + +# Border Area +menu color border * #00000000 #00000000 none + +# Selected item +menu color sel 0 #ffffffff #00000000 none + +# Title bar +menu color title 0 #ff7ba3d0 #00000000 none + +# Press [Tab] message +menu color tabmsg 0 #ff3a6496 #00000000 none + +# Unselected menu item +menu color unsel 0 #84b8ffff #00000000 none + +# Selected hotkey +menu color hotsel 0 #84b8ffff #00000000 none + +# Unselected hotkey +menu color hotkey 0 #ffffffff #00000000 none + +# Help text +menu color help 0 #ffffffff #00000000 none + +# A scrollbar of some type? Not sure. +menu color scrollbar 0 #ffffffff #ff355594 none + +# Timeout msg +menu color timeout 0 #ffffffff #00000000 none +menu color timeout_msg 0 #ffffffff #00000000 none + +# Command prompt text +menu color cmdmark 0 #84b8ffff #00000000 none +menu color cmdline 0 #ffffffff #00000000 none + +# Do not display the actual menu unless the user presses a key. All that is displayed is a timeout message. + +menu tabmsg Press Tab for full configuration options on menu items. 
+ +menu separator # insert an empty line +menu separator # insert an empty line + +label tis + menu label ^Install Titanium Cloud + menu default + kernel vmlinuz + append initrd=initrd.img inst.ks=cdrom:/dev/cdrom:/ks/ks.cfg + +label linux + menu label ^Install CentOS 7 + kernel vmlinuz + append initrd=initrd.img inst.stage2=hd:LABEL=CentOS\x207\x20x86_64 quiet + +label check + menu label Test this ^media & install CentOS 7 + kernel vmlinuz + append initrd=initrd.img inst.stage2=hd:LABEL=CentOS\x207\x20x86_64 rd.live.check quiet + +menu separator # insert an empty line + +# utilities submenu +menu begin ^Troubleshooting + menu title Troubleshooting + +label vesa + menu indent count 5 + menu label Install CentOS 7 in ^basic graphics mode + text help + Try this option out if you're having trouble installing + CentOS 7. + endtext + kernel vmlinuz + append initrd=initrd.img inst.stage2=hd:LABEL=CentOS\x207\x20x86_64 xdriver=vesa nomodeset quiet + +label rescue + menu indent count 5 + menu label ^Rescue a CentOS system + text help + If the system will not boot, this lets you access files + and edit config files to try to get it booting again. + endtext + kernel vmlinuz + append initrd=initrd.img inst.stage2=hd:LABEL=CentOS\x207\x20x86_64 rescue quiet + +label memtest + menu label Run a ^memory test + text help + If your system is having issues, a problem with your + system's memory may be the cause. Use this utility to + see if the memory is working correctly. 
+ endtext + kernel memtest + +menu separator # insert an empty line + +label local + menu label Boot from ^local drive + localboot 0xffff + +menu separator # insert an empty line +menu separator # insert an empty line + +label returntomain + menu label Return to ^main menu + menu exit + +menu end diff --git a/build-tools/build_iso/ks.cfg b/build-tools/build_iso/ks.cfg new file mode 100644 index 00000000..75877ade --- /dev/null +++ b/build-tools/build_iso/ks.cfg @@ -0,0 +1,36 @@ +install +text +lang en_US.UTF-8 +keyboard us +reboot --eject +firstboot --enable +auth --enableshadow --passalgo=sha512 + +# Network information +network --bootproto=dhcp --device=enp0s3 --onboot=on --ipv6=auto --activate +network --bootproto=static --device=enp0s8 --ip=10.10.10.12 --netmask=255.255.255.0 --ipv6=auto --activate +network --device=lo --hostname=localhost.localdomain + +rootpw --lock +timezone America/New_York --isUtc +user --groups=wheel --name=wrsroot --password=$6$c3gaCcJlh.rp//Yx$/mIjNNoUDS1qZldBL29YSJdsA9ttPA/nXN1CPsIcCmionXC22APT3IoRSd9j5dPiZoviDdQf7YxLsOYdieOQr/ --iscrypted --gecos="wrsroot" + +# System bootloader configuration +#bootloader --location=mbr --boot-drive=sda + +autopart --type=lvm +# Partition clearing information +clearpart --all --initlabel --drives=sda + +cdrom +#repo --name=base --baseurl=http://mirror.cogentco.com/pub/linux/centos/7/os/x86_64/ +#url --url="http://mirror.cogentco.com/pub/linux/centos/7/os/x86_64/" + +%packages --nobase --ignoremissing +@^minimal +@core +kexec-tools +net-tools +# CGTS packages +# end CGTS packages +%end diff --git a/build-tools/build_iso/minimal_rpm_list.txt b/build-tools/build_iso/minimal_rpm_list.txt new file mode 100644 index 00000000..8dd533a6 --- /dev/null +++ b/build-tools/build_iso/minimal_rpm_list.txt @@ -0,0 +1,258 @@ +acl +alsa-lib +audit +audit-libs +authconfig +basesystem +bind-libs-lite +bind-license +binutils +biosdevname +btrfs-progs +bzip2-libs +ca-certificates +centos-logos +chkconfig +coreutils +cpio 
+cracklib +cracklib-dicts +cronie +cronie-anacron +crontabs +cryptsetup +cryptsetup-libs +curl +cyrus-sasl-lib +dbus +dbus-glib +dbus-libs +dbus-python +device-mapper +device-mapper-event +device-mapper-event-libs +device-mapper-libs +device-mapper-multipath +device-mapper-multipath-libs +device-mapper-persistent-data +diffutils +dmidecode +dosfstools +dracut +dracut-config-rescue +dracut-network +e2fsprogs +e2fsprogs-libs +ebtables +efibootmgr +efivar-libs +elfutils-libelf +elfutils-libs +ethtool +expat +file +file-libs +filesystem +findutils +fipscheck +fipscheck-lib +firewalld +freetype +gawk +gdbm +gettext +gettext-libs +glib2 +glibc +glibc-common +glib-networking +gmp +gnupg2 +gnutls +gobject-introspection +gpgme +grep +groff-base +grub2 +grub2-efi +grub2-tools +grubby +gsettings-desktop-schemas +gzip +hardlink +hostname +hwdata +info +iproute +iprutils +iputils +jansson +json-c +kbd +kbd-legacy +kbd-misc +kernel-tools +kernel-tools-libs +kexec-tools +keyutils-libs +kmod +kmod-libs +kpartx +krb5-libs +less +libacl +libaio +libassuan +libattr +libblkid +libcap +libcap-ng +libcom_err +libconfig +libcroco +libcurl +libdaemon +libdb +libdb-utils +libdrm +libedit +libestr +libffi +libgcc +libgcrypt +libgomp +libgpg-error +libgudev1 +libidn +libmnl +libmodman +libmount +libndp +libnetfilter_conntrack +libnfnetlink +libnl +libnl3 +libnl3-cli +libpcap +libpciaccess +libpipeline +libproxy +libpwquality +libreport-filesystem +libselinux +libselinux-python +libselinux-utils +libsemanage +libsepol +libss +libssh2 +libstdc++ +libsysfs +libtasn1 +libteam +libunistring +libuser +libutempter +libuuid +libverto +libxml2 +libxslt +linux-firmware +lldpad +lsscsi +lua +lvm2 +lvm2-libs +lzo +make +man-db +mariadb-libs +mdadm +microcode_ctl +mokutil +mozjs17 +ncurses +ncurses-base +ncurses-libs +nettle +newt +newt-python +nspr +nss +nss-softokn +nss-softokn-freebl +nss-sysinit +nss-tools +nss-util +numactl-libs +openscap +openscap-scanner +openssl +openssl-libs +os-prober +p11-kit 
+p11-kit-trust +passwd +pciutils-libs +pcre +pinentry +pkgconfig +policycoreutils +popt +procps-ng +pth +pygobject3-base +pygpgme +pyliblzma +python +python-backports +python-backports-ssl_match_hostname +python-configobj +python-decorator +python-iniparse +python-libs +python-perf +python-pycurl +python-pyudev +python2-setuptools +python-slip +python-slip-dbus +python-urlgrabber +pyxattr +qrencode-libs +rdma +readline +rootfiles +rpm +rpm-build-libs +rpm-libs +rpm-python +sed +shared-mime-info +shim +shim-unsigned +slang +snappy +sqlite +systemd +systemd-libs +systemd-sysv +sysvinit-tools +tar +tcp_wrappers-libs +teamd +time +trousers +tzdata +ustr +util-linux +virt-what +which +xfsprogs +xml-common +xz +xz-libs +zlib +lksctp-tools +boost-thread +boost-system diff --git a/build-tools/build_iso/openstack_kilo.txt b/build-tools/build_iso/openstack_kilo.txt new file mode 100644 index 00000000..6150b175 --- /dev/null +++ b/build-tools/build_iso/openstack_kilo.txt @@ -0,0 +1,2 @@ +# Files copied in from /import/mirrors/CentOS/7.2.1511/cloud/x86_64/openstack-kilo + diff --git a/build-tools/build_minimal_iso/README b/build-tools/build_minimal_iso/README new file mode 100644 index 00000000..1259094d --- /dev/null +++ b/build-tools/build_minimal_iso/README @@ -0,0 +1,112 @@ +This document describes how to generate a DVD image (.iso) which installs +a minimal CentOS installation where the entirety of the installed system is +build from the provided source. 
+ +There are three parts to this document: + How to build binary RPMs from source RPMS + How to build the install disk from the binary RPMS + How to install the minimal system + +------------------------------------------------------------------------------- +How to build the binary RPMs from the source RPMS +------------------------------------------------------------------------------- + +(note - building the binary RPMs is expected to take a long time, ~ 15 hours +on a typical system) + +The source RPMs in the "srcs" subdirectory are compiled in an environment +called "mock" which builds each package in a chroot jail to ensure the output +is not influenced by the build system. Mock is controlled by a config file. +The example srcs/build.cfg is provided as a starting point, however it does +to be adjusted for your build environment. In particular, the paths and repo +locations need to be configured for your system. It is highly recommended that +a local mirror of the CentOS repos be used for speed. The example config file +is configured to use an localhost http mirror of the CentOS repos. + +To build the binary RPMs from the source RPMs change to the "srcs" subdirectory +and execute the "build.sh" script. + +# cd srcs +# ./build.sh + +This will use build.cfg and mock to compile every source RPM listed in list.txt. +The output binary RPMs will be in srcs/results. There will also be success.txt +and fail.txt files which list any RPMs that failed to build. Debugging why RPMs +fail to build is beyond the scope of this document, however be aware that RPMs +often fail in the "check" phase of the build (i.e. the package compiled fine +but tests failed). Notably, the python package may fail due to a "test_nis" +failure, and the "attr" and "e2fsprogs" packages may or may not fail depending +on the host file system used for compilation. 
These failures may or may not be +false positives (for example, the mock environment does not have NIS configured +which is why python's test_nis reports a failure -- the code is actually fine, +we just can't run the test in the mock environment). + +To disable the check phase, add the line + +config_opts['rpmbuild_opts'] = '--nocheck' + +to build.cfg. You can then run build.sh again with list.txt containing +packages which failed: + +# cp list.txt list.txt.orig +# cp fail.txt list.txt +# ./build.sh + +------------------------------------------------------------------------------- +How to build the install disk from the binary RPMS +------------------------------------------------------------------------------- + +Once srcs/results is populated with binary RPMs, an installation disk can be +built. Edit the yum.conf file and place an (arbitrary) path for yum log and +cache locations, and make sure that the repository path points to srcs/results. +Run the build_centos.sh script to build the installation DVD: + +# ./build_centos.sh + +Scroll up the output to the top of the "Spawning worker" messages. You should +observe a line indicating that there are no remaining unresolved dependencies: + +... +Installing PKG=dhcp-common PKG_FILE=dhcp-common-4.2.5-42.el7.centos.tis.1.x86_64.rpm PKG_REL_PATH=dhcp-common-4.2.5-42.el7.centos.tis.1.x86_64.rpm PKG_PATH=/localdisk/loadbuild/jmckenna/centos/srcs/results/dhcp-common-4.2.5-42.el7.centos.tis.1.x86_64.rpm from repo local-std +dhcp-common +Debug: Packages still unresolved: + +Spawning worker 0 with 4 pkgs +Spawning worker 1 with 4 pkgs +Spawning worker 2 with 4 pkgs +... + +This is your confirmation that all required pacakges were found and installed +on the ISO. 
You should also now see a new file called "centosIso.iso": + +# ls -l centosIso.iso + +------------------------------------------------------------------------------- +How to install the minimal system +------------------------------------------------------------------------------- + +The centosIso.iso file can be burned to a DVD or booted in a virtual +environment. It is configured to self-install on boot. After installation, +a user with sudo access must be created manually. The system can then be +booted. + +Power the system on with the DVD inserted. A system install will take place +(takes approximately 2 minutes). The system will then report an error and +ask you if you wish to report a bug, debug, or quit. Hit control-alt-F2 to +switch to a terminal window. Enter the following commands to change to the +installed system root, and create a (wrsroot) with sudo access: + +cd /mnt/sysimage +chroot . +groupadd -r wrs +groupadd -f -g 345 wrs_protected +useradd -m -g wrs -G root,wrs_protected,wheel -d /home/wrsroot -p cBglipPpsKwBQ -s /bin/sh wrsroot +exit + +Change back to the main window with control-alt-F1. +Hit 3 (the "Quit" option). The system will reboot (make sure you eject +the DVD or use your BIOS to boot from hard disk rather than DVD; the installer +will re-run if the DVD boots again). + +You can log into the system as user "wrsroot" with password "wrsroot". + diff --git a/build-tools/build_minimal_iso/README.2 b/build-tools/build_minimal_iso/README.2 new file mode 100644 index 00000000..b50db0a3 --- /dev/null +++ b/build-tools/build_minimal_iso/README.2 @@ -0,0 +1,5 @@ +The files in this directory are to be used as described at +http://twiki.wrs.com/PBUeng/DeliveryExtras#Minimal_CentOS_install + +They include the scripts (and customer README) for building a minimual +CentOS ISO from our modified sources. 
diff --git a/build-tools/build_minimal_iso/build.cfg b/build-tools/build_minimal_iso/build.cfg new file mode 100644 index 00000000..76564b7e --- /dev/null +++ b/build-tools/build_minimal_iso/build.cfg @@ -0,0 +1,108 @@ +config_opts['root'] = 'jmckenna-centos/mock' +config_opts['target_arch'] = 'x86_64' +config_opts['legal_host_arches'] = ('x86_64',) +config_opts['chroot_setup_cmd'] = 'install @buildsys-build' +config_opts['dist'] = 'el7' # only useful for --resultdir variable subst +config_opts['releasever'] = '7' + +config_opts['yum.conf'] = """ +[main] +keepcache=1 +debuglevel=2 +reposdir=/dev/null +logfile=/var/log/yum.log +retries=20 +obsoletes=1 +gpgcheck=0 +assumeyes=1 +syslog_ident=mock +syslog_device= + +# repos +[my-build] +name=my-build +baseurl=http://127.0.0.1:8088/localdisk/loadbuild/centos/src/results +enabled=1 +skip_if_unavailable=1 +metadata_expire=0 + +[base] +name=CentOS-$releasever - Base +#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os&infra=$infra +#baseurl=http://mirror.centos.org/centos/$releasever/os/$basearch/ +baseurl=http://127.0.0.1:8088/CentOS/7.2.1511/os/$basearch/ +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7 + +#released updates +[updates] +name=CentOS-$releasever - Updates +#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates&infra=$infra +#baseurl=http://mirror.centos.org/centos/$releasever/updates/$basearch/ +baseurl=http://127.0.0.1:8088/CentOS/7.2.1511/updates/$basearch/ +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7 + +#additional packages that may be useful +[extras] +name=CentOS-$releasever - Extras +#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=extras&infra=$infra +#baseurl=http://mirror.centos.org/centos/$releasever/extras/$basearch/ +baseurl=http://127.0.0.1:8088/CentOS/7.2.1511/extras/$basearch/ +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7 + +#additional 
packages that extend functionality of existing packages +[centosplus] +name=CentOS-$releasever - Plus +#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=centosplus&infra=$infra +#baseurl=http://mirror.centos.org/centos/$releasever/centosplus/$basearch/ +baseurl=http://127.0.0.1:8088/CentOS/7.2.1511/centosplus/$basearch/ +gpgcheck=1 +enabled=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7 + +[epel] +name=Extra Packages for Enterprise Linux 7 - $basearch +baseurl=http://download.fedoraproject.org/pub/epel/7/$basearch +#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch +failovermethod=priority +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 + +[epel-debuginfo] +name=Extra Packages for Enterprise Linux 7 - $basearch - Debug +baseurl=http://download.fedoraproject.org/pub/epel/7/$basearch/debug +#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-7&arch=$basearch +failovermethod=priority +enabled=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 +gpgcheck=1 + +[epel-source] +name=Extra Packages for Enterprise Linux 7 - $basearch - Source +baseurl=http://download.fedoraproject.org/pub/epel/7/SRPMS +#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-7&arch=$basearch +failovermethod=priority +enabled=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 +gpgcheck=1 + + +""" +config_opts['environment']['BUILD_BY'] = 'jmckenna' +config_opts['environment']['BUILD_DATE'] = '2016-10-31 14:27:28 -0400' +config_opts['environment']['REPO'] = '/localdisk/designer/jmckenna/dev0019/cgcs-root' +config_opts['environment']['WRS_GIT_BRANCH'] = 'CGCS_DEV_0019' +config_opts['environment']['CGCS_GIT_BRANCH'] = 'CGCS_DEV_0019' +config_opts['macros']['%_no_cgcs_license_check'] = '1' +config_opts['macros']['%_tis_build_type'] = 'std' +config_opts['chroot_setup_cmd'] = 'install @buildsys-build pigz lbzip2 yum shadow-utils rpm-build lbzip2 gcc glibc-headers 
make gcc-c++ java-devel' +config_opts['macros']['%__gzip'] = '/usr/bin/pigz' +config_opts['macros']['%__bzip2'] = '/usr/bin/lbzip2' +config_opts['macros']['%_patch_confdir'] = '%{_sysconfdir}/patching' +config_opts['macros']['%_patch_scripts'] = '%{_patch_confdir}/patch-scripts' +config_opts['macros']['%_runtime_patch_scripts'] = '/run/patching/patch-scripts' +config_opts['macros']['%_tis_dist'] = '.tis' +#config_opts['rpmbuild_opts'] = '--nocheck' diff --git a/build-tools/build_minimal_iso/build.sh b/build-tools/build_minimal_iso/build.sh new file mode 100755 index 00000000..5f857ec1 --- /dev/null +++ b/build-tools/build_minimal_iso/build.sh @@ -0,0 +1,45 @@ +#!/bin/sh + +CREATEREPO=$(which createrepo_c) +if [ $? -ne 0 ]; then + CREATEREPO="createrepo" +fi + +# If a file listed in list.txt is missing, this function attempts to find the +# RPM and copy it to the local directory. This should not be required normally +# and is only used when collecting the source RPMs initially. +function findSrc { + local lookingFor=$1 + find $MY_REPO/cgcs-centos-repo/Source -name $lookingFor | xargs -I '{}' cp '{}' . + find $MY_REPO/cgcs-tis-repo/Source -name $lookingFor | xargs -I '{}' cp '{}' . + find $MY_WORKSPACE/std/rpmbuild/SRPMS -name $lookingFor | xargs -I '{}' cp '{}' . +} + +rm -f success.txt +rm -f fail.txt +rm -f missing.txt +mkdir -p results +infile=list.txt + +while read p; do + + if [ ! -f "$p" ]; then + findSrc $p + if [ ! -f "$p" ]; then + echo "couldn't find" >> missing.txt + echo "couldn't find $p" >> missing.txt + continue + fi + echo "found $p" + fi + + mock -r build.cfg $p --resultdir=results --no-clean + if [ $? -eq 0 ]; then + echo "$p" >> success.txt + cd results + $CREATEREPO . + cd .. 
+ else + echo "$p" >> fail.txt + fi +done < $infile diff --git a/build-tools/build_minimal_iso/build_centos.sh b/build-tools/build_minimal_iso/build_centos.sh new file mode 100755 index 00000000..1fbcb030 --- /dev/null +++ b/build-tools/build_minimal_iso/build_centos.sh @@ -0,0 +1,62 @@ +#!/bin/bash + +# Build a basic CentOS system + +CREATEREPO=$(which createrepo_c) +if [ $? -ne 0 ]; then + CREATEREPO="createrepo" +fi + +function final_touches { + # create the repo + cd ${ROOTDIR}/${DEST}/isolinux + $CREATEREPO -g ../comps.xml . + + # build the ISO + printf "Building image $OUTPUT_FILE\n" + cd ${ROOTDIR}/${DEST} + chmod 664 isolinux/isolinux.bin + mkisofs -o $OUTPUT_FILE \ + -R -D -A 'oe_iso_boot' -V 'oe_iso_boot' \ + -b isolinux.bin -c boot.cat -no-emul-boot \ + -boot-load-size 4 -boot-info-table \ + -eltorito-alt-boot \ + -e images/efiboot.img \ + -no-emul-boot \ + isolinux/ + + isohybrid --uefi $OUTPUT_FILE + implantisomd5 $OUTPUT_FILE + + cd $ROOTDIR +} + +function setup_disk { + tar xJf emptyInstaller.tar.xz + mkdir ${DEST}/isolinux/Packages +} + +function install_packages { + cd ${DEST}/isolinux/Packages + ROOT=${ROOTDIR} ../../../cgts_deps.sh --deps=../../../${MINIMAL} + cd ${ROOTDIR} +} + + +ROOTDIR=$PWD +INSTALLER_SRC=basicDisk +DEST=newDisk +PKGS_DIR=all_rpms +MINIMAL=minimal_rpm_list.txt +OUTPUT_FILE=${ROOTDIR}/centosIso.iso + +# Make a basic install disk (no packages, at this point) +rm -rf ${DEST} +setup_disk + +# install the packages (initially from minimal list, then resolve deps) +install_packages + +# build the .iso +final_touches + diff --git a/build-tools/build_minimal_iso/cgts_deps.sh b/build-tools/build_minimal_iso/cgts_deps.sh new file mode 100755 index 00000000..57143b6c --- /dev/null +++ b/build-tools/build_minimal_iso/cgts_deps.sh @@ -0,0 +1,222 @@ +#!/bin/env bash +function generate_dep_list { + TMP_RPM_DB=$(mktemp -d $(pwd)/tmp_rpm_db_XXXXXX) + mkdir -p $TMP_RPM_DB + rpm --initdb --dbpath $TMP_RPM_DB + rpm --dbpath $TMP_RPM_DB --test 
-Uvh --replacefiles '*.rpm' >> $DEPDETAILLISTFILE 2>&1 + rpm --dbpath $TMP_RPM_DB --test -Uvh --replacefiles '*.rpm' 2>&1 \ + | grep -v "error:" \ + | grep -v "warning:" \ + | grep -v "Preparing..." \ + | sed "s/ is needed by.*$//" | sed "s/ >=.*$//" | sort -u > $DEPLISTFILE + rm -rf $TMP_RPM_DB +} + +function install_deps { + local DEP_LIST="" + local DEP_LIST_FILE="$1" + + rm -f $TMPFILE + + while read DEP + do + DEP_LIST="${DEP_LIST} ${DEP}" + done < $DEP_LIST_FILE + + echo "Debug: List of deps to resolve: ${DEP_LIST}" + + if [ -z "${DEP_LIST}" ] + then + return 0 + fi + + # go through each repo and convert deps to packages + + for REPOID in `grep '^[[].*[]]$' $YUM | grep -v '[[]main[]]' | awk -F '[][]' '{print $2 }'`; do + echo "TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --whatprovides ${DEP_LIST} --qf='%{name}'" + TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --qf='%{name}' --whatprovides ${DEP_LIST} | sed "s/kernel-debug/kernel/g" >> $TMPFILE + \rm -rf $TMP_DIR/yum-$USER-* + done + sort $TMPFILE -u > $TMPFILE1 + rm $TMPFILE + + DEP_LIST="" + while read DEP + do + DEP_LIST="${DEP_LIST} ${DEP}" + done < $TMPFILE1 + rm $TMPFILE1 + + # next go through each repo and install packages + local TARGETS=${DEP_LIST} + echo "Debug: Resolved list of deps to install: ${TARGETS}" + local UNRESOLVED + for REPOID in `grep '^[[].*[]]$' $YUM | grep -v '[[]main[]]' | awk -F '[][]' '{print $2 }'`; do + UNRESOLVED=" $TARGETS " + + if [[ ! 
-z "${TARGETS// }" ]]; then + REPO_PATH=$(cat $YUM | sed -n "/^\[$REPOID\]\$/,\$p" | grep '^baseurl=' | head -n 1 | awk -F 'file://' '{print $2}' | sed 's:/$::') + >&2 echo "TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --resolve $TARGETS --qf='%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}'" + TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --resolve $TARGETS --qf="%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}" | sort -r -V >> $TMPFILE + \rm -rf $TMP_DIR/yum-$USER-* + + while read STR + do + >&2 echo "STR=$STR" + if [ "x$STR" == "x" ]; then + continue + fi + + PKG=`echo $STR | cut -d " " -f 1` + PKG_FILE=`echo $STR | cut -d " " -f 2` + PKG_REL_PATH=`echo $STR | cut -d " " -f 3` + PKG_PATH="${REPO_PATH}/${PKG_REL_PATH}" + + >&2 echo "Installing PKG=$PKG PKG_FILE=$PKG_FILE PKG_REL_PATH=$PKG_REL_PATH PKG_PATH=$PKG_PATH from repo $REPOID" + cp $PKG_PATH . + if [ $? -ne 0 ] + then + >&2 echo " Here's what I have to work with..." + >&2 echo " TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --resolve $PKG --qf=\"%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}\"" + >&2 echo " PKG=$PKG PKG_FILE=$PKG_FILE REPO_PATH=$REPO_PATH PKG_REL_PATH=$PKG_REL_PATH PKG_PATH=$PKG_PATH" + fi + + echo $UNRESOLVED | grep $PKG + echo $UNRESOLVED | grep $PKG >> /dev/null + if [ $? 
-eq 0 ]; then + echo "$PKG found in $REPOID as $PKG" >> $BUILT_REPORT + echo "$PKG_PATH" >> $BUILT_REPORT + UNRESOLVED=$(echo "$UNRESOLVED" | sed "s# $PKG # #g") + else + echo "$PKG satisfies unknown target in $REPOID" >> $BUILT_REPORT + echo " but it doesn't match targets, $UNRESOLVED" >> $BUILT_REPORT + echo " path $PKG_PATH" >> $BUILT_REPORT + FOUND_UNKNOWN=1 + fi + done < $TMPFILE #<<< "$(TMPDIR=$TMP_DIR repoquery -c $YUM --repoid=$REPOID --arch=x86_64,noarch --resolve $TARGETS --qf=\"%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}\" | sort -r -V)" + \rm -rf $TMP_DIR/yum-$USER-* + TARGETS="$UNRESOLVED" + fi + done + >&2 echo "Debug: Packages still unresolved: $UNRESOLVED" + echo "Debug: Packages still unresolved: $UNRESOLVED" >> $WARNINGS_REPORT + echo "Debug: Packages still unresolved: $UNRESOLVED" >> $BUILT_REPORT + >&2 echo "" +} + +function check_all_explicit_deps_installed +{ + + PKGS_TO_CHECK=" " + while read PKG_TO_ADD + do + PKGS_TO_CHECK="$PKGS_TO_CHECK ${PKG_TO_ADD}" + done < $DEPLISTFILE + rpm -qp ${INSTALLDIR}/*.rpm --qf="%{name}\n" > $TMPFILE + + echo "checking... $PKGS_TO_CHECK vs ${INSTALLED_PACKAGE}" + + while read INSTALLED_PACKAGE + do + echo $PKGS_TO_CHECK | grep -q "${INSTALLED_PACKAGE}" + if [ $? -eq 0 ]; then + PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/^${INSTALLED_PACKAGE} //"` + PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/ ${INSTALLED_PACKAGE} / /"` + PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/ ${INSTALLED_PACKAGE}\$//"` + PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/^${INSTALLED_PACKAGE}\$//"` + fi + done < $TMPFILE + + if [ -z "$PKGS_TO_CHECK" ] + then + >&2 echo "All explicitly specified packages resolved!" 
+ else + >&2 echo "Could not resolve packages: $PKGS_TO_CHECK" + return 1 + fi + return 0 +} + +if [ "x${ROOT}" == "x" ]; then + ROOT=/localdisk/loadbuild/centos +fi + +ATTEMPTED=0 +DISCOVERED=0 +OUTPUT_DIR=${ROOT}/newDisk +YUM=${ROOT}/yum.conf +TMP_DIR=${ROOT}/tmp +DEPLISTFILE=${ROOT}/deps.txt +DEPDETAILLISTFILE=${ROOT}/deps_detail.txt +INSTALLDIR=${ROOT}/newDisk/isolinux/Packages + +BUILT_REPORT=${ROOT}/local.txt +WARNINGS_REPORT=${ROOT}/warnings.txt +LAST_TEST=${ROOT}/last_test.txt +TMPFILE=${ROOT}/cgts_deps_tmp.txt +TMPFILE1=${ROOT}/cgts_deps_tmp1.txt + +touch "$BUILT_REPORT" +touch "$WARNINGS_REPORT" + +for i in "$@" +do +case $i in + -d=*|--deps=*) + DEPS="${i#*=}" + shift # past argument=value + ;; +esac +done + +mkdir -p $TMP_DIR + +rm -f "$DEPDETAILLISTFILE" +# FIRST PASS we are being given a list of REQUIRED dependencies +if [ "${DEPS}x" != "x" ]; then + cat $DEPS | grep -v "^#" > $DEPLISTFILE + install_deps $DEPLISTFILE + if [ $? -ne 0 ]; then + exit 1 + fi +fi + +# check that we resolved them all +check_all_explicit_deps_installed +if [ $? -ne 0 ]; then + >&2 echo "Error -- could not install all explicitly listed packages" + exit 1 +fi + +ALL_RESOLVED=0 + +while [ $ALL_RESOLVED -eq 0 ]; do + cp $DEPLISTFILE $DEPLISTFILE.old + generate_dep_list + if [ ! -s $DEPLISTFILE ]; then + # no more dependencies! + ALL_RESOLVED=1 + else + DIFFLINES=`diff $DEPLISTFILE.old $DEPLISTFILE | wc -l` + if [ $DIFFLINES -eq 0 ]; then + >&2 echo "Warning: Infinite loop detected in dependency resolution. 
See $DEPLISTFILE for details -- exiting" + >&2 echo "These RPMS had problems (likely version conflicts)" + >&2 cat $DEPLISTFILE + + echo "Warning: Infinite loop detected in dependency resolution See $DEPLISTFILE for details -- exiting" >> $WARNINGS_REPORT + echo "These RPMS had problems (likely version conflicts)" >> $WARNINGS_REPORT + cat $DEPLISTFILE >> $WARNINGS_REPORT + + date > $LAST_TEST + + rm -f $DEPLISTFILE.old + exit 1 # nothing fixed + fi + install_deps $DEPLISTFILE + if [ $? -ne 0 ]; then + exit 1 + fi + fi +done + +exit 0 diff --git a/build-tools/build_minimal_iso/yum.conf b/build-tools/build_minimal_iso/yum.conf new file mode 100644 index 00000000..82c6be87 --- /dev/null +++ b/build-tools/build_minimal_iso/yum.conf @@ -0,0 +1,22 @@ + +[main] +cachedir=/localdisk/loadbuild/jmckenna/centos/yum/cache +keepcache=1 +debuglevel=2 +reposdir=/dev/null +logfile=/localdisk/loadbuild/jmckenna/centos/yum/yum.log +retries=20 +obsoletes=1 +gpgcheck=0 +assumeyes=1 +syslog_ident=mock +syslog_device= + +# repos +[local-std] +name=local-std +baseurl=file:///localdisk/loadbuild/jmckenna/centos/srcs/results +enabled=1 +skip_if_unavailable=1 +metadata_expire=0 + diff --git a/build-tools/certificates/TiBoot.crt b/build-tools/certificates/TiBoot.crt new file mode 100644 index 0000000000000000000000000000000000000000..2b30d2daa4ae4cc1f2c1369c7a3ef96dfab7d09e GIT binary patch literal 830 zcmXqLVzx49Vp3w_WLRGED?cu6EwceH8>d#AN85K^Mn+av27`D*ZUas>=1>+kVJ2rs zLjeOm5Ql?@-9N7+u_!a&kRPZ6D8kOe=3i2hSe|GoZy*a2;NlSr&&*3v2+Ax=Em8=s zEG|jSEmrW%OE%;&-~uUN=3x%W3^tGx=QT7lG%_$WG&i+0Hi#1EH8L|aG_r(pY43n0 zMkQoVGqN%;H!<=v7&I|*F*PwVG8|)xUA+1HwC{5lf88H#%e1U|Zf4a?+p}?1C8bLf zYS{f3Z*=E85x8vE`NrET5*%xmTZJu?c(VTd46`3sO&=(8F6QG&|8VC=*nW+YHGh;2 zUztAnv&&s?ku}xc4^lU8dSuNnDUc@o`eR+|Dmq?T{FLOkjIJB(w9`GgEEzy&HvUuIyfXVAYfE^w#%04SmaNzGyG6N^rT- zw)>dKmy)LVDb3}-K4iQKJezB z&61}LbA2Yv>n)UCvagM8R+JYh`Yg!6D`E zUYcg9qAn%Nk}RyQ=5M!YTpN_7^1t*=k>>vi+l!tVzMJ5(aC*}bQ7 "$MY_YUM_CONF" + sed -i 
"s%\[main\]%&\ncachedir=$YUM_CACHE%" "$MY_YUM_CONF" + sed -i "s%logfile=.*%logfile=$YUM_DIR/yum.log%" "$MY_YUM_CONF" + sed -i "s%LOCAL_BASE%file://%g" "$MY_YUM_CONF" + sed -i "s%MIRROR_BASE%file:///import/mirrors%g" "$MY_YUM_CONF" + sed -i "s%BUILD_ENV%$MY_BUILD_ENVIRONMENT%g" "$MY_YUM_CONF" + sed -i "s%/MY_BUILD_DIR%$MY_BUILD_DIR%g" "$MY_YUM_CONF" + sed -i "s%/MY_REPO_DIR%$MY_REPO%g" "$MY_YUM_CONF" + else + echo "ERROR: Could not find yum.conf or MOCK_CFG_PROTO" + exit 1 + fi +fi + +if [ ! -d "$YUM_CACHE" ]; then + mkdir -p "$YUM_CACHE" +fi + +echo "$MY_YUM_CONF" +exit 0 diff --git a/build-tools/create_dependancy_cache.py b/build-tools/create_dependancy_cache.py new file mode 100755 index 00000000..3a6c931a --- /dev/null +++ b/build-tools/create_dependancy_cache.py @@ -0,0 +1,674 @@ +#!/usr/bin/python + +import xml.etree.ElementTree as ET +import fnmatch +import os +import gzip +import getopt +import sys +import string + + +ns = { 'root': 'http://linux.duke.edu/metadata/common', + 'filelists': 'http://linux.duke.edu/metadata/filelists', + 'rpm': 'http://linux.duke.edu/metadata/rpm' } + +build_types=['std', 'rt'] +rpm_types=['RPM', 'SRPM'] +default_arch = 'x86_64' +default_arch_list = [ 'x86_64', 'noarch' ] +default_arch_by_type = {'RPM': [ 'x86_64', 'noarch' ], + 'SRPM': [ 'src' ] + } + +repodata_dir="/export/jenkins/mirrors" +if not os.path.isdir(repodata_dir): + repodata_dir="/import/mirrors" + if not os.path.isdir(repodata_dir): + print("ERROR: directory not found %s" % repodata_dir) + sys.exit(1) + +publish_cache_dir="%s/cgcs-tis-repo/dependancy-cache" % os.environ['MY_REPO'] +centos_repo_dir="%s/cgcs-centos-repo" % os.environ['MY_REPO'] +third_party_repo_dir="%s/cgcs-3rd-party-repo" % os.environ['MY_REPO'] +tis_repo_dir="%s/cgcs-tis-repo" % os.environ['MY_REPO'] +workspace_repo_dirs={} +for rt in rpm_types: + workspace_repo_dirs[rt]={} + for bt in build_types: + workspace_repo_dirs[rt][bt]="%s/%s/rpmbuild/%sS" % (os.environ['MY_WORKSPACE'], bt, rt) + +if not 
os.path.isdir(os.environ['MY_REPO']): + print("ERROR: directory not found MY_REPO=%s" % os.environ['MY_REPO']) + sys.exit(1) + +if not os.path.isdir(centos_repo_dir): + print("ERROR: directory not found %s" % centos_repo_dir) + sys.exit(1) + +if not os.path.isdir(third_party_repo_dir): + print("ERROR: directory not found %s" % third_party_repo_dir) + sys.exit(1) + +if not os.path.isdir(tis_repo_dir): + print("ERROR: directory not found %s" % tis_repo_dir) + sys.exit(1) + +# bin_rpm_mirror_roots = ["%s/fedora/epel/7" % repodata_dir, +# "%s/CentOS/7.2.1511" % repodata_dir, +# "%s/CentOS/tis-r3/" % repodata_dir ] + +# src_rpm_mirror_roots = ["%s/fedora/dl.fedoraproject.org/pub/epel/7/SRPMS" % repodata_dir, +# "%s/CentOS/vault.centos.org/7.2.1511" % repodata_dir, +# "%s/CentOS/tis-r3/Source" % repodata_dir ] + +bin_rpm_mirror_roots = ["%s/Binary" % centos_repo_dir, + "%s/Binary" % third_party_repo_dir ] + +src_rpm_mirror_roots = ["%s/Source" % centos_repo_dir, + "%s/Source" % third_party_repo_dir ] + +for bt in build_types: + bin_rpm_mirror_roots.append(workspace_repo_dirs['RPM'][bt]) + src_rpm_mirror_roots.append(workspace_repo_dirs['SRPM'][bt]) + +short_options='' +long_options=[ 'cache_dir=' ] + +options, remainder = getopt.getopt(sys.argv[1:], short_options, long_options) + +for opt, arg in options: + if opt in ('--cache_dir'): + publish_cache_dir = arg + +if not os.path.isdir(publish_cache_dir): + print("ERROR: directory not found %s" % publish_cache_dir) + sys.exit(1) + +# The Main data structure +pkg_data={} + +for rpm_type in rpm_types: + pkg_data[rpm_type]={} + + # map provided_name -> pkg_name + pkg_data[rpm_type]['providers']={} + + # map pkg_name -> required_names ... could be a pkg, capability or file + pkg_data[rpm_type]['requires']={} + + # map file_name -> pkg_name + pkg_data[rpm_type]['file_owners']={} + + # map pkg_name -> file_name + pkg_data[rpm_type]['files']={} + + # map pkg_name -> required_pkg_names ... 
only pkg names, and only direct requirement + pkg_data[rpm_type]['pkg_direct_requires']={} + + # map pkg_name -> required_pkg_names ... only pkg names, but this is the transitive list of all requirements + pkg_data[rpm_type]['pkg_transitive_requires']={} + + # map pkg_name -> descendant_pkgs ... only packages the directly require this package + pkg_data[rpm_type]['pkg_direct_descendants']={} + + # map pkg_name -> descendant_pkgs ... packages that have a transitive requiremant on this package + pkg_data[rpm_type]['pkg_transitive_descendants']={} + + # Map package name to a source rpm file name + pkg_data[rpm_type]['sourcerpm']={} + pkg_data[rpm_type]['binrpm']={} + + # Map file name to package name + pkg_data[rpm_type]['fn_to_name']={} + +pkg_data['SRPM']['pkg_direct_requires_rpm']={} +pkg_data['SRPM']['pkg_transitive_requires_rpm']={} + + +# Return a list of file paths, starting in 'dir', matching 'pattern' +# dir= directory to search under +# pattern= search for file or directory matching pattern, wildcards allowed +# recursive_depth= how many levels of directory before giving up +def file_search(dir, pattern, recursive_depth=0): + match_list = [] + new_depth = recursive_depth - 1 + # print "file_search(%s,%s,%s)" % (dir, pattern, recursive_depth) + for file in os.listdir(dir): + path = "%s/%s" % (dir, file) + if fnmatch.fnmatch(file, pattern): + print path + match_list.append(path) + elif (recursive_depth > 0) and os.path.isdir(path): + sub_list = [] + sub_list = file_search(path, pattern, recursive_depth=new_depth) + match_list.extend(sub_list) + return match_list + +# Return the list of .../repodate/*primary.xml.gz files +# rpm_type= 'RPM' or 'SRPM' +# arch= e.g. 
x86_64, only relevant of rpm_type=='RPM' +def get_repo_primary_data_list(rpm_type='RPM', arch_list=default_arch_list): + rpm_repo_roots = [] + rpm_repodata_roots = [] + repodata_list = [] + + if rpm_type == 'RPM': + for d in bin_rpm_mirror_roots: + sub_list = file_search(d, 'repodata', 25) + rpm_repodata_roots.extend(sub_list) + elif rpm_type == 'SRPM': + for d in src_rpm_mirror_roots: + sub_list = file_search(d, 'repodata', 5) + rpm_repodata_roots.extend(sub_list) + else: + print "invalid rpm_type '%s', valid types are %s" % (rpm_type, str(rpm_types)) + return repodata_list + + for d in rpm_repodata_roots: + sub_list = file_search(d, '*primary.xml.gz', 2) + repodata_list.extend(sub_list) + + return repodata_list + + +# Return the list of .../repodate/*filelists.xml.gz files +# rpm_type= 'RPM' or 'SRPM' +# arch= e.g. x86_64, only relevant of rpm_type=='RPM' +def get_repo_filelists_data_list(rpm_type='RPM', arch_list=default_arch_list): + rpm_repo_roots = [] + rpm_repodata_roots = [] + repodata_list = [] + + if rpm_type == 'RPM': + for d in bin_rpm_mirror_roots: + sub_list = file_search(d, 'repodata', 25) + rpm_repodata_roots.extend(sub_list) + elif rpm_type == 'SRPM': + for d in src_rpm_mirror_roots: + sub_list = file_search(d, 'repodata', 5) + rpm_repodata_roots.extend(sub_list) + else: + print "invalid rpm_type '%s', valid types are %s" % (rpm_type, str(rpm_types)) + return repodata_list + + for d in rpm_repodata_roots: + sub_list = file_search(d, '*filelists.xml.gz', 2) + repodata_list.extend(sub_list) + + return repodata_list + + + +# Process a list of repodata files (*filelists.xml.gz) and extract package data. +# Data is saved to the global 'pkg_data'. +def read_data_from_repodata_filelists_list(repodata_list, rpm_type='RPM', arch=default_arch): + for repodata_path in repodata_list: + read_data_from_filelists_xml_gz(repodata_path, rpm_type=rpm_type, arch=arch) + +# Process a single repodata file (*filelists.xml.gz) and extract package data. 
+# Data is saved to the global 'pkg_data'. +def read_data_from_filelists_xml_gz(repodata_path, rpm_type='RPM', arch=default_arch): + # print "repodata_path=%s" % repodata_path + infile = gzip.open(repodata_path) + root = ET.parse(infile).getroot() + for pkg in root.findall('filelists:package', ns): + name=pkg.get('name') + pkg_arch=pkg.get('arch') + + version="" + release="" + + if arch is not None: + if pkg_arch is None: + continue + if pkg_arch != arch: + continue + + v=pkg.find('filelists:version', ns) + if v is not None: + version=v.get('ver') + release=v.get('rel') + else: + print "%s: %s.%s has no 'filelists:version'" % (repodata_path, name, pkg_arch) + + # print "%s %s %s %s " % (name, pkg_arch, version, release) + + for f in pkg.findall('filelists:file', ns): + fn=f.text + # print " fn=%s -> plg=%s" % (fn, name) + if not name in pkg_data[rpm_type]['files']: + pkg_data[rpm_type]['files'][name]=[] + pkg_data[rpm_type]['files'][name].append(fn) + if not fn in pkg_data[rpm_type]['file_owners']: + pkg_data[rpm_type]['file_owners'][fn]=[] + pkg_data[rpm_type]['file_owners'][fn]=name + + + + + +# Process a list of repodata files (*primary.xml.gz) and extract package data. +# Data is saved to the global 'pkg_data'. +def read_data_from_repodata_primary_list(repodata_list, rpm_type='RPM', arch=default_arch): + for repodata_path in repodata_list: + read_data_from_primary_xml_gz(repodata_path, rpm_type=rpm_type, arch=arch) + +# Process a single repodata file (*primary.xml.gz) and extract package data. +# Data is saved to the global 'pkg_data'. 
+def read_data_from_primary_xml_gz(repodata_path, rpm_type='RPM', arch=default_arch): + # print "repodata_path=%s" % repodata_path + infile = gzip.open(repodata_path) + root = ET.parse(infile).getroot() + for pkg in root.findall('root:package', ns): + name=pkg.find('root:name', ns).text + pkg_arch=pkg.find('root:arch', ns).text + version="" + release="" + license="" + sourcerpm="" + + if arch is not None: + if pkg_arch is None: + continue + if pkg_arch != arch: + continue + + pkg_data[rpm_type]['providers'][name]=name + pkg_data[rpm_type]['files'][name]=[] + pkg_data[rpm_type]['requires'][name] = [] + pkg_data[rpm_type]['requires'][name].append(name) + + url=pkg.find('root:url', ns).text + v=pkg.find('root:version', ns) + if v is not None: + version=v.get('ver') + release=v.get('rel') + else: + print "%s: %s.%s has no 'root:version'" % (repodata_path, name, pkg_arch) + + fn="%s-%s-%s.%s.rpm" % (name, version, release, arch) + pkg_data[rpm_type]['fn_to_name'][fn]=name + + # SAL print "%s %s %s %s " % (name, pkg_arch, version, release) + print "%s %s %s %s " % (name, pkg_arch, version, release) + f=pkg.find('root:format', ns) + if f is not None: + license=f.find('rpm:license', ns).text + sourcerpm=f.find('rpm:sourcerpm', ns).text + if sourcerpm != "": + pkg_data[rpm_type]['sourcerpm'][name] = sourcerpm + # SAL print "--- requires ---" + print "--- requires ---" + r=f.find('rpm:requires', ns) + if r is not None: + for rr in r.findall('rpm:entry', ns): + required_name=rr.get('name') + # SAL print " %s" % required_name + print " %s" % required_name + pkg_data[rpm_type]['requires'][name].append(required_name) + else: + print "%s: %s.%s has no 'rpm:requires'" % (repodata_path, name, pkg_arch) + # print "--- provides ---" + p=f.find('rpm:provides', ns) + if p is not None: + for pp in p.findall('rpm:entry', ns): + provided_name=pp.get('name') + # print " %s" % provided_name + if name == "kernel-rt" and provided_name in pkg_data[rpm_type]['providers'] and 
pkg_data[rpm_type]['providers'][provided_name] == "kernel": + continue + if name.startswith('kernel-rt'): + alt_name=string.replace(name, 'kernel-rt', 'kernel') + if provided_name in pkg_data[rpm_type]['providers'] and pkg_data[rpm_type]['providers'][provided_name] == alt_name: + continue + pkg_data[rpm_type]['providers'][provided_name]=name + else: + print "%s: %s.%s has no 'rpm:provides'" % (repodata_path, name, pkg_arch) + # print "--- files ---" + for fn in f.findall('root:file', ns): + file_name=fn.text + # print " %s" % file_name + pkg_data[rpm_type]['files'][name].append(file_name) + if name == "kernel-rt" and file_name in pkg_data[rpm_type]['file_owners'] and pkg_data[rpm_type]['file_owners'][file_name] == "kernel": + continue + if name.startswith('kernel-rt'): + alt_name=string.replace(name, 'kernel-rt', 'kernel') + if provided_name in pkg_data[rpm_type]['file_owners'] and pkg_data[rpm_type]['file_owners'][file_name] == alt_name: + continue + pkg_data[rpm_type]['file_owners'][file_name]=name + else: + print "%s: %s.%s has no 'root:format'" % (repodata_path, name, pkg_arch) + # print "%s %s %s %s %s" % (name, pkg_arch, version, release, license) + infile.close + +def calulate_all_direct_requires_and_descendants(rpm_type='RPM'): + # print "calulate_all_direct_requires_and_descendants rpm_type=%s" % rpm_type + for name in pkg_data[rpm_type]['requires']: + calulate_pkg_direct_requires_and_descendants(name, rpm_type=rpm_type) + +def calulate_pkg_direct_requires_and_descendants(name, rpm_type='RPM'): + print "SAL: %s needs:" % name + if not rpm_type in pkg_data: + print "Error: unknown rpm_type '%s'" % rpm_type + return + + if not name in pkg_data[rpm_type]['requires']: + print "Note: No requires data for '%s'" % name + return + + for req in pkg_data[rpm_type]['requires'][name]: + pro = '???' 
+ if rpm_type == 'RPM': + if req in pkg_data[rpm_type]['providers']: + pro = pkg_data[rpm_type]['providers'][req] + elif req in pkg_data[rpm_type]['file_owners']: + pro = pkg_data[rpm_type]['file_owners'][req] + else: + pro = '???' + print "package %s has unresolved requirement '%s'" % (name, req) + else: + # i.e. rpm_type == 'SRPM' + rpm_pro = '???' + if req in pkg_data['RPM']['providers']: + rpm_pro = pkg_data['RPM']['providers'][req] + elif req in pkg_data['RPM']['file_owners']: + rpm_pro = pkg_data['RPM']['file_owners'][req] + else: + rpm_pro = '???' + print "package %s has unresolved requirement '%s'" % (name, req) + + if rpm_pro is not None and rpm_pro != '???': + if not name in pkg_data[rpm_type]['pkg_direct_requires_rpm']: + pkg_data[rpm_type]['pkg_direct_requires_rpm'][name] = [] + if not rpm_pro in pkg_data[rpm_type]['pkg_direct_requires_rpm'][name]: + pkg_data[rpm_type]['pkg_direct_requires_rpm'][name].append(rpm_pro) + + if rpm_pro in pkg_data['RPM']['sourcerpm']: + fn = pkg_data['RPM']['sourcerpm'][rpm_pro] + if fn in pkg_data['SRPM']['fn_to_name']: + pro = pkg_data['SRPM']['fn_to_name'][fn] + else: + pro = '???' + print "package %s requires srpm file name %s" % (name,fn) + else: + pro = '???' 
+ print "package %s requires rpm %s, but that rpm has no known srpm" % (name,rpm_pro) + + if pro is not None and pro != '???': + if not name in pkg_data[rpm_type]['pkg_direct_requires']: + pkg_data[rpm_type]['pkg_direct_requires'][name] = [] + if not pro in pkg_data[rpm_type]['pkg_direct_requires'][name]: + pkg_data[rpm_type]['pkg_direct_requires'][name].append(pro) + if not pro in pkg_data[rpm_type]['pkg_direct_descendants']: + pkg_data[rpm_type]['pkg_direct_descendants'][pro] = [] + if not name in pkg_data[rpm_type]['pkg_direct_descendants'][pro]: + pkg_data[rpm_type]['pkg_direct_descendants'][pro].append(name) + + print "SAL: %s -> %s" % (req, pro) + + + +def calulate_all_transitive_requires(rpm_type='RPM'): + for name in pkg_data[rpm_type]['pkg_direct_requires']: + calulate_pkg_transitive_requires(name, rpm_type=rpm_type) + +def calulate_pkg_transitive_requires(name, rpm_type='RPM'): + if not rpm_type in pkg_data: + print "Error: unknown rpm_type '%s'" % rpm_type + return + + if not name in pkg_data[rpm_type]['pkg_direct_requires']: + print "Note: No direct_requires data for '%s'" % name + return + + pkg_data[rpm_type]['pkg_transitive_requires'][name]=[] + if rpm_type != 'RPM': + pkg_data[rpm_type]['pkg_transitive_requires_rpm'][name]=[] + unresolved = [] + unresolved.append(name) + + while unresolved: + n = unresolved.pop(0) + # print "%s: remove %s" % (name, n) + if rpm_type == 'RPM': + direct_requires='pkg_direct_requires' + transitive_requires='pkg_transitive_requires' + else: + direct_requires='pkg_direct_requires_rpm' + transitive_requires='pkg_transitive_requires_rpm' + if n in pkg_data[rpm_type][direct_requires]: + for r in pkg_data[rpm_type][direct_requires][n]: + if r != name: + if not r in pkg_data[rpm_type][transitive_requires][name]: + pkg_data[rpm_type][transitive_requires][name].append(r) + if r in pkg_data['RPM']['pkg_transitive_requires']: + for r2 in pkg_data['RPM']['pkg_transitive_requires'][r]: + if r2 != name: + if not r2 in 
pkg_data[rpm_type][transitive_requires][name]: + pkg_data[rpm_type][transitive_requires][name].append(r2) + else: + if rpm_type == 'RPM': + unresolved.append(r) + else: + print "WARNING: calulate_pkg_transitive_requires: can't append rpm to SRPM list, name=%s, r=%s" % (name, r) + # print "%s: add %s" % (name, r) + if rpm_type != 'RPM': + for r in pkg_data[rpm_type]['pkg_transitive_requires_rpm'][name]: + if r in pkg_data['RPM']['sourcerpm']: + fn = pkg_data['RPM']['sourcerpm'][r] + if fn in pkg_data['SRPM']['fn_to_name']: + s = pkg_data['SRPM']['fn_to_name'][fn] + pkg_data[rpm_type]['pkg_transitive_requires'][name].append(s) + else: + print "package %s requires srpm file name %s, but srpm name is not known" % (name, fn) + else: + print "package %s requires rpm %s, but that rpm has no known srpm" % (name, r) + +def calulate_all_transitive_descendants(rpm_type='RPM'): + for name in pkg_data[rpm_type]['pkg_direct_descendants']: + calulate_pkg_transitive_descendants(name, rpm_type=rpm_type) + +def calulate_pkg_transitive_descendants(name, rpm_type='RPM'): + if not rpm_type in pkg_data: + print "Error: unknown rpm_type '%s'" % rpm_type + return + + if not name in pkg_data[rpm_type]['pkg_direct_descendants']: + print "Note: No direct_requires data for '%s'" % name + return + + pkg_data[rpm_type]['pkg_transitive_descendants'][name]=[] + unresolved = [] + unresolved.append(name) + + while unresolved: + n = unresolved.pop(0) + # print "%s: remove %s" % (name, n) + if n in pkg_data[rpm_type]['pkg_direct_descendants']: + for r in pkg_data[rpm_type]['pkg_direct_descendants'][n]: + if r != name: + if not r in pkg_data[rpm_type]['pkg_transitive_descendants'][name]: + pkg_data[rpm_type]['pkg_transitive_descendants'][name].append(r) + if r in pkg_data[rpm_type]['pkg_transitive_descendants']: + for n2 in pkg_data[rpm_type]['pkg_transitive_descendants'][r]: + if n2 != name: + if not n2 in pkg_data[rpm_type]['pkg_transitive_descendants'][name]: + 
pkg_data[rpm_type]['pkg_transitive_descendants'][name].append(n2) + else: + unresolved.append(r) + # print "%s: add %s" % (name, r) + +def create_dest_rpm_data(): + for name in sorted(pkg_data['RPM']['sourcerpm']): + fn=pkg_data['RPM']['sourcerpm'][name] + if fn in pkg_data['SRPM']['fn_to_name']: + sname = pkg_data['SRPM']['fn_to_name'][fn] + if not sname in pkg_data['SRPM']['binrpm']: + pkg_data['SRPM']['binrpm'][sname]=[] + pkg_data['SRPM']['binrpm'][sname].append(name) + +def create_cache(cache_dir): + for rpm_type in rpm_types: + print "" + print "==== %s ====" % rpm_type + print "" + rpm_repodata_primary_list = get_repo_primary_data_list(rpm_type=rpm_type, arch_list=default_arch_by_type[rpm_type]) + for arch in default_arch_by_type[rpm_type]: + read_data_from_repodata_primary_list(rpm_repodata_primary_list, rpm_type=rpm_type, arch=arch) + rpm_repodata_filelists_list = get_repo_filelists_data_list(rpm_type=rpm_type, arch_list=default_arch_by_type[rpm_type]) + for arch in default_arch_by_type[rpm_type]: + read_data_from_repodata_filelists_list(rpm_repodata_filelists_list, rpm_type=rpm_type, arch=arch) + calulate_all_direct_requires_and_descendants(rpm_type=rpm_type) + calulate_all_transitive_requires(rpm_type=rpm_type) + calulate_all_transitive_descendants(rpm_type=rpm_type) + + cache_name="%s/%s-direct-requires" % (cache_dir, rpm_type) + f=open(cache_name, "w") + for name in sorted(pkg_data[rpm_type]['pkg_direct_requires']): + print "%s needs %s" % (name, pkg_data[rpm_type]['pkg_direct_requires'][name]) + f.write("%s;" % name) + first=True + for req in sorted(pkg_data[rpm_type]['pkg_direct_requires'][name]): + if first: + first=False + f.write("%s" % req) + else: + f.write(",%s" % req) + f.write("\n") + f.close() + + cache_name="%s/%s-direct-descendants" % (cache_dir, rpm_type) + f=open(cache_name, "w") + for name in sorted(pkg_data[rpm_type]['pkg_direct_descendants']): + print "%s informs %s" % (name, pkg_data[rpm_type]['pkg_direct_descendants'][name]) + 
f.write("%s;" % name) + first=True + for req in sorted(pkg_data[rpm_type]['pkg_direct_descendants'][name]): + if first: + first=False + f.write("%s" % req) + else: + f.write(",%s" % req) + f.write("\n") + f.close() + + cache_name="%s/%s-transitive-requires" % (cache_dir, rpm_type) + f=open(cache_name, "w") + for name in sorted(pkg_data[rpm_type]['pkg_transitive_requires']): + f.write("%s;" % name) + first=True + for req in sorted(pkg_data[rpm_type]['pkg_transitive_requires'][name]): + if first: + first=False + f.write("%s" % req) + else: + f.write(",%s" % req) + f.write("\n") + f.close() + + cache_name="%s/%s-transitive-descendants" % (cache_dir, rpm_type) + f=open(cache_name, "w") + for name in sorted(pkg_data[rpm_type]['pkg_transitive_descendants']): + f.write("%s;" % name) + first=True + for req in sorted(pkg_data[rpm_type]['pkg_transitive_descendants'][name]): + if first: + first=False + f.write("%s" % req) + else: + f.write(",%s" % req) + f.write("\n") + f.close() + + if rpm_type != 'RPM': + cache_name="%s/%s-direct-requires-rpm" % (cache_dir, rpm_type) + f=open(cache_name, "w") + for name in sorted(pkg_data[rpm_type]['pkg_direct_requires_rpm']): + print "%s needs rpm %s" % (name, pkg_data[rpm_type]['pkg_direct_requires_rpm'][name]) + f.write("%s;" % name) + first=True + for req in sorted(pkg_data[rpm_type]['pkg_direct_requires_rpm'][name]): + if first: + first=False + f.write("%s" % req) + else: + f.write(",%s" % req) + f.write("\n") + f.close() + + cache_name="%s/%s-transitive-requires-rpm" % (cache_dir, rpm_type) + f=open(cache_name, "w") + for name in sorted(pkg_data[rpm_type]['pkg_transitive_requires_rpm']): + f.write("%s;" % name) + first=True + for req in sorted(pkg_data[rpm_type]['pkg_transitive_requires_rpm'][name]): + if first: + first=False + f.write("%s" % req) + else: + f.write(",%s" % req) + f.write("\n") + f.close() + + cache_name="%s/rpm-to-srpm" % cache_dir + f=open(cache_name, "w") + for name in sorted(pkg_data['RPM']['sourcerpm']): + 
f.write("%s;" % name) + fn=pkg_data['RPM']['sourcerpm'][name] + if fn in pkg_data['SRPM']['fn_to_name']: + sname = pkg_data['SRPM']['fn_to_name'][fn] + f.write("%s" % sname) + f.write("\n") + f.close() + + create_dest_rpm_data() + cache_name="%s/srpm-to-rpm" % cache_dir + f=open(cache_name, "w") + for name in sorted(pkg_data['SRPM']['binrpm']): + f.write("%s;" % name) + first=True + for bname in sorted(pkg_data['SRPM']['binrpm'][name]): + if first: + first=False + f.write("%s" % bname) + else: + f.write(",%s" % bname) + f.write("\n") + f.close() + + + +def test(): + for rpm_type in rpm_types: + print "" + print "==== %s ====" % rpm_type + print "" + rpm_repodata_primary_list = get_repo_primary_data_list(rpm_type=rpm_type, arch_list=default_arch_by_type[rpm_type]) + for arch in default_arch_by_type[rpm_type]: + read_data_from_repodata_primary_list(rpm_repodata_primary_list, rpm_type=rpm_type, arch=arch) + rpm_repodata_filelists_list = get_repo_filelists_data_list(rpm_type=rpm_type, arch_list=default_arch_by_type[rpm_type]) + for arch in default_arch_by_type[rpm_type]: + read_data_from_repodata_filelists_list(rpm_repodata_filelists_list, rpm_type=rpm_type, arch=arch) + calulate_all_direct_requires_and_descendants(rpm_type=rpm_type) + calulate_all_transitive_requires(rpm_type=rpm_type) + calulate_all_transitive_descendants(rpm_type=rpm_type) + + for name in pkg_data[rpm_type]['pkg_direct_requires']: + print "%s needs %s" % (name, pkg_data[rpm_type]['pkg_direct_requires'][name]) + + for name in pkg_data[rpm_type]['pkg_direct_descendants']: + print "%s informs %s" % (name, pkg_data[rpm_type]['pkg_direct_descendants'][name]) + + for name in pkg_data[rpm_type]['pkg_transitive_requires']: + print "%s needs %s" % (name, pkg_data[rpm_type]['pkg_transitive_requires'][name]) + print "" + + for name in pkg_data[rpm_type]['pkg_transitive_descendants']: + print "%s informs %s" % (name, pkg_data[rpm_type]['pkg_transitive_descendants'][name]) + print "" + + +if 
#!/bin/bash
# set -x
#
# default_build_srpm: generic SRPM construction driver.
#
# Inputs (environment, typically set by the calling build-srpms machinery):
#   DATA         - per-package build data file (mandatory)
#   SRC_BASE, RPMBUILD_BASE, SPECS_BASE, PKG_BASE, DISTRO, BUILD_TYPE,
#   TIS_PATCH_VER - build context
#   VERSION, TAR_NAME, TAR, SRC_DIR, COPY_LIST, COPY_LIST_TO_TAR,
#   EXCLUDE_LIST_FROM_TAR, ALLOW_EMPTY_RPM - optional overrides, usually
#   supplied by the sourced DATA file.
#
# Flow: derive VERSION and TAR_NAME from the spec files (or PKG-INFO),
# assemble a source tarball (optionally via a staging copy), then run
# 'rpmbuild -bs' for every spec under $RPMBUILD_BASE/SPECS.

source "$SRC_BASE/build-tools/spec-utils"
source "$SRC_BASE/build-tools/srpm-utils"

CUR_DIR=`pwd`
BUILD_DIR="$RPMBUILD_BASE"

if [ "x$DATA" == "x" ]; then
    echo "ERROR: default_build_srpm (${LINENO}): Environment variable 'DATA' not defined."
    exit 1
fi

# Pull VERSION/TAR_NAME/SRC_DIR/COPY_LIST/... definitions from the DATA file.
srpm_source_build_data $DATA
if [ $? -ne 0 ]; then
    echo "ERROR: default_build_srpm (${LINENO}): Failed to source build data from $DATA"
    exit 1
fi

# Derive VERSION from the spec file(s) unless DATA already provided one.
if [ "x$VERSION" == "x" ]; then
    for SPEC in `find $SPECS_BASE -name '*.spec' | sort -V`; do
        SPEC_PATH="$SPEC"

        VERSION_DERIVED=`spec_evaluate '%{version}' "$SPEC_PATH" 2>> /dev/null`
        if [ $? -ne 0 ]; then
            echo "ERROR: default_build_srpm (${LINENO}): '%{version}' not found in '$PKG_BASE/$SPEC_PATH'"
            VERSION_DERIVED=""
        fi

        if [ "x$VERSION_DERIVED" != "x" ]; then
            if [ "x$VERSION" == "x" ]; then
                VERSION=$VERSION_DERIVED
            else
                # A second spec yielded a version: ambiguous when a single
                # SRC_DIR tarball must carry one version.
                if [ "x$SRC_DIR" != "x" ]; then
                    echo "ERROR: default_build_srpm (${LINENO}): multiple spec files found, can't set VERSION automatically"
                    exit 1
                fi
            fi
        fi
    done

    # Fallback: python sdist metadata.
    if [ "x$VERSION" == "x" ]; then
        if [ -f $SRC_DIR/PKG-INFO ]; then
            VERSION=$(grep '^Version:' $SRC_DIR/PKG-INFO | awk -F ': ' '{print $2}' | sed -e 's/^[[:space:]]*//')
        fi
    fi

    if [ "x$VERSION" != "x" ]; then
        echo "Derived VERSION=$VERSION"
    else
        echo "ERROR: default_build_srpm (${LINENO}): Failed to derive a good VERSION from SPEC file, and none provided."
        exit 1
    fi
fi

# Derive TAR_NAME: prefer the '%global service' macro, else the Name tag,
# else PKG-INFO, unless DATA already provided one.
if [ "x$TAR_NAME" == "x" ]; then
    for SPEC in `find $SPECS_BASE -name '*.spec' | sort -V`; do
        SPEC_PATH="$SPEC"

        SERVICE=`spec_find_global service "$SPEC_PATH" 2>> /dev/null`
        if [ $? -eq 0 ]; then
            if [ "x$TAR_NAME" == "x" ]; then
                TAR_NAME=$SERVICE
            else
                if [ "x$SRC_DIR" != "x" ]; then
                    echo "ERROR: default_build_srpm (${LINENO}): multiple spec files found, can't set TAR_NAME automatically"
                    exit 1
                fi
            fi
        else
            NAME=`spec_find_tag Name "$SPEC_PATH" 2>> /dev/null`
            if [ $? -eq 0 ]; then
                if [ "x$TAR_NAME" == "x" ]; then
                    TAR_NAME=$NAME
                else
                    if [ "x$SRC_DIR" != "x" ]; then
                        echo "ERROR: default_build_srpm (${LINENO}): multiple spec files found, can't set TAR_NAME automatically"
                        exit 1
                    fi
                fi
            else
                echo "WARNING: default_build_srpm (${LINENO}): 'Name' not found in '$SPEC_PATH'"
                NAME=""
            fi
        fi
    done

    if [ "x$TAR_NAME" == "x" ]; then
        if [ -f $SRC_DIR/PKG-INFO ]; then
            TAR_NAME=$(grep '^Name:' $SRC_DIR/PKG-INFO | awk -F ': ' '{print $2}' | sed -e 's/^[[:space:]]*//')
        fi
    fi

    if [ "x$TAR_NAME" != "x" ]; then
        echo "Derived TAR_NAME=$TAR_NAME"
    else
        echo "ERROR: default_build_srpm (${LINENO}): Failed to derive a good TAR_NAME from SPEC file, and none provided."
        exit 1
    fi
fi

if [ "x$TAR" == "x" ]; then
    TAR="$TAR_NAME-$VERSION.tar.gz"
fi

SOURCE_PATH="$BUILD_DIR/SOURCES"
TAR_PATH="$SOURCE_PATH/$TAR"
STAGING=""

# A staging copy of SRC_DIR is only needed when files must be injected into,
# or removed from, the tarball.
if [ "x$COPY_LIST_TO_TAR" != "x" ] || [ "x$EXCLUDE_LIST_FROM_TAR" != "x" ]; then
    STAGING="$BUILD_DIR/staging"
    mkdir -p $STAGING
fi

mkdir -p "$BUILD_DIR/SRPMS"
mkdir -p "$SOURCE_PATH"

if [ "x$SRC_DIR" == "x" -a "x$COPY_LIST" == "x" -a "$ALLOW_EMPTY_RPM" != "true" ]; then
    echo "ERROR: default_build_srpm (${LINENO}): '$PWD/$DATA' failed to provide at least one of 'SRC_DIR' or 'COPY_LIST'"
    exit 1
fi

if [ "x$SRC_DIR" != "x" ]; then
    if [ ! -d "$SRC_DIR" ]; then
        echo "ERROR: default_build_srpm (${LINENO}): directory not found: '$SRC_DIR'"
        exit 1
    fi
fi

# Loose files (patches, extra sources) copied straight into SOURCES.
if [ "x$COPY_LIST" != "x" ]; then
    echo "COPY_LIST: $COPY_LIST"
    for p in $COPY_LIST; do
        # echo "COPY_LIST: $p"
        \cp -L -u -r -v $p $SOURCE_PATH
        if [ $? -ne 0 ]; then
            echo "ERROR: default_build_srpm (${LINENO}): COPY_LIST: file not found: '$p'"
            exit 1
        fi
    done
fi

# Build the staging tree: copy SRC_DIR, add COPY_LIST_TO_TAR entries,
# remove EXCLUDE_LIST_FROM_TAR entries.
if [ "x$STAGING" != "x" ]; then
    \cp -L -u -r -v $SRC_DIR $STAGING
    echo "COPY_LIST_TO_TAR: $COPY_LIST_TO_TAR"
    for p in $COPY_LIST_TO_TAR; do
        # echo "COPY_LIST_TO_TAR: $p"
        \cp -L -u -r -v $p $STAGING/$SRC_DIR
        if [ $? -ne 0 ]; then
            echo "ERROR: default_build_srpm (${LINENO}): COPY_LIST_TO_TAR: file not found: '$p'"
            exit 1
        fi
    done
    echo "EXCLUDE_LIST_FROM_TAR: $EXCLUDE_LIST_FROM_TAR"
    for p in $EXCLUDE_LIST_FROM_TAR; do
        # echo "EXCLUDE_LIST_FROM_TAR: $p"
        echo "rm -rf $STAGING/$SRC_DIR/$p"
        \rm -rf $STAGING/$SRC_DIR/$p
        if [ $? -ne 0 ]; then
            echo "ERROR: default_build_srpm (${LINENO}): EXCLUDE_LIST_FROM_TAR: could not remove file: '$p'"
            exit 1
        fi
    done

fi

# tar --transform expression mapping SRC_DIR to TAR_NAME-VERSION/ inside
# the tarball.
TRANSFORM=`echo "$SRC_DIR" | sed 's/^\./\\./' | sed 's:^/::'`

if [ "x$STAGING" != "x" ]; then
    pushd $STAGING
fi

# Rebuild the tarball only if something under SRC_DIR is newer than the
# existing tarball (ignoring git/build/patch metadata).
TAR_NEEDED=0
if [ "x$SRC_DIR" != "x" ]; then
    echo "SRC_DIR=$SRC_DIR"
    if [ -f $TAR_PATH ]; then
        n=`find . -cnewer $TAR_PATH -and ! -path './.git*' \
               -and ! -path './build/*' \
               -and ! -path './.pc/*' \
               -and ! -path './patches/*' \
               -and ! -path "./$DISTRO/*" \
               -and ! -path './pbr-*.egg/*' \
               | wc -l`
        if [ $n -gt 0 ]; then
            TAR_NEEDED=1
        fi
    else
        TAR_NEEDED=1
    fi
fi

if [ $TAR_NEEDED -gt 0 ]; then
    echo "Creating tar file: $TAR_PATH ..."
    tar czf $TAR_PATH $SRC_DIR --exclude '.git*' --exclude 'build' --exclude='.pc' --exclude='patches' --exclude="$SRC_DIR/$DISTRO" --exclude='pbr-*.egg' --transform "s,^$TRANSFORM,$TAR_NAME-$VERSION,"
    if [ $? -ne 0 ]; then
        if [ "x$STAGING" != "x" ]; then
            popd
        fi

        echo "ERROR: default_build_srpm (${LINENO}): failed to create tar file, cmd: tar czf $TAR_PATH $SRC_DIR --exclude '.git*' --exclude 'build' --exclude='.pc' --exclude='patches' --exclude="$SRC_DIR/$DISTRO" --exclude='pbr-*.egg' --transform \"s,^$TRANSFORM,$TAR_NAME-$VERSION,\""
        exit 1
    fi
    echo "Created tar file: $TAR_PATH"
else
    echo "Tar file not needed."
fi

if [ "x$STAGING" != "x" ]; then
    popd
fi

if [ ! -d $BUILD_DIR/SPECS ]; then
    echo "Spec directory '$BUILD_DIR/SPECS' does not exist"
    exit 1
fi

if [ $(ls -1 $BUILD_DIR/SPECS/*.spec | wc -l) -eq 0 ]; then
    echo "No spec files found in spec directory '$BUILD_DIR/SPECS'"
    exit 1
fi

# Build one SRPM per spec file, skipping specs whose SRPM is already newer
# than everything in the current directory.
for SPEC in `ls -1 $BUILD_DIR/SPECS`; do
    SPEC_PATH="$BUILD_DIR/SPECS/$SPEC"
    RELEASE=`spec_find_tag Release "$SPEC_PATH" 2>> /dev/null`
    if [ $? -ne 0 ]; then
        echo "ERROR: default_build_srpm (${LINENO}): 'Release' not found in '$SPEC_PATH'"
    fi
    NAME=`spec_find_tag Name "$SPEC_PATH" 2>> /dev/null`
    if [ $? -ne 0 ]; then
        echo "ERROR: default_build_srpm (${LINENO}): 'Name' not found in '$SPEC_PATH'"
    fi
    SRPM="$NAME-$VERSION-$RELEASE.src.rpm"
    SRPM_PATH="$BUILD_DIR/SRPMS/$SRPM"

    spec_validate_tis_release $SPEC_PATH
    if [ $? -ne 0 ]; then
        echo "TIS Validation of $SPEC_PATH failed"
        exit 1
    fi

    BUILD_NEEDED=0
    if [ -f $SRPM_PATH ]; then
        n=`find . -cnewer $SRPM_PATH | wc -l`
        if [ $n -gt 0 ]; then
            BUILD_NEEDED=1
        fi
    else
        BUILD_NEEDED=1
    fi

    if [ $BUILD_NEEDED -gt 0 ]; then
        echo "SPEC file: $SPEC_PATH"
        echo "SRPM build directory: $BUILD_DIR"
        echo "TIS_PATCH_VER: $TIS_PATCH_VER"

        # Prepend the TiS versioning macros so rpmbuild sees them first.
        sed -i -e "1 i%define _tis_build_type $BUILD_TYPE" $SPEC_PATH
        sed -i -e "1 i%define tis_patch_ver $TIS_PATCH_VER" $SPEC_PATH
        rpmbuild -bs $SPEC_PATH --define="%_topdir $BUILD_DIR" --undefine=dist --define="_tis_dist .tis"
    else
        echo "SRPM build not needed"
    fi
done
#!/bin/bash
#
# For every package carrying a 'srpm_path' file (i.e. a patched upstream
# SRPM), look in the mirror directory next to the referenced SRPM for a
# newer version of the same source package and report any found as
#   <srpm_path file>: <current srpm> ==> <newer srpm>
#
# Requires $MY_REPO and the 'rpmdev-vercmp' tool (rpmdevtools).

for f in `find $MY_REPO -name srpm_path`; do
    orig_line=`cat $f`
    first=`echo $orig_line | awk -F : '{print $1}'`
    # Default: treat the whole line as a mirror-relative path.
    orig_path="/import/mirrors/$orig_line"
    if [ "$first" == "mirror" ]; then
        orig_path="/import/mirrors/"$(echo $orig_line | awk -F : '{print $2}')
    fi
    if [ "$first" == "repo" ]; then
        # repo: SRPMs are maintained in-tree; no upstream to compare against.
        orig_path="$MY_REPO/"$(echo $orig_line | awk -F : '{print $2}')
        continue
    fi

    if [ ! -f "$orig_path" ]; then
        echo "ERROR: bad srpm path: '$orig_path' derived from '$f'"
        exit 1
    fi

    # Sanity check: the SRPM must live inside a yum repo (repodata may sit
    # up to two directory levels above the package itself).
    orig_dir=$(dirname "$orig_path")
    repodata_dir=$orig_dir/repodata
    if [ ! -d "$repodata_dir" ]; then
        repodata_dir=$orig_dir/../repodata
        if [ ! -d "$repodata_dir" ]; then
            repodata_dir=$orig_dir/../../repodata
            if [ ! -d "$repodata_dir" ]; then
                echo "ERROR: couldn't find repodata for '$orig_path'"
                exit 1
            fi
        fi
    fi

    # echo "'$orig_path' -> '$repodata_dir'"
    name=$(rpm -q --queryformat '%{NAME}\n' -p "$orig_path" 2>> /dev/null)
    orig_name=$(basename "$orig_path")
    best_name="$orig_name"
    # Fix: the -name pattern must be quoted, otherwise the shell expands
    # '$name-*' against the current directory before find ever sees it.
    for n in `find "$orig_dir" -name "$name-*"`; do
        if [ "$n" != "$orig_path" ]; then
            # The glob can match other packages sharing a prefix; confirm
            # the real package name from the rpm header.
            new_name=$(rpm -q --queryformat '%{NAME}\n' -p "$n")
            if [ "$name" == "$new_name" ]; then
                # rpmdev-vercmp exits 11 when the first argument is newer.
                rpmdev-vercmp $(basename "$n") "$best_name" >> /dev/null
                if [ $? -eq 11 ]; then
                    best_name=$(basename "$n")
                fi
            fi
        fi
    done
    if [ "$best_name" != "$orig_name" ]; then
        echo "$f: $orig_name ==> $best_name"
    fi
done
class SrcPackage(object):
    """Metadata for a single source RPM (.src.rpm).

    Constructed either empty (path=None; the caller fills the fields in
    later, e.g. when re-reading a previous report) or from an actual
    .src.rpm on disk, in which case the rpm header supplies
    name/version/description/licences/url.
    """

    def __init__(self, path=None):
        self.bin_pkg = None
        self.original_src = None
        self.sha = 'SHA'  # placeholder; the real SHA is filled in by the report writer
        if path is None:
            self.filename = None
            self.path = None
        else:
            self.filename = os.path.basename(path)
            self.path = path
            ts = rpm.TransactionSet()
            # Header tags only; skip digest/signature verification.
            ts.setVSFlags(rpm._RPMVSF_NODIGESTS | rpm._RPMVSF_NOSIGNATURES)
            fdno = os.open(self.path, os.O_RDONLY)
            try:
                # Fix: read the header from the descriptor we just opened.
                # The original passed self.path here, leaving fdno unused.
                hdr = ts.hdrFromFdno(fdno)
            finally:
                # Fix: close the descriptor even if header parsing raises.
                os.close(fdno)
            self.desc = hdr[rpm.RPMTAG_DESCRIPTION].replace('\n', ' ')
            self.version = hdr[rpm.RPMTAG_VERSION] + '-' + hdr[rpm.RPMTAG_RELEASE]
            self.licences = hdr[rpm.RPMTAG_LICENSE]
            self.name = hdr[rpm.RPMTAG_NAME]
            self.url = hdr[rpm.RPMTAG_URL]

        # Report fields with their defaults; refined by IPReport later.
        self.modified = None
        self.kernel_module = False
        self.disclosed_by = 'Jason McKenna'
        self.shipped_as = 'Binary'
        self.origin = 'Unknown'
        self.notes = ''
        self.wrs = False

    def __lt__(self, other):
        # Case-insensitive ordering by package name; fall back to a
        # case-sensitive comparison only to break exact ties.
        me = self.name.lower()
        them = other.name.lower()
        if me == them:
            return self.name < other.name
        else:
            return me < them
'http://vault.centos.org/7.4.1708/cloud/Source/openstack-mitaka/common'], + ['/import/mirrors/CentOS/vault.centos.org/7.4.1708/cloud/Source/openstack-mitaka', + 'http://vault.centos.org/7.4.1708/cloud/Source/openstack-mitaka'], + ['/import/mirrors/CentOS/7.4.1708/extras/Source/SPackages', + 'http://vault.centos.org/7.4.1708/extras/Source/SPackages'], + # CentOS 7.3 + ['/import/mirrors/CentOS/7.3.1611/os/Source/SPackages', + 'http://vault.centos.org/7.3.1611/os/Source/SPackages'], + ['/import/mirrors/CentOS/vault.centos.org/7.3.1611/updates/Source/SPackages', + 'http://vault.centos.org/7.3.1611/updates/Source/SPackages'], + ['/import/mirrors/CentOS/vault.centos.org/7.3.1611/cloud/Source/openstack-newton/common', + 'http://vault.centos.org/7.3.1611/cloud/Source/openstack-newton/common'], + ['/import/mirrors/CentOS/vault.centos.org/7.3.1611/cloud/Source/openstack-newton', + 'http://vault.centos.org/7.3.1611/cloud/Source/openstack-newton'], + ['/import/mirrors/CentOS/vault.centos.org/7.3.1611/cloud/Source/openstack-mitaka/common', + 'http://vault.centos.org/7.3.1611/cloud/Source/openstack-mitaka/common'], + ['/import/mirrors/CentOS/vault.centos.org/7.3.1611/cloud/Source/openstack-mitaka', + 'http://vault.centos.org/7.3.1611/cloud/Source/openstack-mitaka'], + ['/import/mirrors/CentOS/7.3.1611/extras/Source/SPackages', + 'http://vault.centos.org/7.3.1611/extras/Source/SPackages'], + # CentOS 7.2 + ['/import/mirrors/CentOS/7.2.1511/os/Source/SPackages', 'http://vault.centos.org/7.2.1511/os/Source/SPackages'], + ['/import/mirrors/CentOS/vault.centos.org/7.2.1511/updates/Source/SPackages', + 'http://vault.centos.org/7.2.1511/updates/Source/SPackages'], + ['/import/mirrors/CentOS/vault.centos.org/7.2.1511/cloud/Source/openstack-mitaka/common', + 'http://vault.centos.org/7.2.1511/cloud/Source/openstack-mitaka/common'], + ['/import/mirrors/CentOS/vault.centos.org/7.2.1511/cloud/Source/openstack-mitaka', + 'http://vault.centos.org/7.2.1511/cloud/Source/openstack-mitaka'], + 
    def __init__(self, workspace=None, repo=None):
        """Build the report context.

        workspace/repo default to $MY_WORKSPACE/$MY_REPO (via check_env);
        explicit arguments override the environment.  Scans the workspace
        for shipped and locally-built binary RPMs, then indexes original
        (mirror) SRPMs, locally-built SRPMs, and the hardcoded origin
        lookup tables.
        """
        self.workspace = None
        self.repo = None
        self.shipped_binaries = list()
        self.built_binaries = list()
        self.check_env()
        # Explicit arguments win over the environment values set above.
        if workspace is not None:
            self.workspace = workspace
        if repo is not None:
            self.repo = repo

        # Generate a list of binaries that we shipped
        for filename in os.listdir(self.workspace + '/export/dist/isolinux/Packages'):
            if filename.endswith('rpm'):
                self.shipped_binaries.append(filename)

        # Generate a list of binaries that we built ourselves
        for build in ['rt', 'std']:
            for filename in os.listdir(self.workspace + '/' + build + '/rpmbuild/RPMS/'):
                if filename.endswith('rpm'):
                    self.built_binaries.append(filename)

        print ('Looking up packages for which we have source...')
        self.original_src_pkgs = dict()
        self.build_original_src_pkgs()
        print ('Looking up packages we built...')
        self.built_src_pkgs = dict()
        self.build_built_src_pkgs()
        # NOTE(review): this progress message duplicates the previous one;
        # it presumably should say the hardcoded lookup table is loading.
        print ('Looking up packages we built...')
        self.hardcoded_lookup_dict = dict()
        self.build_hardcoded_lookup_dict()

    def build_hardcoded_lookup_dict(self):
        """Load the manual origin tables from the build-tools directory.

        source_lookup.txt: '<url> <package>' pairs -> (url, wrs=False).
        wrs_orig.txt: one package per line -> ('No download', wrs=True).
        """
        with open(self.repo + '/build-tools/source_lookup.txt', 'r') as lookup_file:
            for line in lookup_file:
                line = line.rstrip()
                words = line.split()
                if (words is not None) and (len(words) >= 2):
                    self.hardcoded_lookup_dict[words[1]] = (words[0], False)

        with open(self.repo + '/build-tools/wrs_orig.txt', 'r') as lookup_file:
            for line in lookup_file:
                line = line.rstrip()
                words = line.split()
                if (words is not None) and (len(words) >= 1):
                    self.hardcoded_lookup_dict[words[0]] = ('No download', True)
filepath.startswith(path[0]) and (not path[1].lower().startswith('unknown')): + return path[1] + '/' + os.path.basename(filepath) + return 'Unknown' + + def hardcoded_lookup(self, package_name): + if package_name in self.hardcoded_lookup_dict.keys(): + return self.hardcoded_lookup_dict[package_name] + return None, False + + def check_env(self): + if 'MY_WORKSPACE' in os.environ: + self.workspace = os.environ['MY_WORKSPACE'] + else: + print 'Could not find $MY_WORKSPACE' + raise IOError('Could not fine $MY_WORKSPACE') + + if 'MY_REPO' in os.environ: + self.repo = os.environ['MY_REPO'] + else: + print 'Could not find $MY_REPO' + raise IOError('Could not fine $MY_REPO') + + def do_bin_pkgs(self): + print ('Gathering binary package information') + self.read_bin_pkgs() + + def read_bin_pkgs(self): + self.bin_pkgs = list() + ts = rpm.TransactionSet() + ts.setVSFlags(rpm._RPMVSF_NODIGESTS | rpm._RPMVSF_NOSIGNATURES) + for filename in self.shipped_binaries: + if filename.endswith('rpm'): + bin_pkg = BinPackage(self.workspace + '/export/dist/isolinux/Packages/' + filename, ts) + self.bin_pkgs.append(bin_pkg) + + def do_src_report(self, copy_packages=False, do_wrs=True, delta_file=None, output_path=None, strip_unchanged=False): + self.bin_to_src() + self.src_pkgs.sort() + + if delta_file is not None: + self.delta(delta_file) + + if output_path is None: + output_path = self.workspace + '/export/ip_report' + + # Create output dir (if required) + if not os.path.exists(output_path): + os.makedirs(output_path) + + # Create paths for RPMs (if required) + if copy_packages: + if not os.path.exists(output_path + '/non_wrs'): + shutil.rmtree(output_path + '/non_wrs', True) + os.makedirs(output_path + '/non_wrs') + if do_wrs: + shutil.rmtree(output_path + '/wrs', True) + os.makedirs(output_path + '/wrs') + + with open(output_path + '/srcreport.csv', 'wb') as src_report_file: + src_report_writer = csv.writer(src_report_file) + + # Write header row + src_report_writer.writerow( + 
['Package File', 'File Name', 'Package Name', 'Version', 'SHA1', 'Disclosed By', + 'Description', 'Part Of (Runtime, Host, Both)', 'Modified (Yes, No)', 'Hardware Interfacing (Yes, No)', + 'License(s) Found', 'Package Download URL', 'Kernel module', 'Notes']) + + for src_pkg in self.src_pkgs: + if src_pkg.modified: + modified_string = 'Yes' + else: + modified_string = 'No' + if src_pkg.kernel_module: + kmod_string = 'Yes' + else: + kmod_string = 'No' + + # Copy the pacakge and get the SHA + if copy_packages: + if src_pkg.wrs is False: + shutil.copyfile(src_pkg.path, output_path + '/non_wrs/' + src_pkg.filename) + shasumout = subprocess.check_output( + ['shasum', output_path + '/non_wrs/' + src_pkg.filename]).split()[0] + src_pkg.sha = shasumout + if strip_unchanged and (src_pkg.notes.lower().startswith('unchanged')): + os.remove(output_path + '/non_wrs/' + src_pkg.filename) + else: + if do_wrs: + shutil.copyfile(src_pkg.path, output_path + '/wrs/' + src_pkg.filename) + shasumout = subprocess.check_output( + ['shasum', output_path + '/wrs/' + src_pkg.filename]).split()[0] + src_pkg.sha = shasumout + if strip_unchanged and (src_pkg.notes.lower().startswith('unchanged')): + os.remove(output_path + '/wrs/' + src_pkg.filename) + + if do_wrs or (src_pkg.wrs is False): + src_report_writer.writerow( + [src_pkg.filename, src_pkg.name, src_pkg.version, src_pkg.sha, src_pkg.disclosed_by, + src_pkg.desc, 'Runtime', src_pkg.shipped_as, modified_string, 'No', src_pkg.licences, + src_pkg.origin, kmod_string, src_pkg.notes]) + if 'unknown' in src_pkg.origin.lower(): + print ( + 'Warning: Could not determine origin of ' + src_pkg.name + '. 
Please investigate/populate manually') + + def bin_to_src(self): + self.src_pkgs = list() + src_pkg_names = list() + for bin_pkg in self.bin_pkgs: + if src_pkg_names.__contains__(bin_pkg.source): + if bin_pkg.kernel_module: + for src_pkg in self.src_pkgs: + if src_pkg.filename == bin_pkg.source: + src_pkg.kernel_module = True + break + + continue + + # if we reach here, then the source package is not yet in our db. + # we first search for the source package in the built-rpms + if 'shim-signed' in bin_pkg.source: + for tmp in self.built_src_pkgs: + if 'shim-signed' in tmp: + print ('shim-signed hack -- ' + bin_pkg.source + ' to ' + tmp) + bin_pkg.source = tmp + break + if 'shim-unsigned' in bin_pkg.source: + for tmp in self.built_src_pkgs: + if 'shim-0' in tmp: + print ('shim-unsigned hack -- ' + bin_pkg.source + ' to ' + tmp) + bin_pkg.source = tmp + break + if 'grub2-efi-pxeboot' in bin_pkg.source: + for tmp in self.built_src_pkgs: + if 'grub2-2' in tmp: + print ('grub2-efi-pxeboot hack -- ' + bin_pkg.source + ' to ' + tmp) + bin_pkg.source = tmp + break + + if bin_pkg.source in self.built_src_pkgs: + src_pkg = self.built_src_pkgs[bin_pkg.source] + src_pkg.modified = True + + # First guess, we see if there's an original source with the source package name + # (this is 99% of the cases) + src_pkg_orig_name = src_pkg.name + if src_pkg_orig_name in self.original_src_pkgs: + src_pkg.original_src = self.original_src_pkgs[src_pkg_orig_name] + src_pkg.origin = src_pkg.original_src.origin + + else: + src_pkg_path = self.locate_in_mirror(bin_pkg.source) + if not os.path.isabs(src_pkg_path): + continue + src_pkg = SrcPackage(src_pkg_path) + src_pkg.origin = IPReport.path_to_origin(src_pkg_path) + src_pkg.modified = False + + if bin_pkg.kernel_module: + src_pkg.kernel_module = True + + src_pkg_names.append(bin_pkg.source) + self.src_pkgs.append(src_pkg) + + if src_pkg.origin.lower() == 'unknown': + if 'windriver' in src_pkg.licences.lower(): + src_pkg.origin = 'No download' 
    def locate_in_mirror(self, filename):
        """ takes an RPM filename and finds the full path of the file """

        fullpath = None

        # Expand the shorthand prefixes used in srpm_path files into real
        # repo-relative directories.
        filename = filename.replace('mirror:', self.repo + '/cgcs-centos-repo/')
        filename = filename.replace('repo:', self.repo + '/')
        filename = filename.replace('3rd_party:', self.repo + '/cgcs-3rd-party-repo/')

        # At this point, filename could be a complete path (incl symlink), or just a filename
        best_guess = filename
        filename = os.path.basename(filename)

        # Probe each known mirror location for the bare filename.
        for path in IPReport.__KNOWN_PATHS:
            if os.path.exists(path[0] + '/' + filename):
                fullpath = path[0] + '/' + filename
                break

        if fullpath is not None:
            return fullpath
        else:
            # Not found in any known path; fall back to the expanded input.
            return best_guess

    def build_original_src_pkgs(self):
        """Index the pristine upstream SRPM for every patched package.

        Walks the repo for per-package 'srpm_path' files, each naming the
        original SRPM the package was derived from, and stores the result
        in self.original_src_pkgs keyed by package name.
        """
        for root, dirs, files in os.walk(self.repo):
            for name in files:
                if name == 'srpm_path':
                    with open(os.path.join(root, 'srpm_path'), 'r') as srpm_path_file:
                        # Only the first line is meaningful.
                        original_srpm_file = srpm_path_file.readline().rstrip()
                        original_src_pkg_path = self.locate_in_mirror(original_srpm_file)
                        original_src_pkg = SrcPackage(original_src_pkg_path)
                        original_src_pkg.origin = IPReport.path_to_origin(original_src_pkg_path)
                        self.original_src_pkgs[original_src_pkg.name] = original_src_pkg

    def build_built_src_pkgs(self):
        """ Create a dict of any source package that we built ourselves """
        for build in ['std', 'rt']:
            for root, dirs, files in os.walk(self.workspace + '/' + build + '/rpmbuild/SRPMS'):
                for name in files:
                    if name.endswith('.src.rpm'):
                        built_src_pkg = SrcPackage(os.path.join(root, name))
                        # Keyed by filename (not name): callers match against
                        # the SOURCERPM tag of binary packages.
                        self.built_src_pkgs[built_src_pkg.filename] = built_src_pkg
orig_report): + if orig_report is None: + return + delta_src_pkgs = self.read_last_report(orig_report) + + for pkg in self.src_pkgs: + if pkg.name in delta_src_pkgs: + old_pkg = delta_src_pkgs[pkg.name] + if old_pkg.version == pkg.version: + pkg.notes = 'Unchanged' + else: + pkg.notes = 'New version' + else: + pkg.notes = 'New package' + + def read_last_report(self, orig_report): + orig_pkg_dict = dict() + with open(orig_report, 'rb') as orig_report_file: + orig_report_reader = csv.reader(orig_report_file) + doneHeader = False + for row in orig_report_reader: + if (not doneHeader) and ('package file name' in row[0].lower()): + doneHeader = True + continue + doneHeader = True + orig_pkg = SrcPackage() + orig_pkg.filename = row[0] + orig_pkg.name = row[1] + orig_pkg.version = row[2] + # sha = row[3] + orig_pkg.disclosed_by = row[4] + orig_pkg.desc = row[5] + # runtime = row[6] + orig_pkg.shipped_as = row[7] + if row[8].lower is 'yes': + orig_pkg.modified = True + else: + orig_pkg.modifed = False + # hardware interfacing = row[9] + orig_pkg.licences = row[10] + orig_pkg.origin = row[11] + if row[12].lower is 'yes': + orig_pkg.kernel_module = True + else: + orig_pkg.kernel_module = False + orig_pkg_dict[orig_pkg.name] = orig_pkg + + return orig_pkg_dict + + +def main(argv): + # handle command line arguments + # -h/--help -- help + # -n/--no-copy -- do not copy files (saves time) + # -d/--delta= -- compare with an ealier report + # -o/--output= -- output report/binaries to specified path + # -w/--workspace= -- use specified workspace instead of $WORKSPACE + # -r/--repo= -- use sepeciied repo instead of $MY_REPO + # -s -- strip (remove) unchanged packages from copy out directory + + try: + opts, args = getopt.getopt(argv, "hnd:o:w:r:s", + ["delta=", "help", "no-copy", "workspace=", "repo=", "output=", "--strip"]) + except getopt.GetoptError: + # todo - output help + sys.exit(2) + delta_file = None + do_copy = True + workspace = None + repo = None + output_path = None + 
strip_unchanged = False + + for opt, arg in opts: + if opt in ('-h', '--help'): + print 'usage:' + print ' ip_report.py [options]' + print ' Creates and IP report in $MY_WORKSPACE/export/ip_report ' + print ' Source RPMs (both Wind River and non WR) are placed in subdirs within that path' + print '' + print 'Options:' + print ' -h/--help - this help' + print ' -d /--delta= - create "notes" field, comparing report with a previous report' + print ' -n/--no-copy - do not copy files into subdirs (this is faster, but means you' + print ' don\'t get SHA sums for files)' + print ' -w /--workspace= - use the specified path as workspace, instead of $MY_WORKSPACE' + print ' -r /--repo= - use the specified path as repo, instead of $MY_REPO' + print ' -o /--output= - output to specified path (instead of $MY_WORKSPACE/export/ip_report)' + print ' -s/--strip - strip (remove) unchanged files if copied' + exit() + elif opt in ('-d', '--delta'): + delta_file = os.path.normpath(arg) + delta_file = os.path.expanduser(delta_file) + if not os.path.exists(delta_file): + print 'Cannot locate ' + delta_file + exit(1) + elif opt in ('-w', '--workspace'): + workspace = os.path.normpath(arg) + workspace = os.path.expanduser(workspace) + elif opt in ('-r', '--repo'): + repo = os.path.normpath(arg) + repo = os.path.expanduser(repo) + elif opt in ('-o', '--output'): + output_path = os.path.normpath(arg) + output_path = os.path.expanduser(output_path) + elif opt in ('-n', '--no-copy'): + do_copy = False + elif opt in ('-s', '--strip-unchanged'): + strip_unchanged = True + + print ('Doing IP report') + if delta_file is not None: + print 'Delta from ' + delta_file + else: + print 'No delta specified' + ip_report = IPReport(workspace=workspace, repo=repo) + + ip_report.do_bin_pkgs() + ip_report.do_src_report(copy_packages=do_copy, + delta_file=delta_file, + output_path=output_path, + strip_unchanged=strip_unchanged) + + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git 
a/build-tools/make-installer-images.sh b/build-tools/make-installer-images.sh new file mode 100755 index 00000000..10e967cc --- /dev/null +++ b/build-tools/make-installer-images.sh @@ -0,0 +1,244 @@ +#!/bin/bash -e +## this script is called by "update-pxe-network-installer" and run in "sudo" +## created by Yong Hu (yong.hu@intel.com), 05/24/2018 + +function clean_rootfs() { + rootfs_dir=$1 + echo "--> remove old files in original rootfs" + conf="$(ls ${rootfs_dir}/etc/ld.so.conf.d/kernel-*.conf)" + echo "conf basename = $(basename $conf)" + old_version="tbd" + if [ -f $conf ];then + old_version="$(echo $(basename $conf) | rev | cut -d'.' -f2- | rev | cut -d'-' -f2-)" + fi + echo "old version is $old_version" + # remove old files in original initrd.img + # do this in chroot to avoid accidentialy wrong operations on host root +chroot $rootfs_dir /bin/bash -x < " + echo "kernel_mode: std or rt" + exit -1; +fi + +work_dir=$1 +mode=$2 +output_dir=$work_dir/output +if [ ! -d $output_dir ];then mkdir -p $output_dir; fi + +if [ "$mode" != "std" ] && [ "$mode" != "rt" ]; then + echo "ERROR: wrong kernel mode, must be std or rt" + exit -1 +fi + +timestamp=$(date +%F_%H%M) + +echo "---------------- start to make new initrd.img and vmlinuz -------------" +ORIG_INITRD=$work_dir/orig/initrd.img +if [ ! -f $ORIG_INITRD ];then + echo "ERROR: $ORIG_INITRD does NOT exist!" + exit -1 +fi + +kernel_rpms_dir=$work_dir/kernel-rpms +if [ ! -d $kernel_rpms_dir ];then + echo "ERROR: $kernel_rpms_dir does NOT exist!" 
+ exit -1 +fi + +initrd_root=$work_dir/initrd.work +if [ -d $initrd_root ];then + rm -rf $initrd_root +fi +mkdir -p $initrd_root + +cd $initrd_root +# uncompress initrd.img +echo "--> uncompress original initrd.img" +/usr/bin/xzcat $ORIG_INITRD | cpio -i + +echo "--> clean up $initrd_root" +clean_rootfs $initrd_root + +echo "--> extract files from new kernel and its modular rpms to initrd root" +for kf in $kernel_rpms_dir/$mode/*.rpm ; do rpm2cpio $kf | cpio -idu; done + +# by now new kernel and its modules exist! +# find new kernel in /boot/ +echo "--> get new kernel image: vmlinuz" +new_kernel="$(ls ./boot/vmlinuz-*)" +echo $new_kernel +if [ -f $new_kernel ];then + #copy out the new kernel + if [ $mode == "std" ];then + if [ -f $output_dir/new-vmlinuz ]; then + mv -f $output_dir/new-vmlinuz $output_dir/vmlinuz-bakcup-$timestamp + fi + cp -f $new_kernel $output_dir/new-vmlinuz + else + if [ -f $output_dir/new-vmlinuz-rt ]; then + mv -f $output_dir/new-vmlinuz-rt $output_dir/vmlinuz-rt-bakcup-$timestamp + fi + cp -f $new_kernel $output_dir/new-vmlinuz-rt + fi + kernel_name=$(basename $new_kernel) + new_ver=$(echo $kernel_name | cut -d'-' -f2-) + echo $new_ver +else + echo "ERROR: new kernel is NOT found!" + exit -1 +fi + +echo "-->check module dependencies in new initrd.img in chroot context" +chroot $initrd_root /bin/bash -x < Rebuild the initrd" +if [ -f $output_dir/new-initrd.img ]; then + mv -f $output_dir/new-initrd.img $output_dir/initrd.img-bakcup-$timestamp +fi +find . | cpio -o -H newc | xz --check=crc32 --x86 --lzma2=dict=512KiB > $output_dir/new-initrd.img +if [ $? != 0 ];then + echo "ERROR: failed to create new initrd.img" + exit -1 +fi + +cd $work_dir + +if [ -f $output_dir/new-initrd.img ];then + ls -l $output_dir/new-initrd.img +else + echo "ERROR: new-initrd.img is not generated!" + exit -1 +fi + +if [ -f $output_dir/new-vmlinuz ];then + ls -l $output_dir/new-vmlinuz +else + echo "ERROR: new-vmlinuz is not generated!" 
+ exit -1 +fi + +echo "---------------- start to make new squashfs.img -------------" +ORIG_SQUASHFS=$work_dir/orig/squashfs.img +if [ ! -f $ORIG_SQUASHFS ];then + echo "ERROR: $ORIG_SQUASHFS does NOT exist!" + exit -1 +fi + +rootfs_rpms_dir=$work_dir/rootfs-rpms +if [ ! -d $rootfs_rpms_dir ];then + echo "ERROR: $rootfs_rpms_dir does NOT exist!" + exit -1 +fi + +# make squashfs.mnt and ready and umounted +if [ ! -d $work_dir/squashfs.mnt ];then + mkdir -p $work_dir/squashfs.mnt +else + # in case it was mounted previously + mnt_path=$(mount | grep "squashfs.mnt" | cut -d' ' -f3-3) + if [ x"$mnt_path" != "x" ] && [ "$(basename $mnt_path)" == "squashfs.mnt" ];then + umount $work_dir/squashfs.mnt + fi +fi + +# make squashfs.work ready and umounted +squashfs_root="$work_dir/squashfs.work" +# Now mount the rootfs.img file: +if [ ! -d $squashfs_root ];then + mkdir -p $squashfs_root +else + # in case it was mounted previously + mnt_path=$(mount | grep "$(basename $squashfs_root)" | cut -d' ' -f3-3) + if [ x"$mnt_path" != "x" ] && [ "$(basename $mnt_path)" == "$(basename $squashfs_root)" ];then + umount $squashfs_root + fi +fi + +echo $ORIG_SQUASHFS +mount -o loop -t squashfs $ORIG_SQUASHFS $work_dir/squashfs.mnt + +if [ ! -d ./LiveOS ];then mkdir -p ./LiveOS ; fi + +echo "--> copy rootfs.img from original squashfs.img to LiveOS folder" +cp -f ./squashfs.mnt/LiveOS/rootfs.img ./LiveOS/. 
+ +echo "--> done to copy rootfs.img, umount squashfs.mnt" +umount ./squashfs.mnt + +echo "--> mount rootfs.img into $squashfs_root" +mount -o loop LiveOS/rootfs.img $squashfs_root + +echo "--> clean up ./squashfs-rootfs from original squashfs.img in chroot context" +clean_rootfs $squashfs_root + +cd $squashfs_root +echo "--> extract files from rootfs-rpms to squashfs root" +for ff in $rootfs_rpms_dir/*.rpm ; do rpm2cpio $ff | cpio -idu; done + +echo "--> extract files from kernel and its modular rpms to squashfs root" +for kf in $kernel_rpms_dir/$mode/*.rpm ; do rpm2cpio $kf | cpio -idu; done + +echo "-->check module dependencies in new squashfs.img in chroot context" +#we are using the same new kernel-xxx.rpm, so the $new_ver is the same +chroot $squashfs_root /bin/bash -x < unmount $squashfs_root" +umount $squashfs_root +#rename the old version +if [ -f $output_dir/new-squashfs.img ]; then + mv -f $output_dir/new-squashfs.img $output_dir/squashfs.img-backup-$timestamp +fi + +echo "--> make the new squashfs image" +mksquashfs LiveOS $output_dir/new-squashfs.img -keep-as-directory -comp xz -b 1M +if [ $? == 0 ];then + ls -l $output_dir/new-squashfs.img +else + echo "ERROR: failed to make a new squashfs.img" + exit -1 +fi + +echo "--> done successfully!" 
diff --git a/build-tools/mirror_rebase/copy_external_mirror_to_tis_mirror b/build-tools/mirror_rebase/copy_external_mirror_to_tis_mirror new file mode 100644 index 00000000..c4362f45 --- /dev/null +++ b/build-tools/mirror_rebase/copy_external_mirror_to_tis_mirror @@ -0,0 +1,229 @@ +#!/bin/bash + +if [ "$USER" != "jenkins" ]; then + echo "ERROR: only jenkins should run this" + return 1 +fi + +CENTOS_BASE_VERSION=7.2.1511 +EPEL_BASE_VERSION=7 +BIN_ARCH=x86_64 + +BIN_DIRS="/export/jenkins/mirrors/CentOS/$CENTOS_BASE_VERSION/updates/x86_64 + /export/jenkins/mirrors/CentOS/$CENTOS_BASE_VERSION/cloud/x86_64/openstack-mitaka + /export/jenkins/mirrors/CentOS/$CENTOS_BASE_VERSION/os/x86_64 + /export/jenkins/mirrors/CentOS/$CENTOS_BASE_VERSION/extras/x86_64 + /export/jenkins/mirrors/fedora/epel/$EPEL_BASE_VERSION/x86_64 + " +SRC_DIRS="/export/jenkins/mirrors/CentOS/vault.centos.org/$CENTOS_BASE_VERSION/updates/Source + /export/jenkins/mirrors/CentOS/vault.centos.org/$CENTOS_BASE_VERSION/cloud/Source/openstack-mitaka + /export/jenkins/mirrors/CentOS/vault.centos.org/$CENTOS_BASE_VERSION/os/Source + /export/jenkins/mirrors/CentOS/vault.centos.org/$CENTOS_BASE_VERSION/extras/Source + /export/jenkins/mirrors/fedora/dl.fedoraproject.org/pub/epel/$EPEL_BASE_VERSION/SRPMS + " +TESTING_BIN_DIRS="/export/jenkins/mirrors/fedora/epel/testing/7/x86_64 + " +TESTING_SRC_DIRS="/export/jenkins/mirrors/dl.fedoraproject.org/pub/epel/testing/7/SRPMS + " + +BIN_ARCH=x86_64 + +NEW_DIR="/export/jenkins/mirrors/CentOS/tis-r3-CentOS/mitaka" +BACKUP_DIR="/export/jenkins/mirrors/CentOS/tis-r3-CentOS/mitaka.old" + +CREATEREPO=$(which createrepo_c) +if [ $? 
-ne 0 ]; then + CREATEREPO="createrepo" +fi + +if [ -d $BACKUP_DIR ]; then + rm -rf $BACKUP_DIR +fi +cp -r $NEW_DIR $BACKUP_DIR + +BIN_DEST="$NEW_DIR/Binary" +SRC_DEST="$NEW_DIR/Source" +LOG_DEST="$NEW_DIR/Data" +DATA_DEST="$NEW_DIR/Data" + +mkdir -p $BIN_DEST $SRC_DEST $LOG_DEST + +WHITE_LIST_FILE="$DATA_DIR/white_list.txt" +BLACK_LIST_FILE="$DATA_DIR/black_list.txt" +OBSOLETE_LOG="$LOG_DEST/obsolete.log" +NO_SOURCERPM_LOG="$LOG_DEST/no_sourcerpm.log" +SOURCERPM_NOT_FOUND_LOG="$LOG_DEST/sourcerpm_not_found.log" +BIN_COPY_LOG="$LOG_DEST/bin_copy.log" +SRC_COPY_LOG="$LOG_DEST/src_copy.log" +NO_INSTALLED_COPY="$LOG_DEST/installed_copy.log" +BLACKLISTED_LOG="$LOG_DEST/blacklisted.log" +WHITELISTED_LOG="$LOG_DEST/whitelisted.log" + +rm -f $OBSOLETE_LOG $NO_SOURCERPM_LOG $SOURCERPM_NOT_FOUND_LOG $BIN_COPY_LOG $SRC_COPY_LOG $NO_INSTALLED_COPY $BLACKLISTED_LOG $WHITELISTED_LOG + +LAST_INSTALLED_NAME="" +LAST_NAME="" +NAME="" + +# The following is similar to a find for *.rpm files, +# but we transform the path into ## . +# Then do a reverse 'version' sort, so that the newest version +# of the rpm appears first, no matter what directory it originates from. 
+for dat in $((for bd in `echo $BIN_DIRS`; do + for br in $(find $bd/ -name '*.rpm' ); do + d=$(dirname $br) + b=$(basename $br) + s=$(echo $b | sed -e 's#.centos.#.#' -e 's#.el7.#.#' -e 's#.el7_##' -e 's#.rpm$##' -e 's#.src$##' -e 's#.noarch$##' -e 's#.x86_64$##') + echo "$s#$b#$d" + done + done) | sort -r -V) +do + b=$(echo "$dat" | awk -F '#' '{ print $2 }') + d=$(echo "$dat" | awk -F '#' '{ print $3 }') + br="$d/$b" + echo $br + + MATCH=$(grep "^$b" $BLACK_LIST_FILE || true) + if [ "$MATCH" != "" ]; then + echo "$bs" >> $BLACKLISTED_LOG + continue + fi + + SOURCERPM=$(rpm -q --queryformat='%{SOURCERPM}' -p $br --nosignature) + if [ "x$SOURCERPM" != "x" ]; then + MATCH=$(grep "^$SOURCERPM" $BLACK_LIST_FILE || true) + if [ "$MATCH" != "" ]; then + echo "$bs" >> $BLACKLISTED_LOG + echo "$SOURCERPM" >> $BLACKLISTED_LOG + continue + fi + fi + + ARCH=$(rpm -q --queryformat='%{ARCH}' -p $br --nosignature) + LAST_NAME="$NAME" + NAME=$(rpm -q --queryformat='%{NAME}' -p $br --nosignature) + if [ "$NAME" != "$LAST_NAME" ] && [ "$LAST_NAME" != "$LAST_INSTALLED_NAME" ]; then + echo "$LAST_NAME" >> $NO_INSTALLED_COPY + fi + if [ "$ARCH" == "$BIN_ARCH" ] || [ "$ARCH" == "noarch" ]; then + if [ "$NAME" != "$LAST_INSTALLED_NAME" ]; then + if [ "x$SOURCERPM" != "x" ]; then + bs=$(find $SRC_DIRS -name $SOURCERPM | head -n 1) + if [ "x$bs" != "x" ]; then + mkdir -p $BIN_DEST/$ARCH + if [ ! -f $BIN_DEST/$ARCH/$b ]; then + cp -v $br $BIN_DEST/$ARCH/ + echo "$br" >> $BIN_COPY_LOG + fi + if [ ! 
-f $SRC_DEST/$SOURCERPM ]; then + cp -v $bs $SRC_DEST/ + echo "$bs" >> $SRC_COPY_LOG + fi + LAST_INSTALLED_NAME=$NAME + else + echo "$SOURCERPM not found" + echo "$br" >> $SOURCERPM_NOT_FOUND_LOG + fi + else + echo "no SOURCERPM for $br" + echo "$br" >> $NO_SOURCERPM_LOG + fi + else + echo "$br is obsolete" + echo "$br" >> $OBSOLETE_LOG + fi + fi +done + +for dat in $((for bd in $(echo $BIN_DIRS; echo $TESTING_BIN_DIRS); do + for br in $(find $bd/ -name '*.rpm'); do + d=$(dirname $br) + b=$(basename $br) + s=$(echo $b | sed -e 's#.centos.#.#' -e 's#.el7.#.#' -e 's#.el7_##' -e 's#.rpm$##' -e 's#.src$##' -e 's#.noarch$##' -e 's#.x86_64$##') + echo "$s#$b#$d" + done + done) | sort -r -V) +do + b=$(echo "$dat" | awk -F '#' '{ print $2 }') + d=$(echo "$dat" | awk -F '#' '{ print $3 }') + br="$d/$b" + echo $br + + MATCH=$(grep "^$b" $WHITE_LIST_FILE || true) + if [ "$MATCH" != "" ]; then + echo "$bs" >> $WHITELISTED_LOG + else + continue + fi + + SOURCERPM=$(rpm -q --queryformat='%{SOURCERPM}' -p $br --nosignature) + if [ "x$SOURCERPM" != "x" ]; then + grep "^$SOURCERPM" $WHITE_LIST_FILE >> /dev/null || true + if [ $? -eq 0 ]; then + echo "$bs" >> $WHITELISTED_LOG + echo "$SOURCERPM" >> $WHITELISTED_LOG + else + continue + fi + fi + + ARCH=$(rpm -q --queryformat='%{ARCH}' -p $br --nosignature) + LAST_NAME="$NAME" + NAME=$(rpm -q --queryformat='%{NAME}' -p $br --nosignature) + if [ "$NAME" != "$LAST_NAME" ] && [ "$LAST_NAME" != "$LAST_INSTALLED_NAME" ]; then + echo "$LAST_NAME" >> $NO_INSTALLED_COPY + fi + if [ "$ARCH" == "$BIN_ARCH" ] || [ "$ARCH" == "noarch" ]; then + if [ "$NAME" != "$LAST_INSTALLED_NAME" ]; then + if [ "x$SOURCERPM" != "x" ]; then + bs=$(find $SRC_DIRS $TESTING_SRC_DIRS -name $SOURCERPM | head -n 1) + if [ "x$bs" != "x" ]; then + mkdir -p $BIN_DEST/$ARCH + if [ ! -f $BIN_DEST/$ARCH/$b ]; then + cp -v $br $BIN_DEST/$ARCH/ + echo "$br" >> $BIN_COPY_LOG + fi + if [ ! 
-f $SRC_DEST/$SOURCERPM ]; then + cp -v $bs $SRC_DEST/ + echo "$bs" >> $SRC_COPY_LOG + fi + LAST_INSTALLED_NAME=$NAME + else + echo "$SOURCERPM not found" + echo "$br" >> $SOURCERPM_NOT_FOUND_LOG + fi + else + echo "no SOURCERPM for $br" + echo "$br" >> $NO_SOURCERPM_LOG + fi + else + echo "$br is obsolete" + echo "$br" >> $OBSOLETE_LOG + fi + fi +done + + +repodata_update () { + DIR=${1} + ( + cd $DIR + for d in `find -L . -type d -name repodata`; do + (cd $d/.. + for c in $(find repodata -name '*comps*xml'); do + mv -f $c comps.xml + done + rm -rf repodata + if [ -f comps.xml ]; then + $CREATEREPO -g comps.xml --workers $(cat /usr/bin/nproc) $(pwd) + else + $CREATEREPO --workers $(cat /usr/bin/nproc) $(pwd) + fi + ) + done + ) +return 0 +} + +repodata_update $SRC_DEST +repodata_update $BIN_DEST diff --git a/build-tools/mirror_rebase/copy_external_mirror_to_tis_mirror.old b/build-tools/mirror_rebase/copy_external_mirror_to_tis_mirror.old new file mode 100755 index 00000000..f48ef459 --- /dev/null +++ b/build-tools/mirror_rebase/copy_external_mirror_to_tis_mirror.old @@ -0,0 +1,216 @@ +#!/bin/bash + +if [ "$USER" != "jenkins" ]; then + echo "ERROR: only jenkins should run this" + return 1 +fi + +CENTOS_BASE_VERSION=7.2.1511 +EPEL_BASE_VERSION=7 +BIN_ARCH=x86_64 + +BIN_DIRS="/export/jenkins/mirrors/CentOS/$CENTOS_BASE_VERSION/updates/x86_64 + /export/jenkins/mirrors/CentOS/$CENTOS_BASE_VERSION/cloud/x86_64/openstack-mitaka + /export/jenkins/mirrors/CentOS/$CENTOS_BASE_VERSION/os/x86_64 + /export/jenkins/mirrors/CentOS/$CENTOS_BASE_VERSION/extras/x86_64 + /export/jenkins/mirrors/fedora/epel/$EPEL_BASE_VERSION/x86_64 + " +SRC_DIRS="/export/jenkins/mirrors/CentOS/vault.centos.org/$CENTOS_BASE_VERSION/updates/Source + /export/jenkins/mirrors/CentOS/vault.centos.org/$CENTOS_BASE_VERSION/cloud/Source/openstack-mitaka + /export/jenkins/mirrors/CentOS/vault.centos.org/$CENTOS_BASE_VERSION/os/Source + 
/export/jenkins/mirrors/CentOS/vault.centos.org/$CENTOS_BASE_VERSION/extras/Source + /export/jenkins/mirrors/fedora/dl.fedoraproject.org/pub/epel/$EPEL_BASE_VERSION/SRPMS + " +TESTING_BIN_DIRS="/export/jenkins/mirrors/fedora/epel/testing/7/x86_64 + " +TESTING_SRC_DIRS="/export/jenkins/mirrors/dl.fedoraproject.org/pub/epel/testing/7/SRPMS + " + +ORIG_DIR="/export/jenkins/mirrors/CentOS/tis-r3-CentOS/mitaka" +NEW_DIR="/export/jenkins/mirrors/CentOS/tis-r3-CentOS/mitaka-2" +cp -r $ORIG_DIR $NEW_DIR + +BIN_DEST="$NEW_DIR/Binary" +SRC_DEST="$NEW_DIR/Source" +LOG_DEST="$NEW_DIR/Data" +DATA_DEST="$NEW_DIR/Data" + +rm -rf $LOG_DEST +mkdir -p $BIN_DEST $SRC_DEST $LOG_DEST + +WHITE_LIST_FILE="$DATA_DIR/white_list.txt" +BLACK_LIST_FILE="$DATA_DIR/black_list.txt" +OBSOLETE_LOG="$LOG_DEST/obsolete.log" +NO_SOURCERPM_LOG="$LOG_DEST/no_sourcerpm.log" +SOURCERPM_NOT_FOUND_LOG="$LOG_DEST/sourcerpm_not_found.log" +BIN_COPY_LOG="$LOG_DEST/bin_copy.log" +SRC_COPY_LOG="$LOG_DEST/src_copy.log" +NO_INSTALLED_COPY="$LOG_DEST/installed_copy.log" +BLACKLISTED_LOG="$LOG_DEST/blacklisted.log" +WHITELISTED_LOG="$LOG_DEST/whitelisted.log" + +rm -f $OBSOLETE_LOG $NO_SOURCERPM_LOG $SOURCERPM_NOT_FOUND_LOG $BIN_COPY_LOG $SRC_COPY_LOG $NO_INSTALLED_COPY $BLACKLISTED_LOG $WHITELISTED_LOG + +LAST_INSTALLED_NAME="" +LAST_NAME="" +NAME="" + +# The following is similar to a find for *.rpm files, +# but we transform the path into ## . +# Then do a reverse 'version' sort, so that the newest version +# of the rpm appears first, no matter what directory it originates from. 
+for dat in $((for bd in `echo $BIN_DIRS`; do + for br in $(find $bd/ -name '*.rpm' ); do + d=$(dirname $br) + b=$(basename $br) + s=$(echo $b | sed -e 's#.centos.#.#' -e 's#.el7.#.#' -e 's#.el7_##' -e 's#.rpm$##' -e 's#.src$##' -e 's#.noarch$##' -e 's#.x86_64$##') + echo "$s#$b#$d" + done + done) | sort -r -V) +do + b=$(echo "$dat" | awk -F '#' '{ print $2 }') + d=$(echo "$dat" | awk -F '#' '{ print $3 }') + br="$d/$b" + echo $br + + MATCH=$(grep "^$b" $BLACK_LIST_FILE || true) + if [ "$MATCH" != "" ]; then + echo "$bs" >> $BLACKLISTED_LOG + continue + fi + + SOURCERPM=$(rpm -q --queryformat='%{SOURCERPM}' -p $br --nosignature) + if [ "x$SOURCERPM" != "x" ]; then + MATCH=$(grep "^$SOURCERPM" $BLACK_LIST_FILE || true) + if [ "$MATCH" != "" ]; then + echo "$bs" >> $BLACKLISTED_LOG + echo "$SOURCERPM" >> $BLACKLISTED_LOG + continue + fi + fi + + ARCH=$(rpm -q --queryformat='%{ARCH}' -p $br --nosignature) + LAST_NAME="$NAME" + NAME=$(rpm -q --queryformat='%{NAME}' -p $br --nosignature) + if [ "$NAME" != "$LAST_NAME" ] && [ "$LAST_NAME" != "$LAST_INSTALLED_NAME" ]; then + echo "$LAST_NAME" >> $NO_INSTALLED_COPY + fi + if [ "$ARCH" == "$BIN_ARCH" ] || [ "$ARCH" == "noarch" ]; then + if [ "$NAME" != "$LAST_INSTALLED_NAME" ]; then + if [ "x$SOURCERPM" != "x" ]; then + bs=$(find $SRC_DIRS -name $SOURCERPM | head -n 1) + if [ "x$bs" != "x" ]; then + mkdir -p $BIN_DEST/$ARCH + if [ ! -f $BIN_DEST/$ARCH/$b ]; then + cp -v $br $BIN_DEST/$ARCH/ + echo "$br" >> $BIN_COPY_LOG + fi + if [ ! 
-f $SRC_DEST/$SOURCERPM ]; then + cp -v $bs $SRC_DEST/ + echo "$bs" >> $SRC_COPY_LOG + fi + LAST_INSTALLED_NAME=$NAME + else + echo "$SOURCERPM not found" + echo "$br" >> $SOURCERPM_NOT_FOUND_LOG + fi + else + echo "no SOURCERPM for $br" + echo "$br" >> $NO_SOURCERPM_LOG + fi + else + echo "$br is obsolete" + echo "$br" >> $OBSOLETE_LOG + fi + fi +done + +for dat in $((for bd in $(echo $BIN_DIRS; echo $TESTING_BIN_DIRS); do + for br in $(find $bd/ -name '*.rpm'); do + d=$(dirname $br) + b=$(basename $br) + s=$(echo $b | sed -e 's#.centos.#.#' -e 's#.el7.#.#' -e 's#.el7_##' -e 's#.rpm$##' -e 's#.src$##' -e 's#.noarch$##' -e 's#.x86_64$##') + echo "$s#$b#$d" + done + done) | sort -r -V) +do + b=$(echo "$dat" | awk -F '#' '{ print $2 }') + d=$(echo "$dat" | awk -F '#' '{ print $3 }') + br="$d/$b" + echo $br + + MATCH=$(grep "^$b" $WHITE_LIST_FILE || true) + if [ "$MATCH" != "" ]; then + echo "$bs" >> $WHITELISTED_LOG + else + continue + fi + + SOURCERPM=$(rpm -q --queryformat='%{SOURCERPM}' -p $br --nosignature) + if [ "x$SOURCERPM" != "x" ]; then + grep "^$SOURCERPM" $WHITE_LIST_FILE >> /dev/null || true + if [ $? -eq 0 ]; then + echo "$bs" >> $WHITELISTED_LOG + echo "$SOURCERPM" >> $WHITELISTED_LOG + else + continue + fi + fi + + ARCH=$(rpm -q --queryformat='%{ARCH}' -p $br --nosignature) + LAST_NAME="$NAME" + NAME=$(rpm -q --queryformat='%{NAME}' -p $br --nosignature) + if [ "$NAME" != "$LAST_NAME" ] && [ "$LAST_NAME" != "$LAST_INSTALLED_NAME" ]; then + echo "$LAST_NAME" >> $NO_INSTALLED_COPY + fi + if [ "$ARCH" == "$BIN_ARCH" ] || [ "$ARCH" == "noarch" ]; then + if [ "$NAME" != "$LAST_INSTALLED_NAME" ]; then + if [ "x$SOURCERPM" != "x" ]; then + bs=$(find $SRC_DIRS $TESTING_SRC_DIRS -name $SOURCERPM | head -n 1) + if [ "x$bs" != "x" ]; then + mkdir -p $BIN_DEST/$ARCH + if [ ! -f $BIN_DEST/$ARCH/$b ]; then + cp -v $br $BIN_DEST/$ARCH/ + echo "$br" >> $BIN_COPY_LOG + fi + if [ ! 
-f $SRC_DEST/$SOURCERPM ]; then + cp -v $bs $SRC_DEST/ + echo "$bs" >> $SRC_COPY_LOG + fi + LAST_INSTALLED_NAME=$NAME + else + echo "$SOURCERPM not found" + echo "$br" >> $SOURCERPM_NOT_FOUND_LOG + fi + else + echo "no SOURCERPM for $br" + echo "$br" >> $NO_SOURCERPM_LOG + fi + else + echo "$br is obsolete" + echo "$br" >> $OBSOLETE_LOG + fi + fi +done + + +TMP_DIR=$(mktemp -d /tmp/copy_external_mirror_to_tis_mirror_XXXXXX) +if [ $? -ne 0 ]; then + echo "Failed to create temporary directory" + return 1 +fi + +(cd $ORIG_DIR; find . | sort -V > $TMP_DIR/pre) +(cd $NEW_DIR; find . | sort -V > $TMP_DIR/post) +echo "Listing deletions" +diff $TMP_DIR/pre $TMP_DIR/post | grep -v '^< ./Data/' | grep '^<' +if [ $? -eq 0 ]; then + echo + echo "Cowardly refusing to alter $ORIG_DIR due to deletions: please see $NEW_DIR" + return 1 +fi + +mv -f $ORIG_DIR $ORIG_DIR.old +mv -f $NEW_DIR $ORIG_DIR +rm -rf $TMP_DIR +return 0 diff --git a/build-tools/mirror_rebase/link_cgcs_centos_repo b/build-tools/mirror_rebase/link_cgcs_centos_repo new file mode 100755 index 00000000..58d0dc0b --- /dev/null +++ b/build-tools/mirror_rebase/link_cgcs_centos_repo @@ -0,0 +1,225 @@ +#!/bin/bash + +# +# Part of the monthly mirror update +# +# Update symlinks in cgcs-centos-repo to point to the latest version of packages in /import/mirrors/CentOS/tis-r5-CentOS/newton +# +# This step updates Binary links, and adds Source links +# + +MIRROR_ROOT=/import/mirrors/CentOS/tis-r5-CentOS/newton +BIN_ROOT=$MIRROR_ROOT/Binary +SRC_ROOT=$MIRROR_ROOT/Source +BLACK_LIST_FILE=$MIRROR_ROOT/Data/black_list.txt +WHITE_LIST_FILE=$MIRROR_ROOT/Data/white_list.txt + +cd $MY_REPO/cgcs-centos-repo + +if [ $? 
-ne 0 ]; then + echo 'ERROR: failed to cd to $MY_REPO/cgcs-centos-repo' + return 1 +fi + +names=' ' +snames=' ' + +do_work () { + dat=${1} + + b=$(echo "$dat" | awk -F '#' '{ print $2 }') + d=$(echo "$dat" | awk -F '#' '{ print $3 }') + r="$d/$b" + + DEBUG_INFO=0 + MATCH=$(echo $b | grep '[-]debuginfo-') + if [ "$MATCH" != "" ]; then + DEBUG_INFO=1 + fi + + MATCH=$(grep "^$b" $BLACK_LIST_FILE || true) + if [ "$MATCH" != "" ]; then + echo "NOTE: '$b' is black listed" + continue + fi + + if [ $DEBUG_INFO -eq 1 ]; then + sb=$(rpm -q --info --nosignature -p $r | grep '^Source RPM : ' | sed 's#^Source RPM : ##') + if [ "x$sb" == "x" ]; then + echo "ERROR: no source rpm listed for '$b'" + continue + fi + s=$(find Source -name "$sb") + if [ "x$s" == "x" ]; then + echo "NOTE: no source rpm '$sb' found for '$b'" + continue + fi + fi + + name=$(rpm -q --nosignature --queryformat '%{NAME}\n' -p $r) + + MATCH=$(grep "^$b" $WHITE_LIST_FILE || true) + if [ "$MATCH" == "" ]; then + # Not white listed, check for python2 alternative + # python-rpm-macros-3-6.1 is a notable case white_list case... 
+ # We need BOTH python-rpm-macros-3-6.1 and python2-rpm-macros-3-6.1 + # so substituting python-rpm-macros-3-6.1 with python2-rpm-macros-3-6.1 is an error + + altname=$(echo $name | sed 's#^python-#python2-#') + if [ "$altname" != "$name" ]; then + # look for python2 alternative + sb=$(rpm -q --info --nosignature -p $r | grep '^Source RPM : ' | sed 's#^Source RPM : ##') + + if [ "x$sb" != "x" ]; then + MATCH=$(grep "^$sb" $BLACK_LIST_FILE || true) + if [ "$MATCH" != "" ]; then + echo "NOTE: '$sb' is black listed, so '$b' is ignored" + continue + fi + fi + + for dat2 in $(for br in $(find $BIN_ROOT -name "$altname-*.rpm" | grep -v '.src.rpm$' | grep -v '[-]debuginfo-'); do + ddd=$(dirname $br) + bbb=$(basename $br) + sss=$(echo $bbb | sed -e 's#.centos.#.#' -e 's#.el7.#.#' -e 's#.el7_##' -e 's#.rpm$##' -e 's#.src$##' -e 's#.noarch$##' -e 's#.x86_64$##') + echo "$sss#$bbb#$ddd" + done | sort -r -V) + do + b2=$(echo "$dat2" | awk -F '#' '{ print $2 }') + d2=$(echo "$dat2" | awk -F '#' '{ print $3 }') + r2="$d2/$b2" + name2=$(rpm -q --nosignature --queryformat '%{NAME}\n' -p $r2) + if [ "$name2" != "$altname" ]; then + continue + fi + sb2=$(rpm -q --info --nosignature -p $r2 | grep '^Source RPM : ' | sed 's#^Source RPM : ##') + # if [ "$sb" == "$sb2" ]; then + # continue + # fi + + sbs=$(echo $sb | sed -e 's#.centos.#.#' -e 's#.el7.#.#' -e 's#.el7_##' -e 's#.rpm$##' -e 's#.src$##' -e 's#.noarch$##' -e 's#.x86_64$##') + sbs2=$(echo $sb2 | sed -e 's#.centos.#.#' -e 's#.el7.#.#' -e 's#.el7_##' -e 's#.rpm$##' -e 's#.src$##' -e 's#.noarch$##' -e 's#.x86_64$##') + newer=$((echo $sbs; echo $sbs2) | sort -r -V | head -n 1) + if [ "$sbs" != "$sbs2" ]; then + if [ "$newer" == "$sbs2" ]; then + # swap alternate for original + for link in $(find Binary -name $b); do + echo "SUGGEST: rm $link" + git rm -f $link + done + + r=$r2 + name=$name2 + b=$b2 + break + fi + fi + done + fi + fi + + echo "$names" | grep " $name " >> /dev/null + if [ $? 
-ne 0 ]; then + sb=$(rpm -q --info --nosignature -p $r | grep '^Source RPM : ' | sed 's#^Source RPM : ##') + if [ "x$sb" == "x" ]; then + echo "ERROR: no source rpm listed for '$b'" + continue + fi + s=$(find $SRC_ROOT -name "$sb") + if [ "x$s" == "x" ]; then + echo "ERROR: no source rpm '$sb' found for '$b'" + continue + fi + + if [ "x$sb" != "x" ]; then + MATCH=$(grep "^$sb" $BLACK_LIST_FILE || true) + if [ "$MATCH" != "" ]; then + echo "NOTE: '$sb' is black listed, so '$b' is ignored" + continue + fi + fi + + sname=$(rpm -q --nosignature --queryformat '%{NAME}\n' -p $s) + + lb=$(find Binary -name "$b") + if [ "x$lb" == "x" ]; then + echo "MISSING: '$b'" + link=$(echo $r | sed "s#^$MIRROR_ROOT/##") + echo "SUGGEST: ln -s $r $link" + ln -s $r $link + git add $link + else + echo "OK: '$b'" + fi + + for r2 in $(find Binary -name "$name-*.rpm"); do + b2=$(basename $r2) + if [ "$b" != "$b2" ]; then + name2=$(rpm -q --nosignature --queryformat '%{NAME}\n' -p $r2) + if [ "$name" == "$name2" ]; then + MATCH=$(grep "^$b2" $WHITE_LIST_FILE || true) + if [ "$MATCH" != "" ]; then + link=$(echo $r2 | sed "s#^$MIRROR_ROOT/##") + echo "SUGGEST: rm $link" + git rm -f $link + fi + fi + fi + done + + if [ $DEBUG_INFO -eq 0 ]; then + # Not a debuginfo therefore we can pull in new src.rpm + names="${names}${name} " + lsb=$(find Source -name "$sb") + if [ "x$lsb" == "x" ]; then + echo "MISSING: '$sb'" + link=$(echo $s | sed "s#^$MIRROR_ROOT/##") + echo "SUGGEST: ln -s $s $link" + ln -s $s $link + git add $link + else + echo "OK: '$sb'" + fi + + echo "$names" | grep " $name " >> /dev/null + if [ $? 
-ne 0 ]; then + for s2 in $(find Source -name "$sname-*.rpm"); do + sb2=$(basename $s2) + if [ "$sb" != "$sb2" ]; then + sname2=$(rpm -q --nosignature --queryformat '%{NAME}\n' -p $s2) + if [ "$sname" == "$sname2" ]; then + MATCH=$(grep "^$sb2" $WHITE_LIST_FILE || true) + if [ "$MATCH" != "" ]; then + link=$(echo $s2 | sed "s#^$MIRROR_ROOT/##") + echo "SUGGEST: rm $link" + git rm -f $link + fi + fi + fi + done + fi + snames="${snames}${sname} " + fi + fi +} + + +for dat in $(for br in $(find $BIN_ROOT -name '*.rpm' | grep -v '.src.rpm$' | grep -v '[-]debuginfo-'); do + d=$(dirname $br) + b=$(basename $br) + s=$(echo $b | sed -e 's#.centos.#.#' -e 's#.el7.#.#' -e 's#.el7_##' -e 's#.rpm$##' -e 's#.src$##' -e 's#.noarch$##' -e 's#.x86_64$##') + echo "$s#$b#$d" + done | sort -r -V) +do + do_work ${dat} +done + +for dat in $(for br in $(find $BIN_ROOT -name '*.rpm' | grep -v '.src.rpm$' | grep '[-]debuginfo-'); do + d=$(dirname $br) + b=$(basename $br) + s=$(echo $b | sed -e 's#.centos.#.#' -e 's#.el7.#.#' -e 's#.el7_##' -e 's#.rpm$##' -e 's#.src$##' -e 's#.noarch$##' -e 's#.x86_64$##') + echo "$s#$b#$d" + done | sort -r -V) +do + do_work ${dat} +done diff --git a/build-tools/mirror_rebase/link_cgcs_centos_repo_2 b/build-tools/mirror_rebase/link_cgcs_centos_repo_2 new file mode 100755 index 00000000..0da71646 --- /dev/null +++ b/build-tools/mirror_rebase/link_cgcs_centos_repo_2 @@ -0,0 +1,82 @@ +#!/bin/bash + +# +# Part of the monthly mirror update +# +# Update symlinks in cgcs-centos-repo to point to the latest version of packages in /import/mirrors/CentOS/tis-r5-CentOS/newton +# +# This step removes obsolete or broken Source links +# + +MIRROR_ROOT=/import/mirrors/CentOS/tis-r5-CentOS/newton +BIN_ROOT=$MIRROR_ROOT/Binary +SRC_ROOT=$MIRROR_ROOT/Source + +cd $MY_REPO/cgcs-centos-repo + +if [ $? 
-ne 0 ]; then + echo 'ERROR: failed to cd to $MY_REPO/cgcs-centos-repo' + return 1 +fi + + +# Clean broken and obsolete srpm links +snames=" " +for dat in $(for br in $(find Source -name '*.src.rpm'); do + d=$(dirname $br) + b=$(basename $br) + s=$(echo $b | sed -e 's#.centos.#.#' -e 's#.el7.#.#' -e 's#.el7_##' -e 's#.rpm$##' -e 's#.src$##' -e 's#.noarch$##' -e 's#.x86_64$##') + echo "$s#$b#$d" + done | sort -r -V) +do + sb=$(echo "$dat" | awk -F '#' '{ print $2 }') + d=$(echo "$dat" | awk -F '#' '{ print $3 }') + s="$d/$sb" + + if [ ! -f $s ]; then + continue + fi + + link=$(readlink $s) + if [ ! -f $link ]; then + echo "ERROR: '$sb' link to non-existant file '$link'" + echo "SUGGEST: rm $s" + git rm -f $s + continue + fi + + echo $link | grep "$MIRROR_ROOT" >> /dev/null + if [ $? -ne 0 ]; then + echo "ERROR: '$sb' links to unexpected file '$link'" + echo "SUGGEST: rm $s" + git rm -f $s + continue + fi + + sname=$(rpm -q --nosignature --queryformat '%{NAME}\n' -p $s) + echo "$snames" | grep " $sname " >> /dev/null + if [ $? 
-ne 0 ]; then + if [ "x$sname" != "x" ]; then + for s2 in $(find Source -name "$sname-*.src.rpm"); do + sb2=$(basename $s2) + if [ "$sb" != "$sb2" ]; then + sname2=$(rpm -q --nosignature --queryformat '%{NAME}\n' -p $s2) + if [ "$sname" == "$sname2" ]; then + link=$(echo $s2 | sed "s#^$MIRROR_ROOT/##") + echo "SUGGEST: rm $link, due to $sb" + git rm -f $link + + for r3 in $(find Binary -name "$sname-*.rpm"); do + sb3=$(rpm -q --info --nosignature -p $r3 | grep '^Source RPM : ' | sed 's#^Source RPM : ##') + if [ "$sb3" == "$sb2" ]; then + echo "SUGGEST: rm $r3, due to $sb2" + fi + done + fi + fi + done + fi + snames="${snames}${sname} " + fi +done + diff --git a/build-tools/mirror_rebase/link_cgcs_centos_repo_3 b/build-tools/mirror_rebase/link_cgcs_centos_repo_3 new file mode 100755 index 00000000..e1aeb732 --- /dev/null +++ b/build-tools/mirror_rebase/link_cgcs_centos_repo_3 @@ -0,0 +1,40 @@ +#!/bin/bash + +# +# Part of the monthly mirror update +# +# Update symlinks in cgcs-centos-repo to point to the latest version of packages in /import/mirrors/CentOS/tis-r5-CentOS/newton +# +# This step removes broken Binary links +# + +MIRROR_ROOT=/import/mirrors/CentOS/tis-r5-CentOS/newton + +cd $MY_REPO/cgcs-centos-repo + +if [ $? -ne 0 ]; then + echo 'ERROR: failed to cd to $MY_REPO/cgcs-centos-repo' + return 1 +fi + +# Clean broken rpm links +for r in $(find Binary -name '*.rpm' | grep -v '.src.rpm$' | sort -r -V); do + b=$(basename $r) + + link=$(readlink $r) + if [ ! -f $link ]; then + echo "ERROR: '$b' link to non-existant file '$link'" + echo "SUGGEST: rm $r" + git rm -f $r + continue + fi + + echo $link | grep "$MIRROR_ROOT" >> /dev/null + if [ $? 
-ne 0 ]; then + echo "ERROR: '$b' links to unexpected file '$link'" + echo "SUGGEST: rm $r" + git rm -f $r + continue + fi +done + diff --git a/build-tools/mirror_rebase/link_cgcs_centos_repo_4 b/build-tools/mirror_rebase/link_cgcs_centos_repo_4 new file mode 100755 index 00000000..e769979f --- /dev/null +++ b/build-tools/mirror_rebase/link_cgcs_centos_repo_4 @@ -0,0 +1,176 @@ +#!/bin/bash + +# +# Part of the monthly mirror update +# +# Update symlinks in cgcs-centos-repo to point to the latest version of packages in /import/mirrors/CentOS/tis-r5-CentOS/newton +# +# This step removes obsolete Binary links +# + +cd $MY_REPO/cgcs-centos-repo +MIRROR_ROOT=/import/mirrors/CentOS/tis-r5-CentOS/newton +BIN_ROOT=$MIRROR_ROOT/Binary +SRC_ROOT=$MIRROR_ROOT/Source + +cd $MY_REPO/cgcs-centos-repo + +if [ $? -ne 0 ]; then + echo 'ERROR: failed to cd to $MY_REPO/cgcs-centos-repo' + return 1 +fi + +for dat in $(for br in $(find Binary -name '*.rpm' | grep -v '.src.rpm$' ); do + d=$(dirname $br) + b=$(basename $br) + s=$(echo $b | sed -e 's#.centos.#.#' -e 's#.el7.#.#' -e 's#.el7_##' -e 's#.rpm$##' -e 's#.src$##' -e 's#.noarch$##' -e 's#.x86_64$##') + echo "$s#$b#$d" + done | sort -r -V) +do + b=$(echo "$dat" | awk -F '#' '{ print $2 }') + d=$(echo "$dat" | awk -F '#' '{ print $3 }') + r="$d/$b" + name=$(rpm -q --nosignature --queryformat '%{NAME}\n' -p $r) + + link=$(readlink $r) + sb=$(rpm -q --info --nosignature -p $r | grep '^Source RPM : ' | sed 's#^Source RPM : ##') + if [ "x$sb" == "x" ]; then + echo "ERROR: no source rpm listed for '$b'" + continue + fi + s=$(find Source -name "$sb") + if [ "x$s" == "x" ]; then + DELETED=0 + altname=$(echo $name | sed 's#^python-#python2-#') + if [ "$altname" != "$name" ]; then + # look for python2 alternative + + for dat2 in $(for br in $(find $BIN_ROOT -name "$altname-*.rpm" | grep -v '.src.rpm$' | grep -v '[-]debuginfo-'); do + ddd=$(dirname $br) + bbb=$(basename $br) + sss=$(echo $bbb | sed -e 's#.centos.#.#' -e 's#.el7.#.#' -e 
's#.el7_##' -e 's#.rpm$##' -e 's#.src$##' -e 's#.noarch$##' -e 's#.x86_64$##') + echo "$sss#$bbb#$ddd" + done | sort -r -V) + do + b2=$(echo "$dat2" | awk -F '#' '{ print $2 }') + d2=$(echo "$dat2" | awk -F '#' '{ print $3 }') + r2="$d2/$b2" + + name2=$(rpm -q --nosignature --queryformat '%{NAME}\n' -p $r2) + if [ "$name2" != "$altname" ]; then + continue + fi + sb2=$(rpm -q --info --nosignature -p $r2 | grep '^Source RPM : ' | sed 's#^Source RPM : ##') + if [ "$sb" == "$sb2" ]; then + continue + fi + + sbs=$(echo $sb | sed -e 's#.centos.#.#' -e 's#.el7.#.#' -e 's#.el7_##' -e 's#.rpm$##' -e 's#.src$##' -e 's#.noarch$##' -e 's#.x86_64$##') + sbs2=$(echo $sb2 | sed -e 's#.centos.#.#' -e 's#.el7.#.#' -e 's#.el7_##' -e 's#.rpm$##' -e 's#.src$##' -e 's#.noarch$##' -e 's#.x86_64$##') + newer=$((echo $sbs; echo $sbs2) | sort -r -V | head -n 1) + if [ "$sbs" != "$sbs2" ]; then + if [ "$newer" == "$sbs2" ]; then + # swap alternate for original + echo "SUGGEST: rm $r" + git rm -f $r + + DELETED=1 + fi + fi + done + fi + + if [ $DELETED -eq 0 ]; then + echo "ERROR: no source rpm '$sb' found for '$b'" + echo "SUGGEST: rm $r" + git rm -f $r + continue + fi + fi +done + +# +# The following would delete all binary rpms that Titanium Cloud would otherwise compile. +# However for bootstrapping the build there are some packages that we must have... +# e.g. bash, kernel-headers, .... +# So we will need something smarter than to just delete everything. 
+# +# TMP_DIR=$(mktemp -d /tmp/link_cgcs_centos_repo_XXXXXX) +# +# BUILT_SRPMS_FILE=$TMP_DIR/built_srpms_file +# +# for r in $(for c in $(find $MY_REPO -type d -name centos); do +# for sp in $(find $c -name srpm_path); do +# echo "$sp: $(cat $sp)" +# done +# done | grep 'mirror:' | awk -F ' ' '{ print $2 }') ; do +# b=$(basename $r) +# s=$(find $MY_REPO/cgcs-centos-repo/Source/ -name $b) +# n=$(rpm -q --qf '%{NAME}' --nosignature -p $s) +# echo "$n:$b" >> $BUILT_SRPMS_FILE +# done +# +# cd $MY_REPO/cgcs-centos-repo +# +# for r in $(find Binary -name '*.rpm'); do +# b=$(basename $r) +# sb=$(rpm -q --info --nosignature -p $r | grep '^Source RPM :' | sed 's#^Source RPM : ##') +# if [ "x$sb" != "x" ]; then +# s=$(find Source/ -name $sb) +# if [ "x$s" != "x" ]; then +# n=$(rpm -q --qf '%{NAME}' --nosignature -p $s) +# grep "^$n:" $BUILT_SRPMS_FILE +# if [ $? -eq 0 ]; then +# git rm -f $r +# fi +# fi +# fi +# done +# +# \rm $BUILT_SRPMS_FILE +# rmdir $TMP_DIR + +TMP_DIR=$(mktemp -d /tmp/link_cgcs_centos_repo_XXXXXX) +EXCLUDE_LIST=$TMP_DIR/exclude_list + +# List od packages we compile from scratch, not from centos srpm, goes to $EXCLUDE_LIST +for g in $(find $MY_REPO -type d -name .git); do + d=$(dirname $g) + for cpd in $(find $d -maxdepth 1 -name 'centos_pkg_dir*'); do + ( + cd $d + for pd in $(cat $cpd); do + ( + cd $pd/centos + if [ -f srpm_path ]; then + continue + fi + for spec in $(find . -name '*.spec'); do + n=$(spec_find_tag 'name' $spec '/tmp/' '0' 2> /dev/null) + echo "$spec: $n" + echo $n >> $EXCLUDE_LIST + done + ) + done + ) + done +done + +cd $MY_REPO/cgcs-centos-repo +for r in $(find Binary -name '*.rpm'); do + s=$(rpm -q --info --nosignature -p $r | grep '^Source RPM' | sed 's#^Source RPM ..##') + n=$(rpm -q --nosignature --qf '%{NAME}' -p $(find Source -name $s) ) + grep "^$n$" $EXCLUDE_LIST + if [ $? 
-eq 0 ]; then + echo "rm $r" + git rm $r + fi +done + +\rm -r $TMP_DIR + +echo "REMINDER: inspect changes in $MY_REPO/cgcs-centos-repo and commit with ..." +echo ' TIMESTAMP=$(date +"%Y-%m-%d")' +echo ' git commit -m "JENKINS: repo update $TIMESTAMP"' + + diff --git a/build-tools/mirror_rebase/link_cgcs_centos_repo_5 b/build-tools/mirror_rebase/link_cgcs_centos_repo_5 new file mode 100755 index 00000000..6b8d053d --- /dev/null +++ b/build-tools/mirror_rebase/link_cgcs_centos_repo_5 @@ -0,0 +1,94 @@ +#!/bin/bash + +# +# Part of the monthly mirror update +# +# Update symlinks in cgcs-centos-repo to point to the latest version of packages in /import/mirrors/CentOS/tis-r5-CentOS/newton +# +# Search for tis patched centos src.rpm's that have been upversioned +# + +cd $MY_REPO/cgcs-centos-repo +# OLD_MIRROR_ROOT=/import/mirrors/CentOS/tis-r5-CentOS/mitaka +OLD_MIRROR_ROOT=/import/mirrors/CentOS/tis-r5-CentOS/newton +# MIRROR_ROOT=/import/mirrors/CentOS/tis-r5-CentOS/newton +OLD_THIRD_MIRROR_ROOT=/import/mirrors/CentOS/tis-r5-CentOS/tis-r4-3rd-Party +# THIRD_MIRROR_ROOT=/import/mirrors/CentOS/tis-r5-CentOS/tis-r4-3rd-Party +# BIN_ROOT=$MIRROR_ROOT/Binary +# SRC_ROOT=$MIRROR_ROOT/Source +UPVERSION_LOG=$MY_WORKSPACE/upversion.log + +REPO_DOWNLOADS_ROOT="$MY_REPO" +NEW_MIRROR_ROOT="$MY_REPO/cgcs-centos-repo" +THIRD_PARTY_ROOT="$MY_REPO/cgcs-3rd-party-repo" + + +if [ -f $UPVERSION_LOG ]; then + rm -f $UPVERSION_LOG +fi + +cd $MY_REPO + +for g in $(find $MY_REPO -type d -name .git); do + d=$(dirname $g) + for pf in $(find $d -maxdepth 1 -name 'centos_pkg_dirs*'); do + if [ -f $pf ]; then + for p in $(cat $pf); do + pkg_dir="$d/$p" + sf="$pkg_dir/centos/srpm_path" + if [ -f $sf ]; then + for s in $(grep '^[^#]' $sf); do + ORIG_SRPM_PATH="" + # absolute path source rpms + echo "$s" | grep "^/" >/dev/null && ORIG_SRPM_PATH=$s + + if [ "${ORIG_SRPM_PATH}x" == "x" ]; then + # handle repo: definitions + echo "$s" | grep "^repo:" >/dev/null && ORIG_SRPM_PATH=$(echo $s | sed 
"s%^repo:%$REPO_DOWNLOADS_ROOT/%") + fi + + if [ "${ORIG_SRPM_PATH}x" == "x" ]; then + # handle 3rd_party: definitions + echo "$s" | grep "^3rd_party:" >/dev/null && ORIG_SRPM_PATH=$(echo $s | sed "s%^3rd_party:%$THIRD_PARTY_ROOT/%") + fi + + if [ "${ORIG_SRPM_PATH}x" == "x" ]; then + # handle mirror: definitions + # SAL TEMPORARY echo "$s" | grep "^mirror:" >/dev/null && ORIG_SRPM_PATH=`echo $s | sed "s%^mirror:%$MIRROR_ROOT/%"` + echo "$s" | grep "^mirror:" >/dev/null && ORIG_SRPM_PATH=$(echo $s | sed "s%^mirror:%$NEW_MIRROR_ROOT/%" | sed "s#CentOS/tis-r4-CentOS/kilo/##" | sed "s#CentOS/tis-r4-CentOS/mitaka/##" | sed "s#CentOS/tis-r4-CentOS/newton/##") + fi + + if [ "${ORIG_SRPM_PATH}x" == "x" ]; then + # we haven't found a valid prefix yet, so assume it's a legacy + # file (mirror: interpretation) + ORIG_SRPM_PATH="$NEW_MIRROR_ROOT/$s" + fi + + if [ ! -f $ORIG_SRPM_PATH ]; then + b=$(basename "$ORIG_SRPM_PATH") + old_srpm=$(find $OLD_MIRROR_ROOT $OLD_THIRD_MIRROR_ROOT -name $b | head -n 1) + old_name=$(rpm -q --nosignature --queryformat '%{NAME}\n' -p $old_srpm) + if [ "$old_name" == "" ]; then + echo "FAILED to find name for '$b', ORIG_SRPM_PATH='$ORIG_SRPM_PATH'" + exit 1 + fi + NEW_SRPM_PATH="" + for new_srpm in $(find $NEW_MIRROR_ROOT/Source $THIRD_PARTY_ROOT/Source -name "$old_name-[0-9]*.src.rpm"); do + new_name=$(rpm -q --nosignature --queryformat '%{NAME}\n' -p $new_srpm) + if [ "$new_name" == "$old_name" ]; then + NEW_SRPM_PATH=$new_srpm + break + fi + done + nb=$(basename $NEW_SRPM_PATH) + echo "FIX: '$sf' : '$b' -> '$nb'" + echo "$old_name#$sf#$s#$b#$nb" >> $UPVERSION_LOG + fi + + done + fi + done + fi + done +done + diff --git a/build-tools/mirror_rebase/link_cgcs_centos_repo_6 b/build-tools/mirror_rebase/link_cgcs_centos_repo_6 new file mode 100755 index 00000000..ec4ca002 --- /dev/null +++ b/build-tools/mirror_rebase/link_cgcs_centos_repo_6 @@ -0,0 +1,91 @@ +#!/bin/bash + +# +# Part of the monthly mirror update +# +# Update symlinks in 
cgcs-centos-repo to point to the latest version of packages in /import/mirrors/CentOS/tis-r5-CentOS/newton +# +# start an edit session for packages to be upgraded - pre upgrade version +# + +UPVERSION_LOG=$MY_WORKSPACE/upversion.log +if [ "x$WORKING_BRANCH" == "x" ]; then + WORKING_BRANCH=CGCS_DEV_0029_rebase_7_4 +fi + +if [ ! -f $UPVERSION_LOG ]; then + echo "ERROR: Can't find UPVERSION_LOG at '$UPVERSION_LOG'" +fi + + +# One step back to see the old symlinks +cd $MY_REPO/cgcs-3rd-party-repo +git checkout $WORKING_BRANCH +if [ $? != 0 ]; then + echo "ERROR: Can't checkout branch '$WORKING_BRANCH' in directory '$(pwd)'" + exit 1 +fi + +git checkout HEAD^ + +cd $MY_REPO/cgcs-centos-repo +git checkout $WORKING_BRANCH +if [ $? != 0 ]; then + echo "ERROR: Can't checkout branch '$WORKING_BRANCH' in directory '$(pwd)'" + exit 1 +fi + +git checkout HEAD^ + +# +# +# +FAILED="" +for dat in $(cat $UPVERSION_LOG); do + name=$(echo $dat | awk -F '#' '{print $1}') + srpm_path=$(echo $dat | awk -F '#' '{print $2}') + old_src_rpm=$(echo $dat | awk -F '#' '{print $4}') + new_src_rpm=$(echo $dat | awk -F '#' '{print $5}') + + echo "$name $old_src_rpm $new_src_rpm" + + if [ "$name" == "kernel" ]; then + build-pkgs --std --edit --clean $name + elif [ "$name" == "kernel-rt" ]; then + build-pkgs --rt --edit --clean $name + else + build-pkgs --edit --clean $name + fi + if [ $? -ne 0 ]; then + echo "ERROR: failed cmd 'build-pkgs --edit --clean $name'" + FAILED="$name $FAILED" + break + fi + echo "$? <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<" + if [ "$name" == "kernel" ]; then + build-pkgs --std --edit $name + elif [ "$name" == "kernel-rt" ]; then + build-pkgs --rt --edit $name + else + build-pkgs --edit $name + fi + if [ $? -ne 0 ]; then + echo "ERROR: failed cmd 'build-pkgs --edit $name'" + FAILED="$name $FAILED" + break + fi + echo "$? 
<=<=<=<=<=<=<=<=<=<=<=<=<=<=<=<=" +done + +cd $MY_REPO/cgcs-3rd-party-repo +git checkout $WORKING_BRANCH + +cd $MY_REPO/cgcs-centos-repo +git checkout $WORKING_BRANCH + +if [ "$FAILED" != "" ]; then + echo "Failed build-pkgs --edit for ... $FAILED" + exit 1 +fi + + diff --git a/build-tools/mirror_rebase/link_cgcs_centos_repo_7 b/build-tools/mirror_rebase/link_cgcs_centos_repo_7 new file mode 100755 index 00000000..49e9ddd4 --- /dev/null +++ b/build-tools/mirror_rebase/link_cgcs_centos_repo_7 @@ -0,0 +1,84 @@ +#!/bin/bash + +# +# Part of the monthly mirror update +# +# Update symlinks in cgcs-centos-repo to point to the latest version of packages in /import/mirrors/CentOS/tis-r5-CentOS/newton +# +# Update srpm_path for packages to be upgraded +# + +UPVERSION_LOG=$MY_WORKSPACE/upversion.log +if [ "x$ORIGIN_BRANCH" == "x" ]; then + ORIGIN_BRANCH=CGCS_DEV_0029 +fi +if [ "x$WORKING_BRANCH" == "x" ]; then + WORKING_BRANCH=CGCS_DEV_0029_rebase_7_4 +fi + +if [ ! -f $UPVERSION_LOG ]; then + echo "ERROR: Can't find UPVERSION_LOG at '$UPVERSION_LOG'" +fi + + +# One step back to see the old symlinks +cd $MY_REPO + +FAILED="" +for dat in $(cat $UPVERSION_LOG); do + name=$(echo $dat | awk -F '#' '{print $1}') + srpm_path=$(echo $dat | awk -F '#' '{print $2}') + old_src_rpm=$(echo $dat | awk -F '#' '{print $4}') + new_src_rpm=$(echo $dat | awk -F '#' '{print $5}') + + ( + cd $(dirname $srpm_path) + CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD) + if [ "$CURRENT_BRANCH" != "$WORKING_BRANCH" ]; then + git checkout $WORKING_BRANCH + if [ $? -ne 0 ]; then + git checkout $ORIGIN_BRANCH + if [ $? -ne 0 ]; then + echo "ERROR: Can't checkout branch '$ORIGIN_BRANCH' in directory '$(pwd)'" + exit 1 + fi + + git checkout -b $WORKING_BRANCH + if [ $? -ne 0 ]; then + echo "ERROR: failed to 'git checkout -b $WORKING_BRANCH' from '$(pwd)'" + exit 1 + else + echo "created branch '$WORKING_BRANCH' at '$(pwd)'" + fi + fi + fi + + sed -i "s#$old_src_rpm#$new_src_rpm#" $srpm_path + if [ $? 
-ne 0 ]; then + echo "ERROR: sed failed '$old_src_rpm' -> '$new_src_rpm'" + exit 1 + else + echo "updated $srpm_path: '$old_src_rpm' -> '$new_src_rpm'" + fi + + exit 0 + ) + + if [ $? -ne 0 ]; then + echo "ERROR: failed while working on package '$name' at '$srpm_path'" + exit 1 + fi +done + +echo "" +for d in $(for dat in $(cat $UPVERSION_LOG); do srpm_path=$(echo $dat | awk -F '#' '{print $2}'); ( cd $(dirname $srpm_path); git rev-parse --show-toplevel ); done | sort --unique); do + ( + cd $d + echo "cd $d" + for f in $(git status --porcelain | grep 'srpm_path$' | awk '{print $2}'); do + echo "git add $f"; + done + echo "git commit -m 'srpm_path updates for centos rebase'" + ) +done +echo "" diff --git a/build-tools/mirror_rebase/link_cgcs_centos_repo_8 b/build-tools/mirror_rebase/link_cgcs_centos_repo_8 new file mode 100755 index 00000000..1b480cd5 --- /dev/null +++ b/build-tools/mirror_rebase/link_cgcs_centos_repo_8 @@ -0,0 +1,65 @@ +#!/bin/bash + +# +# Part of the monthly mirror update +# +# Update symlinks in cgcs-centos-repo to point to the latest version of packages in /import/mirrors/CentOS/tis-r5-CentOS/newton +# +# Start an edit session for packages to be upgraded - post upgrade version +# + +UPVERSION_LOG=$MY_WORKSPACE/upversion.log +if [ "x$WORKING_BRANCH" == "x" ]; then + WORKING_BRANCH=CGCS_DEV_0029_rebase_7_4 +fi + +if [ ! -f $UPVERSION_LOG ]; then + echo "ERROR: Can't find UPVERSION_LOG at '$UPVERSION_LOG'" +fi + + +# Restore new symlinks +cd $MY_REPO/cgcs-3rd-party-repo +git checkout $WORKING_BRANCH +if [ $? != 0 ]; then + echo "ERROR: Can't checkout branch '$WORKING_BRANCH' in directory '$MY_REPO/cgcs-3rd-party-repo'" + exit 1 +fi + +cd $MY_REPO/cgcs-centos-repo +git checkout $WORKING_BRANCH +if [ $? 
!= 0 ]; then + echo "ERROR: Can't checkout branch '$WORKING_BRANCH' in directory '$MY_REPO/cgcs-centos-repo'" + exit 1 +fi + +FAILED="" +for dat in $(cat $UPVERSION_LOG); do + name=$(echo $dat | awk -F '#' '{print $1}') + srpm_path=$(echo $dat | awk -F '#' '{print $2}') + old_src_rpm=$(echo $dat | awk -F '#' '{print $4}') + new_src_rpm=$(echo $dat | awk -F '#' '{print $5}') + + echo "$name $old_src_rpm $new_src_rpm" + + if [ "$name" == "kernel" ]; then + build-pkgs --std --edit $name --no-meta-patch + elif [ "$name" == "kernel-rt" ]; then + build-pkgs --rt --edit $name --no-meta-patch + else + build-pkgs --edit $name --no-meta-patch + fi + if [ $? -ne 0 ]; then + echo "ERROR: failed cmd 'build-pkgs --edit $name'" + FAILED="$name $FAILED" + break + fi + echo "$? <=<=<=<=<=<=<=<=<=<=<=<=<=<=<=<=" +done + +if [ "$FAILED" != "" ]; then + echo "Failed build-pkgs --edit for ... $FAILED" + exit 1 +fi + + diff --git a/build-tools/mirror_rebase/link_cgcs_centos_repo_9 b/build-tools/mirror_rebase/link_cgcs_centos_repo_9 new file mode 100755 index 00000000..c0c62ea3 --- /dev/null +++ b/build-tools/mirror_rebase/link_cgcs_centos_repo_9 @@ -0,0 +1,346 @@ +#!/bin/bash + +# +# Part of the monthly mirror update +# +# Update symlinks in cgcs-centos-repo to point to the latest version of packages in /import/mirrors/CentOS/tis-r5-CentOS/newton +# +# Search for tis patched centos src.rpm's that have been upversioned +# + +UPVERSION_LOG=$MY_WORKSPACE/upversion.log +if [ "x$WORKING_BRANCH" == "x" ]; then + WORKING_BRANCH=CGCS_DEV_0029_rebase_7_4 +fi + +if [ ! -f $UPVERSION_LOG ]; then + echo "ERROR: Can't find UPVERSION_LOG at '$UPVERSION_LOG'" +fi + +if [ "$DISPLAY" == "" ]; then + echo "ERROR: X-Windows 'DISPLAY' variable not set. This script needs to open pop-up windows." + usage + exit 1 +fi + +# restore new symlinks +cd $MY_REPO/cgcs-3rd-party-repo +git checkout $WORKING_BRANCH +if [ $? 
!= 0 ]; then + echo "ERROR: Can't checkout branch '$WORKING_BRANCH' in directory '$MY_REPO/cgcs-3rd-party-repo'" + exit 1 +fi + +cd $MY_REPO/cgcs-centos-repo +git checkout $WORKING_BRANCH +if [ $? != 0 ]; then + echo "ERROR: Can't checkout branch '$WORKING_BRANCH' in directory '$MY_REPO/cgcs-centos-repo'" + exit 1 +fi + +FAILED="" +build_types="std rt" +for dat in $(cat $UPVERSION_LOG); do + ( + name=$(echo $dat | awk -F '#' '{print $1}') + srpm_path=$(echo $dat | awk -F '#' '{print $2}') + old_src_rpm=$(echo $dat | awk -F '#' '{print $4}') + new_src_rpm=$(echo $dat | awk -F '#' '{print $5}') + + PKG_DIR=$(dirname $(dirname $srpm_path)) + OLD_BRANCH=$(echo $old_src_rpm | sed 's#[.]src[.]rpm$##') + NEW_BRANCH=$(echo $new_src_rpm | sed 's#[.]src[.]rpm$##') + + WORK_META_DIR="" + for dd in $build_types; do + WORK_META_DIR=$MY_WORKSPACE/$dd/srpm_work/$name/rpmbuild + echo "WORK_META_DIR=$WORK_META_DIR" + if [ -d $WORK_META_DIR ]; then + break; + else + WORK_META_DIR="" + fi + done + if [ "$WORK_META_DIR" == "" ]; then + echo "ERROR: failed to find srpm_work directory for '$name'" + exit 1 + fi + + # WORK_SRC_DIR=$(dirname $(find $MY_WORKSPACE/srpm_work/$name/gits/ -type d -name .git)) + NEW_WORK_SRC_DIR="" + OLD_WORK_SRC_DIR="" + for dd in $build_types; do + for g in $(find $MY_WORKSPACE/$dd/srpm_work/$name/gits/ -type d -name .git); do + d=$(dirname $g) + if [ -d $d ]; then + cd $d; + git tag | grep pre_wrs_ >> /dev/null + if [ $? -ne 0 ]; then + continue + fi + git checkout $OLD_BRANCH 2>> /dev/null + if [ $? -eq 0 ]; then + OLD_WORK_SRC_DIR=$d + fi + git checkout $NEW_BRANCH 2>> /dev/null + if [ $? 
-eq 0 ]; then + NEW_WORK_SRC_DIR=$d + fi + fi + done + done + if [ "$WORK_META_DIR" == "" ]; then + echo "ERROR: failed to find srpm_work directory for '$name'" + exit 1 + fi + + echo "$name $old_src_rpm $new_src_rpm" + echo "PKG_DIR=$PKG_DIR" + echo "OLD_BRANCH=$OLD_BRANCH" + echo "NEW_BRANCH=$NEW_BRANCH" + echo "WORK_META_DIR=$WORK_META_DIR" + echo "OLD_WORK_SRC_DIR=$OLD_WORK_SRC_DIR" + echo "NEW_WORK_SRC_DIR=$NEW_WORK_SRC_DIR" + echo "" + + ( + cd $WORK_META_DIR + if [ $? -ne 0 ]; then + echo "ERROR: failed to cd to WORK_META_DIR=$WORK_META_DIR" + exit 1 + fi + echo "--- old meta git log (oldest to newest) ---" + git checkout $OLD_BRANCH + if [ $? -ne 0 ]; then + echo "ERROR: failed to git checkout OLD_BRANCH=$OLD_BRANCH" + exit 1 + fi + (git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%ci) %C(bold blue)<%an>%Creset' --abbrev-commit ; echo "") | sed '/^$/d' | tac + PATCH_COMMIT_LIST=$((git log --graph --pretty=format:'%h -%d %s (%ci) <%an>' --abbrev-commit ; echo "") | sed '/^$/d' | tac | grep WRS: | grep -v 'WRS: COPY_LIST content' | awk '{ print $2 }') + echo "--- new meta git log (oldest to newest) ---" + git checkout $NEW_BRANCH + if [ $? -ne 0 ]; then + echo "ERROR: failed to git checkout NEW_BRANCH=$NEW_BRANCH" + exit 1 + fi + (git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%ci) %C(bold blue)<%an>%Creset' --abbrev-commit ; echo "") | sed '/^$/d' | tac + REFERENCE_COMMIT=$((git log --graph --pretty=format:'%h -%d %s (%ci) <%an>' --abbrev-commit ; echo "") | sed '/^$/d' | head -n 1 | awk '{ print $2 }') + echo "" + + for COMMIT in ${PATCH_COMMIT_LIST}; do + echo "git cherry-pick $COMMIT" + git --no-pager show "$COMMIT" + git cherry-pick "$COMMIT" + if [ $? -ne 0 ]; then + echo "WARNING: 'git cherry-pick $COMMIT' found merge conflicts. 
Please fix these files" + git status --porcelain | grep '^UU ' | awk '{ print $2}' + echo "pwd=$(pwd)" + # gitk & + echo "git mergetool --no-prompt" + git mergetool --no-prompt + # for FILE_NAME in $(git status --porcelain | grep '^UU ' | awk '{ print $2}'); do + # xterm -e "vi $FILE_NAME -c '/[<=>][<=>][<=>][<=>]'" + # if [ $? -ne 0 ]; then + # echo "ERROR: problem launching editor on " + # exit 1 + # fi + # done + echo "git cherry-pick --continue" + git cherry-pick --continue + fi + done + + PATCH_LIST=$(git format-patch -n $REFERENCE_COMMIT) + if [ $? -ne 0 ]; then + echo "ERROR: failed to git format-patch -n REFERENCE_COMMIT=$REFERENCE_COMMIT" + exit 1 + fi + for PATCH_FILE in ${PATCH_LIST}; do + PATCH_TARGET=$(echo $PATCH_FILE | sed 's/^[0-9][0-9][0-9][0-9]-WRS-//' | sed 's/.patch$//') + echo "$PATCH_FILE -> $PATCH_TARGET" + N=$(find "$PKG_DIR/centos/meta_patches" -name "$PATCH_TARGET*" | wc -l) + if [ $N -eq 1 ]; then + PATCH_DEST=$(find "$PKG_DIR/centos/meta_patches" -name "$PATCH_TARGET*") + echo "cp -f $PATCH_FILE $PATCH_DEST" + \cp -f $PATCH_FILE $PATCH_DEST + if [ $? -ne 0 ]; then + echo "ERROR: copy failed $WORK_META_DIR/$PATCH_FILE -> $PATCH_DEST" + exit 1 + fi + else + echo "ERROR: Don't know what destination file name to use for patch '$WORK_META_DIR/$PATCH_FILE' derived from commit $COMMIT, and to be copied to '$PKG_DIR/centos/meta_patches'" + fi + done + + echo "" + echo "" + ) + + if [ $? -ne 0 ]; then + FAILED=$name + break + fi + + ( + echo "--- old git log (oldest to newest) ---" + cd $OLD_WORK_SRC_DIR + if [ $? -ne 0 ]; then + echo "ERROR: failed to cd to OLD_WORK_SRC_DIR=$OLD_WORK_SRC_DIR" + exit 1 + fi + + git checkout $OLD_BRANCH + if [ $? 
-ne 0 ]; then + echo "ERROR: failed to git checkout OLD_BRANCH=$OLD_BRANCH in directory '$OLD_WORK_SRC_DIR'" + exit 1 + fi + + (git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%ci) %C(bold blue)<%an>%Creset' --abbrev-commit; echo "") | sed '/^$/d' | tac + PATCH_COMMIT_LIST=$((git log --graph --pretty=format:'%h -%d %s (%ci) <%an>' --abbrev-commit; echo "") | sed '/^$/d' | tac | grep WRS: | grep -v 'WRS: COPY_LIST content' | awk '{ print $2 }') + + echo "--- new git log (oldest to newest) ---" + cd $NEW_WORK_SRC_DIR + if [ $? -ne 0 ]; then + echo "ERROR: failed to cd to NEW_WORK_SRC_DIR=$NEW_WORK_SRC_DIR" + exit 1 + fi + + git checkout $NEW_BRANCH + if [ $? -ne 0 ]; then + echo "ERROR: failed to git checkout NEW_BRANCH=$NEW_BRANCH in directory '$NEW_WORK_SRC_DIR'" + exit 1 + fi + + (git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%ci) %C(bold blue)<%an>%Creset' --abbrev-commit; echo "") | sed '/^$/d' | tac + REFERENCE_COMMIT=$((git log --graph --pretty=format:'%h -%d %s (%ci) <%an>' --abbrev-commit ; echo "") | sed '/^$/d' | head -n 1 | awk '{ print $2 }') + echo "" + + if [ "$OLD_WORK_SRC_DIR" == "$NEW_WORK_SRC_DIR" ]; then + for COMMIT in ${PATCH_COMMIT_LIST}; do + echo "git cherry-pick $COMMIT" + git --no-pager show "$COMMIT" + git cherry-pick "$COMMIT" + if [ $? -ne 0 ]; then + echo "WARNING: 'git cherry-pick $COMMIT' found merge conflicts. Please fix these files" + git status --porcelain | grep '^UU ' | awk '{ print $2}' + echo "pwd=$(pwd)" + # gitk & + echo "git mergetool --no-prompt" + git mergetool --no-prompt + # for FILE_NAME in $(git status --porcelain | grep '^UU ' | awk '{ print $2}'); do + # xterm -e "vi $FILE_NAME -c '/[<=>][<=>][<=>][<=>]'" + # if [ $? 
-ne 0 ]; then + # echo "ERROR: problem launching editor on " + # exit 1 + # fi + # done + echo "git cherry-pick --continue" + git cherry-pick --continue + fi + done + else + cd $OLD_WORK_SRC_DIR + PATCH_LIST=$(git format-patch -n pre_wrs_$OLD_BRANCH) + if [ $? -ne 0 ]; then + echo "ERROR: failed to git format-patch -n REFERENCE_COMMIT=pre_wrs_$OLD_BRANCH" + exit 1 + fi + cd $NEW_WORK_SRC_DIR + for PATCH_FILE in ${PATCH_LIST}; do + echo "=== Apply $PATCH_FILE ===" + cat $OLD_WORK_SRC_DIR/$PATCH_FILE + cat $OLD_WORK_SRC_DIR/$PATCH_FILE | patch -p1 + if [ $? -ne 0 ]; then + for REJECT in $(find . -name '*.rej'); do + FILE_NAME=$(echo $REJECT | sed 's#.rej$##') + cd $OLD_WORK_SRC_DIR + gitk $FILE_NAME & + cd $NEW_WORK_SRC_DIR + if [ -f $FILE_NAME ] && [ -f $FILE_NAME.orig ]; then + \cp -f $FILE_NAME.orig $FILE_NAME + xterm -e "vi $FILE_NAME $REJECT" + rm -f $REJECT + rm -f $FILE_NAME.orig + fi + done + fi + + git add --all + MSG=$(echo $PATCH_FILE | sed 's/^[0-9][0-9][0-9][0-9]-WRS-//' | sed 's/.patch$//') + git commit -m "WRS: $MSG" + done + + fi + + PATCH_LIST=$(git format-patch -n $REFERENCE_COMMIT) + if [ $? 
-ne 0 ]; then + echo "ERROR: failed to git format-patch -n REFERENCE_COMMIT=$REFERENCE_COMMIT" + exit 1 + fi + for PATCH_FILE in ${PATCH_LIST}; do + PATCH_TARGET=$(echo $PATCH_FILE | sed 's/^[0-9][0-9][0-9][0-9]-WRS-Patch[0-9]*-//' | sed 's/^[0-9][0-9][0-9][0-9]-WRS-Patch//' | sed 's/.patch$//') + echo "$PATCH_FILE -> $PATCH_TARGET" + PKG_PATCH_DIR="$PKG_DIR/centos/patches" + N=0 + if [ -d "$PKG_PATCH_DIR" ]; then + N=$(find "$PKG_PATCH_DIR" -name "$PATCH_TARGET*" | grep -v '[/]meta_patches[/]' | wc -l) + fi + if [ $N -ne 1 ]; then + PKG_PATCH_DIR="$PKG_DIR" + if [ -d "$PKG_PATCH_DIR" ]; then + N=$(find "$PKG_PATCH_DIR" -name "$PATCH_TARGET*" | grep -v '[/]meta_patches[/]' | wc -l) + fi + fi + echo "N=$N" + echo "PKG_PATCH_DIR=$PKG_PATCH_DIR" + + if [ $N -eq 1 ]; then + PATCH_DEST=$(find "$PKG_PATCH_DIR" -name "$PATCH_TARGET*" | grep -v '[/]meta_patches[/]') + echo "meld $PATCH_FILE -> $PATCH_DEST" + meld $PATCH_FILE $PATCH_DEST + if [ $? -ne 0 ]; then + echo "ERROR: meld failed $WORK_SRC_DIR/$PATCH_FILE -> $PATCH_DEST" + exit 1 + fi + else + echo "ERROR: Don't know what destination file name to use for patch '$OLD_WORK_SRC_DIR/$PATCH_FILE', and to be copied to '$PKG_PATCH_DIR'" + fi + done + + echo "" + echo "" + ) + + if [ $? -ne 0 ]; then + FAILED=$name + break + fi + + ) + + +done + +if [ "$FAILED" != "" ]; then + echo "Failed for ... 
$FAILED" + exit 1 +fi + +echo "" +for d in $(for dat in $(cat $UPVERSION_LOG); do srpm_path=$(echo $dat | awk -F '#' '{print $2}'); ( cd $(dirname $srpm_path); git rev-parse --show-toplevel ); done | sort --unique); do + ( + cd $d + echo "cd $d" + for f in $(git status --porcelain | awk '{print $2}'); do + echo "git add $f"; + done + if [ "$PATCH_ID" == "" ]; then + echo "git commit -m 'rebased patches'" + else + echo "git commit -m 'rebased patches'" + fi + ) +done +echo "" + + diff --git a/build-tools/mirror_rebase/tarball_upgrade b/build-tools/mirror_rebase/tarball_upgrade new file mode 100755 index 00000000..38417261 --- /dev/null +++ b/build-tools/mirror_rebase/tarball_upgrade @@ -0,0 +1,330 @@ +#!/bin/bash + +# +# Part of the monthly mirror update +# +# Update symlinks in cgcs-centos-repo to point to the latest version of packages in /import/mirrors/CentOS/tis-r4-CentOS/newton +# +# Search for tis patched centos src.rpm's that have been upversioned +# + +UPVERSION_LOG=$MY_WORKSPACE/upversion.log +if [ "x$WORKING_BRANCH" == "x" ]; then + WORKING_BRANCH=CGCS_DEV_0026_may_rebase +fi + +if [ ! -f $UPVERSION_LOG ]; then + echo "ERROR: Can't find UPVERSION_LOG at '$UPVERSION_LOG'" +fi + +if [ "$DISPLAY" == "" ]; then + echo "ERROR: X-Windows 'DISPLAY' variable not set. This script needs to open pop-up windows." + usage + exit 1 +fi + +# restore new symlinks +cd $MY_REPO/cgcs-3rd-party-repo +git checkout $WORKING_BRANCH +if [ $? != 0 ]; then + echo "ERROR: Can't checkout branch '$WORKING_BRANCH' in directory '$MY_REPO/cgcs-3rd-party-repo'" + exit 1 +fi + +cd $MY_REPO/cgcs-centos-repo +git checkout $WORKING_BRANCH +if [ $? 
!= 0 ]; then + echo "ERROR: Can't checkout branch '$WORKING_BRANCH' in directory '$MY_REPO/cgcs-centos-repo'" + exit 1 +fi + +FAILED="" +build_types="std rt" +for dat in $(cat $UPVERSION_LOG); do + ( + name=$(echo $dat | awk -F '#' '{print $1}') + srpm_path=$(echo $dat | awk -F '#' '{print $2}') + old_src_rpm=$(echo $dat | awk -F '#' '{print $4}') + new_src_rpm=$(echo $dat | awk -F '#' '{print $5}') + + PKG_DIR=$(dirname $(dirname $srpm_path)) + OLD_BRANCH=$(echo $old_src_rpm | sed 's#[.]src[.]rpm$##') + NEW_BRANCH=$(echo $new_src_rpm | sed 's#[.]src[.]rpm$##') + + WORK_META_DIR="" + for dd in $build_types; do + WORK_META_DIR=$MY_WORKSPACE/$dd/srpm_work/$name/rpmbuild + echo "WORK_META_DIR=$WORK_META_DIR" + if [ -d $WORK_META_DIR ]; then + break; + else + WORK_META_DIR="" + fi + done + if [ "$WORK_META_DIR" == "" ]; then + echo "ERROR: failed to find srpm_work directory for '$name'" + exit 1 + fi + + # WORK_SRC_DIR=$(dirname $(find $MY_WORKSPACE/srpm_work/$name/gits/ -type d -name .git)) + NEW_WORK_SRC_DIR="" + OLD_WORK_SRC_DIR="" + for dd in $build_types; do + for g in $(find $MY_WORKSPACE/$dd/srpm_work/$name/gits/ -type d -name .git); do + d=$(dirname $g) + if [ -d $d ]; then + cd $d; + git tag | grep pre_wrs_ >> /dev/null + if [ $? -ne 0 ]; then + continue + fi + git checkout $OLD_BRANCH 2>> /dev/null + if [ $? -eq 0 ]; then + OLD_WORK_SRC_DIR=$d + fi + git checkout $NEW_BRANCH 2>> /dev/null + if [ $? -eq 0 ]; then + NEW_WORK_SRC_DIR=$d + fi + fi + done + done + if [ "$WORK_META_DIR" == "" ]; then + echo "ERROR: failed to find srpm_work directory for '$name'" + exit 1 + fi + + echo "$name $old_src_rpm $new_src_rpm" + echo "PKG_DIR=$PKG_DIR" + echo "OLD_BRANCH=$OLD_BRANCH" + echo "NEW_BRANCH=$NEW_BRANCH" + echo "WORK_META_DIR=$WORK_META_DIR" + echo "OLD_WORK_SRC_DIR=$OLD_WORK_SRC_DIR" + echo "NEW_WORK_SRC_DIR=$NEW_WORK_SRC_DIR" + echo "" + + ( + cd $WORK_META_DIR + if [ $? 
-ne 0 ]; then + echo "ERROR: failed to cd to WORK_META_DIR=$WORK_META_DIR" + exit 1 + fi + echo "--- old meta git log (oldest to newest) ---" + git checkout $OLD_BRANCH + if [ $? -ne 0 ]; then + echo "ERROR: failed to git checkout OLD_BRANCH=$OLD_BRANCH" + exit 1 + fi + (git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%ci) %C(bold blue)<%an>%Creset' --abbrev-commit ; echo "") | sed '/^$/d' | tac + PATCH_COMMIT_LIST=$((git log --graph --pretty=format:'%h -%d %s (%ci) <%an>' --abbrev-commit ; echo "") | sed '/^$/d' | tac | grep WRS: | grep -v 'WRS: COPY_LIST content' | awk '{ print $2 }') + echo "--- new meta git log (oldest to newest) ---" + git checkout $NEW_BRANCH + if [ $? -ne 0 ]; then + echo "ERROR: failed to git checkout NEW_BRANCH=$NEW_BRANCH" + exit 1 + fi + (git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%ci) %C(bold blue)<%an>%Creset' --abbrev-commit ; echo "") | sed '/^$/d' | tac + REFERENCE_COMMIT=$((git log --graph --pretty=format:'%h -%d %s (%ci) <%an>' --abbrev-commit ; echo "") | sed '/^$/d' | head -n 1 | awk '{ print $2 }') + echo "" + + for COMMIT in ${PATCH_COMMIT_LIST}; do + echo "git cherry-pick $COMMIT" + git --no-pager show "$COMMIT" + git cherry-pick "$COMMIT" + if [ $? -ne 0 ]; then + echo "WARNING: 'git cherry-pick $COMMIT' found merge conflicts. Please fix these files" + git status --porcelain | grep '^UU ' | awk '{ print $2}' + echo "pwd=$(pwd)" + # gitk & + echo "git mergetool --no-prompt" + git mergetool --no-prompt + # for FILE_NAME in $(git status --porcelain | grep '^UU ' | awk '{ print $2}'); do + # xterm -e "vi $FILE_NAME -c '/[<=>][<=>][<=>][<=>]'" + # if [ $? -ne 0 ]; then + # echo "ERROR: problem launching editor on " + # exit 1 + # fi + # done + echo "git cherry-pick --continue" + git cherry-pick --continue + fi + done + + PATCH_LIST=$(git format-patch -n $REFERENCE_COMMIT) + if [ $? 
-ne 0 ]; then + echo "ERROR: failed to git format-patch -n REFERENCE_COMMIT=$REFERENCE_COMMIT" + exit 1 + fi + for PATCH_FILE in ${PATCH_LIST}; do + PATCH_TARGET=$(echo $PATCH_FILE | sed 's/^[0-9][0-9][0-9][0-9]-WRS-//' | sed 's/.patch$//') + echo "$PATCH_FILE -> $PATCH_TARGET" + N=$(find "$PKG_DIR/centos/meta_patches" -name "$PATCH_TARGET*" | wc -l) + if [ $N -eq 1 ]; then + PATCH_DEST=$(find "$PKG_DIR/centos/meta_patches" -name "$PATCH_TARGET*") + echo "cp -f $PATCH_FILE $PATCH_DEST" + \cp -f $PATCH_FILE $PATCH_DEST + if [ $? -ne 0 ]; then + echo "ERROR: copy failed $WORK_META_DIR/$PATCH_FILE -> $PATCH_DEST" + exit 1 + fi + else + echo "ERROR: Don't know what destination file name to use for patch '$WORK_META_DIR/$PATCH_FILE' derived from commit $COMMIT, and to be copied to '$PKG_DIR/centos/meta_patches'" + fi + done + + echo "" + echo "" + ) + + if [ $? -ne 0 ]; then + FAILED=$name + break + fi + + ( + echo "--- old git log (oldest to newest) ---" + cd $OLD_WORK_SRC_DIR + if [ $? -ne 0 ]; then + echo "ERROR: failed to cd to OLD_WORK_SRC_DIR=$OLD_WORK_SRC_DIR" + exit 1 + fi + + git checkout $OLD_BRANCH + if [ $? -ne 0 ]; then + echo "ERROR: failed to git checkout OLD_BRANCH=$OLD_BRANCH in directory '$OLD_WORK_SRC_DIR'" + exit 1 + fi + + (git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%ci) %C(bold blue)<%an>%Creset' --abbrev-commit; echo "") | sed '/^$/d' | tac + PATCH_COMMIT_LIST=$((git log --graph --pretty=format:'%h -%d %s (%ci) <%an>' --abbrev-commit; echo "") | sed '/^$/d' | tac | grep WRS: | grep -v 'WRS: COPY_LIST content' | awk '{ print $2 }') + + echo "--- new git log (oldest to newest) ---" + cd $NEW_WORK_SRC_DIR + if [ $? -ne 0 ]; then + echo "ERROR: failed to cd to NEW_WORK_SRC_DIR=$NEW_WORK_SRC_DIR" + exit 1 + fi + + git checkout $NEW_BRANCH + if [ $? 
-ne 0 ]; then + echo "ERROR: failed to git checkout NEW_BRANCH=$NEW_BRANCH in directory '$NEW_WORK_SRC_DIR'" + exit 1 + fi + + (git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%ci) %C(bold blue)<%an>%Creset' --abbrev-commit; echo "") | sed '/^$/d' | tac + REFERENCE_COMMIT=$((git log --graph --pretty=format:'%h -%d %s (%ci) <%an>' --abbrev-commit ; echo "") | sed '/^$/d' | head -n 1 | awk '{ print $2 }') + echo "" + + if [ "$OLD_WORK_SRC_DIR" == "$NEW_WORK_SRC_DIR" ]; then + for COMMIT in ${PATCH_COMMIT_LIST}; do + echo "git cherry-pick $COMMIT" + git --no-pager show "$COMMIT" + git cherry-pick "$COMMIT" + if [ $? -ne 0 ]; then + echo "WARNING: 'git cherry-pick $COMMIT' found merge conflicts. Please fix these files" + git status --porcelain | grep '^UU ' | awk '{ print $2}' + echo "pwd=$(pwd)" + # gitk & + echo "git mergetool --no-prompt" + git mergetool --no-prompt + # for FILE_NAME in $(git status --porcelain | grep '^UU ' | awk '{ print $2}'); do + # xterm -e "vi $FILE_NAME -c '/[<=>][<=>][<=>][<=>]'" + # if [ $? -ne 0 ]; then + # echo "ERROR: problem launching editor on " + # exit 1 + # fi + # done + echo "git cherry-pick --continue" + git cherry-pick --continue + fi + done + else + cd $OLD_WORK_SRC_DIR + PATCH_LIST=$(git format-patch -n pre_wrs_$OLD_BRANCH) + if [ $? -ne 0 ]; then + echo "ERROR: failed to git format-patch -n REFERENCE_COMMIT=pre_wrs_$OLD_BRANCH" + exit 1 + fi + cd $NEW_WORK_SRC_DIR + for PATCH_FILE in ${PATCH_LIST}; do + echo "=== Apply $PATCH_FILE ===" + cat $OLD_WORK_SRC_DIR/$PATCH_FILE + cat $OLD_WORK_SRC_DIR/$PATCH_FILE | patch -p1 + if [ $? -ne 0 ]; then + for REJECT in $(find . 
-name '*.rej'); do + FILE_NAME=$(echo $REJECT | sed 's#.rej$##') + cd $OLD_WORK_SRC_DIR + gitk $FILE_NAME & + cd $NEW_WORK_SRC_DIR + if [ -f $FILE_NAME ] && [ -f $FILE_NAME.orig ]; then + \cp -f $FILE_NAME.orig $FILE_NAME + xterm -e "vi $FILE_NAME $REJECT" + rm -f $REJECT + rm -f $FILE_NAME.orig + fi + done + fi + + git add --all + MSG=$(echo $PATCH_FILE | sed 's/^[0-9][0-9][0-9][0-9]-WRS-//' | sed 's/.patch$//') + git commit -m "WRS: $MSG" + done + + fi + + PATCH_LIST=$(git format-patch -n $REFERENCE_COMMIT) + if [ $? -ne 0 ]; then + echo "ERROR: failed to git format-patch -n REFERENCE_COMMIT=$REFERENCE_COMMIT" + exit 1 + fi + for PATCH_FILE in ${PATCH_LIST}; do + PATCH_TARGET=$(echo $PATCH_FILE | sed 's/^[0-9][0-9][0-9][0-9]-WRS-Patch[0-9]*-//' | sed 's/^[0-9][0-9][0-9][0-9]-WRS-Patch//' | sed 's/.patch$//') + echo "$PATCH_FILE -> $PATCH_TARGET" + PKG_PATCH_DIR="$PKG_DIR/centos/patches" + N=0 + if [ -d "$PKG_PATCH_DIR" ]; then + N=$(find "$PKG_PATCH_DIR" -name "$PATCH_TARGET*" | grep -v '[/]meta_patches[/]' | wc -l) + fi + if [ $N -ne 1 ]; then + PKG_PATCH_DIR="$PKG_DIR" + if [ -d "$PKG_PATCH_DIR" ]; then + N=$(find "$PKG_PATCH_DIR" -name "$PATCH_TARGET*" | grep -v '[/]meta_patches[/]' | wc -l) + fi + fi + echo "N=$N" + echo "PKG_PATCH_DIR=$PKG_PATCH_DIR" + + if [ $N -eq 1 ]; then + PATCH_DEST=$(find "$PKG_PATCH_DIR" -name "$PATCH_TARGET*" | grep -v '[/]meta_patches[/]') + echo "meld $PATCH_FILE -> $PATCH_DEST" + meld $PATCH_FILE $PATCH_DEST + if [ $? -ne 0 ]; then + echo "ERROR: meld failed $WORK_SRC_DIR/$PATCH_FILE -> $PATCH_DEST" + exit 1 + fi + else + echo "ERROR: Don't know what destination file name to use for patch '$OLD_WORK_SRC_DIR/$PATCH_FILE', and to be copied to '$PKG_PATCH_DIR'" + fi + done + + echo "" + echo "" + ) + + if [ $? -ne 0 ]; then + FAILED=$name + break + fi + + ) + + +done + +if [ "$FAILED" != "" ]; then + echo "Failed for ... 
$FAILED" + exit 1 +fi + + + diff --git a/build-tools/mk/_sign_pkgs.mk b/build-tools/mk/_sign_pkgs.mk new file mode 100644 index 00000000..48c3c419 --- /dev/null +++ b/build-tools/mk/_sign_pkgs.mk @@ -0,0 +1,31 @@ + +# +# this makefile is used by the build-iso process to add file signature to all rpms +# +# it requires a private key, passed as the variable KEY + +PKGS_LIST := $(wildcard *.rpm) + +# we need to skip the signature of some packages that +# might be installed in file systems that do not support extended attributes +# in the case of shim- and grub2-efi-, the UEFI configuration installs them in a VFAT file system +PKGS_TO_SKIP := $(wildcard grub2-efi-[0-9]*.x86_64.rpm shim-[0-9]*.x86_64.rpm) + +PKGS_TO_SIGN = $(filter-out $(PKGS_TO_SKIP),$(PKGS_LIST)) + +define _pkg_sign_tmpl + +_sign_$1 : + @ rpmsign --signfiles --fskpath=$(KEY) $1 + @ chown mockbuild $1 + @ chgrp users $1 + +sign : _sign_$1 + +endef + +sign : + @echo signed all packages + +$(foreach file,$(PKGS_TO_SIGN),$(eval $(call _pkg_sign_tmpl,$(file)))) + diff --git a/build-tools/mock_cfg_to_yum_conf.py b/build-tools/mock_cfg_to_yum_conf.py new file mode 100755 index 00000000..cc8b75cd --- /dev/null +++ b/build-tools/mock_cfg_to_yum_conf.py @@ -0,0 +1,9 @@ +#!/usr/bin/python + +import sys + +FN=sys.argv[1] +variables={} +variables['config_opts']={} +execfile( FN, variables ) +print variables['config_opts']['yum.conf'] diff --git a/build-tools/mockchain-parallel b/build-tools/mockchain-parallel new file mode 100755 index 00000000..0e1fd407 --- /dev/null +++ b/build-tools/mockchain-parallel @@ -0,0 +1,1207 @@ +#!/usr/bin/python -tt +# -*- coding: utf-8 -*- +# vim: noai:ts=4:sw=4:expandtab + +# by skvidal@fedoraproject.org +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# copyright 2012 Red Hat, Inc. + +# SUMMARY +# mockchain +# take a mock config and a series of srpms +# rebuild them one at a time +# adding each to a local repo +# so they are available as build deps to next pkg being built +from __future__ import print_function + +import cgi +# pylint: disable=deprecated-module +import optparse +import os +import re +import shutil +import subprocess +import sys +import tempfile +import time +import multiprocessing +import signal +import psutil + +import requests +# pylint: disable=import-error +from six.moves.urllib_parse import urlsplit + +import mockbuild.util + +from rpmUtils.miscutils import splitFilename + + +# all of the variables below are substituted by the build system +__VERSION__="1.3.4" +SYSCONFDIR="/etc" +PYTHONDIR="/usr/lib/python2.7/site-packages" +PKGPYTHONDIR="/usr/lib/python2.7/site-packages/mockbuild" +MOCKCONFDIR = os.path.join(SYSCONFDIR, "mock") +# end build system subs + +mockconfig_path = '/etc/mock' + +def rpmName(path): + filename = os.path.basename(path) + (n, v, r, e, a) = splitFilename(filename) + return n + +def createrepo(path): + global max_workers + if os.path.exists(path + '/repodata/repomd.xml'): + comm = ['/usr/bin/createrepo_c', '--update', '--retain-old-md', "%d" % max_workers, "--workers", "%d" % max_workers, path] + else: + comm = ['/usr/bin/createrepo_c', "--workers", "%d" % max_workers, path] + cmd = subprocess.Popen( + comm, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = cmd.communicate() + return out, err + + 
# Parsed command-line options, visible to child processes and signal handlers.
g_opts = optparse.Values()

def parse_args(args):
    """Parse mockchain-parallel's command line.

    Returns (opts, remaining_args).  Exits with status 1 when no chroot
    config (-r) is given or no package arguments are present.
    """
    parser = optparse.OptionParser('\nmockchain -r mockcfg pkg1 [pkg2] [pkg3]')
    parser.add_option(
        '-r', '--root', default=None, dest='chroot',
        metavar="CONFIG",
        help="chroot config name/base to use in the mock build")
    parser.add_option(
        '-l', '--localrepo', default=None,
        help="local path for the local repo, defaults to making its own")
    parser.add_option(
        '-c', '--continue', default=False, action='store_true',
        dest='cont',
        help="if a pkg fails to build, continue to the next one")
    parser.add_option(
        '-a', '--addrepo', default=[], action='append',
        dest='repos',
        help="add these repo baseurls to the chroot's yum config")
    parser.add_option(
        '--recurse', default=False, action='store_true',
        help="if more than one pkg and it fails to build, try to build the rest and come back to it")
    parser.add_option(
        '--log', default=None, dest='logfile',
        help="log to the file named by this option, defaults to not logging")
    parser.add_option(
        '--workers', default=1, dest='max_workers',
        help="number of parallel build jobs")
    parser.add_option(
        '--worker-resources', default="", dest='worker_resources',
        help="colon seperated list, how much mem in gb for each workers temfs")
    parser.add_option(
        '--basedir', default='/var/lib/mock', dest='basedir',
        help="path to workspace")
    parser.add_option(
        '--tmp_prefix', default=None, dest='tmp_prefix',
        help="tmp dir prefix - will default to username-pid if not specified")
    parser.add_option(
        '-m', '--mock-option', default=[], action='append',
        dest='mock_option',
        help="option to pass directly to mock")
    parser.add_option(
        '--mark-slow-name', default=[], action='append',
        dest='slow_pkg_names_raw',
        help="package name that is known to build slowly")
    parser.add_option(
        '--mark-slow-path', default=[], action='append',
        dest='slow_pkgs_raw',
        help="package path that is known to build slowly")
    parser.add_option(
        '--mark-big-name', default=[], action='append',
        dest='big_pkg_names_raw',
        help="package name that is known to require a lot of disk space to build")
    parser.add_option(
        '--mark-big-path', default=[], action='append',
        dest='big_pkgs_raw',
        help="package path that is known to require a lot of disk space to build")
    parser.add_option(
        '--srpm-dependency-file', default=None,
        dest='srpm_dependency_file',
        help="path to srpm dependency file")
    parser.add_option(
        '--rpm-dependency-file', default=None,
        dest='rpm_dependency_file',
        help="path to rpm dependency file")
    parser.add_option(
        '--rpm-to-srpm-map-file', default=None,
        dest='rpm_to_srpm_map_file',
        help="path to rpm to srpm map file")

    opts, args = parser.parse_args(args)
    if opts.recurse:
        opts.cont = True

    if not opts.chroot:
        print("You must provide an argument to -r for the mock chroot")
        sys.exit(1)

    # NOTE(review): this inspects the process-wide sys.argv rather than the
    # *args* parameter (same quirk as upstream mockchain) — confirm intended.
    if len(sys.argv) < 3:
        print("You must specify at least 1 package to build")
        sys.exit(1)

    return opts, args


# Repo ids already handed out by generate_repo_id / add_local_repo.
REPOS_ID = []

# name/path -> build-speed and disk-size hints from --mark-slow-*/--mark-big-*.
slow_pkg_names={}
slow_pkgs={}
big_pkg_names={}
big_pkgs={}

def generate_repo_id(baseurl):
    """ generate repository id for yum.conf out of baseurl """
    repoid = "/".join(baseurl.split('//')[1:]).replace('/', '_')
    repoid = re.sub(r'[^a-zA-Z0-9_]', '', repoid)
    suffix = ''
    i = 1
    while repoid + suffix in REPOS_ID:
        suffix = str(i)
        i += 1
    repoid = repoid + suffix
    REPOS_ID.append(repoid)
    return repoid


def set_build_idx(infile, destfile, build_idx, tmpfs_size_gb, opts):
    """Clone mock config *infile* to *destfile* for build slot *build_idx*.

    Rewrites the 'b0' root/cache names to 'b<idx>' and, when
    *tmpfs_size_gb* > 0, enables the tmpfs plugin with that size.
    Returns (True, '') on success, (False, message) on failure.
    """
    try:
        with open(infile) as f:
            code = compile(f.read(), infile, 'exec')
        # pylint: disable=exec-used
        exec(code)

        config_opts['root'] = config_opts['root'].replace('b0', 'b{0}'.format(build_idx))
        config_opts['cache_topdir'] = config_opts['cache_topdir'].replace('b0', 'b{0}'.format(build_idx))
        if tmpfs_size_gb > 0:
            config_opts['plugin_conf']['tmpfs_enable'] = True
            config_opts['plugin_conf']['tmpfs_opts'] = {}
            config_opts['plugin_conf']['tmpfs_opts']['required_ram_mb'] = 1024
            config_opts['plugin_conf']['tmpfs_opts']['max_fs_size'] = "%dg" % tmpfs_size_gb
            config_opts['plugin_conf']['tmpfs_opts']['mode'] = '0755'
            config_opts['plugin_conf']['tmpfs_opts']['keep_mounted'] = True

        with open(destfile, 'w') as br_dest:
            for k, v in list(config_opts.items()):
                br_dest.write("config_opts[%r] = %r\n" % (k, v))

        try:
            log(opts.logfile, "set_build_idx: os.makedirs %s" % config_opts['cache_topdir'])
            if not os.path.isdir(config_opts['cache_topdir']):
                os.makedirs(config_opts['cache_topdir'], exist_ok=True)
        except (IOError, OSError):
            return False, "Could not create dir: %s" % config_opts['cache_topdir']

        cache_dir = "%s/%s/mock" % (config_opts['basedir'], config_opts['root'])
        try:
            log(opts.logfile, "set_build_idx: os.makedirs %s" % cache_dir)
            if not os.path.isdir(cache_dir):
                os.makedirs(cache_dir)
        except (IOError, OSError):
            return False, "Could not create dir: %s" % cache_dir

        return True, ''
    except (IOError, OSError):
        return False, "Could not write mock config to %s" % destfile

def set_basedir(infile, destfile, basedir, opts):
    """Rewrite mock config *infile* to *destfile* rooted under *basedir*.

    Points basedir/resultdir/backup_base_dir/root/cache_topdir at the
    workspace.  Returns (True, '') on success, (False, message) otherwise.
    """
    log(opts.logfile, "set_basedir: infile=%s, destfile=%s, basedir=%s" % (infile, destfile, basedir))
    try:
        with open(infile) as f:
            code = compile(f.read(), infile, 'exec')
        # pylint: disable=exec-used
        exec(code)

        config_opts['basedir'] = basedir
        config_opts['resultdir'] = '{0}/result'.format(basedir)
        config_opts['backup_base_dir'] = '{0}/backup'.format(basedir)
        config_opts['root'] = 'mock/b0'
        config_opts['cache_topdir'] = '{0}/cache/b0'.format(basedir)

        with open(destfile, 'w') as br_dest:
            for k, v in list(config_opts.items()):
                br_dest.write("config_opts[%r] = %r\n" % (k, v))
        return True, ''
    except (IOError, OSError):
        return False, "Could not write mock config to %s" % destfile

def add_local_repo(infile, destfile, baseurl, repoid=None):
    """take a mock chroot config and add a repo to its yum.conf
    infile = mock chroot config file
    destfile = where to save out the result
    baseurl = baseurl of repo you wish to add"""
    global config_opts

    try:
        with open(infile) as f:
            code = compile(f.read(), infile, 'exec')
        # pylint: disable=exec-used
        exec(code)
        if not repoid:
            repoid = generate_repo_id(baseurl)
        else:
            REPOS_ID.append(repoid)
        localyumrepo = """
[%s]
name=%s
baseurl=%s
enabled=1
skip_if_unavailable=1
metadata_expire=0
cost=1
best=1
""" % (repoid, baseurl, baseurl)

        config_opts['yum.conf'] += localyumrepo
        with open(destfile, 'w') as br_dest:
            for k, v in list(config_opts.items()):
                br_dest.write("config_opts[%r] = %r\n" % (k, v))
        return True, ''
    except (IOError, OSError):
        return False, "Could not write mock config to %s" % destfile


def do_build(opts, cfg, pkg):
    """Build one srpm under mock; runs in a child process.

    Exits the process with: 0 = build failure, 1 = success,
    2 = already built (a 'success' marker file exists).
    """
    # returns 0, cmd, out, err = failure
    # returns 1, cmd, out, err = success
    # returns 2, None, None, None = already built

    signal.signal(signal.SIGTERM, child_signal_handler)
    signal.signal(signal.SIGINT, child_signal_handler)
    signal.signal(signal.SIGHUP, child_signal_handler)
    signal.signal(signal.SIGABRT, child_signal_handler)
    s_pkg = os.path.basename(pkg)
    pdn = s_pkg.replace('.src.rpm', '')
    resdir = '%s/%s' % (opts.local_repo_dir, pdn)
    resdir = os.path.normpath(resdir)
    if not os.path.exists(resdir):
        os.makedirs(resdir)

    success_file = resdir + '/success'
    fail_file = resdir + '/fail'

    if os.path.exists(success_file):
        sys.exit(2)

    # clean it up if we're starting over :)
    if os.path.exists(fail_file):
        os.unlink(fail_file)

    if opts.uniqueext == '':
        mockcmd = ['/usr/bin/mock',
                   '--configdir', opts.config_path,
                   '--resultdir', resdir,
                   '-r', cfg, ]
    else:
        mockcmd = ['/usr/bin/mock',
                   '--configdir', opts.config_path,
                   '--resultdir', resdir,
                   '--uniqueext', opts.uniqueext,
                   '-r', cfg, ]
    # heuristic here: if the user passed mock "-d foo" split it into two argv
    # entries, but be careful to leave "-d'foo bar'" or "--define='foo bar'" as is
    compiled_re_1 = re.compile(r'^(-\S)\s+(.+)')
    # FIX: was r'^(--[^ =])[ =](\.+)' — '[^ =]' matched only one character and
    # '(\.+)' matched only literal dots, so long options were never split.
    compiled_re_2 = re.compile(r'^(--[^ =]+)[ =](.+)')
    for option in opts.mock_option:
        r_match = compiled_re_1.match(option)
        if r_match:
            mockcmd.extend([r_match.group(1), r_match.group(2)])
        else:
            r_match = compiled_re_2.match(option)
            if r_match:
                mockcmd.extend([r_match.group(1), r_match.group(2)])
            else:
                mockcmd.append(option)

    print('building %s' % s_pkg)
    mockcmd.append(pkg)
    cmd = subprocess.Popen(
        mockcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = cmd.communicate()
    if cmd.returncode == 0:
        with open(success_file, 'w') as f:
            f.write('done\n')
        ret = 1
    else:
        if (isinstance(err, bytes)):
            err = err.decode("utf-8")
        sys.stderr.write(err)
        with open(fail_file, 'w') as f:
            f.write('undone\n')
        ret = 0

    sys.exit(ret)


def log(lf, msg):
    """Append *msg* (timestamped) to logfile *lf* if set, and echo to stdout."""
    if lf:
        now = time.time()
        try:
            with open(lf, 'a') as f:
                f.write(str(now) + ':' + msg + '\n')
        except (IOError, OSError) as e:
            print('Could not write to logfile %s - %s' % (lf, str(e)))
    print(msg)


# Shared scheduler state.
config_opts = {}        # mock configuration, mutated by the exec'd config files

worker_data = []        # one {'proc','pkg','build_index'} dict per running child
workers = 0             # number of children currently running
max_workers = 1         # --workers

build_env = []          # per-slot {'state','cfg','fs_size_gb'} records

failed = []             # pkgs that failed in the current iteration
built_pkgs = []         # pkgs built successfully so far

local_repo_dir = ""

# Bidirectional mapping between srpm path and package name.
pkg_to_name={}
name_to_pkg={}
# Dependency data loaded by read_deps(); package names are srpm/rpm names.
srpm_dependencies_direct={}
rpm_dependencies_direct={}
rpm_to_srpm_map={}
# Packages whose (indirect) deps are ignored to break huge dependency fan-out.
no_dep_list = [ "bash", "kernel" , "kernel-rt" ]


def init_build_env(slots, opts, config_opts_in):
    """Create one mock config ('<chroot>.b<i>.cfg') per build slot.

    tmpfs sizes come from --worker-resources (colon separated, first value
    must be 0) or default to a decreasing 2*(1+slots-i) GB per slot.
    Exits on config-write or argument errors.
    """
    global build_env

    orig_chroot_name=config_opts_in['chroot_name']
    orig_mock_config = os.path.join(opts.config_path, "{0}.cfg".format(orig_chroot_name))
    for i in range(0,slots):
        new_chroot_name = "{0}.b{1}".format(orig_chroot_name, i)
        new_mock_config = os.path.join(opts.config_path, "{0}.cfg".format(new_chroot_name))
        tmpfs_size_gb = 0
        if opts.worker_resources == "":
            if i > 0:
                tmpfs_size_gb = 2 * (1 + slots - i)
        else:
            resource_array=opts.worker_resources.split(':')
            if i < len(resource_array):
                tmpfs_size_gb=int(resource_array[i])
            else:
                log(opts.logfile, "Error: worker-resources argument '%s' does not supply info for all %d workers" % (opts.worker_resources, slots))
                sys.exit(1)
        if i == 0 and tmpfs_size_gb != 0:
            # FIX: the format string has one placeholder; the original passed
            # (opts.worker_resources, slots) which raised TypeError on this path.
            log(opts.logfile, "Error: worker-resources argument '%s' must pass '0' as first value" % opts.worker_resources)
            sys.exit(1)
        build_env.append({'state': 'Idle', 'cfg': new_mock_config, 'fs_size_gb': tmpfs_size_gb})

        res, msg = set_build_idx(orig_mock_config, new_mock_config, i, tmpfs_size_gb, opts)
        if not res:
            log(opts.logfile, "Error: Could not write out local config: %s" % msg)
            sys.exit(1)


# Index of the slot most recently handed out, for round-robin fairness.
idle_build_env_last_awarded = 0
def get_idle_build_env(slots):
    """Reserve and return an idle slot index (marked 'Busy'), or -1."""
    global build_env
    global idle_build_env_last_awarded
    visited = 0

    if slots < 1:
        return -1

    i = idle_build_env_last_awarded - 1
    if i < 0 or i >= slots:
        i = slots - 1

    while visited < slots:
        if build_env[i]['state'] == 'Idle':
            build_env[i]['state'] = 'Busy'
            idle_build_env_last_awarded = i
            return i
        visited = visited + 1
        i = i - 1
        if i < 0:
            i = slots - 1
    return -1

def release_build_env(idx):
    """Return slot *idx* to the idle pool."""
    global build_env

    build_env[idx]['state'] = 'Idle'

def get_best_rc(a, b):
    """Pick the better of two scheduling candidates.

    Candidates are dicts with 'build_name', 'unbuilt_deps' and 'depth';
    prefers a non-empty dict, then a non-None build_name, then fewer
    unbuilt deps, then smaller depth.  Ties return *a*.
    """
    print("get_best_rc: a=%s" % str(a))
    print("get_best_rc: b=%s" % str(b))
    if (b == {}) and (a != {}):
        return a
    if (a == {}) and (b != {}):
        return b

    if (b['build_name'] is None) and (not a['build_name'] is None):
        return a
    if (a['build_name'] is None) and (not b['build_name'] is None):
        return b

    if a['unbuilt_deps'] < b['unbuilt_deps']:
        return a
    if b['unbuilt_deps'] < a['unbuilt_deps']:
        return b

    if a['depth'] < b['depth']:
        return a
    if b['depth'] < a['depth']:
        return b

    print("get_best_rc: uncertain %s vs %s" % (a,b))
    return a

unbuilt_dep_list_print=False
def unbuilt_dep_list(name, unbuilt_pkg_names, depth, checked=None):
    """Return srpm names in *unbuilt_pkg_names* that *name* depends on.

    First level consults srpm deps, recursion (up to *depth* levels)
    consults rpm deps mapped back to srpms; no_dep_list entries are
    pruned to keep the walk bounded.
    """
    global srpm_dependencies_direct
    global rpm_dependencies_direct
    global rpm_to_srpm_map
    global no_dep_list
    global unbuilt_dep_list_print

    first_iteration=False
    unbuilt = []
    if name in no_dep_list:
        return unbuilt

    if checked is None:
        first_iteration=True
        checked=[]

    # Count unbuild dependencies
    if first_iteration:
        dependencies_direct=srpm_dependencies_direct
    else:
        dependencies_direct=rpm_dependencies_direct

    if name in dependencies_direct:
        for rdep in dependencies_direct[name]:
            sdep='???'
            if rdep in rpm_to_srpm_map:
                sdep = rpm_to_srpm_map[rdep]
            if rdep != name and sdep != name and not rdep in checked:
                if (not first_iteration) and (sdep in no_dep_list):
                    continue
                checked.append(rdep)
                if sdep in unbuilt_pkg_names:
                    if not sdep in unbuilt:
                        unbuilt.append(sdep)
                if depth > 0:
                    child_unbuilt = unbuilt_dep_list(rdep, unbuilt_pkg_names, depth-1, checked)
                    for sub_sdep in child_unbuilt:
                        if sub_sdep != name:
                            if not sub_sdep in unbuilt:
                                unbuilt.append(sub_sdep)

    return unbuilt

def can_build_at_idx(build_idx, name, opts):
    """True if slot *build_idx*'s tmpfs (if any) is big enough for *name*."""
    global pkg_to_name
    global name_to_pkg
    global big_pkgs
    global big_pkg_names
    global slow_pkgs
    global slow_pkg_names
    global build_env

    fs_size_gb = 0
    size_gb = 0
    speed = 0
    pkg = name_to_pkg[name]
    if name in big_pkg_names:
        size_gb=big_pkg_names[name]
    if pkg in big_pkgs:
        size_gb=big_pkgs[pkg]
    if name in slow_pkg_names:
        speed=slow_pkg_names[name]
    if pkg in slow_pkgs:
        speed=slow_pkgs[pkg]
    fs_size_gb = build_env[build_idx]['fs_size_gb']
    return fs_size_gb == 0 or fs_size_gb >= size_gb

def schedule(build_idx, pkgs, opts):
    """Pick the next srpm path to build on slot *build_idx*, or None.

    Prioritizes big, then slow packages (when they fit the slot), then
    the rest, and delegates dependency-aware selection to schedule2().
    """
    global worker_data
    global pkg_to_name
    global name_to_pkg
    global big_pkgs
    global big_pkg_names
    global slow_pkgs
    global slow_pkg_names

    unbuilt_pkg_names=[]
    building_pkg_names=[]
    unprioritized_pkg_names=[]

    for pkg in pkgs:
        name = pkg_to_name[pkg]
        unbuilt_pkg_names.append(name)
        unprioritized_pkg_names.append(name)

    prioritized_pkg_names=[]

    for wd in worker_data:
        pkg = wd['pkg']
        if not pkg is None:
            name = pkg_to_name[pkg]
            building_pkg_names.append(name)

    if len(big_pkg_names) or len(big_pkgs):
        next_unprioritized_pkg_names = unprioritized_pkg_names[:]
        for name in unprioritized_pkg_names:
            pkg = name_to_pkg[name]
            if name in big_pkg_names or pkg in big_pkgs:
                prioritized_pkg_names.append(name)
                next_unprioritized_pkg_names.remove(name)
        unprioritized_pkg_names = next_unprioritized_pkg_names[:]

    if len(slow_pkg_names) or len(slow_pkgs):
        next_unprioritized_pkg_names = unprioritized_pkg_names[:]
        for name in unprioritized_pkg_names:
            pkg = name_to_pkg[name]
            if name in slow_pkg_names or pkg in slow_pkgs:
                if can_build_at_idx(build_idx, name, opts):
                    prioritized_pkg_names.append(name)
                    next_unprioritized_pkg_names.remove(name)
        unprioritized_pkg_names = next_unprioritized_pkg_names[:]

    for name in unprioritized_pkg_names:
        if can_build_at_idx(build_idx, name, opts):
            prioritized_pkg_names.append(name)

    name_out = schedule2(build_idx, prioritized_pkg_names, unbuilt_pkg_names, building_pkg_names, opts)
    if not name_out is None:
        pkg_out = name_to_pkg[name_out]
    else:
        pkg_out = None
    return pkg_out


def schedule2(build_idx, pkg_names, unbuilt_pkg_names, building_pkg_names, opts):
    """Select a package (by name) whose deps are satisfied, or None.

    Walks dependency lists at decreasing depth, tolerating an indirect
    no_dep_list ('kernel') dependency, preferring to work toward
    prioritized packages, and breaking dependency loops when the loop's
    members have nothing else in flight.
    """
    global pkg_to_name
    global name_to_pkg
    global no_dep_list

    max_depth = 3

    if len(pkg_names) == 0:
        return None

    unbuilt_deps={}
    building_deps={}
    for depth in range(max_depth,-1,-1):
        unbuilt_deps[depth]={}
        building_deps[depth]={}

    for depth in range(max_depth,-1,-1):
        checked=[]
        reordered_pkg_names = pkg_names[:]
        while len(reordered_pkg_names):
            name = reordered_pkg_names.pop(0)
            if name in checked:
                continue

            checked.append(name)

            pkg = name_to_pkg[name]
            if not name in unbuilt_deps[depth]:
                unbuilt_deps[depth][name] = unbuilt_dep_list(name, unbuilt_pkg_names, depth)
            if not name in building_deps[depth]:
                building_deps[depth][name] = unbuilt_dep_list(name, building_pkg_names, depth)
            if len(unbuilt_deps[depth][name]) == 0 and len(building_deps[depth][name]) == 0:
                if can_build_at_idx(build_idx, name, opts):
                    log(opts.logfile, "schedule2: no unbuilt deps for '%s'" % name)
                    return name
                else:
                    continue

            if not name in unbuilt_deps[0]:
                unbuilt_deps[0][name] = unbuilt_dep_list(name, unbuilt_pkg_names, 0)
            if not name in building_deps[0]:
                building_deps[0][name] = unbuilt_dep_list(name, building_pkg_names, 0)
            if (len(building_deps[depth][name]) == 0 and len(unbuilt_deps[depth][name]) == 1 and unbuilt_deps[depth][name][0] in no_dep_list) or (len(unbuilt_deps[depth][name]) == 0 and len(building_deps[depth][name]) == 1 and building_deps[depth][name][0] in no_dep_list):
                if len(unbuilt_deps[0][name]) == 0 and len(building_deps[0][name]) == 0:
                    if can_build_at_idx(build_idx, name, opts):
                        log(opts.logfile, "schedule2: no unbuilt deps for '%s' except for indirect kernel dep" % name)
                        return name
                    else:
                        continue

            loop = False
            for dep_name in unbuilt_deps[depth][name]:
                if name == dep_name:
                    continue

                if dep_name in checked:
                    continue

                if not dep_name in unbuilt_deps[depth]:
                    unbuilt_deps[depth][dep_name] = unbuilt_dep_list(dep_name, unbuilt_pkg_names, depth)
                if not dep_name in building_deps[depth]:
                    building_deps[depth][dep_name] = unbuilt_dep_list(dep_name, building_pkg_names, depth)
                if len(unbuilt_deps[depth][dep_name]) == 0 and len(building_deps[depth][dep_name]) == 0:
                    if can_build_at_idx(build_idx, dep_name, opts):
                        log(opts.logfile, "schedule2: deps: no unbuilt deps for '%s', working towards '%s'" % (dep_name, name))
                        return dep_name

                if not dep_name in unbuilt_deps[0]:
                    unbuilt_deps[0][dep_name] = unbuilt_dep_list(dep_name, unbuilt_pkg_names, 0)
                if not dep_name in building_deps[0]:
                    building_deps[0][dep_name] = unbuilt_dep_list(dep_name, building_pkg_names, 0)
                if (len(building_deps[depth][dep_name]) == 0 and len(unbuilt_deps[depth][dep_name]) == 1 and unbuilt_deps[depth][dep_name][0] in no_dep_list) or (len(unbuilt_deps[depth][dep_name]) == 0 and len(building_deps[depth][dep_name]) == 1 and building_deps[depth][dep_name][0] in no_dep_list):
                    if len(unbuilt_deps[0][dep_name]) == 0 and len(building_deps[0][dep_name]) == 0:
                        if can_build_at_idx(build_idx, dep_name, opts):
                            log(opts.logfile, "schedule2: no unbuilt deps for '%s' except for indirect kernel dep, working towards '%s'" % (dep_name, name))
                            return dep_name

                if name in unbuilt_deps[0][dep_name]:
                    loop = True

            if loop and len(building_deps[depth][name]) == 0:
                log(opts.logfile, "schedule2: loop detected, try to build '%s'" % name)
                return name

            for dep_name in unbuilt_deps[depth][name]:
                if dep_name in reordered_pkg_names:
                    # promote deps so we work toward the blocked package
                    reordered_pkg_names.remove(dep_name)
                    reordered_pkg_names.insert(0,dep_name)

    return None


def read_deps(opts):
    """Load all three dependency files named on the command line."""
    read_srpm_deps(opts)
    read_rpm_deps(opts)
    read_map_deps(opts)

def read_srpm_deps(opts):
    """Populate srpm_dependencies_direct from 'name;dep1,dep2' lines."""
    global srpm_dependencies_direct

    if opts.srpm_dependency_file == None:
        return

    if not os.path.exists(opts.srpm_dependency_file):
        log(opts.logfile, "File not found: %s" % opts.srpm_dependency_file)
        sys.exit(1)

    with open(opts.srpm_dependency_file) as f:
        lines = f.readlines()
        for line in lines:
            (name,deps) = line.rstrip().split(';')
            srpm_dependencies_direct[name]=deps.split(',')

def read_rpm_deps(opts):
    """Populate rpm_dependencies_direct from 'name;dep1,dep2' lines."""
    global rpm_dependencies_direct

    if opts.rpm_dependency_file == None:
        return

    if not os.path.exists(opts.rpm_dependency_file):
        log(opts.logfile, "File not found: %s" % opts.rpm_dependency_file)
        sys.exit(1)

    with open(opts.rpm_dependency_file) as f:
        lines = f.readlines()
        for line in lines:
            (name,deps) = line.rstrip().split(';')
            rpm_dependencies_direct[name]=deps.split(',')

def read_map_deps(opts):
    """Populate rpm_to_srpm_map from 'rpm;srpm' lines."""
    global rpm_to_srpm_map

    if opts.rpm_to_srpm_map_file == None:
        return

    if not os.path.exists(opts.rpm_to_srpm_map_file):
        log(opts.logfile, "File not found: %s" % opts.rpm_to_srpm_map_file)
        sys.exit(1)

    with open(opts.rpm_to_srpm_map_file) as f:
        lines = f.readlines()
        for line in lines:
            (rpm,srpm) = line.rstrip().split(';')
            rpm_to_srpm_map[rpm]=srpm


def reaper(opts):
    """Collect finished build children; returns how many were reaped.

    Child exit codes follow do_build: 0 = failed, 1 = built, 2 = already
    built.  Frees the child's build slot and refreshes the local repo
    after any successful build.
    """
    global built_pkgs
    global failed
    global worker_data
    global workers

    reaped = 0
    need_createrepo = False
    last_reaped = -1
    while reaped > last_reaped:
        last_reaped = reaped
        for wd in worker_data:
            p = wd['proc']
            ret = p.exitcode
            if ret is not None:
                pkg = wd['pkg']
                b = int(wd['build_index'])
                p.join()
                worker_data.remove(wd)
                workers = workers - 1
                reaped = reaped + 1
                release_build_env(b)

                log(opts.logfile, "End build on 'b%d': %s" % (b, pkg))

                if ret == 0:
                    failed.append(pkg)
                    log(opts.logfile, "Error building %s on 'b%d'." % (os.path.basename(pkg), b))
                    if opts.recurse and not stop_signal:
                        log(opts.logfile, "Will try to build again (if some other package will succeed).")
                    else:
                        log(opts.logfile, "See logs/results in %s" % opts.local_repo_dir)
                elif ret == 1:
                    log(opts.logfile, "Success building %s on 'b%d'" % (os.path.basename(pkg), b))
                    built_pkgs.append(pkg)
                    need_createrepo = True
                elif ret == 2:
                    log(opts.logfile, "Skipping already built pkg %s" % os.path.basename(pkg))

    if need_createrepo:
        # createrepo with the new pkgs
        err = createrepo(opts.local_repo_dir)[1]
        if err.strip():
            log(opts.logfile, "Error making local repo: %s" % opts.local_repo_dir)
            log(opts.logfile, "Err: %s" % err)

    return reaped

# Set by signal_handler so scheduling loops wind down.
stop_signal = False

def on_terminate(proc):
    print("process {} terminated with exit code {}".format(proc, proc.returncode))

def kill_proc_and_descentents(parent, need_stop=False, verbose=False):
    """Recursively SIGSTOP, terminate and SIGCONT *parent* and its children.

    Stopping before terminating prevents the tree from spawning new
    children mid-kill.  *parent* is a psutil.Process.
    """
    global g_opts

    if need_stop:
        if verbose:
            log(g_opts.logfile, "Stop %d" % parent.pid)

        try:
            parent.send_signal(signal.SIGSTOP)
        except:
            # perhaps mock still running as root, give it a sec to drop privileges and try again
            time.sleep(1)
            parent.send_signal(signal.SIGSTOP)

    children = parent.children(recursive=False)

    for p in children:
        kill_proc_and_descentents(p, need_stop=True, verbose=verbose)

    if verbose:
        log(g_opts.logfile, "Terminate %d" % parent.pid)

    try:
        parent.terminate()
    except:
        # perhaps mock still running as root, give it a sec to drop privileges and try again
        time.sleep(1)
        parent.terminate()

    if need_stop:
        if verbose:
            log(g_opts.logfile, "Continue %d" % parent.pid)

        parent.send_signal(signal.SIGCONT)


def child_signal_handler(signum, frame):
    """Signal handler for build children: kill our process tree and exit 0."""
    global g_opts
    my_pid = os.getpid()
    p = psutil.Process(my_pid)
    kill_proc_and_descentents(p)
    try:
        sys.exit(0)
    except SystemExit as e:
        os._exit(0)

def signal_handler(signum, frame):
    """Parent signal handler: stop scheduling, terminate and reap children."""
    global g_opts
    global stop_signal
    global workers
    global worker_data
    stop_signal = True

    # Signal processes to complete
    log(g_opts.logfile, "received signal %d, Terminating children" % signum)
    for wd in worker_data:
        p = wd['proc']
        ret = p.exitcode
        if ret is None:
            p.terminate()
        else:
            log(g_opts.logfile, "child return code was %d" % ret)

    # Wait for remaining processes to complete
    log(g_opts.logfile, "===== wait for signaled jobs to complete =====")
    while len(worker_data) > 0:
        log(g_opts.logfile, " remaining workers: %d" % workers)
        reaped = reaper(g_opts)
        if reaped == 0:
            time.sleep(0.1)

    try:
        sys.exit(1)
    except SystemExit as e:
        os._exit(1)

def main(args):
    """Entry point: set up configs/repos, then schedule parallel builds.

    Returns 0 on success, 2 when packages remain unbuilt after all
    retries (with --recurse, failed sets are retried, falling back to a
    single worker before giving up).
    """
    opts, args = parse_args(args)
    # take mock config + list of pkgs

    global g_opts
    global stop_signal
    global build_env
    global worker_data
    global workers
    global max_workers

    global slow_pkg_names
    global slow_pkgs
    global big_pkg_names
    global big_pkgs
    max_workers = int(opts.max_workers)

    global failed
    global built_pkgs

    cfg = opts.chroot
    pkgs = args[1:]

    # transform slow/big package options into dictionaries
    for line in opts.slow_pkg_names_raw:
        speed,name = line.split(":")
        if speed != "":
            slow_pkg_names[name]=int(speed)
    for line in opts.slow_pkgs_raw:
        speed,pkg = line.split(":")
        if speed != "":
            slow_pkgs[pkg]=int(speed)
    for line in opts.big_pkg_names_raw:
        size_gb,name = line.split(":")
        if size_gb != "":
            big_pkg_names[name]=int(size_gb)
    for line in opts.big_pkgs_raw:
        size_gb,pkg = line.split(":")
        if size_gb != "":
            big_pkgs[pkg]=int(size_gb)

    # Set up a mapping between pkg path and pkg name
    global pkg_to_name
    global name_to_pkg
    for pkg in pkgs:
        if not pkg.endswith('.rpm'):
            log(opts.logfile, "%s doesn't appear to be an rpm - skipping" % pkg)
            continue

        try:
            name = rpmName(pkg)
        except OSError as e:
            print("Could not parse rpm %s" % pkg)
            sys.exit(1)

        pkg_to_name[pkg] = name
        name_to_pkg[name] = pkg

    read_deps(opts)

    global config_opts
    config_opts = mockbuild.util.load_config(mockconfig_path, cfg, None, __VERSION__, PKGPYTHONDIR)

    if not opts.tmp_prefix:
        try:
            opts.tmp_prefix = os.getlogin()
        except OSError as e:
            print("Could not find login name for tmp dir prefix add --tmp_prefix")
            sys.exit(1)
    pid = os.getpid()
    opts.uniqueext = '%s-%s' % (opts.tmp_prefix, pid)

    if opts.basedir != "/var/lib/mock":
        opts.uniqueext = ''

    # create a tempdir for our local info
    if opts.localrepo:
        local_tmp_dir = os.path.abspath(opts.localrepo)
        if not os.path.exists(local_tmp_dir):
            os.makedirs(local_tmp_dir)
            os.chmod(local_tmp_dir, 0o755)
    else:
        pre = 'mock-chain-%s-' % opts.uniqueext
        local_tmp_dir = tempfile.mkdtemp(prefix=pre, dir='/var/tmp')
        os.chmod(local_tmp_dir, 0o755)

    if opts.logfile:
        opts.logfile = os.path.join(local_tmp_dir, opts.logfile)
        if os.path.exists(opts.logfile):
            os.unlink(opts.logfile)

    log(opts.logfile, "starting logfile: %s" % opts.logfile)

    opts.local_repo_dir = os.path.normpath(local_tmp_dir + '/results/' + config_opts['chroot_name'] + '/')

    if not os.path.exists(opts.local_repo_dir):
        os.makedirs(opts.local_repo_dir, mode=0o755)

    local_baseurl = "file://%s" % opts.local_repo_dir
    log(opts.logfile, "results dir: %s" % opts.local_repo_dir)
    opts.config_path = os.path.normpath(local_tmp_dir + '/configs/' + config_opts['chroot_name'] + '/')

    if not os.path.exists(opts.config_path):
        os.makedirs(opts.config_path, mode=0o755)

    log(opts.logfile, "config dir: %s" % opts.config_path)

    my_mock_config = os.path.join(opts.config_path, "{0}.cfg".format(config_opts['chroot_name']))

    # modify with localrepo
    res, msg = add_local_repo(config_opts['config_file'], my_mock_config, local_baseurl, 'local_build_repo')
    if not res:
        log(opts.logfile, "Error: Could not write out local config: %s" % msg)
        sys.exit(1)

    for baseurl in opts.repos:
        res, msg = add_local_repo(my_mock_config, my_mock_config, baseurl)
        if not res:
            log(opts.logfile, "Error: Could not add: %s to yum config in mock chroot: %s" % (baseurl, msg))
            sys.exit(1)

    res, msg = set_basedir(my_mock_config, my_mock_config, opts.basedir, opts)
    if not res:
        log(opts.logfile, "Error: Could not write out local config: %s" % msg)
        sys.exit(1)

    # these files needed from the mock.config dir to make mock run
    for fn in ['site-defaults.cfg', 'logging.ini']:
        pth = mockconfig_path + '/' + fn
        shutil.copyfile(pth, opts.config_path + '/' + fn)

    # createrepo on it
    err = createrepo(opts.local_repo_dir)[1]
    if err.strip():
        log(opts.logfile, "Error making local repo: %s" % opts.local_repo_dir)
        log(opts.logfile, "Err: %s" % err)
        sys.exit(1)

    init_build_env(max_workers, opts, config_opts)

    download_dir = tempfile.mkdtemp()
    downloaded_pkgs = {}
    built_pkgs = []
    try_again = True
    to_be_built = pkgs
    return_code = 0
    num_of_tries = 0

    g_opts = opts
    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGHUP, signal_handler)
    signal.signal(signal.SIGABRT, signal_handler)

    while try_again and not stop_signal:
        num_of_tries += 1
        failed = []

        log(opts.logfile, "===== iteration %d start =====" % num_of_tries)

        to_be_built_scheduled = to_be_built[:]

        need_reap = False
        while len(to_be_built_scheduled) > 0:
            # Free up a worker
            while need_reap or workers >= max_workers:
                need_reap = False
                reaped = reaper(opts)
                if reaped == 0:
                    time.sleep(0.1)

            if workers < max_workers:
                workers = workers + 1

            b = get_idle_build_env(max_workers)
            if b < 0:
                # NOTE(review): 'pkg' is unbound here on the very first pass;
                # in practice a slot is always available then, so unreachable.
                log(opts.logfile, "Failed to find idle build env for: %s" % pkg)
                workers = workers - 1
                need_reap = True
                continue

            pkg = schedule(b, to_be_built_scheduled, opts)
            if pkg is None:
                if workers <= 1:
                    # Remember we have one build environment reserved, so can't test for zero workers
                    log(opts.logfile, "failed to schedule from: %s" % to_be_built_scheduled)
                    pkg = to_be_built_scheduled[0]
                    log(opts.logfile, "All workers idle, forcing build of pkg=%s" % pkg)
                else:
                    release_build_env(b)
                    workers = workers - 1
                    need_reap = True
                    continue

            to_be_built_scheduled.remove(pkg)

            if not pkg.endswith('.rpm'):
                log(opts.logfile, "%s doesn't appear to be an rpm - skipping" % pkg)
                failed.append(pkg)
                release_build_env(b)
                need_reap = True
                continue

            elif pkg.startswith('http://') or pkg.startswith('https://') or pkg.startswith('ftp://'):
                url = pkg
                try:
                    log(opts.logfile, 'Fetching %s' % url)
                    r = requests.get(url)
                    # pylint: disable=no-member
                    if r.status_code == requests.codes.ok:
                        fn = urlsplit(r.url).path.rsplit('/', 1)[1]
                        if 'content-disposition' in r.headers:
                            _, params = cgi.parse_header(r.headers['content-disposition'])
                            if 'filename' in params and params['filename']:
                                fn = params['filename']
                        pkg = download_dir + '/' + fn
                        with open(pkg, 'wb') as fd:
                            for chunk in r.iter_content(4096):
                                fd.write(chunk)
                except Exception as e:
                    log(opts.logfile, 'Error Downloading %s: %s' % (url, str(e)))
                    failed.append(url)
                    release_build_env(b)
                    need_reap = True
                    continue
                else:
                    downloaded_pkgs[pkg] = url

            log(opts.logfile, "Start build on 'b%d': %s" % (b, pkg))
            p = multiprocessing.Process(target=do_build, args=(opts, build_env[b]['cfg'], pkg))
            worker_data.append({'proc': p, 'pkg': pkg, 'build_index': int(b)})
            p.start()

        # Wait for remaining processes to complete
        log(opts.logfile, "===== wait for last jobs in iteration %d to complete =====" % num_of_tries)
        while workers > 0:
            reaped = reaper(opts)
            if reaped == 0:
                time.sleep(0.1)
        log(opts.logfile, "===== iteration %d complete =====" % num_of_tries)

        if failed and opts.recurse:
            log(opts.logfile, "failed=%s" % failed)
            log(opts.logfile, "to_be_built=%s" % to_be_built)
            if len(failed) != len(to_be_built):
                to_be_built = failed
                try_again = True
                log(opts.logfile, 'Some package succeeded, some failed.')
                log(opts.logfile, 'Trying to rebuild %s failed pkgs, because --recurse is set.' % len(failed))
            else:
                if max_workers > 1:
                    max_workers = 1
                    to_be_built = failed
                    try_again = True
                    log(opts.logfile, 'Some package failed under parallel build.')
                    log(opts.logfile, 'Trying to rebuild %s failed pkgs with single thread, because --recurse is set.' % len(failed))
                else:
                    log(opts.logfile, "")
                    log(opts.logfile, "*** Build Failed ***")
                    log(opts.logfile, "Tried %s times - following pkgs could not be successfully built:" % num_of_tries)
                    log(opts.logfile, "*** Build Failed ***")
                    for pkg in failed:
                        msg = pkg
                        if pkg in downloaded_pkgs:
                            msg = downloaded_pkgs[pkg]
                        log(opts.logfile, msg)
                    log(opts.logfile, "")
                    try_again = False
        else:
            try_again = False
            if failed:
                return_code = 2

    # cleaning up our download dir
    shutil.rmtree(download_dir, ignore_errors=True)

    log(opts.logfile, "")
    log(opts.logfile, "Results out to: %s" % opts.local_repo_dir)
    log(opts.logfile, "")
    log(opts.logfile, "Pkgs built: %s" % len(built_pkgs))
    if built_pkgs:
        if failed:
            if len(built_pkgs):
                log(opts.logfile, "Some packages successfully built in this order:")
        else:
            log(opts.logfile, "Packages successfully built in this order:")
        for pkg in built_pkgs:
            log(opts.logfile, pkg)
    return return_code


if __name__ == "__main__":
    sys.exit(main(sys.argv))
b/build-tools/modify-build-cfg new file mode 100755 index 00000000..d7625756 --- /dev/null +++ b/build-tools/modify-build-cfg @@ -0,0 +1,130 @@ +#!/bin/sh + +# This script modifies a mock configuration file (typically $MY_BUILD_CFG) +# to add build time environment variables to the mock environment (things +# like what branch we're building on, etc). +# +# For reasons of security, the host environment variables cannot normally be +# passed through to the mock environment, so this scripts sets the variables +# to literal values. +# +# usage: modify-build-cfg [file.cfg] +# + +MOCK_CFG_PROTO="$MY_REPO/cgcs-centos-repo/mock.cfg.proto" +if [ ! -f "$MOCK_CFG_PROTO" ]; then + echo "ERROR: Couldn't find mock config prototype at '$MOCK_CFG_PROTO'" + exit 1 +fi + +if [ "${1}x" == "x" ]; then + FILE=$MY_BUILD_CFG +else + FILE=$1 +fi + +if [ -f $MOCK_CFG_PROTO ]; then + if [ -f $FILE ]; then + NEWER=$(find "$MOCK_CFG_PROTO" -newer "$FILE") + if [ "x$NEWER" != "x" ]; then + \rm -f -v "$FILE" + fi + fi +fi + +if [ ! -f $FILE ]; then + if [ -z $MY_BUILD_ENVIRONMENT ] || [ -z $MY_BUILD_DIR ] || [ -z $MY_REPO ]; then + echo "Can't create $FILE without MY_BUILD_ENVIRONMENT, MY_BUILD_DIR and MY_REPO environment variables" + exit 1 + fi + + echo "Recreating $FILE" + \cp -f -v "$MOCK_CFG_PROTO" "$FILE" + if [ $? 
-ne 0 ]; then + echo "Couldn't find config file '$FILE', nor construct it from '$MOCK_CFG_PROTO'" + exit 1 + fi + + sed -i "s%LOCAL_BASE%http://127.0.0.1:8088%g" "$FILE" + sed -i "s%MIRROR_BASE%http://127.0.0.1:8088%g" "$FILE" + sed -i "s%BUILD_ENV%$MY_BUILD_ENVIRONMENT%g" "$FILE" + sed -i "s%/MY_BUILD_DIR%$MY_BUILD_DIR_TOP%g" "$FILE" + sed -i "s%/MY_REPO_DIR%$MY_REPO%g" "$FILE" + + # Disable all local-* repos for the build-types other than the current one + for bt in std rt; do + if [ "$bt" != "$BUILD_TYPE" ]; then + # Use the range of lines starting with pattern [local-$bt] until the next line starting with [] + sed -i "/^\[local-$bt\]/,/^\[/ s/enabled=1/enabled=0/" $FILE + fi + done +fi + + +# Add environment variables to mock config if they don't exist +grep -q "config_opts\['environment'\]\['BUILD_BY'\]" $FILE || \ + echo "config_opts['environment']['BUILD_BY']" >> $FILE + +grep -q "config_opts\['environment'\]\['BUILD_DATE'\]" $FILE || \ + echo "config_opts['environment']['BUILD_DATE']" >> $FILE + +grep -q "config_opts\['environment'\]\['REPO'\]" $FILE || \ + echo "config_opts['environment']['REPO']" >> $FILE + +grep -q "config_opts\['environment'\]\['WRS_GIT_BRANCH'\]" $FILE || \ + echo "config_opts['environment']['WRS_GIT_BRANCH']" >> $FILE + +grep -q "config_opts\['environment'\]\['CGCS_GIT_BRANCH'\]" $FILE || \ + echo "config_opts['environment']['CGCS_GIT_BRANCH']" >> $FILE + +if [ -z $FORMAL_BUILD ]; then + grep -q "config_opts\['macros'\]\['%_no_cgcs_license_check'\] = '1'" $FILE || \ + echo "config_opts['macros']['%_no_cgcs_license_check'] = '1'" >> $FILE +else + sed -i "/config_opts\['macros'\]\['%_no_cgcs_license_check'\] = '1'/d" $FILE +fi + +grep -q "config_opts\['macros'\]\['%_tis_build_type'\] = '$BUILD_TYPE'" $FILE || \ + echo "config_opts['macros']['%_tis_build_type'] = '$BUILD_TYPE'" >> $FILE + +if [ -f /usr/lib64/nosync/nosync.so ]; then + grep -q "config_opts\['nosync'\] = True" $FILE || \ + echo "config_opts['nosync'] = True" >> $FILE +fi + 
+grep -q "config_opts\['chroot_setup_cmd'\] = 'install @buildsys-build pigz lbzip2 yum'" $FILE || \ + echo "config_opts['chroot_setup_cmd'] = 'install @buildsys-build pigz lbzip2 yum'" >> $FILE + +# +# Read macros from tis.macros to add to the build config file, +# for use in RPM spec files +# +RPM_MACROS=$MY_REPO/build-tools/tis.macros +sed 's/#.*//' $RPM_MACROS | grep '=' | while IFS='=' read name value; do + # Check if the entry already exists. If so, go to next line + grep -q "^config_opts\['macros'\]\['${name}'\] = '${value}'$" $FILE && continue + + # Update or add the entry + grep -q "^config_opts\['macros'\]\['${name}'\]" $FILE + if [ $? -eq 0 ]; then + sed -i -r "s#^(config_opts\['macros'\]\['${name}'\]).*#\1 = '${value}'#" $FILE + else + echo "config_opts['macros']['${name}'] = '${value}'" >> $FILE + fi +done + +# okay, now we have lines for each env var. Generate the correct values + +BUILD_DATE=`date "+%F %T %z"` +CGCS_GIT_BRANCH=`cd $MY_REPO/addons/wr-cgcs/layers/cgcs/; git rev-parse --abbrev-ref HEAD` +WRS_GIT_BRANCH=`cd $MY_REPO; git rev-parse --abbrev-ref HEAD` +REPO=$MY_REPO + +# Finally, our good friend sed will place the values in the mock config file +sed -i \ + -e "s#config_opts\['environment'\]\['BUILD_BY'\].*#config_opts\['environment'\]\['BUILD_BY'\] = '$USER'#" \ + -e "s#config_opts\['environment'\]\['BUILD_DATE'\].*#config_opts\['environment'\]\['BUILD_DATE'\] = '$BUILD_DATE'#" \ + -e "s#config_opts\['environment'\]\['REPO'\].*#config_opts\['environment'\]\['REPO'\] = '$REPO'#" \ + -e "s#config_opts\['environment'\]\['WRS_GIT_BRANCH'\].*#config_opts\['environment'\]\['WRS_GIT_BRANCH'\] = '$WRS_GIT_BRANCH'#" \ + -e "s#config_opts\['environment'\]\['CGCS_GIT_BRANCH'\].*#config_opts\['environment'\]\['CGCS_GIT_BRANCH'\] = '$CGCS_GIT_BRANCH'#" \ + $FILE diff --git a/build-tools/patch-iso b/build-tools/patch-iso new file mode 100755 index 00000000..47a8c1f2 --- /dev/null +++ b/build-tools/patch-iso @@ -0,0 +1,320 @@ +#!/bin/bash +# +# Utility 
for adding patches to an unpatched ISO +# + +if [ -z "${MY_REPO}" ]; then + echo "Required environment variable MY_REPO is not set" + exit 1 +fi + +SETUP_PATCH_REPO=${MY_REPO}/addons/wr-cgcs/layers/cgcs/extras.ND/scripts/setup_patch_repo.sh +if [ ! -x ${SETUP_PATCH_REPO} ]; then + echo "Cannot find or execute ${SETUP_PATCH_REPO}" + exit 1 +fi + +REPO_UPGRADES_DIR=${MY_REPO}/addons/wr-cgcs/layers/cgcs/common-bsp/files/upgrades +RELEASE_INFO=${MY_REPO}/addons/wr-cgcs/layers/cgcs/middleware/recipes-common/build-info/release-info.inc +PLATFORM_RELEASE=$(source $RELEASE_INFO && echo $PLATFORM_RELEASE) + +function usage() { + echo "" + echo "Usage: " + echo " $(basename $0) -i -o [ -u ] ..." + echo " -i : Specify input ISO file" + echo " -o : Specify output ISO file" + echo " -u : Update with upgrades files from ${REPO_UPGRADES_DIR}" + echo "" +} + +function extract_pkg_from_patch_repo() { + local repodir=${BUILDDIR}/patches + local pkgname=$1 + + local pkgfile=$(repoquery --repofrompath local,${repodir} --location -q ${pkgname}) + if [ -z "${pkgfile}" ]; then + return 1 + fi + + rpm2cpio ${pkgfile/file://} | cpio -idmv + if [ $? -ne 0 ]; then + echo "Failed to extract $pkgname files from ${pkgfile/file://}" + exit 1 + fi +} + +declare INPUT_ISO= +declare OUTPUT_ISO= +declare ORIG_PWD=$PWD +declare DO_UPGRADES=1 + +while getopts "i:o:u" opt; do + case $opt in + i) + INPUT_ISO=$OPTARG + ;; + o) + OUTPUT_ISO=$OPTARG + ;; + u) + DO_UPGRADES=0 + ;; + *) + usage + exit 1 + ;; + esac +done + +if [ -z "$INPUT_ISO" -o -z "$OUTPUT_ISO" ]; then + usage + exit 1 +fi + +if [ ! -f ${INPUT_ISO} ]; then + echo "Input file does not exist: ${INPUT_ISO}" + exit 1 +fi + +if [ -f ${OUTPUT_ISO} ]; then + echo "Output file already exists: ${OUTPUT_ISO}" + exit 1 +fi + +shift $((OPTIND-1)) + +if [ $# -le 0 ]; then + usage + exit +fi + +for pf in $@; do + if [ ! -f $pf ]; then + echo "Patch file $pf does not exist" + exit 1 + fi + + if [[ ! 
$pf =~ \.patch$ ]]; then + echo "Specified file $pf does not have .patch extension" + exit 1 + fi +done + +declare MNTDIR= +declare BUILDDIR= +declare WORKDIR= + +function cleanup() { + if [ -n "$MNTDIR" -a -d "$MNTDIR" ]; then + guestunmount $MNTDIR + \rmdir $MNTDIR + fi + + if [ -n "$BUILDDIR" -a -d "$BUILDDIR" ]; then + \rm -rf $BUILDDIR + fi + + if [ -n "$WORKDIR" -a -d "$WORKDIR" ]; then + \rm -rf $WORKDIR + fi +} + +trap cleanup EXIT + +MNTDIR=$(mktemp -d -p $PWD patchiso_mnt_XXXXXX) +if [ -z "${MNTDIR}" -o ! -d ${MNTDIR} ]; then + echo "Failed to create mntdir. Aborting..." + exit $rc +fi + +BUILDDIR=$(mktemp -d -p $PWD patchiso_build_XXXXXX) +if [ -z "${BUILDDIR}" -o ! -d ${BUILDDIR} ]; then + echo "Failed to create builddir. Aborting..." + exit $rc +fi + +# Mount the ISO +guestmount -a ${INPUT_ISO} -m /dev/sda1 --ro ${MNTDIR} +rc=$? +if [ $rc -ne 0 ]; then + echo "Call to guestmount failed with rc=$rc. Aborting..." + exit $rc +fi + +rsync -a ${MNTDIR}/ ${BUILDDIR}/ +rc=$? +if [ $rc -ne 0 ]; then + echo "Call to rsync ISO content. Aborting..." + exit $rc +fi + +guestunmount ${MNTDIR} +\rmdir ${MNTDIR} + +# Setup the patch repo +${SETUP_PATCH_REPO} -o ${BUILDDIR}/patches $@ +rc=$? +if [ $rc -ne 0 ]; then + echo "Call to $(basename ${SETUP_PATCH_REPO}) failed with rc=$rc. Aborting..." + exit $rc +fi + +# Look for components that need modification +#extract_pkg_from_patch_repo +WORKDIR=$(mktemp -d -p $PWD patchiso_work_XXXXXX) +if [ -z "${WORKDIR}" -o ! -d ${WORKDIR} ]; then + echo "Failed to create workdir. Aborting..." + exit $rc +fi + +\cd ${WORKDIR} +\mkdir extract +\cd extract + +# Changes to copied files here must also be reflected in build-iso + +extract_pkg_from_patch_repo platform-kickstarts +if [ $? -eq 0 ]; then + # Replace files + \rm -f ${BUILDDIR}/*ks.cfg && + \cp --preserve=all www/pages/feed/rel-*/*.cfg ${BUILDDIR}/ && + \cp --preserve=all ${BUILDDIR}/controller_ks.cfg ${BUILDDIR}/ks.cfg + if [ $? 
-ne 0 ]; then + echo "Failed to copy extracted kickstarts" + exit 1 + fi +fi +\cd ${WORKDIR} +\rm -rf extract + +\mkdir extract +\cd extract +extract_pkg_from_patch_repo platform-kickstarts-pxeboot +if [ $? -eq 0 ]; then + # Replace files + \rm -f ${BUILDDIR}/pxeboot/pxeboot_controller.cfg \ + ${BUILDDIR}/pxeboot/pxeboot_smallsystem.cfg \ + ${BUILDDIR}/pxeboot/pxeboot_smallsystem_lowlatency.cfg && + \cp --preserve=all pxeboot/* ${BUILDDIR}/pxeboot/ + if [ $? -ne 0 ]; then + echo "Failed to copy extracted pxeboot kickstarts" + exit 1 + fi +fi +\cd ${WORKDIR} +\rm -rf extract + +\mkdir extract +\cd extract +extract_pkg_from_patch_repo pxe-network-installer +if [ $? -eq 0 ]; then + # Replace files + \rm -f ${BUILDDIR}/pxeboot/pxelinux.0 \ + ${BUILDDIR}/pxeboot/menu.c32 \ + ${BUILDDIR}/pxeboot/chain.c32 && + \cp --preserve=all pxeboot/pxelinux.0 pxeboot/menu.c32 pxeboot/chain.c32 ${BUILDDIR}/pxeboot/ + if [ $? -ne 0 ]; then + echo "Error: Could not copy all files from installer" + exit 1 + fi + + for f in pxeboot/EFI/centos/x86_64-efi/*; do + \rm -f ${BUILDDIR}/${f} + done + \cp --preserve=all pxeboot/EFI/centos/x86_64-efi/* ${BUILDDIR}/pxeboot/EFI/centos/x86_64-efi/ + if [ $? -ne 0 ]; then + echo "Error: Could not copy all files from installer" + exit 1 + fi + + \rm -f ${BUILDDIR}/LiveOS/squashfs.img && + \cp --preserve=all www/pages/feed/rel-*/LiveOS/squashfs.img ${BUILDDIR}/LiveOS/ + if [ $? 
-ne 0 ]; then + echo "Error: Could not copy squashfs from LiveOS" + exit 1 + fi + + # Replace vmlinuz and initrd.img with our own pre-built ones + \rm -f \ + ${BUILDDIR}/vmlinuz \ + ${BUILDDIR}/images/pxeboot/vmlinuz \ + ${BUILDDIR}/initrd.img \ + ${BUILDDIR}/images/pxeboot/initrd.img && + \cp --preserve=all pxeboot/rel-*/installer-bzImage_1.0 \ + ${BUILDDIR}/vmlinuz && + \cp --preserve=all pxeboot/rel-*/installer-bzImage_1.0 \ + ${BUILDDIR}/images/pxeboot/vmlinuz && + \cp --preserve=all pxeboot/rel-*/installer-intel-x86-64-initrd_1.0 \ + ${BUILDDIR}/initrd.img && + \cp --preserve=all pxeboot/rel-*/installer-intel-x86-64-initrd_1.0 \ + ${BUILDDIR}/images/pxeboot/initrd.img + if [ $? -ne 0 ]; then + echo "Error: Failed to copy installer images" + exit 1 + fi +fi +\cd ${WORKDIR} +\rm -rf extract + +\mkdir extract +\cd extract +extract_pkg_from_patch_repo grub2-efi-pxeboot +if [ $? -eq 0 ]; then + # Replace files + \rm -f ${BUILDDIR}/pxeboot/EFI/grubx64.efi && + \cp --preserve=all pxeboot/EFI/grubx64.efi ${BUILDDIR}/pxeboot/EFI/ + if [ $? 
-ne 0 ]; then + echo "Error: Failed to copy grub2-efi-pxeboot files" + exit 1 + fi +fi +\cd ${WORKDIR} +\rm -rf extract + +\cd ${ORIG_PWD} + +if [ ${DO_UPGRADES} -eq 0 ]; then + # Changes to copied files here must also be reflected in build-iso + + echo "Updating upgrade support files" + ISO_UPGRADES_DIR="${BUILDDIR}/upgrades" + \rm -rf ${ISO_UPGRADES_DIR} + \mkdir ${ISO_UPGRADES_DIR} + \cp ${REPO_UPGRADES_DIR}/* ${ISO_UPGRADES_DIR} + sed -i "s/xxxSW_VERSIONxxx/${PLATFORM_RELEASE}/g" ${ISO_UPGRADES_DIR}/metadata.xml + chmod +x ${ISO_UPGRADES_DIR}/*.sh + # Write the version out (used in upgrade scripts - this is the same as SW_VERSION) + echo "VERSION=$PLATFORM_RELEASE" > ${ISO_UPGRADES_DIR}/version +fi + +# Rebuild the ISO +mkisofs -o ${OUTPUT_ISO} \ + -R -D -A 'oe_iso_boot' -V 'oe_iso_boot' \ + -quiet \ + -b isolinux.bin -c boot.cat -no-emul-boot \ + -boot-load-size 4 -boot-info-table \ + -eltorito-alt-boot \ + -e images/efiboot.img \ + -no-emul-boot \ + ${BUILDDIR} + +isohybrid --uefi ${OUTPUT_ISO} +implantisomd5 ${OUTPUT_ISO} + +# Sign the .iso with the developer private key +# Signing with the formal key is only to be done for customer release +# and is a manual step afterwards, as with the GA ISO +openssl dgst -sha256 \ + -sign ${MY_REPO}/build-tools/signing/dev-private-key.pem \ + -binary \ + -out ${OUTPUT_ISO/%.iso/.sig} \ + ${OUTPUT_ISO} +rc=$? +if [ $rc -ne 0 ]; then + echo "Call to $(basename ${SETUP_PATCH_REPO}) failed with rc=$rc. Aborting..." 
+ exit $rc +fi + +echo "Patched ISO: ${OUTPUT_ISO}" + diff --git a/build-tools/patch_rebase_1 b/build-tools/patch_rebase_1 new file mode 100755 index 00000000..cdf2018c --- /dev/null +++ b/build-tools/patch_rebase_1 @@ -0,0 +1,130 @@ +#!/bin/bash + +# +# Start an edit session for packages to be upgraded - pre upgrade version +# + +usage () { + echo "" + echo "Step 1: Start an edit session for packages to be upgraded - pre upgrade version" + echo "" + echo "Usage: " + echo " patch_rebase_1 [--origin_branch ] [--working_branch ] [--upversion_data ]" + echo "" + echo "Assumes cgcs-centos-repo already has a working_branch commit that sets the new symlinks." + echo "" + echo "The upversion_data file has data on all the src.rpm being updated in the format:" + echo " export UPVERSION_DATA=$MY_WORKSPACE/upversion.log" + echo " PKG=lighttpd" + echo " OLD_SRC_RPM=lighttpd-1.4.41-1.el7.src.rpm" + echo " NEW_SRC_RPM=lighttpd-1.4.41-2.el7.src.rpm" + echo " SRPM_PATH=$MY_REPO/addons/wr-cgcs/layers/cgcs/recipes-extended/lighttpd/centos/srpm_path" + echo " echo \"\$PKG#\$SRPM_PATH##\$OLD_SRC_RPM#\$NEW_SRC_RPM\" > UPVERSION_DATA" + echo "" +} + +TEMP=`getopt -o h --long origin_branch:,working_branch:,upversion_data:,help -n 'test.sh' -- "$@"` +eval set -- "$TEMP" + +ORIGIN_BRANCH="" +WORKING_BRANCH="" +UPVERSION_LOG="" +HELP=0 + +while true ; do + case "$1" in + --origin_branch) shift ; ORIGIN_BRANCH="$1" ; shift ;; + --working_branch) shift ; WORKING_BRANCH="$1" ; shift ;; + --upversion_data) shift ; UPVERSION_LOG="$1" ; shift ;; + -h|--help) HELP=1 ; shift ;; + --) shift ; break ;; + *) usage; exit 1 ;; + esac +done + +if [ $HELP -eq 1 ]; then + usage + exit 0 +fi + +if [ "$UPVERSION_LOG" == "" ]; then + UPVERSION_LOG=$UPVERSION_DATA +fi + +if [ "$UPVERSION_LOG" == "" ]; then + echo "ERROR: please specify location of upversion data" + usage + exit 1 +fi + +if [ ! 
-f "$UPVERSION_LOG" ]; then + echo "File not found: '$UPVERSION_LOG'" + exit 1 +fi + +if [ "$ORIGIN_BRANCH" == "" ] && [ "$WORKING_BRANCH" == "" ]; then + ORIGIN_BRANCH=$PATCH_SOURCE_BRANCH + WORKING_BRANCH=$MY_PATCH_BRANCH +fi + +if [ "$ORIGIN_BRANCH" == "" ] && [ "$WORKING_BRANCH" == "" ]; then + ORIGIN_BRANCH=$SOURCE_BRANCH + WORKING_BRANCH=$MY_BRANCH +fi + +if [ "$ORIGIN_BRANCH" == "" ]; then + echo "ERROR: please specify a origin branch" + usage + exit 1 +fi + +if [ "$WORKING_BRANCH" == "" ]; then + echo "ERROR: please specify a working branch" + usage + exit 1 +fi + +# One step back to see the old symlinks +cd $MY_REPO/cgcs-centos-repo +git checkout $WORKING_BRANCH +if [ $? != 0 ]; then + echo "ERROR: Can't checkout branch '$WORKING_BRANCH' in directory '$(pwd)'" + exit 1 +fi + +git checkout HEAD^ + +FAILED="" +for dat in $(cat $UPVERSION_LOG); do + name=$(echo $dat | awk -F '#' '{print $1}') + srpm_path=$(echo $dat | awk -F '#' '{print $2}') + old_src_rpm=$(echo $dat | awk -F '#' '{print $4}') + new_src_rpm=$(echo $dat | awk -F '#' '{print $5}') + + echo "$name $old_src_rpm $new_src_rpm" + + build-pkgs --edit --clean $name + if [ $? -ne 0 ]; then + echo "ERROR: failed cmd 'build-pkgs --edit --clean $name'" + FAILED="$name $FAILED" + break + fi + echo "$? <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<" + build-pkgs --edit $name + if [ $? -ne 0 ]; then + echo "ERROR: failed cmd 'build-pkgs --edit $name'" + FAILED="$name $FAILED" + break + fi + echo "$? <=<=<=<=<=<=<=<=<=<=<=<=<=<=<=<=" +done + +cd $MY_REPO/cgcs-centos-repo +git checkout $WORKING_BRANCH + +if [ "$FAILED" != "" ]; then + echo "Failed build-pkgs --edit for ... 
$FAILED" + exit 1 +fi + + diff --git a/build-tools/patch_rebase_2 b/build-tools/patch_rebase_2 new file mode 100755 index 00000000..ddcd1e5f --- /dev/null +++ b/build-tools/patch_rebase_2 @@ -0,0 +1,148 @@ +#!/bin/bash + +# +# Update srpm_path for packages to be upgraded +# + +usage () { + echo "" + echo "Step 2: Update srpm_path for packages to be upgraded" + echo "" + echo "Usage: " + echo " patch_rebase_2 [--origin_branch ] [--working_branch ] [--upversion_data ]" + echo "" + echo "Assumes cgcs-centos-repo already has a working_branch commit that sets the new symlinks." + echo "" + echo "The upversion_data file has data on all the src.rpm being updated in the format:" + echo " export UPVERSION_DATA=$MY_WORKSPACE/upversion.log" + echo " PKG=lighttpd" + echo " OLD_SRC_RPM=lighttpd-1.4.41-1.el7.src.rpm" + echo " NEW_SRC_RPM=lighttpd-1.4.41-2.el7.src.rpm" + echo " SRPM_PATH=$MY_REPO/addons/wr-cgcs/layers/cgcs/recipes-extended/lighttpd/centos/srpm_path" + echo " echo \"\$PKG#\$SRPM_PATH##\$OLD_SRC_RPM#\$NEW_SRC_RPM\" > UPVERSION_DATA" + echo "" +} + + +TEMP=`getopt -o h --long origin_branch:,working_branch:,upversion_data:,help -n 'test.sh' -- "$@"` +eval set -- "$TEMP" + +ORIGIN_BRANCH="" +WORKING_BRANCH="" +UPVERSION_LOG="" +HELP=0 + +while true ; do + case "$1" in + --origin_branch) shift ; ORIGIN_BRANCH="$1" ; shift ;; + --working_branch) shift ; WORKING_BRANCH="$1" ; shift ;; + --upversion_data) shift ; UPVERSION_LOG="$1" ; shift ;; + -h|--help) HELP=1 ; shift ;; + --) shift ; break ;; + *) usage; exit 1 ;; + esac +done + +if [ $HELP -eq 1 ]; then + usage + exit 0 +fi + +if [ "$UPVERSION_LOG" == "" ]; then + UPVERSION_LOG=$UPVERSION_DATA +fi + +if [ "$UPVERSION_LOG" == "" ]; then + echo "ERROR: please specify location of upversion data" + usage + exit 1 +fi + +if [ ! 
-f "$UPVERSION_LOG" ]; then + echo "File not found: '$UPVERSION_LOG'" + exit 1 +fi + +if [ "$ORIGIN_BRANCH" == "" ] && [ "$WORKING_BRANCH" == "" ]; then + ORIGIN_BRANCH=$PATCH_SOURCE_BRANCH + WORKING_BRANCH=$MY_PATCH_BRANCH +fi + +if [ "$ORIGIN_BRANCH" == "" ] && [ "$WORKING_BRANCH" == "" ]; then + ORIGIN_BRANCH=$SOURCE_BRANCH + WORKING_BRANCH=$MY_BRANCH +fi + +if [ "$ORIGIN_BRANCH" == "" ]; then + echo "ERROR: please specify a origin branch" + usage + exit 1 +fi + +if [ "$WORKING_BRANCH" == "" ]; then + echo "ERROR: please specify a working branch" + usage + exit 1 +fi + +# One step back to see the old symlinks +cd $MY_REPO + +FAILED="" +for dat in $(cat $UPVERSION_LOG); do + name=$(echo $dat | awk -F '#' '{print $1}') + srpm_path=$(echo $dat | awk -F '#' '{print $2}') + old_src_rpm=$(echo $dat | awk -F '#' '{print $4}') + new_src_rpm=$(echo $dat | awk -F '#' '{print $5}') + + ( + cd $(dirname $srpm_path) + CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD) + if [ "$CURRENT_BRANCH" != "$WORKING_BRANCH" ]; then + git checkout $WORKING_BRANCH + if [ $? -ne 0 ]; then + git checkout $ORIGIN_BRANCH + if [ $? -ne 0 ]; then + echo "ERROR: Can't checkout branch '$ORIGIN_BRANCH' in directory '$(pwd)'" + exit 1 + fi + + git checkout -b $WORKING_BRANCH + if [ $? -ne 0 ]; then + echo "ERROR: failed to 'git checkout -b $WORKING_BRANCH' from '$(pwd)'" + exit 1 + else + echo "created branch '$WORKING_BRANCH' at '$(pwd)'" + fi + fi + fi + + sed -i "s#$old_src_rpm#$new_src_rpm#" $srpm_path + if [ $? -ne 0 ]; then + echo "ERROR: sed failed '$old_src_rpm' -> '$new_src_rpm'" + exit 1 + else + echo "updated $srpm_path: '$old_src_rpm' -> '$new_src_rpm'" + fi + + exit 0 + ) + + if [ $? 
-ne 0 ]; then + echo "ERROR: failed while working on package '$name' at '$srpm_path'" + exit 1 + fi +done + +echo "" +for d in $(for dat in $(cat $UPVERSION_LOG); do srpm_path=$(echo $dat | awk -F '#' '{print $2}'); ( cd $(dirname $srpm_path); git rev-parse --show-toplevel ); done | sort --unique); do + ( + cd $d + echo "cd $d" + for f in $(git status --porcelain | grep 'srpm_path$' | awk '{print $2}'); do + echo "git add $f"; + done + echo "git commit -m 'srpm_path updates for patch $PATCH_ID'" + ) +done +echo "" diff --git a/build-tools/patch_rebase_3 b/build-tools/patch_rebase_3 new file mode 100755 index 00000000..b6bb66c2 --- /dev/null +++ b/build-tools/patch_rebase_3 @@ -0,0 +1,119 @@ +#!/bin/bash + +# +# Start an edit session for packages to be upgraded - post upgrade version +# + +usage () { + echo "" + echo "Step 3: Start an edit session for packages to be upgraded - post upgrade version" + echo "" + echo "Usage: " + echo " patch_rebase_3 [--origin_branch ] [--working_branch ] [--upversion_data ]" + echo "" + echo "Assumes cgcs-centos-repo already has a working_branch commit that sets the new symlinks." 
+ echo "" + echo "The upversion_data file has data on all the src.rpm being updated in the format:" + echo " export UPVERSION_DATA=$MY_WORKSPACE/upversion.log" + echo " PKG=lighttpd" + echo " OLD_SRC_RPM=lighttpd-1.4.41-1.el7.src.rpm" + echo " NEW_SRC_RPM=lighttpd-1.4.41-2.el7.src.rpm" + echo " SRPM_PATH=$MY_REPO/addons/wr-cgcs/layers/cgcs/recipes-extended/lighttpd/centos/srpm_path" + echo " echo \"\$PKG#\$SRPM_PATH##\$OLD_SRC_RPM#\$NEW_SRC_RPM\" > UPVERSION_DATA" + echo "" +} + + +TEMP=`getopt -o h --long origin_branch:,working_branch:,upversion_data:,help -n 'test.sh' -- "$@"` +eval set -- "$TEMP" + +ORIGIN_BRANCH="" +WORKING_BRANCH="" +UPVERSION_LOG="" +HELP=0 + +while true ; do + case "$1" in + --origin_branch) shift ; ORIGIN_BRANCH="$1" ; shift ;; + --working_branch) shift ; WORKING_BRANCH="$1" ; shift ;; + --upversion_data) shift ; UPVERSION_LOG="$1" ; shift ;; + -h|--help) HELP=1 ; shift ;; + --) shift ; break ;; + *) usage; exit 1 ;; + esac +done + +if [ $HELP -eq 1 ]; then + usage + exit 0 +fi + +if [ "$UPVERSION_LOG" == "" ]; then + UPVERSION_LOG=$UPVERSION_DATA +fi + +if [ "$UPVERSION_LOG" == "" ]; then + echo "ERROR: please specify location of upversion data" + usage + exit 1 +fi + +if [ ! -f "$UPVERSION_LOG" ]; then + echo "File not found: '$UPVERSION_LOG'" + exit 1 +fi + +if [ "$ORIGIN_BRANCH" == "" ] && [ "$WORKING_BRANCH" == "" ]; then + ORIGIN_BRANCH=$PATCH_SOURCE_BRANCH + WORKING_BRANCH=$MY_PATCH_BRANCH +fi + +if [ "$ORIGIN_BRANCH" == "" ] && [ "$WORKING_BRANCH" == "" ]; then + ORIGIN_BRANCH=$SOURCE_BRANCH + WORKING_BRANCH=$MY_BRANCH +fi + +if [ "$ORIGIN_BRANCH" == "" ]; then + echo "ERROR: please specify a origin branch" + usage + exit 1 +fi + +if [ "$WORKING_BRANCH" == "" ]; then + echo "ERROR: please specify a working branch" + usage + exit 1 +fi + +# One step back to see the old symlinks +cd $MY_REPO/cgcs-centos-repo +git checkout $WORKING_BRANCH +if [ $? 
!= 0 ]; then + echo "ERROR: Can't checkout branch '$WORKING_BRANCH' in directory '$MY_REPO/cgcs-centos-repo'" + exit 1 +fi + +FAILED="" +for dat in $(cat $UPVERSION_LOG); do + name=$(echo $dat | awk -F '#' '{print $1}') + srpm_path=$(echo $dat | awk -F '#' '{print $2}') + old_src_rpm=$(echo $dat | awk -F '#' '{print $4}') + new_src_rpm=$(echo $dat | awk -F '#' '{print $5}') + + echo "$name $old_src_rpm $new_src_rpm" + + build-pkgs --edit $name --no-meta-patch + if [ $? -ne 0 ]; then + echo "ERROR: failed cmd 'build-pkgs --edit $name'" + FAILED="$name $FAILED" + break + fi + echo "$? <=<=<=<=<=<=<=<=<=<=<=<=<=<=<=<=" +done + +if [ "$FAILED" != "" ]; then + echo "Failed build-pkgs --edit for ... $FAILED" + exit 1 +fi + + diff --git a/build-tools/patch_rebase_4 b/build-tools/patch_rebase_4 new file mode 100755 index 00000000..96c469f0 --- /dev/null +++ b/build-tools/patch_rebase_4 @@ -0,0 +1,403 @@ +#!/bin/bash + +# +# Migrate Titanium Cloud patches to the new package version +# + +usage () { + echo "" + echo "Step 4: Migrate Titanium Cloud patches to the new package version" + echo "" + echo "Usage: " + echo " patch_rebase_4 [--origin_branch ] [--working_branch ] [--upversion_data ]" + echo "" + echo "Assumes cgcs-centos-repo already has a working_branch commit that sets the new symlinks." 
+ echo "" + echo "The upversion_data file has data on all the src.rpm being updated in the format:" + echo " export UPVERSION_DATA=$MY_WORKSPACE/upversion.log" + echo " PKG=lighttpd" + echo " OLD_SRC_RPM=lighttpd-1.4.41-1.el7.src.rpm" + echo " NEW_SRC_RPM=lighttpd-1.4.41-2.el7.src.rpm" + echo " SRPM_PATH=$MY_REPO/addons/wr-cgcs/layers/cgcs/recipes-extended/lighttpd/centos/srpm_path" + echo " echo \"\$PKG#\$SRPM_PATH##\$OLD_SRC_RPM#\$NEW_SRC_RPM\" > UPVERSION_DATA" + echo "" +} + + +TEMP=`getopt -o h --long origin_branch:,working_branch:,upversion_data:,help -n 'test.sh' -- "$@"` +eval set -- "$TEMP" + +ORIGIN_BRANCH="" +WORKING_BRANCH="" +UPVERSION_LOG="" +HELP=0 + +while true ; do + case "$1" in + --origin_branch) shift ; ORIGIN_BRANCH="$1" ; shift ;; + --working_branch) shift ; WORKING_BRANCH="$1" ; shift ;; + --upversion_data) shift ; UPVERSION_LOG="$1" ; shift ;; + -h|--help) HELP=1 ; shift ;; + --) shift ; break ;; + *) usage; exit 1 ;; + esac +done + +if [ $HELP -eq 1 ]; then + usage + exit 0 +fi + +if [ "$UPVERSION_LOG" == "" ]; then + UPVERSION_LOG=$UPVERSION_DATA +fi + +if [ "$UPVERSION_LOG" == "" ]; then + echo "ERROR: please specify location of upversion data" + usage + exit 1 +fi + +if [ ! -f "$UPVERSION_LOG" ]; then + echo "File not found: '$UPVERSION_LOG'" + exit 1 +fi + +if [ "$ORIGIN_BRANCH" == "" ] && [ "$WORKING_BRANCH" == "" ]; then + ORIGIN_BRANCH=$PATCH_SOURCE_BRANCH + WORKING_BRANCH=$MY_PATCH_BRANCH +fi + +if [ "$ORIGIN_BRANCH" == "" ] && [ "$WORKING_BRANCH" == "" ]; then + ORIGIN_BRANCH=$SOURCE_BRANCH + WORKING_BRANCH=$MY_BRANCH +fi + +if [ "$ORIGIN_BRANCH" == "" ]; then + echo "ERROR: please specify a origin branch" + usage + exit 1 +fi + +if [ "$WORKING_BRANCH" == "" ]; then + echo "ERROR: please specify a working branch" + usage + exit 1 +fi + +if [ "$DISPLAY" == "" ]; then + echo "ERROR: X-Windows 'DISPLAY' variable not set. This script needs to open pop-up windows." 
+ usage + exit 1 +fi + +# One step back to see the old symlinks +cd $MY_REPO/cgcs-centos-repo +git checkout $WORKING_BRANCH +if [ $? != 0 ]; then + echo "ERROR: Can't checkout branch '$WORKING_BRANCH' in directory '$MY_REPO/cgcs-centos-repo'" + exit 1 +fi + +FAILED="" +build_types="std rt" +for dat in $(cat $UPVERSION_LOG); do + ( + name=$(echo $dat | awk -F '#' '{print $1}') + srpm_path=$(echo $dat | awk -F '#' '{print $2}') + old_src_rpm=$(echo $dat | awk -F '#' '{print $4}') + new_src_rpm=$(echo $dat | awk -F '#' '{print $5}') + + PKG_DIR=$(dirname $(dirname $srpm_path)) + OLD_BRANCH=$(echo $old_src_rpm | sed 's#[.]src[.]rpm$##') + NEW_BRANCH=$(echo $new_src_rpm | sed 's#[.]src[.]rpm$##') + + WORK_META_DIR="" + for dd in $build_types; do + WORK_META_DIR=$MY_WORKSPACE/$dd/srpm_work/$name/rpmbuild + echo "WORK_META_DIR=$WORK_META_DIR" + if [ -d $WORK_META_DIR ]; then + break; + else + WORK_META_DIR="" + fi + done + if [ "$WORK_META_DIR" == "" ]; then + echo "ERROR: failed to find srpm_work directory for '$name'" + exit 1 + fi + + # WORK_SRC_DIR=$(dirname $(find $MY_WORKSPACE/srpm_work/$name/gits/ -type d -name .git)) + NEW_WORK_SRC_DIR="" + OLD_WORK_SRC_DIR="" + for dd in $build_types; do + for g in $(find $MY_WORKSPACE/$dd/srpm_work/$name/gits/ -type d -name .git); do + d=$(dirname $g) + if [ -d $d ]; then + cd $d; + git tag | grep pre_wrs_ >> /dev/null + if [ $? -ne 0 ]; then + continue + fi + git checkout $OLD_BRANCH 2>> /dev/null + if [ $? -eq 0 ]; then + OLD_WORK_SRC_DIR=$d + fi + git checkout $NEW_BRANCH 2>> /dev/null + if [ $? 
-eq 0 ]; then + NEW_WORK_SRC_DIR=$d + fi + fi + done + done + if [ "$WORK_META_DIR" == "" ]; then + echo "ERROR: failed to find srpm_work directory for '$name'" + exit 1 + fi + + echo "$name $old_src_rpm $new_src_rpm" + echo "PKG_DIR=$PKG_DIR" + echo "OLD_BRANCH=$OLD_BRANCH" + echo "NEW_BRANCH=$NEW_BRANCH" + echo "WORK_META_DIR=$WORK_META_DIR" + echo "OLD_WORK_SRC_DIR=$OLD_WORK_SRC_DIR" + echo "NEW_WORK_SRC_DIR=$NEW_WORK_SRC_DIR" + echo "" + + ( + cd $WORK_META_DIR + if [ $? -ne 0 ]; then + echo "ERROR: failed to cd to WORK_META_DIR=$WORK_META_DIR" + exit 1 + fi + echo "--- old meta git log (oldest to newest) ---" + git checkout $OLD_BRANCH + if [ $? -ne 0 ]; then + echo "ERROR: failed to git checkout OLD_BRANCH=$OLD_BRANCH" + exit 1 + fi + git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%ci) %C(bold blue)<%an>%Creset' --abbrev-commit | tac + PATCH_COMMIT_LIST=$(git log --graph --pretty=format:'%h -%d %s (%ci) <%an>' --abbrev-commit | tac | grep WRS: | grep -v 'WRS: COPY_LIST content' | awk '{ print $2 }') + echo "--- new meta git log (oldest to newest) ---" + git checkout $NEW_BRANCH + if [ $? -ne 0 ]; then + echo "ERROR: failed to git checkout NEW_BRANCH=$NEW_BRANCH" + exit 1 + fi + git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%ci) %C(bold blue)<%an>%Creset' --abbrev-commit | tac + REFERENCE_COMMIT=$(git log --graph --pretty=format:'%h -%d %s (%ci) <%an>' --abbrev-commit | head -n 1 | awk '{ print $2 }') + echo "" + + for COMMIT in ${PATCH_COMMIT_LIST}; do + echo "git cherry-pick $COMMIT" + git cherry-pick "$COMMIT" + if [ $? -ne 0 ]; then + echo "WARNING: 'git cherry-pick $COMMIT' found merge conflicts. 
Please fix these files" + git status --porcelain | grep '^UU ' | awk '{ print $2}' + echo "pwd=$(pwd)" + # gitk & + echo "git mergetool --no-prompt" + git mergetool --no-prompt + # for FILE_NAME in $(git status --porcelain | grep '^UU ' | awk '{ print $2}'); do + # xterm -e "vi $FILE_NAME -c '/[<=>][<=>][<=>][<=>]'" + # if [ $? -ne 0 ]; then + # echo "ERROR: problem launching editor on " + # exit 1 + # fi + # done + echo "git cherry-pick --continue" + git cherry-pick --continue + fi + done + + PATCH_LIST=$(git format-patch -n $REFERENCE_COMMIT) + if [ $? -ne 0 ]; then + echo "ERROR: failed to git format-patch -n REFERENCE_COMMIT=$REFERENCE_COMMIT" + exit 1 + fi + for PATCH_FILE in ${PATCH_LIST}; do + PATCH_TARGET=$(echo $PATCH_FILE | sed 's/^[0-9][0-9][0-9][0-9]-WRS-//' | sed 's/.patch$//') + echo "$PATCH_FILE -> $PATCH_TARGET" + N=$(find "$PKG_DIR/centos/meta_patches" -name "$PATCH_TARGET*" | wc -l) + if [ $N -eq 1 ]; then + PATCH_DEST=$(find "$PKG_DIR/centos/meta_patches" -name "$PATCH_TARGET*") + echo "cp -f $PATCH_FILE $PATCH_DEST" + \cp -f $PATCH_FILE $PATCH_DEST + if [ $? -ne 0 ]; then + echo "ERROR: copy failed $WORK_META_DIR/$PATCH_FILE -> $PATCH_DEST" + exit 1 + fi + else + echo "ERROR: Don't know what destination file name to use for patch '$WORK_META_DIR/$PATCH_FILE' derived from commit $COMMIT, and to be copied to '$PKG_DIR/centos/meta_patches'" + fi + done + + echo "" + echo "" + ) + + if [ $? -ne 0 ]; then + FAILED=$name + break + fi + + ( + echo "--- old git log (oldest to newest) ---" + cd $OLD_WORK_SRC_DIR + if [ $? -ne 0 ]; then + echo "ERROR: failed to cd to OLD_WORK_SRC_DIR=$OLD_WORK_SRC_DIR" + exit 1 + fi + + git checkout $OLD_BRANCH + if [ $? 
-ne 0 ]; then + echo "ERROR: failed to git checkout OLD_BRANCH=$OLD_BRANCH in directory '$OLD_WORK_SRC_DIR'" + exit 1 + fi + + git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%ci) %C(bold blue)<%an>%Creset' --abbrev-commit | tac + PATCH_COMMIT_LIST=$(git log --graph --pretty=format:'%h -%d %s (%ci) <%an>' --abbrev-commit | tac | grep WRS: | grep -v 'WRS: COPY_LIST content' | awk '{ print $2 }') + + echo "--- new git log (oldest to newest) ---" + cd $NEW_WORK_SRC_DIR + if [ $? -ne 0 ]; then + echo "ERROR: failed to cd to NEW_WORK_SRC_DIR=$NEW_WORK_SRC_DIR" + exit 1 + fi + + git checkout $NEW_BRANCH + if [ $? -ne 0 ]; then + echo "ERROR: failed to git checkout NEW_BRANCH=$NEW_BRANCH in directory '$NEW_WORK_SRC_DIR'" + exit 1 + fi + + git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%ci) %C(bold blue)<%an>%Creset' --abbrev-commit | tac + REFERENCE_COMMIT=$(git log --graph --pretty=format:'%h -%d %s (%ci) <%an>' --abbrev-commit | head -n 1 | awk '{ print $2 }') + echo "" + + if [ "$OLD_WORK_SRC_DIR" == "$NEW_WORK_SRC_DIR" ]; then + for COMMIT in ${PATCH_COMMIT_LIST}; do + echo "git cherry-pick $COMMIT" + git cherry-pick "$COMMIT" + if [ $? -ne 0 ]; then + echo "WARNING: 'git cherry-pick $COMMIT' found merge conflicts. Please fix these files" + git status --porcelain | grep '^UU ' | awk '{ print $2}' + echo "pwd=$(pwd)" + # gitk & + echo "git mergetool --no-prompt" + git mergetool --no-prompt + # for FILE_NAME in $(git status --porcelain | grep '^UU ' | awk '{ print $2}'); do + # xterm -e "vi $FILE_NAME -c '/[<=>][<=>][<=>][<=>]'" + # if [ $? -ne 0 ]; then + # echo "ERROR: problem launching editor on " + # exit 1 + # fi + # done + echo "git cherry-pick --continue" + git cherry-pick --continue + fi + done + else + cd $OLD_WORK_SRC_DIR + PATCH_LIST=$(git format-patch -n pre_wrs_$OLD_BRANCH) + if [ $? 
-ne 0 ]; then + echo "ERROR: failed to git format-patch -n REFERENCE_COMMIT=pre_wrs_$OLD_BRANCH" + exit 1 + fi + cd $NEW_WORK_SRC_DIR + for PATCH_FILE in ${PATCH_LIST}; do + cat $OLD_WORK_SRC_DIR/$PATCH_FILE | patch -p1 + if [ $? -ne 0 ]; then + for REJECT in $(find . -name '*.rej'); do + FILE_NAME=$(echo $REJECT | sed 's#.rej$##') + cd $OLD_WORK_SRC_DIR + gitk $FILE_NAME & + cd $NEW_WORK_SRC_DIR + if [ -f $FILE_NAME ] && [ -f $FILE_NAME.orig ]; then + \cp -f $FILE_NAME.orig $FILE_NAME + xterm -e "vi $FILE_NAME $REJECT" + rm -f $REJECT + rm -f $FILE_NAME.orig + fi + done + fi + + git add --all + MSG=$(echo $PATCH_FILE | sed 's/^[0-9][0-9][0-9][0-9]-WRS-//' | sed 's/.patch$//') + git commit -m "WRS: $MSG" + done + + fi + + PATCH_LIST=$(git format-patch -n $REFERENCE_COMMIT) + if [ $? -ne 0 ]; then + echo "ERROR: failed to git format-patch -n REFERENCE_COMMIT=$REFERENCE_COMMIT" + exit 1 + fi + for PATCH_FILE in ${PATCH_LIST}; do + PATCH_TARGET=$(echo $PATCH_FILE | sed 's/^[0-9][0-9][0-9][0-9]-WRS-Patch[0-9]*-//' | sed 's/^[0-9][0-9][0-9][0-9]-WRS-Patch//' | sed 's/.patch$//') + echo "$PATCH_FILE -> $PATCH_TARGET" + PKG_PATCH_DIR="$PKG_DIR/centos/patches" + N=0 + if [ -d "$PKG_PATCH_DIR" ]; then + N=$(find "$PKG_PATCH_DIR" -name "$PATCH_TARGET*" | grep -v '[/]meta_patches[/]' | wc -l) + fi + if [ $N -ne 1 ]; then + PKG_PATCH_DIR="$PKG_DIR" + if [ -d "$PKG_PATCH_DIR" ]; then + N=$(find "$PKG_PATCH_DIR" -name "$PATCH_TARGET*" | grep -v '[/]meta_patches[/]' | wc -l) + fi + fi + echo "N=$N" + echo "PKG_PATCH_DIR=$PKG_PATCH_DIR" + + if [ $N -eq 1 ]; then + PATCH_DEST=$(find "$PKG_PATCH_DIR" -name "$PATCH_TARGET*" | grep -v '[/]meta_patches[/]') + echo "meld $PATCH_FILE -> $PATCH_DEST" + meld $PATCH_FILE $PATCH_DEST + if [ $? 
-ne 0 ]; then + echo "ERROR: meld failed $WORK_SRC_DIR/$PATCH_FILE -> $PATCH_DEST" + exit 1 + fi + else + echo "ERROR: Don't know what destination file name to use for patch '$OLD_WORK_SRC_DIR/$PATCH_FILE', and to be copied to '$PKG_PATCH_DIR'" + fi + done + + echo "" + echo "" + ) + + if [ $? -ne 0 ]; then + FAILED=$name + break + fi + + ) + + +done + +if [ "$FAILED" != "" ]; then + echo "Failed for ... $FAILED" + exit 1 +fi + +echo "" +for d in $(for dat in $(cat $UPVERSION_LOG); do srpm_path=$(echo $dat | awk -F '#' '{print $2}'); ( cd $(dirname $srpm_path); git rev-parse --show-toplevel ); done | sort --unique); do + ( + cd $d + echo "cd $d" + for f in $(git status --porcelain | awk '{print $2}'); do + echo "git add $f"; + done + if [ "$PATCH_ID" == "" ]; then + echo "git commit -m 'rebased patches'" + else + echo "git commit -m 'rebased patches for patch $PATCH_ID'" + fi + ) +done +echo "" + + diff --git a/build-tools/sign-build b/build-tools/sign-build new file mode 100755 index 00000000..ef496187 --- /dev/null +++ b/build-tools/sign-build @@ -0,0 +1,506 @@ +#!/bin/bash + +# This script calls into an external signing server to perform signing of some +# packages in the system. The old packages (which are typically generated by +# the build system and signed by placeholder keys) are overwritten by the new +# packages. +# +# Three types of packages are signed: +# kernels (both std and lowlatency, aka "rt", kernels) +# grub +# shim +# +# Kernels and grub are generated by producing (under the normal build system) +# two packages -- a package containing the unsigned binaries, and a package +# containing binaries signed with temporary keys. All the "accessories" (files, +# scripts, etc) are included in the package containing the signed-with-temp-keys +# files. The signing server will take both packages, sign the unsigned +# binaries, and replace the files in the signed package with the newly signed +# ones. 
+# +# Typical flow/artifacts +# kernel.src.rpm -> produces kernel.rpm and kernel-unsigned.rpm +# kernel.rpm -> initially contains binaries signed with a temporary key +# -> contains all files used by the kernel +# -> can be installed and used in a system (it just won't +# secure boot since the key is just a temp key) +# kernel-unsigned.rpm -> contains just unsigned kernel binaries +# +# The signing server will take both packages, sign the binaries in +# kernel-unsigned.rpm with our private key, and replace the binaries in +# kernel.rpm with the new binaries. The kernel.rpm can then be replaced by the +# version generated by the signing server. +# +# Shim is a bit of a different beast. +# +# There are two source packages - shim and shim-signed. Frustratingly, "shim" +# source produces a "shim-unsigned" binary output. "shim-signed" produces a +# "shim" binary output. +# +# The "shim-signed" source RPM doesn't contain source code -- it just contains +# instructions to take the "shim-unsigned" binaries, sign them, and package the +# output. We've modified the shim-signed RPM to (rather than sign with a temp +# key) use "presigned" binaries from shim-unsigned if the files exist. (It will +# still use a temp key of no presigned files are found, which is how the build +# system normally runs). +# +# The signing server will unpack the shim-unsigned package, sign the binaries +# (as "presigned") and repack the package. +# +# A rebuild of shim-signed by the build server is then required. +# +# Thanks for bearing with me in the convoluted discussion, above. 
+ + +# Script flow: +# - call signing server to sign kernels (if they exist and are new, as with +# other RPMs) +# - replace old kernel packages with newly signed ones +# - call signing server to sign grub (and replace old version with the newly +# signed one) +# - call signing server to sign shim-unsigned (replace old version) +# - rebuild shim-signed +# - update our repos to advertize all newly replaced packages + +# check_if_pkg_needs_signing +# +# Checks to see if a given package needs to be signed. We maintain a list of +# MD5 sums for RPMs we have signed. Thus, we can easily see if we've already +# signed a package. +# +# Returns 1 if the package does need signing, or 0 if package does not +# +# This function expects the package specified to exist. +function check_if_pkg_needs_signing +{ + local PKG_TO_CHECK=$1 + + if [ ! -e ${SIGNED_PKG_DB} ]; then + # We haven't signed anything before, so this package needs signing + return 1 + fi + + local SIGNED_PKG_MD5=`grep ${PKG_TO_CHECK} ${SIGNED_PKG_DB} | cut -d ' ' -f 1` + if [ "x${SIGNED_PKG_MD5}" == "x" ]; then + # We have no record of having signed the package -- needs signing + return 1 + fi + + local CURRENT_MD5=`md5sum ${PKG_TO_CHECK} | cut -d ' ' -f 1` + if [ "${CURRENT_MD5}" != "${SIGNED_PKG_MD5}" ]; then + # The package has been regenerated since we last signed it -- needs + # signing again + return 1 + fi + + # The package md5 sum matches the md5sum of the package when it was last + # signed. + return 0 +} + +# update_signed_pkg_list +# +# Updated our list of signed packages with the md5 sum of a recently signed +# package. +# +# This function expects the package to exist. +function update_signed_pkg_list +{ + local PKG_TO_ADD=$1 + + if [ ! 
-e ${SIGNED_PKG_DB} ]; then + touch ${SIGNED_PKG_DB} + fi + + # remove current entry for package + local TMPFILE=`mktemp` + grep -v $(basename ${PKG_TO_ADD}) ${SIGNED_PKG_DB} > ${TMPFILE} + mv ${TMPFILE} ${SIGNED_PKG_DB} + + # add MD5 for package to the package list + md5sum ${PKG_TO_ADD} >> ${SIGNED_PKG_DB} +} + + +# update_repo +# +# Updates either the standard or rt repo with latest packages +# Checks that you specified a repo, and that the path exists. +# +# There are actually now two places we need to update -- the +# rpmbuild/RPMS/ path, as well as the results/.../ path +function update_repo +{ + local BUILD_TYPE=$1 + local EXTRA_PARAMS="" + local RETCODE=0 + local repopath="" + + if [ "x$BUILD_TYPE" == "x" ]; then + return 1 + fi + + if [ "x$MY_BUILD_ENVIRONMENT_TOP" == "x" ]; then + return 1 + fi + + for repopath in "$MY_WORKSPACE/$BUILD_TYPE/rpmbuild/RPMS" "$MY_WORKSPACE/$BUILD_TYPE/results/${MY_BUILD_ENVIRONMENT_TOP}-$BUILD_TYPE"; do + if [ ! -d "$repopath" ]; then + echo "Error - cannot find path $repopath" + return 1 + fi + + cd $repopath + if [ -f comps.xml ]; then + EXTRA_PARAMS="-g comps.xml" + fi + createrepo --update $EXTRA_PARAMS . > /dev/null + RETCODE=$? + cd - > /dev/null + if [ 0$RETCODE -ne 0 ]; then + return $RETCODE + fi + done + + return $RETCODE +} + +# sign_shims - find and sign any shim package that we need to +# +function sign_shims +{ + SHIM=`find $MY_WORKSPACE/std/rpmbuild/RPMS -name "shim-unsigned-*.$ARCH.rpm" | grep -v debuginfo` + if [ "x${SHIM}" == "x" ]; then + echo "Warning -- cannot find shim package to sign" + return 0 + fi + sign shim $SHIM + + return $? +} + +# sign_grubs - find and sign any grub package that we need to. 
+# Grub (and kernel) are initially signed with temporary keys, so +# we need to upload both the complete package, as well as the +# unsigned binaries +# +function sign_grubs +{ + GRUB=`find $MY_WORKSPACE/std/rpmbuild/RPMS -name "grub2-efi-[1-9]*.$ARCH.rpm"` + UNSIGNED_GRUB=`find $MY_WORKSPACE/std/rpmbuild/RPMS -name "grub2-efi-unsigned*.$ARCH.rpm"` + if [ "x${GRUB}" == "x" ]; then + echo "Warning -- cannot find GRUB package to sign" + return 0 + fi + if [ "x${UNSIGNED_GRUB}" == "x" ]; then + echo "Warning -- cannot find unsigned GRUB package to sign" + return 0 + fi + + sign grub2 $GRUB $UNSIGNED_GRUB + return $? +} + +# sign_kernels - find and sign any kernel package that we need to. +# +function sign_kernels +{ + sign_kernel "std" "" + sign_kernel "rt" "-rt" +} + +# sign_kernel - find and sign kernel package if we need to. +# Kernels (and grub) are initially signed with temporary keys, so +# we need to upload both the complete package, as well as the +# unsigned binaries +function sign_kernel +{ + local KERNEL_PATH=$1 + local KERNEL_EXTRA=$2 + KERNEL=`find $MY_WORKSPACE/${KERNEL_PATH}/rpmbuild/RPMS -name "kernel${KERNEL_EXTRA}-[1-9]*.$ARCH.rpm"` + UNSIGNED_KERNEL=`find $MY_WORKSPACE/${KERNEL_PATH}/rpmbuild/RPMS -name "kernel${KERNEL_EXTRA}-unsigned-[1-9]*.$ARCH.rpm"` + if [ "x${KERNEL}" == "x" ]; then + echo "Warning -- cannot find kernel package to sign in ${KERNEL_PATH}" + return 0 + fi + if [ "x${UNSIGNED_KERNEL}" == "x" ]; then + echo "Warning -- cannot find unsigned kernel package to sign in ${KERNEL_PATH}" + return 0 + fi + + sign kernel $KERNEL $UNSIGNED_KERNEL + if [ $? -ne 0 ]; then + return $? 
+ fi +} + +# rebuild_pkgs - rebuild any packages that need to be updated from the newly +# signed binaries +# +function rebuild_pkgs +{ + local LOGFILE="$MY_WORKSPACE/export/signed-rebuild.log" + local PKGS_TO_REBUILD=${REBUILD_LIST} + + if [ "x${PKGS_TO_REBUILD}" == "x" ]; then + # No rebuilds required, return cleanly + return 0 + fi + + # If we reach this point, then we have one or more packages to be rebuilt + + # first, update the repo so it is aware of the "latest" binaries + update_repo std + if [ $? -ne 0 ]; then + echo "Could not update signed packages -- could not update repo" + return 1 + fi + + echo "Performing rebuild of packages: $PKGS_TO_REBUILD" + FORMAL_BUILD=0 build-pkgs --no-descendants --no-build-info --no-required --careful $PKGS_TO_REBUILD > $LOGFILE 2>&1 + + if [ $? -ne 0 ]; then + echo "Could not rebuild packages: $PKGS_TO_REBUILD -- see $LOGFILE for details" + return 1 + fi + + echo "Done" + return 0 +} + +# sign [pkg_containing_unsigned_bins] +# +# This routine uploads a package to the signing server, instructs the signing +# signing server to do its' magic, and downloads the updated (signed) package +# from the signing server. +# +# Accessing the signing server -- the signing server cannot just be logged +# into by anyone. A small number of users (Jason McKenna, Scott Little, Greg +# Waines, etc) have permission to log in as themselves. In addition, there is +# a user "signing" who is unique to the server. The "jenkins" user on our +# build servers has permission to login/upload files as "signing" due to Jenkins' +# private SSH key being added to the signing user's list of keys. This means +# that Jenkins can upload and run commands on the server as "signing". +# +# In addition to uploading files as signing, the signing user has permissions to +# run a single command (/opt/signing/sign.sh) as a sudo root user. The signing +# user does not have access to modify the script or to run any other commands as +# root. 
The sign.sh script will take inputs (the files that jenkins has +# uploaded), verify the contents, sign the images against private keys, and +# output a new .rpm contianing the signed version of the files. Assuming all +# is successful, the filename of the signed output file is returned, and the +# jenkins user can then use that filename to download the file (the "signing" +# user does not have access to remove or modify the file once it's created). +# +# All operations done on the signing server are logged in muliple places, and +# the output RPM artifacts are timestamped to ensure that they are not +# overwritten by subsequent calls to sign.sh. +# +# kernel and grub package types require you to specify/upload the unsigned +# packages as well as the normal binary +function sign +{ + local TYPE=$1 + local FILE=$2 + local UNSIGNED=$3 + local UNSIGNED_OPTION="" + local TMPFILE=`mktemp /tmp/sign.XXXXXXXX` + + # Don't sign if we've already signed it + check_if_pkg_needs_signing ${FILE} + if [ $? -eq 0 ]; then + echo "Not signing ${FILE} as we previously signed it" + return 0 + fi + + echo "Signing $FILE" + + # upload the original package + scp -q $FILE $SIGNING_USER@$SIGNING_SERVER:$UPLOAD_PATH + if [ $? -ne 0 ]; then + echo "Failed to upload file $FILE" + \rm -f $TMPFILE + return 1 + fi + + # upload the unsigned package (if specified) + if [ "x$UNSIGNED" != "x" ]; then + scp -q $UNSIGNED $SIGNING_USER@$SIGNING_SERVER:$UPLOAD_PATH + if [ $? -ne 0 ]; then + echo "Failed to upload file $UNSIGNED" + \rm -f $TMPFILE + return 1 + fi + UNSIGNED=$(basename $UNSIGNED) + UNSIGNED_OPTION="-u $UPLOAD_PATH/$UNSIGNED" + fi + + # Call the magic script on the signing server. Note that the user + # ($SIGNING_USER) has sudo permissions but only to invoke this one script. + # The signing user cannot make other sudo calls. 
+ # + # We place output in $TMPFILE to extract the output file name later + # + ssh $SIGNING_USER@$SIGNING_SERVER sudo $SIGNING_SCRIPT -i $UPLOAD_PATH/$(basename $FILE) $UNSIGNED_OPTION -t $TYPE > $TMPFILE 2>&1 + if [ $? -ne 0 ]; then + echo "Signing of $FILE failed" + \rm -f $TMPFILE + return 1 + fi + + # The signing server script will output the name by which the newly signed + # RPM can be found. This will be a unique filename (based on the unique + # upload directory generated by the "-r" option above). + # + # The reason for this is so that we can archive all output files + # and examine them later without them being overwriten. File paths are + # typically of the form + # + # /export/signed_images/XXXXXXX_grub2-efi-2.02-0.44.el7.centos.tis.3.x86_64.rpm + # + # Extract the output name, and copy the RPM back into our system + # (Note that we overwrite our original version of the RPM) + # + # Note that some packages (like grub) may produce multiple output RPMs (i.e. + # multiple lines list output files. + OUTPUT=`grep "Output written:" $TMPFILE | sed "s/Output written: //"` + + # Check that we got something + if [ "x$OUTPUT" == "x" ]; then + echo "Could not determine output file -- check logs on signing server for errors" + \cp $TMPFILE $MY_WORKSPACE/export/signing.log + \rm -f $TMPFILE + return 1 + fi + + # The signing script can return multiple output files, if appropriate for + # the input RPM source type. 
Copy each output RPM to our repo + # Note that after we download the file we extract the base package name + # from the RPM to find the name of the file that it *should* be named + # + # example: + # we'd download "Zrqyeuzw_kernel-3.10.0-514.2.2.el7.20.tis.x86_64.rpm" + # we'd figure out that the RPM name should be "kernel" + # we look for "kernel" in the RPM filename, and rename + # "Zrqyeuzw_kernel-3.10.0-514.2.2.el7.20.tis.x86_64.rpm" to + # "kernel-3.10.0-514.2.2.el7.20.tis.x86_64.rpm" + while read OUTPUT_FILE; do + + # Download the file from the signing server + local DOWNLOAD_FILENAME=$(basename $OUTPUT_FILE) + scp -q $SIGNING_USER@$SIGNING_SERVER:$OUTPUT_FILE $(dirname $FILE) + if [ $? -ne 0 ]; then + \rm -f $TMPFILE + echo "Copying file from signing server failed" + return 1 + fi + echo "Successfully retrieved $OUTPUT_FILE" + + # figure out what the file should be named (strip away leading chars) + local RPM_NAME=`rpm -qp $(dirname $FILE)/$DOWNLOAD_FILENAME --qf="%{name}"` + local CORRECT_OUTPUT_FILE_NAME=`echo $DOWNLOAD_FILENAME | sed "s/^.*$RPM_NAME/$RPM_NAME/"` + + # rename the file + \mv -f $(dirname $FILE)/$DOWNLOAD_FILENAME $(dirname $FILE)/$CORRECT_OUTPUT_FILE_NAME + + # replace the version of the file in results + # + # Potential hiccup in future -- this code currenty replaces any output file in EITHER + # std or rt results which matches the filename we just downloaded from the signing. + # server. This means there could be an issue where we sign something-ver-rel.arch.rpm + # but we expect different versions of that RPM in std and in rt. Currently, we do not + # have any RPMs which have that problem (all produced RPMs in rt have the "-rt" suffix + # let along any "signed" rpms) but it's something of which to be aware. + # + # Also, note that we do not expect multiple RPMs in each repo to have the same filename. + # We use "head -n 1" to handle that, but again it shouldn't happen. 
+ # + for buildtype in std rt; do + x=`find $MY_WORKSPACE/$buildtype/results/${MY_BUILD_ENVIRONMENT_TOP}-$buildtype -name $CORRECT_OUTPUT_FILE_NAME | head -n 1` + if [ ! -z "$x" ]; then + cp $(dirname $FILE)/$CORRECT_OUTPUT_FILE_NAME $x + fi + done + + echo "Have signed file $(dirname $FILE)/$CORRECT_OUTPUT_FILE_NAME" + done <<< "$OUTPUT" + + \rm -f $TMPFILE + + # If we just signed a shim package, flag that shim needs to be rebuilt + if [ "${TYPE}" == "shim" ]; then + REBUILD_LIST="${REBUILD_LIST} shim-signed" + fi + + echo "Done" + update_signed_pkg_list ${FILE} + + return 0 +} + +# Main script + +if [ "x$MY_WORKSPACE" == "x" ]; then + echo "Environment not set up -- abort" + exit 1 +fi + +ARCH="x86_64" +SIGNING_SERVER=yow-tiks01 +SIGNING_USER=signing +SIGNING_SCRIPT=/opt/signing/sign.sh +UPLOAD_PATH=`ssh $SIGNING_USER@$SIGNING_SERVER sudo $SIGNING_SCRIPT -r` +SIGNED_PKG_DB=${MY_WORKSPACE}/signed_pkg_list.txt +REBUILD_LIST="" +MY_BUILD_ENVIRONMENT_TOP=${MY_BUILD_ENVIRONMENT_TOP:-$MY_BUILD_ENVIRONMENT} + +# Check that we were able to request a unique path for uploads +echo $UPLOAD_PATH | grep -q "^Upload:" +if [ $? -ne 0 ]; then + echo "Failed to get upload path -- abort" + exit 1 +fi +UPLOAD_PATH=`echo $UPLOAD_PATH | sed "s%^Upload: %%"` + +sign_kernels +if [ $? -ne 0 ]; then + echo "Failed to sign kernels -- abort" + exit 1 +fi + +sign_shims +if [ $? -ne 0 ]; then + echo "Failed to sign shims -- abort" + exit 1 +fi + +sign_grubs +if [ $? -ne 0 ]; then + echo "Failed to sign grubs -- abort" + exit 1 +fi + +update_repo std +if [ $? -ne 0 ]; then + echo "Failed to update std repo -- abort" + exit 1 +fi + +rebuild_pkgs +if [ $? -ne 0 ]; then + echo "Failed to update builds with signed dependancies -- abort" + exit 1 +fi + +update_repo std +if [ $? -ne 0 ]; then + echo "Failed to update std repo -- abort" + exit 1 +fi + +update_repo rt +if [ $? 
-ne 0 ]; then + echo "Failed to update rt repo -- abort" + exit 1 +fi + diff --git a/build-tools/sign-rpms b/build-tools/sign-rpms new file mode 100755 index 00000000..4e2300b0 --- /dev/null +++ b/build-tools/sign-rpms @@ -0,0 +1,258 @@ +#!/bin/bash + +# Add file signature to RPMs +# +# This script will add file signature to rpms in a given directory. +# The directory containing the RPMs must be passed as a parameter. There is no default location. +# +# + +usage () { + echo "" + echo "Usage: " + echo " sign-rpms -d|--pkg-dir " + echo " -d --pkg-dir directory contain the RPMs to sign" + echo " -h|--help this message" + echo "" +} + +# number of processors. The process will use all available processors by default. +NPROCS=$(nproc) + +export MOCK=/usr/bin/mock + +# check input variables +function check_vars { + # need access to repo, which should normally be defined as MY_REPO in the env + + if [ ! -z "$MY_REPO" ] && [ -d "$MY_REPO" ] ; then + INTERNAL_REPO_ROOT=$MY_REPO + fi + + if [ -z "$INTERNAL_REPO_ROOT" ] ; then + printf " unable to use \$MY_REPO (value \"$MY_REPO\")\n" + printf " -- checking \$MY_REPO_ROOT_DIR (value \"$MY_REPO_ROOT_DIR\")\n" + if [ ! 
-z "$MY_REPO_ROOT_DIR" ] && [ -d "$MY_REPO_ROOT_DIR/cgcs-root" ] ; then + INTERNAL_REPO_ROOT=$MY_REPO_ROOT_DIR/cgcs-root + printf " Found!\n" + fi + fi + + if [ -z "$INTERNAL_REPO_ROOT" ] ; then + printf " No joy -- checking for \$MY_WORKSPACE/cgcs-root\n" + if [ -d "$MY_WORKSPACE/cgcs-root" ] ; then + INTERNAL_REPO_ROOT=$MY_WORKSPACE/cgcs-root + printf " Found!\n" + fi + fi + + if [ -z "$INTERNAL_REPO_ROOT" ] ; then + printf " Error -- could not locate cgcs-root repo.\n" + exit 1 + fi + + if [ -z "$MY_BUILD_ENVIRONMENT" ] ; then + printf " Error -- missing environment variable MY_BUILD_ENVIRONMENT" + exit 1 + fi + + if [ -z "$MY_BUILD_DIR" ] ; then + printf " Error -- missing environment variable MY_BUILD_DIR" + exit 1 + fi + +} + +# +# this function will add IMA file signatures to all rpms in the Packages directory +# +# the process will copy the signing key and a makefile in the mock env under /tmp +# it will also mount the Packages directory under /mnt/Packages +# then mock will be invoked to sign the packages +# +# This process is using mock because the build servers do not have the same rpm / rpmsign version +# + + +function sign_packages { + OLD_PWD=$PWD + + _MOCK_PKG_DIR=/mnt/Packages + _IMA_PRIV_KEY=ima_signing_key.priv + _KEY_DIR=$MY_REPO/build-tools/signing + _MOCK_KEY_DIR=/mnt/keys + _SIGN_MAKEFILE=_sign_pkgs.mk + _MK_DIR=$MY_REPO/build-tools/mk + _MOCK_MK_DIR=/mnt/mk + + # mock confgiuration file + _MOCK_CFG=$MY_BUILD_DIR/${MY_BUILD_ENVIRONMENT}-sign.cfg + + # recreate configuration file + rm $_MOCK_CFG + export BUILD_TYPE=std + export MY_BUILD_DIR_TOP=$MY_BUILD_DIR + modify-build-cfg $_MOCK_CFG + # and customize + echo "config_opts['chroot_setup_cmd'] = 'install shadow-utils make rpm-sign'" >> $_MOCK_CFG + echo "config_opts['root'] = 'mock-sign'" >> $_MOCK_CFG + echo "config_opts['basedir'] = '${MY_WORKSPACE}'" >> $_MOCK_CFG + echo "config_opts['cache_topdir'] = '${MY_WORKSPACE}/mock-cache'" >> $_MOCK_CFG + + echo "Signing packages in $_PKG_DIR with 
$NPROCS threads" + echo "using development key $_KEY_DIR/$_IMA_PRIV_KEY" + + printf "Initializing mock environment\n" + + # invoke make in mock to sign packages. + # this call will also create and initialize the mock env + eval $MOCK -q -r $_MOCK_CFG \'--plugin-option=bind_mount:dirs=[\(\"$_PKG_DIR\", \"$_MOCK_PKG_DIR\"\),\(\"$_MK_DIR\",\"$_MOCK_MK_DIR\"\),\(\"$_KEY_DIR\",\"$_MOCK_KEY_DIR\"\)]\' --shell \"cd $_MOCK_PKG_DIR\; make -j $NPROCS -f $_MOCK_MK_DIR/$_SIGN_MAKEFILE KEY=$_MOCK_KEY_DIR/$_IMA_PRIV_KEY\" + + retval=$? + + printf "Cleaning mock environment\n" + $MOCK -q -r $_MOCK_CFG --scrub=all + + if [ $retval -ne 0 ] ; then + echo "failed to add file signatures to RPMs in mock environment." + return $retval + fi + + cd $OLD_PWD + +} + +function _copy_and_sign { + + # upload rpms to server + scp $_PKG_DIR/*.rpm $SIGNING_USER@$SIGNING_SERVER:$_UPLOAD_DIR + retval=$? + if [ $retval -ne 0 ] ; then + echo "ERROR: failed to copy RPM files to signing server." + return $retval + fi + + # get server to sign packages. + ssh $SIGNING_USER@$SIGNING_SERVER -- sudo $SIGNING_SERVER_SCRIPT -s -d $sub + retval=$? + if [ $retval -ne 0 ] ; then + echo "ERROR: failed to sign RPM files." + return $retval + fi + + # download results back. This overwrites the original files. + scp $SIGNING_USER@$SIGNING_SERVER:$_UPLOAD_DIR/*.rpm $_PKG_DIR + retval=$? + if [ $retval -ne 0 ] ; then + echo "ERROR: failed to copy signed RPM files back from signing server." + return $retval + fi + + return $retval + +} + + +function sign_packages_on_server { + + retval=0 + + # obtain temporary diretory to upload RPMs on signing server + _UPLOAD_DIR=`ssh $SIGNING_USER@$SIGNING_SERVER -- sudo $SIGNING_SERVER_SCRIPT -r` + + retval=$? + if [ $retval -ne 0 ] ; then + echo "failed to obtain upload directory from signing server." 
+ return $retval + fi + + # extract base chroot dir and rpm dir within chroot + read base com sub <<< $_UPLOAD_DIR + + # this is the upload temp dir, outside of chroot env + _UPLOAD_DIR=$base$sub + + _copy_and_sign + retval=$? + + # cleanup + ssh $SIGNING_USER@$SIGNING_SERVER rm $_UPLOAD_DIR/*.rpm + if [ $? -ne 0 ] ; then + echo "Warning : failed to remove rpms from temporary upload directory." + fi + ssh $SIGNING_USER@$SIGNING_SERVER rmdir $_UPLOAD_DIR + if [ $? -ne 0 ] ; then + echo "Warning : failed to remove temporary upload directory." + fi + + return $retval +} + + + +############################################# +# Main code +############################################# + +# Check args +HELP=0 +SIGNING_SERVER=yow-tiks01 +SIGNING_USER=signing +SIGNING_SERVER_SCRIPT=/opt/signing/sign_rpms_18.03.sh + +# return value +retval=0 + +# read the options +TEMP=`getopt -o hd: --long help,pkg-dir: -n 'test.sh' -- "$@"` +if [ $? -ne 0 ] ; then + echo "Invalid parameters - exiting" + exit 1 +fi + +eval set -- "$TEMP" + +# extract options and their arguments into variables. +while true ; do + case "$1" in + -h|--help) HELP=1 ; shift ;; + -d|--pkg-dir) _PKG_DIR="$2"; shift; shift ;; + --) shift ; break ;; + *) echo "Internal error : unexpected parameter $2" ; exit 1 ;; + esac +done + +if [ $HELP -eq 1 ]; then + usage + exit 0 +fi + +# package directory must be defined +if [ -z "$_PKG_DIR" ]; then + echo "Need package directory. Use -d/--pkg-dir option" + usage + exit 1 +fi + +# ... and must exist +if [ ! -d "$_PKG_DIR" ]; then + echo "Package directory $_PKG_DIR does not exist" + exit 1 +fi + +# Init variables +check_vars + +echo signing $_PKG_DIR + +# sign all rpms +if [ "$USER" == "jenkins" ]; then + sign_packages_on_server + retval=$? +else + sign_packages + retval=$? 
+fi + +exit $retval + diff --git a/build-tools/sign_iso_formal.sh b/build-tools/sign_iso_formal.sh new file mode 100755 index 00000000..fc68444e --- /dev/null +++ b/build-tools/sign_iso_formal.sh @@ -0,0 +1,62 @@ +#!/bin/bash + +# This script makes a request to the signing server to sign a .iso with the +# formal key. It will only work for users authorized to access the signing +# server. The detached signature is placed in the same path as the .iso as +# the file bootimage.sig +# +# Script written to be quite simple + +if [ "x$1" == "x" ]; then + echo "You must specify an ISO file to sign" + exit 1 +fi + +ISO_FILE_PATH=$1 +ISO_FILE_NAME=$(basename ${ISO_FILE_PATH}) +ISO_FILE_ROOT=$(dirname ${ISO_FILE_PATH}) +ISO_FILE_NOEXT="${ISO_FILE_NAME%.*}" +SIGNING_SERVER="signing@yow-tiks01" +GET_UPLOAD_PATH="sudo /opt/signing/sign.sh -r" +REQUEST_SIGN="sudo /opt/signing/sign_iso.sh" +SIGNATURE_FILE="$ISO_FILE_NOEXT.sig" + +# Make a request for an upload path +# Output is a path where we can upload stuff, of the form +# "Upload: /tmp/sign_upload.5jR11pS0" +UPLOAD_PATH=`ssh ${SIGNING_SERVER} ${GET_UPLOAD_PATH}` +if [ $? -ne 0 ]; then + echo "Could not get upload path. Do you have permissions on the signing server?" + exit 1 +fi +UPLOAD_PATH=`echo ${UPLOAD_PATH} | cut -d ' ' -f 2` + +echo "Uploading file" +scp -q ${ISO_FILE_PATH} ${SIGNING_SERVER}:${UPLOAD_PATH} +if [ $? -ne 0 ]; then + echo "Could not upload ISO" + exit 1 +fi +echo "File uploaded to signing server -- signing" + +# Make the signing request. +# Output is path of detached signature +RESULT=`ssh ${SIGNING_SERVER} ${REQUEST_SIGN} ${UPLOAD_PATH}/${ISO_FILE_NAME}` +if [ $? -ne 0 ]; then + echo "Could not perform signing -- output $RESULT" + ssh ${SIGNING_SERVER} rm -f ${UPLOAD_PATH}/${ISO_FILE_NAME} + exit 1 +fi + +echo "Signing complete. Downloading detached signature" +scp -q ${SIGNING_SERVER}:${RESULT} ${ISO_FILE_ROOT}/${SIGNATURE_FILE} +if [ $? 
-ne 0 ]; then + echo "Could not download newly signed file" + ssh ${SIGNING_SERVER} rm -f ${UPLOAD_PATH}/${ISO_FILE_NAME} + exit 1 +fi + +# Clean up (ISOs are big) +ssh ${SIGNING_SERVER} rm -f ${UPLOAD_PATH}/${ISO_FILE_NAME} + +echo "${ISO_FILE_ROOT}/${SIGNATURE_FILE} detached signature" diff --git a/build-tools/sign_patch_formal.sh b/build-tools/sign_patch_formal.sh new file mode 100755 index 00000000..9056c25d --- /dev/null +++ b/build-tools/sign_patch_formal.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +# This script makes a request to the signing server to sign a .patch with the +# formal key. It will only work for users authorized to access the signing +# server. +# +# Script written to be quite simple + +if [ "x$1" == "x" ]; then + echo "You must specify a patch file to sign" + exit 1 +fi + +PATCH_FILE_PATH=$1 +PATCH_FILE_NAME=$(basename ${PATCH_FILE_PATH}) +SIGNING_SERVER="signing@yow-tiks01" +GET_UPLOAD_PATH="sudo /opt/signing/sign.sh -r" +REQUEST_SIGN="sudo /opt/signing/sign_patch.sh" + +# Make a request for an upload path +# Output is a path where we can upload stuff, of the form +# "Upload: /tmp/sign_upload.5jR11pS0" +UPLOAD_PATH=`ssh ${SIGNING_SERVER} ${GET_UPLOAD_PATH}` +if [ $? -ne 0 ]; then + echo "Could not get upload path. Do you have permissions on the signing server?" + exit 1 +fi +UPLOAD_PATH=`echo ${UPLOAD_PATH} | cut -d ' ' -f 2` + +scp -q ${PATCH_FILE_PATH} ${SIGNING_SERVER}:${UPLOAD_PATH} +if [ $? -ne 0 ]; then + echo "Could upload patch" + exit 1 +fi +echo "File uploaded to signing server" + +# Make the signing request. +# Output is path of newly signed file +RESULT=`ssh ${SIGNING_SERVER} ${REQUEST_SIGN} ${UPLOAD_PATH}/${PATCH_FILE_NAME}` +if [ $? -ne 0 ]; then + echo "Could not perform signing -- output $RESULT" + exit 1 +fi + +echo "Signing complete. Downloading" +scp -q ${SIGNING_SERVER}:${RESULT} ${PATCH_FILE_PATH} +if [ $? 
-ne 0 ]; then + echo "Could not download newly signed file" + exit 1 +fi +echo "${PATCH_FILE_PATH} now signed with formal key" diff --git a/build-tools/signing/bootimage_sig_validation_key.pem b/build-tools/signing/bootimage_sig_validation_key.pem new file mode 100644 index 00000000..596a5ab9 --- /dev/null +++ b/build-tools/signing/bootimage_sig_validation_key.pem @@ -0,0 +1,9 @@ +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvtH0vMm4PDN+5VvpXEfS +5kAEds1CXW55di9H491v+1vjzJt5iVnkA039fcH/qhx64BJI8RqddAUl1mWHT8gn +TowGsvxMNbP8GhXgEGaX+L7//Q9WP+LFZKnLfYm6OTVYL1bI6ilZ1yFYcsx5YQUl +KmIWZUv0jsfCXC0FklDCaD9l0eE79vMSF4tBbsqfEAcFJksJr3z8uNMsc3zjf2/J +B5XeWk2N+dyi14f+WFrJ9c0ZTmH0tTriZOCxo5uh75mbcTzVGZOLyDQNuKx+BSpL +scjN8fGj4tyxOBuhkqyQ7jYtvMczo/OuK5RaMizg8v5qL9WDEf4OJHd5hulG0XJD +8QIDAQAB +-----END PUBLIC KEY----- diff --git a/build-tools/signing/dev-private-key.pem b/build-tools/signing/dev-private-key.pem new file mode 100644 index 00000000..730dfffd --- /dev/null +++ b/build-tools/signing/dev-private-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAtyzT9N7rHr2XFA7G52sX65ILkX2vlspSdk1ZQofJcqU3hLNm +BYaz59v8OpcibumK6hDoYfsYRB0EQ3WP9vCnfQtmW/RSv7yOEK8R2qwg/e9Rd+/u +m/RuiZWzRDxlEj8VhmGPFiXIUquBkVX1Y8/xL1LB8KUeNphFS52/C+MceeeeRn1u +VUVtdTRd7zcDx9Z3Qc2oBTVOWYrgJ5KLgUjhpEqJcmc6+C/ZSgkwjnusEhUphFQf +y64Wk+h87CM4z9F3qvciv7j8Jfr780As4EgQ3ozTlnOiLKIfw0cQuSdAGrM2zRxc +5slalpKKcA5wGc8JzsM1HCBDONFJ1wT0UbX4twIDAQABAoIBAC2+3URHM7LeqE9T +DWuX/EZRl+AC4oonBwcSdBtAbBfcmQvj/bOdm2ppLwm22tuWKaIhDNgDbGD+WiU6 +w/Roz3tUEIEgeFX4EgaqoivZVvfz/4WTfySlm8FyiG2fI6K17Hs9/A0M7IVw3Y/d +RsZhlRs7cMHXir8ZshybTa37Tw5GVIHPz7b6MHAj2SyotkvFkKT7vYrHNfF/L4ES +m5nS5BoZuABALsh4TeO0iHFy+9p2d63+wE+evgOLuU1cuENH6GyrcrltHSbbBQLD +ckJJ0vZ8WhC/yAz7vle5Pc9oKadgNzQ6yL4f275GSnXDT0rz2tqtOreCWjNo1osn +CI8TZHECgYEA55zPwjpsGzzBDLQ90seKfe9UUQ4P3ewPURZSybklLihXWpLpb8vK +Jt+d766NyqjOKU0RDgc7/hOL/ztXfx5dQBTLQCqinOHkrbV779cMCGfMpL+vyPOU 
+6sUM6kUiRwxssmM0/7+YUSSV7ZYzYVnKjQvAWwmCrZxH21TguIxQFfkCgYEAynZe +xQpRUG6okh4t/zUikKh6pTXbFcqTxX6sX/BsbxnR1ZvnGwL+WYbFGCUukWI72K7p +IKbqCdNPkt4Tqk1c8pWIOYivubA6WFlRuoehRlnzRjSRYJ301Tbq30KD7olTuZpA +QOALvBDmsBbAJ2GCyxWwep5Oab3WHxET5JXNcC8CgYAreP5x+V9DOYMOrGISRwvS +mXJdCGOoBt/VG3iMjmjRIhdLU9nm9vxJkZ1mWIAastaHYAS8elUjWm3jiTLiFEPy +fDSuJZKCGdA/XpoVqWqDxV/e6G/JKxcffb7v1ewaI9XfIcnX2xFu4YwWnSOluSHQ +mY0QSVey2GuQWn0XhpadmQKBgGS91ZYx9hgRl+ts/PI6HELisnQ2ghcrv6/LwjXS +ygHWVST2+F3mdW1StAu5pJxOxvygu/u1UkslTDkGK77X0+IZKbmHG/lHiSChARvR +lRwOchCC47uxnTvioagJzuAIkRKPgF1Hnk8sEb2Y9HAkAxLObgv1bYsMHNlEhCN/ +rsOdAoGBANPMBK0n2wLP80sYLv4UUvih8uXv3ls79wdCDt9De1NjGBiyy+AjUmb3 +9F0HtlkIbIeAr8Vqs0pem3gWfFVgswMQsdmiXPxpxHVYfZRnvYTtx1/nNBjCz8vz +tmQjGIV21WcCKeAP/VigldpMKMfaMNNuiHSdLgq+Y1UmnvaHhFbR +-----END RSA PRIVATE KEY----- diff --git a/build-tools/signing/ima_signing_key.priv b/build-tools/signing/ima_signing_key.priv new file mode 100755 index 00000000..d291c634 --- /dev/null +++ b/build-tools/signing/ima_signing_key.priv @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCmt+G+URVAhTVK +hgoEcN3IOYxjaJFxm5NdEl7kvq+9M6LRSnF5JJfzGj0YqYqBFipI0GrJcHD4mDN8 +CrPh+hFEai6WdOwzft+NjO/yNBy6Ve0aTYwM+/8FPOFS/z0DiUpWIP9WEaF0Y4fo +eREiHibCO0u2HaceG7WDeze2rHwO6Zc2e7KPsoF/VB+zVs0rxNTDaJrhEnn2TKAU +ej/7wDJ4QMNy2Vmzq7Ha9U9sPlpoBNLHdJA+Iz4qU7SiLc6+Q9xhdk4/xdoe4HEN +UUelwEnakM8Yw21SJoRIV2z1UjqlNStN8yQOPJxmhZnLfKQv0lRgwUsdLL0OkmGo +wZNpjk05AgMBAAECggEAVOeQr7vbduBgI3Pf5iMUz8N/G6FFcPYqNlwEt7nlBhYl +9TBUWb2pJMVYGBc/0j3Xiv2ZZL0+y1u33JQWyB7ybEUnNLiXEiKq9wuvp0mi18Kq +3ZpgmLw9RA2dMweJqyNgGDz1zH9uGjgWHu6JKzbvxjssc7LmSQ8TgcJevUlaZLz/ +GinLFhNqI2Rjrv8l5yznPbHv0gbkl/Wm0j7MN3vQrP82190AyT6iq2+OUaYljY4E +n7P40wqtAZMbp6UnkO4kYdKVzh4Nhq9/6/XstgZLraXaauVVfYU2BlhHR0DaeIm2 +SLRctBa9opL/qqF15xUMpnBZYQIgc0ATFyi30c8zgQKBgQDVq73MTRO2gHPGX5uA +jU0jRUtyC0yRaOWukrVZAEXW1JFkTZPYcFxoL0kckMBfL+hxCsC95jsGPICY9whv +G/ReYpJq32tWUHQdPmBahZR2YxcROju/nDZ6HrwzC31y2tlNZZizP9mjXZ8x6sBE 
+KkTuDMYxifMJzlzj3I3XQ1ToXwKBgQDHvvQaS7aZfG7mEHUv2nUg1//ZCOWkkpnC +kPGV0HadGxhknSRtFqScjdV7QTH16eiy39ukJVGDrYRXWiSFQ2bkOWAl4wYda0kt +jbr8AZcSo4V2EhNVkuEpvITJQeJESD2QNJrzkGKXZgq27fs0gyL4ZyYN5/BM+zwR +3mWw5pmRZwKBgQCMX2K2iGCOAj+NMJxpTOtDY6/YAYJQ+TgDg1BuYxZ+t3G5Epox +lEexDneas6IeJswvD6BmYY2fYDofxBMfY58cLugAHD1ZHEvcNnF4ps9NI3YxFC/S +ht7nRa/gXQkSXqJqDXfPMwBZKhV+5rlfTuGv/mZqfzVS2Ob/r5Lju9FZQwKBgQCm +12wjjrxd5d7n0jVgHDQbk/ADg/f1B/Lg3pq8DV0WCyzL1ao4aGJk1d/MTP+DLbcN +ughHSliVCVlycTvyCAOoe/hAjSltZ1C/eRQHLPjYO7fdkGmP39itOOcdCCgL6t9p +VVdcPGztkvE+LPfXu/V40QerM+G3G17oegAsy+DX3wKBgBbbeMBwv+EDwUkuFyTY +v/np0EAxNjMj5TXLzOrlli7MTDYSGdAxr0kudje1rPYq/+EgjCftINYlfMioV/HI +cKDnza6HwWAK5TMX6OtHdGzunQDcHM8es+RPUDcYnZ4ZUE4pcRnY/ZAh68oyg8mD +mIodrWbJ+tXmV4jTvgpQKM7e +-----END PRIVATE KEY----- diff --git a/build-tools/source_lookup.txt b/build-tools/source_lookup.txt new file mode 100644 index 00000000..4a6e524e --- /dev/null +++ b/build-tools/source_lookup.txt @@ -0,0 +1,87 @@ +git://git.qemu.org/qemu.git qemu-kvm-ev 2.3.0 +https://github.com/openstack/nova.git openstack-nova 2015.1.0 +git://libvirt.org/libvirt.git libvirt 1.2.17 +http://www.drbd.org/download/drbd/8.4/archive/drbd-8.4.3.tar.gz drbd 8.4.3 +https://github.com/openstack/neutron.git openstack-neutron 2015.1.2 +https://github.com/openstack/ceilometer.git openstack-ceilometer 2015.1.2 +git://dpdk.org/dpdk cgcs-dpdk 2.2.0 +git://dpdk.org/dpdk cgcs-dpdk-rt 2.2.0 +http.debian.net/debian/pool/main/d/dpkg/dpkg_1.18.4.tar.xz dpkg 1.18.4 +https://sourceforge.net/projects/e1000/files/i40e%20stable/1.4.25/i40e-1.4.25.tar.gz/download i40e-kmod 1.4.25 +http://dpdk.org/download/mlx4/2015-05-27-DPDK-v2.0.0/libmlx4-1.0.5mlnx1.tar.gz libmlx4-dpdk 1.0.5 +https://www.kernel.org/pub/software/utils/dtc/dtc-1.4.0.tar.gz libfdt 1.4.0 +https://github.com/openstack/heat.git openstack-heat 2015.1.2 +https://github.com/openstack/keystone.git openstack-keystone 2015.1.0 +https://github.com/openstack/puppet-ceilometer.git puppet-ceilometer 5.1.0 
+https://github.com/openstack/puppet-ceph.git puppet-ceph 0.1.0 +https://github.com/openstack/puppet-cinder.git puppet-cinder 5.1.0 +https://github.com/openstack/puppet-glance.git puppet-glance 5.1.0 +https://github.com/openstack/puppet-heat.git puppet-heat 5.1.0 +https://github.com/openstack/puppet-horizon.git puppet-horizon 5.1.0 +https://github.com/openstack/puppet-keystone.git puppet-keystone 5.1.0 +https://github.com/openstack/puppet-neutron.git puppet-neutron 5.1.0 +https://github.com/openstack/puppet-nova.git puppet-nova 5.1.0 +https://github.com/openstack/puppet-openstacklib.git puppet-openstacklib 5.1.0 +https://github.com/openstack/puppet-swift.git puppet-swift 5.1.0 +https://github.com/openstack/puppet-tempest.git puppet-tempest 5.1.0 +https://github.com/openstack/puppet-vswitch.git puppet-vswitch 1.1.0 +https://github.com/adrienthebo/puppet-boolean.git puppet-boolean 1.0.2 +https://github.com/rcritten/puppet-certmonger.git puppet-certmonger 1.0.3 +https://github.com/puppetlabs/puppetlabs-concat.git puppet-concat 1.2.3 +https://github.com/puppetlabs/puppetlabs-create_resources.git puppet-create_resources 0.0.1 +github.com/netmanagers/puppet-dnsmasq puppet-dnsmasq 1.1.0 +https://github.com/puppetlabs/puppetlabs-drbd.git puppet-drbd 0.1.0 +https://github.com/voxpupuli/puppet-filemapper puppet-filemapper 1.1.3 +https://github.com/puppetlabs/puppetlabs-firewall.git puppet-firewall 1.6.0 +https://github.com/puppetlabs/puppetlabs-haproxy.git puppet-haproxy 1.2.0 +https://github.com/puppetlabs/puppetlabs-inifile.git puppet-inifile 1.3.0 +https://github.com/camptocamp/puppet-kmod puppet-kmod 2.1.1 +https://github.com/torian/puppet-ldap puppet-ldap 0.2.4 +https://github.com/puppetlabs/puppetlabs-lvm.git puppet-lvm 0.5.0 +https://github.com/voxpupuli/puppet-network puppet-network 1.0.2 +https://github.com/jlyheden/puppet-nslcd puppet-nslcd 0.0.1 +https://github.com/rcritten/puppet-nssdb puppet-nssdb 1.0.1 +https://github.com/puppetlabs/puppetlabs-postgresql.git 
puppet-postgresql 4.3.0 +https://github.com/example42/puppi puppet-puppi 2.1.11 +https://github.com/puppetlabs/puppetlabs-rabbitmq.git puppet-rabbitmq 5.2.2 +https://github.com/puppetlabs/puppetlabs-rsync.git puppet-rsync 0.4.0 +https://github.com/puppetlabs/puppetlabs-stdlib.git puppet-stdlib 4.6.0 +https://github.com/puppetlabs/puppetlabs-sysctl.git puppet-sysctl 0.1.0 +https://github.com/puppetlabs/puppetlabs-vcsrepo.git puppet-vcsrepo 1.3.0 +https://github.com/derekhiggins/puppet-vlan puppet-vlan 0.1.0 +https://github.com/puppetlabs/puppetlabs-xinetd.git puppet-xinetd 1.5.0 +https://github.com/dmsimard/python-cephclient python-cephclient 0.1.0.5 +https://github.com/jaraco/keyring python-keyring 5.3 +http://vincentbernat.github.com/lldpd/ lldpd 0.9.0 +http://launchpad.net/smart/trunk/1.4.1/+download/smart-1.4.1.tar.bz2 python-smartpm 1.4.1 +https://launchpad.net/tempest tempest 4 +https://toolbelt.readthedocs.org/ requests-toolbelt 0.5.1 +https://pypi.python.org/pypi/WSME python-wsme 0.6.4 +https://github.com/madkiss/openstack-resource-agents/tree/stable-grizzly openstack-ras 1.0.0 +https://github.com/openstack/python-ceilometerclient python-ceilometerclient 1.0.14 +https://github.com/openstack/python-cinderclient/archive python-cinderclient 1.1.3 +http://horizon.openstack.org/ python-django-horizon 2015.1.0 +http://github.com/openstack/python-glanceclient python-glanceclient 0.17.1 +https://github.com/openstack/python-heatclient python-heatclient 0.4.0 +https://github.com/openstack/python-keystoneclient python-keystoneclient 1.3.1 +http://launchpad.net/python-neutronclient/ python-neutronclient 2.4.0 +https://pypi.python.org/pypi/python-novaclient python-novaclient 2.23.0 +https://en.osdn.jp/projects/sfnet_ldapscripts/releases/ ldapscripts 2.0.5 +http://dpdk.org/download/mlx4/2015-05-27-DPDK-v2.0.0/libibverbs-1.1.7mlnx1.tar.gz libibverbs-dpdk 1.1.7 +http://www.openstack.org/software/openstack-storage/ openstack-cinder 2015.1.0 +http://glance.openstack.org 
openstack-glance 2015.1.0 +https://github.com/stackforge/packstack packstack 2014.1.0 +https://github.com/stackforge/puppet puppet 3.7.4 +http://www.drbd.org/ drbd-kernel 8.4.7 +http://pypi.python.org/pypi/django_openstack_auth/ python-django-openstack-auth 1.2.0 +http://ceph.com/ ceph 0.94.6 +https://sourceforge.net/p/ibmtpm20tss/tss/ci/v930/tree/ tss2 930 +https://git.centos.org/git/rpms/rt-setup rt-setup 1.59 +https://git.centos.org/git/rpms/rtctl rtctl 1.13 +https://github.com/openstack/kingbird.git distributedcloud 1.0.0 +https://github.com/openstack/python-kingbirdclient.git distributedcloud-client 1.0.0 +http://git.infradead.org/users/jjs/linux-tpmdd.git tpm-kmod 4.12 +http://git.infradead.org/users/jjs/linux-tpmdd.git tpm-kmod-rt 4.12 +http://git.infradead.org/users/jjs/linux-tpmdd.git integrity-kmod 4.12 # yes, integrity (IMA) and tpm come from the same place +http://git.infradead.org/users/jjs/linux-tpmdd.git integrity-kmod-rt 4.12 + diff --git a/build-tools/spec-utils b/build-tools/spec-utils new file mode 100644 index 00000000..23d6e471 --- /dev/null +++ b/build-tools/spec-utils @@ -0,0 +1,686 @@ +RPM_MACRO_FILE=/usr/lib/rpm/macros + +spec_query_with_macros () { + local SPEC_FILE=$1; shift + local BUILD_DIR=$1; shift + local TIS_PATCH_VER=$1; shift + local rc + + TMPSPEC=$(mktemp /tmp/spec-utils-XXXXXX) + cat $SPEC_FILE | sed 's/%(rpm.*)/%(echo 0)/' > $TMPSPEC + + rpmspec -P \ + --define="_tis_build_type ${BUILD_TYPE:-std}" \ + --define="_tis_dist .tis" \ + --define="tis_patch_ver ${TIS_PATCH_VER:-0}" \ + --define="platform_release ${PLATFORM_RELEASE:-00.00}" \ + --define="%_topdir $BUILD_DIR" \ + "${@}" \ + $TMPSPEC 2>> /dev/null + rc=$? 
+ + \rm -f $TMPSPEC + return $rc +} + +spec_evaluate () { + local RAW_VALUE=$1 + local SPEC_FILE=$2 + local RPMBUILD_DIR=$3 + + local MACRO="" + local MACRO_VALUE="" + + if [ "x$RPMBUILD_DIR" == "x" ];then + RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE)) + fi + + MACRO=`expr match "$RAW_VALUE" '.*\(%{[^}]*}\)'` + if [ $? -ne 0 ]; then + echo "$RAW_VALUE" + return 0 + fi + + # >&2 echo "spec_evaluate: MACRO=$MACRO" + local MACRO_NAME2=${MACRO#%{} + local MACRO_NAME3=${MACRO_NAME2%\}} + local PREFIX=`expr match "$MACRO_NAME3" '\([!?]*\)'` + local MACRO_NAME=${MACRO_NAME3#${PREFIX}} + + # >&2 echo "spec_evaluate: MACRO_NAME=$MACRO_NAME" + MACRO_VALUE=`spec_find_macro $MACRO_NAME $SPEC_FILE $RPMBUILD_DIR` + if [ $? -ne 0 ]; then + MACRO_VALUE=`spec_find_global $MACRO_NAME $SPEC_FILE $RPMBUILD_DIR` + if [ $? -ne 0 ]; then + MACRO_VALUE=`spec_find_tag ${MACRO_NAME^} $SPEC_FILE $RPMBUILD_DIR` + if [ $? -ne 0 ]; then + MACRO_VALUE=`macro_find_macro $MACRO_NAME $SPEC_FILE $RPMBUILD_DIR` + if [ $? -ne 0 ]; then + MACRO_VALUE=`spec_find_macro_via_rpm $MACRO_NAME $SPEC_FILE $RPMBUILD_DIR` + if [ $? -ne 0 ]; then + case "$MACRO_NAME" in + + _tis_build_type) MACRO_VALUE="${BUILD_TYPE}" ;; + _tis_dist) MACRO_VALUE=".tis" ;; + tis_patch_ver) MACRO_VALUE="{TIS_PATCH_VER:-0}" ;; + platform_release) MACRO_VALUE="$PLATFORM_RELEASE" ;; + _topdir) MACRO_VALUE="$BUILD_DIR" ;; + *) ;; + esac + + if [ "x$MACRO_VALUE" == "x" ]; then + if [ "$PREFIX" == '?' 
]; then + >&2 echo "NOTE: optional macro '$MACRO' not defined" + else + >&2 echo "ERROR: evaluation of macro '$MACRO' failed" + return 1 + fi + fi + fi + fi + fi + fi + fi + + # >&2 echo "spec_evaluate: MACRO_VALUE=$MACRO_VALUE" + local NEW_VALUE=${RAW_VALUE/"${MACRO}"/${MACRO_VALUE}} + # >&2 echo "spec_evaluate: NEW_VALUE=$NEW_VALUE" + spec_evaluate "$NEW_VALUE" "$SPEC_FILE" "$RPMBUILD_DIR" +} + +macro_find_macro () { + local TARGET=$1 + local SPEC_FILE=$2 + local RPMBUILD_DIR=$3 + local LINE="" + + if [ "x$RPMBUILD_DIR" == "x" ];then + RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE)) + fi + + # >&2 echo "grep ^%$TARGET $RPM_MACRO_FILE" + LINE=`grep "^%$TARGET[[:space:]]" $RPM_MACRO_FILE` + if [ $? -eq 1 ]; then + >&2 echo "macro_find_macro: '%$TARGET' not found in file '$RPM_MACRO_FILE'" + echo "" + return 1 + fi + + # >&2 echo "macro_find_macro: LINE=$LINE" + local UNSTRIPED_VALUE=${LINE##"%$TARGET"} + # >&2 echo "macro_find_macro: UNSTRIPED_VALUE=$UNSTRIPED_VALUE" + local RAW_VALUE=${UNSTRIPED_VALUE//[[:space:]]/} + # >&2 echo "macro_find_macro: RAW_VALUE=$RAW_VALUE" + + spec_evaluate "$RAW_VALUE" "$SPEC_FILE" "$RPMBUILD_DIR" +} + +spec_find_macro_via_rpm () { + local TARGET=$1 + local SPEC_FILE=$2 + local RPMBUILD_DIR=$3 + + local RC=1 + + if [ "x$RPMBUILD_DIR" == "x" ];then + RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE)) + fi + + # >&2 echo "spec_find_macro_via_rpm: TARGET=$TARGET" + + case "$TARGET" in + name|_name) (spec_query_with_macros $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} -q --qf '%{NAME}\n' | head -n 1 ; exit ${PIPESTATUS[0]} ); RC=$? ;; + version|_version) (spec_query_with_macros $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} -q --qf '%{VERSION}\n' | head -n 1 ; exit ${PIPESTATUS[0]} ); RC=$? ;; + release|_release) (spec_query_with_macros $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} -q --qf '%{RELEASE}\n' | head -n 1 ; exit ${PIPESTATUS[0]} ); RC=$? 
;; + *) ;; + esac + + if [ $RC -ne 0 ]; then + echo "" + fi + return $RC +} + +spec_find_macro () { + local TARGET=$1 + local SPEC_FILE=$2 + local RPMBUILD_DIR=$2 + local LINE="" + local UNSTRIPED_VALUE="" + + if [ "x$RPMBUILD_DIR" == "x" ];then + RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE)) + fi + + # >&2 echo "grep ^%define $TARGET $SPEC_FILE" + LINE=`grep "^%define $TARGET[[:space:]]" $SPEC_FILE` + if [ $? -eq 1 ]; then + LINE=`grep "^%$TARGET[[:space:]]" $SPEC_FILE` + if [ $? -eq 1 ]; then + >&2 echo "spec_find_macro: Neither '%define $TARGET' nor '%$TARGET' not found in file '$SPEC_FILE'" + echo "" + return 1 + else + UNSTRIPED_VALUE=${LINE##"%$TARGET"} + fi + else + UNSTRIPED_VALUE=${LINE##"%define $TARGET"} + fi + + # >&2 echo "spec_find_macro: LINE=$LINE" + # >&2 echo "spec_find_macro: UNSTRIPED_VALUE=$UNSTRIPED_VALUE" + local RAW_VALUE=$(echo ${UNSTRIPED_VALUE} | sed -e 's/^ *//g;s/ *$//g') + # >&2 echo "spec_find_macro: RAW_VALUE=$RAW_VALUE" + + spec_evaluate "$RAW_VALUE" "$SPEC_FILE" "$RPMBUILD_DIR" +} + +spec_find_tag () { + local TARGET=$1 + local SPEC_FILE=$2 + local RPMBUILD_DIR=$3 + local TIS_PATCH_VER=$4 + local LINE="" + + if [ "x$RPMBUILD_DIR" == "x" ];then + RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE)) + fi + + local SPEC_FILE2=$(mktemp /tmp/tmp_spec_XXXXXX.spec) + + # Note: ${VAR:-val} is bash syntax for providing a default value. + # ie. if $VAR is not set, use 'val' as default value + spec_query_with_macros $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} > $SPEC_FILE2 + if [ $? != 0 ]; then + # spec_query_with_macros can fail on grub2 if it's just a spec file without SOURCES + \cp $SPEC_FILE $SPEC_FILE2 + fi + + LINE=$(grep "^$TARGET:" $SPEC_FILE2 | head -n 1 ; exit ${PIPESTATUS[0]}) + if [ $? -eq 1 ]; then + LINE=$(grep "^${TARGET^}:" $SPEC_FILE2 | head -n 1 ; exit ${PIPESTATUS[0]}) + if [ $? 
-eq 1 ]; then + >&2 echo "spec_find_tag: '$TARGET:' not found in file '$SPEC_FILE'" + echo "" + \rm -f "$SPEC_FILE2" + return 1 + else + TARGET=${TARGET^} + fi + fi + \rm -f "$SPEC_FILE2" + + # >&2 echo "spec_find_tag: LINE=$LINE" + local UNSTRIPED_VALUE=${LINE##"$TARGET:"} + # >&2 echo "spec_find_tag: UNSTRIPED_VALUE=$UNSTRIPED_VALUE" + local RAW_VALUE=${UNSTRIPED_VALUE//[[:space:]]/} + # >&2 echo "spec_find_tag: RAW_VALUE=$RAW_VALUE" + + spec_evaluate "$RAW_VALUE" "$SPEC_FILE" "$RPMBUILD_DIR" +} + +spec_find_multi_tag () { + local TARGET=$1 + local SPEC_FILE=$2 + local RPMBUILD_DIR=$2 + local LINE="" + + if [ "x$RPMBUILD_DIR" == "x" ];then + RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE)) + fi + + while read LINE; do + # >&2 echo "spec_find_multi_tag: LINE=$LINE" + local UNSTRIPED_VALUE=${LINE##"$TARGET:"} + # >&2 echo "spec_find_multi_tag: UNSTRIPED_VALUE=$UNSTRIPED_VALUE" + local RAW_VALUE=${UNSTRIPED_VALUE//[[:space:]]/} + # >&2 echo "spec_find_multi_tag: RAW_VALUE=$RAW_VALUE" + + spec_evaluate "$RAW_VALUE" "$SPEC_FILE" "$RPMBUILD_DIR" + done << EOF +$(grep "^$TARGET:" $SPEC_FILE) +EOF +} + +spec_find_global () { + local TARGET=$1 + local SPEC_FILE=$2 + local RPMBUILD_DIR=$3 + local LINE="" + + if [ "x$RPMBUILD_DIR" == "x" ];then + RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE)) + fi + + LINE=`grep "^%global $TARGET" $SPEC_FILE` + if [ $? 
-eq 1 ]; then + >&2 echo "spec_find_global: '%global $TARGET' not found in file '$SPEC_FILE'" + echo "" + return 1 + fi + + # >&2 echo "spec_find_global: LINE=$LINE" + local UNSTRIPED_VALUE=${LINE##"%global $TARGET"} + # >&2 echo "spec_find_global: UNSTRIPED_VALUE=$UNSTRIPED_VALUE" + local RAW_VALUE=${UNSTRIPED_VALUE//[[:space:]]/} + # >&2 echo "spec_find_global: RAW_VALUE=$RAW_VALUE" + + spec_evaluate "$RAW_VALUE" "$SPEC_FILE" "$RPMBUILD_DIR" +} + +spec_find_patch_args () { + local PATCH_NO="$1" + local SPEC_FILE="$2" + local RPMBUILD_DIR="$3" + + local LINE="" + local LINE2="" + local PATCH_LOWER_NO + + if [ "x$RPMBUILD_DIR" == "x" ];then + RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE)) + fi + + PATCH_LOWER_NO=$(echo $PATCH_NO | tr '[:upper:]' '[:lower:]') + LINE=`grep "^%$PATCH_LOWER_NO " $SPEC_FILE` + if [ $? -eq 1 ]; then + >&2 echo "pec_find_patch_args: $PATCH_LOWER_NO' not found in file '$SPEC_FILE'" + echo "-p1" + return 1 + fi + LINE2=$(spec_evaluate "$LINE" "$SPEC_FILE" "$RPMBUILD_DIR") + + echo $LINE2 | cut -d' ' -f2- | sed 's/-b/-b -z/' + return 0 +} + +spec_list_packages () { + local SPEC_FILE=$1 + local RPMBUILD_DIR=$2 + + local d=$(dirname $SPEC_FILE) + local bd=$(basename $d) + + if [ "x$RPMBUILD_DIR" == "x" ];then + RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE)) + fi + + if [ "$bd" == "SPECS" ]; then + local dd=$(dirname $d) + spec_query_with_macros $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} -q --qf '%{name}\n' --define="%_topdir $dd" 2>> /dev/null + else + spec_query_with_macros $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} -q --qf '%{name}\n' 2>> /dev/null + fi + + if [ $? -ne 0 ]; then + # spec_query_with_macros can fail on grub2 if it's just a spec file without SOURCES + local NAME=$(spec_find_tag Name "$SPEC_FILE" $RPMBUILD_DIR ${TIS_PATCH_VER:-0}) + if [ $? 
-ne 0 ]; then + >&2 echo "ERROR: failed to evaluate 'Name'" + return 1 + fi + echo $NAME + + grep "^%package" $SPEC_FILE | while read PACKAGE_LINE; do + local PKG_NAME="" + local PKG_NAME_RAW=$(echo $PACKAGE_LINE | awk '{ print $2 }') + # >&2 echo "spec_list_packages: PKG_NAME_RAW=$PKG_NAME_RAW" + + local PKG_NAME_TEMP="" + if [ "$PKG_NAME_RAW" == "-n" ]; then + PKG_NAME_TEMP=$(echo $PACKAGE_LINE | awk '{ print $3 }') + else + PKG_NAME_TEMP="$NAME-$PKG_NAME_RAW" + fi + # >&2 echo "spec_list_packages: PKG_NAME_TEMP=$PKG_NAME_TEMP" + + PKG_NAME=$(spec_evaluate "$PKG_NAME_TEMP" "$SPEC_FILE" $RPMBUILD_DIR ${TIS_PATCH_VER:-0}) + if [ $? -ne 0 ]; then + >&2 echo "ERROR: failed to evaluate package '$PACKAGE_LINE'" + return 1 + fi + # >&2 echo "spec_list_packages: PKG_NAME=$PKG_NAME" + + echo $PKG_NAME + done + fi +} + +spec_list_versioned_packages () { + local SPEC_FILE=$1 + local RPMBUILD_DIR=$2 + + local d=$(dirname $SPEC_FILE) + local bd=$(basename $d) + + if [ "x$RPMBUILD_DIR" == "x" ];then + RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE)) + fi + + if [ "$bd" == "SPECS" ]; then + local dd=$(dirname $d) + spec_query_with_macros $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} -q --qf '%{name}-%{version}\n' --define="%_topdir $dd" 2>> /dev/null + else + spec_query_with_macros $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} -q --qf '%{name}-%{version}\n' 2>> /dev/null + fi + + if [ $? -ne 0 ]; then + # spec_query_with_macros can fail on grub2 if it's just a spec file without SOURCES + local NAME=$(spec_find_tag Name "$SPEC_FILE" $RPMBUILD_DIR ${TIS_PATCH_VER:-0}) + if [ $? 
-ne 0 ]; then + >&2 echo "ERROR: failed to evaluate 'Name'" + return 1 + fi + + local VERSION=$(spec_find_tag Version "$SPEC_FILE" $RPMBUILD_DIR ${TIS_PATCH_VER:-0}) + echo "$NAME-$VERSION" + + grep "^%package" $SPEC_FILE | while read PACKAGE_LINE; do + local PKG_NAME="" + local PKG_NAME_RAW=$(echo $PACKAGE_LINE | awk '{ print $2 }') + # >&2 echo "spec_list_packages: PKG_NAME_RAW=$PKG_NAME_RAW" + + local PKG_NAME_TEMP="" + if [ "$PKG_NAME_RAW" == "-n" ]; then + PKG_NAME_TEMP=$(echo $PACKAGE_LINE | awk '{ print $3 }') + else + PKG_NAME_TEMP="$NAME-$PKG_NAME_RAW" + fi + # >&2 echo "spec_list_packages: PKG_NAME_TEMP=$PKG_NAME_TEMP" + + PKG_NAME=$(spec_evaluate "$PKG_NAME_TEMP" "$SPEC_FILE" $RPMBUILD_DIR ${TIS_PATCH_VER:-0}) + if [ $? -ne 0 ]; then + >&2 echo "ERROR: failed to evaluate package '$PACKAGE_LINE'" + return 1 + fi + # >&2 echo "spec_list_packages: PKG_NAME=$PKG_NAME" + + echo "$PKG_NAME-$VERSION" + done + fi +} + +spec_name_ver_rel () { + local SPEC_FILE=$1 + local RPMBUILD_DIR=$2 + + local NAME="" + local d=$(dirname $SPEC_FILE) + local bd=$(basename $d) + + if [ "x$RPMBUILD_DIR" == "x" ];then + RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE)) + fi + + NAME=$(spec_find_tag Name $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0}) + VERSION=$(spec_find_tag Version $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0}) + RELEASE=$(spec_find_tag Release $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0}) + echo "$NAME-$VERSION-$RELEASE" +} + +spec_list_ver_rel_packages () { + local SPEC_FILE=$1 + local RPMBUILD_DIR=$2 + + local d=$(dirname $SPEC_FILE) + local bd=$(basename $d) + + if [ "x$RPMBUILD_DIR" == "x" ];then + RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE)) + fi + + if [ "$bd" == "SPECS" ]; then + local dd=$(dirname $d) + spec_query_with_macros $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} -q --qf '%{name}-%{version}-%{release}\n' --define="%_topdir $dd" 2>> /dev/null + else + spec_query_with_macros $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} -q --qf 
'%{name}-%{version}-%{release}\n' 2>> /dev/null + fi + + if [ $? -ne 0 ]; then + # spec_query_with_macros can fail on grub2 if it's just a spec file without SOURCES + local NAME=$(spec_find_tag Name "$SPEC_FILE" $RPMBUILD_DIR ${TIS_PATCH_VER:-0}) + if [ $? -ne 0 ]; then + >&2 echo "ERROR: failed to evaluate 'Name'" + return 1 + fi + + local VERSION=$(spec_find_tag Version "$SPEC_FILE" $RPMBUILD_DIR ${TIS_PATCH_VER:-0}) + local RELEASE=$(spec_find_tag Release "$SPEC_FILE" $RPMBUILD_DIR ${TIS_PATCH_VER:-0}) + echo "$NAME-$VERSION-$RELEASE" + + grep "^%package" $SPEC_FILE | while read PACKAGE_LINE; do + local PKG_NAME="" + local PKG_NAME_RAW=$(echo $PACKAGE_LINE | awk '{ print $2 }') + # >&2 echo "spec_list_packages: PKG_NAME_RAW=$PKG_NAME_RAW" + + local PKG_NAME_TEMP="" + if [ "$PKG_NAME_RAW" == "-n" ]; then + PKG_NAME_TEMP=$(echo $PACKAGE_LINE | awk '{ print $3 }') + else + PKG_NAME_TEMP="$NAME-$PKG_NAME_RAW" + fi + # >&2 echo "spec_list_packages: PKG_NAME_TEMP=$PKG_NAME_TEMP" + + PKG_NAME=$(spec_evaluate "$PKG_NAME_TEMP" "$SPEC_FILE" $RPMBUILD_DIR ${TIS_PATCH_VER:-0}) + if [ $? 
-ne 0 ]; then + >&2 echo "ERROR: failed to evaluate package '$PACKAGE_LINE'" + return 1 + fi + # >&2 echo "spec_list_packages: PKG_NAME=$PKG_NAME" + + echo "$PKG_NAME-$VERSION-$RELEASE" + done + fi +} + +spec_list_ver_rel_arch_packages () { + local SPEC_FILE=$1 + local RPMBUILD_DIR=$2 + + local d=$(dirname $SPEC_FILE) + local bd=$(basename $d) + + if [ "x$RPMBUILD_DIR" == "x" ];then + RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE)) + fi + + if [ "$bd" == "SPECS" ]; then + local dd=$(dirname $d) + spec_query_with_macros $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} -q --qf '%{name}-%{version}-%{release}.%{arch}\n' --define="%_topdir $dd" 2>> /dev/null + else + spec_query_with_macros $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} -q --qf '%{name}-%{version}-%{release}.%{arch}\n' 2>> /dev/null + fi +} + + +spec_match_package_list () { + local Aname=$1[@] + local TARGET_LIST=("${!Aname}") + local SPEC_FILE=$2 + local RPMBUILD_DIR=$3 + local TARGET + local PKG_NAME + + if [ "x$RPMBUILD_DIR" == "x" ];then + RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE)) + fi + + for PKG_NAME in `spec_list_packages "$SPEC_FILE" "$RPMBUILD_DIR"`; do + for TARGET in "${TARGET_LIST[@]}"; do + if [ "$PKG_NAME" == "$TARGET" ]; then + echo $TARGET + return 0 + fi + if [ $BUILD_TYPE == "rt" ] && [ "$PKG_NAME" == "${TARGET}-rt" ]; then + echo $TARGET + return 0 + fi + done + done + + return 1 +} + + +spec_match_package () { + local TARGET=$1 + local SPEC_FILE=$2 + local RPMBUILD_DIR=$3 + local PKG_NAME + + if [ "x$RPMBUILD_DIR" == "x" ];then + RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE)) + fi + + for PKG_NAME in `spec_list_packages "$SPEC_FILE" "$RPMBUILD_DIR"`; do + if [ "$PKG_NAME" == "$TARGET" ]; then + echo "found target '$TARGET' in file '$SPEC_FILE' as a package name" + return 0 + fi + done + + return 1 +} + +spec_match_target_list () { + local Aname=$1[@] + local TARGET_LIST=("${!Aname}") + local SPEC_FILE=$2 + local RPMBUILD_DIR=$3 + local TARGET + local NAME + local SERVICE + local PKG_NAME + 
+ if [ "x$RPMBUILD_DIR" == "x" ];then + RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE)) + fi + + NAME=`spec_find_tag Name "$SPEC_FILE" "$RPMBUILD_DIR"` + if [ $? -eq 0 ]; then + for TARGET in "${TARGET_LIST[@]}"; do + if [ "$NAME" == "$TARGET" ]; then + echo $TARGET + return 0 + fi + if [ $BUILD_TYPE == "rt" ] && [ "$NAME" == "${TARGET}-rt" ]; then + echo $TARGET + return 0 + fi + done + fi + + SERVICE=`spec_find_global service "$SPEC_FILE" "$RPMBUILD_DIR"` + if [ $? -eq 0 ]; then + for TARGET in "${TARGET_LIST[@]}"; do + if [ "$SERVICE" == "$TARGET" ]; then + echo $TARGET + return 0 + fi + if [ $BUILD_TYPE == "rt" ] && [ "$SERVICE" == "${TARGET}-rt" ]; then + echo $TARGET + return 0 + fi + done + fi + + spec_match_package_list TARGET_LIST "$SPEC_FILE" "$RPMBUILD_DIR" + if [ $? -eq 0 ]; then + return 0 + fi + + return 1 +} + + +spec_match_target () { + local TARGET=$1 + local SPEC_FILE=$2 + local RPMBUILD_DIR=$3 + local NAME + local SERVICE + local PKG_NAME + + if [ "x$RPMBUILD_DIR" == "x" ];then + RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE)) + fi + + NAME=`spec_find_tag Name "$SPEC_FILE" "$RPMBUILD_DIR"` + if [ $? -eq 0 ]; then + if [ "$NAME" == "$TARGET" ]; then + echo "found target '$TARGET' in file '$SPEC_FILE' as a name" + return 0 + fi + fi + + SERVICE=`spec_find_global service "$SPEC_FILE" "$RPMBUILD_DIR"` + if [ $? -eq 0 ]; then + if [ "$SERVICE" == "$TARGET" ]; then + echo "found target '$TARGET' in file '$SPEC_FILE' as a service" + return 0 + fi + fi + + spec_match_package "$TARGET" "$SPEC_FILE" "$RPMBUILD_DIR" + if [ $? 
-eq 0 ]; then + return 0 + fi + + return 1 +} + + +spec_build_requires () { + local SPEC_FILE=$1 + local RPMBUILD_DIR=$2 + + if [ "x$RPMBUILD_DIR" == "x" ];then + RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE)) + fi + + spec_find_multi_tag BuildRequires $SPEC_FILE $RPMBUILD_DIR +} + +spec_untar_path () { + local SOURCE_NO=$1 + local SPEC_FILE=$2 + local RPMBUILD_DIR=$3 + + >&2 echo "spec_untar_path SOURCE_NO=$SOURCE_NO SPEC_FILE=$SPEC_FILE" + local UNTAR_PATH="." + local AFTER="" + + if [ "x$RPMBUILD_DIR" == "x" ];then + RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE)) + fi + + local SETUP=$(spec_find_macro setup $SPEC_FILE $RPMBUILD_DIR) + AFTER=$(echo "$SETUP " | grep -o -e "[-]a[[:space:]]$SOURCE_NO[[:space:]]") + if [ $? -eq 0 ]; then + UNTAR_PATH=$(echo "$SETUP " | grep -o -e '[-]n[[:space:]][^[:space:]]*[[:space:]]' | awk '{ print $2}'; exit ${PIPESTATUS[1]}) + if [ $? -ne 0 ]; then + NAME=$( spec_find_tag Name $SPEC_FILE $RPMBUILD_DIR) + VERSION=$(spec_find_tag Version $SPEC_FILE $RPMBUILD_DIR) + UNTAR_PATH="$NAME-$VERSION" + fi + fi + echo "$UNTAR_PATH" + return 0 +} + + +spec_validate_tis_release () { + local SPEC_FILE=$1 + + # TIS Release value must include either %{?_tis_dist}.%{tis_patch_ver} or %{tis_patch_ver}%{?_tis_dist} + # Because spec_query_with_macros defines tis_patch_ver, we're using rpmspec directly here + rpmspec --define='_tis_dist .tis' -P $SPEC_FILE 2>/dev/null | grep '^Release:' \ + | grep -qvE '\.tis\.%{tis_patch_ver}|%{tis_patch_ver}\.tis' + if [ $? 
-eq 0 ]; then + >&2 echo "ERROR: $SPEC_FILE: 'Release' must use %{?_tis_dist}.%{tis_patch_ver} or %{tis_patch_ver}%{?_tis_dist}" + >&2 grep 'Release:' $SPEC_FILE + return 1 + fi + return 0 +} + diff --git a/build-tools/srpm-utils b/build-tools/srpm-utils new file mode 100644 index 00000000..42372701 --- /dev/null +++ b/build-tools/srpm-utils @@ -0,0 +1,3144 @@ +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source $DIR/spec-utils +source $DIR/classify + +declare -A SRPM_PKG_NAME_TO_PATH +declare -a SRPM_PKG_NAMES + +declare -A STD_SRPM_PKG_NAME_TO_PATH +declare -a STD_SRPM_PKG_NAMES + + +METHOD_NO_RPMBUILD=0 +METHOD_RPMBUILD_UNPATCH=1 +METHOD_RPMBUILD_SCRIPT=2 + +# Find the common root directory of a tar file. +# This form take as input command syntax to list the tar file contents. +# Prefered from is to use tar -tvf ... plus any additional args. +# - don't use x in place of t, we don't want side effects +# - Must use 'v' to help identif directories +tar_cmd_common_dir () { + local TAR_EXTRACT_CMD="$1" + + local i=1 + local prev_path="" + local path + local count=0 + + path=$(eval "$TAR_EXTRACT_CMD -v" | grep '^d') + count=$(echo "$path" | wc -w) + if [ $count -gt 0 ]; then + i=1 + while [ $i -lt 25 ]; do + path=$(eval "$TAR_EXTRACT_CMD -v" | grep '^d' | cut -d ":" -f 2- | cut -d " " -f 2- | cut -f1-$i -d/ | uniq) + count=$(echo "$path" | wc -l) + if [ $count -gt 1 ]; then + echo $prev_path + i=0 + break + else + prev_path=$path + fi + i=$((i + 1)) + done + else + i=1 + while [ $i -lt 25 ]; do + path=$(eval "$TAR_EXTRACT_CMD -v" | cut -d ':' -f 2- | cut -d ' ' -f 2- | rev | cut -d '/' -f 1 --complement | rev | cut -f1-$i -d/ | uniq) + count=$(echo "$path" | wc -l) + if [ $count -gt 1 ]; then + echo $prev_path + i=0 + break + else + prev_path=$path + fi + i=$((i + 1)) + done + fi + return $i +} + + +rpm_get_srpm () { + local rpm_path=$1 + rpm -q --info --nosignature -p $rpm_path | grep '^Source RPM' | sed 's#^Source RPM : ##' +} + +rpm_get_name () { + 
local srpm_path=$1 + rpm -q --queryformat '%{NAME}\n' --nosignature -p $srpm_path +} + +rpm_get_version () { + local srpm_path=$1 + rpm -q --queryformat '%{VERSION}\n' --nosignature -p $srpm_path +} + +rpm_get_release () { + local srpm_path=$1 + rpm -q --queryformat '%{RELEASE}\n' --nosignature -p $srpm_path +} + +rpm_get_arch () { + local srpm_path=$1 + rpm -q --queryformat '%{ARCH}\n' --nosignature -p $srpm_path +} + +rpm_get_full_name () { + local srpm_path=$1 + rpm -q --queryformat '%{NAME}-%{VERSION}-%{RELEASE}\n' --nosignature -p $srpm_path +} + + +raw_fix_if_ApplyPatch () { + local RAW_SCRIPT=$1 + + local TMP_SCRIPT=$(dirname $RAW_SCRIPT)/tmp_raw_script + + grep '^ApplyPatch ' $RAW_SCRIPT >> /dev/null + if [ $? -eq 0 ]; then + mv -f $RAW_SCRIPT $TMP_SCRIPT + local COUNT=0 + while read -r LINE ; do + case "$LINE" in + "ApplyPatch "*) + PN=$(echo "$LINE" | awk '{print $2}') + COUNT=$((COUNT + 1)) + echo "echo 'Patch #$COUNT $PN'" >> $RAW_SCRIPT + echo "$LINE" >> $RAW_SCRIPT + ;; + *) + echo "$LINE" >> $RAW_SCRIPT + ;; + esac + done < "$TMP_SCRIPT" + fi +} + +srpm_create_raw_extract_script () { + local SPEC_FILE=$1 + local ROOT_DIR=$2 + local RPMBUILD_DIR=$3 + local TARGET_ARCH=$4 + local TIS_PATCH_VER=$5 + local RAW_SCRIPT=$6 + local TAR_DIR=$7 + + local BUILD_DIR="$RPMBUILD_DIR/BUILD" + local ApplyPatchCount=0 + + if [ ! -f $SPEC_FILE ]; then + >&2 echo "ERROR: $FUNCNAME (${LINENO}): file SPEC_FILE='$SPEC_FILE' does not exist" + return 1 + fi + + if [ ! -d $ROOT_DIR ]; then + >&2 echo "ERROR: $FUNCNAME (${LINENO}): directory ROOT_DIR='$ROOT_DIR' does not exist" + return 1 + fi + + if [ ! 
-d $RPMBUILD_DIR ]; then + >&2 echo "ERROR: $FUNCNAME (${LINENO}): directory RPMBUILD_DIR='$RPMBUILD_DIR' does not exist" + return 1 + fi + + mkdir -p $BUILD_DIR + mkdir -p $ROOT_DIR/tmp + local STDOUT_LOG=$(mktemp /tmp/stdout_XXXXX.log) + local STDERR_LOG=$(mktemp /tmp/stderr_XXXXX.log) + local PREV_STDOUT_LOG=$(mktemp /tmp/stdout_XXXXX.log) + local PREV_STDERR_LOG=$(mktemp /tmp/stderr_XXXXX.log) + local SAME=0 + + # Build the srpm as though for std build, for naming consistency + echo "stdbuf -oL -eL rpmbuild -bp $SPEC_FILE --root $ROOT_DIR --define='%_topdir $RPMBUILD_DIR' --define='_tis_dist .tis' --define='tis_patch_ver $TIS_PATCH_VER' --nodeps --target $TARGET_ARCH > $STDOUT_LOG 2> $STDERR_LOG" + stdbuf -oL -eL rpmbuild -bp $SPEC_FILE --root $ROOT_DIR \ + --define="%_topdir $RPMBUILD_DIR" \ + --define='_tis_dist .tis' \ + --define="tis_patch_ver $TIS_PATCH_VER" \ + --define="_tis_build_type $BUILD_TYPE" \ + --nodeps --target $TARGET_ARCH > $STDOUT_LOG 2> $STDERR_LOG + if [ $? -ne 0 ]; then + >&2 echo "ERROR: $FUNCNAME (${LINENO}): rpmbuild -bp failed" + \rm -rf $STDOUT_LOG $STDERR_LOG $PREV_STDOUT_LOG $PREV_STDERR_LOG + return 1 + fi + + # The kernel-rt spec file protects against re-extraction, + # so we can't do multiple passes for that package. + # Trick the loop by setting SAME=1 to bypass it + if [ "$(basename $SPEC_FILE)" = "kernel-rt.spec" ]; then + SAME=1 + fi + + let -i COUNT=0 + while [ $SAME -eq 0 ]; do + \cp -f $STDOUT_LOG $PREV_STDOUT_LOG + \cp -f $STDERR_LOG $PREV_STDERR_LOG + stdbuf -oL -eL rpmbuild -bp $SPEC_FILE --root $ROOT_DIR \ + --define="%_topdir $RPMBUILD_DIR" \ + --define='_tis_dist .tis' \ + --define="tis_patch_ver $TIS_PATCH_VER" \ + --define="_tis_build_type $BUILD_TYPE" \ + --nodeps --target $TARGET_ARCH > $STDOUT_LOG 2> $STDERR_LOG + if [ $? 
-ne 0 ]; then + >&2 echo "ERROR: $FUNCNAME (${LINENO}): rpmbuild -bp failed" + \rm -rf $STDOUT_LOG $STDERR_LOG $PREV_STDOUT_LOG $PREV_STDERR_LOG + return 1 + fi + diff $STDERR_LOG $PREV_STDERR_LOG + if [ $? -eq 0 ]; then + SAME=1 + fi + let -i COUNT++ + if [ $COUNT -ge 20 ]; then + break; + fi + done + + grep '^+' $STDERR_LOG | sed -e 's/^[+]* //' | grep -v "^rm .*$TAR_DIR" > $RAW_SCRIPT + raw_fix_if_ApplyPatch $RAW_SCRIPT + \rm -rf $STDOUT_LOG $STDERR_LOG $PREV_STDOUT_LOG $PREV_STDERR_LOG + return 0 +} + + +## +## Return patch file for the target patch number +## +raw_extract_patch_file () { + local RAW_SCRIPT=$1 + local TARGET_PATCH_NO=$2 + local SPEC_FILE=$3 + + local PATCH_FILE + local PATCH_PATH + if [ ! -f $RAW_SCRIPT ]; then + >&2 echo "ERROR: $FUNCNAME (${LINENO}): file RAW_SCRIPT='$RAW_SCRIPT' does not exist" + return 1 + fi + + PATCH_FILE=$(cat $RAW_SCRIPT | grep "echo 'Patch #$TARGET_PATCH_NO " | awk '{print $NF}' | sed 's#^(##' | sed "s#'\$##" | sed 's#):$##') + if [ "x$PATCH_FILE" == "x" ]; then + PATCH_PATH=$(cat $RAW_SCRIPT | grep "/usr/bin/cat " | grep "/$TARGET_PATCH_NO" | awk '{print $2}') + if [ "x$PATCH_PATH" == "x" ]; then + grep "^git am " $RAW_SCRIPT >> /dev/null + if [ $? -eq 0 ]; then + # Extract list of patches from git am command line options, then find n'th patch + PATCH_PATH=$(grep "^git am " $RAW_SCRIPT | tr ' ' '\n' | grep '[.]patch$' | sed -n "${TARGET_PATCH_NO}p") + else + grep "^xargs git am" $RAW_SCRIPT >> /dev/null + if [ $? -eq 0 ]; then + # Extract list of patches from spec file... assume no reordering ... 
then find n'th patch + PATCH_PATH=$(grep '^Patch[0-9]*:' $SPEC_FILE | sort -n | awk -F ':' '{ print $2}' | sed 's/^[ \t]*//' | sed 's/[ \t]*$//' | sed -n "${TARGET_PATCH_NO}p") + else + return 1 + fi + fi + fi + PATCH_FILE=$(basename $PATCH_PATH) + fi + + echo $PATCH_FILE + return 0 +} + +## +## Create script to apply one patch +## +raw_create_patch_apply_script () { + local RAW_SCRIPT=$1 + local TARGET_PATCH_NO=$2 + local PATCH_SCRIPT=$3 + local OLD_BUILD_DIR=$4 + local NEW_BUILD_DIR=$5 + local SPEC_FILE=$6 + local PATCH_COUNT_TARGET=$7 + + local SOURCE_PATH=$(echo $OLD_BUILD_DIR | sed 's#/BUILD$#/SOURCES#') + local PATCH_NO=0 + local PATCH_FILE="" + local PATCH_PATH="" + + if [ ! -f $RAW_SCRIPT ]; then + >&2 echo "ERROR: $FUNCNAME (${LINENO}): file RAW_SCRIPT='$RAW_SCRIPT' does not exist" + return 1 + fi + + local COUNT_START=0 + grep "echo 'Patch #$TARGET_PATCH_NO " $RAW_SCRIPT >> /dev/null + if [ $? -ne 0 ]; then + grep "/usr/bin/cat " $RAW_SCRIPT | grep "/$TARGET_PATCH_NO" >> /dev/null + if [ $? -ne 0 ]; then + # Extract list of patches from git am command line options, then find n'th patch + PATCH_PATH=$(grep "^git am " $RAW_SCRIPT | tr ' ' '\n' | grep '.patch$' | sed -n "${TARGET_PATCH_NO}p") + if [ "x$PATCH_PATH" == "x" ]; then + grep "^xargs git am" $RAW_SCRIPT >> /dev/null + if [ $? -eq 0 ] && [ "$SPEC_FILE" != "" ]; then + # Extract list of patches from spec file... assume no reordering ... 
then find n'th patch + PATCH_PATH=$(grep '^Patch[0-9]*:' $SPEC_FILE | sort -n | awk -F ':' '{ print $2}' | sed 's/^[ \t]*//' | sed 's/[ \t]*$//' | sed -n "${TARGET_PATCH_NO}p") + if [ "x$PATCH_PATH" == "x" ]; then + >&2 echo "ERROR: $FUNCNAME (${LINENO}): TARGET_PATCH_NO=$TARGET_PATCH_NO does not exist in RAW_SCRIPT=$RAW_SCRIPT" + return 1 + fi + else + >&2 echo "ERROR: $FUNCNAME (${LINENO}): TARGET_PATCH_NO=$TARGET_PATCH_NO does not exist in RAW_SCRIPT=$RAW_SCRIPT" + return 1 + fi + fi + fi + else + # We know 'echo Patch #$TARGET_PATCH_NO' exists in the file, so + # rig it so CAT_COUNT and PATCH_COUNT never match TARGET_PATCH_NO. + # CAT_COUNT and PATCH_COUNT are a fall back when patches aren't explicitly numbered. + COUNT_START=-20000 + fi + + if [ -f $PATCH_SCRIPT ]; then + \rm -rf $PATCH_SCRIPT + fi + + echo "set -e" >> $PATCH_SCRIPT + echo "set -x" >> $PATCH_SCRIPT + + local STATE=PRE_PATCH + local LAST_LINE="" + local LINE="" + local TYPICAL_PATCH="" + local CAT_COUNT=$COUNT_START + local PATCH_COUNT=$COUNT_START + local RC=0 + + PATCH_NO=0 + PATCH_FILE="" + PATCH_PATH="" + local LAST_CD="" + local DD="" + + while read -r LINE ; do + LINE=$(echo $LINE | sed -r "s#$(echo $OLD_BUILD_DIR | sed 's#/#[/]+#g')#$NEW_BUILD_DIR#g") + # >&2 echo "Parse: STATE=$STATE, LINE=$LINE" + if [[ "$LINE" == "'['"* ]]; then + continue + fi + case $STATE in + PRE_PATCH) + case "$LINE" in + "echo 'Patch #"*) + PATCH_NO=$(echo $LINE | awk '{ print $3 }' | sed 's/#//') + PATCH_FILE=$(echo $LINE | awk '{ print $4 }' | sed "s/[():']//g") + if [ $PATCH_NO -eq $TARGET_PATCH_NO ]; then + STATE="PATCH_BEGIN" + echo $LINE >> $PATCH_SCRIPT + fi + ;; + "cat "*|\ + "/usr/bin/cat "*) + PATCH_PATH=$(echo $LINE | awk '{ print $2 }') + PATCH_FILE=$(basename $PATCH_PATH) + PATCH_NO=$PATCH_FILE + CAT_COUNT=$((CAT_COUNT + 1)) + if [ "$PATCH_NO" == "$TARGET_PATCH_NO" ] || [ "$CAT_COUNT" == "$TARGET_PATCH_NO" ] ; then + STATE="PATCH" + PATCH_NO=$TARGET_PATCH_NO + echo "echo 'Patch #$PATCH_NO 
($PATCH_FILE):'" >> $PATCH_SCRIPT + fi + ;; + "/usr/bin/patch "*|\ + "patch "*) + TYPICAL_PATCH="$LINE" + PATCH_COUNT=$((PATCH_COUNT + 1)) + # >&2 echo "Parse: PATCH_COUNT=$PATCH_COUNT, PATCH_COUNT_TARGET=$PATCH_COUNT_TARGET, TARGET_PATCH_NO=$TARGET_PATCH_NO" + if [ "$PATCH_COUNT" == "$TARGET_PATCH_NO" ] || [ "$PATCH_COUNT" == "$PATCH_COUNT_TARGET" ] ; then + STATE="REVERSE_PATCH" + PATCH_NO=$TARGET_PATCH_NO + fi + ;; + "/usr/bin/git apply "*|\ + "git apply "*) + TYPICAL_PATCH="$LINE" + PATCH_COUNT=$((PATCH_COUNT + 1)) + if [ "$PATCH_COUNT" == "$TARGET_PATCH_NO" ] || [ "$PATCH_COUNT" == "$PATCH_COUNT_TARGET" ]; then + STATE="REVERSE_PATCH" + PATCH_NO=$TARGET_PATCH_NO + fi + ;; + "/usr/bin/git am "*|\ + "git am "*) + PATCH_PATH=$(grep "^git am " $RAW_SCRIPT | tr ' ' '\n' | grep '.patch$' | sed -n "${TARGET_PATCH_NO}p") + if [ "x$PATCH_PATH" != "x" ]; then + GIT_APPLY_ARGS="" + GIT_AM_EXCLUDE_PENDING=0 + GIT_AM_INCLUDE_PENDING=0 + GIT_AM_DIRECTORY_PENDING=0 + GIT_AM_WHITESPACE_PENDING=0 + for GIT_AM_ARG in $(grep "^git am " $RAW_SCRIPT | tr ' ' '\n' | grep -v '.patch$'); do + case "$GIT_AM_ARG" in + "--exclude="*) + GIT_APPLY_ARGS="$GIT_APPLY_ARGS $GIT_AM_ARG" + ;; + "--exclude") + GIT_AM_EXCLUDE_PENDING=1 + ;; + "--include="*) + GIT_APPLY_ARGS="$GIT_APPLY_ARGS $GIT_AM_ARG" + ;; + "--include") + GIT_AM_INCLUDE_PENDING=1 + ;; + "--directory="*) + DD=$(basename $(echo "$GIT_AM_ARG" | cut -d '=' -f 2)) + echo "DD=$DD, LAST_CD=$LAST_CD" + if [ "$DD" != "$LAST_CD" ]; then + GIT_APPLY_ARGS="$GIT_APPLY_ARGS $GIT_AM_ARG" + fi + ;; + "--directory") + GIT_AM_DIRECTORY_PENDING=1 + ;; + "--whitespace="*) + GIT_APPLY_ARGS="$GIT_APPLY_ARGS $GIT_AM_ARG" + ;; + "--whitespace") + GIT_AM_WHITESPACE_PENDING=1 + ;; + "-p"*) + GIT_APPLY_ARGS="$GIT_APPLY_ARGS $GIT_AM_ARG" + ;; + "-C"*) + GIT_APPLY_ARGS="$GIT_APPLY_ARGS $GIT_AM_ARG" + ;; + "--ignore-space-change") + GIT_APPLY_ARGS="$GIT_APPLY_ARGS $GIT_AM_ARG" + ;; + "--ignore-whitespace") + GIT_APPLY_ARGS="$GIT_APPLY_ARGS $GIT_AM_ARG" + 
;; + *) + if [ $GIT_AM_EXCLUDE_PENDING -eq 1 ]; then + GIT_AM_EXCLUDE_PENDING=0 + GIT_APPLY_ARGS="$GIT_APPLY_ARGS --exclude=$GIT_AM_ARG" + fi + if [ $GIT_AM_INCLUDE_PENDING -eq 1 ]; then + GIT_AM_INCLUDE_PENDING=0 + GIT_APPLY_ARGS="$GIT_APPLY_ARGS --include=$GIT_AM_ARG" + fi + if [ $GIT_AM_DIRECTORY_PENDING -eq 1 ]; then + GIT_AM_DIRECTORY_PENDING=0 + DD=$(basename $(echo "$GIT_AM_ARG" | cut -d '=' -f 2)) + echo "DD=$DD, LAST_CD=$LAST_CD" + if [ "$DD" != "$LAST_CD" ]; then + GIT_APPLY_ARGS="$GIT_APPLY_ARGS --directory=$GIT_AM_ARG" + fi + fi + if [ $GIT_AM_WHITESPACE_PENDING -eq 1 ]; then + GIT_AM_WHITESPACE_PENDING=0 + GIT_APPLY_ARGS="$GIT_APPLY_ARGS --whitespace=$GIT_AM_ARG" + fi + ;; + esac + done + PATCH_FILE=$(basename $PATCH_PATH) + PATCH_NO=$TARGET_PATCH_NO + echo "echo 'Patch #$PATCH_NO ($PATCH_FILE):'" >> $PATCH_SCRIPT + # >&2 echo "echo GIT_APPLY_ARGS=$GIT_APPLY_ARGS" + if [ "$GIT_APPLY_ARGS" == "" ]; then + echo "cat $PATCH_PATH | patch -p1" >> $PATCH_SCRIPT + else + echo "git apply $GIT_APPLY_ARGS $PATCH_PATH" >> $PATCH_SCRIPT + fi + STATE="POST_PATCH" + fi + ;; + "xargs git am"*) + PATCH_SRC_DIR="$(dirname $(dirname $SPEC_FILE))/SOURCES" + PATCH_PATH=$(grep '^Patch[0-9]*:' $SPEC_FILE | sort -n | awk -F ':' '{ print $2}' | sed 's/^[ \t]*//' | sed 's/[ \t]*$//' | sed -n "${TARGET_PATCH_NO}p" | sed "s#^#$PATCH_SRC_DIR/#") + if [ "x$PATCH_PATH" != "x" ]; then + PATCH_FILE=$(basename $PATCH_PATH) + PATCH_NO=$TARGET_PATCH_NO + echo "echo 'Patch #$PATCH_NO ($PATCH_FILE):'" >> $PATCH_SCRIPT + echo "cat $PATCH_PATH | patch -p1" >> $PATCH_SCRIPT + STATE="POST_PATCH" + fi + ;; + "cd "*|\ + "popd"*|\ + "pushd "*) + echo $LINE >> $PATCH_SCRIPT + LAST_CD=$(basename $(echo $LINE | cut -d ' ' -f2-)) + ;; + *) + ;; + esac + ;; + PATCH_BEGIN) + case "$LINE" in + "cat "*|\ + "/usr/bin/cat "*) + STATE="PATCH" + CAT_COUNT=$((CAT_COUNT + 1)) + PATCH_PATH=$(echo $LINE | awk '{ print $2 }') + ;; + "/usr/bin/patch "*|\ + "patch "*) + STATE="REVERSE_PATCH" + 
PATCH_COUNT=$((PATCH_COUNT + 1)) + TYPICAL_PATCH="$LINE" + ;; + "/usr/bin/git apply "*|\ + "git apply "*) + STATE="REVERSE_PATCH" + PATCH_COUNT=$((PATCH_COUNT + 1)) + TYPICAL_PATCH="$LINE" + ;; + "ApplyPatch "*) + STATE="APPLYPATCH" + PATCH_PATH=$(echo $LINE | awk '{ print $2 }') + if [ ! -f $PATCH_PATH ]; then + PATCH_PATH="$SOURCE_PATH/$PATCH_PATH" + fi + ;; + *) + >&2 echo "ERROR: $FUNCNAME (${LINENO}): Unexpected Line in state PATCH_BEGIN: $LINE" + RC=1 + break + ;; + esac + ;; + APPLYPATCH) + case "$LINE" in + "/usr/bin/patch "*|\ + "patch "*) + STATE="POST_PATCH" + PATCH_COUNT=$((PATCH_COUNT + 1)) + echo "/usr/bin/cat $PATCH_PATH | $LINE" >> $PATCH_SCRIPT + ;; + "/usr/bin/git apply "*|\ + "git apply "*) + STATE="POST_PATCH" + echo "/usr/bin/cat $PATCH_PATH | $LINE" >> $PATCH_SCRIPT + ;; + *) + ;; + esac + ;; + PATCH) + case "$LINE" in + "/usr/bin/patch "*|\ + "patch "*) + STATE="POST_PATCH" + TYPICAL_PATCH="$LINE" + PATCH_COUNT=$((PATCH_COUNT + 1)) + echo "$LAST_LINE | $LINE" >> $PATCH_SCRIPT + ;; + "/usr/bin/git apply "*|\ + "git apply "*) + STATE="POST_PATCH" + TYPICAL_PATCH="$LINE" + echo "$LAST_LINE | $LINE" >> $PATCH_SCRIPT + ;; + "echo 'Patch #"*) + STATE="POST_PATCH" + if [ "x$TYPICAL_PATCH" != "x" ];then + echo "$LAST_LINE | $TYPICAL_PATCH" >> $PATCH_SCRIPT + else + >&2 echo "ERROR: $FUNCNAME (${LINENO}): Unexpected Line in state PATCH: $LINE" + RC=1 + break + fi + ;; + *) + >&2 echo "WARNING: * TYPICAL_PATCH=$TYPICAL_PATCH" + >&2 echo "ERROR: $FUNCNAME (${LINENO}): Unexpected Line in state PATCH: $LINE" + RC=1 + break + ;; + esac + ;; + REVERSE_PATCH) + case "$LINE" in + "cat "*|\ + "/usr/bin/cat "*) + STATE="POST_PATCH" + CAT_COUNT=$((CAT_COUNT + 1)) + PATCH_PATH=$(echo $LINE | awk '{ print $2 }') + PATCH_FILE=$(basename $PATCH_PATH) + echo "echo 'Patch #$PATCH_NO ($PATCH_FILE):'" >> $PATCH_SCRIPT + echo "$LINE | $LAST_LINE" >> $PATCH_SCRIPT + ;; + *) + # Not sure why, but the 'cat' line gets dropped on rare and hard to reproduce occasions. 
+ # Recreate it here if we can. + PATCH_PATH="$SOURCE_PATH/PATCH_FILE" + if [ -f "$PATCH_PATH" ]; then + >&2 echo "ERROR: $FUNCNAME (${LINENO}): Assuming PATCH_PATH=$PATCH_PATH" + STATE="POST_PATCH" + echo "/usr/bin/cat $PATCH_PATH | $LAST_LINE" >> $PATCH_SCRIPT + else + >&2 echo "ERROR: $FUNCNAME (${LINENO}): Unexpected Line in state REVERSE_PATCH: $LINE" + RC=1 + break + fi + ;; + esac + ;; + POST_PATCH) + case "$LINE" in + "cd "*|\ + "popd"*|\ + "pushd "*) + echo $LINE >> $PATCH_SCRIPT + ;; + *) + ;; + esac + ;; + + esac + LAST_LINE="$LINE" + done < "$RAW_SCRIPT" + + return $RC +} + +## +## script to extract tarballs +## +raw_create_tarballs_extract_script () { + local RAW_SCRIPT=$1 + local EXTRACT_SCRIPT=$2 + local OLD_BUILD_DIR=$3 + local NEW_BUILD_DIR=$4 + + if [ ! -f $RAW_SCRIPT ]; then + >&2 echo "ERROR: $FUNCNAME (${LINENO}): file RAW_SCRIPT='$RAW_SCRIPT' does not exist" + return 1 + fi + + if [ -f $EXTRACT_SCRIPT ]; then + \rm -rf $EXTRACT_SCRIPT + fi + + local STATE="PRE_PATCH" + local LAST_LINE="" + local RC=0 + local FIRST_TAR=0 + local EXTRACT_DIR="" + local EXTRACT_TAR_DIR="" + local EXTRACT_TAR_DIR_NOW="" + local MV_DEST="" + local CURR_DIR="" + local PREV_DIR="" + local DEST + local TAR_ARGS + local POST_PATCH_FIRST_PASS=0 + + echo "set -e" >> $EXTRACT_SCRIPT + echo "set -x" >> $EXTRACT_SCRIPT + + while read -r LINE ; do + LINE=$(echo $LINE | sed -r "s#$(echo $OLD_BUILD_DIR | sed 's#/#[/]+#g')#$NEW_BUILD_DIR#g") + # >&2 echo "Parse: STATE=$STATE, LINE=$LINE" + if [[ "$LINE" == "'['"* ]]; then + # kernel-rt hack + if [[ "$LINE" == "'[' -L vanilla-3.10.0/configs ']'" ]]; then + echo "if [ -L vanilla-3.10.0/configs ]; then rm -f vanilla-3.10.0/configs; fi" >> $EXTRACT_SCRIPT + fi + # kernel hack + if [[ "$LINE" == "'[' -L configs ']'" ]]; then + echo "if [ -L configs ]; then rm -f configs; fi" >> $EXTRACT_SCRIPT + fi + continue + fi + case $STATE in + PRE_PATCH) + case "$LINE" in + "ApplyOptionalPatch"*|\ + "ApplyPatch"*|\ + "echo 'Patch #"*) + 
STATE="POST_PATCH" + ;; + "gzip -dc "*|\ + "xz -dc "*|\ + "bzip2 -dc "*|\ + "/usr/bin/gzip -dc "*|\ + "/usr/bin/xz -dc "*|\ + "/usr/bin/bzip2 -dc "*) + STATE="TAR" + ;; + "tar -xf -"|\ + "tar -xvf -"|\ + "tar -xvvf -"|\ + "tar -xo -f -"|\ + "/usr/bin/tar -xf -"|\ + "/usr/bin/tar -xvf -"|\ + "/usr/bin/tar -xvvf -"|\ + "/usr/bin/tar -xo -f -") + LINE="$LINE --exclude .git" + STATE="REVERSE_TAR" + ;; + "tar -xf "*|\ + "tar -xvf "*|\ + "tar -xvvf "*|\ + "tar -xo -f "*|\ + "/usr/bin/tar -xf "*|\ + "/usr/bin/tar -xvf "*|\ + "/usr/bin/tar -xvvf "*|\ + "/usr/bin/tar -xo -f "*) + echo "$LINE --exclude .git" >> $EXTRACT_SCRIPT + if [ $FIRST_TAR -eq 0 ]; then + TAR_ARGS=$(echo $LINE | sed -e 's#^/usr/bin/tar ##' -e 's#^tar ##' -e 's#^-xf ##' -e 's#^-xvf ##' -e 's#^-xvvf ##' -e 's#^-xo -f ##') + EXTRACT_DIR=$(dirname $EXTRACT_SCRIPT)/extract_dir + EXTRACT_TAR_DIR=$(dirname $EXTRACT_SCRIPT)/tar_dir + EXTRACT_TAR_DIR_NOW=$(tar_cmd_common_dir "tar -tvf $TAR_ARGS") + echo "readlink -f \$(pwd) > $EXTRACT_DIR" >> $EXTRACT_SCRIPT + fi + FIRST_TAR=1 + ;; + + "git am "*) + STATE="POST_PATCH" + ;; + "xargs git am"*) + STATE="POST_PATCH" + ;; + "/usr/bin/patch "*|\ + "patch "*) + STATE="POST_PATCH" + ;; + "/usr/bin/git apply "*|\ + "git apply "*) + STATE="POST_PATCH" + ;; + "mv $EXTRACT_TAR_DIR_NOW "*) + if [ "x$EXTRACT_TAR_DIR_NOW" == "x" ]; then + echo "$LINE" >> $EXTRACT_SCRIPT + else + MV_DEST=$(echo "$LINE" | awk '{ print $NF}' ) + MV_SRC=$(echo "$LINE" | awk '{ print $(NF-1)}' ) + echo "if [ ! -L $MV_DEST ]; then if [ -d $MV_DEST ]; then if [ ! 
-L $MV_DEST/$EXTRACT_TAR_DIR_NOW ]; then ln -s ../$EXTRACT_TAR_DIR_NOW $MV_DEST/$EXTRACT_TAR_DIR_NOW; fi; else ln -s $EXTRACT_TAR_DIR_NOW $MV_DEST; fi; fi" >> $EXTRACT_SCRIPT + fi + ;; + "cd "*) + DEST=$(echo "$LINE" | awk '{ print $NF}' ) + case "$DEST" in + "/"*) + CURR_DIR="$DEST" + ;; + *) + CURR_DIR="$CURR_DIR/$DEST" + ;; + esac + + echo "$LINE" >> $EXTRACT_SCRIPT + ;; + "pushd "*) + DEST=$(echo "$LINE" | awk '{ print $NF}' ) + PREV_DIR="$CURR_DIR" + case "$DEST" in + "/"*) + CURR_DIR="$DEST" + ;; + *) + CURR_DIR="$CURR_DIR/$DEST" + ;; + esac + echo "$LINE" >> $EXTRACT_SCRIPT + ;; + "popd"*) + CURR_DIR="$PREV_DIR" + echo "$LINE" >> $EXTRACT_SCRIPT + ;; + "cp "*) + DEST=$(echo "$LINE" | awk '{ print $NF}' ) + CPY_SRC=$(echo "$LINE" | awk '{ print $(NF-1)}' ) + if [ "$DEST" == "linux-3.10.0.x86_64" ] && [ "$CPY_SRC" == "vanilla-3.10.0" ]; then + # special case for kernel-rt + echo "if [ ! -L "$DEST" ]; then" >> $EXTRACT_SCRIPT + echo " ln -s $CPY_SRC $DEST" >> $EXTRACT_SCRIPT + echo "fi" >> $EXTRACT_SCRIPT + else + echo "$LINE" >> $EXTRACT_SCRIPT + fi + ;; + "mkdir "*) + echo "$LINE -p" >> $EXTRACT_SCRIPT + ;; + "exit "*) + ;; + "grep "*) + ;; + "xargs "*) + ;; + "wc "*) + ;; + "git init "*|\ + "git config "*|\ + "git add "*|\ + "git commit "*) + ;; + *) + echo "$LINE" >> $EXTRACT_SCRIPT + ;; + esac + ;; + REVERSE_TAR) + case "$LINE" in + "gzip -dc "*|\ + "xz -dc "*|\ + "bzip2 -dc "*|\ + "/usr/bin/gzip -dc "*|\ + "/usr/bin/xz -dc "*|\ + "/usr/bin/bzip2 -dc "*) + STATE="PRE_PATCH" + echo "$LINE | $LAST_LINE" >> $EXTRACT_SCRIPT + if [ $FIRST_TAR -eq 0 ]; then + EXTRACT_DIR=$(dirname $EXTRACT_SCRIPT)/extract_dir + EXTRACT_TAR_DIR=$(dirname $EXTRACT_SCRIPT)/tar_dir + EXTRACT_TAR_DIR_NOW=$(tar_cmd_common_dir "$LINE | tar -tvf -") + echo "readlink -f \$(pwd) > $EXTRACT_DIR" >> $EXTRACT_SCRIPT + fi + FIRST_TAR=1 + ;; + *) + >&2 echo "ERROR: $FUNCNAME (${LINENO}): Unexpected Line in state REVERSE_TAR: $LINE" + RC=1 + break + ;; + esac + ;; + TAR) + case "$LINE" in + 
"tar -xf -"|\ + "tar -xvf -"|\ + "tar -xvvf -"|\ + "tar -xo -f -"|\ + "/usr/bin/tar -xf -"|\ + "/usr/bin/tar -xvf -"|\ + "/usr/bin/tar -xvvf -"|\ + "/usr/bin/tar -xo -f -") + STATE="PRE_PATCH" + echo "$LAST_LINE | $LINE --exclude .git" >> $EXTRACT_SCRIPT + if [ $FIRST_TAR -eq 0 ]; then + EXTRACT_DIR=$(dirname $EXTRACT_SCRIPT)/extract_dir + EXTRACT_TAR_DIR=$(dirname $EXTRACT_SCRIPT)/tar_dir + EXTRACT_TAR_DIR_NOW=$(tar_cmd_common_dir "$LAST_LINE | tar -tvf -") + echo "readlink -f \$(pwd) > $EXTRACT_DIR" >> $EXTRACT_SCRIPT + fi + FIRST_TAR=1 + ;; + "exit "*) + ;; + *) + >&2 echo "ERROR: $FUNCNAME (${LINENO}): Unexpected Line in state TAR: $LINE" + RC=1 + break + ;; + esac + ;; + POST_PATCH) + if [ $POST_PATCH_FIRST_PASS -eq 0 ]; then + POST_PATCH_FIRST_PASS=1 + PATCH_DIR=$(dirname $EXTRACT_SCRIPT)/patch_dir + echo "readlink -f \$(pwd) > $PATCH_DIR" >> $EXTRACT_SCRIPT + readlink -f $(pwd) + fi + case "$LINE" in + "cd "*|\ + "popd"*|\ + "pushd "*) + echo $LINE >> $EXTRACT_SCRIPT + ;; + "iconv"*) + local ICONV_LAST_ARG=$(echo $LINE | awk '{ print $NF }') + local ICONV_SECOND_LAST_ARG=$(echo $LINE | awk '{ print $(NF-1) }') + if [ "$ICONV_SECOND_LAST_ARG" == "utf-8" ]; then + # shadow-utils hack + echo "$LINE > $ICONV_LAST_ARG.utf8" >> $EXTRACT_SCRIPT + fi + ;; + "cp "*) + DEST=$(echo "$LINE" | awk '{ print $NF}' ) + CPY_SRC=$(echo "$LINE" | awk '{ print $(NF-1)}' ) + if [ "$DEST" == "linux-3.10.0.x86_64" ] && [ "$CPY_SRC" == "vanilla-3.10.0" ]; then + # special case for kernel-rt + echo "if [ ! 
-L "$DEST" ]; then" >> $EXTRACT_SCRIPT + echo " ln -s $CPY_SRC $DEST" >> $EXTRACT_SCRIPT + echo "fi" >> $EXTRACT_SCRIPT + else + echo "$LINE" >> $EXTRACT_SCRIPT + fi + ;; + "mkdir "*) + echo "$LINE -p" >> $EXTRACT_SCRIPT + ;; + "exit "*) + ;; + *) + ;; + esac + ;; + esac + LAST_LINE="$LINE" + done < "$RAW_SCRIPT" + + if [ $POST_PATCH_FIRST_PASS -eq 0 ]; then + PATCH_DIR=$(dirname $EXTRACT_SCRIPT)/patch_dir + echo "readlink -f \$(pwd) > $PATCH_DIR" >> $EXTRACT_SCRIPT + readlink -f $(pwd) + fi + + return $RC +} + +## +## script to extract tarballs after metapatchs +## ok, not really extracting a tarball, just set up symlink if required +## +raw_create_tarballs_extract_script_post_metapatch () { + local RAW_SCRIPT=$1 + local EXTRACT_SCRIPT=$2 + local OLD_BUILD_DIR=$3 + local NEW_BUILD_DIR=$4 + + if [ ! -f $RAW_SCRIPT ]; then + >&2 echo "ERROR: $FUNCNAME (${LINENO}): file RAW_SCRIPT='$RAW_SCRIPT' does not exist" + return 1 + fi + + if [ -f $EXTRACT_SCRIPT ]; then + \rm -rf $EXTRACT_SCRIPT + fi + + local STATE="PRE_PATCH" + local LAST_LINE="" + local RC=0 + local FIRST_TAR=0 + local EXTRACT_DIR="" + local EXTRACT_TAR_DIR="" + local EXTRACT_TAR_DIR_NOW="" + local MV_DEST="" + local TAR_ARGS + + echo "set -e" >> $EXTRACT_SCRIPT + echo "set -x" >> $EXTRACT_SCRIPT + + while read -r LINE ; do + LINE=$(echo $LINE | sed -r "s#$(echo $OLD_BUILD_DIR | sed 's#/#[/]+#g')#$NEW_BUILD_DIR#g") + # >&2 echo "Parse: STATE=$STATE, LINE=$LINE" + if [[ "$LINE" == "'['"* ]]; then + # kernel-rt hack + if [[ "$LINE" == "'[' -L vanilla-3.10.0/configs ']'" ]]; then + echo "if [ -L vanilla-3.10.0/configs ]; then rm -f vanilla-3.10.0/configs; fi" >> $EXTRACT_SCRIPT + fi + # kernel hack + if [[ "$LINE" == "'[' -L configs ']'" ]]; then + echo "if [ -L configs ]; then rm -f configs; fi" >> $EXTRACT_SCRIPT + fi + continue + fi + case $STATE in + PRE_PATCH) + case "$LINE" in + "ApplyOptionalPatch"*|\ + "ApplyPatch"*|\ + "echo 'Patch #"*) + STATE="POST_PATCH" + ;; + "gzip -dc "*|\ + "xz -dc "*|\ + 
"bzip2 -dc "*|\ + "/usr/bin/gzip -dc "*|\ + "/usr/bin/xz -dc "*|\ + "/usr/bin/bzip2 -dc "*) + STATE="TAR" + ;; + "tar -xf -"|\ + "tar -xvf -"|\ + "tar -xvvf -"|\ + "tar -xo -f -"|\ + "/usr/bin/tar -xf -"|\ + "/usr/bin/tar -xvf -"|\ + "/usr/bin/tar -xvvf -"|\ + "/usr/bin/tar -xo -f -") + STATE="REVERSE_TAR" + ;; + "tar -xf "*|\ + "tar -xvf "*|\ + "tar -xvvf "*|\ + "tar -xo -f "*|\ + "/usr/bin/tar -xf "*|\ + "/usr/bin/tar -xvf "*|\ + "/usr/bin/tar -xvvf "*|\ + "/usr/bin/tar -xo -f "*) + LINE="$LINE --exclude .git" + if [ $FIRST_TAR -eq 0 ]; then + TAR_ARGS=$(echo $LINE | sed -e 's#^/usr/bin/tar ##' -e 's#^-xf ##' -e 's#^-xvf ##' -e 's#^-xvvf ##' -e 's#^-xo -f ##') + EXTRACT_DIR=$(dirname $EXTRACT_SCRIPT)/extract_dir + EXTRACT_TAR_DIR=$(dirname $EXTRACT_SCRIPT)/tar_dir + EXTRACT_TAR_DIR_NOW=$(tar_cmd_common_dir "tar -tvf $TAR_ARGS") + fi + FIRST_TAR=1 + ;; + "git am "*) + STATE="POST_PATCH" + ;; + "xargs git am"*) + STATE="POST_PATCH" + ;; + "/usr/bin/patch "*|\ + "patch "*) + STATE="POST_PATCH" + ;; + "/usr/bin/git apply "*|\ + "git apply "*) + STATE="POST_PATCH" + ;; + "mv $EXTRACT_TAR_DIR_NOW "*) + if [ "x$EXTRACT_TAR_DIR_NOW" == "x" ]; then + echo "" >> $EXTRACT_SCRIPT + else + MV_DEST=$(echo "$LINE" | awk '{ print $NF}' ) + MV_SRC=$(echo "$LINE" | awk '{ print $(NF-1)}' ) + echo "if [ ! -L $MV_DEST ]; then if [ -d $MV_DEST ]; then if [ ! 
-L $MV_DEST/$EXTRACT_TAR_DIR_NOW ]; then ln -s ../$EXTRACT_TAR_DIR_NOW $MV_DEST/$EXTRACT_TAR_DIR_NOW; fi; else ln -s $EXTRACT_TAR_DIR_NOW $MV_DEST; fi; fi" >> $EXTRACT_SCRIPT + fi + ;; + "cd "*|\ + "popd"*|\ + "pushd "*) + echo "$LINE" >> $EXTRACT_SCRIPT + ;; + "mkdir "*) + echo "$LINE -p" >> $EXTRACT_SCRIPT + ;; + "grep "*) + ;; + *) + ;; + esac + ;; + REVERSE_TAR) + case "$LINE" in + "gzip -dc "*|\ + "xz -dc "*|\ + "bzip2 -dc "*|\ + "/usr/bin/gzip -dc "*|\ + "/usr/bin/xz -dc "*|\ + "/usr/bin/bzip2 -dc "*) + STATE="PRE_PATCH" + if [ $FIRST_TAR -eq 0 ]; then + EXTRACT_DIR=$(dirname $EXTRACT_SCRIPT)/extract_dir + EXTRACT_TAR_DIR=$(dirname $EXTRACT_SCRIPT)/tar_dir + EXTRACT_TAR_DIR_NOW=$(tar_cmd_common_dir "$LINE | tar -tvf -") + fi + FIRST_TAR=1 + ;; + *) + >&2 echo "ERROR: $FUNCNAME (${LINENO}): Unexpected Line in state REVERSE_TAR: $LINE" + RC=1 + break + ;; + esac + ;; + TAR) + case "$LINE" in + "tar -xf -"|\ + "tar -xvf -"|\ + "tar -xvvf -"|\ + "tar -xo -f -"|\ + "/usr/bin/tar -xf -"|\ + "/usr/bin/tar -xvf -"|\ + "/usr/bin/tar -xvvf -"|\ + "/usr/bin/tar -xo -f -") + LINE="$LINE --exclude .git" + STATE="PRE_PATCH" + if [ $FIRST_TAR -eq 0 ]; then + EXTRACT_DIR=$(dirname $EXTRACT_SCRIPT)/extract_dir + EXTRACT_TAR_DIR=$(dirname $EXTRACT_SCRIPT)/tar_dir + EXTRACT_TAR_DIR_NOW=$(tar_cmd_common_dir "$LAST_LINE | tar -tvf -") + fi + FIRST_TAR=1 + ;; + *) + >&2 echo "ERROR: $FUNCNAME (${LINENO}): Unexpected Line in state TAR: $LINE" + RC=1 + break + ;; + esac + ;; + POST_PATCH) + case "$LINE" in + "cd "*|\ + "popd"*|\ + "pushd "*) + echo $LINE >> $EXTRACT_SCRIPT + ;; + "mkdir "*) + echo "$LINE -p" >> $EXTRACT_SCRIPT + ;; + *) + ;; + esac + ;; + esac + LAST_LINE="$LINE" + done < "$RAW_SCRIPT" + + return $RC +} + + +## +## script to list patch numbers +## +raw_patch_order () { + local RAW_SCRIPT=$1 + local SPEC_FILE=$2 + local LINE + local LINE2 + local PATCH_NO=0 + + if [ ! 
-f $RAW_SCRIPT ]; then + >&2 echo "ERROR: $FUNCNAME (${LINENO}): file RAW_SCRIPT='$RAW_SCRIPT' does not exist" + return 1 + fi + + + while read -r LINE ; do + if [[ "$LINE" == "'['"* ]]; then + continue + fi + case "$LINE" in + "echo 'Patch #"*) + PATCH_NO=$(echo $LINE | awk '{ print $3 }' | sed 's/#//') + echo $PATCH_NO + ;; + "git am "*) + for LINE2 in $(echo $LINE | tr ' ' '\n' | grep '.patch$'); do + PATCH_NO=$((PATCH_NO + 1)) + echo $PATCH_NO + done + ;; + "xargs git am"*) + grep '^Patch[0-9]*:' $SPEC_FILE |\ + while read -r LINE2; do + PATCH_NO=$((PATCH_NO + 1)) + echo $PATCH_NO + done + ;; + *) + ;; + esac + done < "$RAW_SCRIPT" + + if [ $PATCH_NO -eq 0 ]; then + while read -r LINE ; do + if [[ "$LINE" == "'['"* ]]; then + continue + fi + case "$LINE" in + "cat "*|\ + "/usr/bin/cat "*) + PATCH_PATH=$(echo $LINE | awk '{ print $2 }') + PATCH_FILE=$(basename $PATCH_PATH) + PATCH_NO=$PATCH_FILE + echo $PATCH_NO + ;; + *) + ;; + esac + done < "$RAW_SCRIPT" + fi + + return 0 +} + +srpm_build_dictionary () { + local srpm_dir=$1 + local srpm_path + local name + + for srpm_path in `find $srpm_dir -name '*.src.rpm' | sort -V`; do + name=`rpm_get_name $srpm_path` + SRPM_PKG_NAME_TO_PATH[$name]="$srpm_path" + SRPM_PKG_NAMES+=("$name") + done +} + +srpm_build_std_dictionary () { + local srpm_dir=$1 + local srpm_path + local name + + for srpm_path in `find $srpm_dir -name '*.src.rpm' | sort -V`; do + name=`rpm_get_name $srpm_path` + STD_SRPM_PKG_NAME_TO_PATH[$name]="$srpm_path" + STD_SRPM_PKG_NAMES+=("$name") + done +} + +srpm_assemble () { + local FULL_BUILD_DIR=$1 + local TIS_PATCH_VER=$2 + + local SPEC_PATH + local SPEC + local SRPM_PATH + local SRPM + local NAME + local VERSION + local RELEASE + local BUILD_NEEDED + + for SPEC in $(cd $FULL_BUILD_DIR/SPECS/; ls -1 *.spec); do + SPEC_PATH="$FULL_BUILD_DIR/SPECS/$SPEC" + NAME=`spec_find_tag Name "$SPEC_PATH" 2>> /dev/null` + if [ $? 
-ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): 'Name' not found in '$SPEC_PATH'" + fi + VERSION=`spec_find_tag Version "$SPEC_PATH" 2>> /dev/null` + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): 'Version' not found in '$SPEC_PATH'" + VERSION="0" + fi + RELEASE=`spec_find_tag Release "$SPEC_PATH" "$(dirname $(dirname $SPEC_PATH))" "$TIS_PATCH_VER" 2>> /dev/null` + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): 'Release' not found in '$SPEC_PATH'" + RELEASE="0" + fi + SRPM="$NAME-$VERSION-$RELEASE.src.rpm" + SRPM_PATH="$FULL_BUILD_DIR/SRPMS/$SRPM" + + spec_validate_tis_release $SPEC_PATH + if [ $? -ne 0 ]; then + echo "TIS Validation of $SPEC_PATH failed" + exit 1 + fi + + BUILD_NEEDED=0 + if [ -f $SRPM_PATH ]; then + n=`find $FULL_BUILD_DIR -cnewer $SRPM_PATH | wc -l` + if [ $n -gt 0 ]; then + BUILD_NEEDED=1 + fi + else + BUILD_NEEDED=1 + fi + + if [ $BUILD_NEEDED -gt 0 ]; then + echo "SPEC file: $SPEC_PATH" + echo "SRPM build directory: $FULL_BUILD_DIR" + echo "TIS_PATCH_VER: $TIS_PATCH_VER" + + sed -i -e "1 i%define _tis_build_type $BUILD_TYPE" $SPEC_PATH + sed -i -e "1 i%define tis_patch_ver $TIS_PATCH_VER" $SPEC_PATH + + # Build the srpm as though for std build, for naming consistency + if [ "x$PLATFORM_RELEASE" == "x" ]; then + rpmbuild -bs $SPEC_PATH \ + --define="%_topdir $FULL_BUILD_DIR" \ + --define='_tis_dist .tis' \ + --undefine=dist + else + rpmbuild -bs $SPEC_PATH \ + --define="%_topdir $FULL_BUILD_DIR" \ + --define='_tis_dist .tis' \ + --define="platform_release $PLATFORM_RELEASE" \ + --undefine=dist + fi + + if [ $? 
-ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): rpmbuild failed: rpmbuild -bs $SPEC_PATH --define='%_topdir $FULL_BUILD_DIR' --define='_tis_dist .tis' --undefine=dist" + return 1 + fi + else + echo "SRPM build not needed" + fi + done + + return 0 +} + + +srpm_extract () { + local ORIG_SRPM_PATH=$1 + local WRS_PKG_DIR=$2 + local ROOT_DIR=$3 + local BUILD_DIR=$4 + local BRANCH=$5 + + local USE_GIT=0 + local ORIG_DIR=`pwd` + local PKG_DIR=`rpm -q --queryformat '%{NAME}-%{VERSION}-%{RELEASE}\n' --nosignature -p $ORIG_SRPM_PATH` + + if [ "x$ROOT_DIR" == "x" ]; then + ROOT_DIR="$MY_WORKSPACE/srpm_assemble" + fi + + if [ "x$BUILD_DIR" == "x" ]; then + BUILD_DIR="$PKG_DIR/rpmbuild" + fi + + local SPEC_DIR="$ROOT_DIR/$BUILD_DIR/SPECS" + local SOURCE_DIR="$ROOT_DIR/$BUILD_DIR/SOURCES" + local GIT_DIR="$ROOT_DIR/$PKG_DIR/gits" + local META_PATCH_TARGET_DIR="$ROOT_DIR/$BUILD_DIR" + local ARCH=centos + + if [ ! -d $ROOT_DIR ]; then + mkdir -p "$ROOT_DIR" + fi + + if [ ! -d $SPEC_DIR ]; then + rpm -i --nosignature --root=$ROOT_DIR --define="%_topdir $BUILD_DIR" $ORIG_SRPM_PATH 2>> /dev/null + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): Failed to extract '$ORIG_SRPM_PATH' to '$ROOT_DIR/$BUILD_DIR'" + return 1 + fi + fi + + for SPEC in $(cd $SPEC_DIR; ls -1 *.spec); do + echo $SPEC; + SPEC_GIT="$GIT_DIR/$SPEC" + PKG_NAME=$(spec_find_tag Name $SPEC_DIR/$SPEC 2>> /dev/null) + PKG_VER=$(spec_find_tag Version $SPEC_DIR/$SPEC 2>> /dev/null) + TAR_DIR="$PKG_NAME-$PKG_VER" + PATCH_TARGET_DIR="$SPEC_GIT/$TAR_DIR" + echo " $TAR_DIR" + + if [ "x$WRS_PKG_DIR" != "x" ]; then + echo "srpm_apply_meta_patches '$META_PATCH_TARGET_DIR' '$WRS_PKG_DIR' $USE_GIT '$ARCH' '$BRANCH'" + srpm_apply_meta_patches "$META_PATCH_TARGET_DIR" "$WRS_PKG_DIR" $USE_GIT "$ARCH" "$BRANCH" + if [ $? 
-ne 0 ]; then + cd $ORIG_DIR + return 1 + fi + fi + done + + cd $ORIG_DIR + return 0 +} + + +srpm_apply_meta_patches () { + local META_PATCH_TARGET_DIR=$1 + local WRS_PKG_DIR=$2 + local USE_GIT=$3 + local ARCH=$4 + local BRANCH=$5 + + local ORIG_DIR=`pwd` + local META_PATCH_DIR + local PATCH_DIR + local PATCH + local PATCH_PATH + local PO_PATH + + echo "Applying metadata patches" + if [ ! -d "$META_PATCH_TARGET_DIR" ]; then + echo "ERROR: $FUNCNAME (${LINENO}): directory '$META_PATCH_TARGET_DIR' not found." + return 1 + fi + + if [ ! -d "$WRS_PKG_DIR" ]; then + echo "ERROR: $FUNCNAME (${LINENO}): directory '$WRS_PKG_DIR' not found." + return 1 + fi + + META_PATCH_DIR="$WRS_PKG_DIR/$ARCH/meta_patches" + PATCH_DIR="$WRS_PKG_DIR/$ARCH/patches" + PO_PATH="$META_PATCH_DIR/PATCH_ORDER" + if [ ! -f $PO_PATH ]; then + echo "No WRS patches to apply" + return 0 + fi + + cd $META_PATCH_TARGET_DIR + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): failed to change directory to '$META_PATCH_TARGET_DIR'" + return 1 + fi + + for PATCH in `cat $PO_PATH`; do + PATCH_PATH="$META_PATCH_DIR/$PATCH" + if [ ! -f "$PATCH_PATH" ]; then + echo "ERROR: $FUNCNAME (${LINENO}): patch '$PATCH_PATH' not found." + cd $ORIG_DIR + return 1 + fi + + echo "srpm_apply_patch '$PATCH_PATH' '-p1' '$META_PATCH_TARGET_DIR' $USE_GIT 'WRS: ' '$METHOD_NO_RPMBUILD' '' '' '' '' 0 '$BRANCH' ''" + srpm_apply_patch "$PATCH_PATH" "-p1" "$META_PATCH_TARGET_DIR" $USE_GIT "WRS: " $METHOD_NO_RPMBUILD "" "" "" "" 0 "$BRANCH" "" 0 + + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): failed to apply patch '$PATCH'" + cd $ORIG_DIR + return 1 + fi + done + + local d + local dd + local f + local ff + + if [ -d "$PATCH_DIR" ]; then + cd $PATCH_DIR + echo ".gitignore" >> "$META_PATCH_TARGET_DIR/.gitignore" + + if [ $? -ne 0 ]; then + echo "ERROR: Failed to cd to '$PATCH_DIR'" + cd $ORIG_DIR + return 1 + fi + + for dd in `find . 
-type d | sort -V`; do + d=${dd:2} + mkdir -p "$META_PATCH_TARGET_DIR/SOURCES/$d" + if [ $? -ne 0 ]; then + echo "ERROR: Failed to mkdir '$META_PATCH_TARGET_DIR/SOURCES/$d'" + cd $ORIG_DIR + return 1 + fi + done + + for ff in `find . -type f | sort -V`; do + f=${ff:2} + d=$(dirname $f) + \cp -L -f -v "$PATCH_DIR/$f" "$META_PATCH_TARGET_DIR/SOURCES/$d" + if [ $? -ne 0 ]; then + echo "ERROR: Failed to copy '$PATCH_DIR/$f' to '$META_PATCH_TARGET_DIR/SOURCES/$d'" + cd $ORIG_DIR + return 1 + fi + echo "SOURCES/$f" >> "$META_PATCH_TARGET_DIR/.gitignore" + done + fi + + cd $ORIG_DIR + return 0 +} + +export GLOBAL_PATCH_TARGET_DIR="" + + +commit_git () { + local DIR="$1" + local COMMIT_MESSAGE="$2" + local TAG="$3" + + local ORIG_DIR=$(pwd) + + # Add and Commit + cd $DIR + echo "git add . @ $(pwd)" + git add . + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): 'git add' failed for at '$DIR'" + cd $ORIG_DIR + return 1 + fi + + echo "git commit --allow-empty -m '$COMMIT_MESSAGE' @ $(pwd)" + git commit --allow-empty -m "$COMMIT_MESSAGE" + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): 'git commit' failed at '$DIR'" + cd $ORIG_DIR + return 1 + fi + + # Tag the contents + if [ "$TAG" != "" ]; then + echo "git tag $TAG @ $(pwd)" + git tag $TAG + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): 'git tag' failed at '$DIR'" + cd $ORIG_DIR + return 1 + fi + fi + + cd $ORIG_DIR >> /dev/null + return 0 +} + +init_git_if_required () { + local DIR="$1" + local COMMIT_MESSAGE="$2" + local TAG="$3" + + local ORIG_DIR=$(pwd) + + cd $DIR + + # Initialize git if this is our first time + if [ ! -d .git ]; then + echo "$(pwd)/.git not found, creating a new git" + echo "git init @ $(pwd)" + git init + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): 'git init' failed for at '$BUILD_DIR'" + cd $ORIG_DIR + return 1 + fi + + echo "git add . @ $(pwd)" + git add . + if [ $? 
-ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): 'git add' failed for at '$DIR'" + cd $ORIG_DIR + return 1 + fi + + echo "git commit --allow-empty -m '$COMMIT_MESSAGE' @ $(pwd)" + git commit --allow-empty -m "$COMMIT_MESSAGE" + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): 'git commit' failed at '$DIR'" + cd $ORIG_DIR + return 1 + fi + + # Tag the contents + if [ "$TAG" != "" ]; then + echo "git tag $TAG @ $(pwd)" + git tag $TAG + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): 'git tag' failed at '$DIR'" + cd $ORIG_DIR + return 1 + fi + fi + fi + + cd $ORIG_DIR >> /dev/null + return 0 +} + +prep_git_for_metadata () { + local BUILD_DIR="$1" + local BRANCH="$2" + local NO_META_PATCH="$3" + local PRE_WRS_PREFIX="$4" + + local ORIG_BRANCH="" + local ORIG_PRE_WRS_TAG="" + local ORIG_DIR=$(pwd) + + cd $BUILD_DIR + + # Initialize git if this is our first time + init_git_if_required "." "ORIGINAL: initial commit" "" + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): init_git_if_required failed for at '$(pwd)'" + cd $ORIG_DIR + return 1 + fi + + if [ "x$NO_META_PATCH" == "x1" ]; then + ORIG_BRANCH=$(git rev-parse --abbrev-ref HEAD) + ORIG_PRE_WRS_TAG="$PRE_WRS_PREFIX$ORIG_BRANCH" + fi + + # Delete branch if it previously existed + git checkout $BRANCH &>> /dev/null + if [ $? -eq 0 ]; then + git checkout master + git branch -D $BRANCH + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): failed to delete branch '$BRANCH' at '$(pwd)'" + cd $ORIG_DIR + return 1 + fi + fi + + # create branch + if [ "x$ORIG_PRE_WRS_TAG" != "x" ]; then + git checkout $ORIG_PRE_WRS_TAG + if [ $? -ne 0 ]; then + git checkout master + fi + else + git checkout master + fi + + echo "git checkout -b $BRANCH" + git checkout -b $BRANCH + if [ $? 
-ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): failed to create branch '$BRANCH' at '$(pwd)'" + cd $ORIG_DIR + return 1 + fi + + cd $ORIG_DIR >> /dev/null + return 0 +} + + +tarball_extract () { + local SPEC_DIR="${1}" + local SPEC="${2}" + local SPEC_GIT="${3}" + local SOURCE_DIR="${4}" + local BRANCH="${5}" + local ORIG_BRANCH="${6}" + local TAR_DIR="${7}" + local ROOT_DIR="${8}" + local PKG_DIR="${9}" + local BUILD_DIR="${10}" + local TARGET_ARCH="${11}" + local TIS_PATCH_VER="${12}" + local OUTPUT_FILE="${13}" + local NO_META_PATCH=${14} + # BUILD_TYPE exported from higher layers + + echo "tarball_extract SPEC_DIR=$SPEC_DIR SPEC=$SPEC SPEC_GIT=$SPEC_GIT SOURCE_DIR=$SOURCE_DIR BRANCH=$BRANCH ORIG_BRANCH=$ORIG_BRANCH TAR_DIR=$TAR_DIR ROOT_DIR=$ROOT_DIR PKG_DIR=$PKG_DIR BUILD_DIR=$BUILD_DIR TARGET_ARCH=$TARGET_ARCH TIS_PATCH_VER=$TIS_PATCH_VER OUTPUT_FILE=$OUTPUT_FILE NO_META_PATCH=$NO_META_PATCH" + + if [ -f $OUTPUT_FILE ]; then + \rm -f $OUTPUT_FILE + fi + + local ALT_TAR_DIR="" + local SOURCE_NO="" + local SOURCE_NAME="" + local TAR="" + local TAR_HAS_CHANGED=1 + local REAL_TYPE="" + local ORIG_DIR="$(pwd)" + local TAR_EXTRACT_ARG="" + local PATCH_TARGET_DIR="" + local EXTRACT_TO_DIR="" + local AUTOSETUP_MACRO="" + local AUTOSETUP=0 + local METHOD=$METHOD_RPMBUILD_SCRIPT + local RPMBUILD_BP_LOG=$ROOT_DIR/$PKG_DIR/rpmbuild_bp.log + local RPMBUILD_BUILD_DIR=$ROOT_DIR/$BUILD_DIR/BUILD + local EXCLUDE_PATCH_NUM_CSV="" + local RAW_SCRIPT="" + local EXTRACT_SCRIPT="" + + + # Create a directory for the extraction of tarballs + echo "SPEC_GIT=$SPEC_GIT" + echo "mkdir -p $SPEC_GIT" + mkdir -p $SPEC_GIT + echo "cd $SPEC_GIT" + cd $SPEC_GIT + pwd + + # Extract tarballs named in spec file + + # Does this spec file use autosetup + AUTOSETUP_MACRO=$(grep '%autosetup' $SPEC_DIR/$SPEC) + if [ $? 
-eq 0 ]; then + AUTOSETUP=1 + fi + + if [ $METHOD -eq $METHOD_RPMBUILD_SCRIPT ]; then + if [ -d "$RPMBUILD_BUILD_DIR" ]; then + echo "rm -rf RPMBUILD_BUILD_DIR=$RPMBUILD_BUILD_DIR" + \rm -rf "$RPMBUILD_BUILD_DIR" + fi + mkdir -p $RPMBUILD_BUILD_DIR + + if [ -f $RPMBUILD_BP_LOG ]; then + echo "rm -f RPMBUILD_BP_LOG=$RPMBUILD_BP_LOG" + \rm -f $RPMBUILD_BP_LOG + fi + touch $RPMBUILD_BP_LOG + + RAW_SCRIPT=$ROOT_DIR/$PKG_DIR/raw_script + EXTRACT_SCRIPT=$ROOT_DIR/$PKG_DIR/extract_script + echo "srpm_create_raw_extract_script '$SPEC_DIR/$SPEC' '$ROOT_DIR/$PKG_DIR' '$ROOT_DIR/$BUILD_DIR' '$TARGET_ARCH' '$TIS_PATCH_VER' '$RAW_SCRIPT'" + srpm_create_raw_extract_script $SPEC_DIR/$SPEC $ROOT_DIR/$PKG_DIR $ROOT_DIR/$BUILD_DIR $TARGET_ARCH $TIS_PATCH_VER $RAW_SCRIPT + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): srpm_create_raw_extract_script failed" + cd $ORIG_DIR + return 1 + fi + + if [ -d "$RPMBUILD_BUILD_DIR" ]; then + echo "rm -rf RPMBUILD_BUILD_DIR=$RPMBUILD_BUILD_DIR" + \rm -rf "$RPMBUILD_BUILD_DIR" + fi + mkdir -p $RPMBUILD_BUILD_DIR + + echo "raw_create_tarballs_extract_script '$RAW_SCRIPT' '$EXTRACT_SCRIPT' '$RPMBUILD_BUILD_DIR' '$SPEC_GIT'" + EXTRACT_TO_DIR=$(raw_create_tarballs_extract_script "$RAW_SCRIPT" "$EXTRACT_SCRIPT" "$RPMBUILD_BUILD_DIR" "$SPEC_GIT") + if [ $? 
-ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): srpm_create_raw_extract_script failed" + cd $ORIG_DIR + return 1 + fi + echo "EXTRACT_TO_DIR=$EXTRACT_TO_DIR" + fi + + local EXTRACT_DIR_FILE="" + local EXTRACT_TARGET_DIR="" + local OLD_EXTRACT_TARGET_DIR="" + local SAVE_OLD_EXTRACT_TARGET_DIR="" + local PATCH_DIR_FILE="" + local PATCH_TARGET_DIR="" + local OLD_PATCH_TARGET_DIR="" + local SAVE_OLD_PATCH_TARGET_DIR="" + + if [ $METHOD -eq $METHOD_RPMBUILD_SCRIPT ]; then + EXTRACT_DIR_FILE=$(dirname $EXTRACT_SCRIPT)/extract_dir + PATCH_DIR_FILE=$(dirname $EXTRACT_SCRIPT)/patch_dir + if [ -f $EXTRACT_DIR_FILE ]; then + OLD_EXTRACT_TARGET_DIR=$(cat $EXTRACT_DIR_FILE) + fi + if [ -f $PATCH_DIR_FILE ]; then + OLD_PATCH_TARGET_DIR=$(cat $PATCH_DIR_FILE) + fi + if [ "$OLD_PATCH_TARGET_DIR" != "" ] && [ -d $OLD_PATCH_TARGET_DIR ]; then + SAVE_OLD_PATCH_TARGET_DIR="${OLD_PATCH_TARGET_DIR}.save" + echo "mv $OLD_PATCH_TARGET_DIR $SAVE_OLD_PATCH_TARGET_DIR" + mv $OLD_PATCH_TARGET_DIR $SAVE_OLD_PATCH_TARGET_DIR + fi + if [ "$OLD_EXTRACT_TARGET_DIR" != "" ] && [ -d $OLD_EXTRACT_TARGET_DIR ]; then + SAVE_OLD_EXTRACT_TARGET_DIR="${OLD_EXTRACT_TARGET_DIR}.save" + echo "mv $OLD_EXTRACT_TARGET_DIR $SAVE_OLD_EXTRACT_TARGET_DIR" + mv $OLD_EXTRACT_TARGET_DIR $SAVE_OLD_EXTRACT_TARGET_DIR + fi + if [ ! -d $SPEC_GIT ]; then + mkdir -p $SPEC_GIT + fi + ( + source $EXTRACT_SCRIPT + RC=$? + echo "SRPM_EXTRACT_DIR=$(pwd)" + exit $RC + ) | tee $EXTRACT_SCRIPT.pre.log + if [ ${PIPESTATUS[0]} -ne 0 ] ; then + echo "ERROR: Failed in script '$EXTRACT_SCRIPT'" + cd $ORIG_DIR + return 1 + fi + + CANONICAL_SPEC_GIT=$(readlink -f "$SPEC_GIT") + EXTRACT_TARGET_DIR=$(cat $EXTRACT_DIR_FILE) + PATCH_TARGET_DIR=$(cat $PATCH_DIR_FILE) + TAR_DIR=$(echo "$PATCH_TARGET_DIR" | sed "s#^$CANONICAL_SPEC_GIT/##" | sed "s#^$CANONICAL_SPEC_GIT##") + if [ "$TAR_DIR" == "" ]; then + TAR_DIR="." 
+ fi + echo "=== CANONICAL_SPEC_GIT=$CANONICAL_SPEC_GIT" + echo "=== TAR_DIR=$TAR_DIR" + echo "=== PATCH_TARGET_DIR=$PATCH_TARGET_DIR" + echo "=== EXTRACT_TARGET_DIR=$EXTRACT_TARGET_DIR" + if [ "$PATCH_TARGET_DIR" == "$TAR_DIR" ] || [ "$PATCH_TARGET_DIR" == "" ] || [ "$EXTRACT_TARGET_DIR" == "" ] || [[ "$TAR_DIR" == /* ]]; then + echo "Something went wrong" + cd $ORIG_DIR + return 1 + fi + + echo "rm -rf $PATCH_TARGET_DIR; mkdir -p $PATCH_TARGET_DIR" + \rm -rf "$PATCH_TARGET_DIR" + mkdir -p "$PATCH_TARGET_DIR" + + if [ "$SAVE_OLD_EXTRACT_TARGET_DIR" != "" ] && [ -d $SAVE_OLD_EXTRACT_TARGET_DIR ]; then + echo "mv $SAVE_OLD_EXTRACT_TARGET_DIR $OLD_EXTRACT_TARGET_DIR" + if [ -d $OLD_EXTRACT_TARGET_DIR ]; then + \rm -rf $OLD_EXTRACT_TARGET_DIR + fi + mv $SAVE_OLD_EXTRACT_TARGET_DIR $OLD_EXTRACT_TARGET_DIR + fi + if [ "$SAVE_OLD_PATCH_TARGET_DIR" != "" ] && [ -d $SAVE_OLD_PATCH_TARGET_DIR ]; then + echo "mv $SAVE_OLD_PATCH_TARGET_DIR $OLD_PATCH_TARGET_DIR" + if [ -d $OLD_PATCH_TARGET_DIR ]; then + \rm -rf $OLD_EXTRACT_TARGET_DIR + fi + mv $SAVE_OLD_PATCH_TARGET_DIR $OLD_PATCH_TARGET_DIR + fi + else + # Figure out where tarball will extract to... + # afterwards ALT_TAR_DIR = common path element found in all files in the tarball + for SOURCE_NO in $(grep -i '^Source[0-9]*:' $SPEC_DIR/$SPEC | awk -F : '{print $1}' | sort --unique --version-sort); do + echo " $SOURCE_NO" + SOURCE_NAME=$(spec_find_tag $SOURCE_NO $SPEC_DIR/$SPEC 2>> /dev/null | awk -F / '{print $NF}') + if [ "x$SOURCE_NAME" != "x" ]; then + echo " $SOURCE_NAME" + TAR="$SOURCE_DIR/$SOURCE_NAME" + echo " TAR=$TAR" + # Where will the tarball install to ... 
put it in ALT_TAR_DIR + if [ -f $TAR ]; then + if [ "$ALT_TAR_DIR" == "" ]; then + if [ "x$ORIG_BRANCH" == "x" ]; then + TAR_HAS_CHANGED=1 + else + cd $SOURCE_DIR + TAR_HAS_CHANGED=$(git diff $BRANCH $ORIG_BRANCH --name-only -- $SOURCE_NAME | wc -l) + cd - >> /dev/null + fi + + echo " TAR_HAS_CHANGED=$TAR_HAS_CHANGED" + + case $SOURCE_NAME in + *.tar.gz) REAL_TYPE=$(file $TAR | awk -F : '{ print $2 }') + # For whatever reason, centos-release-7-2.1511.tar.gz is actually + # an uncompressed tarball, regardless of the name + if [ "$REAL_TYPE" == " POSIX tar archive (GNU)" ]; then + ALT_TAR_DIR=$(tar_cmd_common_dir "tar -tvf $TAR") + else + ALT_TAR_DIR=$(tar_cmd_common_dir "tar -tzvf $TAR") + fi + ;; + *.tgz) ALT_TAR_DIR=$(tar_cmd_common_dir "tar -tzvf $TAR") ;; + *.tar.bz2) ALT_TAR_DIR=$(tar_cmd_common_dir "tar -tjvf $TAR") ;; + *.tar.xz) ALT_TAR_DIR=$(tar_cmd_common_dir "tar -tJvf $TAR") ;; + *.tar) ALT_TAR_DIR=$(tar_cmd_common_dir "tar -tvf $TAR") ;; + *) echo "skipping '$SOURCE_NAME'";; + esac + echo " ALT_TAR_DIR=$ALT_TAR_DIR" + fi + else + echo "ERROR: $FUNCNAME (${LINENO}): '$SOURCE_NAME' not found in '$SOURCE_DIR'" + cd $ORIG_DIR + return 1 + fi + else + echo "WARNING: nothing found by 'spec_find_tag $SOURCE_NO $SPEC_DIR/$SPEC'" + fi + done + + echo "TAR_DIR=$TAR_DIR" + echo "ALT_TAR_DIR=$ALT_TAR_DIR" + + if [ "$ALT_TAR_DIR" == "." ]; then + TAR_EXTRACT_ARG=" -C $TAR_DIR" + elif [ "$ALT_TAR_DIR" != "." ] && [ "$ALT_TAR_DIR" != "" ]; then + if [ $AUTOSETUP -eq 0 ]; then + TAR_DIR="$ALT_TAR_DIR" + else + TAR_DIR="$TAR_DIR/$ALT_TAR_DIR" + fi + fi + + PATCH_TARGET_DIR="$SPEC_GIT/$TAR_DIR" + fi + + export GLOBAL_PATCH_TARGET_DIR="$PATCH_TARGET_DIR" + echo "TAR_DIR=$TAR_DIR" + echo "PATCH_TARGET_DIR=$PATCH_TARGET_DIR" + + if [ -z "$TAR_DIR" ]; then + echo "No tarball found." + return 1 + fi + + if [ "x$NO_META_PATCH" == "x1" ] && [ -d "$TAR_DIR" ] && [ $(ls -1 "$TAR_DIR" | wc -l) -gt 0 ]; then + echo "Tarball already extracted, and we are processing an upgrade. 
Skipping tarball extract" + echo "PATCH_TARGET_DIR=$PATCH_TARGET_DIR" > $OUTPUT_FILE + echo "EXCLUDE_PATCH_NUM_CSV=$EXCLUDE_PATCH_NUM_CSV" >> $OUTPUT_FILE + echo "METHOD=$METHOD" >> $OUTPUT_FILE + echo "RAW_SCRIPT=$RAW_SCRIPT" >> $OUTPUT_FILE + echo "RPMBUILD_BUILD_DIR=$RPMBUILD_BUILD_DIR" >> $OUTPUT_FILE + return 0 + fi + + if [ ! -d "$TAR_DIR" ]; then + mkdir -p $TAR_DIR + fi + + if [ -d "$TAR_DIR" ]; then + cd $TAR_DIR + + (init_git_if_required "." "ORIGINAL: initial commit" "") + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): init_git_if_required failed at '$(pwd)' while extracting '$SPEC_PATH'" + cd $ORIG_DIR + return 1 + fi + + echo "git created at '$(pwd)'" + cd - >> /dev/null + fi + + local NEED_TAR_EXTRACT=1 + + # Set up Branch + if [ -d "$TAR_DIR" ]; then + echo "cd '$TAR_DIR'" + cd $TAR_DIR + pwd + + # Delete old branch if it exists + git checkout $BRANCH &>> /dev/null + if [ $? -eq 0 ]; then + git checkout master + git branch -D $BRANCH + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): failed to delete branch '$BRANCH'" + cd $ORIG_DIR + return 1 + fi + fi + + # Determine origin of our branch + if [ $TAR_HAS_CHANGED -gt 0 ]; then + git checkout master + else + git checkout $ORIG_PRE_WRS_TAG + if [ $? -eq 0 ]; then + NEED_TAR_EXTRACT=0 + else + git checkout master + fi + fi + + cd - >> /dev/null + fi + + # Extract tarball(s) if needed + echo "NEED_TAR_EXTRACT=$NEED_TAR_EXTRACT" + if [ $NEED_TAR_EXTRACT -eq 1 ]; then + + # Create branch + echo "cd $TAR_DIR; git checkout -b $BRANCH" + cd $TAR_DIR + git checkout -b $BRANCH + if [ $? 
-ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): failed to create branch '$BRANCH'" + cd $ORIG_DIR + return 1 + fi + cd - >> /dev/null + + ######################################################################### + if [ $METHOD -eq $METHOD_NO_RPMBUILD ]; then + # Don't use rpmbuild to extrace tarball, instead try to do it for ourselves + for SOURCE_NO in $(grep -i '^Source[0-9]*:' $SPEC_DIR/$SPEC | awk -F : '{print $1}'); do + echo " $SOURCE_NO" + local NO=$(echo $SOURCE_NO | sed 's/Source//') + SOURCE_NAME=$(spec_find_tag $SOURCE_NO $SPEC_DIR/$SPEC 2>> /dev/null | awk -F / '{print $NF}') + echo " $SOURCE_NAME" + TAR="$SOURCE_DIR/$SOURCE_NAME" + echo " $TAR" + if [ -f $TAR ]; then + if [ $NEED_TAR_EXTRACT -eq 1 ]; then + echo "spec_untar_path '$NO' '$SPEC_DIR/$SPEC'" + local UNTAR_PATH=$(spec_untar_path "$NO" "$SPEC_DIR/$SPEC") + echo "UNTAR_PATH=$UNTAR_PATH" + mkdir -p $UNTAR_PATH + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): command failed: mkdir -p $UNTAR_PATH" + cd $ORIG_DIR + return 1 + fi + ( + cd $UNTAR_PATH + case $SOURCE_NAME in + *.tar.gz) REAL_TYPE=$(file $TAR | awk -F : '{ print $2 }') + # For whatever reason, centos-release-7-2.1511.tar.gz is actually + # an uncompressed tarball, regardless of the name + if [ "$REAL_TYPE" == " POSIX tar archive (GNU)" ]; then + tar_cmd_common_dir "tar -xvf $TAR $TAR_EXTRACT_ARG" + else + tar_cmd_common_dir "tar -xzvf $TAR $TAR_EXTRACT_ARG" + fi + ;; + *.tgz) tar -xzvf $TAR $TAR_EXTRACT_ARG ;; + *.tar.bz2) tar -xjvf $TAR $TAR_EXTRACT_ARG ;; + *.tar.xz) tar -xJvf $TAR $TAR_EXTRACT_ARG ;; + *.tar) tar -xvf $TAR $TAR_EXTRACT_ARG ;; + *) echo "skipping '$SOURCE_NAME'";; + esac + exit $? + ) + if [ $? 
-ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): tar failed to extract '$TAR'" + cd $ORIG_DIR + return 1 + fi + fi + else + echo "ERROR: $FUNCNAME (${LINENO}): '$SOURCE_NAME' not found in '$SOURCE_DIR'" + cd $ORIG_DIR + return 1 + fi + done + fi + + ######################################################################### + if [ $METHOD -eq $METHOD_RPMBUILD_UNPATCH ]; then + if [ -d "$RPMBUILD_BUILD_DIR" ]; then + \rm -rf "$RPMBUILD_BUILD_DIR" + fi + mkdir -p $RPMBUILD_BUILD_DIR + + # The following rpmbuild will extract all tarballs, run any other prep script, and apply all patches + + local NEED_PATCH_ROLLBACK=0 + local LAST_PATCH=$(grep '^%patch[0-9]' $SPEC_DIR/$SPEC | tail -n 1 | awk '{ print $1 }') + if [ "x$LAST_PATCH" == "x" ]; then + cat $SPEC_DIR/$SPEC | grep -v '^git ' > $SPEC_DIR/_$SPEC + else + cat $SPEC_DIR/$SPEC | grep -v '^git ' | grep -v '^%build' | sed "/$LAST_PATCH/a %build" > $SPEC_DIR/_$SPEC + NEED_PATCH_ROLLBACK=1 + fi + + if [ -f $RPMBUILD_BP_LOG ]; then + \rm -f $RPMBUILD_BP_LOG + fi + touch $RPMBUILD_BP_LOG + # Note stdout and stderr go to same file, must not use 2>&1 syntax as it doesn't guarantee order + # Build the srpm as though for std build, for naming consistency + echo "rpmbuild -bp $SPEC_DIR/_$SPEC --root $ROOT_DIR/$PKG_DIR --define='%_topdir $ROOT_DIR/$BUILD_DIR' --define='_tis_dist .tis' --nodeps --target $TARGET_ARCH >> $RPMBUILD_BP_LOG 2>> $RPMBUILD_BP_LOG" + rpmbuild -bp $SPEC_DIR/_$SPEC --root $ROOT_DIR/$PKG_DIR \ + --define="%_topdir $ROOT_DIR/$BUILD_DIR" \ + --define='_tis_dist .tis' \ + --define="_tis_build_type $BUILD_TYPE" \ + --nodeps --target $TARGET_ARCH >> $RPMBUILD_BP_LOG 2>> $RPMBUILD_BP_LOG + if [ $? 
-ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): command failed: rpmbuild -bp $SPEC_DIR/$SPEC --root $ROOT_DIR/$PKG_DIR --define='%_topdir $ROOT_DIR/$BUILD_DIR' --define='_tis_dist .tis' --nodeps --target $TARGET_ARCH > $RPMBUILD_BP_LOG" + cd $ORIG_DIR + return 1 + fi + + \rm -f $SPEC_DIR/_$SPEC + + if [ $NEED_PATCH_ROLLBACK -eq 1 ]; then + # But we don't want patches yet, so roll them back. + # Use the log from rpmbuild to learn what patches to roll back, in what order, and with what arguements + for n in `grep '^[Pp]atch #' $RPMBUILD_BP_LOG | tac | awk '{ print $2 }' | sed 's/#//'`; do + cmd1=$(cat $RPMBUILD_BP_LOG | sed -n "/^[Pp]atch #$n /,/^patching/p" | grep '^+' | sed 's/^+ //' | grep '[/]cat') + cmd2=$(cat $RPMBUILD_BP_LOG | sed -n "/^[Pp]atch #$n /,/^patching/p" | grep '^+' | sed 's/^+ //' | grep '[/]patch') + cmd="$cmd1 | $cmd2 -R" + ( + echo "Remove patch #$n" + cd $RPMBUILD_BUILD_DIR/$TAR_DIR + echo "$cmd" + eval $cmd + if [ ${PIPESTATUS[0]} -ne 0 ] ; then + echo "ERROR: $FUNCNAME (${LINENO}): failed command: $cmd" + return 1 + fi + ) + if [ $? -ne 0 ]; then + return 1 + fi + done + fi + + echo "find $RPMBUILD_BUILD_DIR/$TAR_DIR/ -mindepth 1 -maxdepth 1 -exec mv -t $SPEC_GIT/$TAR_DIR/ -- {} +" + find $RPMBUILD_BUILD_DIR/$TAR_DIR/ -mindepth 1 -maxdepth 1 -exec mv -t $SPEC_GIT/$TAR_DIR/ -- {} + + + \rm -rf "$RPMBUILD_BUILD_DIR" + + grep '^%patch[0-9]* ' $SPEC_DIR/$SPEC > /dev/null + if [ $? -eq 0 ];then + echo "Using '%patch' method" + + local PATCH_NO="" + # for PATCH_NO in $(grep '^%patch[0-9]* ' $SPEC_DIR/$SPEC | awk '{print $1}' | sed 's/^%patch//') ; do + for PATCH_NO in $(grep -i '^[Pp]atch[0-9]*:' "$SPEC_DIR/$SPEC" | awk -F : '{print $1}' | sed 's/^[Pp]atch//' | sort --unique --version-sort); do + grep "^[Pp]atch #$PATCH_NO " $RPMBUILD_BP_LOG + if [ $? 
-ne 0 ]; then + if [ "x$EXCLUDE_PATCH_NUM_CSV" == "x" ]; then + EXCLUDE_PATCH_NUM_CSV="$PATCH_NO" + else + EXCLUDE_PATCH_NUM_CSV="$EXCLUDE_PATCH_NUM_CSV,$PATCH_NO" + fi + fi + done + else + grep '^git am' $SPEC_DIR/$SPEC > /dev/null + if [ $? -eq 0 ];then + echo "Using 'git am' method, EXCLUDE_PATCH_NUM_CSV=''" + else + echo "Warning: no known patch apply command, EXCLUDE_PATCH_NUM_CSV=''" + fi + fi + fi + + ######################################################################### + if [ $METHOD -eq $METHOD_RPMBUILD_SCRIPT ]; then + ( + # SAL + source $EXTRACT_SCRIPT + RC=$? + echo "SRPM_EXTRACT_DIR=$(pwd)" + exit $RC + ) | tee $EXTRACT_SCRIPT.log + if [ ${PIPESTATUS[0]} -ne 0 ] ; then + echo "ERROR: Failed in script '$EXTRACT_SCRIPT'" + cd $ORIG_DIR + return 1 + fi + + local TMP_PATCH_TARGET_DIR=$(cat $PATCH_DIR_FILE) + if [ "x$TMP_PATCH_TARGET_DIR" != "x" ]; then + export GLOBAL_PATCH_TARGET_DIR=$TMP_PATCH_TARGET_DIR + echo "EXTRACT_TO_DIR=$EXTRACT_TO_DIR" + echo "GLOBAL_PATCH_TARGET_DIR=$GLOBAL_PATCH_TARGET_DIR" + EXTRACT_TO_DIR="$GLOBAL_PATCH_TARGET_DIR" + fi + + if [ -z "$EXTRACT_TO_DIR" ]; then + echo "Failed to get EXTRACT_TO_DIR from raw_create_tarballs_extract_script" + cd $ORIG_DIR + return 1 + fi + + if [ "$EXTRACT_TO_DIR" != "$PATCH_TARGET_DIR" ]; then + echo "Change PATCH_TARGET_DIR from '$PATCH_TARGET_DIR' to '$EXTRACT_TO_DIR'" + PATCH_TARGET_DIR="$EXTRACT_TO_DIR" + export GLOBAL_PATCH_TARGET_DIR="$PATCH_TARGET_DIR" + fi + + echo "rm -rf $RPMBUILD_BUILD_DIR" + \rm -rf "$RPMBUILD_BUILD_DIR" + + + fi + fi + + echo "aaa TAR_DIR=$TAR_DIR" + if [ ! -d "$TAR_DIR" ]; then + echo "ERROR: $FUNCNAME (${LINENO}): Failed to create expected TAR_DIR='$TAR_DIR' from $(pwd)" + cd $ORIG_DIR + return 1 + fi + + # track extracted tarball in git + cd "$TAR_DIR" + echo "NEED_TAR_EXTRACT=$NEED_TAR_EXTRACT" + echo "cd PATCH_TARGET_DIR=$PATCH_TARGET_DIR" + cd "$PATCH_TARGET_DIR" + + if [ $NEED_TAR_EXTRACT -eq 1 ]; then + commit_git "." 
"ORIGINAL: extracted archive" "" + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): commit_git failed (post tarball extracted) while extracting '$TAR'" + cd $ORIG_DIR + return 1 + fi + fi + + echo "PATCH_TARGET_DIR=$PATCH_TARGET_DIR" > $OUTPUT_FILE + echo "EXCLUDE_PATCH_NUM_CSV=$EXCLUDE_PATCH_NUM_CSV" >> $OUTPUT_FILE + echo "METHOD=$METHOD" >> $OUTPUT_FILE + echo "RAW_SCRIPT=$RAW_SCRIPT" >> $OUTPUT_FILE + echo "RPMBUILD_BUILD_DIR=$RPMBUILD_BUILD_DIR" >> $OUTPUT_FILE + return 0 +} + +tar_and_spec_extract_to_git () { + local SPEC_PATH=$1 + local WRS_PKG_DIR=$2 + local ROOT_DIR=$3 + local BUILD_DIR=$4 + local BRANCH=$5 + local NO_META_PATCH=$6 + local TIS_PATCH_VER=$7 + local USE_GIT=1 + local TARGET_ARCH=x86_64 + + if [ ! -f $SPEC_PATH ]; then + echo "ERROR: $FUNCNAME (${LINENO}): spec not found '$SPEC_PATH'" + return 1 + fi + + local ORIG_DIR=`pwd` + + if [ "x$ROOT_DIR" == "x" ]; then + ROOT_DIR="$MY_WORKSPACE/srpm_work" + fi + + if [ "x$BUILD_DIR" == "x" ]; then + BUILD_DIR="$PKG_DIR/rpmbuild" + fi + + if [ "x$BRANCH" == "x" ]; then + BRANCH="work" + fi + + local SPEC_DIR="$ROOT_DIR/$BUILD_DIR/SPECS" + local SOURCE_DIR="$ROOT_DIR/$BUILD_DIR/SOURCES" + local GIT_DIR="$ROOT_DIR/$(dirname $BUILD_DIR)/gits" + local PATCH_TARGET_DIR + local META_PATCH_TARGET_DIR="$ROOT_DIR/$BUILD_DIR" + local ARCH=centos + local ORIG_BRANCH="" + local PRE_WRS_PREFIX="pre_wrs_" + local WRS_POST_COPY_PREFIX="wrs_post_copy_list_" + local PRE_WRS_TAG="$PRE_WRS_PREFIX$BRANCH" + local WRS_POST_COPY_TAG="$WRS_POST_COPY_PREFIX$BRANCH" + local ORIG_PRE_WRS_TAG="" + local THIS_FUNC + + if [ "x$WRS_PKG_DIR" != "x" ]; then + if [ ! -d $WRS_PKG_DIR ]; then + echo "ERROR: $FUNCNAME (${LINENO}): WRS_PKG_DIR not found '$WRS_PKG_DIR'" + return 1 + fi + fi + + if [ ! -d $ROOT_DIR ]; then + mkdir -p "$ROOT_DIR" + fi + + if [ ! 
-d $ROOT_DIR/$BUILD_DIR ]; then + mkdir -p "$ROOT_DIR/$BUILD_DIR" + else + if [ "x$NO_META_PATCH" != "x1" ]; then + echo "" + echo "Warning: Refusing to overwrite pre-existing edit environment for '$PKG_DIR'." + echo " To delete the old edit environment use: --edit --clean " + return 2 + fi + fi + + prep_git_for_metadata "$ROOT_DIR/$BUILD_DIR" "$BRANCH" $NO_META_PATCH "$PRE_WRS_PREFIX" + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): 'prep_git_for_metadata' failed while extracting '$SPEC_PATH'" + cd $ORIG_DIR + return 1 + fi + + # Copy SPEC and TAR + mkdir -p "$SPEC_DIR" + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): Failed to create directory '$SPEC_DIR'" + return 1 + fi + + mkdir -p "$SOURCE_DIR" + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): Failed to create directory '$SOURCE_DIR'" + return 1 + fi + + cp -f "$SPEC_PATH" "$SPEC_DIR" + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): Failed to copy '$SPEC_PATH' to '$SPEC_DIR'" + return 1 + fi + + # Add and Commit + commit_git "$ROOT_DIR/$BUILD_DIR" "WRS: spec file" "$PRE_WRS_TAG" + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): commit_git failed while extracting '$SPEC_PATH'" + cd $ORIG_DIR + return 1 + fi + + + local SPEC_GIT + local PKG_NAME + local PKG_VER + local TAR_DIR + local TAR + local SOURCE_NO + local SOURCE_NAME + local PATCH_NO + local PATCH_NAME + local NUM_TAR + local TAR_LIST + + + for SPEC in $(cd $SPEC_DIR; ls -1 *.spec); do + echo $SPEC; + SPEC_GIT="$GIT_DIR/$SPEC" + PKG_NAME=$(spec_find_tag Name $SPEC_DIR/$SPEC 2>> /dev/null) + PKG_VER=$(spec_find_tag Version $SPEC_DIR/$SPEC 2>> /dev/null) + TAR_DIR="$PKG_NAME-$PKG_VER" + echo " $TAR_DIR" + + local TAR_HAS_CHANGED + + TAR_HAS_CHANGED=1 + + # Copy content from COPY_LIST if defined + if [ "x$COPY_LIST" != "x" ]; then + echo "COPY_LIST: $COPY_LIST" + cd $WRS_PKG_DIR + for p in $COPY_LIST; do + echo "COPY_LIST: $p" + \cp -L -f -r -v $p $META_PATCH_TARGET_DIR/SOURCES + if [ $? 
-ne 0 ]; then + echo "ERROR: COPY_LIST: file not found: '$p'" + cd $ORIG_DIR + return 1 + fi + done + + cd - >> /dev/null + + # Add and Commit + commit_git "$META_PATCH_TARGET_DIR" "WRS: COPY_LIST content" "$WRS_POST_COPY_TAG" + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): commit_git failed while extracting '$SPEC_PATH'" + cd $ORIG_DIR + return 1 + fi + fi + + local PATCH_TARGET_DIR="" + local EXCLUDE_PATCH_NUM_CSV="" + local METHOD="" + local RAW_SCRIPT="" + local RPMBUILD_BUILD_DIR="" + local OUTPUT_FILE="$ROOT_DIR/$PKG_DIR/tarball_extract_result" + + tarball_extract "$SPEC_DIR" "$SPEC" "$SPEC_GIT" "$SOURCE_DIR" "$BRANCH" "$ORIG_BRANCH" "$TAR_DIR" "$ROOT_DIR" "$PKG_DIR" "$BUILD_DIR" "$TARGET_ARCH" "$TIS_PATCH_VER" "$OUTPUT_FILE" + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): tarball_extract failed while extracting '$SPEC_PATH'" + cd $ORIG_DIR + return 1 + fi + + source $OUTPUT_FILE + + # Apply patches named in spec file. + echo "srpm_apply_spec_patches '$SPEC_DIR/$SPEC' '$SOURCE_DIR' '$PATCH_TARGET_DIR' '$EXCLUDE_PATCH_NUM_CSV' $USE_GIT '' '$METHOD' '$RAW_SCRIPT' '$ROOT_DIR' '$RPMBUILD_BUILD_DIR' '$SPEC_GIT' '$BRANCH'" + srpm_apply_spec_patches "$SPEC_DIR/$SPEC" "$SOURCE_DIR" "$PATCH_TARGET_DIR" "$EXCLUDE_PATCH_NUM_CSV" $USE_GIT "" $METHOD "$RAW_SCRIPT" "$ROOT_DIR" "$RPMBUILD_BUILD_DIR" "$SPEC_GIT" "$BRANCH" + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): srpm_apply_spec_patches failed while extracting '$SPEC_PATH'" + cd $ORIG_DIR + return 1 + fi + + done + +} + + + +srpm_extract_to_git () { + local ORIG_SRPM_PATH=$1 + local WRS_PKG_DIR=$2 + local ROOT_DIR=$3 + local BUILD_DIR=$4 + local BRANCH=$5 + local NO_META_PATCH=$6 + local TIS_PATCH_VER=$7 + local USE_GIT=1 + local TARGET_ARCH=x86_64 + + if [ ! 
-f $ORIG_SRPM_PATH ]; then + echo "ERROR: $FUNCNAME (${LINENO}): srpm not found '$ORIG_SRPM_PATH'" + return 1 + fi + + local ORIG_DIR=`pwd` + local PKG_DIR=`rpm -q --queryformat '%{NAME}\n' --nosignature -p $ORIG_SRPM_PATH` + + if [ "x$ROOT_DIR" == "x" ]; then + ROOT_DIR="$MY_WORKSPACE/srpm_work" + fi + + if [ "x$BUILD_DIR" == "x" ]; then + BUILD_DIR="$PKG_DIR/rpmbuild" + fi + + if [ "x$BRANCH" == "x" ]; then + BRANCH="work" + fi + + local SPEC_DIR="$ROOT_DIR/$BUILD_DIR/SPECS" + local SOURCE_DIR="$ROOT_DIR/$BUILD_DIR/SOURCES" + local GIT_DIR="$ROOT_DIR/$(dirname $BUILD_DIR)/gits" + local PATCH_TARGET_DIR + local META_PATCH_TARGET_DIR="$ROOT_DIR/$BUILD_DIR" + local ARCH=centos + local ORIG_BRANCH="" + local PRE_WRS_PREFIX="pre_wrs_" + local WRS_POST_COPY_PREFIX="wrs_post_copy_list_" + local PRE_WRS_TAG="$PRE_WRS_PREFIX$BRANCH" + local WRS_POST_COPY_TAG="$WRS_POST_COPY_PREFIX$BRANCH" + local ORIG_PRE_WRS_TAG="" + local THIS_FUNC + + + if [ "x$WRS_PKG_DIR" != "x" ]; then + if [ ! -d $WRS_PKG_DIR ]; then + echo "ERROR: $FUNCNAME (${LINENO}): WRS_PKG_DIR not found '$WRS_PKG_DIR'" + return 1 + fi + fi + + if [ ! -d $ROOT_DIR ]; then + mkdir -p "$ROOT_DIR" + fi + + if [ ! -d $ROOT_DIR/$BUILD_DIR ]; then + mkdir -p "$ROOT_DIR/$BUILD_DIR" + else + if [ "x$NO_META_PATCH" != "x1" ]; then + echo "" + echo "Warning: Refusing to overwrite pre-existing edit environment for '$PKG_DIR'." + echo " To delete the old edit environment use: --edit --clean " + return 2 + fi + fi + + prep_git_for_metadata "$ROOT_DIR/$BUILD_DIR" "$BRANCH" $NO_META_PATCH "$PRE_WRS_PREFIX" + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): prep_git_for_metadata failed while extracting '$ORIG_SRPM_PATH'" + cd $ORIG_DIR + return 1 + fi + + # Extract src.rpm + echo "rpm -i --nosignature --root=$ROOT_DIR --define='%_topdir $BUILD_DIR' $ORIG_SRPM_PATH" + rpm -i --nosignature --root=$ROOT_DIR --define="%_topdir $BUILD_DIR" $ORIG_SRPM_PATH + if [ $? 
-ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): Failed to extract '$ORIG_SRPM_PATH' to '$ROOT_DIR/$BUILD_DIR'" + return 1 + fi + + # Add and Commit + commit_git "$ROOT_DIR/$BUILD_DIR" "ORIGINAL: srpm extract" "$PRE_WRS_TAG" + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): commit_git failed while extracting '$ORIG_SRPM_PATH'" + cd $ORIG_DIR + return 1 + fi + + local SPEC_GIT + local PKG_NAME + local PKG_VER + local TAR_DIR + local TAR + local SOURCE_NO + local SOURCE_NAME + local PATCH_NO + local PATCH_NAME + local NUM_TAR + local TAR_LIST + + + for SPEC in $(cd $SPEC_DIR; ls -1 *.spec); do + echo $SPEC; + SPEC_GIT="$GIT_DIR/$SPEC" + PKG_NAME=$(spec_find_tag Name $SPEC_DIR/$SPEC 2>> /dev/null) + PKG_VER=$(spec_find_tag Version $SPEC_DIR/$SPEC 2>> /dev/null) + TAR_DIR="$PKG_NAME-$PKG_VER" + echo " $TAR_DIR" + + local TAR_HAS_CHANGED + + TAR_HAS_CHANGED=1 + + local PATCH_TARGET_DIR="" + local EXCLUDE_PATCH_NUM_CSV="" + local METHOD="" + local RAW_SCRIPT="" + local RPMBUILD_BUILD_DIR="" + local OUTPUT_FILE="$ROOT_DIR/$PKG_DIR/tarball_extract_result" + + tarball_extract "$SPEC_DIR" "$SPEC" "$SPEC_GIT" "$SOURCE_DIR" "$BRANCH" "$ORIG_BRANCH" "$TAR_DIR" "$ROOT_DIR" "$PKG_DIR" "$BUILD_DIR" "$TARGET_ARCH" "$TIS_PATCH_VER" "$OUTPUT_FILE" "$NO_META_PATCH" + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): tarball_extract failed while extracting '$ORIG_SRPM_PATH'" + cd $ORIG_DIR + return 1 + fi + + source $OUTPUT_FILE + + # Apply patches named in original spec file... before our meta patches + echo "srpm_apply_spec_patches '$SPEC_DIR/$SPEC' '$SOURCE_DIR' '$PATCH_TARGET_DIR' '$EXCLUDE_PATCH_NUM_CSV' $USE_GIT 'ORIGINAL: ' '$METHOD' '$RAW_SCRIPT' '$ROOT_DIR' '$RPMBUILD_BUILD_DIR' '$SPEC_GIT' '$BRANCH'" + srpm_apply_spec_patches "$SPEC_DIR/$SPEC" "$SOURCE_DIR" "$PATCH_TARGET_DIR" "$EXCLUDE_PATCH_NUM_CSV" $USE_GIT "ORIGINAL: " $METHOD "$RAW_SCRIPT" "$ROOT_DIR" "$RPMBUILD_BUILD_DIR" "$SPEC_GIT" "$BRANCH" + if [ $? 
-ne 0 ]; then + cd $ORIG_DIR + echo "ERROR: $FUNCNAME (${LINENO}): srpm_apply_spec_patches failed while extracting '$ORIG_SRPM_PATH'" + return 1 + fi + + if [ "$GLOBAL_PATCH_TARGET_DIR" != "$PATCH_TARGET_DIR" ]; then + echo "changing PATCH_TARGET_DIR from $PATCH_TARGET_DIR to $GLOBAL_PATCH_TARGET_DIR" + PATCH_TARGET_DIR="$GLOBAL_PATCH_TARGET_DIR" + fi + cd $PATCH_TARGET_DIR + + # Verify we are on the correct branch + CURRENT_BRANCH=`git rev-parse --abbrev-ref HEAD` + if [ "$CURRENT_BRANCH" != "$BRANCH" ]; then + echo "git checkout -b $BRANCH" + git checkout -b $BRANCH + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): failed to create branch '$BRANCH'" + cd $ORIG_DIR + return 1 + fi + fi + + # Tag the pre-wrs-patches contents + git tag $PRE_WRS_TAG + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): 'git tag' failed for 'rpmbuild'" + cd $ORIG_DIR + return 1 + fi + + # Copy content from COPY_LIST if defined + if [ "x$COPY_LIST" != "x" ]; then + echo "COPY_LIST: $COPY_LIST" + cd $WRS_PKG_DIR + for p in $COPY_LIST; do + echo "COPY_LIST: $p" + \cp -L -f -r -v $p $META_PATCH_TARGET_DIR/SOURCES + if [ $? -ne 0 ]; then + echo "ERROR: COPY_LIST: file not found: '$p'" + cd $ORIG_DIR + return 1 + fi + done + + cd - >> /dev/null + + # Add and Commit + commit_git "$META_PATCH_TARGET_DIR" "WRS: COPY_LIST content" "$WRS_POST_COPY_TAG" + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): commit_git failed while extracting '$ORIG_SRPM_PATH'" + cd $ORIG_DIR + return 1 + fi + fi + + # Apply WRS patches + if [ "x$NO_META_PATCH" != "x1" ]; then + if [ "x$WRS_PKG_DIR" != "x" ]; then + # Apply wrs patches to spec file and other meta-data + echo "srpm_apply_meta_patches '$META_PATCH_TARGET_DIR' '$WRS_PKG_DIR' $USE_GIT '$ARCH' '$BRANCH'" + srpm_apply_meta_patches "$META_PATCH_TARGET_DIR" "$WRS_PKG_DIR" $USE_GIT "$ARCH" "$BRANCH" + if [ $? 
-ne 0 ]; then + cd $ORIG_DIR + return 1 + fi + + RAW_SCRIPT=$ROOT_DIR/$PKG_DIR/raw_script2 + + local RPMBUILD_BUILD_DIR2=$ROOT_DIR/$BUILD_DIR/BUILD + if [ -d "$RPMBUILD_BUILD_DIR2" ]; then + echo "rm -rf RPMBUILD_BUILD_DIR2=$RPMBUILD_BUILD_DIR" + \rm -rf "$RPMBUILD_BUILD_DIR2" + fi + mkdir -p $RPMBUILD_BUILD_DIR2 + + srpm_create_raw_extract_script $SPEC_DIR/$SPEC $ROOT_DIR/$PKG_DIR $ROOT_DIR/$BUILD_DIR $TARGET_ARCH $TIS_PATCH_VER $RAW_SCRIPT + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): srpm_create_raw_extract_script post meta-patches failed" + cd $ORIG_DIR + return 1 + fi + + if [ -d "$RPMBUILD_BUILD_DIR2" ]; then + echo "rm -rf RPMBUILD_BUILD_DIR2=$RPMBUILD_BUILD_DIR" + \rm -rf "$RPMBUILD_BUILD_DIR2" + fi + mkdir -p $RPMBUILD_BUILD_DIR2 + + EXTRACT_SCRIPT=$ROOT_DIR/$PKG_DIR/extract_script2 + echo "raw_create_tarballs_extract_script_post_metapatch '$RAW_SCRIPT' '$EXTRACT_SCRIPT' '$RPMBUILD_BUILD_DIR' '$SPEC_GIT'" + raw_create_tarballs_extract_script_post_metapatch "$RAW_SCRIPT" "$EXTRACT_SCRIPT" "$RPMBUILD_BUILD_DIR" "$SPEC_GIT" + if [ $? -ne 0 ]; then + echo "ERROR: $FUNCNAME (${LINENO}): raw_create_tarballs_extract_script_post_metapatch failed" + cd $ORIG_DIR + return 1 + fi + + ( + source $EXTRACT_SCRIPT + RC=$? + echo "SRPM_EXTRACT_DIR=$(pwd)" + exit $RC + ) | tee $EXTRACT_SCRIPT.post.log + if [ ${PIPESTATUS[0]} -ne 0 ] ; then + echo "ERROR: Failed in script '$EXTRACT_SCRIPT'" + cd $ORIG_DIR + return 1 + fi + + + # Apply wrs patches named in modified spec file. + echo "srpm_apply_spec_patches '$SPEC_DIR/$SPEC' '$SOURCE_DIR' '$PATCH_TARGET_DIR' '$EXCLUDE_PATCH_NUM_CSV' $USE_GIT 'WRS: ' '$METHOD' '$RAW_SCRIPT' '$ROOT_DIR' '$RPMBUILD_BUILD_DIR' '$SPEC_GIT' '$BRANCH'" + srpm_apply_spec_patches "$SPEC_DIR/$SPEC" "$SOURCE_DIR" "$PATCH_TARGET_DIR" "$EXCLUDE_PATCH_NUM_CSV" $USE_GIT "WRS: " $METHOD "$RAW_SCRIPT" "$ROOT_DIR" "$RPMBUILD_BUILD_DIR" "$SPEC_GIT" "$BRANCH" + if [ $? 
-ne 0 ]; then
            echo "ERROR: $FUNCNAME (${LINENO}): srpm_apply_spec_patches failed"
            cd $ORIG_DIR
            return 1
         fi
      fi
   fi

   done

   echo "Successfully extracted to: $BUILD_DIR"
   cd $ORIG_DIR
   return 0
}




# Apply the patches named by a spec file to an extracted source tree.
#
# Arguments:
#   $1  SPEC_PATH             - path to the spec file that names the patches
#   $2  PATCH_DIR             - directory holding the patch files
#   $3  PATCH_TARGET_DIR      - source tree the patches are applied to
#   $4  EXCLUDE_PATCH_NUM_CSV - comma-separated patch numbers to skip
#   $5  USE_GIT               - non-zero: commit each patch to git
#   $6  COMMEN_PREFIX         - prefix for per-patch git commit messages
#   $7  METHOD                - patch-apply strategy (see METHOD_RPMBUILD_SCRIPT)
#   $8  RAW_SCRIPT            - raw rpmbuild script (METHOD_RPMBUILD_SCRIPT only)
#   $9  ROOT_DIR              - build root (passed through to srpm_apply_patch)
#   $10 RPMBUILD_BUILD_DIR    - rpmbuild BUILD dir (passed through)
#   $11 SPEC_GIT              - spec git dir (passed through)
#   $12 BRANCH                - git branch patches are committed on
#
# Returns 0 on success, 1 on any failure (restores the caller's cwd).
srpm_apply_spec_patches () {
   local SPEC_PATH=${1}
   local PATCH_DIR=${2}
   local PATCH_TARGET_DIR=${3}
   local EXCLUDE_PATCH_NUM_CSV=${4}
   local USE_GIT=${5}
   local COMMEN_PREFIX=${6}
   local METHOD=${7}
   local RAW_SCRIPT=${8}
   local ROOT_DIR=${9}
   local RPMBUILD_BUILD_DIR=${10}
   local SPEC_GIT=${11}
   local BRANCH=${12}


   local PATCH_NO
   local PATCH_NAME
   local PATCH
   local PATCH_ARGS

   # Remember where we started so every exit path can cd back.
   local ORIG_DIR=`pwd`
   echo "Applying patches"

   if [ ! -f "$SPEC_PATH" ]; then
      echo "ERROR: $FUNCNAME (${LINENO}): Can't find spec file at '$SPEC_PATH'"
      return 1
   fi

   if [ ! -d "$PATCH_DIR" ]; then
      echo "ERROR: $FUNCNAME (${LINENO}): Patch directory not found '$PATCH_DIR'"
      return 1
   fi

   cd $PATCH_TARGET_DIR
   if [ $? -ne 0 ]; then
      echo "ERROR: $FUNCNAME (${LINENO}): Failed to cd to Target directory '$PATCH_TARGET_DIR'"
      return 1
   fi

   # Add patches
   # Build PL, the ordered list of patch numbers to apply.  The source of
   # that ordering depends on how the spec applies its patches:
   #   - METHOD_RPMBUILD_SCRIPT: order comes from the raw rpmbuild script
   #   - '%patch' lines: numbers taken directly from the %patch directives
   #   - 'git am' / 'xargs git am': numbers taken from the PatchNNN: tags,
   #     version-sorted
   local PL=""
   if [ "$METHOD" -eq $METHOD_RPMBUILD_SCRIPT ]; then
      PL=$(raw_patch_order $RAW_SCRIPT $SPEC_PATH)
      if [ $? -ne 0 ];then
         echo "ERROR: $FUNCNAME (${LINENO}): raw_patch_order failed on RAW_SCRIPT=$RAW_SCRIPT"
         return 1
      fi
   else
      grep '^%patch[0-9]* ' $SPEC_PATH > /dev/null
      if [ $? -eq 0 ];then
         echo "Using '%patch' method"
         PL=$(grep '^%patch[0-9]* ' $SPEC_PATH | awk '{print $1}' | sed 's/^%patch//')
      else
         grep '^git am' $SPEC_PATH > /dev/null
         if [ $? -eq 0 ];then
            echo "Using 'git am' method"
            PL=$(grep -i '^[Pp]atch[0-9]*:' $SPEC_PATH | awk -F : '{print $1}' | sed 's/^[Pp]atch//' | sort --unique --version-sort)
         else
            grep '^xargs git am' $SPEC_PATH > /dev/null
            if [ $? -eq 0 ];then
               echo "Using 'xargs git am' method"
               PL=$(grep -i '^[Pp]atch[0-9]*:' $SPEC_PATH | awk -F : '{print $1}' | sed 's/^[Pp]atch//' | sort --unique --version-sort)
            else
               # No recognizable apply command; PL stays empty and the
               # loop below is skipped.
               echo "Warning: no known patch apply command"
            fi
         fi
      fi
   fi

   local PATCH_COUNT
   if [ "x$PL" != "x" ];then
      PATCH_COUNT=0
      for PATCH_NO in $PL ; do
         # 1-based position of this patch in the apply order; forwarded to
         # srpm_apply_patch as PATCH_COUNT_TARGET.
         PATCH_COUNT=$((PATCH_COUNT + 1))

         # Skip patch numbers listed in EXCLUDE_PATCH_NUM_CSV.
         local EXCLUDED=0
         for EXCLUDE_PATCH_NO in $(echo $EXCLUDE_PATCH_NUM_CSV | tr ',' ' '); do
            if [ $EXCLUDE_PATCH_NO == $PATCH_NO ]; then
               EXCLUDED=1
               break
            fi
         done

         if [ $EXCLUDED -eq 1 ]; then
            echo " Exclude Patch$PATCH_NO"
            continue
         fi

         local PATCH_NM
         PATCH_NM="Patch$PATCH_NO"
         echo " $PATCH_NM"

         # Resolve the patch number to a file name.  Specs may use either
         # 'PatchN:' or lower-case 'patchN:' tags, so try both.
         if [ "$METHOD" -eq $METHOD_RPMBUILD_SCRIPT ]; then
            PATCH_NAME=$(raw_extract_patch_file $RAW_SCRIPT $PATCH_NO $SPEC_PATH)
         else
            PATCH_NAME=$(spec_find_tag $PATCH_NM $SPEC_PATH 2>> /dev/null | awk -F / '{print $NF}')
            if [ "x$PATCH_NAME" == "x" ]; then
               PATCH_NM="patch$PATCH_NO"
               echo " $PATCH_NM"
               PATCH_NAME=$(spec_find_tag $PATCH_NM $SPEC_PATH 2>> /dev/null | awk -F / '{print $NF}')
            fi
         fi

         echo " $PATCH_NAME"
         PATCH="$PATCH_DIR/$PATCH_NAME"

         # -p strip level: the raw script method always uses -p1; otherwise
         # read the args off the %patch line in the spec.
         if [ "$METHOD" -eq $METHOD_RPMBUILD_SCRIPT ]; then
            PATCH_ARGS="-p1"
         else
            PATCH_ARGS=$(spec_find_patch_args "$PATCH_NM" "$SPEC_PATH")
         fi

         echo "srpm_apply_patch '$PATCH' '$PATCH_ARGS' '$PATCH_TARGET_DIR' '$USE_GIT' '$COMMEN_PREFIX$PATCH_NM: ' '$METHOD' '$RAW_SCRIPT' '$ROOT_DIR' '$RPMBUILD_BUILD_DIR' '$SPEC_GIT' '$PATCH_NO' '$BRANCH' '$SPEC_PATH' '$PATCH_COUNT'"
         srpm_apply_patch "$PATCH" "$PATCH_ARGS" "$PATCH_TARGET_DIR" $USE_GIT "$COMMEN_PREFIX$PATCH_NM: " $METHOD "$RAW_SCRIPT" "$ROOT_DIR" "$RPMBUILD_BUILD_DIR" "$SPEC_GIT" "$PATCH_NO" "$BRANCH" "$SPEC_PATH" $PATCH_COUNT
         if [ $?
-ne 0 ]; then
            echo "ERROR: $FUNCNAME (${LINENO}): failed to apply patch '$PATCH'"
            cd $ORIG_DIR
            return 1
         fi
      done
   fi

   cd $ORIG_DIR
   return 0
}



# Apply a single patch file to a target tree and (optionally) record it as a
# git commit.
#
# Arguments:
#   $1  PATCH              - path to the patch file
#   $2  PATCH_ARGS         - args for the 'patch' command (e.g. -p1)
#   $3  TARGET_DIR         - tree to patch (defaults to cwd if empty)
#   $4  USE_GIT            - non-zero: commit the result to git
#   $5  COMMENT_PREFIX     - prefix for the git commit message
#   $6  METHOD             - apply strategy (METHOD_RPMBUILD_SCRIPT or direct)
#   $7  RAW_SCRIPT         - raw rpmbuild script (script method only)
#   $8  ROOT_DIR           - build root (script method only)
#   $9  RPMBUILD_BUILD_DIR - rpmbuild BUILD dir (script method only)
#   $10 SPEC_GIT           - spec git dir (script method only)
#   $11 PATCH_NO           - patch number within the spec
#   $12 BRANCH             - git branch to commit on (tag "v$BRANCH" marks base)
#   $13 SPEC_PATH          - path to the spec file
#   $14 PATCH_COUNT_TARGET - 1-based position of this patch in apply order
#
# If a commit whose message ends with the patch file name already exists, the
# patch is considered applied and only the "v$BRANCH" tag is moved.
# Returns 0 on success, 1 on failure (restores the caller's cwd).
# NOTE(review): some error messages reference $SPEC and $ORIG_SRPM_PATH, which
# are globals set by the caller, not parameters of this function.
srpm_apply_patch() {
   local PATCH="${1}"
   local PATCH_ARGS="${2}"
   local TARGET_DIR="${3}"
   local USE_GIT="${4}"
   local COMMENT_PREFIX="${5}"
   local METHOD=${6}
   local RAW_SCRIPT=${7}
   local ROOT_DIR=${8}
   local RPMBUILD_BUILD_DIR=${9}
   local SPEC_GIT=${10}
   local PATCH_NO="${11}"
   local BRANCH="${12}"
   local SPEC_PATH="${13}"
   local PATCH_COUNT_TARGET="${14}"

   local ORIG_DIR
   ORIG_DIR=`pwd`

   if [ ! -f $PATCH ]; then
      echo "ERROR: $FUNCNAME (${LINENO}): Patch '$PATCH' not found"
      return 1
   fi

   # An empty target means "patch the current directory".
   if [ "x$TARGET_DIR" == "x" ]; then
      TARGET_DIR="$ORIG_DIR"
   fi

   if [ ! -d $TARGET_DIR ]; then
      echo "ERROR: $FUNCNAME (${LINENO}): Directory '$TARGET_DIR' not found"
      return 1
   fi

   # Git mode requires a repo at the target or its parent.
   if [ $USE_GIT -gt 0 ]; then
      if [ ! -d "$TARGET_DIR/.git" ] && [ ! -d "$TARGET_DIR/../.git" ]; then
         echo "ERROR: $FUNCNAME (${LINENO}): Directory '$TARGET_DIR' is not managed by git"
         return 1
      fi
   fi

   cd "$TARGET_DIR"
   if [ $? -ne 0 ]; then
      echo "ERROR: $FUNCNAME (${LINENO}): Failed to cd to '$TARGET_DIR'"
      return 1
   fi

   local TAG="v$BRANCH"
   local PFN=`basename $PATCH`

   local MSG="$PFN"
   local HASH=""
   local ADD_OUT
   local ADD_WC

   # Look for an existing commit for this patch (message ends with the
   # patch file name); if found we skip re-applying below.
   if [ $USE_GIT -gt 0 ]; then
      HASH=`git log --pretty=format:'%H' --grep="$MSG\$"`
   fi

   if [ "x$HASH" == "x" ]; then
      if [ $USE_GIT -gt 0 ]; then
         # Verify we are on the correct branch; if not, branch off the
         # base tag.
         CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
         if [ "$CURRENT_BRANCH" != "$BRANCH" ]; then
            echo "git checkout $TAG"
            git checkout $TAG
            if [ $? -ne 0 ]; then
               # Non-fatal: fall through and try to create the branch anyway.
               echo "ERROR: $FUNCNAME (${LINENO}): failed to checkout tag '$TAG'"
            fi

            echo "git checkout -b $BRANCH"
            git checkout -b $BRANCH
            if [ $? -ne 0 ]; then
               echo "ERROR: $FUNCNAME (${LINENO}): failed to create branch '$BRANCH'"
               cd $ORIG_DIR
               return 1
            fi
         fi
      fi

      if [ $METHOD -eq $METHOD_RPMBUILD_SCRIPT ]; then
         # Script method: generate and source a per-patch apply script
         # derived from the raw rpmbuild script.
         local PATCH_SCRIPT=$(dirname $RAW_SCRIPT)/patch_script
         echo "raw_create_patch_apply_script $RAW_SCRIPT $PATCH_NO $PATCH_SCRIPT $RPMBUILD_BUILD_DIR $SPEC_GIT $SPEC_PATH $PATCH_COUNT_TARGET"
         raw_create_patch_apply_script $RAW_SCRIPT $PATCH_NO $PATCH_SCRIPT $RPMBUILD_BUILD_DIR $SPEC_GIT $SPEC_PATH $PATCH_COUNT_TARGET
         if [ $? -ne 0 ]; then
            echo "ERROR: $FUNCNAME (${LINENO}): raw_create_patch_apply_script failed"
            cd $ORIG_DIR
            return 1
         fi

         if [ -f $PATCH_SCRIPT ]; then
            echo "source $PATCH_SCRIPT"
            # Subshell so the script's cd/env changes don't leak out.
            (
            source $PATCH_SCRIPT
            )
            if [ $? -ne 0 ]; then
               echo "ERROR: Failed to apply patch '$PATCH' using script '$PATCH_SCRIPT'"
               return 1
            fi
         else
            echo "ERROR: $FUNCNAME (${LINENO}): file not found at PATCH_SCRIPT=$PATCH_SCRIPT"
            cd $ORIG_DIR
            return 1
         fi
      else
         # Direct method: plain 'patch'.
         echo "patch $PATCH_ARGS < $PATCH"
         patch $PATCH_ARGS --no-backup-if-mismatch < $PATCH
         if [ $? -ne 0 ]; then
            echo "failed to apply patch '$PATCH'"
            return 1
         fi
      fi

      # NOTE(review): leftover debug guard; aborts the whole build if we
      # somehow ended up in $HOME (a misdirected cd would make the git
      # add/commit below catastrophic).
      if [ $PWD = $HOME ]; then
         echo "DPENNEY: in the home dir somehow"
         exit 1
      fi

      # An 'if' whose condition is false and has no else exits with 0, so
      # on the normal path $? is 0 here.
      if [ $? -eq 0 ]; then
         if [ $USE_GIT -gt 0 ]; then
            ADD_OUT=$(git add --all --verbose)
            if [ $? -ne 0 ]; then
               echo "ERROR: $FUNCNAME (${LINENO}): 'git add' failed for patch '$PATCH' of '$SPEC' while extracting '$ORIG_SRPM_PATH'"
               cd $ORIG_DIR
               return 1
            fi

            # FIX: was 'git status porcelain', which treated "porcelain" as
            # a pathspec and counted long-format status output instead of
            # staged changes.  '--porcelain' emits one line per changed
            # path, so ADD_WC reflects whether there is anything to commit.
            ADD_WC=$(git status --porcelain | wc -l)
            if [ $ADD_WC -gt 0 ]; then
               # The kernel-rt has an empty "test patch", so use --allow-empty
               git commit --allow-empty -m "$COMMENT_PREFIX$PFN"
               if [ $?
-ne 0 ]; then
                  echo "ERROR: $FUNCNAME (${LINENO}): 'git commit' failed for patch '$PATCH' of '$SPEC' while extracting '$ORIG_SRPM_PATH'"
                  cd $ORIG_DIR
                  return 1
               fi
            fi
         fi
      else
         echo "ERROR: $FUNCNAME (${LINENO}): Failed patch: $MSG"
         cd $ORIG_DIR
         return 1
      fi
   else
      # A commit for this patch already exists; just move the base tag to it.
      echo "Patch already applied: $MSG"
      if [ $USE_GIT -gt 0 ]; then
         git tag -d $TAG
         git tag $TAG $HASH
         echo "git tag $TAG $HASH == $?"
      fi
   fi

   cd $ORIG_DIR
   return 0
}


# Print the value of a single RPM header tag (e.g. Name, Version) read from
# an src.rpm.  Echoes the value and returns 0; echoes "" and returns 1 if
# rpm cannot read the file.
srpm_find_tag () {
   local TAG=$1
   local SRPM_FILE=$2

   local VALUE=`rpm -q --queryformat "%{$TAG}\n" --nosignature -p $SRPM_FILE`
   if [ $? -ne 0 ]; then
      echo ""
      return 1
   fi

   echo "$VALUE"
   return 0
}


# Shared worker for the srpm_list_* / srpm_build_requires family below.
# Installs the src.rpm into a throw-away root, runs the named spec_*
# function on each extracted spec file (version-sorted), then cleans up.
#   $1: spec_* function to invoke per spec file
#   $2: path to the src.rpm
srpm_foreach_spec () {
   local SPEC_FUNC=$1
   local SRPM_FILE=$2

   local TMPDIR=`mktemp -d /tmp/srpm_list_packages_XXXXXX`

   # Subshell so the cd does not leak; install (not just query) so the
   # spec files land under the temp root.
   (
    cd $TMPDIR &>> /dev/null
    rpm -i --root=$TMPDIR --nosignature $SRPM_FILE
   )

   for SPEC in `find $TMPDIR -name '*.spec' | sort -V`; do
      $SPEC_FUNC $SPEC
   done

   \rm -rf $TMPDIR &>> /dev/null
}


# List the (sub)package names defined by the src.rpm's spec file(s).
srpm_list_packages () {
   srpm_foreach_spec spec_list_packages "$1"
}


# List name-version for each (sub)package defined by the src.rpm.
srpm_list_versioned_packages () {
   srpm_foreach_spec spec_list_versioned_packages "$1"
}


# List name-version-release for each (sub)package defined by the src.rpm.
srpm_list_ver_rel_packages () {
   srpm_foreach_spec spec_list_ver_rel_packages "$1"
}


# List name-version-release.arch for each (sub)package defined by the src.rpm.
srpm_list_ver_rel_arch_packages () {
   srpm_foreach_spec spec_list_ver_rel_arch_packages "$1"
}


# List the BuildRequires of the src.rpm's spec file(s).
# CONSISTENCY FIX: previously this was the only function in the family that
# installed the SRPM without --nosignature; it now behaves like its siblings
# so unsigned or foreign-signed SRPMs are handled uniformly.
srpm_build_requires () {
   srpm_foreach_spec spec_build_requires "$1"
}


# Look for any package name produced by the src.rpm in an array of targets.
#   $1: NAME of an array variable holding target names (passed by reference)
#   $2: path to the src.rpm
# Echoes the matching target and returns 0, or returns 1 if none match.
srpm_match_package_list () {
   local Aname=$1[@]
   local TARGET_LIST=("${!Aname}")
   local SRPM_FILE=$2
   local TARGET
   local PKG_NAME

   for PKG_NAME in `srpm_list_packages "$SRPM_FILE"`; do
      for TARGET in "${TARGET_LIST[@]}"; do
         if [ "$PKG_NAME" == "$TARGET" ]; then
            # Diagnostic goes to stderr; stdout carries only the match.
            >&2 echo "found target '$TARGET' in file '$SRPM_FILE' as a package name"
            echo "$TARGET"
            return 0
         fi
      done
   done

   return 1
}

# Test whether the src.rpm produces a package named exactly $1.
# Returns 0 (and logs) on a match, 1 otherwise.
srpm_match_package () {
   local TARGET=$1
   local SRPM_FILE=$2
   local PKG_NAME

   for PKG_NAME in `srpm_list_packages "$SRPM_FILE"`; do
      if [ "$PKG_NAME" == "$TARGET" ]; then
         echo "found target '$TARGET' in file '$SRPM_FILE' as a package name"
         return 0
      fi
   done

   return 1
}


# Match the src.rpm against an array of targets by (in order) its Name tag
# (plus "<Name>-rt" in rt builds), its Service tag, then its package list.
#   $1: NAME of an array variable holding target names (passed by reference)
#   $2: path to the src.rpm
# Echoes the matching target and returns 0, or returns 1 if none match.
srpm_match_target_list () {
   local Aname=$1[@]
   local TARGET_LIST=("${!Aname}")
   local SRPM_FILE=$2
   local TARGET
   local NAME
   local SERVICE
   local PKG_NAME

   NAME=`srpm_find_tag Name "$SRPM_FILE"`
   if [ $? -eq 0 ]; then
      for TARGET in "${TARGET_LIST[@]}"; do
         if [ "$NAME" == "$TARGET" ]; then
            echo $TARGET
            return 0
         fi
         # rt builds also answer to "<name>-rt".  BUILD_TYPE is a global
         # set by the calling build scripts.
         if [ "$BUILD_TYPE" == "rt" ]; then
            if [ "${NAME}-rt" == "$TARGET" ]; then
               echo $TARGET
               return 0
            fi
         fi
      done
   fi

   SERVICE=`srpm_find_tag Service "$SRPM_FILE"`
   if [ $?
-eq 0 ]; then
      for TARGET in "${TARGET_LIST[@]}"; do
         if [ "$SERVICE" == "$TARGET" ]; then
            echo $TARGET
            return 0
         fi
      done
   fi

   srpm_match_package_list TARGET_LIST "$SRPM_FILE"
   if [ $? -eq 0 ]; then
      return 0
   fi

   return 1
}

# Match the src.rpm against a single target by its Name tag, Service tag,
# then its package list.  Returns 0 (and logs where it matched) on a match,
# 1 otherwise.
srpm_match_target () {
   local TARGET=$1
   local SRPM_FILE=$2
   local NAME
   local SERVICE
   local PKG_NAME

   NAME=`srpm_find_tag Name "$SRPM_FILE"`
   if [ $? -eq 0 ]; then
      if [ "$NAME" == "$TARGET" ]; then
         echo "found target '$TARGET' in file '$SRPM_FILE' as a name"
         return 0
      fi
   fi

   SERVICE=`srpm_find_tag Service "$SRPM_FILE"`
   if [ $? -eq 0 ]; then
      if [ "$SERVICE" == "$TARGET" ]; then
         echo "found target '$TARGET' in file '$SRPM_FILE' as a service"
         return 0
      fi
   fi

   srpm_match_package "$TARGET" "$SRPM_FILE"
   if [ $? -eq 0 ]; then
      return 0
   fi

   return 1
}

# The intent of this function is to calculate the number of commits between the
# base srcrev and the top-most commit. This is only meant to be used at the
# top level of a subgit; not a subdirectory within a git tree.
#
# Echoes the count (uncommitted work adds one) and returns 0, or returns 1 if
# git cannot resolve the range.
srpm_git_revision_count () {
   local SRC_DIR=$1
   local BASE_SRCREV=$2

   pushd $SRC_DIR > /dev/null
   local COUNT=$(git rev-list --count $BASE_SRCREV..HEAD)
   if [ $? -ne 0 ]; then
      return 1
   fi
   local DIRTY=$(git status --porcelain | wc -l)
   if [ "$DIRTY" -ne 0 ]; then
      # add an extra value for uncommitted work.
      COUNT=$((COUNT+1))
   fi
   popd > /dev/null

   echo $COUNT
   return 0
}

# Source a package's build_srpm.data file and validate/derive TIS_PATCH_VER.
# Side effects: imports every variable the data file sets; may rewrite
# TIS_PATCH_VER and set TIS_PATCH_INC.  Returns 0 on success, 1 on error.
srpm_source_build_data () {
   local DATA_FILE=$1
   if [ ! -f $DATA_FILE ]; then
      >&2 echo "ERROR: $DATA_FILE not found"
      return 1
   fi
   source $DATA_FILE

   # TIS_PATCH_VER is mandatory
   if [ -z "$TIS_PATCH_VER" ]; then
      >&2 echo "ERROR: srpm_source_srpm_data: TIS_PATCH_VER must be set in $DATA_FILE"
      return 1
   elif [[ "$TIS_PATCH_VER" == GITREVCOUNT* ]]; then
      # Calculate the patch version dynamically based on the number of commits
      # in the subgit.  This also supports adding a "+N" at the end to force
      # an additional increment (e.g., TIS_PATCH_VER=GITREVCOUNT+1)
      if [ -z "$TIS_BASE_SRCREV" ]; then
         >&2 echo "ERROR: srpm_source_srpm_data: TIS_BASE_SRCREV must be set in $DATA_FILE"
         return 1
      fi
      # SRC_DIR comes from the sourced data file.
      if [ ! -d "$SRC_DIR" ]; then
         >&2 echo "ERROR: srpm_source_srpm_data: SRC_DIR must specify a subgit root path"
         return 1
      fi
      # Strip letters/'+'/spaces, leaving the optional numeric increment.
      TIS_PATCH_INC=${TIS_PATCH_VER//[A-Z \+]/}
      TIS_PATCH_VER=$(srpm_git_revision_count $SRC_DIR $TIS_BASE_SRCREV)
      if [ $? -ne 0 ] || [ "$TIS_PATCH_VER" == "" ]; then
         >&2 echo "ERROR: srpm_source_srpm_data: Invalid TIS_BASE_SRCREV '$TIS_BASE_SRCREV'"
         return 1
      fi
      if [[ "$TIS_PATCH_INC" =~ ^-?[0-9]+$ ]]; then
         TIS_PATCH_VER=$((TIS_PATCH_VER+${TIS_PATCH_INC}))
      fi
   fi

   return 0
}

# ==== new file: build-tools/sync-jenkins (mode 100755) ====
#!/bin/bash

# This script "syncs" a local workspace up with a Jenkins build.
#
# NOTE - please keep this script in one file (i.e. don't break into sub-scripts
#        or call sub-scripts from this file).  It is expected that doing so will
#        screw things up if the sub-script gets checked out to a different
#        version than the main script.
#
# The general flow of what it does is:
#  - checks out $MY_REPO to the same commits as the Jenkins build
#  - copies over Jenkins build artifacts in an order such that the timestamps
#    for SRPM/RPMS artifacts make sense (RPMS have later timestamps than SRPMS)
#
# The user can then check out changes since the Jenkins build, and build
# updated artifacts.
# Typical use case would be
#  $ cd $MY_WORKSPACE
#  $ sync-jenkins --latest
#  $ cd $MY_REPO
#  $ wrgit checkout CGCS_DEV_0019
#  $ cd $MY_WORKSPACE
#  $ build-pkgs
#
# Usage examples:
#    sync-jenkins --help
#    sync-jenkins --latest
#    sync-jenkins yow-cgts4-lx:/localdisk/loadbuild/jenkins/CGCS_3.0_Centos_Build/2016-07-24_22-00-59"
#
#
# It is recommended that this tool be run with an initially empty workspace
# (or a workspace with only the build configuration file in it).
#
# Potential future improvements to this script
#   - check for sane environment before doing anything
#   - auto saving of the current branch of each git, and restoration to that point
#     after pull
#   - filter some packages (build-info, packages that depend on LICENSE, etc) from
#     pull

# Print command-line help.
usage () {
    echo ""
    echo "Usage: "
    echo "   sync-jenkins <--latest|--help|[path_to_jenkins_build]>"
    echo ""
    echo "      Examples:"
    echo "      sync-jenkins --latest"
    echo "         Syncs to the latest Jenkins build on yow-cgts4-lx"
    echo ""
    echo "      sync-jenkins yow-cgts4-lx:/localdisk/loadbuild/jenkins/CGCS_3.0_Centos_Build/2016-07-24_22-00-59"
    echo "         Syncs to a specfic Jenkins build"
    echo ""
}


# variables
BASEDIR=$MY_REPO
GITHASHFILE="LAST_COMMITS"
TMPFILE="$MY_WORKSPACE/export/temp.txt"
HELP=0

TEMP=`getopt -o h --long help,latest -n 'test.sh' -- "$@"`

if [ $? -ne 0 ]; then
    usage
    exit 1
fi

eval set -- "$TEMP"

# extract options and their arguments into variables.
while true ; do
    case "$1" in
        -h|--help) HELP=1 ; shift ;;
        --latest) JENKINSURL="yow-cgts4-lx:/localdisk/loadbuild/jenkins/CGCS_4.0_Centos_Build/latest_build" ; shift ;;
        --) shift ; break ;;
    esac
done

# A bare positional argument is taken as the Jenkins build path.
if [ "x$JENKINSURL" == "x" ]; then
    JENKINSURL=$@
fi

if [ $HELP -eq 1 ]; then
    usage
    exit 0
fi

if [ "x$JENKINSURL" == "x" ]; then
    usage
    exit 1
fi

# Fetch the commit manifest first; without it there is nothing to sync to.
mkdir -p $MY_WORKSPACE/export $MY_WORKSPACE/std/rpmbuild/RPMS $MY_WORKSPACE/std/rpmbuild/SRPMS $MY_WORKSPACE/rt/rpmbuild/RPMS $MY_WORKSPACE/rt/rpmbuild/SRPMS
rsync $JENKINSURL/$GITHASHFILE $MY_WORKSPACE/$GITHASHFILE

if [ $? -ne 0 ]; then
    echo "Could not find $GITHASHFILE in $JENKINSURL -- aborting"
    exit 1
fi

pushd $MY_REPO > /dev/null

# NOTE(review): this git-dir inventory is written to $TMPFILE but never read
# back below — presumably dead code; verify before removing.
find . -type d -name ".git" | sed "s%/\.git$%%" > $TMPFILE

# Check out each git in the repo to the commit recorded by the Jenkins build.
while read hashfile; do
    gitdir=`echo $hashfile | cut -d " " -f 1`
    gitcommit=`echo $hashfile | sed s/.*[[:space:]]//g`
    echo "doing dir $gitdir commit $gitcommit"

    pushd $gitdir >/dev/null
    git checkout $gitcommit
    popd
done < $MY_WORKSPACE/$GITHASHFILE

popd

pushd $MY_WORKSPACE

# clean stuff
for build_type in std rt; do
    rm -rf $MY_WORKSPACE/$build_type/rpmbuild/SRPMS
    rm -rf $MY_WORKSPACE/$build_type/rpmbuild/RPMS
    rm -rf $MY_WORKSPACE/$build_type/rpmbuild/inputs
    rm -rf $MY_WORKSPACE/$build_type/rpmbuild/srpm_assemble
done

# copy source rpms from jenkins
# Note that the order in which things are copied matters significantly.
# The timestamps on files is used to determine (for example) that an SRPM is
# older than an RPM, and therefore the RPM does not need to be rebuilt
for build_type in std rt; do
    echo "Syncing $build_type build"
    mkdir -p $MY_WORKSPACE/$build_type/rpmbuild/RPMS
    mkdir -p $MY_WORKSPACE/$build_type/rpmbuild/SRPMS
    rsync -r ${JENKINSURL}/$build_type/inputs $build_type/
    sleep 1
    rsync -r ${JENKINSURL}/$build_type/srpm_assemble $build_type/
    sleep 1
    rsync -r ${JENKINSURL}/$build_type/rpmbuild/SRPMS/* $MY_WORKSPACE/$build_type/rpmbuild/SRPMS
    sleep 1
    rsync ${JENKINSURL}/$build_type/cgcs-centos-repo.last_head $MY_WORKSPACE/$build_type
    rsync ${JENKINSURL}/$build_type/cgcs-3rd-party-repo.last_head $MY_WORKSPACE/$build_type
    if [ "$build_type" == "std" ]; then
        cp $MY_WORKSPACE/$build_type/cgcs-centos-repo.last_head $MY_REPO/cgcs-centos-repo/.last_head
        cp $MY_WORKSPACE/$build_type/cgcs-3rd-party-repo.last_head $MY_REPO/cgcs-3rd-party-repo/.last_head
    fi
    sleep 1
    rsync -r ${JENKINSURL}/$build_type/results $build_type/
    sleep 1
    # Rename the jenkins results dir to match the local build environment.
    mv $build_type/results/jenkins* $build_type/results/${MY_BUILD_ENVIRONMENT}-$build_type
    rsync -r ${JENKINSURL}/$build_type/rpmbuild/RPMS/* $MY_WORKSPACE/$build_type/rpmbuild/RPMS
done

popd

# ==== new file: build-tools/sync_jenkins.sh (mode 100755) ====
# NOTE(review): near-duplicate of build-tools/sync-jenkins (different default
# Jenkins URL and sub-repo handling) — candidates for consolidation.
#!/bin/bash

# This script "syncs" a local workspace up with a Jenkins build.
#
# The general flow of what it does is:
#  - checks out $MY_REPO to the same commits as the Jenkins build
#  - copies over Jenkins build artifacts in an order such that the timestamps
#    for SRPM/RPMS artifacts make sense (RPMS have later timestamps than SRPMS)
#
# The user can then check out changes since the Jenkins build, and build
# updated artifacts.  Typical use case would be
#  $ cd $MY_WORKSPACE
#  $ sync_jenkins.sh --latest
#  $ cd $MY_REPO
#  $ wrgit checkout CGCS_DEV_0017
#  $ cd $MY_WORKSPACE
#  $ build-pkgs
#
# Usage examples:
#    sync_jenkins.sh --help
#    sync_jenkins.sh --latest
#    sync_jenkins.sh yow-cgts4-lx:/localdisk/loadbuild/jenkins/CGCS_3.0_Centos_Build/2016-07-24_22-00-59"
#
#
# It is recommended that this tool be run with an initially empty workspace
# (or a workspace with only the build configuration file in it).
#
# Potential future improvements to this script
#   - check for sane environment before doing anything
#   - auto saving of the current branch of each git, and restoration to that point
#     after pull
#   - filter some packages (build-info, packages that depend on LICENSE, etc) from
#     pull

# Print command-line help.
usage () {
    echo ""
    echo "Usage: "
    echo "   sync_jenkins.sh <--latest|--help|[path_to_jenkins_build]>"
    echo ""
    echo "      Examples:"
    echo "      sync_jenkins.sh --latest"
    echo "         Syncs to the latest Jenkins build on yow-cgts4-lx"
    echo ""
    echo "      sync_jenkins.sh yow-cgts4-lx:/localdisk/loadbuild/jenkins/CGCS_3.0_Centos_Build/2016-07-24_22-00-59"
    echo "         Syncs to a specfic Jenkins build"
    echo ""
}


# variables
BASEDIR=$MY_REPO
GITHASHFILE="LAST_COMMITS"
TMPFILE="$MY_WORKSPACE/export/temp.txt"
HELP=0

TEMP=`getopt -o h --long help,latest -n 'test.sh' -- "$@"`

if [ $? -ne 0 ]; then
    usage
    exit 1
fi

eval set -- "$TEMP"

# extract options and their arguments into variables.
while true ; do
    case "$1" in
        -h|--help) HELP=1 ; shift ;;
        --latest) JENKINSURL="yow-cgts4-lx:/localdisk/loadbuild/jenkins/latest_dev_stream/latest_build" ; shift ;;
        --) shift ; break ;;
    esac
done

# A bare positional argument is taken as the Jenkins build path.
if [ "x$JENKINSURL" == "x" ]; then
    JENKINSURL=$@
fi

if [ $HELP -eq 1 ]; then
    usage
    exit 0
fi

if [ "x$JENKINSURL" == "x" ]; then
    usage
    exit 1
fi

# Fetch the commit manifest first; without it there is nothing to sync to.
mkdir -p $MY_WORKSPACE/export $MY_WORKSPACE/std/rpmbuild/RPMS $MY_WORKSPACE/std/rpmbuild/SRPMS $MY_WORKSPACE/rt/rpmbuild/RPMS $MY_WORKSPACE/rt/rpmbuild/SRPMS
rsync $JENKINSURL/$GITHASHFILE $MY_WORKSPACE/$GITHASHFILE

if [ $? -ne 0 ]; then
    echo "Could not find $GITHASHFILE in $JENKINSURL -- aborting"
    exit 1
fi

pushd $MY_REPO > /dev/null

# NOTE(review): written to $TMPFILE but never read back — presumably dead code.
find . -type d -name ".git" | sed "s%/\.git$%%" > $TMPFILE

# Check out each git in the repo to the commit recorded by the Jenkins build.
while read hashfile; do
    gitdir=`echo $hashfile | cut -d " " -f 1`
    gitcommit=`echo $hashfile | sed s/.*[[:space:]]//g`
    echo "doing dir $gitdir commit $gitcommit"

    pushd $gitdir >/dev/null
    git checkout $gitcommit
    popd
done < $MY_WORKSPACE/$GITHASHFILE

popd

pushd $MY_WORKSPACE

# clean stuff
for build_type in std rt; do
    rm -rf $MY_WORKSPACE/$build_type/rpmbuild/SRPMS
    rm -rf $MY_WORKSPACE/$build_type/rpmbuild/RPMS
    rm -rf $MY_WORKSPACE/$build_type/rpmbuild/inputs
    rm -rf $MY_WORKSPACE/$build_type/rpmbuild/srpm_assemble
done

# copy source rpms from jenkins
for build_type in std rt; do
    mkdir -p $MY_WORKSPACE/$build_type/rpmbuild/RPMS
    mkdir -p $MY_WORKSPACE/$build_type/rpmbuild/SRPMS
    rsync -r ${JENKINSURL}/$build_type/inputs $build_type/
    sleep 1
    rsync -r ${JENKINSURL}/$build_type/srpm_assemble $build_type/
    sleep 1
    rsync -r ${JENKINSURL}/$build_type/rpmbuild/SRPMS/* $MY_WORKSPACE/$build_type/rpmbuild/SRPMS
    sleep 1
    for sub_repo in cgcs-centos-repo cgcs-tis-repo cgcs-3rd-party-repo; do
        rsync ${JENKINSURL}/$build_type/$sub_repo.last_head $MY_WORKSPACE/$build_type
        if [ "$build_type" == "std" ]; then
            cp
$MY_WORKSPACE/$build_type/$sub_repo.last_head $MY_REPO/$sub_repo/.last_head
        fi
    done
    sleep 1
    rsync -r ${JENKINSURL}/$build_type/results $build_type/
    sleep 1
    rsync -r ${JENKINSURL}/$build_type/rpmbuild/RPMS/* $MY_WORKSPACE/$build_type/rpmbuild/RPMS
done

popd

# ==== new file: build-tools/tis.macros (mode 100644) ====
#
# This file provides name=value pairs that are added to the build
# config file as 'macros' passed into the RPM build
#
# Parallel compressors speed up payload compression during rpmbuild.
%__gzip=/usr/bin/pigz
%__bzip2=/usr/bin/lbzip2
%_patch_confdir=%{_sysconfdir}/patching
%_patch_scripts=%{_patch_confdir}/patch-scripts
%_runtime_patch_scripts=/run/patching/patch-scripts
%_tis_dist=.tis

# ==== new file: build-tools/update-efiboot-image (mode 100755) ====
#!/bin/bash
#
# Copyright (c) 2016-2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# Update the efiboot.img (See https://wiki.archlinux.org/index.php/Remastering_the_Install_ISO)
# We need to mount the image file, make any changes to the filesystem, and unmount.
#
# e.g. udisksctl loop-setup -f efiboot.img --no-user-interaction
#      Mapped file efiboot.img as /dev/loop0.
#      udisksctl mount -b /dev/loop0
#      Mounted /dev/loop0 at /run/media/kbujold/ANACONDA.
#

MY_YUM_CONF=""


# Note:
# This needs to be in /usr/local/bin and /usr/local/bin needs to come first in path list.
# this file is a mod of $MY_REPO/build-tools/update-efiboot-image. The original requires
# a fully working udev with we don't have in the container. I use sudo instead.

# shadowing the file fails since the WR stuff carefully recreates a local path before invocation.
# sigh...
# added instructions to rm and ln -s to this one in the README

# check for changes to the original, since we are shadowing...
#ORIG_SUM="2df81b2461ea358b24df77f7539d7fb4"
#TEST_SUM=$(md5sum $MY_REPO/build-tools/update-efiboot-image | awk '{print $1}')
#if [ "$TEST_SUM" != "$ORIG_SUM" ]; then
#    echo "The original build-tools/update-efiboot-image has changed or is missing. May need mods to this file!"
#    echo "Desired sum = $ORIG_SUM"
#    echo "Actual sum  = $TEST_SUM"
#    exit 1
#fi

#printf " Shadowing update-efiboot-image with the sudo centric one from /usr/local/bin due to lack of udev \n"

# Resolve MY_YUM_CONF, DISTRO_REPO_DIR, OUTPUT_DIR, OUTPUT_DIST_DIR and
# INTERNAL_REPO_ROOT from the environment; exits non-zero if any cannot
# be determined.
function setup_env_vars {
   mkdir -p $MY_WORKSPACE/export/

   MY_YUM_CONF=$(create-yum-conf)
   if [ $? -ne 0 ]; then
      echo "ERROR: create-yum-conf failed"
      exit 1
   fi

   # The distro repo is whichever baseurl in the yum conf has an images/ dir.
   DISTRO_REPO_DIR=$(for d in $(grep baseurl $MY_YUM_CONF | grep file: | awk -F : '{print $2}' | sed 's:///:/:g'); do if [ -d $d/images ]; then echo $d ;fi; done)

   if [ ! -d "$DISTRO_REPO_DIR" ] ; then
      printf " Error -- could not access $DISTRO_REPO_DIR\n"
      exit 1
   fi

   # where to put stuff (current dir unless MY_WORKSPACE defined)
   OUTPUT_DIR="$PWD/export"
   if [ ! -z "$MY_WORKSPACE" ] && [ -d "$MY_WORKSPACE" ] ; then
      OUTPUT_DIR="$MY_WORKSPACE/export"
   fi

   # Directory in which to populate files to be distributed
   OUTPUT_DIST_DIR=$OUTPUT_DIR/dist

   if [ ! -z "$MY_REPO" ] && [ -d "$MY_REPO" ] ; then
      INTERNAL_REPO_ROOT=$MY_REPO
   fi

   if [ -z "$INTERNAL_REPO_ROOT" ] ; then
      if [ ! -z "$MY_REPO_ROOT_DIR" ] && [ -d "$MY_REPO_ROOT_DIR/cgcs-root" ] ; then
         INTERNAL_REPO_ROOT=$MY_REPO_ROOT_DIR/cgcs-root
      fi
   fi

   if [ -z "$INTERNAL_REPO_ROOT" ] ; then
      if [ -d "$MY_WORKSPACE/std/repo" ] ; then
         INTERNAL_REPO_ROOT=$MY_WORKSPACE/std/repo
      fi
   fi

   if [ -z "$INTERNAL_REPO_ROOT" ] ; then
      printf " Error -- could not locate cgcs-root repo.\n"
      exit 1
   fi
}

setup_env_vars

printf " Calling update-efibot-image\n"
MOUNT_LOG_FILE=$OUTPUT_DIR/mounts_used.log

# Clear old image file
printf " Delete old efiboot.img file\n"
rm -f $OUTPUT_DIR/efiboot.img
yum clean all -c $MY_YUM_CONF

# Copy Vanilla Centos image file
cp -L -u $DISTRO_REPO_DIR/images/efiboot.img $OUTPUT_DIR/

printf " Replacing the efiboot.img grub.cfg file with the Titanium Cloud one\n"

# sudo losetup replaces udisksctl here (no udev in the build container).
#RET=$(udisksctl loop-setup -f $OUTPUT_DIR/efiboot.img --no-user-interaction)
RET=$(sudo losetup --show -f $OUTPUT_DIR/efiboot.img)

if [ -z "$RET" ] ; then
   printf " Error: failed sudo losetup command.\n"
   exit 1
fi

printf " $RET\n"

# Save the loop device used into a file
echo $(date) $RET >> $MOUNT_LOG_FILE

#LOOP=$(echo $RET | awk '{print $5;}' | sed -e 's/\.//g')
LOOP=$(echo $RET)
if [ -z $LOOP ] ; then
   printf " Error: failed losetup command.\n"
   exit 1
fi

EFI_MOUNT=$(sudo mktemp -d -p /mnt -t EFI-noudev.XXXXXX)
sudo mount $LOOP $EFI_MOUNT
#udisksctl mount -b $LOOP
#EFI_MOUNT=$(udisksctl info -b $LOOP | grep MountPoints | awk '{print $2;}')
if [ -z $EFI_MOUNT ] ; then
   printf " Error: failed mount command.\n"
   exit 1
fi

# Update the vanilla UEFI Centos grub.cfg with the Titanium Cloud version
sudo cp $INTERNAL_REPO_ROOT/addons/wr-cgcs/layers/cgcs/common-bsp/files/grub.cfg $EFI_MOUNT/EFI/BOOT/grub.cfg

# Update the grub and shim executables with the Titanium Cloud signed versions
#
# To do this, we extract the RPMS, grab the two executables we need, and replace
# the ones in the current filesystem
TMPDIR=`mktemp -d`
SHIMPKG=`find
$MY_WORKSPACE/std/rpmbuild/RPMS/shim-[0-9]*.x86_64.rpm`
GRUBPKG=`find $MY_WORKSPACE/std/rpmbuild/RPMS/grub2-efi-[0-9]*.x86_64.rpm`

pushd $TMPDIR >/dev/null
# Extract the signed shim and install it as the default UEFI loader.
rpm2cpio $SHIMPKG | cpio -id --quiet
sudo find . -name "shim.efi" | xargs -I '{}' sudo cp '{}' $EFI_MOUNT/EFI/BOOT/BOOTX64.EFI
rm -rf *

# Extract the signed grub and install it alongside.
rpm2cpio $GRUBPKG | cpio -id --quiet
sudo find . -name "grubx64.efi" | xargs -I '{}' sudo cp '{}' $EFI_MOUNT/EFI/BOOT/grubx64.efi
popd >/dev/null
rm -rf $TMPDIR

# Create a directory for Secure Boot certificate
sudo mkdir -p $EFI_MOUNT/CERTS
sudo cp $INTERNAL_REPO_ROOT/build-tools/certificates/* $EFI_MOUNT/CERTS

# Cleanup mounts
sudo umount $LOOP
#udisksctl unmount -b $LOOP
RET=$(sudo losetup -d $LOOP)
#RET=$(udisksctl loop-delete -b $LOOP)
echo $(date) Deleted $LOOP. $RET >> $MOUNT_LOG_FILE
sudo rmdir $EFI_MOUNT
exit 0

# ==== new file: build-tools/update-pxe-network-installer (mode 100755) ====
#!/bin/bash -e
## this script is to update pxeboot images (vmlinuz, initrd.img and squashfs.img).
## based on RPMs generated by "build-pkgs" and "build-iso"
## created by Yong Hu (yong.hu@intel.com), 05/24/2018


kernel_mode=0
# NOTE(review): the loop iterates $arg but every case matches "$1", so only
# the first argument is ever honored (and is re-tested once per argument) —
# verify whether "$arg" was intended here.
for arg in "$@"; do
    case "$1" in
        --std) kernel_mode=0 ;;
        --rt) kernel_mode=1 ;;
        --help) echo "Update pxe-network-installer images";
            echo "$(basename $0) --std for standard kernel mode, or"
            echo "$(basename $0) --rt for realtime kernel mode";
            exit 0;;
        *) echo "unknown argument";
            exit 1;;
    esac
done

echo "Start to update pxe-network-installer images .... "
timestamp=$(date +%F_%H%M)
cur_dir=$PWD

pxe_network_installer_dir=$MY_BUILD_DIR/pxe-network-installer
if [ !
-d $pxe_network_installer_dir ];then + mkdir -p $pxe_network_installer_dir +fi + +cd $pxe_network_installer_dir + +echo "step 1: copy original images: vmlinuz, initrd.img, squashfs.img" +CENTOS_REPO="cgcs-centos-repo" +orig_img_dir="orig" +if [ ! -d $orig_img_dir ];then + mkdir -p $orig_img_dir +fi + +orig_kernel_img="$MY_REPO/$CENTOS_REPO/Binary/images/pxeboot/vmlinuz" +if [ -f $orig_kernel_img ]; then + cp -f $orig_kernel_img $pxe_network_installer_dir/$orig_img_dir/. +else + echo "$orig_kernel_img does not exit" + exit -1 +fi + +orig_initrd_img="$MY_REPO/$CENTOS_REPO/Binary/images/pxeboot/initrd.img" +if [ -f $orig_initrd_img ]; then + cp -f $orig_initrd_img $pxe_network_installer_dir/$orig_img_dir/. +else + echo "$orig_initrd_img does not exit" + exit -1 +fi + +orig_squashfs_img="$MY_REPO/$CENTOS_REPO/Binary/LiveOS/squashfs.img" +if [ -f $orig_squashfs_img ]; then + cp -f $orig_squashfs_img $pxe_network_installer_dir/$orig_img_dir/. +else + echo "$orig_squashfs_img does not exit" + exit -1 +fi + +echo "" +echo "step 2: prepare necessary kernel RPMs" +echo "" +kernel_rpms_std="$pxe_network_installer_dir/kernel-rpms/std" +kernel_rpms_rt="$pxe_network_installer_dir/kernel-rpms/rt" + +echo "--> get $kernel_rpms_std ready" +echo "--> get $kernel_rpms_rt ready" + +if [ -d $kernel_rpms_std ];then + mv $kernel_rpms_std $kernel_rpms_std-bak-$timestamp +fi +mkdir -p $kernel_rpms_std + +if [ -d $kernel_rpms_rt ];then + mv $kernel_rpms_rt $kernel_rpms_rt-bak-$timestamp +fi +mkdir -p $kernel_rpms_rt + +echo " -------- start to search standard kernel rpm and related kernel modules --------" +echo " --> find standard kernel rpm" +std_kernel=$(find $MY_BUILD_DIR/export/dist/isolinux/Packages/ -type f -name "kernel-[0-9]*.x86_64.rpm") +if [ -n $std_kernel ] && [ -f $std_kernel ];then + cp -f $std_kernel $kernel_rpms_std/. +else + echo "ERROR: failed to find kernel RPM!" 
+ exit -1 +fi + +echo "--> find e1000e kernel module" +e1000e_module=$(find $MY_BUILD_DIR/std/rpmbuild/RPMS -type f -name "kmod-e1000e-[0-9]*.x86_64.rpm") +if [ -n $e1000e_module ] && [ -f $e1000e_module ];then + cp -f $e1000e_module $kernel_rpms_std/. +else + echo "ERROR: failed to find e1000e kernel module RPM!" + exit -1 +fi + +echo "--> find i40e kernel module" +i40e_module=$(find $MY_BUILD_DIR/std/rpmbuild/RPMS -type f -name "kmod-i40e-[0-9]*.x86_64.rpm") +if [ -n $i40e_module ] && [ -f $i40e_module ];then + cp -f $i40e_module $kernel_rpms_std/. +else + echo "ERROR: failed to find i40e kernel module RPM!" + exit -1 +fi + +echo "--> find ixgbe kernel module" +ixgbe_module=$(find $MY_BUILD_DIR/std/rpmbuild/RPMS -type f -name "kmod-ixgbe-[0-9]*.x86_64.rpm") +if [ -n $ixgbe_module ] && [ -f $ixgbe_module ];then + cp -f $ixgbe_module $kernel_rpms_std/. +else + echo "ERROR: failed to find ixgbe kernel module RPM!" + exit -1 +fi + +echo "--> find tpm kernel module" +tpm_module=$(find $MY_BUILD_DIR/std/rpmbuild/RPMS -type f -name "kmod-tpm-[0-9]*.x86_64.rpm") +if [ -n $tpm_module ] && [ -f $tpm_module ];then + cp -f $tpm_module $kernel_rpms_std/. +else + echo "ERROR: failed to find tpm kernel module RPM!" + exit -1 +fi + +echo "--> find mlnx-ofa_kernel" +mlnx_ofa_kernel=$(find $MY_BUILD_DIR/std/rpmbuild/RPMS -type f -name "mlnx-ofa_kernel-[0-9]*.x86_64.rpm") +if [ -n $mlnx_ofa_kernel ] && [ -f $mlnx_ofa_kernel ];then + cp -f $mlnx_ofa_kernel $kernel_rpms_std/. +else + echo "ERROR: failed to find mlnx-ofa_kernel module RPM!" + exit -1 +fi + +echo "--> find mlnx-ofa_kernel-devel" +mlnx_ofa_kernel_devel=$(find $MY_BUILD_DIR/std/rpmbuild/RPMS -type f -name "mlnx-ofa_kernel-devel-[0-9]*.x86_64.rpm") +if [ -n $mlnx_ofa_kernel_devel ] && [ -f $mlnx_ofa_kernel_devel ];then + cp -f $mlnx_ofa_kernel_devel $kernel_rpms_std/. +else + echo "ERROR: failed to find mlnx-ofa_kernel-devel module RPM!" 
+ exit -1 +fi + +echo "--> find mlnx-ofa_kernel_modules" +mlnx_ofa_kernel_modules=$(find $MY_BUILD_DIR/std/rpmbuild/RPMS -type f -name "mlnx-ofa_kernel-modules-[0-9]*.x86_64.rpm") +if [ -n $mlnx_ofa_kernel_modules ] && [ -f $mlnx_ofa_kernel_modules ];then + cp -f $mlnx_ofa_kernel_modules $kernel_rpms_std/. +else + echo "ERROR: failed to find mlnx-ofa_kernel-modules RPM!" + exit -1 +fi +echo " -------- successfully found standard kernel rpm and related kernel modules --------" +echo "" +echo "" +echo " -------- start to search realtime kernel rpm and related kernel modules --------" +echo "--> find realtime kernel rpm" +rt_kernel=$(find $MY_BUILD_DIR/export/dist/isolinux/Packages/ -type f -name "kernel-rt-[0-9]*.rpm") +if [ -n $rt_kernel ] && [ -f $rt_kernel ];then + cp -f $rt_kernel $kernel_rpms_rt/. +else + echo "ERROR: failed to find realtime kernel RPM!" + exit -1 +fi + +echo "--> find realtime e1000e kernel module" +rt_e1000e_module=$(find $MY_BUILD_DIR/rt/rpmbuild/RPMS -type f -name "kmod-e1000e-rt-[0-9]*.x86_64.rpm") +if [ -n $rt_e1000e_module ] && [ -f $rt_e1000e_module ];then + cp -f $rt_e1000e_module $kernel_rpms_rt/. +else + echo "ERROR: failed to find realtime e1000e kernel module RPM!" + exit -1 +fi + +echo "--> find realtime i40e kernel module" +rt_i40e_module=$(find $MY_BUILD_DIR/rt/rpmbuild/RPMS -type f -name "kmod-i40e-rt-[0-9]*.x86_64.rpm") +if [ -n $rt_i40e_module ] && [ -f $rt_i40e_module ];then + cp -f $rt_i40e_module $kernel_rpms_rt/. +else + echo "ERROR: failed to find realtime i40e kernel module RPM!" + exit -1 +fi + +echo "--> find realtime ixgbe kernel module" +rt_ixgbe_module=$(find $MY_BUILD_DIR/rt/rpmbuild/RPMS -type f -name "kmod-ixgbe-rt-[0-9]*.x86_64.rpm") +if [ -n $rt_ixgbe_module ] && [ -f $rt_ixgbe_module ];then + cp -f $rt_ixgbe_module $kernel_rpms_rt/. +else + echo "ERROR: failed to find realtime ixgbe kernel module RPM!" 
+ exit -1 +fi + +echo "--> find realtime tpm kernel module" +rt_tpm_module=$(find $MY_BUILD_DIR/rt/rpmbuild/RPMS -type f -name "kmod-tpm-rt-[0-9]*.x86_64.rpm") +if [ -n $rt_tpm_module ] && [ -f $rt_tpm_module ];then + cp -f $rt_tpm_module $kernel_rpms_rt/. +else + echo "ERROR: failed to find realtime tpm kernel module RPM!" + exit -1 +fi + +rootfs_rpms="$pxe_network_installer_dir/rootfs-rpms" +if [ -d $rootfs_rpms ];then + mv $rootfs_rpms $rootfs_rpms-bak-$timestamp +fi +mkdir -p $rootfs_rpms + + +echo " -------- successfully found realtime kernel rpm and related kernel modules --------" +echo "" + +echo " step 3: start to search rpms for rootfs" +echo "--> find anaconda rpm" +anaconda=$(find $MY_BUILD_DIR/installer/rpmbuild/RPMS -type f -name "anaconda-[0-9]*.x86_64.rpm") +if [ -n $anaconda ] && [ -f $anaconda ];then + cp -f $anaconda $rootfs_rpms/. +else + echo "ERROR: failed to find anaconda RPM!" + exit -1 +fi + +echo "--> find anaconda-core rpm" +anaconda_core=$(find $MY_BUILD_DIR/installer/rpmbuild/RPMS -type f -name "anaconda-core-[0-9]*.x86_64.rpm") +if [ -n $anaconda_core ] && [ -f $anaconda_core ];then + cp -f $anaconda_core $rootfs_rpms/. +else + echo "ERROR: failed to find anaconda-core RPM!" + exit -1 +fi + +echo "--> find anaconda-debuginfo rpm" +anaconda_debuginfo=$(find $MY_BUILD_DIR/installer/rpmbuild/RPMS -type f -name "anaconda-debuginfo-[0-9]*.x86_64.rpm") +if [ -n $anaconda_debuginfo ] && [ -f $anaconda_debuginfo ];then + cp -f $anaconda_debuginfo $rootfs_rpms/. +else + echo "ERROR: failed to find anaconda-debuginfo RPM!" + exit -1 +fi + +echo "--> find anaconda-dracut rpm" +anaconda_dracut=$(find $MY_BUILD_DIR/installer/rpmbuild/RPMS -type f -name "anaconda-dracut-[0-9]*.x86_64.rpm") +if [ -n $anaconda_dracut ] && [ -f $anaconda_dracut ];then + cp -f $anaconda_dracut $rootfs_rpms/. +else + echo "ERROR: failed to find anaconda-dracut RPM!" 
+ exit -1 +fi + +echo "--> find anaconda-gui rpm" +anaconda_gui=$(find $MY_BUILD_DIR/installer/rpmbuild/RPMS -type f -name "anaconda-gui-[0-9]*.x86_64.rpm") +if [ -n $anaconda_gui ] && [ -f $anaconda_gui ];then + cp -f $anaconda_gui $rootfs_rpms/. +else + echo "ERROR: failed to find anaconda-gui RPM!" + exit -1 +fi + +echo "--> find anaconda-tui rpm" +anaconda_tui=$(find $MY_BUILD_DIR/installer/rpmbuild/RPMS -type f -name "anaconda-tui-[0-9]*.x86_64.rpm") +if [ -n $anaconda_tui ] && [ -f $anaconda_tui ];then + cp -f $anaconda_tui $rootfs_rpms/. +else + echo "ERROR: failed to find anaconda-tui RPM!" + exit -1 +fi + +echo "--> find anaconda-widgets rpm" +anaconda_widgets=$(find $MY_BUILD_DIR/installer/rpmbuild/RPMS -type f -name "anaconda-widgets-[0-9]*.x86_64.rpm") +if [ -n $anaconda_widgets ] && [ -f $anaconda_widgets ];then + cp -f $anaconda_widgets $rootfs_rpms/. +else + echo "ERROR: failed to find anaconda-widgets RPM!" + exit -1 +fi + +echo "--> find anaconda-widgets-devel rpm" +anaconda_widgets_devel=$(find $MY_BUILD_DIR/installer/rpmbuild/RPMS -type f -name "anaconda-widgets-devel-[0-9]*.x86_64.rpm") +if [ -n $anaconda_widgets_devel ] && [ -f $anaconda_widgets_devel ];then + cp -f $anaconda_widgets_devel $rootfs_rpms/. +else + echo "ERROR: failed to find anaconda-widgets-devel RPM!" + exit -1 +fi + +echo "--> find rpm-xx.x86_64 rpm" +rpm_rpm=$(find $MY_BUILD_DIR/installer/rpmbuild/RPMS -type f -name "rpm-[0-9]*.x86_64.rpm") +if [ -n $rpm_rpm ] && [ -f $rpm_rpm ];then + cp -f $rpm_rpm $rootfs_rpms/. +else + echo "ERROR: failed to find rpm-xx.x86_64 RPM!" + exit -1 +fi + +echo "--> find rpm-build rpm" +rpm_build=$(find $MY_BUILD_DIR/installer/rpmbuild/RPMS -type f -name "rpm-build-[0-9]*.x86_64.rpm") +if [ -n $rpm_build ] && [ -f $rpm_build ];then + cp -f $rpm_build $rootfs_rpms/. +else + echo "ERROR: failed to find rpm-build RPM!" 
+ exit -1 +fi + +echo "--> find rpm-build-libs rpm" +rpm_build_libs=$(find $MY_BUILD_DIR/installer/rpmbuild/RPMS -type f -name "rpm-build-libs-[0-9]*.x86_64.rpm") +if [ -n $rpm_build_libs ] && [ -f $rpm_build_libs ];then + cp -f $rpm_build_libs $rootfs_rpms/. +else + echo "ERROR: failed to find rpm-build-libs RPM!" + exit -1 +fi + +echo "--> find rpm-libs rpm" +rpm_libs=$(find $MY_BUILD_DIR/installer/rpmbuild/RPMS -type f -name "rpm-libs-[0-9]*.x86_64.rpm") +if [ -n $rpm_libs ] && [ -f $rpm_libs ];then + cp -f $rpm_libs $rootfs_rpms/. +else + echo "ERROR: failed to find rpm-libs RPM!" + exit -1 +fi + +echo "--> find rpm-plugin-systemd-inhibit rpm" +rpm_plugin_systemd_inhibit=$(find $MY_BUILD_DIR/installer/rpmbuild/RPMS -type f -name "rpm-plugin-systemd-inhibit-[0-9]*.x86_64.rpm") +if [ -n $rpm_plugin_systemd_inhibit ] && [ -f $rpm_plugin_systemd_inhibit ];then + cp -f $rpm_plugin_systemd_inhibit $rootfs_rpms/. +else + echo "ERROR: failed to find rpm-plugin-systemd-inhibit RPM!" + exit -1 +fi + +echo "--> find rpm-python rpm" +rpm_python=$(find $MY_BUILD_DIR/installer/rpmbuild/RPMS -type f -name "rpm-python-[0-9]*.x86_64.rpm") +if [ -n $rpm_python ] && [ -f $rpm_python ];then + cp -f $rpm_python $rootfs_rpms/. +else + echo "ERROR: failed to find rpm-python RPM!" + exit -1 +fi + +echo "--> find systemd rpm" +systemd=$(find $MY_BUILD_DIR/export/dist/isolinux/Packages -type f -name "systemd-[0-9]*.x86_64.rpm") +if [ -n $systemd ] && [ -f $systemd ];then + cp -f $systemd $rootfs_rpms/. +else + echo "ERROR: failed to find systemd RPM!" + exit -1 +fi + +echo "--> find systemd-libs rpm" +systemd_libs=$(find $MY_BUILD_DIR/export/dist/isolinux/Packages -type f -name "systemd-libs-[0-9]*.x86_64.rpm") +if [ -n $systemd_libs ] && [ -f $systemd_libs ];then + cp -f $systemd_libs $rootfs_rpms/. +else + echo "ERROR: failed to find systemd-libs RPM!" 
+ exit -1 +fi + +echo "--> find systemd-sysv rpm" +systemd_sysv=$(find $MY_BUILD_DIR/export/dist/isolinux/Packages -type f -name "systemd-sysv-[0-9]*.x86_64.rpm") +if [ -n $systemd_sysv ] && [ -f $systemd_sysv ];then + cp -f $systemd_sysv $rootfs_rpms/. +else + echo "ERROR: failed to find systemd-sysv RPM!" + exit -1 +fi + +echo "--> find bind-utils rpm" +bind_utils=$(find $MY_BUILD_DIR/export/dist/isolinux/Packages -type f -name "bind-utils-[0-9]*.x86_64.rpm") +if [ -n $bind_utils ] && [ -f $bind_utils ];then + cp -f $bind_utils $rootfs_rpms/. +else + echo "ERROR: failed to find bind-utils RPM!" + exit -1 +fi + +echo "--> find ima-evm-utils rpm" +ima_evm_utils=$(find $MY_BUILD_DIR/export/dist/isolinux/Packages -type f -name "ima-evm-utils-[0-9]*.x86_64.rpm") +if [ -n $ima_evm_utils ] && [ -f $ima_evm_utils ];then + cp -f $ima_evm_utils $rootfs_rpms/. +else + echo "ERROR: failed to find ima-evm-utils RPM!" + exit -1 +fi +echo " ---------------- successfully found rpms for rootfs --------------------------------" + +echo "step 4: make installer images in this work dir" +same_folder="$(dirname ${BASH_SOURCE[0]})" +mk_images_tool="$same_folder/make-installer-images.sh" +if [ $kernel_mode -eq 0 ];then + sudo $mk_images_tool $pxe_network_installer_dir "std" +else + sudo $mk_images_tool $pxe_network_installer_dir "rt" +fi + +cd $cur_dir +echo "updating pxe-network-installer images -- done!" 
diff --git a/build-tools/wrs_orig.txt b/build-tools/wrs_orig.txt new file mode 100644 index 00000000..f07a445d --- /dev/null +++ b/build-tools/wrs_orig.txt @@ -0,0 +1,58 @@ +build-info 1.0 +ceph-manager 1.0 +cgcs-patch 1.0 +cgcs-users 1.0 +cgts-client 1.0 +cgts-mtce-common 1.0 +cgts-mtce-compute 1.0 +collector 1.0 +computeconfig 1.0 +compute-huge 1.0 +config-gate 1.0 +configutilities 1.1.1 +controllerconfig 1.0 +filesystem-scripts 1.0 +fm-api 1.0 +fm-common 1.0 +fm-doc 1.0 +fm-mgr 1.0 +io-monitor 1.0 +io-scheduler 1.0 +logmgmt 1.0 +nfv 1.0 +puppet-cgcs_vswitch 1.0.0 +puppet-mtce 1.0.0 +puppet-sshd 1.0.0 +sm 1.0.0 +sm-api 1.0 +sm-client 1.0 +sm-common 1.0.0 +sm-db 1.0.0 +sm-tools 1.0 +storageconfig 1.0 +sysinv 1.0 +sysinv-agent 1.0 +tis-extensions 1.0 +puppet-nfv 1.0.0 +puppet-nova_api_proxy 1.0.0 +puppet-nuage_vswitch 1.0.0 +puppet-patching 1.0.0 +puppet-sysinv 1.0.0 +nova-utils 1.0 +tsconfig 1.0 +vm-topology 1.0 +wrs-heat-templates 1.3.1 +wrs-ssl 1.0.0 +libtrap-handler 1.0 +update-motd 1.0 +monitor-tools 1.0 +namespace-utils 1.0 +nfscheck 1.0 +nova-api-proxy 1.0 +patch-alarm 1.0 +power-mgmt 1.0 +platform-util 1.0 +python-vswitchclient 1.0 +tpm2-openssl-engine 1.0 +puppet-dcorch 1.0.0 +puppet-dcmanager 1.0.0