From 9d3ca4938747e970c79d6080748e03debde75709 Mon Sep 17 00:00:00 2001
From: Dean Troyer
Date: Wed, 30 May 2018 16:16:51 -0700
Subject: [PATCH] StarlingX open source release updates

Signed-off-by: Dean Troyer
---
 CONTRIBUTORS.wrs | 7 +
 LICENSE | 202 +
 README.rst | 5 +
 ceph-manager/.gitignore | 6 +
 ceph-manager/LICENSE | 202 +
 ceph-manager/PKG-INFO | 13 +
 ceph-manager/centos/build_srpm.data | 3 +
 ceph-manager/centos/ceph-manager.spec | 70 +
 ceph-manager/ceph-manager/LICENSE | 202 +
 .../ceph-manager/ceph_manager/__init__.py | 5 +
 .../ceph_manager/cache_tiering.py | 705 +++
 .../ceph-manager/ceph_manager/ceph.py | 164 +
 .../ceph-manager/ceph_manager/constants.py | 107 +
 .../ceph-manager/ceph_manager/exception.py | 130 +
 .../ceph-manager/ceph_manager/i18n.py | 15 +
 .../ceph-manager/ceph_manager/monitor.py | 893 +++
 .../ceph-manager/ceph_manager/server.py | 249 +
 .../ceph_manager/tests/__init__.py | 0
 .../ceph_manager/tests/test_cache_flush.py | 309 +
 ceph-manager/ceph-manager/setup.py | 19 +
 .../ceph-manager/test-requirements.txt | 10 +
 ceph-manager/ceph-manager/tox.ini | 29 +
 ceph-manager/files/ceph-manager.logrotate | 11 +
 ceph-manager/files/ceph-manager.service | 17 +
 ceph-manager/scripts/bin/ceph-manager | 17 +
 ceph-manager/scripts/init.d/ceph-manager | 103 +
 ceph/centos/build_srpm.data | 5 +
 ceph/centos/ceph.spec | 1 +
 ceph/files/ceph-manage-journal.py | 326 ++
 mwa-perian.map | 3 +
 openstack/cinder_conf_dummy/LICENSE | 202 +
 .../centos/build_srpm.data | 5 +
 .../centos/distributedcloud-client.spec | 81 +
 .../distributedcloud/centos/build_srpm.data | 6 +
 .../centos/distributedcloud.spec | 168 +
 .../centos/files/dcmanager-api.service | 13 +
 .../centos/files/dcmanager-manager.service | 13 +
 .../centos/files/dcorch-api.service | 13 +
 .../files/dcorch-cinder-api-proxy.service | 13 +
 .../centos/files/dcorch-engine.service | 13 +
 .../files/dcorch-neutron-api-proxy.service | 13 +
 .../files/dcorch-nova-api-proxy.service | 13 +
 .../centos/files/dcorch-snmp.service | 14 +
 .../files/dcorch-sysinv-api-proxy.service | 13 +
 .../openstack-aodh/centos/build_srpm.data | 1 +
 ...files-and-create-expirer-cron-script.patch | 245 +
 ...te-package-versioning-for-TIS-format.patch | 27 +
 .../0001-meta-modify-aodh-api.patch | 72 +
 .../0001-meta-pass-aodh-api-config.patch | 25 +
 .../0006-add-drivername-for-postgresql.patch | 32 +
 .../centos/meta_patches/PATCH_ORDER | 6 +
 .../meta-remove-default-logrotate.patch | 42 +
 .../centos/patches/0001-modify-aodh-api.patch | 65 +
 ...upport-for-postgresql-connection-set.patch | 65 +
 openstack/openstack-aodh/centos/srpm_path | 1 +
 .../openstack-ironic/centos/build_srpm.data | 6 +
 .../centos/files/ironic-dist.conf | 4 +
 .../centos/files/ironic-rootwrap-sudoers | 2 +
 .../centos/files/openstack-ironic-api.service | 12 +
 .../files/openstack-ironic-conductor.service | 12 +
 .../centos/openstack-ironic.spec | 284 +
 .../centos/build_srpm.data | 6 +
 .../centos/openstack-magnum-ui.spec | 93 +
 .../openstack-magnum/centos/build_srpm.data | 6 +
 .../centos/files/openstack-magnum-api.service | 15 +
 .../files/openstack-magnum-conductor.service | 15 +
 .../centos/openstack-magnum.spec | 325 ++
 .../centos/build_srpm.data | 5 +
 .../centos/openstack-murano-ui.spec | 147 +
 .../openstack-murano/centos/build_srpm.data | 6 +
 .../centos/files/openstack-murano-api.service | 12 +
 .../files/openstack-murano-cf-api.service | 12 +
 .../files/openstack-murano-engine.service | 12 +
 .../centos/openstack-murano.spec | 290 +
 .../openstack-panko/centos/build_srpm.data | 1 +
 .../meta_patches/0001-panko-config.patch | 171 +
 ...02-spec-change-event-list-descending.patch | 32 +
 ...uery-to-sqlalchemy-with-non-admin-us.patch | 32 +
 .../centos/meta_patches/PATCH_ORDER | 3 +
 .../patches/0001-modify-panko-api.patch | 63 +
 .../0002-Change-event-list-descending.patch | 27 +
 ...ry-to-sqlalchemy-with-non-admin-user.patch | 101 +
 openstack/openstack-panko/centos/srpm_path | 1 +
 .../openstack-ras/centos/build_srpm.data | 5 +
 .../openstack-ras/centos/openstack-ras.spec | 80 +
 .../openstack-ras/CGCSkeyringsupport.patch | 221 +
 .../openstack-ras/openstack-ras/aodh.patch | 1467 +++++
 .../openstack-ras/ceilometer-mem-db.patch | 374 ++
 ...ceilometer-monitor-child-amqp-status.patch | 28 +
 .../openstack-ras/ceilometer-monitor.patch | 22 +
 .../ceilometer-version-pipeline.patch | 63 +
 .../openstack-ras/ceilometer.patch | 1726 ++++++
 .../openstack-ras/ceilometer_pipeline.patch | 150 +
 ...cgts-4061-cinder-volume-service-down.patch | 141 +
 .../cinder-volume-enable-fix.patch | 18 +
 .../cinder-volume-fail-amqp-check.patch | 93 +
 ...able-multiple-nova-conductor-workers.patch | 95 +
 .../glance-api-bypass-monitor.patch | 16 +
 .../openstack-ras/glance-api-juno.patch | 13 +
 .../openstack-ras/heat-cloudwatch.patch | 349 ++
 .../heat-engine-support-workers.patch | 52 +
 .../openstack-ras/openstack-ras/heat.patch | 698 +++
 .../openstack-ras/neutron-logrotate.patch | 15 +
 .../neutron-server-sriov-config.patch | 52 +
 .../nova_novnc_kill_children.patch | 64 +
 .../openstack-ras/nova_prestart_hooks.patch | 42 +
 .../openstack-ras/nova_set_cwd.patch | 94 +
 .../openstack-ras/openstack-ras.patch | 405 ++
 .../pkill_orphaned_processes.patch | 2056 +++++++
 .../plugin_config_parameter_name.patch | 57 +
 .../openstack-ras/rebase_workaround.patch | 20 +
 .../remove-ceilometer-mem-db.patch | 388 ++
 ...ed_up_respons_to_stop_ceilometer_svc.patch | 87 +
 .../python-ceilometer/centos/build_srpm.data | 11 +
 .../centos/files/ceilometer-dist.conf | 6 +
 .../centos/files/ceilometer-rootwrap-sudoers | 2 +
 .../centos/files/ceilometer.conf.sample | 1176 ++++
 .../centos/files/ceilometer.logrotate | 9 +
 .../files/openstack-ceilometer-api.service | 13 +
 .../openstack-ceilometer-collector.service | 13 +
 .../files/openstack-ceilometer-ipmi.service | 13 +
 .../openstack-ceilometer-notification.service | 13 +
 .../centos/files/openstack-ceilometer-polling | 1 +
 .../openstack-ceilometer-polling.service | 16 +
 .../centos/openstack-ceilometer.spec | 699 +++
 .../static/ceilometer-agent-compute | 125 +
 .../static/ceilometer-expirer-active | 60 +
 .../static/ceilometer-polling | 138 +
 ...eilometer-polling-compute.conf.pmon.centos | 26 +
 .../static/ceilometer-polling.conf | 19 +
 .../ceilometer-polling.conf.pmon.centos | 18 +
 .../centos/build_srpm.data | 10 +
 .../centos/python-ceilometerclient.spec | 193 +
 .../python-cinder/centos/build_srpm.data | 5 +
 .../centos/files/cinder-dist.conf | 19 +
 .../centos/files/cinder-purge-deleted-active | 63 +
 .../python-cinder/centos/files/cinder-sudoers | 3 +
 .../centos/files/cinder.conf.sample | 5154 +++++++++++++++++
 .../centos/files/cinder.logrotate | 11 +
 .../centos/files/openstack-cinder-api.service | 18 +
 .../files/openstack-cinder-backup.service | 16 +
 .../files/openstack-cinder-scheduler.service | 17 +
 .../files/openstack-cinder-volume.service | 19 +
 .../python-cinder/centos/files/restart-cinder | 164 +
 .../centos/openstack-cinder.spec | 464 ++
 .../centos/build_srpm.data | 10 +
 .../centos/python-cinderclient.spec | 177 +
 .../centos/build_srpm.data | 1 +
 ...te-package-versioning-for-TIS-format.patch | 25 +
 ...0002-remove-rpm-build-time-TOX-tests.patch | 36 +
 .../0003-meta-roll-in-TIS-patches.patch | 47 +
 ...isable-token-validation-per-auth-req.patch | 24 +
 ...nts-in-cookie-to-improve-performance.patch | 25 +
 .../centos/meta_patches/PATCH_ORDER | 5 +
 .../0001-Pike-rebase-for-openstack-auth.patch | 495 ++
 ...le-token-validation-per-auth-request.patch | 28 +
 ...nts-in-cookie-to-improve-performance.patch | 69 +
 .../centos/srpm_path | 1 +
 .../centos/build_srpm.data | 1 +
 ...te-package-versioning-for-TIS-format.patch | 25 +
 ...-patch-Check-ceph-cluster-free-space.patch | 34 +
 .../0003-meta-patch-Glance-Driver.patch | 32 +
 .../centos/meta_patches/PATCH_ORDER | 3 +
 ...ter-free-space-before-creating-image.patch | 298 +
 .../patches/0002-Add-glance-driver.patch | 250 +
 .../python-glance-store/centos/srpm_path | 1 +
 .../python-glance/centos/build_srpm.data | 4 +
 .../centos/files/glance-api-dist.conf | 20 +
 .../centos/files/glance-cache-dist.conf | 5 +
 .../centos/files/glance-purge-deleted-active | 63 +
 .../centos/files/glance-registry-dist.conf | 20 +
 .../centos/files/glance-scrubber-dist.conf | 6 +
 .../python-glance/centos/files/glance-sudoers | 3 +
 .../centos/files/glance-swift.conf | 25 +
 .../centos/files/openstack-glance-api.service | 19 +
 .../files/openstack-glance-registry.service | 17 +
 .../files/openstack-glance-scrubber.service | 17 +
 .../centos/files/openstack-glance.logrotate | 8 +
 .../python-glance/centos/files/restart-glance | 150 +
 .../centos/openstack-glance.spec | 406 ++
 .../centos/build_srpm.data | 5 +
 .../centos/files/image-backup.sh | 212 +
 .../centos/python-glanceclient.spec | 187 +
 .../openstack-heat/centos/build_srpm.data | 7 +
 .../centos/files/heat-dist.conf | 35 +
 .../centos/files/heat-purge-deleted-active | 69 +
 .../centos/files/heat.conf.sample | 1375 +++++
 .../centos/files/heat.logrotate | 6 +
 .../centos/files/openstack-heat-all.service | 11 +
 .../files/openstack-heat-api-cfn.service | 11 +
 .../openstack-heat-api-cloudwatch.service | 11 +
 .../centos/files/openstack-heat-api.service | 11 +
 .../files/openstack-heat-engine.service | 11 +
 .../openstack-heat/centos/openstack-heat.spec | 540 ++
 openstack/python-heat/python-heat/.yamllint | 10 +
 .../python-heat/python-heat/README.template | 0
 .../python-heat/python-heat/templates/LICENSE | 202 +
 .../python-heat/python-heat/templates/README | 213 +
 .../python-heat/templates/hot/demo/README.txt | 35 +
 .../python-heat/templates/hot/demo/cfn_cron | 1 +
 .../templates/hot/demo/gen-add-load-service | 105 +
 .../templates/hot/demo/gen-add-load.sh | 48 +
 .../templates/hot/demo/gen-traffic-service | 105 +
 .../templates/hot/demo/gen-traffic.sh | 21 +
 .../templates/hot/demo/get_cpu_load | 204 +
 .../templates/hot/demo/iperf-server-service | 105 +
 .../python-heat/templates/hot/demo/make_load | 4 +
 .../hot/demo/network-appliance-install.sh | 215 +
 .../templates/hot/demo/pkt-capture.sh | 15 +
 .../templates/hot/demo/scaleUpDown.yaml | 316 +
 .../hot/demo/traffic-generator-install.sh | 27 +
 .../hot/scenarios/BootFromCinder.yaml | 118 +
 .../scenarios/CFNPushStatsAutoScaling.yaml | 305 +
 .../hot/scenarios/CombinationAutoScaling.yaml | 234 +
 .../templates/hot/scenarios/LabSetup.yaml | 379 ++
 .../hot/scenarios/NestedAutoScale.yaml | 212 +
 .../templates/hot/scenarios/NestedStack.yaml | 103 +
 .../hot/scenarios/Networking_and_Servers.yaml | 217 +
 .../hot/scenarios/PortForwarding.yaml | 210 +
 .../templates/hot/scenarios/UserData.yaml | 175 +
 .../templates/hot/scenarios/VIF.yaml | 96 +
 .../hot/scenarios/VMAutoScaling.yaml | 223 +
 .../templates/hot/scenarios/WRSQoSPolicy.yaml | 59 +
 .../hot/simple/OS_Ceilometer_Alarm.yaml | 102 +
 .../hot/simple/OS_Cinder_Volume.yaml | 144 +
 .../simple/OS_Cinder_VolumeAttachment.yaml | 122 +
 .../templates/hot/simple/OS_Glance_Image.yaml | 91 +
 .../hot/simple/OS_Heat_AccessPolicy.yaml | 77 +
 .../hot/simple/OS_Heat_AutoScalingGroup.yaml | 77 +
 .../templates/hot/simple/OS_Heat_Stack.yaml | 59 +
 .../hot/simple/OS_Neutron_FloatingIP.yaml | 83 +
 .../templates/hot/simple/OS_Neutron_Net.yaml | 113 +
 .../templates/hot/simple/OS_Neutron_Port.yaml | 137 +
 .../hot/simple/OS_Neutron_Router.yaml | 102 +
 .../hot/simple/OS_Neutron_RouterGateway.yaml | 75 +
 .../simple/OS_Neutron_RouterInterface.yaml | 107 +
 .../hot/simple/OS_Neutron_SecurityGroup.yaml | 61 +
 .../hot/simple/OS_Neutron_Subnet.yaml | 149 +
 .../templates/hot/simple/OS_Nova_Flavor.yaml | 106 +
 .../templates/hot/simple/OS_Nova_KeyPair.yaml | 63 +
 .../templates/hot/simple/OS_Nova_Server.yaml | 226 +
 .../hot/simple/OS_Nova_ServerGroup.yaml | 99 +
 .../simple/WR_Neutron_Port_Forwarding.yaml | 129 +
 .../hot/simple/WR_Neutron_ProviderNet.yaml | 92 +
 .../simple/WR_Neutron_ProviderNetRange.yaml | 121 +
 .../hot/simple/WR_Neutron_QoSPolicy.yaml | 67 +
 .../wrs-heat-template/centos/build_srpm.data | 2 +
 .../centos/wrs-heat-templates.spec | 29 +
 .../python-heat/wrs-heat-template/python-heat | 1 +
 .../python-heatclient/centos/build_srpm.data | 1 +
 ...age-versioning-format-for-TiS-format.patch | 25 +
 ...0002-Packages-sdk-for-remote-clients.patch | 51 +
 .../0003-Apply-timezone-support-patch.patch | 37 +
 .../centos/meta_patches/PATCH_ORDER | 3 +
 ...0001-timezone-support-for-heatclient.patch | 224 +
 openstack/python-heatclient/centos/srpm_path | 2 +
 .../python-horizon/centos/build_srpm.data | 5 +
 .../centos/files/guni_config.py | 59 +
 .../centos/files/horizon-assets-compress | 43 +
 .../centos/files/horizon-clearsessions | 3 +
 .../centos/files/horizon-patching-restart | 80 +
 .../files/horizon-region-exclusions.csv | 12 +
 .../python-horizon/centos/files/horizon.init | 157 +
 .../centos/files/horizon.logrotate | 13 +
 .../centos/files/local_settings.py | 1198 ++++
 .../files/openstack-dashboard-httpd-2.4.conf | 19 +
 .../openstack-dashboard-httpd-logging.conf | 32 +
 .../python-django-horizon-logrotate.conf | 8 +
 .../files/python-django-horizon-systemd.conf | 3 +
 .../centos/python-django-horizon.spec | 517 ++
 .../centos/build_srpm.data | 5 +
 .../centos/python-ironicclient.spec | 141 +
 .../python-keystone/centos/build_srpm.data | 5 +
 .../python-keystone/centos/files/keystone-all | 151 +
 .../files/keystone-fernet-keys-rotate-active | 64 +
 .../files/openstack-keystone.defaultconf | 2 +
 .../centos/files/openstack-keystone.logrotate | 11 +
 .../centos/files/openstack-keystone.service | 14 +
 .../centos/files/openstack-keystone.sysctl | 3 +
 .../centos/files/openstack-keystone.tmpfiles | 1 +
 .../centos/files/password-rules.conf | 34 +
 .../centos/openstack-keystone.spec | 310 +
 .../centos/build_srpm.data | 1 +
 ...te-package-versioning-for-TIS-format.patch | 27 +
 ...-fix-neutron-error-not-shown-to-user.patch | 26 +
 .../0003-meta-spec-remote-client.patch | 55 +
 ...04-meta-dont-remove-requirements-txt.patch | 25 +
 ...unexpected-arguments-to-token-plugin.patch | 25 +
 .../centos/meta_patches/PATCH_ORDER | 5 +
 .../fix-neutron-error-not-shown-to-user.patch | 31 +
 ...ected-arguments-to-token-auth-plugin.patch | 29 +
 .../python-keystoneauth1/centos/srpm_path | 1 +
 .../centos/build_srpm.data | 1 +
 ...te-package-versioning-for-TIS-format.patch | 13 +
 .../0002-meta-public-adminURL-detection.patch | 16 +
 ...003-meta-TiS-remote-client-sdk-patch.patch | 47 +
 ...04-meta-dont-remove-requirements-txt.patch | 13 +
 ...-buildrequires-python-setuptools_scm.patch | 20 +
 .../centos/meta_patches/PATCH_ORDER | 5 +
 ...one-client-public-adminURL-detection.patch | 55 +
 .../python-keystoneclient/centos/srpm_path | 1 +
 .../CGCSkeyringsupport.patch | 144 +
 .../extend_token_expiry_window.patch | 16 +
 ...one-client-public-adminURL-detection.patch | 45 +
 .../v2-client-empty-password-check.patch | 26 +
 .../centos/build_srpm.data | 1 +
 ...te-package-versioning-for-TIS-format.patch | 35 +
 .../0002-Upstream-gnnochi-panko-fix.patch | 21 +
 .../centos/meta_patches/PATCH_ORDER | 2 +
 .../0001-Upstream-gnnochi-panko-fix.patch | 70 +
 .../centos/srpm_path | 1 +
 .../centos/build_srpm.data | 6 +
 .../centos/python-magnumclient.spec | 232 +
 .../centos/build_srpm.data | 5 +
 .../centos/python-muranoclient.spec | 195 +
 .../centos/build_srpm.data | 4 +
 .../centos/python-networking-bgpvpn.spec | 156 +
 .../centos/build_srpm.data | 4 +
 .../centos/python-networking-odl.spec | 80 +
 .../python-networking-odl/LICENSE | 176 +
 .../centos/build_srpm.data | 4 +
 .../centos/python-networking-sfc.spec | 185 +
 .../centos/build_srpm.data | 5 +
 .../centos/files/neutron-bgp-dragent.init | 97 +
 .../centos/files/neutron-bgp-dragent.pmon | 16 +
 .../centos/files/neutron-bgp-dragent.service | 15 +
 .../python-neutron-dynamic-routing.spec | 130 +
 .../python-neutron-lib/centos/build_srpm.data | 4 +
 .../centos/python-neutron-lib.spec | 105 +
 .../python-neutron/centos/build_srpm.data | 5 +
 .../centos/files/NetnsCleanup.ocf_ra | 154 +
 .../centos/files/NeutronScale.ocf_ra | 214 +
 .../centos/files/OVSCleanup.ocf_ra | 154 +
 .../python-neutron/centos/files/conf.README | 9 +
 .../centos/files/neutron-dhcp-agent.init | 87 +
 .../centos/files/neutron-dhcp-agent.pmon | 24 +
 .../centos/files/neutron-dhcp-agent.service | 16 +
 .../centos/files/neutron-dist.conf | 11 +
 .../centos/files/neutron-l3-agent.service | 13 +
 .../files/neutron-linuxbridge-agent.service | 13 +
 .../files/neutron-linuxbridge-cleanup.service | 15 +
 .../files/neutron-macvtap-agent.service | 13 +
 .../centos/files/neutron-metadata-agent.init | 87 +
 .../centos/files/neutron-metadata-agent.pmon | 24 +
 .../files/neutron-metadata-agent.service | 17 +
 .../files/neutron-metering-agent.service | 13 +
 .../centos/files/neutron-netns-cleanup.init | 74 +
 .../files/neutron-netns-cleanup.service | 15 +
 .../files/neutron-openvswitch-agent.service | 14 +
 .../centos/files/neutron-ovs-cleanup.init | 67 +
 .../centos/files/neutron-ovs-cleanup.service | 15 +
 .../centos/files/neutron-rpc-server.service | 14 +
 .../centos/files/neutron-server.init | 137 +
 .../centos/files/neutron-server.service | 13 +
 .../centos/files/neutron-sriov-nic-agent.init | 87 +
 .../centos/files/neutron-sriov-nic-agent.pmon | 24 +
 .../files/neutron-sriov-nic-agent.service | 16 +
 .../centos/files/neutron-sudoers | 4 +
 .../centos/openstack-neutron.spec | 812 +++
 .../python-neutron/neutron-agent.init | 87 +
 .../python-neutron/neutron-server.init | 137 +
 .../neutron-src-dist-files.exclude | 5 +
 .../neutron-dhcp-agent-netns-cleanup.cron | 2 +
 .../centos/build_srpm.data | 4 +
 .../centos/python-neutronclient.spec | 190 +
 openstack/python-nova/centos/build_srpm.data | 6 +
 .../files/kvm_timer_advance_setup.service | 14 +
 .../centos/files/nova-clean-thinpool.service | 13 +
 .../centos/files/nova-compute.init | 95 +
 .../python-nova/centos/files/nova-dist.conf | 25 +
 .../centos/files/nova-ifc-template | 15 +
 .../centos/files/nova-migration-wrapper | 65 +
 .../centos/files/nova-pci-interrupts | 108 +
 .../centos/files/nova-placement-api | 64 +
 .../centos/files/nova-placement-api.conf | 25 +
 .../python-nova/centos/files/nova-polkit.pkla | 6 +
 .../centos/files/nova-polkit.rules | 8 +
 .../centos/files/nova-purge-deleted-active | 68 +
 .../python-nova/centos/files/nova-restart | 60 +
 .../python-nova/centos/files/nova-ssh-config | 4 +
 .../python-nova/centos/files/nova-sudoers | 4 +
 .../python-nova/centos/files/nova.conf.sample | 4007 +++++++++++++
 .../python-nova/centos/files/nova.logrotate | 7 +
 .../centos/files/nova_migration-rootwrap.conf | 6 +
 .../nova_migration-rootwrap_cold_migration | 9 +
 .../centos/files/nova_migration-sudoers | 4 +
 .../files/nova_migration_authorized_keys | 4 +
 .../centos/files/nova_migration_identity | 6 +
 .../centos/files/nova_setup_timer_advance | 120 +
 .../centos/files/openstack-nova-api.service | 15 +
 .../centos/files/openstack-nova-cells.service | 15 +
 .../files/openstack-nova-compute.service | 20 +
 .../files/openstack-nova-conductor.service | 15 +
 .../files/openstack-nova-console.service | 15 +
 .../files/openstack-nova-consoleauth.service | 15 +
 .../files/openstack-nova-metadata-api.service | 15 +
 .../files/openstack-nova-network.service | 18 +
 .../files/openstack-nova-novncproxy.service | 13 +
 .../files/openstack-nova-novncproxy.sysconfig | 2 +
 .../openstack-nova-os-compute-api.service | 14 +
 .../files/openstack-nova-scheduler.service | 15 +
 .../files/openstack-nova-serialproxy.service | 13 +
 .../openstack-nova-spicehtml5proxy.service | 13 +
 .../files/openstack-nova-xvpvncproxy.service | 13 +
 .../python-nova/centos/files/policy.json | 2 +
 .../python-nova/centos/files/resctrl-show | 281 +
 .../python-nova/centos/openstack-nova.spec | 896 +++
 .../python-nova/collect_host_memory_info.sh | 45 +
 openstack/python-nova/python-nova/nova.conf | 62 +
 openstack/python-nova/python-nova/nova.init | 157 +
 .../python-nova/nova_authorized_cmds | 16 +
 .../python-nova/nova_clean_thinpool | 66 +
 .../python-nova/nova_setup_cpusets | 188 +
 .../openstack-nova-compute-setup.service | 14 +
 .../python-novaclient/centos/build_srpm.data | 4 +
 .../centos/python-novaclient.spec | 186 +
 .../centos/build_srpm.data | 1 +
 ...te-package-versioning-for-TIS-format.patch | 13 +
 ...meta-US106901-Openstack-CLI-Adoption.patch | 25 +
 .../meta_patches/0001-meta-us101470.patch | 24 +
 ...meta-US106901-Openstack-CLI-Adoption.patch | 25 +
 .../meta_patches/0002-meta-us101470.patch | 24 +
 .../0002-spec-remote-clients-sdk.patch | 42 +
 ...meta-US106901-Openstack-CLI-Adoption.patch | 24 +
 ...03-meta-dont-remove-requirements-txt.patch | 13 +
 ...added-missing-build-require-dateutil.patch | 12 +
 ...05-meta-patch-for-neutron-extensions.patch | 14 +
 ...enstackClient_Passwordchange_warning.patch | 12 +
 ...g-only-when-the-admin-password-chang.patch | 12 +
 ...-keystone-region-name-to-identity-cl.patch | 26 +
 .../1000-remove-version-requirements.patch | 33 +
 .../1001-Turn-off-openstackclient-check.patch | 36 +
 ...1002-require-python-ceilometerclient.patch | 20 +
 .../centos/meta_patches/PATCH_ORDER | 16 +
 ...-keystone-region-name-to-identity-cl.patch | 27 +
 ...ckclient-implementation-of-novaclien.patch | 232 +
 ...0001-US106901-Openstack-CLI-Adoption.patch | 80 +
 .../patches/0001-neutron-extensions.patch | 2119 +++++
 ...ckclient-implementation-of-novaclien.patch | 273 +
 ...0002-US106901-Openstack-CLI-Adoption.patch | 330 ++
 ...0003-US106901-Openstack-CLI-Adoption.patch | 435 ++
 ...g-only-when-the-admin-password-chang.patch | 42 +
 ...enstackClient_Passwordchange_warning.patch | 22 +
 .../python-openstackclient/centos/srpm_path | 1 +
 .../centos/build_srpm.data | 1 +
 .../0001-WRS-apply-WRS-patches.patch | 33 +
 .../centos/meta_patches/PATCH_ORDER | 1 +
 ....-Fix-when-building-outside-git-tree.patch | 32 +
 .../centos/srpm_path | 1 +
 .../centos/build_srpm.data | 3 +
 .../centos/python-openstacksdk.spec | 206 +
 .../python-osc-lib/centos/build_srpm.data | 1 +
 ...te-package-versioning-for-TIS-format.patch | 26 +
 ...-keystone-region-name-option-to-open.patch | 28 +
 ...ate-remote-client-to-include-osc-lib.patch | 52 +
 .../centos/meta_patches/PATCH_ORDER | 4 +
 ...-keystone-region-name-option-to-open.patch | 45 +
 openstack/python-osc-lib/centos/srpm_path | 1 +
 .../centos/build_srpm.data | 1 +
 .../centos/meta_patches/PATCH_ORDER | 2 +
 .../meta_patches/spec-add-fair-lock.patch | 15 +
 ...te-package-versioning-for-TIS-format.patch | 13 +
 .../centos/patches/add-fair-lock.patch | 106 +
 .../python-oslo-concurrency/centos/srpm_path | 1 +
 .../centos/build_srpm.data | 1 +
 .../0004-disable-check-on-build.patch | 27 +
 .../centos/meta_patches/PATCH_ORDER | 4 +
 .../meta_patches/fix-pifpaf-build-error.patch | 42 +
 ...at-rate-to-decrease-polling-interval.patch | 28 +
 ...te-package-versioning-for-tis-format.patch | 26 +
 ...tbeat-rate-to-decrease-poll-interval.patch | 37 +
 .../python-oslo-messaging/centos/srpm_path | 1 +
 .../centos/build_srpm.data | 1 +
 .../centos/meta_patches/PATCH_ORDER | 2 +
 ...gcall-permit-aborting-while-sleeping.patch | 37 +
 ...te-package-versioning-for-tis-format.patch | 27 +
 ...gcall-permit-aborting-while-sleeping.patch | 177 +
 .../python-oslo-service/centos/srpm_path | 1 +
 openstack/python-wsme/centos/build_srpm.data | 1 +
 ...te-package-versioning-for-TIS-format.patch | 25 +
 .../0003-Remove-TOX-calls-from-build.patch | 29 +
 .../centos/meta_patches/PATCH_ORDER | 3 +
 ...lientSideError-logging-verbosity-fix.patch | 36 +
 .../patches/0001-log-client-side-errors.patch | 41 +
 openstack/python-wsme/centos/srpm_path | 1 +
 .../rabbitmq-server/centos/build_srpm.data | 3 +
 ...te-package-versioning-for-TIS-format.patch | 31 +
 ...006-Update-rabbitmq-server.logrotate.patch | 25 +
 .../Allow-rabbitmqctl-to-run-as-root.patch | 27 +
 .../centos/meta_patches/PATCH_ORDER | 6 +
 .../Set-root-home-for-rabbitmqctl.patch | 24 +
 .../centos/meta_patches/packstack-fixes.patch | 37 +
 .../meta_patches/spec-comments-for-ocf.patch | 49 +
 .../update-rabbitmq-server.service.patch | 32 +
 openstack/rabbitmq-server/centos/srpm_path | 1 +
 .../rabbitmq-server/rabbitmq-server | 251 +
 .../rabbitmq-server/rabbitmq-server.ocf | 413 ++
 497 files changed, 56138 insertions(+)
 create mode 100644 CONTRIBUTORS.wrs
 create mode 100644 LICENSE
 create mode 100644 README.rst
 create mode 100644 ceph-manager/.gitignore
 create mode 100644 ceph-manager/LICENSE
 create mode 100644 ceph-manager/PKG-INFO
 create mode 100644 ceph-manager/centos/build_srpm.data
 create mode 100644 ceph-manager/centos/ceph-manager.spec
 create mode 100644 ceph-manager/ceph-manager/LICENSE
 create mode 100644 ceph-manager/ceph-manager/ceph_manager/__init__.py
 create mode 100644 ceph-manager/ceph-manager/ceph_manager/cache_tiering.py
 create mode 100644 ceph-manager/ceph-manager/ceph_manager/ceph.py
 create mode 100644 ceph-manager/ceph-manager/ceph_manager/constants.py
 create mode 100644 ceph-manager/ceph-manager/ceph_manager/exception.py
 create mode 100644 ceph-manager/ceph-manager/ceph_manager/i18n.py
 create mode 100644 ceph-manager/ceph-manager/ceph_manager/monitor.py
 create mode 100644 ceph-manager/ceph-manager/ceph_manager/server.py
 create mode 100644 ceph-manager/ceph-manager/ceph_manager/tests/__init__.py
 create mode 100644 ceph-manager/ceph-manager/ceph_manager/tests/test_cache_flush.py
 create mode 100644 ceph-manager/ceph-manager/setup.py
 create mode 100644 ceph-manager/ceph-manager/test-requirements.txt
 create mode 100644 ceph-manager/ceph-manager/tox.ini
 create mode 100644 ceph-manager/files/ceph-manager.logrotate
 create mode 100644 ceph-manager/files/ceph-manager.service
 create mode 100644 ceph-manager/scripts/bin/ceph-manager
 create mode 100644 ceph-manager/scripts/init.d/ceph-manager
 create mode 100644 ceph/centos/build_srpm.data
 create mode 120000 ceph/centos/ceph.spec
 create mode 100644 ceph/files/ceph-manage-journal.py
 create mode 100644 mwa-perian.map
 create mode 100644 openstack/cinder_conf_dummy/LICENSE
 create mode 100644 openstack/distributedcloud-client/centos/build_srpm.data
 create mode 100644 openstack/distributedcloud-client/centos/distributedcloud-client.spec
 create mode 100644 openstack/distributedcloud/centos/build_srpm.data
 create mode 100644 openstack/distributedcloud/centos/distributedcloud.spec
 create mode 100644 openstack/distributedcloud/centos/files/dcmanager-api.service
 create mode 100644 openstack/distributedcloud/centos/files/dcmanager-manager.service
 create mode 100644 openstack/distributedcloud/centos/files/dcorch-api.service
 create mode 100644 openstack/distributedcloud/centos/files/dcorch-cinder-api-proxy.service
 create mode 100644 openstack/distributedcloud/centos/files/dcorch-engine.service
 create mode 100644 openstack/distributedcloud/centos/files/dcorch-neutron-api-proxy.service
 create mode 100644 openstack/distributedcloud/centos/files/dcorch-nova-api-proxy.service
 create mode 100644 openstack/distributedcloud/centos/files/dcorch-snmp.service
 create mode 100644 openstack/distributedcloud/centos/files/dcorch-sysinv-api-proxy.service
 create mode 100644 openstack/openstack-aodh/centos/build_srpm.data
 create mode 100644 openstack/openstack-aodh/centos/meta_patches/0001-Modify-service-files-and-create-expirer-cron-script.patch
 create mode 100644 openstack/openstack-aodh/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch
 create mode 100644 openstack/openstack-aodh/centos/meta_patches/0001-meta-modify-aodh-api.patch
 create mode 100644 openstack/openstack-aodh/centos/meta_patches/0001-meta-pass-aodh-api-config.patch
 create mode 100644 openstack/openstack-aodh/centos/meta_patches/0006-add-drivername-for-postgresql.patch
 create mode 100644 openstack/openstack-aodh/centos/meta_patches/PATCH_ORDER
 create mode 100644 openstack/openstack-aodh/centos/meta_patches/meta-remove-default-logrotate.patch
 create mode 100644 openstack/openstack-aodh/centos/patches/0001-modify-aodh-api.patch
 create mode 100644 openstack/openstack-aodh/centos/patches/0002-Add-drivername-support-for-postgresql-connection-set.patch
 create mode 100644 openstack/openstack-aodh/centos/srpm_path
 create mode 100644 openstack/openstack-ironic/centos/build_srpm.data
 create mode 100644 openstack/openstack-ironic/centos/files/ironic-dist.conf
 create mode 100644 openstack/openstack-ironic/centos/files/ironic-rootwrap-sudoers
 create mode 100644 openstack/openstack-ironic/centos/files/openstack-ironic-api.service
 create mode 100644 openstack/openstack-ironic/centos/files/openstack-ironic-conductor.service
 create mode 100644 openstack/openstack-ironic/centos/openstack-ironic.spec
 create mode 100644 openstack/openstack-magnum-ui/centos/build_srpm.data
 create mode 100644 openstack/openstack-magnum-ui/centos/openstack-magnum-ui.spec
 create mode 100644 openstack/openstack-magnum/centos/build_srpm.data
 create mode 100644 openstack/openstack-magnum/centos/files/openstack-magnum-api.service
 create mode 100644 openstack/openstack-magnum/centos/files/openstack-magnum-conductor.service
 create mode 100644 openstack/openstack-magnum/centos/openstack-magnum.spec
 create mode 100644 openstack/openstack-murano-ui/centos/build_srpm.data
 create mode 100644 openstack/openstack-murano-ui/centos/openstack-murano-ui.spec
 create mode 100644 openstack/openstack-murano/centos/build_srpm.data
 create mode 100644 openstack/openstack-murano/centos/files/openstack-murano-api.service
 create mode 100644 openstack/openstack-murano/centos/files/openstack-murano-cf-api.service
 create mode 100644 openstack/openstack-murano/centos/files/openstack-murano-engine.service
 create mode 100644 openstack/openstack-murano/centos/openstack-murano.spec
 create mode 100644 openstack/openstack-panko/centos/build_srpm.data
 create mode 100644 openstack/openstack-panko/centos/meta_patches/0001-panko-config.patch
 create mode 100644 openstack/openstack-panko/centos/meta_patches/0002-spec-change-event-list-descending.patch
 create mode 100644 openstack/openstack-panko/centos/meta_patches/0003-spec-fix-event-query-to-sqlalchemy-with-non-admin-us.patch
 create mode 100644 openstack/openstack-panko/centos/meta_patches/PATCH_ORDER
 create mode 100644 openstack/openstack-panko/centos/patches/0001-modify-panko-api.patch
 create mode 100644 openstack/openstack-panko/centos/patches/0002-Change-event-list-descending.patch
 create mode 100644 openstack/openstack-panko/centos/patches/0003-Fix-event-query-to-sqlalchemy-with-non-admin-user.patch
 create mode 100644 openstack/openstack-panko/centos/srpm_path
 create mode 100644 openstack/openstack-ras/centos/build_srpm.data
 create mode 100644 openstack/openstack-ras/centos/openstack-ras.spec
 create mode 100644 openstack/openstack-ras/openstack-ras/CGCSkeyringsupport.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/aodh.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/ceilometer-mem-db.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/ceilometer-monitor-child-amqp-status.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/ceilometer-monitor.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/ceilometer-version-pipeline.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/ceilometer.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/ceilometer_pipeline.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/cgts-4061-cinder-volume-service-down.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/cinder-volume-enable-fix.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/cinder-volume-fail-amqp-check.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/enable-multiple-nova-conductor-workers.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/glance-api-bypass-monitor.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/glance-api-juno.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/heat-cloudwatch.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/heat-engine-support-workers.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/heat.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/neutron-logrotate.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/neutron-server-sriov-config.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/nova_novnc_kill_children.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/nova_prestart_hooks.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/nova_set_cwd.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/openstack-ras.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/pkill_orphaned_processes.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/plugin_config_parameter_name.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/rebase_workaround.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/remove-ceilometer-mem-db.patch
 create mode 100644 openstack/openstack-ras/openstack-ras/speed_up_respons_to_stop_ceilometer_svc.patch
 create mode 100644 openstack/python-ceilometer/centos/build_srpm.data
 create mode 100644 openstack/python-ceilometer/centos/files/ceilometer-dist.conf
 create mode 100644 openstack/python-ceilometer/centos/files/ceilometer-rootwrap-sudoers
 create mode 100644 openstack/python-ceilometer/centos/files/ceilometer.conf.sample
 create mode 100644 openstack/python-ceilometer/centos/files/ceilometer.logrotate
 create mode 100644 openstack/python-ceilometer/centos/files/openstack-ceilometer-api.service
 create mode 100644 openstack/python-ceilometer/centos/files/openstack-ceilometer-collector.service
 create mode 100644 openstack/python-ceilometer/centos/files/openstack-ceilometer-ipmi.service
 create mode 100644 openstack/python-ceilometer/centos/files/openstack-ceilometer-notification.service
 create mode 100644 openstack/python-ceilometer/centos/files/openstack-ceilometer-polling
 create mode 100644 openstack/python-ceilometer/centos/files/openstack-ceilometer-polling.service
 create mode 100644 openstack/python-ceilometer/centos/openstack-ceilometer.spec
 create mode 100644 openstack/python-ceilometer/python-ceilometer/static/ceilometer-agent-compute
 create mode 100644 openstack/python-ceilometer/python-ceilometer/static/ceilometer-expirer-active
 create mode 100644 openstack/python-ceilometer/python-ceilometer/static/ceilometer-polling
 create mode 100644 openstack/python-ceilometer/python-ceilometer/static/ceilometer-polling-compute.conf.pmon.centos
 create mode 100644 openstack/python-ceilometer/python-ceilometer/static/ceilometer-polling.conf
 create mode 100644 openstack/python-ceilometer/python-ceilometer/static/ceilometer-polling.conf.pmon.centos
 create mode 100644 openstack/python-ceilometerclient/centos/build_srpm.data
 create mode 100644 openstack/python-ceilometerclient/centos/python-ceilometerclient.spec
 create mode 100644 openstack/python-cinder/centos/build_srpm.data
 create mode 100644 openstack/python-cinder/centos/files/cinder-dist.conf
 create mode 100644 openstack/python-cinder/centos/files/cinder-purge-deleted-active
 create mode 100644 openstack/python-cinder/centos/files/cinder-sudoers
 create mode 100644 openstack/python-cinder/centos/files/cinder.conf.sample
 create mode 100644 openstack/python-cinder/centos/files/cinder.logrotate
 create mode 100644 openstack/python-cinder/centos/files/openstack-cinder-api.service
 create mode 100644 openstack/python-cinder/centos/files/openstack-cinder-backup.service
 create mode 100644 openstack/python-cinder/centos/files/openstack-cinder-scheduler.service
 create mode 100644 openstack/python-cinder/centos/files/openstack-cinder-volume.service
 create mode 100644 openstack/python-cinder/centos/files/restart-cinder
 create mode 100644 openstack/python-cinder/centos/openstack-cinder.spec
 create mode 100644 openstack/python-cinderclient/centos/build_srpm.data
 create mode 100644 openstack/python-cinderclient/centos/python-cinderclient.spec
 create mode 100755 openstack/python-django-openstack-auth/centos/build_srpm.data
 create mode 100644 openstack/python-django-openstack-auth/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch
 create mode 100644 openstack/python-django-openstack-auth/centos/meta_patches/0002-remove-rpm-build-time-TOX-tests.patch
 create mode 100644 openstack/python-django-openstack-auth/centos/meta_patches/0003-meta-roll-in-TIS-patches.patch
 create mode 100644 openstack/python-django-openstack-auth/centos/meta_patches/0004-meta-disable-token-validation-per-auth-req.patch
 create mode 100644 openstack/python-django-openstack-auth/centos/meta_patches/0005-meta-cache-authorized-tenants-in-cookie-to-improve-performance.patch
 create mode 100644 openstack/python-django-openstack-auth/centos/meta_patches/PATCH_ORDER
 create mode 100644 openstack/python-django-openstack-auth/centos/patches/0001-Pike-rebase-for-openstack-auth.patch
 create mode 100644 openstack/python-django-openstack-auth/centos/patches/0002-disable-token-validation-per-auth-request.patch
 create mode 100644 openstack/python-django-openstack-auth/centos/patches/0003-cache-authorized-tenants-in-cookie-to-improve-performance.patch
 create mode 100644 openstack/python-django-openstack-auth/centos/srpm_path
 create mode 100644 openstack/python-glance-store/centos/build_srpm.data
 create mode 100644 openstack/python-glance-store/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch
 create mode 100644 openstack/python-glance-store/centos/meta_patches/0002-meta-patch-Check-ceph-cluster-free-space.patch
 create mode 100644 openstack/python-glance-store/centos/meta_patches/0003-meta-patch-Glance-Driver.patch
 create mode 100644 openstack/python-glance-store/centos/meta_patches/PATCH_ORDER
 create mode 100644 openstack/python-glance-store/centos/patches/0001-Check-ceph-cluster-free-space-before-creating-image.patch
 create mode 100644 openstack/python-glance-store/centos/patches/0002-Add-glance-driver.patch
 create mode 100644 openstack/python-glance-store/centos/srpm_path
 create mode 100644 openstack/python-glance/centos/build_srpm.data
 create mode 100644 openstack/python-glance/centos/files/glance-api-dist.conf
 create mode 100644 openstack/python-glance/centos/files/glance-cache-dist.conf
 create mode 100644 openstack/python-glance/centos/files/glance-purge-deleted-active
 create mode 100644 openstack/python-glance/centos/files/glance-registry-dist.conf
 create mode 100644 openstack/python-glance/centos/files/glance-scrubber-dist.conf
 create mode 100644 openstack/python-glance/centos/files/glance-sudoers
 create mode 100644 openstack/python-glance/centos/files/glance-swift.conf
 create mode 100644 openstack/python-glance/centos/files/openstack-glance-api.service
 create mode 100644 openstack/python-glance/centos/files/openstack-glance-registry.service
 create mode 100644 openstack/python-glance/centos/files/openstack-glance-scrubber.service
 create mode 100644 openstack/python-glance/centos/files/openstack-glance.logrotate
 create mode 100644 openstack/python-glance/centos/files/restart-glance
 create mode 100644 openstack/python-glance/centos/openstack-glance.spec
 create mode 100644 openstack/python-glanceclient/centos/build_srpm.data
 create mode 100644 openstack/python-glanceclient/centos/files/image-backup.sh
 create mode 100644 openstack/python-glanceclient/centos/python-glanceclient.spec
 create mode 100644 openstack/python-heat/openstack-heat/centos/build_srpm.data
 create mode 100644 openstack/python-heat/openstack-heat/centos/files/heat-dist.conf
 create mode 100644 openstack/python-heat/openstack-heat/centos/files/heat-purge-deleted-active
 create mode 100644 openstack/python-heat/openstack-heat/centos/files/heat.conf.sample
 create mode 100644 openstack/python-heat/openstack-heat/centos/files/heat.logrotate
 create mode 100644 openstack/python-heat/openstack-heat/centos/files/openstack-heat-all.service
 create mode 100644 openstack/python-heat/openstack-heat/centos/files/openstack-heat-api-cfn.service
 create mode 100644 openstack/python-heat/openstack-heat/centos/files/openstack-heat-api-cloudwatch.service
 create mode 100644 openstack/python-heat/openstack-heat/centos/files/openstack-heat-api.service
 create mode 100644 openstack/python-heat/openstack-heat/centos/files/openstack-heat-engine.service
 create mode 100644 openstack/python-heat/openstack-heat/centos/openstack-heat.spec
 create mode 100644 openstack/python-heat/python-heat/.yamllint
 create mode 100644 openstack/python-heat/python-heat/README.template
 create mode 100644 openstack/python-heat/python-heat/templates/LICENSE
 create mode 100644 openstack/python-heat/python-heat/templates/README
 create mode 100644 openstack/python-heat/python-heat/templates/hot/demo/README.txt
 create mode 100644 openstack/python-heat/python-heat/templates/hot/demo/cfn_cron
 create mode 100644 openstack/python-heat/python-heat/templates/hot/demo/gen-add-load-service
 create mode 100644 openstack/python-heat/python-heat/templates/hot/demo/gen-add-load.sh
 create mode 100644 openstack/python-heat/python-heat/templates/hot/demo/gen-traffic-service
 create mode 100644 openstack/python-heat/python-heat/templates/hot/demo/gen-traffic.sh
 create mode 100644 openstack/python-heat/python-heat/templates/hot/demo/get_cpu_load
 create mode 100644 openstack/python-heat/python-heat/templates/hot/demo/iperf-server-service
 create mode 100644 openstack/python-heat/python-heat/templates/hot/demo/make_load
 create mode 100644 openstack/python-heat/python-heat/templates/hot/demo/network-appliance-install.sh
 create mode 100644 openstack/python-heat/python-heat/templates/hot/demo/pkt-capture.sh
 create mode 100644 openstack/python-heat/python-heat/templates/hot/demo/scaleUpDown.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/demo/traffic-generator-install.sh
 create mode 100644 openstack/python-heat/python-heat/templates/hot/scenarios/BootFromCinder.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/scenarios/CFNPushStatsAutoScaling.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/scenarios/CombinationAutoScaling.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/scenarios/LabSetup.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/scenarios/NestedAutoScale.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/scenarios/NestedStack.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/scenarios/Networking_and_Servers.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/scenarios/PortForwarding.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/scenarios/UserData.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/scenarios/VIF.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/scenarios/VMAutoScaling.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/scenarios/WRSQoSPolicy.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/simple/OS_Ceilometer_Alarm.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/simple/OS_Cinder_Volume.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/simple/OS_Cinder_VolumeAttachment.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/simple/OS_Glance_Image.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/simple/OS_Heat_AccessPolicy.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/simple/OS_Heat_AutoScalingGroup.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/simple/OS_Heat_Stack.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_FloatingIP.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_Net.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_Port.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_Router.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_RouterGateway.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_RouterInterface.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_SecurityGroup.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_Subnet.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/simple/OS_Nova_Flavor.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/simple/OS_Nova_KeyPair.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/simple/OS_Nova_Server.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/simple/OS_Nova_ServerGroup.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/simple/WR_Neutron_Port_Forwarding.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/simple/WR_Neutron_ProviderNet.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/simple/WR_Neutron_ProviderNetRange.yaml
 create mode 100644 openstack/python-heat/python-heat/templates/hot/simple/WR_Neutron_QoSPolicy.yaml
 create mode 100644 openstack/python-heat/wrs-heat-template/centos/build_srpm.data
 create mode 100644 openstack/python-heat/wrs-heat-template/centos/wrs-heat-templates.spec
 create mode 120000 openstack/python-heat/wrs-heat-template/python-heat
 create mode 100644 openstack/python-heatclient/centos/build_srpm.data
 create mode 100644 openstack/python-heatclient/centos/meta_patches/0001-Update-package-versioning-format-for-TiS-format.patch
 create mode 100644 openstack/python-heatclient/centos/meta_patches/0002-Packages-sdk-for-remote-clients.patch
 create mode 100644 openstack/python-heatclient/centos/meta_patches/0003-Apply-timezone-support-patch.patch
 create mode 100644 openstack/python-heatclient/centos/meta_patches/PATCH_ORDER
 create mode 100644 openstack/python-heatclient/centos/patches/0001-timezone-support-for-heatclient.patch
 create mode 100644 openstack/python-heatclient/centos/srpm_path
 create mode 100755 openstack/python-horizon/centos/build_srpm.data
 create mode 100644 openstack/python-horizon/centos/files/guni_config.py
 create mode 100644 openstack/python-horizon/centos/files/horizon-assets-compress
 create mode 100644 openstack/python-horizon/centos/files/horizon-clearsessions
 create mode 100644 openstack/python-horizon/centos/files/horizon-patching-restart
 create mode 100755 openstack/python-horizon/centos/files/horizon-region-exclusions.csv
 create mode 100755 openstack/python-horizon/centos/files/horizon.init
 create mode 100644 openstack/python-horizon/centos/files/horizon.logrotate
 create mode 100755 openstack/python-horizon/centos/files/local_settings.py
 create mode 100644 openstack/python-horizon/centos/files/openstack-dashboard-httpd-2.4.conf
 create mode 100644 openstack/python-horizon/centos/files/openstack-dashboard-httpd-logging.conf
 create mode 100644 openstack/python-horizon/centos/files/python-django-horizon-logrotate.conf
 create mode 100644 openstack/python-horizon/centos/files/python-django-horizon-systemd.conf
 create mode 100755 openstack/python-horizon/centos/python-django-horizon.spec
 create mode 100644 openstack/python-ironicclient/centos/build_srpm.data
 create mode 100644 openstack/python-ironicclient/centos/python-ironicclient.spec
 create mode 100644 openstack/python-keystone/centos/build_srpm.data
 create mode 100644 openstack/python-keystone/centos/files/keystone-all
 create mode 100644 openstack/python-keystone/centos/files/keystone-fernet-keys-rotate-active
 create mode 100644 openstack/python-keystone/centos/files/openstack-keystone.defaultconf
 create mode 100644 openstack/python-keystone/centos/files/openstack-keystone.logrotate
 create mode 100644 openstack/python-keystone/centos/files/openstack-keystone.service
 create mode 100644 openstack/python-keystone/centos/files/openstack-keystone.sysctl
 create mode 100644 openstack/python-keystone/centos/files/openstack-keystone.tmpfiles
 create mode 100644 openstack/python-keystone/centos/files/password-rules.conf
 create mode 100644 openstack/python-keystone/centos/openstack-keystone.spec
 create mode 100644 openstack/python-keystoneauth1/centos/build_srpm.data
 create mode 100644 openstack/python-keystoneauth1/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch
 create mode 100644 openstack/python-keystoneauth1/centos/meta_patches/0002-meta-fix-neutron-error-not-shown-to-user.patch
 create mode 100644 openstack/python-keystoneauth1/centos/meta_patches/0003-meta-spec-remote-client.patch
 create mode 100644 openstack/python-keystoneauth1/centos/meta_patches/0004-meta-dont-remove-requirements-txt.patch
 create mode 100644 openstack/python-keystoneauth1/centos/meta_patches/0005-meta-ignore-unexpected-arguments-to-token-plugin.patch
 create mode 100644 openstack/python-keystoneauth1/centos/meta_patches/PATCH_ORDER
 create mode 100644 openstack/python-keystoneauth1/centos/patches/fix-neutron-error-not-shown-to-user.patch
 create mode 100644 openstack/python-keystoneauth1/centos/patches/ignore-unexpected-arguments-to-token-auth-plugin.patch
 create mode 100644 openstack/python-keystoneauth1/centos/srpm_path
 create mode 100644 openstack/python-keystoneclient/centos/build_srpm.data
 create mode 100644 openstack/python-keystoneclient/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch
 create mode 100644 openstack/python-keystoneclient/centos/meta_patches/0002-meta-public-adminURL-detection.patch
 create mode 100644 openstack/python-keystoneclient/centos/meta_patches/0003-meta-TiS-remote-client-sdk-patch.patch
 create mode 100644 openstack/python-keystoneclient/centos/meta_patches/0004-meta-dont-remove-requirements-txt.patch
 create mode 100644 openstack/python-keystoneclient/centos/meta_patches/0006-meta-buildrequires-python-setuptools_scm.patch
 create mode 100644 openstack/python-keystoneclient/centos/meta_patches/PATCH_ORDER
 create mode 100644 openstack/python-keystoneclient/centos/patches/internal-keystone-client-public-adminURL-detection.patch
 create mode 100644 openstack/python-keystoneclient/centos/srpm_path
 create mode 100644 openstack/python-keystoneclient/python-keystoneclient/CGCSkeyringsupport.patch
 create mode 100644 openstack/python-keystoneclient/python-keystoneclient/extend_token_expiry_window.patch
 create mode 100644 openstack/python-keystoneclient/python-keystoneclient/internal-keystone-client-public-adminURL-detection.patch
 create mode 100644 openstack/python-keystoneclient/python-keystoneclient/v2-client-empty-password-check.patch
 create mode 100644 openstack/python-keystonemiddleware/centos/build_srpm.data
 create mode 100644 openstack/python-keystonemiddleware/centos/meta_patches/0001-update-package-versioning-for-TIS-format.patch
 create mode 100644 openstack/python-keystonemiddleware/centos/meta_patches/0002-Upstream-gnnochi-panko-fix.patch
 create mode 100644 openstack/python-keystonemiddleware/centos/meta_patches/PATCH_ORDER
 create mode 100644 openstack/python-keystonemiddleware/centos/patches/0001-Upstream-gnnochi-panko-fix.patch
 create mode 100644 openstack/python-keystonemiddleware/centos/srpm_path
 create mode 100644 openstack/python-magnumclient/centos/build_srpm.data
 create mode 100644 openstack/python-magnumclient/centos/python-magnumclient.spec
 create mode 100644 openstack/python-muranoclient/centos/build_srpm.data
 create mode 100644 openstack/python-muranoclient/centos/python-muranoclient.spec
 create mode 100644 openstack/python-networking-bgpvpn/centos/build_srpm.data
 create mode 100644 openstack/python-networking-bgpvpn/centos/python-networking-bgpvpn.spec
 create mode 100644 openstack/python-networking-odl/centos/build_srpm.data
 create mode 100644 openstack/python-networking-odl/centos/python-networking-odl.spec
 create mode 100644 openstack/python-networking-odl/python-networking-odl/LICENSE
 create mode 100644 openstack/python-networking-sfc/centos/build_srpm.data
 create mode 100644 openstack/python-networking-sfc/centos/python-networking-sfc.spec
 create mode 100644 openstack/python-neutron-dynamic-routing/centos/build_srpm.data
 create mode 100755 openstack/python-neutron-dynamic-routing/centos/files/neutron-bgp-dragent.init
 create mode 100644 openstack/python-neutron-dynamic-routing/centos/files/neutron-bgp-dragent.pmon
 create mode 100644 openstack/python-neutron-dynamic-routing/centos/files/neutron-bgp-dragent.service
 create mode 100644 openstack/python-neutron-dynamic-routing/centos/python-neutron-dynamic-routing.spec
 create mode 100644 openstack/python-neutron-lib/centos/build_srpm.data
 create mode 100644 openstack/python-neutron-lib/centos/python-neutron-lib.spec
 create mode 100644 openstack/python-neutron/centos/build_srpm.data
 create mode 100644 openstack/python-neutron/centos/files/NetnsCleanup.ocf_ra
 create mode 100755 openstack/python-neutron/centos/files/NeutronScale.ocf_ra
 create mode 100644 openstack/python-neutron/centos/files/OVSCleanup.ocf_ra
 create mode 100644 openstack/python-neutron/centos/files/conf.README
 create mode 100755 openstack/python-neutron/centos/files/neutron-dhcp-agent.init
 create mode 100644 openstack/python-neutron/centos/files/neutron-dhcp-agent.pmon
 create mode 100644 openstack/python-neutron/centos/files/neutron-dhcp-agent.service
 create mode 100644 openstack/python-neutron/centos/files/neutron-dist.conf
 create mode 100644 openstack/python-neutron/centos/files/neutron-l3-agent.service
 create mode 100644 openstack/python-neutron/centos/files/neutron-linuxbridge-agent.service
 create mode 100644 openstack/python-neutron/centos/files/neutron-linuxbridge-cleanup.service
 create mode 100644 openstack/python-neutron/centos/files/neutron-macvtap-agent.service
 create mode 100755 openstack/python-neutron/centos/files/neutron-metadata-agent.init
 create mode 100644 openstack/python-neutron/centos/files/neutron-metadata-agent.pmon
 create mode 100644 openstack/python-neutron/centos/files/neutron-metadata-agent.service
 create mode 100644 openstack/python-neutron/centos/files/neutron-metering-agent.service
 create mode 100644 openstack/python-neutron/centos/files/neutron-netns-cleanup.init
 create mode 100644 openstack/python-neutron/centos/files/neutron-netns-cleanup.service
 create mode 100644 openstack/python-neutron/centos/files/neutron-openvswitch-agent.service
 create mode 100644 openstack/python-neutron/centos/files/neutron-ovs-cleanup.init
 create mode 100644 openstack/python-neutron/centos/files/neutron-ovs-cleanup.service
 create mode 100644 openstack/python-neutron/centos/files/neutron-rpc-server.service
 create mode 100755 openstack/python-neutron/centos/files/neutron-server.init
 create mode 100644 openstack/python-neutron/centos/files/neutron-server.service
 create mode 100755 openstack/python-neutron/centos/files/neutron-sriov-nic-agent.init
 create mode 100644 openstack/python-neutron/centos/files/neutron-sriov-nic-agent.pmon
 create mode 100644 openstack/python-neutron/centos/files/neutron-sriov-nic-agent.service
 create mode 100644 openstack/python-neutron/centos/files/neutron-sudoers
 create mode 100644 openstack/python-neutron/centos/openstack-neutron.spec
 create mode 100644 openstack/python-neutron/python-neutron/neutron-agent.init
 create mode 100644 openstack/python-neutron/python-neutron/neutron-server.init
 create mode 100644 openstack/python-neutron/python-neutron/neutron-src-dist-files.exclude
 create mode 100644 openstack/python-neutron/python-neutron_meta-cloud-services/neutron-dhcp-agent-netns-cleanup.cron
 create mode 100644 openstack/python-neutronclient/centos/build_srpm.data
 create mode 100644 openstack/python-neutronclient/centos/python-neutronclient.spec
 create mode 100644 openstack/python-nova/centos/build_srpm.data
 create mode 100644 openstack/python-nova/centos/files/kvm_timer_advance_setup.service
 create mode 100644 openstack/python-nova/centos/files/nova-clean-thinpool.service
 create mode 100644 openstack/python-nova/centos/files/nova-compute.init
 create mode 100644 openstack/python-nova/centos/files/nova-dist.conf
 create mode 100644 openstack/python-nova/centos/files/nova-ifc-template
 create mode 100644 openstack/python-nova/centos/files/nova-migration-wrapper
 create mode 100644 openstack/python-nova/centos/files/nova-pci-interrupts
 create mode 100755 openstack/python-nova/centos/files/nova-placement-api
 create mode 100644 openstack/python-nova/centos/files/nova-placement-api.conf
 create mode 100644 openstack/python-nova/centos/files/nova-polkit.pkla
 create mode 100644 openstack/python-nova/centos/files/nova-polkit.rules
 create mode 100644 openstack/python-nova/centos/files/nova-purge-deleted-active
 create mode 100644 openstack/python-nova/centos/files/nova-restart
 create mode 100644 openstack/python-nova/centos/files/nova-ssh-config
 create mode 100644 openstack/python-nova/centos/files/nova-sudoers
 create mode 100644 openstack/python-nova/centos/files/nova.conf.sample
 create mode 100644 openstack/python-nova/centos/files/nova.logrotate
 create mode 100644 openstack/python-nova/centos/files/nova_migration-rootwrap.conf
 create mode 100644 openstack/python-nova/centos/files/nova_migration-rootwrap_cold_migration
 create mode 100644 openstack/python-nova/centos/files/nova_migration-sudoers
 create mode 100644 openstack/python-nova/centos/files/nova_migration_authorized_keys
 create mode 100644 openstack/python-nova/centos/files/nova_migration_identity
 create mode 100644 openstack/python-nova/centos/files/nova_setup_timer_advance
 create mode 100644 openstack/python-nova/centos/files/openstack-nova-api.service
 create mode 100644 openstack/python-nova/centos/files/openstack-nova-cells.service
 create mode 100644 openstack/python-nova/centos/files/openstack-nova-compute.service
 create mode 100644 openstack/python-nova/centos/files/openstack-nova-conductor.service
 create mode 100644 openstack/python-nova/centos/files/openstack-nova-console.service
 create mode 100644 openstack/python-nova/centos/files/openstack-nova-consoleauth.service
 create mode 100644 openstack/python-nova/centos/files/openstack-nova-metadata-api.service
 create mode 100644 openstack/python-nova/centos/files/openstack-nova-network.service
 create mode 100644 openstack/python-nova/centos/files/openstack-nova-novncproxy.service
 create mode 100644 openstack/python-nova/centos/files/openstack-nova-novncproxy.sysconfig
 create mode 100644 openstack/python-nova/centos/files/openstack-nova-os-compute-api.service
 create mode 100644 openstack/python-nova/centos/files/openstack-nova-scheduler.service
 create mode 100644 openstack/python-nova/centos/files/openstack-nova-serialproxy.service
 create mode 100644 openstack/python-nova/centos/files/openstack-nova-spicehtml5proxy.service
 create mode 100644 openstack/python-nova/centos/files/openstack-nova-xvpvncproxy.service
 create mode 100644 openstack/python-nova/centos/files/policy.json
 create mode 100755 openstack/python-nova/centos/files/resctrl-show
 create mode 100644 openstack/python-nova/centos/openstack-nova.spec
 create mode 100755 openstack/python-nova/python-nova/collect_host_memory_info.sh
 create mode 100644 openstack/python-nova/python-nova/nova.conf
 create mode 100644 openstack/python-nova/python-nova/nova.init
 create mode 100644 openstack/python-nova/python-nova/nova_authorized_cmds
 create mode 100644 openstack/python-nova/python-nova/nova_clean_thinpool
 create mode 100644 openstack/python-nova/python-nova/nova_setup_cpusets
 create mode 100644 openstack/python-nova/python-nova/openstack-nova-compute-setup.service
 create mode 100644 openstack/python-novaclient/centos/build_srpm.data
 create mode 100644 openstack/python-novaclient/centos/python-novaclient.spec
 create mode 100644 openstack/python-openstackclient/centos/build_srpm.data
 create mode 100644 openstack/python-openstackclient/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch
 create mode 100644 openstack/python-openstackclient/centos/meta_patches/0001-meta-US106901-Openstack-CLI-Adoption.patch
 create mode 100644 openstack/python-openstackclient/centos/meta_patches/0001-meta-us101470.patch
 create mode 100644 openstack/python-openstackclient/centos/meta_patches/0002-meta-US106901-Openstack-CLI-Adoption.patch
 create mode 100644 openstack/python-openstackclient/centos/meta_patches/0002-meta-us101470.patch
 create mode 100644 openstack/python-openstackclient/centos/meta_patches/0002-spec-remote-clients-sdk.patch
 create mode 100644 openstack/python-openstackclient/centos/meta_patches/0003-meta-US106901-Openstack-CLI-Adoption.patch
 create mode 100644 openstack/python-openstackclient/centos/meta_patches/0003-meta-dont-remove-requirements-txt.patch
 create mode 100644 openstack/python-openstackclient/centos/meta_patches/0004-added-missing-build-require-dateutil.patch
 create mode 100644 openstack/python-openstackclient/centos/meta_patches/0005-meta-patch-for-neutron-extensions.patch
 create mode 100644 openstack/python-openstackclient/centos/meta_patches/0006-openstackClient_Passwordchange_warning.patch
 create mode 100644 openstack/python-openstackclient/centos/meta_patches/0007-CGTS-7814-warning-only-when-the-admin-password-chang.patch
 create mode 100644 openstack/python-openstackclient/centos/meta_patches/0008-CGTS-7947-add-os-keystone-region-name-to-identity-cl.patch
 create mode 100644 openstack/python-openstackclient/centos/meta_patches/1000-remove-version-requirements.patch
 create mode 100644 openstack/python-openstackclient/centos/meta_patches/1001-Turn-off-openstackclient-check.patch
 create mode 100644 openstack/python-openstackclient/centos/meta_patches/1002-require-python-ceilometerclient.patch
 create mode 100644 openstack/python-openstackclient/centos/meta_patches/PATCH_ORDER
 create mode 100644 openstack/python-openstackclient/centos/patches/0001-CGTS-7947-add-os-keystone-region-name-to-identity-cl.patch
 create mode 100644 openstack/python-openstackclient/centos/patches/0001-US101470-Openstackclient-implementation-of-novaclien.patch
 create mode 100644 openstack/python-openstackclient/centos/patches/0001-US106901-Openstack-CLI-Adoption.patch
 create mode 100644 openstack/python-openstackclient/centos/patches/0001-neutron-extensions.patch
 create mode 100644 openstack/python-openstackclient/centos/patches/0002-US101470-Openstackclient-implementation-of-novaclien.patch
 create mode 100644 openstack/python-openstackclient/centos/patches/0002-US106901-Openstack-CLI-Adoption.patch
 create mode 100644 openstack/python-openstackclient/centos/patches/0003-US106901-Openstack-CLI-Adoption.patch
 create mode 100644 openstack/python-openstackclient/centos/patches/CGTS-7814-warning-only-when-the-admin-password-chang.patch
 create mode 100644 openstack/python-openstackclient/centos/patches/openstackClient_Passwordchange_warning.patch
 create mode 100644 openstack/python-openstackclient/centos/srpm_path
 create mode 100644 openstack/python-openstackdocstheme/centos/build_srpm.data
 create mode 100644 openstack/python-openstackdocstheme/centos/meta_patches/0001-WRS-apply-WRS-patches.patch
 create mode 100644 openstack/python-openstackdocstheme/centos/meta_patches/PATCH_ORDER
 create mode 100644 openstack/python-openstackdocstheme/centos/patches/0003-WRS.-Fix-when-building-outside-git-tree.patch
 create mode 100644 openstack/python-openstackdocstheme/centos/srpm_path
 create mode 100644 openstack/python-openstacksdk/centos/build_srpm.data
 create mode 100644 openstack/python-openstacksdk/centos/python-openstacksdk.spec
 create mode 100644 openstack/python-osc-lib/centos/build_srpm.data
 create mode 100644 openstack/python-osc-lib/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch
 create mode 100644 openstack/python-osc-lib/centos/meta_patches/0002-CGTS-7947-add-os-keystone-region-name-option-to-open.patch
 create mode 100644 openstack/python-osc-lib/centos/meta_patches/0003-CGTS-8470-update-remote-client-to-include-osc-lib.patch
 create mode 100644 openstack/python-osc-lib/centos/meta_patches/PATCH_ORDER
 create mode 100644 openstack/python-osc-lib/centos/patches/0001-CGTS-7947-add-os-keystone-region-name-option-to-open.patch
 create mode 100644 openstack/python-osc-lib/centos/srpm_path
 create mode 100644 openstack/python-oslo-concurrency/centos/build_srpm.data
 create mode 100644 openstack/python-oslo-concurrency/centos/meta_patches/PATCH_ORDER
 create mode 100644 openstack/python-oslo-concurrency/centos/meta_patches/spec-add-fair-lock.patch
 create mode 100644 openstack/python-oslo-concurrency/centos/meta_patches/update-package-versioning-for-TIS-format.patch
 create mode 100644 openstack/python-oslo-concurrency/centos/patches/add-fair-lock.patch
 create mode 100644 openstack/python-oslo-concurrency/centos/srpm_path
 create mode 100644 openstack/python-oslo-messaging/centos/build_srpm.data
 create mode 100644 openstack/python-oslo-messaging/centos/meta_patches/0004-disable-check-on-build.patch
 create mode 100644 openstack/python-oslo-messaging/centos/meta_patches/PATCH_ORDER
 create mode 100644 openstack/python-oslo-messaging/centos/meta_patches/fix-pifpaf-build-error.patch
 create mode 100644 openstack/python-oslo-messaging/centos/meta_patches/spec-rabbit-increase-heartbeat-rate-to-decrease-polling-interval.patch
 create mode 100644 openstack/python-oslo-messaging/centos/meta_patches/update-package-versioning-for-tis-format.patch
 create mode 100644 openstack/python-oslo-messaging/centos/patches/rabbit-increase-heartbeat-rate-to-decrease-poll-interval.patch
 create mode 100644 openstack/python-oslo-messaging/centos/srpm_path
 create mode 100644 openstack/python-oslo-service/centos/build_srpm.data
 create mode 100644 openstack/python-oslo-service/centos/meta_patches/PATCH_ORDER
 create mode 100644 openstack/python-oslo-service/centos/meta_patches/spec-loopingcall-permit-aborting-while-sleeping.patch
 create mode 100644 openstack/python-oslo-service/centos/meta_patches/update-package-versioning-for-tis-format.patch
 create mode 100644 openstack/python-oslo-service/centos/patches/loopingcall-permit-aborting-while-sleeping.patch
 create mode 100644 openstack/python-oslo-service/centos/srpm_path
 create mode 100644 openstack/python-wsme/centos/build_srpm.data
 create mode 100644 openstack/python-wsme/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch
 create mode 100644 openstack/python-wsme/centos/meta_patches/0003-Remove-TOX-calls-from-build.patch
 create mode 100644 openstack/python-wsme/centos/meta_patches/PATCH_ORDER
 create mode 100644 openstack/python-wsme/centos/meta_patches/meta-patch-ClientSideError-logging-verbosity-fix.patch
 create mode 100644 openstack/python-wsme/centos/patches/0001-log-client-side-errors.patch
 create mode 100644 openstack/python-wsme/centos/srpm_path
 create mode 100644 openstack/rabbitmq-server/centos/build_srpm.data
 create mode 100644 openstack/rabbitmq-server/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch
 create mode 100644 openstack/rabbitmq-server/centos/meta_patches/0006-Update-rabbitmq-server.logrotate.patch
 create mode 100644 openstack/rabbitmq-server/centos/meta_patches/Allow-rabbitmqctl-to-run-as-root.patch
 create mode 100644 openstack/rabbitmq-server/centos/meta_patches/PATCH_ORDER
 create mode 100644 openstack/rabbitmq-server/centos/meta_patches/Set-root-home-for-rabbitmqctl.patch
 create mode 100644 openstack/rabbitmq-server/centos/meta_patches/packstack-fixes.patch
 create mode 100644 openstack/rabbitmq-server/centos/meta_patches/spec-comments-for-ocf.patch
 create mode 100644
openstack/rabbitmq-server/centos/patches/update-rabbitmq-server.service.patch create mode 100644 openstack/rabbitmq-server/centos/srpm_path create mode 100644 openstack/rabbitmq-server/rabbitmq-server/rabbitmq-server create mode 100644 openstack/rabbitmq-server/rabbitmq-server/rabbitmq-server.ocf diff --git a/CONTRIBUTORS.wrs b/CONTRIBUTORS.wrs new file mode 100644 index 00000000..68489a05 --- /dev/null +++ b/CONTRIBUTORS.wrs @@ -0,0 +1,7 @@ +The following contributors from Wind River have developed the seed code in this +repository. We look forward to community collaboration and contributions for +additional features, enhancements and refactoring. + +Contributors: +============= +Wind River Titanium Cloud Team diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
diff --git a/README.rst b/README.rst
new file mode 100644
index 00000000..a5d663d4
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,5 @@
+============
+stx-upstream
+============
+
+StarlingX Upstream Packages
diff --git a/ceph-manager/.gitignore b/ceph-manager/.gitignore
new file mode 100644
index 00000000..78868598
--- /dev/null
+++ b/ceph-manager/.gitignore
@@ -0,0 +1,6 @@
+!.distro
+.distro/centos7/rpmbuild/RPMS
+.distro/centos7/rpmbuild/SRPMS
+.distro/centos7/rpmbuild/BUILD
+.distro/centos7/rpmbuild/BUILDROOT
+.distro/centos7/rpmbuild/SOURCES/ceph-manager*tar.gz
diff --git a/ceph-manager/LICENSE b/ceph-manager/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/ceph-manager/LICENSE
@@ -0,0 +1,202 @@
+ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types.
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
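The ceph-manager packaging that follows (PKG-INFO plus the RPM spec) wires a
Python package into an init script, a systemd unit and a logrotate config. As
a minimal sketch of the kind of long-running entry point such a package wraps
-- hypothetical names only, not the daemon code shipped elsewhere in this
patch:

    # sketch.py - illustrative skeleton, not the shipped daemon
    import time

    def main():
        # A loop of this shape is what /usr/bin/ceph-manager (installed by
        # the spec below) starts and what the systemd unit supervises.
        while True:
            # poll Ceph health, update alarms, answer RPC requests, ...
            time.sleep(60)

    if __name__ == '__main__':
        main()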
diff --git a/ceph-manager/PKG-INFO b/ceph-manager/PKG-INFO
new file mode 100644
index 00000000..5b6746d8
--- /dev/null
+++ b/ceph-manager/PKG-INFO
@@ -0,0 +1,13 @@
+Metadata-Version: 1.1
+Name: ceph-manager
+Version: 1.0
+Summary: Handle Ceph API calls and provide status updates via alarms
+Home-page:
+Author: Wind River
+Author-email: info@windriver.com
+License: Apache-2.0
+
+Description: Handle Ceph API calls and provide status updates via alarms
+
+
+Platform: UNKNOWN
diff --git a/ceph-manager/centos/build_srpm.data b/ceph-manager/centos/build_srpm.data
new file mode 100644
index 00000000..d01510bd
--- /dev/null
+++ b/ceph-manager/centos/build_srpm.data
@@ -0,0 +1,3 @@
+SRC_DIR="ceph-manager"
+COPY_LIST_TO_TAR="files scripts"
+TIS_PATCH_VER=4
diff --git a/ceph-manager/centos/ceph-manager.spec b/ceph-manager/centos/ceph-manager.spec
new file mode 100644
index 00000000..2f54deb5
--- /dev/null
+++ b/ceph-manager/centos/ceph-manager.spec
@@ -0,0 +1,70 @@
+Summary: Handle Ceph API calls and provide status updates via alarms
+Name: ceph-manager
+Version: 1.0
+Release: %{tis_patch_ver}%{?_tis_dist}
+License: Apache-2.0
+Group: base
+Packager: Wind River
+URL: unknown
+Source0: %{name}-%{version}.tar.gz
+
+BuildRequires: python-setuptools
+BuildRequires: systemd-units
+BuildRequires: systemd-devel
+Requires: sysinv
+
+%description
+Handle Ceph API calls and provide status updates via alarms.
+Handle sysinv RPC calls for long running Ceph API operations:
+- cache tiering enable
+- cache tiering disable
+
+%define local_bindir /usr/bin/
+%define local_etc_initd /etc/init.d/
+%define local_etc_logrotated /etc/logrotate.d/
+%define pythonroot /usr/lib64/python2.7/site-packages
+
+%define debug_package %{nil}
+
+%prep
+%setup
+
+%build
+%{__python} setup.py build
+
+%install
+%{__python} setup.py install --root=$RPM_BUILD_ROOT \
+    --install-lib=%{pythonroot} \
+    --prefix=/usr \
+    --install-data=/usr/share \
+    --single-version-externally-managed
+
+install -d -m 755 %{buildroot}%{local_etc_initd}
+install -p -D -m 700 scripts/init.d/ceph-manager %{buildroot}%{local_etc_initd}/ceph-manager
+
+install -d -m 755 %{buildroot}%{local_bindir}
+install -p -D -m 700 scripts/bin/ceph-manager %{buildroot}%{local_bindir}/ceph-manager
+
+install -d -m 755 %{buildroot}%{local_etc_logrotated}
+install -p -D -m 644 files/ceph-manager.logrotate %{buildroot}%{local_etc_logrotated}/ceph-manager.logrotate
+
+install -d -m 755 %{buildroot}%{_unitdir}
+install -m 644 -p -D files/%{name}.service %{buildroot}%{_unitdir}/%{name}.service
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+# Note: The package name is ceph-manager but the import name is ceph_manager,
+# so we can't use '%{name}'.
+%files
+%defattr(-,root,root,-)
+%doc LICENSE
+%{local_bindir}/*
+%{local_etc_initd}/*
+%{_unitdir}/%{name}.service
+%dir %{local_etc_logrotated}
+%{local_etc_logrotated}/*
+%dir %{pythonroot}/ceph_manager
+%{pythonroot}/ceph_manager/*
+%dir %{pythonroot}/ceph_manager-%{version}.0-py2.7.egg-info
+%{pythonroot}/ceph_manager-%{version}.0-py2.7.egg-info/*
diff --git a/ceph-manager/ceph-manager/LICENSE b/ceph-manager/ceph-manager/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/ceph-manager/ceph-manager/LICENSE
@@ -0,0 +1,202 @@
+ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
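The module added below guards its enable/disable operations with a small
lock-ownership pattern: Lock.try_lock() hands back a LockOwnership object
that releases the underlying semaphore from a context manager, and
transfer() moves that release duty into a spawned worker. A standalone
sketch of the same idea (threading is used here purely for illustration;
the module itself builds on eventlet semaphores):

    import threading

    sem = threading.Semaphore(1)

    def try_lock():
        # Non-blocking acquire: on success the caller owns the release.
        return sem.acquire(blocking=False)

    def worker():
        try:
            pass  # long-running operation executes under the lock
        finally:
            # Ownership was transferred to this thread, so it releases.
            sem.release()

    if try_lock():
        # Do not release in this thread; hand the lock to the worker.
        threading.Thread(target=worker).start()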
diff --git a/ceph-manager/ceph-manager/ceph_manager/__init__.py b/ceph-manager/ceph-manager/ceph_manager/__init__.py
new file mode 100644
index 00000000..754a8f4e
--- /dev/null
+++ b/ceph-manager/ceph-manager/ceph_manager/__init__.py
@@ -0,0 +1,5 @@
+#
+# Copyright (c) 2016 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
diff --git a/ceph-manager/ceph-manager/ceph_manager/cache_tiering.py b/ceph-manager/ceph-manager/ceph_manager/cache_tiering.py
new file mode 100644
index 00000000..4e814c3b
--- /dev/null
+++ b/ceph-manager/ceph-manager/ceph_manager/cache_tiering.py
@@ -0,0 +1,705 @@
+#
+# Copyright (c) 2016 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+import copy
+import contextlib
+import functools
+import math
+import subprocess
+import time
+import traceback
+# noinspection PyUnresolvedReferences
+import eventlet
+# noinspection PyUnresolvedReferences
+from eventlet.semaphore import Semaphore
+# noinspection PyUnresolvedReferences
+from oslo_log import log as logging
+# noinspection PyUnresolvedReferences
+from sysinv.conductor.cache_tiering_service_config import ServiceConfig
+
+from i18n import _LI, _LW, _LE
+
+import constants
+import exception
+import ceph
+
+LOG = logging.getLogger(__name__)
+CEPH_POOLS = copy.deepcopy(constants.CEPH_POOLS)
+
+MAX_WAIT = constants.CACHE_FLUSH_MAX_WAIT_OBJ_COUNT_DECREASE_SEC
+MIN_WAIT = constants.CACHE_FLUSH_MIN_WAIT_OBJ_COUNT_DECREASE_SEC
+
+
+class LockOwnership(object):
+    def __init__(self, sem):
+        self.sem = sem
+
+    @contextlib.contextmanager
+    def __call__(self):
+        try:
+            yield
+        finally:
+            if self.sem:
+                self.sem.release()
+
+    def transfer(self):
+        new_lo = LockOwnership(self.sem)
+        self.sem = None
+        return new_lo
+
+
+class Lock(object):
+
+    def __init__(self):
+        self.sem = Semaphore(value=1)
+
+    def try_lock(self):
+        result = self.sem.acquire(blocking=False)
+        if result:
+            return LockOwnership(self.sem)
+
+
+class CacheTiering(object):
+
+    def __init__(self, service):
+        self.service = service
+        self.lock = Lock()
+        # will be unlocked by set_initial_config()
+        self._init_config_lock = self.lock.try_lock()
+        self.config = None
+        self.config_desired = None
+        self.config_applied = None
+        self.target_max_bytes = {}
+
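+    # set_initial_config() releases the lock taken in the constructor, so
+    # enable/disable requests queue up behind delivery of the initial
+    # configuration. If desired and applied state disagree (e.g. after a
+    # restart part-way through an operation), the held lock is transferred
+    # straight into the re-driven enable/disable operation below.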
+    def set_initial_config(self, config):
+        with self._init_config_lock():
+            LOG.info("Setting Ceph cache tiering initial configuration")
+            self.config = ServiceConfig.from_dict(
+                config.get(constants.CACHE_TIERING, {})) or \
+                ServiceConfig()
+            self.config_desired = ServiceConfig.from_dict(
+                config.get(constants.CACHE_TIERING_DESIRED, {})) or \
+                ServiceConfig()
+            self.config_applied = ServiceConfig.from_dict(
+                config.get(constants.CACHE_TIERING_APPLIED, {})) or \
+                ServiceConfig()
+            if self.config_desired:
+                LOG.debug("set_initial_config config_desired %s " %
+                          self.config_desired.to_dict())
+            if self.config_applied:
+                LOG.debug("set_initial_config config_applied %s " %
+                          self.config_applied.to_dict())
+
+            # Check that previous caching tier operation completed
+            # successfully or perform recovery
+            if (self.config_desired and
+                    self.config_applied and
+                    (self.config_desired.cache_enabled !=
+                     self.config_applied.cache_enabled)):
+                if self.config_desired.cache_enabled:
+                    self.enable_cache(self.config_desired.to_dict(),
+                                      self.config_applied.to_dict(),
+                                      self._init_config_lock.transfer())
+                else:
+                    self.disable_cache(self.config_desired.to_dict(),
+                                       self.config_applied.to_dict(),
+                                       self._init_config_lock.transfer())
+
+    def is_locked(self):
+        lock_ownership = self.lock.try_lock()
+        if not lock_ownership:
+            return True
+        with lock_ownership():
+            return False
+
+    def update_pools_info(self):
+        global CEPH_POOLS
+        cfg = self.service.sysinv_conductor.call(
+            {}, 'get_ceph_pools_config')
+        CEPH_POOLS = copy.deepcopy(cfg)
+        LOG.info(_LI("update_pools_info: pools: {}").format(CEPH_POOLS))
+
+    def enable_cache(self, new_config, applied_config, lock_ownership=None):
+        new_config = ServiceConfig.from_dict(new_config)
+        applied_config = ServiceConfig.from_dict(applied_config)
+        if not lock_ownership:
+            lock_ownership = self.lock.try_lock()
+            if not lock_ownership:
+                raise exception.CephCacheEnableFailure()
+        with lock_ownership():
+            eventlet.spawn(self.do_enable_cache,
+                           new_config, applied_config,
+                           lock_ownership.transfer())
+
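+    # Enabling walks the pools three times: create the '-cache' pools, add
+    # them as tiers, then switch them to writeback and install the overlay.
+    # Every completed step pushes its inverse onto _unwind_actions so that a
+    # failure part-way through rolls back in reverse order.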
+    def do_enable_cache(self, new_config, applied_config, lock_ownership):
+        LOG.info(_LI("cache_tiering_enable_cache: "
+                     "new_config={}, applied_config={}").format(
+            new_config.to_dict(), applied_config.to_dict()))
+        _unwind_actions = []
+        with lock_ownership():
+            success = False
+            _exception = None
+            try:
+                self.config_desired.cache_enabled = True
+                self.update_pools_info()
+                for pool in CEPH_POOLS:
+                    if (pool['pool_name'] ==
+                            constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL or
+                            pool['pool_name'] ==
+                            constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER):
+                        object_pool_name = \
+                            self.service.monitor._get_object_pool_name()
+                        pool['pool_name'] = object_pool_name
+
+                    self.cache_pool_create(pool)
+                    _unwind_actions.append(
+                        functools.partial(self.cache_pool_delete, pool))
+                for pool in CEPH_POOLS:
+                    if (pool['pool_name'] ==
+                            constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL or
+                            pool['pool_name'] ==
+                            constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER):
+                        object_pool_name = \
+                            self.service.monitor._get_object_pool_name()
+                        pool['pool_name'] = object_pool_name
+
+                    self.cache_tier_add(pool)
+                    _unwind_actions.append(
+                        functools.partial(self.cache_tier_remove, pool))
+                for pool in CEPH_POOLS:
+                    if (pool['pool_name'] ==
+                            constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL or
+                            pool['pool_name'] ==
+                            constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER):
+                        object_pool_name = \
+                            self.service.monitor._get_object_pool_name()
+                        pool['pool_name'] = object_pool_name
+
+                    self.cache_mode_set(pool, 'writeback')
+                    self.cache_pool_set_config(pool, new_config)
+                    self.cache_overlay_create(pool)
+                success = True
+            except Exception as e:
+                LOG.error(_LE('Failed to enable cache: reason=%s') %
+                          traceback.format_exc())
+                for action in reversed(_unwind_actions):
+                    try:
+                        action()
+                    except Exception:
+                        LOG.warn(_LW('Failed cache enable '
+                                     'unwind action: reason=%s') %
+                                 traceback.format_exc())
+                success = False
+                _exception = str(e)
+            finally:
+                self.service.monitor.monitor_check_cache_tier(success)
+                if success:
+                    self.config_applied.cache_enabled = True
+                self.service.sysinv_conductor.call(
+                    {}, 'cache_tiering_enable_cache_complete',
+                    success=success, exception=_exception,
+                    new_config=new_config.to_dict(),
+                    applied_config=applied_config.to_dict())
+                # Run first update of periodic target_max_bytes
+                self.update_cache_target_max_bytes()
+
+    @contextlib.contextmanager
+    def ignore_ceph_failure(self):
+        try:
+            yield
+        except exception.CephManagerException:
+            pass
+
+    def disable_cache(self, new_config, applied_config, lock_ownership=None):
+        new_config = ServiceConfig.from_dict(new_config)
+        applied_config = ServiceConfig.from_dict(applied_config)
+        if not lock_ownership:
+            lock_ownership = self.lock.try_lock()
+            if not lock_ownership:
+                raise exception.CephCacheDisableFailure()
+        with lock_ownership():
+            eventlet.spawn(self.do_disable_cache,
+                           new_config, applied_config,
+                           lock_ownership.transfer())
+
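+    # Disabling is the mirror image: put the cache tiers into 'forward'
+    # mode, flush them (with a bounded number of retries), then remove the
+    # overlays and tiers and finally delete the '-cache' pools. Individual
+    # Ceph failures are ignored so that cleanup proceeds as far as possible.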
+    def do_disable_cache(self, new_config, applied_config, lock_ownership):
+        LOG.info(_LI("cache_tiering_disable_cache: "
+                     "new_config={}, applied_config={}").format(
+            new_config, applied_config))
+        with lock_ownership():
+            success = False
+            _exception = None
+            try:
+                self.config_desired.cache_enabled = False
+                for pool in CEPH_POOLS:
+                    if (pool['pool_name'] ==
+                            constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL or
+                            pool['pool_name'] ==
+                            constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER):
+                        object_pool_name = \
+                            self.service.monitor._get_object_pool_name()
+                        pool['pool_name'] = object_pool_name
+
+                    with self.ignore_ceph_failure():
+                        self.cache_mode_set(
+                            pool, 'forward')
+
+                for pool in CEPH_POOLS:
+                    if (pool['pool_name'] ==
+                            constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL or
+                            pool['pool_name'] ==
+                            constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER):
+                        object_pool_name = \
+                            self.service.monitor._get_object_pool_name()
+                        pool['pool_name'] = object_pool_name
+
+                    retries_left = 3
+                    while True:
+                        try:
+                            self.cache_flush(pool)
+                            break
+                        except exception.CephCacheFlushFailure:
+                            retries_left -= 1
+                            if not retries_left:
+                                # give up
+                                break
+                            else:
+                                time.sleep(1)
+                for pool in CEPH_POOLS:
+                    if (pool['pool_name'] ==
+                            constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL or
+                            pool['pool_name'] ==
+                            constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER):
+                        object_pool_name = \
+                            self.service.monitor._get_object_pool_name()
+                        pool['pool_name'] = object_pool_name
+
+                    with self.ignore_ceph_failure():
+                        self.cache_overlay_delete(pool)
+                        self.cache_tier_remove(pool)
+                for pool in CEPH_POOLS:
+                    if (pool['pool_name'] ==
+                            constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL or
+                            pool['pool_name'] ==
+                            constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER):
+                        object_pool_name = \
+                            self.service.monitor._get_object_pool_name()
+                        pool['pool_name'] = object_pool_name
+
+                    with self.ignore_ceph_failure():
+                        self.cache_pool_delete(pool)
+                success = True
+            except Exception as e:
+                LOG.warn(_LW('Failed to disable cache: reason=%s') %
+                         traceback.format_exc())
+                _exception = str(e)
+            finally:
+                self.service.monitor.monitor_check_cache_tier(False)
+                if success:
+                    self.config_desired.cache_enabled = False
+                    self.config_applied.cache_enabled = False
+                self.service.sysinv_conductor.call(
+                    {}, 'cache_tiering_disable_cache_complete',
+                    success=success, exception=_exception,
+                    new_config=new_config.to_dict(),
+                    applied_config=applied_config.to_dict())
+
+    def get_pool_pg_num(self, pool_name):
+        return self.service.sysinv_conductor.call(
+            {}, 'get_pool_pg_num',
+            pool_name=pool_name)
+
+    def cache_pool_create(self, pool):
+        backing_pool = pool['pool_name']
+        cache_pool = backing_pool + '-cache'
+        pg_num = self.get_pool_pg_num(cache_pool)
+        if not ceph.osd_pool_exists(self.service.ceph_api, cache_pool):
+            ceph.osd_pool_create(
+                self.service.ceph_api, cache_pool,
+                pg_num, pg_num)
+
+    def cache_pool_delete(self, pool):
+        cache_pool = pool['pool_name'] + '-cache'
+        ceph.osd_pool_delete(
+            self.service.ceph_api, cache_pool)
+
+    def cache_tier_add(self, pool):
+        backing_pool = pool['pool_name']
+        cache_pool = backing_pool + '-cache'
+        response, body = self.service.ceph_api.osd_tier_add(
+            backing_pool, cache_pool,
+            force_nonempty="--force-nonempty",
+            body='json')
+        if response.ok:
+            LOG.info(_LI("Added OSD tier: "
+                         "backing_pool={}, cache_pool={}").format(
+                backing_pool, cache_pool))
+        else:
+            e = exception.CephPoolAddTierFailure(
+                backing_pool=backing_pool,
+                cache_pool=cache_pool,
+                response_status_code=response.status_code,
+                response_reason=response.reason,
+                status=body.get('status'),
+                output=body.get('output'))
+            LOG.warn(e)
+            raise e
+
+    def cache_tier_remove(self, pool):
+        backing_pool = pool['pool_name']
+        cache_pool = backing_pool + '-cache'
+        response, body = self.service.ceph_api.osd_tier_remove(
+            backing_pool, cache_pool, body='json')
+        if response.ok:
+            LOG.info(_LI("Removed OSD tier: "
+                         "backing_pool={}, cache_pool={}").format(
+                backing_pool, cache_pool))
+        else:
+            e = exception.CephPoolRemoveTierFailure(
+                backing_pool=backing_pool,
+                cache_pool=cache_pool,
+                response_status_code=response.status_code,
+                response_reason=response.reason,
+                status=body.get('status'),
+                output=body.get('output'))
+            LOG.warn(e)
+            raise e
+
+    def cache_mode_set(self, pool, mode):
+        backing_pool = pool['pool_name']
+        cache_pool = backing_pool + '-cache'
+        response, body = self.service.ceph_api.osd_tier_cachemode(
+            cache_pool, mode, body='json')
+        if response.ok:
+            LOG.info(_LI("Set OSD tier cache mode: "
+                         "cache_pool={}, mode={}").format(cache_pool, mode))
+        else:
+            e = exception.CephCacheSetModeFailure(
+                cache_pool=cache_pool,
+                mode=mode,
+                response_status_code=response.status_code,
+                response_reason=response.reason,
+                status=body.get('status'),
+                output=body.get('output'))
+            LOG.warn(e)
+            raise e
+
+    def cache_pool_set_config(self, pool, config):
+        for name, value in config.params.iteritems():
+            self.cache_pool_set_param(pool, name, value)
+
+    def cache_pool_set_param(self, pool, name, value):
+        backing_pool = pool['pool_name']
+        cache_pool = backing_pool + '-cache'
+        ceph.osd_set_pool_param(
+            self.service.ceph_api, cache_pool, name, value)
+
+    def cache_overlay_create(self, pool):
+        backing_pool = pool['pool_name']
+        cache_pool = backing_pool + '-cache'
+        response, body = self.service.ceph_api.osd_tier_set_overlay(
+            backing_pool, cache_pool, body='json')
+        if response.ok:
+            LOG.info(_LI("Set OSD tier overlay: "
+                         "backing_pool={}, cache_pool={}").format(
+                backing_pool, cache_pool))
+        else:
+            e = exception.CephCacheCreateOverlayFailure(
+                backing_pool=backing_pool,
+                cache_pool=cache_pool,
+                response_status_code=response.status_code,
+                response_reason=response.reason,
+                status=body.get('status'),
+                output=body.get('output'))
+            LOG.warn(e)
+            raise e
+
+    def cache_overlay_delete(self, pool):
+        backing_pool = pool['pool_name']
+        cache_pool = backing_pool + '-cache'
+        response, body = self.service.ceph_api.osd_tier_remove_overlay(
+            backing_pool, body='json')
+        if response.ok:
+            LOG.info(_LI("Removed OSD tier overlay: "
+                         "backing_pool={}").format(backing_pool))
+        else:
+            e = exception.CephCacheDeleteOverlayFailure(
+                backing_pool=backing_pool,
+                cache_pool=cache_pool,
+                response_status_code=response.status_code,
+                response_reason=response.reason,
+                status=body.get('status'),
+                output=body.get('output'))
+            LOG.warn(e)
+            raise e
+
+    @staticmethod
+    def rados_cache_flush_evict_all(pool):
+        backing_pool = pool['pool_name']
+        cache_pool = backing_pool + '-cache'
+        try:
+            subprocess.check_call(
+                ['/usr/bin/rados', '-p', cache_pool, 'cache-flush-evict-all'])
+            LOG.info(_LI("Flushed OSD cache pool: "
+                         "cache_pool={}").format(cache_pool))
+        except subprocess.CalledProcessError as e:
+            _e = exception.CephCacheFlushFailure(
+                cache_pool=cache_pool,
+                return_code=str(e.returncode),
+                cmd=" ".join(e.cmd),
+                output=e.output)
+            LOG.warn(_e)
+            raise _e
+
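+    # cache_flush() first drives eviction by dropping target_max_objects to
+    # 1, then polls 'ceph df' while the cache pool's object count keeps
+    # decreasing, doubling the wait interval (up to MAX_WAIT) when progress
+    # stalls, and finishes with 'rados cache-flush-evict-all'.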
backing_pool + '-cache' + try: + # set target_max_objects to a small value to force evacuation of + # objects from cache before we use rados cache-flush-evict-all + # WARNING: assuming cache_pool will be deleted after flush so + # we don't have to save/restore the value of target_max_objects + # + self.cache_pool_set_param(pool, 'target_max_objects', 1) + prev_object_count = None + wait_interval = MIN_WAIT + while True: + response, body = self.service.ceph_api.df(body='json') + if not response.ok: + LOG.warn(_LW( + "Failed to retrieve cluster free space stats: " + "status_code=%d, reason=%s") % ( + response.status_code, response.reason)) + break + stats = None + for s in body['output']['pools']: + if s['name'] == cache_pool: + stats = s['stats'] + break + if not stats: + LOG.warn(_LW("Missing pool free space stats: " + "cache_pool=%s") % cache_pool) + break + object_count = stats['objects'] + if object_count < constants.CACHE_FLUSH_OBJECTS_THRESHOLD: + break + if prev_object_count is not None: + delta_objects = object_count - prev_object_count + if delta_objects > 0: + LOG.warn(_LW("Unexpected increase in number " + "of objects in cache pool: " + "cache_pool=%s, prev_object_count=%d, " + "object_count=%d") % ( + cache_pool, prev_object_count, + object_count)) + break + if delta_objects == 0: + wait_interval *= 2 + if wait_interval > MAX_WAIT: + LOG.warn(_LW( + "Cache pool number of objects did not " + "decrease: cache_pool=%s, object_count=%d, " + "wait_interval=%d") % ( + cache_pool, object_count, wait_interval)) + break + else: + wait_interval = MIN_WAIT + time.sleep(wait_interval) + prev_object_count = object_count + except exception.CephPoolSetParamFailure as e: + LOG.warn(e) + finally: + self.rados_cache_flush_evict_all(pool) + + def update_cache_target_max_bytes(self): + "Dynamically compute target_max_bytes of caching pools" + + # Only compute if cache tiering is enabled + if self.config_applied and self.config_desired: + if (not self.config_desired.cache_enabled or + not self.config_applied.cache_enabled): + LOG.debug("Cache tiering disabled, no need to update " + "target_max_bytes.") + return + LOG.debug("Updating target_max_bytes") + + # Get available space + response, body = self.service.ceph_api.osd_df(body='json', + output_method='tree') + if not response.ok: + LOG.warn(_LW( + "Failed to retrieve cluster free space stats: " + "status_code=%d, reason=%s") % ( + response.status_code, response.reason)) + return + + storage_tier_size = 0 + cache_tier_size = 0 + + replication = constants.CEPH_REPLICATION_FACTOR + for node in body['output']['nodes']: + if node['name'] == 'storage-tier': + storage_tier_size = node['kb']*1024/replication + elif node['name'] == 'cache-tier': + cache_tier_size = node['kb']*1024/replication + + if storage_tier_size == 0 or cache_tier_size == 0: + LOG.info("Failed to get cluster size " + "(storage_tier_size=%s, cache_tier_size=%s)," + "retrying on next cycle" % + (storage_tier_size, cache_tier_size)) + return + + # Get available pools + response, body = self.service.ceph_api.osd_lspools(body='json') + if not response.ok: + LOG.warn(_LW( + "Failed to retrieve available pools: " + "status_code=%d, reason=%s") % ( + response.status_code, response.reason)) + return + pools = [p['poolname'] for p in body['output']] + + # Separate backing from caching for easy iteration + backing_pools = [] + caching_pools = [] + for p in pools: + if p.endswith('-cache'): + caching_pools.append(p) + else: + backing_pools.append(p) + LOG.debug("Pools: caching: %s, backing: %s" % 
(caching_pools, + backing_pools)) + + if not len(caching_pools): + # We do not have caching pools created yet + return + + # Get quota from backing pools that are cached + stats = {} + for p in caching_pools: + backing_name = p.replace('-cache', '') + stats[backing_name] = {} + try: + quota = ceph.osd_pool_get_quota(self.service.ceph_api, + backing_name) + except exception.CephPoolGetQuotaFailure as e: + LOG.warn(_LW( + "Failed to retrieve quota: " + "exception: %s") % str(e)) + return + stats[backing_name]['quota'] = quota['max_bytes'] + stats[backing_name]['quota_pt'] = (quota['max_bytes']*100.0 / + storage_tier_size) + LOG.debug("Quota for pool: %s " + "is: %s B representing %s pt" % + (backing_name, + quota['max_bytes'], + stats[backing_name]['quota_pt'])) + + # target_max_bytes logic: + # - for computing target_max_bytes, cache_tier_size must be equal to + # the sum of target_max_bytes of each caching pool + # - target_max_bytes for each caching pool is computed as the + # percentage of quota in the corresponding backing pool + # - the caching tier has to work at full capacity, so if the sum of + # all quotas in the backing tier differs from 100% we need to + # normalize + # - if the quota is zero for any pool we add CACHE_TIERING_MIN_QUOTA + # by default *after* normalization so that we have a real minimum + + # We compute the real percentage that needs to be normalized after + # ensuring that we have CACHE_TIERING_MIN_QUOTA for each pool with + # a quota of 0 + real_100pt = 90.0 # we start from the max and decrease it for each 0 pool + # Note: we must avoid reaching 100% at all costs; + # cache_target_full_ratio, the Ceph parameter that is supposed to + # protect the cluster against this, does not work in Ceph v0.94.6, + # therefore a value of 90% is better suited for this + for p in caching_pools: + backing_name = p.replace('-cache', '') + if stats[backing_name]['quota_pt'] == 0: + real_100pt -= constants.CACHE_TIERING_MIN_QUOTA + LOG.debug("Quota before normalization for %s is: %s pt" % + (p, stats[backing_name]['quota_pt'])) + + # Compute the total percentage of quotas for all backing pools. + # Should be 100% if correctly configured + total_quota_pt = 0 + for p in caching_pools: + backing_name = p.replace('-cache', '') + total_quota_pt += stats[backing_name]['quota_pt'] + LOG.debug("Total quota pt is: %s" % total_quota_pt) + + # Normalize quota pt to 100% (or real_100pt) + if total_quota_pt != 0: # to avoid division by zero + for p in caching_pools: + backing_name = p.replace('-cache', '') + stats[backing_name]['quota_pt'] = \ + (stats[backing_name]['quota_pt'] * + (real_100pt / total_quota_pt)) + + # Do not allow the quota to be 0 for any pool + total = 0 + for p in caching_pools: + backing_name = p.replace('-cache', '') + if stats[backing_name]['quota_pt'] == 0: + stats[backing_name]['quota_pt'] = \ + constants.CACHE_TIERING_MIN_QUOTA + total += stats[backing_name]['quota_pt'] + LOG.debug("Quota after normalization for %s is: %s" % + (p, stats[backing_name]['quota_pt'])) + + if total > 100: + # Supplementary protection; we really have to avoid going above + # 100%. Note that real_100pt is less than 100% but we still got + # more than 100! + LOG.warn("Total sum of quotas should not go above 100% " + "but is: %s, recalculating in next cycle" % total) + return + LOG.debug("Total sum of quotas is %s pt" % total) + + # Get current target_max_bytes. We cache it to reduce requests + # to ceph-rest-api. We are the ones changing it, so not an issue. 
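+        # Worked example of the normalization above (hypothetical
+        # numbers, not taken from a real deployment): with two caching
+        # pools whose backing quotas are 54% and 0% of the storage tier:
+        #   real_100pt      = 90 - 5 (one zero-quota pool) = 85
+        #   total_quota_pt  = 54 + 0                        = 54
+        #   normalized:     54 * (85 / 54) = 85; the zero pool then
+        #   gets CACHE_TIERING_MIN_QUOTA = 5, so total = 90 <= 100
+        #   target_max_bytes = floor(85 * cache_tier_size / 100) for
+        #   the first pool, floor(5 * cache_tier_size / 100) for the
+        #   second.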
+ for p in caching_pools: + if p not in self.target_max_bytes: + try: + value = ceph.osd_get_pool_param(self.service.ceph_api, p, + constants.TARGET_MAX_BYTES) + except exception.CephPoolGetParamFailure as e: + LOG.warn(e) + return + self.target_max_bytes[p] = value + LOG.debug("Existing target_max_bytes retrieved from " + "Ceph: %s" % self.target_max_bytes) + + # Set TARGET_MAX_BYTES + LOG.debug("storage_tier_size: %s " + "cache_tier_size: %s" % (storage_tier_size, + cache_tier_size)) + for p in caching_pools: + backing_name = p.replace('-cache', '') + s = stats[backing_name] + target_max_bytes = math.floor(s['quota_pt'] * cache_tier_size / + 100.0) + target_max_bytes = int(target_max_bytes) + LOG.debug("New target_max_bytes of pool: %s is: %s B" % ( + p, target_max_bytes)) + + # Set the new target_max_bytes only if it changed + if self.target_max_bytes.get(p) == target_max_bytes: + LOG.debug("target_max_bytes of pool: %s " + "is already updated" % p) + continue + try: + ceph.osd_set_pool_param(self.service.ceph_api, p, + constants.TARGET_MAX_BYTES, + target_max_bytes) + self.target_max_bytes[p] = target_max_bytes + except exception.CephPoolSetParamFailure as e: + LOG.warn(e) + continue + return diff --git a/ceph-manager/ceph-manager/ceph_manager/ceph.py b/ceph-manager/ceph-manager/ceph_manager/ceph.py new file mode 100644 index 00000000..dff3c8ab --- /dev/null +++ b/ceph-manager/ceph-manager/ceph_manager/ceph.py @@ -0,0 +1,164 @@ +# +# Copyright (c) 2016-2018 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +import exception +from i18n import _LI +# noinspection PyUnresolvedReferences +from oslo_log import log as logging + + +LOG = logging.getLogger(__name__) + + +def osd_pool_set_quota(ceph_api, pool_name, max_bytes=0, max_objects=0): + """Set the quota for an OSD pool. + Setting max_bytes or max_objects to 0 will disable that quota param. + :param pool_name: OSD pool name + :param max_bytes: maximum bytes for the OSD pool + :param max_objects: maximum objects for the OSD pool + """ + + # Update quota if needed + prev_quota = osd_pool_get_quota(ceph_api, pool_name) + if prev_quota["max_bytes"] != max_bytes: + resp, b = ceph_api.osd_set_pool_quota(pool_name, 'max_bytes', + max_bytes, body='json') + if resp.ok: + LOG.info(_LI("Set OSD pool quota: " + "pool_name={}, max_bytes={}").format( + pool_name, max_bytes)) + else: + e = exception.CephPoolSetQuotaFailure( + pool=pool_name, name='max_bytes', + value=max_bytes, reason=resp.reason) + LOG.error(e) + raise e + if prev_quota["max_objects"] != max_objects: + resp, b = ceph_api.osd_set_pool_quota(pool_name, 'max_objects', + max_objects, + body='json') + if resp.ok: + LOG.info(_LI("Set OSD pool quota: " + "pool_name={}, max_objects={}").format( + pool_name, max_objects)) + else: + e = exception.CephPoolSetQuotaFailure( + pool=pool_name, name='max_objects', + value=max_objects, reason=resp.reason) + LOG.error(e) + raise e + + +def osd_pool_get_quota(ceph_api, pool_name): + resp, quota = ceph_api.osd_get_pool_quota(pool_name, body='json') + if not resp.ok: + e = exception.CephPoolGetQuotaFailure( + pool=pool_name, reason=resp.reason) + LOG.error(e) + raise e + else: + return {"max_objects": quota["output"]["quota_max_objects"], + "max_bytes": quota["output"]["quota_max_bytes"]} + + +def osd_pool_exists(ceph_api, pool_name): + response, body = ceph_api.osd_pool_get( + pool_name, "pg_num", body='json') + if response.ok: + return True + return False + + +def osd_pool_create(ceph_api, pool_name, pg_num, pgp_num): + 
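+    # Illustrative call sketch (hypothetical pool names, not taken from
+    # this patch): creating 'volumes' selects ruleset 0 (storage tier),
+    # while 'volumes-cache' selects ruleset 1 (cache tier):
+    #
+    #   osd_pool_create(ceph_api, 'volumes', 64, 64)
+    #   osd_pool_create(ceph_api, 'volumes-cache', 64, 64)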
if pool_name.endswith("-cache"): + # ruleset 1 is the ruleset for the cache tier + # Name: cache_tier_ruleset + ruleset = 1 + else: + # ruleset 0 is the default ruleset if no crushmap is loaded, or + # the ruleset for the backing tier if one is loaded: + # Name: storage_tier_ruleset + ruleset = 0 + response, body = ceph_api.osd_pool_create( + pool_name, pg_num, pgp_num, pool_type="replicated", + ruleset=ruleset, body='json') + if response.ok: + LOG.info(_LI("Created OSD pool: " + "pool_name={}, pg_num={}, pgp_num={}, " + "pool_type=replicated, ruleset={}").format( + pool_name, pg_num, pgp_num, ruleset)) + else: + e = exception.CephPoolCreateFailure( + name=pool_name, reason=response.reason) + LOG.error(e) + raise e + + # Explicitly assign the ruleset to the pool on creation since it is + # ignored in the create call + response, body = ceph_api.osd_set_pool_param( + pool_name, "crush_ruleset", ruleset, body='json') + if response.ok: + LOG.info(_LI("Assigned crush ruleset to OSD pool: " + "pool_name={}, ruleset={}").format( + pool_name, ruleset)) + else: + e = exception.CephPoolRulesetFailure( + name=pool_name, reason=response.reason) + LOG.error(e) + ceph_api.osd_pool_delete( + pool_name, pool_name, + sure='--yes-i-really-really-mean-it', + body='json') + raise e + + +def osd_pool_delete(ceph_api, pool_name): + """Delete an OSD pool + :param pool_name: pool name + """ + response, body = ceph_api.osd_pool_delete( + pool_name, pool_name, + sure='--yes-i-really-really-mean-it', + body='json') + if response.ok: + LOG.info(_LI("Deleted OSD pool {}").format(pool_name)) + else: + e = exception.CephPoolDeleteFailure( + name=pool_name, reason=response.reason) + LOG.warn(e) + raise e + + +def osd_set_pool_param(ceph_api, pool_name, param, value): + response, body = ceph_api.osd_set_pool_param( + pool_name, param, value, + force=None, body='json') + if response.ok: + LOG.info('OSD set pool param: ' + 'pool={}, name={}, value={}'.format( + pool_name, param, value)) + else: + raise exception.CephPoolSetParamFailure( + pool_name=pool_name, + param=param, + value=str(value), + reason=response.reason) + return response, body + + +def osd_get_pool_param(ceph_api, pool_name, param): + response, body = ceph_api.osd_get_pool_param( + pool_name, param, body='json') + if response.ok: + LOG.debug('OSD get pool param: ' + 'pool={}, name={}, value={}'.format( + pool_name, param, body['output'][param])) + else: + raise exception.CephPoolGetParamFailure( + pool_name=pool_name, + param=param, + reason=response.reason) + return body['output'][param] diff --git a/ceph-manager/ceph-manager/ceph_manager/constants.py b/ceph-manager/ceph-manager/ceph_manager/constants.py new file mode 100644 index 00000000..5b297743 --- /dev/null +++ b/ceph-manager/ceph-manager/ceph_manager/constants.py @@ -0,0 +1,107 @@ +# +# Copyright (c) 2016-2018 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + +from i18n import _ +# noinspection PyUnresolvedReferences +from sysinv.common import constants as sysinv_constants + +CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL = \ + sysinv_constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL +CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER = \ + sysinv_constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER +CEPH_POOLS = sysinv_constants.BACKING_POOLS +CEPH_REPLICATION_FACTOR = sysinv_constants.CEPH_REPLICATION_FACTOR_DEFAULT +SERVICE_PARAM_CEPH_CACHE_HIT_SET_TYPE_BLOOM = \ + sysinv_constants.SERVICE_PARAM_CEPH_CACHE_HIT_SET_TYPE_BLOOM +CACHE_TIERING_DEFAULTS = sysinv_constants.CACHE_TIERING_DEFAULTS +TARGET_MAX_BYTES = \ + sysinv_constants.SERVICE_PARAM_CEPH_CACHE_TIER_TARGET_MAX_BYTES + +# Cache tiering section shorteners +CACHE_TIERING = \ + sysinv_constants.SERVICE_PARAM_SECTION_CEPH_CACHE_TIER +CACHE_TIERING_DESIRED = \ + sysinv_constants.SERVICE_PARAM_SECTION_CEPH_CACHE_TIER_DESIRED +CACHE_TIERING_APPLIED = \ + sysinv_constants.SERVICE_PARAM_SECTION_CEPH_CACHE_TIER_APPLIED +CACHE_TIERING_SECTIONS = \ + [CACHE_TIERING, CACHE_TIERING_DESIRED, CACHE_TIERING_APPLIED] + +# Cache flush parameters +CACHE_FLUSH_OBJECTS_THRESHOLD = 1000 +CACHE_FLUSH_MIN_WAIT_OBJ_COUNT_DECREASE_SEC = 1 +CACHE_FLUSH_MAX_WAIT_OBJ_COUNT_DECREASE_SEC = 128 + +CACHE_TIERING_MIN_QUOTA = 5 + +FM_ALARM_REASON_MAX_SIZE = 256 + +# TODO: this will later change based on parsed health +# clock skew is a VM malfunction; mon or osd is an equipment malfunction +ALARM_CAUSE = 'equipment-malfunction' +ALARM_TYPE = 'equipment' + +# Ceph health check interval (in seconds) +CEPH_HEALTH_CHECK_INTERVAL = 60 + +# Ceph health statuses +CEPH_HEALTH_OK = 'HEALTH_OK' +CEPH_HEALTH_WARN = 'HEALTH_WARN' +CEPH_HEALTH_ERR = 'HEALTH_ERR' +CEPH_HEALTH_DOWN = 'CEPH_DOWN' + +# Statuses not reported by Ceph +CEPH_STATUS_CUSTOM = [CEPH_HEALTH_DOWN] + +SEVERITY = {CEPH_HEALTH_DOWN: 'critical', + CEPH_HEALTH_ERR: 'critical', + CEPH_HEALTH_WARN: 'warning'} + +SERVICE_AFFECTING = {CEPH_HEALTH_DOWN: True, + CEPH_HEALTH_ERR: True, + CEPH_HEALTH_WARN: False} + +# TODO: this will later change based on parsed health +ALARM_REASON_NO_OSD = _('no OSDs') +ALARM_REASON_OSDS_DOWN = _('OSDs are down') +ALARM_REASON_OSDS_OUT = _('OSDs are out') +ALARM_REASON_OSDS_DOWN_OUT = _('OSDs are down/out') +ALARM_REASON_PEER_HOST_DOWN = _('peer host down') + +REPAIR_ACTION_MAJOR_CRITICAL_ALARM = _( + 'Ensure storage hosts from replication group are unlocked and available. ' + 'Check if OSDs of each storage host are up and running. ' 
+ 'If problem persists, contact next level of support.') +REPAIR_ACTION = _('If problem persists, contact next level of support.') + +SYSINV_CONDUCTOR_TOPIC = 'sysinv.conductor_manager' +CEPH_MANAGER_TOPIC = 'sysinv.ceph_manager' +SYSINV_CONFIG_FILE = '/etc/sysinv/sysinv.conf' + +# Titanium Cloud version strings +TITANIUM_SERVER_VERSION_16_10 = '16.10' + +CEPH_HEALTH_WARN_REQUIRE_JEWEL_OSDS_NOT_SET = ( + "all OSDs are running jewel or later but the " + "'require_jewel_osds' osdmap flag is not set") + +UPGRADE_COMPLETED = \ + sysinv_constants.UPGRADE_COMPLETED +UPGRADE_ABORTING = \ + sysinv_constants.UPGRADE_ABORTING +UPGRADE_ABORT_COMPLETING = \ + sysinv_constants.UPGRADE_ABORT_COMPLETING +UPGRADE_ABORTING_ROLLBACK = \ + sysinv_constants.UPGRADE_ABORTING_ROLLBACK + +CEPH_FLAG_REQUIRE_JEWEL_OSDS = 'require_jewel_osds' + +# Tiers +CEPH_CRUSH_TIER_SUFFIX = sysinv_constants.CEPH_CRUSH_TIER_SUFFIX +SB_TIER_TYPE_CEPH = sysinv_constants.SB_TIER_TYPE_CEPH +SB_TIER_SUPPORTED = sysinv_constants.SB_TIER_SUPPORTED +SB_TIER_DEFAULT_NAMES = sysinv_constants.SB_TIER_DEFAULT_NAMES +SB_TIER_CEPH_POOLS = sysinv_constants.SB_TIER_CEPH_POOLS diff --git a/ceph-manager/ceph-manager/ceph_manager/exception.py b/ceph-manager/ceph-manager/ceph_manager/exception.py new file mode 100644 index 00000000..c2d81b8b --- /dev/null +++ b/ceph-manager/ceph-manager/ceph_manager/exception.py @@ -0,0 +1,130 @@ +# +# Copyright (c) 2016-2017 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# noinspection PyUnresolvedReferences +from i18n import _, _LW +# noinspection PyUnresolvedReferences +from oslo_log import log as logging + + +LOG = logging.getLogger(__name__) + + +class CephManagerException(Exception): + message = _("An unknown exception occurred.") + + def __init__(self, message=None, **kwargs): + self.kwargs = kwargs + if not message: + try: + message = self.message % kwargs + except TypeError: + LOG.warn(_LW('Exception in string format operation')) + for name, value in kwargs.iteritems(): + LOG.error("%s: %s" % (name, value)) + # at least get the core message out if something happened + message = self.message + super(CephManagerException, self).__init__(message) + + +class CephPoolSetQuotaFailure(CephManagerException): + message = _("Error setting the OSD pool " + "quota %(name)s for %(pool)s to %(value)s") \ + + ": %(reason)s" + + +class CephPoolGetQuotaFailure(CephManagerException): + message = _("Error getting the OSD pool quota for %(pool)s") \ + + ": %(reason)s" + + +class CephPoolCreateFailure(CephManagerException): + message = _("Creating OSD pool %(name)s failed: %(reason)s") + + +class CephPoolDeleteFailure(CephManagerException): + message = _("Deleting OSD pool %(name)s failed: %(reason)s") + + +class CephPoolRulesetFailure(CephManagerException): + message = _("Assigning crush ruleset to OSD " + "pool %(name)s failed: %(reason)s") + + +class CephPoolAddTierFailure(CephManagerException): + message = _("Failed to add OSD tier: " + "backing_pool=%(backing_pool)s, cache_pool=%(cache_pool)s, " + "response=%(response_status_code)s:%(response_reason)s, " + "status=%(status)s, output=%(output)s") + + +class CephPoolRemoveTierFailure(CephManagerException): + message = _("Failed to remove tier: " + "backing_pool=%(backing_pool)s, cache_pool=%(cache_pool)s, " + "response=%(response_status_code)s:%(response_reason)s, " + "status=%(status)s, output=%(output)s") + + +class CephCacheSetModeFailure(CephManagerException): + message = _("Failed to set OSD tier cache mode: " + "cache_pool=%(cache_pool)s, 
mode=%(mode)s, " + "response=%(response_status_code)s:%(response_reason)s, " + "status=%(status)s, output=%(output)s") + + +class CephPoolSetParamFailure(CephManagerException): + message = _("Cannot set Ceph OSD pool parameter: " + "pool_name=%(pool_name)s, param=%(param)s, value=%(value)s. " + "Reason: %(reason)s") + + +class CephPoolGetParamFailure(CephManagerException): + message = _("Cannot get Ceph OSD pool parameter: " + "pool_name=%(pool_name)s, param=%(param)s. " + "Reason: %(reason)s") + + +class CephCacheCreateOverlayFailure(CephManagerException): + message = _("Failed to create overlay: " + "backing_pool=%(backing_pool)s, cache_pool=%(cache_pool)s, " + "response=%(response_status_code)s:%(response_reason)s, " + "status=%(status)s, output=%(output)s") + + +class CephCacheDeleteOverlayFailure(CephManagerException): + message = _("Failed to delete overlay: " + "backing_pool=%(backing_pool)s, cache_pool=%(cache_pool)s, " + "response=%(response_status_code)s:%(response_reason)s, " + "status=%(status)s, output=%(output)s") + + +class CephCacheFlushFailure(CephManagerException): + message = _("Failed to flush cache pool: " + "cache_pool=%(cache_pool)s, " + "return_code=%(return_code)s, " + "cmd=%(cmd)s, output=%(output)s") + + +class CephCacheEnableFailure(CephManagerException): + message = _("Cannot enable Ceph cache tier. " + "Reason: cache tiering operation in progress.") + + +class CephCacheDisableFailure(CephManagerException): + message = _("Cannot disable Ceph cache tier. " + "Reason: cache tiering operation in progress.") + + +class CephSetKeyFailure(CephManagerException): + message = _("Error setting the Ceph flag " + "'%(flag)s' %(extra)s: " + "response=%(response_status_code)s:%(response_reason)s, " + "status=%(status)s, output=%(output)s") + + +class CephApiFailure(CephManagerException): + message = _("API failure: " + "call=%(call)s, reason=%(reason)s") diff --git a/ceph-manager/ceph-manager/ceph_manager/i18n.py b/ceph-manager/ceph-manager/ceph_manager/i18n.py new file mode 100644 index 00000000..67977cea --- /dev/null +++ b/ceph-manager/ceph-manager/ceph_manager/i18n.py @@ -0,0 +1,15 @@ +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +import oslo_i18n + +DOMAIN = 'ceph-manager' + +_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) +_ = _translators.primary + +_LI = _translators.log_info +_LW = _translators.log_warning +_LE = _translators.log_error diff --git a/ceph-manager/ceph-manager/ceph_manager/monitor.py b/ceph-manager/ceph-manager/ceph_manager/monitor.py new file mode 100644 index 00000000..941e5fc0 --- /dev/null +++ b/ceph-manager/ceph-manager/ceph_manager/monitor.py @@ -0,0 +1,893 @@ +# +# Copyright (c) 2013-2018 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +import time + +# noinspection PyUnresolvedReferences +from fm_api import fm_api +# noinspection PyUnresolvedReferences +from fm_api import constants as fm_constants +# noinspection PyUnresolvedReferences +from oslo_log import log as logging + +from sysinv.conductor.cache_tiering_service_config import ServiceConfig + +# noinspection PyProtectedMember +from i18n import _, _LI, _LW, _LE + +import constants +import exception + +LOG = logging.getLogger(__name__) + + +# When upgrading from 16.10 to 17.x Ceph goes from Hammer release +# to Jewel release. 
After all storage nodes are upgraded to 17.x +# the cluster is in HEALTH_WARN until the administrator explicitly +# enables the require_jewel_osds flag - which signals Ceph that it +# can safely transition from Hammer to Jewel +# +# This class is needed only when upgrading from 16.10 to 17.x +# TODO: remove it after the first 17.x release +# +class HandleUpgradesMixin(object): + + def __init__(self, service): + self.service = service + self.suppress_require_jewel_osds_warning = False + + def setup(self, config): + self._set_upgrade(self.service.retry_get_software_upgrade_status()) + + def _set_upgrade(self, upgrade): + state = upgrade.get('state') + from_version = upgrade.get('from_version') + if (state + and state != constants.UPGRADE_COMPLETED + and from_version == constants.TITANIUM_SERVER_VERSION_16_10): + LOG.info(_LI("Suppress require_jewel_osds health warning")) + self.suppress_require_jewel_osds_warning = True + + def set_flag_require_jewel_osds(self): + try: + response, body = self.service.ceph_api.osd_set_key( + constants.CEPH_FLAG_REQUIRE_JEWEL_OSDS, + body='json') + LOG.info(_LI("Set require_jewel_osds flag")) + except IOError as e: + raise exception.CephApiFailure( + call="osd_set_key", + reason=e.message) + else: + if not response.ok: + raise exception.CephSetKeyFailure( + flag=constants.CEPH_FLAG_REQUIRE_JEWEL_OSDS, + extra=_("needed to complete upgrade to Jewel"), + response_status_code=response.status_code, + response_reason=response.reason, + status=body.get('status'), + output=body.get('output')) + + def filter_health_status(self, health): + health = self.auto_heal(health) + # filter out require_jewel_osds warning + # + if not self.suppress_require_jewel_osds_warning: + return health + if health['health'] != constants.CEPH_HEALTH_WARN: + return health + if (constants.CEPH_HEALTH_WARN_REQUIRE_JEWEL_OSDS_NOT_SET + not in health['detail']): + return health + return self._remove_require_jewel_osds_warning(health) + + def _remove_require_jewel_osds_warning(self, health): + reasons_list = [] + for reason in health['detail'].split(';'): + reason = reason.strip() + if len(reason) == 0: + continue + if constants.CEPH_HEALTH_WARN_REQUIRE_JEWEL_OSDS_NOT_SET in reason: + continue + reasons_list.append(reason) + if len(reasons_list) == 0: + health = { + 'health': constants.CEPH_HEALTH_OK, + 'detail': ''} + else: + health['detail'] = '; '.join(reasons_list) + return health + + def auto_heal(self, health): + if (health['health'] == constants.CEPH_HEALTH_WARN + and (constants.CEPH_HEALTH_WARN_REQUIRE_JEWEL_OSDS_NOT_SET + in health['detail'])): + try: + upgrade = self.service.get_software_upgrade_status() + except Exception as ex: + LOG.warn(_LW( + "Getting software upgrade status failed " + "with: %s. Skip auto-heal attempt " + "(will retry on next ceph status poll).") % str(ex)) + return health + state = upgrade.get('state') + # suppress require_jewel_osds in case upgrade is + # in progress but not completed or aborting + if (not self.suppress_require_jewel_osds_warning + and (upgrade.get('from_version') + == constants.TITANIUM_SERVER_VERSION_16_10) + and state not in [ + None, + constants.UPGRADE_COMPLETED, + constants.UPGRADE_ABORTING, + constants.UPGRADE_ABORT_COMPLETING, + constants.UPGRADE_ABORTING_ROLLBACK]): + LOG.info(_LI("Suppress require_jewel_osds health warning")) + self.suppress_require_jewel_osds_warning = True + # set require_jewel_osds in case upgrade is + # not in progress or completed + if (state in [None, constants.UPGRADE_COMPLETED]): + LOG.warn(_LW( + "No upgrade in progress or upgrade completed " + "and require_jewel_osds health warning raised. " + "Set require_jewel_osds flag.")) + self.set_flag_require_jewel_osds() + health = self._remove_require_jewel_osds_warning(health) + LOG.info(_LI("Unsuppress require_jewel_osds health warning")) + self.suppress_require_jewel_osds_warning = False + # unsuppress require_jewel_osds in case upgrade + # is aborting + if (self.suppress_require_jewel_osds_warning + and state in [ + constants.UPGRADE_ABORTING, + constants.UPGRADE_ABORT_COMPLETING, + constants.UPGRADE_ABORTING_ROLLBACK]): + LOG.info(_LI("Unsuppress require_jewel_osds health warning")) + self.suppress_require_jewel_osds_warning = False + return health + + +class Monitor(HandleUpgradesMixin): + + def __init__(self, service): + self.service = service + self.current_ceph_health = "" + self.detailed_health_reason = "" + self.cache_enabled = False + self.tiers_size = {} + self.known_object_pool_name = None + self.primary_tier_name = constants.SB_TIER_DEFAULT_NAMES[ + constants.SB_TIER_TYPE_CEPH] + constants.CEPH_CRUSH_TIER_SUFFIX + self.cluster_is_up = False + super(Monitor, self).__init__(service) + + def setup(self, config): + self.set_caching_tier_config(config) + super(Monitor, self).setup(config) + + def set_caching_tier_config(self, config): + conf = ServiceConfig().from_dict( + config.get(constants.CACHE_TIERING_APPLIED)) + if conf: + self.cache_enabled = conf.cache_enabled + + def monitor_check_cache_tier(self, enable_flag): + LOG.info(_LI("monitor_check_cache_tier: " + "enable_flag={}".format(enable_flag))) + self.cache_enabled = enable_flag + + def run(self): + # Wait until Ceph cluster is up and we can get the fsid + while True: + self.ceph_get_fsid() + if self.service.entity_instance_id: + break + time.sleep(constants.CEPH_HEALTH_CHECK_INTERVAL) + + # Start monitoring ceph status + while True: + self.ceph_poll_status() + self.ceph_poll_quotas() + time.sleep(constants.CEPH_HEALTH_CHECK_INTERVAL) + + def ceph_get_fsid(self): + # Check whether an alarm has already been raised + self._get_current_alarms() + if self.current_health_alarm: + LOG.info(_LI("Current alarm: %s") % + str(self.current_health_alarm.__dict__)) + + fsid = self._get_fsid() + if not fsid: + # Raise alarm - it will not have an entity_instance_id + self._report_fault({'health': constants.CEPH_HEALTH_DOWN, + 'detail': 'Ceph cluster is down.'}, + fm_constants.FM_ALARM_ID_STORAGE_CEPH) + else: + # Clear alarm with no entity_instance_id + self._clear_fault(fm_constants.FM_ALARM_ID_STORAGE_CEPH) + self.service.entity_instance_id = 'cluster=%s' % fsid + + def ceph_poll_status(self): + # get previous data every time in case: + # * daemon restarted + # * alarm was cleared manually but stored as raised in daemon + self._get_current_alarms() + if
self.current_health_alarm: + LOG.info(_LI("Current alarm: %s") % + str(self.current_health_alarm.__dict__)) + + # get ceph health + health = self._get_health() + LOG.info(_LI("Current Ceph health: " + "%(health)s detail: %(detail)s") % health) + + health = self.filter_health_status(health) + if health['health'] != constants.CEPH_HEALTH_OK: + self._report_fault(health, fm_constants.FM_ALARM_ID_STORAGE_CEPH) + self._report_alarm_osds_health() + else: + self._clear_fault(fm_constants.FM_ALARM_ID_STORAGE_CEPH) + self.clear_all_major_critical() + + def filter_health_status(self, health): + return super(Monitor, self).filter_health_status(health) + + def ceph_poll_quotas(self): + self._get_current_alarms() + if self.current_quota_alarms: + LOG.info(_LI("Current quota alarms %s") % + self.current_quota_alarms) + + # Get the current size of each tier + previous_tiers_size = self.tiers_size + self.tiers_size = self._get_tiers_size() + + # Make sure any removed tiers have the alarms cleared + for t in (set(previous_tiers_size)-set(self.tiers_size)): + self._clear_fault(fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE, + "{0}.tier={1}".format( + self.service.entity_instance_id, + t[:-len(constants.CEPH_CRUSH_TIER_SUFFIX)])) + + # Check the quotas on each tier + for tier in self.tiers_size: + # TODO(rchurch): For R6 remove the tier from the default crushmap + # and remove this check. No longer supporting this tier in R5 + if tier == 'cache-tier': + continue + + # Extract the tier name from the crush equivalent + tier_name = tier[:-len(constants.CEPH_CRUSH_TIER_SUFFIX)] + + if self.tiers_size[tier] == 0: + LOG.info(_LI("'%s' tier cluster size not yet available") + % tier_name) + continue + + pools_quota_sum = 0 + if tier == self.primary_tier_name: + for pool in constants.CEPH_POOLS: + if (pool['pool_name'] == + constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL or + pool['pool_name'] == + constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER): + object_pool_name = self._get_object_pool_name() + if object_pool_name is None: + LOG.error("Rados gateway object data pool does " + "not exist.") + else: + pools_quota_sum += \ + self._get_osd_pool_quota(object_pool_name) + else: + pools_quota_sum += self._get_osd_pool_quota( + pool['pool_name']) + else: + for pool in constants.SB_TIER_CEPH_POOLS: + pool_name = "{0}-{1}".format(pool['pool_name'], tier_name) + pools_quota_sum += self._get_osd_pool_quota(pool_name) + + # Currently, there is only one pool on the additional tier(s), + # therefore allow a quota of 0 + if (pools_quota_sum != self.tiers_size[tier] and + pools_quota_sum != 0): + self._report_fault( + {'tier_name': tier_name, + 'tier_eid': "{0}.tier={1}".format( + self.service.entity_instance_id, + tier_name)}, + fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE) + else: + self._clear_fault( + fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE, + "{0}.tier={1}".format(self.service.entity_instance_id, + tier_name)) + + # CEPH HELPERS + + def _get_fsid(self): + try: + response, fsid = self.service.ceph_api.fsid( + body='text', timeout=30) + except IOError as e: + LOG.warning(_LW("ceph_api.fsid failed: %s") % str(e.message)) + self.cluster_is_up = False + return None + + if not response.ok: + LOG.warning(_LW("Get fsid failed: %s") % response.reason) + self.cluster_is_up = False + return None + + self.cluster_is_up = True + return fsid.strip() + + def _get_health(self): + try: + # we use text since it has all info + response, body = self.service.ceph_api.health( + body='text', timeout=30) + except IOError as e: + 
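+            # As in _get_fsid() above, connection failures from the
+            # cephclient wrapper surface as IOError, which is why a
+            # broad IOError is handled around these ceph_api calls.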
LOG.warning(_LW("ceph_api.health failed: %s") % str(e.message)) + self.cluster_is_up = False + return {'health': constants.CEPH_HEALTH_DOWN, + 'detail': 'Ceph cluster is down.'} + + if not response.ok: + LOG.warning(_LW("CEPH health check failed: %s") % response.reason) + health_info = [constants.CEPH_HEALTH_DOWN, response.reason] + self.cluster_is_up = False + else: + health_info = body.split(' ', 1) + self.cluster_is_up = True + + health = health_info[0] + + if len(health_info) > 1: + detail = health_info[1] + else: + detail = health_info[0] + + return {'health': health.strip(), + 'detail': detail.strip()} + + def _get_object_pool_name(self): + if self.known_object_pool_name is None: + response, body = self.service.ceph_api.osd_pool_get( + constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL, + "pg_num", + body='json') + + if response.ok: + self.known_object_pool_name = \ + constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL + return self.known_object_pool_name + + response, body = self.service.ceph_api.osd_pool_get( + constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER, + "pg_num", + body='json') + + if response.ok: + self.known_object_pool_name = \ + constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER + return self.known_object_pool_name + + return self.known_object_pool_name + + def _get_osd_pool_quota(self, pool_name): + try: + resp, quota = self.service.ceph_api.osd_get_pool_quota( + pool_name, body='json') + except IOError: + return 0 + + if not resp.ok: + LOG.error(_LE("Getting the quota for " + "%(name)s pool failed: %(reason)s") % + {"name": pool_name, "reason": resp.reason}) + return 0 + else: + try: + quota_gib = int(quota["output"]["quota_max_bytes"])/(1024**3) + return quota_gib + except (KeyError, ValueError): + return 0 + + # We have two root nodes, 'cache-tier' and 'storage-tier'. To + # calculate the space that is used by the pools, we must only + # use 'storage-tier'. This function determines whether a given + # node is under a given tree. + def host_is_in_root(self, search_tree, node, root_name): + if node['type'] == 'root': + if node['name'] == root_name: + return True + else: + return False + return self.host_is_in_root(search_tree, + search_tree[node['parent']], + root_name) + + # The information received from ceph is not properly + # structured for efficient parsing and searching, so + # it must be processed and transformed into a more + # structured form. + # + # Input received from ceph is an array of nodes with the + # following structure: + # [{'id': <node id>, 'children': [<node id>, ...], ...}, + # ...] + # + # We process this array and transform it into a dictionary + # (for efficient access). The transformed "search tree" is a + # dictionary with the following structure: + # {<node id>: {'children': [<node id>, ...], ...}, ...} + def _get_tiers_size(self): + try: + resp, body = self.service.ceph_api.osd_df( + body='json', + output_method='tree') + except IOError: + return {} + if not resp.ok: + LOG.error(_LE("Getting the cluster usage " + "information failed: %(reason)s - " + "%(body)s") % {"reason": resp.reason, + "body": body}) + return {} + + # A node is a crushmap element: root, chassis, host, osd. Create a + # dictionary of the nodes keyed by node id, for efficient + # searching through nodes. 
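+        # As a sketch (hypothetical ids, not from a real cluster):
+        # given nodes with ids -1 (root 'storage-tier'), -2 (chassis)
+        # and -4 (host 'storage-0'), the transformation below yields
+        #
+        #   search_tree = {-1: {...root...}, -2: {...chassis...},
+        #                  -4: {...host...}, 0: {...osd.0...}}
+        #
+        # so host_is_in_root() can walk the parent links, e.g.
+        # osd.0 -> storage-0 -> chassis -> 'storage-tier' -> True.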
+ # + # For example: storage-0's node has one child node => OSD 0 + # { + # "id": -4, + # "name": "storage-0", + # "type": "host", + # "type_id": 1, + # "reweight": -1.000000, + # "kb": 51354096, + # "kb_used": 1510348, + # "kb_avail": 49843748, + # "utilization": 2.941047, + # "var": 1.480470, + # "pgs": 0, + # "children": [ + # 0 + # ] + # }, + search_tree = {} + for node in body['output']['nodes']: + search_tree[node['id']] = node + + # Extract the tiers as we will return a dict for the size of each tier + tiers = {k: v for k, v in search_tree.items() if v['type'] == 'root'} + + # For each tier, traverse the heirarchy from the root->chassis->host. + # Sum the host sizes to determine the overall size of the tier + tier_sizes = {} + for tier in tiers.values(): + tier_size = 0 + for chassis_id in tier['children']: + chassis_size = 0 + chassis = search_tree[chassis_id] + for host_id in chassis['children']: + host = search_tree[host_id] + if (chassis_size == 0 or + chassis_size > host['kb']): + chassis_size = host['kb'] + tier_size += chassis_size/(1024 ** 2) + tier_sizes[tier['name']] = tier_size + + return tier_sizes + + # ALARM HELPERS + + @staticmethod + def _check_storage_group(osd_tree, group_id, + hosts, osds, fn_report_alarm): + reasons = set() + degraded_hosts = set() + severity = fm_constants.FM_ALARM_SEVERITY_CRITICAL + for host_id in hosts: + if len(osds[host_id]) == 0: + reasons.add(constants.ALARM_REASON_NO_OSD) + degraded_hosts.add(host_id) + else: + for osd_id in osds[host_id]: + if osd_tree[osd_id]['status'] == 'up': + if osd_tree[osd_id]['reweight'] == 0.0: + reasons.add(constants.ALARM_REASON_OSDS_OUT) + degraded_hosts.add(host_id) + else: + severity = fm_constants.FM_ALARM_SEVERITY_MAJOR + elif osd_tree[osd_id]['status'] == 'down': + reasons.add(constants.ALARM_REASON_OSDS_DOWN) + degraded_hosts.add(host_id) + if constants.ALARM_REASON_OSDS_OUT in reasons \ + and constants.ALARM_REASON_OSDS_DOWN in reasons: + reasons.add(constants.ALARM_REASON_OSDS_DOWN_OUT) + reasons.remove(constants.ALARM_REASON_OSDS_OUT) + if constants.ALARM_REASON_OSDS_DOWN in reasons \ + and constants.ALARM_REASON_OSDS_DOWN_OUT in reasons: + reasons.remove(constants.ALARM_REASON_OSDS_DOWN) + reason = "/".join(list(reasons)) + if severity == fm_constants.FM_ALARM_SEVERITY_CRITICAL: + reason = "{} {}: {}".format( + fm_constants.ALARM_CRITICAL_REPLICATION, + osd_tree[group_id]['name'], + reason) + elif severity == fm_constants.FM_ALARM_SEVERITY_MAJOR: + reason = "{} {}: {}".format( + fm_constants.ALARM_MAJOR_REPLICATION, + osd_tree[group_id]['name'], + reason) + if len(degraded_hosts) == 0: + if len(hosts) < 2: + fn_report_alarm( + osd_tree[group_id]['name'], + "{} {}: {}".format( + fm_constants.ALARM_MAJOR_REPLICATION, + osd_tree[group_id]['name'], + constants.ALARM_REASON_PEER_HOST_DOWN), + fm_constants.FM_ALARM_SEVERITY_MAJOR) + elif len(degraded_hosts) == 1: + fn_report_alarm( + "{}.host={}".format( + osd_tree[group_id]['name'], + osd_tree[list(degraded_hosts)[0]]['name']), + reason, severity) + else: + fn_report_alarm( + osd_tree[group_id]['name'], + reason, severity) + + def _check_storage_tier(self, osd_tree, tier_name, fn_report_alarm): + for tier_id in osd_tree: + if osd_tree[tier_id]['type'] != 'root': + continue + if osd_tree[tier_id]['name'] != tier_name: + continue + for group_id in osd_tree[tier_id]['children']: + if osd_tree[group_id]['type'] != 'chassis': + continue + if not osd_tree[group_id]['name'].startswith('group-'): + continue + hosts = [] + osds = {} + for host_id in 
osd_tree[group_id]['children']: + if osd_tree[host_id]['type'] != 'host': + continue + hosts.append(host_id) + osds[host_id] = [] + for osd_id in osd_tree[host_id]['children']: + if osd_tree[osd_id]['type'] == 'osd': + osds[host_id].append(osd_id) + self._check_storage_group(osd_tree, group_id, hosts, + osds, fn_report_alarm) + break + + def _current_health_alarm_equals(self, reason, severity): + if not self.current_health_alarm: + return False + if getattr(self.current_health_alarm, 'severity', None) != severity: + return False + if getattr(self.current_health_alarm, 'reason_text', None) != reason: + return False + return True + + def _report_alarm_osds_health(self): + response, osd_tree = self.service.ceph_api.osd_tree(body='json') + if not response.ok: + LOG.error(_LE("Failed to retrieve Ceph OSD tree: " + "status_code: %(status_code)s, reason: %(reason)s") % + {"status_code": response.status_code, + "reason": response.reason}) + return + osd_tree = dict([(n['id'], n) for n in osd_tree['output']['nodes']]) + alarms = [] + + self._check_storage_tier(osd_tree, "storage-tier", + lambda *args: alarms.append(args)) + if self.cache_enabled: + self._check_storage_tier(osd_tree, "cache-tier", + lambda *args: alarms.append(args)) + + old_alarms = {} + for alarm_id in [ + fm_constants.FM_ALARM_ID_STORAGE_CEPH_MAJOR, + fm_constants.FM_ALARM_ID_STORAGE_CEPH_CRITICAL]: + alarm_list = self.service.fm_api.get_faults_by_id(alarm_id) + if not alarm_list: + continue + for alarm in alarm_list: + if alarm.entity_instance_id not in old_alarms: + old_alarms[alarm.entity_instance_id] = [] + old_alarms[alarm.entity_instance_id].append( + (alarm.alarm_id, alarm.reason_text)) + + for peer_group, reason, severity in alarms: + if self._current_health_alarm_equals(reason, severity): + continue + alarm_critical_major = fm_constants.FM_ALARM_ID_STORAGE_CEPH_MAJOR + if severity == fm_constants.FM_ALARM_SEVERITY_CRITICAL: + alarm_critical_major = ( + fm_constants.FM_ALARM_ID_STORAGE_CEPH_CRITICAL) + entity_instance_id = ( + self.service.entity_instance_id + '.peergroup=' + peer_group) + alarm_already_exists = False + if entity_instance_id in old_alarms: + for alarm_id, old_reason in old_alarms[entity_instance_id]: + if (reason == old_reason and + alarm_id == alarm_critical_major): + # if the alarm is exactly the same, we don't need + # to recreate it + old_alarms[entity_instance_id].remove( + (alarm_id, old_reason)) + alarm_already_exists = True + elif (alarm_id == alarm_critical_major): + # if we change just the reason, then we just remove the + # alarm from the list so we don't remove it at the + # end of the function + old_alarms[entity_instance_id].remove( + (alarm_id, old_reason)) + + if (len(old_alarms[entity_instance_id]) == 0): + del old_alarms[entity_instance_id] + + # in case the alarm is exactly the same, we skip the alarm set + if alarm_already_exists is True: + continue + major_repair_action = constants.REPAIR_ACTION_MAJOR_CRITICAL_ALARM + fault = fm_api.Fault( + alarm_id=alarm_critical_major, + alarm_type=fm_constants.FM_ALARM_TYPE_4, + alarm_state=fm_constants.FM_ALARM_STATE_SET, + entity_type_id=fm_constants.FM_ENTITY_TYPE_CLUSTER, + entity_instance_id=entity_instance_id, + severity=severity, + reason_text=reason, + probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_15, + proposed_repair_action=major_repair_action, + service_affecting=constants.SERVICE_AFFECTING['HEALTH_WARN']) + alarm_uuid = self.service.fm_api.set_fault(fault) + if alarm_uuid: + LOG.info(_LI( + "Created storage alarm %(alarm_uuid)s - " 
+ "severity: %(severity)s, reason: %(reason)s, " + "service_affecting: %(service_affecting)s") % { + "alarm_uuid": str(alarm_uuid), + "severity": str(severity), + "reason": reason, + "service_affecting": str( + constants.SERVICE_AFFECTING['HEALTH_WARN'])}) + else: + LOG.error(_LE( + "Failed to create storage alarm - " + "severity: %(severity)s, reason: %(reason)s, " + "service_affecting: %(service_affecting)s") % { + "severity": str(severity), + "reason": reason, + "service_affecting": str( + constants.SERVICE_AFFECTING['HEALTH_WARN'])}) + + for entity_instance_id in old_alarms: + for alarm_id, old_reason in old_alarms[entity_instance_id]: + self.service.fm_api.clear_fault(alarm_id, entity_instance_id) + + @staticmethod + def _parse_reason(health): + """ Parse reason strings received from Ceph """ + if health['health'] in constants.CEPH_STATUS_CUSTOM: + # Don't parse reason messages that we added + return "Storage Alarm Condition: %(health)s. %(detail)s" % health + + reasons_lst = health['detail'].split(';') + + parsed_reasons_text = "" + + # Check if PGs have issues - we can't safely store the entire message + # as it tends to be long + for reason in reasons_lst: + if "pgs" in reason: + parsed_reasons_text += "PGs are degraded/stuck or undersized" + break + + # Extract recovery status + parsed_reasons = [r.strip() for r in reasons_lst if 'recovery' in r] + if parsed_reasons: + parsed_reasons_text += ";" + ";".join(parsed_reasons) + + # We need to keep the most important parts of the messages when storing + # them to fm alarms, therefore text between [] brackets is truncated if + # max size is reached. + + # Add brackets, if needed + if len(parsed_reasons_text): + lbracket = " [" + rbracket = "]" + else: + lbracket = "" + rbracket = "" + + msg = {"head": "Storage Alarm Condition: ", + "tail": ". 
Please check 'ceph -s' for more details."} + max_size = constants.FM_ALARM_REASON_MAX_SIZE - \ + len(msg["head"]) - len(msg["tail"]) + + return ( + msg['head'] + + (health['health'] + lbracket + parsed_reasons_text)[:max_size-1] + + rbracket + msg['tail']) + + def _report_fault(self, health, alarm_id): + if alarm_id == fm_constants.FM_ALARM_ID_STORAGE_CEPH: + new_severity = constants.SEVERITY[health['health']] + new_reason_text = self._parse_reason(health) + new_service_affecting = \ + constants.SERVICE_AFFECTING[health['health']] + + # Raise or update alarm if necessary + if ((not self.current_health_alarm) or + (self.current_health_alarm.__dict__['severity'] != + new_severity) or + (self.current_health_alarm.__dict__['reason_text'] != + new_reason_text) or + (self.current_health_alarm.__dict__['service_affecting'] != + str(new_service_affecting))): + + fault = fm_api.Fault( + alarm_id=fm_constants.FM_ALARM_ID_STORAGE_CEPH, + alarm_type=fm_constants.FM_ALARM_TYPE_4, + alarm_state=fm_constants.FM_ALARM_STATE_SET, + entity_type_id=fm_constants.FM_ENTITY_TYPE_CLUSTER, + entity_instance_id=self.service.entity_instance_id, + severity=new_severity, + reason_text=new_reason_text, + probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_15, + proposed_repair_action=constants.REPAIR_ACTION, + service_affecting=new_service_affecting) + + alarm_uuid = self.service.fm_api.set_fault(fault) + if alarm_uuid: + LOG.info(_LI( + "Created storage alarm %(alarm_uuid)s - " + "severity: %(severity)s, reason: %(reason)s, " + "service_affecting: %(service_affecting)s") % { + "alarm_uuid": alarm_uuid, + "severity": new_severity, + "reason": new_reason_text, + "service_affecting": new_service_affecting}) + else: + LOG.error(_LE( + "Failed to create storage alarm - " + "severity: %(severity)s, reason: %(reason)s " + "service_affecting: %(service_affecting)s") % { + "severity": new_severity, + "reason": new_reason_text, + "service_affecting": new_service_affecting}) + + # Log detailed reason for later analysis + if (self.current_ceph_health != health['health'] or + self.detailed_health_reason != health['detail']): + LOG.info(_LI("Ceph status changed: %(health)s " + "detailed reason: %(detail)s") % health) + self.current_ceph_health = health['health'] + self.detailed_health_reason = health['detail'] + + elif (alarm_id == fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE and + not health['tier_eid'] in self.current_quota_alarms): + + quota_reason_text = ("Quota/Space mismatch for the %s tier. The " + "sum of Ceph pool quotas does not match the " + "tier size." % health['tier_name']) + fault = fm_api.Fault( + alarm_id=fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE, + alarm_state=fm_constants.FM_ALARM_STATE_SET, + entity_type_id=fm_constants.FM_ENTITY_TYPE_CLUSTER, + entity_instance_id=health['tier_eid'], + severity=fm_constants.FM_ALARM_SEVERITY_MINOR, + reason_text=quota_reason_text, + alarm_type=fm_constants.FM_ALARM_TYPE_7, + probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_75, + proposed_repair_action=( + "Update ceph storage pool quotas to use all available " + "cluster space for the %s tier." % health['tier_name']), + service_affecting=False) + + alarm_uuid = self.service.fm_api.set_fault(fault) + if alarm_uuid: + LOG.info(_LI( + "Created storage quota storage alarm %(alarm_uuid)s. " + "Reason: %(reason)s") % { + "alarm_uuid": alarm_uuid, "reason": quota_reason_text}) + else: + LOG.error(_LE("Failed to create quota " + "storage alarm. 
Reason: %s") % quota_reason_text) + + def _clear_fault(self, alarm_id, entity_instance_id=None): + # Only clear alarm if there is one already raised + if (alarm_id == fm_constants.FM_ALARM_ID_STORAGE_CEPH and + self.current_health_alarm): + LOG.info(_LI("Clearing health alarm")) + self.service.fm_api.clear_fault( + fm_constants.FM_ALARM_ID_STORAGE_CEPH, + self.service.entity_instance_id) + elif (alarm_id == fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE and + entity_instance_id in self.current_quota_alarms): + LOG.info(_LI("Clearing quota alarm with entity_instance_id %s") + % entity_instance_id) + self.service.fm_api.clear_fault( + fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE, + entity_instance_id) + + def clear_critical_alarm(self, group_name): + alarm_list = self.service.fm_api.get_faults_by_id( + fm_constants.FM_ALARM_ID_STORAGE_CEPH_CRITICAL) + if alarm_list: + for alarm in range(len(alarm_list)): + group_id = alarm_list[alarm].entity_instance_id.find("group-") + group_instance_name = ( + "group-" + + alarm_list[alarm].entity_instance_id[group_id + 6]) + if group_name == group_instance_name: + self.service.fm_api.clear_fault( + fm_constants.FM_ALARM_ID_STORAGE_CEPH_CRITICAL, + alarm_list[alarm].entity_instance_id) + + def clear_all_major_critical(self, group_name=None): + # clear major alarms + alarm_list = self.service.fm_api.get_faults_by_id( + fm_constants.FM_ALARM_ID_STORAGE_CEPH_MAJOR) + if alarm_list: + for alarm in range(len(alarm_list)): + if group_name is not None: + group_id = ( + alarm_list[alarm].entity_instance_id.find("group-")) + group_instance_name = ( + "group-" + + alarm_list[alarm].entity_instance_id[group_id+6]) + if group_name == group_instance_name: + self.service.fm_api.clear_fault( + fm_constants.FM_ALARM_ID_STORAGE_CEPH_MAJOR, + alarm_list[alarm].entity_instance_id) + else: + self.service.fm_api.clear_fault( + fm_constants.FM_ALARM_ID_STORAGE_CEPH_MAJOR, + alarm_list[alarm].entity_instance_id) + # clear critical alarms + alarm_list = self.service.fm_api.get_faults_by_id( + fm_constants.FM_ALARM_ID_STORAGE_CEPH_CRITICAL) + if alarm_list: + for alarm in range(len(alarm_list)): + if group_name is not None: + group_id = ( + alarm_list[alarm].entity_instance_id.find("group-")) + group_instance_name = ( + "group-" + + alarm_list[alarm].entity_instance_id[group_id + 6]) + if group_name == group_instance_name: + self.service.fm_api.clear_fault( + fm_constants.FM_ALARM_ID_STORAGE_CEPH_CRITICAL, + alarm_list[alarm].entity_instance_id) + else: + self.service.fm_api.clear_fault( + fm_constants.FM_ALARM_ID_STORAGE_CEPH_CRITICAL, + alarm_list[alarm].entity_instance_id) + + def _get_current_alarms(self): + """ Retrieve currently raised alarm """ + self.current_health_alarm = self.service.fm_api.get_fault( + fm_constants.FM_ALARM_ID_STORAGE_CEPH, + self.service.entity_instance_id) + quota_faults = self.service.fm_api.get_faults_by_id( + fm_constants.FM_ALARM_ID_STORAGE_CEPH_FREE_SPACE) + if quota_faults: + self.current_quota_alarms = [f.entity_instance_id + for f in quota_faults] + else: + self.current_quota_alarms = [] diff --git a/ceph-manager/ceph-manager/ceph_manager/server.py b/ceph-manager/ceph-manager/ceph_manager/server.py new file mode 100644 index 00000000..9403a7c2 --- /dev/null +++ b/ceph-manager/ceph-manager/ceph_manager/server.py @@ -0,0 +1,249 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2016-2018 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + +# https://chrigl.de/posts/2014/08/27/oslo-messaging-example.html +# http://docs.openstack.org/developer/oslo.messaging/server.html + +import sys + +# noinspection PyUnresolvedReferences +import eventlet +# noinspection PyUnresolvedReferences +import oslo_messaging as messaging +# noinspection PyUnresolvedReferences +from fm_api import fm_api +# noinspection PyUnresolvedReferences +from oslo_config import cfg +# noinspection PyUnresolvedReferences +from oslo_log import log as logging +# noinspection PyUnresolvedReferences +from oslo_service import service +# noinspection PyUnresolvedReferences +from oslo_service.periodic_task import PeriodicTasks +# noinspection PyUnresolvedReferences +from oslo_service import loopingcall + +from sysinv.conductor.cache_tiering_service_config import ServiceConfig + +# noinspection PyUnresolvedReferences +from cephclient import wrapper + +from monitor import Monitor +from cache_tiering import CacheTiering +import exception +import constants + +from i18n import _LI, _LW +from retrying import retry + +eventlet.monkey_patch(all=True) + +CONF = cfg.CONF +CONF.register_opts([ + cfg.StrOpt('sysinv_api_bind_ip', + default='0.0.0.0', + help='IP for the Ceph Manager server to bind to')]) +CONF.logging_default_format_string = ( + '%(asctime)s.%(msecs)03d %(process)d ' + '%(levelname)s %(name)s [-] %(message)s') +logging.register_options(CONF) +logging.setup(CONF, __name__) +LOG = logging.getLogger(__name__) +CONF.rpc_backend = 'rabbit' + + +class RpcEndpoint(PeriodicTasks): + + def __init__(self, service=None): + self.service = service + + def cache_tiering_enable_cache(self, _, new_config, applied_config): + LOG.info(_LI("Enabling cache")) + try: + self.service.cache_tiering.enable_cache( + new_config, applied_config) + except exception.CephManagerException as e: + self.service.sysinv_conductor.call( + {}, 'cache_tiering_enable_cache_complete', + success=False, exception=str(e.message), + new_config=new_config, applied_config=applied_config) + + def cache_tiering_disable_cache(self, _, new_config, applied_config): + LOG.info(_LI("Disabling cache")) + try: + self.service.cache_tiering.disable_cache( + new_config, applied_config) + except exception.CephManagerException as e: + self.service.sysinv_conductor.call( + {}, 'cache_tiering_disable_cache_complete', + success=False, exception=str(e.message), + new_config=new_config, applied_config=applied_config) + + def cache_tiering_operation_in_progress(self, _): + is_locked = self.service.cache_tiering.is_locked() + LOG.info(_LI("Cache tiering operation " + "is in progress: %s") % str(is_locked).lower()) + return is_locked + + def get_primary_tier_size(self, _): + """Get the ceph size for the primary tier. + + returns: an int for the size (in GB) of the tier + """ + + tiers_size = self.service.monitor.tiers_size + primary_tier_size = tiers_size.get( + self.service.monitor.primary_tier_name, 0) + LOG.debug(_LI("Ceph cluster primary tier size: %s GB") % + str(primary_tier_size)) + return primary_tier_size + + def get_tiers_size(self, _): + """Get the ceph cluster tier sizes. + + returns: a dict of sizes (in GB) by tier name + """ + + tiers_size = self.service.monitor.tiers_size + LOG.debug(_LI("Ceph cluster tiers (size in GB): %s") % + str(tiers_size)) + return tiers_size + + def is_cluster_up(self, _): + """Report if the last health check was successful. 
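        (Hypothetical caller sketch, using the oslo.messaging client
        pattern seen elsewhere in this module, where `client` is an
        RPCClient prepared on CEPH_MANAGER_TOPIC:
            client.call({}, 'is_cluster_up')
        )
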
+ + This is an independent view of the cluster accessibility that can be + used by the sysinv conductor to gate ceph API calls which would timeout + and potentially block other operations. + + This view is only updated at the rate the monitor checks for a cluster + uuid or a health check (CEPH_HEALTH_CHECK_INTERVAL) + + returns: boolean True if last health check was successful else False + """ + return self.service.monitor.cluster_is_up + + +# This class is needed only when upgrading from 16.10 to 17.x +# TODO: remove it after 1st 17.x release +# +class SysinvConductorUpgradeApi(object): + def __init__(self): + self.sysinv_conductor = None + super(SysinvConductorUpgradeApi, self).__init__() + + def get_software_upgrade_status(self): + LOG.info(_LI("Getting software upgrade status from sysinv")) + cctxt = self.sysinv_conductor.prepare(timeout=2) + upgrade = cctxt.call({}, 'get_software_upgrade_status') + LOG.info(_LI("Software upgrade status: %s") % str(upgrade)) + return upgrade + + @retry(wait_fixed=1000, + retry_on_exception=lambda exception: + LOG.warn(_LW( + "Getting software upgrade status failed " + "with: %s. Retrying... ") % str(exception)) or True) + def retry_get_software_upgrade_status(self): + return self.get_software_upgrade_status() + + +class Service(SysinvConductorUpgradeApi, service.Service): + + def __init__(self, conf): + super(Service, self).__init__() + self.conf = conf + self.rpc_server = None + self.sysinv_conductor = None + self.ceph_api = None + self.entity_instance_id = '' + self.fm_api = fm_api.FaultAPIs() + self.monitor = Monitor(self) + self.cache_tiering = CacheTiering(self) + self.config = None + self.config_desired = None + self.config_applied = None + + def start(self): + super(Service, self).start() + transport = messaging.get_transport(self.conf) + self.sysinv_conductor = messaging.RPCClient( + transport, + messaging.Target( + topic=constants.SYSINV_CONDUCTOR_TOPIC)) + + self.ceph_api = wrapper.CephWrapper( + endpoint='http://localhost:5001/api/v0.1/') + + # Get initial config from sysinv and send it to + # services that need it before starting them + config = self.get_caching_tier_config() + self.monitor.setup(config) + self.rpc_server = messaging.get_rpc_server( + transport, + messaging.Target(topic=constants.CEPH_MANAGER_TOPIC, + server=self.conf.sysinv_api_bind_ip), + [RpcEndpoint(self)], + executor='eventlet') + self.rpc_server.start() + self.cache_tiering.set_initial_config(config) + eventlet.spawn_n(self.monitor.run) + periodic = loopingcall.FixedIntervalLoopingCall( + self.update_ceph_target_max_bytes) + periodic.start(interval=300) + + def get_caching_tier_config(self): + LOG.info("Getting cache tiering configuration from sysinv") + while True: + # Get initial configuration from sysinv, + # retry until sysinv starts + try: + cctxt = self.sysinv_conductor.prepare(timeout=2) + config = cctxt.call({}, 'cache_tiering_get_config') + for section in config: + if section == constants.CACHE_TIERING: + self.config = ServiceConfig().from_dict( + config[section]) + elif section == constants.CACHE_TIERING_DESIRED: + self.config_desired = ServiceConfig().from_dict( + config[section]) + elif section == constants.CACHE_TIERING_APPLIED: + self.config_applied = ServiceConfig().from_dict( + config[section]) + LOG.info("Cache tiering configs: {}".format(config)) + return config + except Exception as ex: + # In production we should retry on every error until connection + # is reestablished. + LOG.warn("Getting cache tiering configuration failed " + "with: {}. 
Retrying... ".format(str(ex))) + + def stop(self): + try: + self.rpc_server.stop() + self.rpc_server.wait() + except Exception: + pass + super(Service, self).stop() + + def update_ceph_target_max_bytes(self): + try: + self.cache_tiering.update_cache_target_max_bytes() + except Exception as ex: + LOG.exception("Updating Ceph target max bytes failed " + "with: {} retrying on next cycle.".format(str(ex))) + + +def run_service(): + CONF(sys.argv[1:]) + logging.setup(CONF, "ceph-manager") + launcher = service.launch(CONF, Service(CONF), workers=1) + launcher.wait() + + +if __name__ == "__main__": + run_service() diff --git a/ceph-manager/ceph-manager/ceph_manager/tests/__init__.py b/ceph-manager/ceph-manager/ceph_manager/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ceph-manager/ceph-manager/ceph_manager/tests/test_cache_flush.py b/ceph-manager/ceph-manager/ceph_manager/tests/test_cache_flush.py new file mode 100644 index 00000000..2fd26519 --- /dev/null +++ b/ceph-manager/ceph-manager/ceph_manager/tests/test_cache_flush.py @@ -0,0 +1,309 @@ +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +import unittest +import mock + +import subprocess +import math + +from ..cache_tiering import CacheTiering +from ..cache_tiering import LOG as CT_LOG +from ..constants import CACHE_FLUSH_OBJECTS_THRESHOLD +from ..constants import CACHE_FLUSH_MIN_WAIT_OBJ_COUNT_DECREASE_SEC as MIN_WAIT +from ..constants import CACHE_FLUSH_MAX_WAIT_OBJ_COUNT_DECREASE_SEC as MAX_WAIT +from ..exception import CephCacheFlushFailure + + +class TestCacheFlush(unittest.TestCase): + + def setUp(self): + self.service = mock.Mock() + self.ceph_api = mock.Mock() + self.service.ceph_api = self.ceph_api + self.cache_tiering = CacheTiering(self.service) + + @mock.patch('subprocess.check_call') + def test_set_param_fail(self, mock_proc_call): + self.ceph_api.osd_set_pool_param = mock.Mock() + self.ceph_api.osd_set_pool_param.return_value = ( + mock.Mock(ok=False, status_code=500, reason='denied'), + {}) + self.cache_tiering.cache_flush({'pool_name': 'test'}) + mock_proc_call.assert_called_with( + ['/usr/bin/rados', '-p', 'test-cache', 'cache-flush-evict-all']) + + @mock.patch('subprocess.check_call') + def test_df_fail(self, mock_proc_call): + self.ceph_api.osd_set_pool_param = mock.Mock() + self.ceph_api.osd_set_pool_param.return_value = ( + mock.Mock(ok=True, status_code=200, reason='OK'), + {}) + self.ceph_api.df = mock.Mock() + self.ceph_api.df.return_value = ( + mock.Mock(ok=False, status_code=500, reason='denied'), + {}) + self.cache_tiering.cache_flush({'pool_name': 'test'}) + self.ceph_api.osd_set_pool_param.assert_called_once_with( + 'test-cache', 'target_max_objects', 1, force=None, body='json') + mock_proc_call.assert_called_with( + ['/usr/bin/rados', '-p', 'test-cache', 'cache-flush-evict-all']) + + @mock.patch('subprocess.check_call') + def test_rados_evict_fail_raises(self, mock_proc_call): + mock_proc_call.side_effect = subprocess.CalledProcessError(1, ['cmd']) + self.ceph_api.osd_set_pool_param = mock.Mock() + self.ceph_api.osd_set_pool_param.return_value = ( + mock.Mock(ok=False, status_code=500, reason='denied'), + {}) + self.assertRaises(CephCacheFlushFailure, + self.cache_tiering.cache_flush, + {'pool_name': 'test'}) + mock_proc_call.assert_called_with( + ['/usr/bin/rados', '-p', 'test-cache', 'cache-flush-evict-all']) + + @mock.patch('subprocess.check_call') + def test_df_missing_pool(self, mock_proc_call): + self.ceph_api.osd_set_pool_param 
+
+    @mock.patch('subprocess.check_call')
+    def test_df_missing_pool(self, mock_proc_call):
+        self.ceph_api.osd_set_pool_param = mock.Mock()
+        self.ceph_api.osd_set_pool_param.return_value = (
+            mock.Mock(ok=True, status_code=200, reason='OK'),
+            {})
+        self.ceph_api.df = mock.Mock()
+        self.ceph_api.df.return_value = (
+            mock.Mock(ok=True, status_code=200, reason='OK'),
+            {'output': {
+                'pools': [
+                    {'id': 0,
+                     'name': 'rbd',
+                     'stats': {'bytes_used': 0,
+                               'kb_used': 0,
+                               'max_avail': 9588428800,
+                               'objects': 0}}]},
+             'status': 'OK'})
+        with mock.patch.object(CT_LOG, 'warn') as mock_lw:
+            self.cache_tiering.cache_flush({'pool_name': 'test'})
+            self.ceph_api.df.assert_called_once_with(body='json')
+            for c in mock_lw.call_args_list:
+                if 'Missing pool free space' in c[0][0]:
+                    break
+            else:
+                self.fail('expected log warning')
+        self.ceph_api.osd_set_pool_param.assert_called_once_with(
+            'test-cache', 'target_max_objects', 1, force=None, body='json')
+        mock_proc_call.assert_called_with(
+            ['/usr/bin/rados', '-p', 'test-cache', 'cache-flush-evict-all'])
+
+    @mock.patch('subprocess.check_call')
+    def test_df_objects_empty(self, mock_proc_call):
+        self.ceph_api.osd_set_pool_param = mock.Mock()
+        self.ceph_api.osd_set_pool_param.return_value = (
+            mock.Mock(ok=True, status_code=200, reason='OK'),
+            {})
+        self.ceph_api.df = mock.Mock()
+        self.ceph_api.df.return_value = (
+            mock.Mock(ok=True, status_code=200, reason='OK'),
+            {'output': {
+                'pools': [
+                    {'id': 0,
+                     'name': 'test-cache',
+                     'stats': {'bytes_used': 0,
+                               'kb_used': 0,
+                               'max_avail': 9588428800,
+                               'objects': 0}}]},
+             'status': 'OK'})
+        self.cache_tiering.cache_flush({'pool_name': 'test'})
+        self.ceph_api.df.assert_called_once_with(body='json')
+        self.ceph_api.osd_set_pool_param.assert_called_once_with(
+            'test-cache', 'target_max_objects', 1, force=None, body='json')
+        mock_proc_call.assert_called_with(
+            ['/usr/bin/rados', '-p', 'test-cache', 'cache-flush-evict-all'])
+
+    @mock.patch('time.sleep')
+    @mock.patch('subprocess.check_call')
+    def test_df_objects_above_threshold(self, mock_proc_call, mock_time_sleep):
+        self.ceph_api.osd_set_pool_param = mock.Mock()
+        self.ceph_api.osd_set_pool_param.return_value = (
+            mock.Mock(ok=True, status_code=200, reason='OK'),
+            {})
+        self.ceph_api.df = mock.Mock()
+        self.ceph_api.df.side_effect = [
+            (mock.Mock(ok=True, status_code=200, reason='OK'),
+             {'output': {
+                 'pools': [
+                     {'id': 0,
+                      'name': 'test-cache',
+                      'stats': {'bytes_used': 0,
+                                'kb_used': 0,
+                                'max_avail': 9588428800,
+                                'objects': CACHE_FLUSH_OBJECTS_THRESHOLD}}]},
+              'status': 'OK'}),
+            (mock.Mock(ok=True, status_code=200, reason='OK'),
+             {'output': {
+                 'pools': [
+                     {'id': 0,
+                      'name': 'test-cache',
+                      'stats': {'bytes_used': 0,
+                                'kb_used': 0,
+                                'max_avail': 9588428800,
+                                'objects':
+                                    CACHE_FLUSH_OBJECTS_THRESHOLD - 1}}]},
+              'status': 'OK'})]
+        self.cache_tiering.cache_flush({'pool_name': 'test'})
+        self.ceph_api.osd_set_pool_param.assert_called_once_with(
+            'test-cache', 'target_max_objects', 1, force=None, body='json')
+        self.ceph_api.df.assert_called_with(body='json')
+        mock_time_sleep.assert_called_once_with(MIN_WAIT)
+        mock_proc_call.assert_called_with(
+            ['/usr/bin/rados', '-p', 'test-cache', 'cache-flush-evict-all'])
+
+    @mock.patch('time.sleep')
+    @mock.patch('subprocess.check_call')
+    def test_df_objects_interval_increase(self, mock_proc_call,
+                                          mock_time_sleep):
+        self.ceph_api.osd_set_pool_param = mock.Mock()
+        self.ceph_api.osd_set_pool_param.return_value = (
+            mock.Mock(ok=True, status_code=200, reason='OK'),
+            {})
+        self.ceph_api.df = mock.Mock()
+        self.ceph_api.df.side_effect = [
+            (mock.Mock(ok=True, status_code=200, reason='OK'),
+             {'output': {
+                 'pools': [
+                     {'id': 0,
+                      'name': 'test-cache',
+                      'stats': {'bytes_used': 0,
+                                'kb_used': 0,
+                                'max_avail': 9588428800,
+                                'objects':
+                                    CACHE_FLUSH_OBJECTS_THRESHOLD + 1}}]},
+              'status': 'OK'}),
+            (mock.Mock(ok=True, status_code=200, reason='OK'),
+             {'output': {
+                 'pools': [
+                     {'id': 0,
+                      'name': 'test-cache',
+                      'stats': {'bytes_used': 0,
+                                'kb_used': 0,
+                                'max_avail': 9588428800,
+                                'objects':
+                                    CACHE_FLUSH_OBJECTS_THRESHOLD + 1}}]},
+              'status': 'OK'}),
+            (mock.Mock(ok=True, status_code=200, reason='OK'),
+             {'output': {
+                 'pools': [
+                     {'id': 0,
+                      'name': 'test-cache',
+                      'stats': {'bytes_used': 0,
+                                'kb_used': 0,
+                                'max_avail': 9588428800,
+                                'objects':
+                                    CACHE_FLUSH_OBJECTS_THRESHOLD + 1}}]},
+              'status': 'OK'}),
+            (mock.Mock(ok=True, status_code=200, reason='OK'),
+             {'output': {
+                 'pools': [
+                     {'id': 0,
+                      'name': 'test-cache',
+                      'stats': {'bytes_used': 0,
+                                'kb_used': 0,
+                                'max_avail': 9588428800,
+                                'objects':
+                                    CACHE_FLUSH_OBJECTS_THRESHOLD - 1}}]},
+              'status': 'OK'})]
+        self.cache_tiering.cache_flush({'pool_name': 'test'})
+        self.ceph_api.osd_set_pool_param.assert_called_once_with(
+            'test-cache', 'target_max_objects', 1, force=None, body='json')
+        self.ceph_api.df.assert_called_with(body='json')
+        self.assertEqual([c[0][0] for c in mock_time_sleep.call_args_list],
+                         [MIN_WAIT,
+                          MIN_WAIT * 2,
+                          MIN_WAIT * 4])
+        mock_proc_call.assert_called_with(
+            ['/usr/bin/rados', '-p', 'test-cache', 'cache-flush-evict-all'])
+
+    @mock.patch('time.sleep')
+    @mock.patch('subprocess.check_call')
+    def test_df_objects_always_over_threshold(self, mock_proc_call,
+                                              mock_time_sleep):
+        self.ceph_api.osd_set_pool_param = mock.Mock()
+        self.ceph_api.osd_set_pool_param.return_value = (
+            mock.Mock(ok=True, status_code=200, reason='OK'),
+            {})
+        self.ceph_api.df = mock.Mock()
+        self.ceph_api.df.return_value = (
+            mock.Mock(ok=True, status_code=200, reason='OK'),
+            {'output': {
+                'pools': [
+                    {'id': 0,
+                     'name': 'test-cache',
+                     'stats': {'bytes_used': 0,
+                               'kb_used': 0,
+                               'max_avail': 9588428800,
+                               'objects':
+                                   CACHE_FLUSH_OBJECTS_THRESHOLD + 1}}]},
+             'status': 'OK'})
+        # noinspection PyTypeChecker
+        mock_time_sleep.side_effect = \
+            [None] * int(math.ceil(math.log(float(MAX_WAIT) / MIN_WAIT, 2)) + 1) \
+            + [Exception('too many sleeps')]
+        self.cache_tiering.cache_flush({'pool_name': 'test'})
+        self.ceph_api.osd_set_pool_param.assert_called_once_with(
+            'test-cache', 'target_max_objects', 1, force=None, body='json')
+        self.ceph_api.df.assert_called_with(body='json')
+        expected_sleep = []
+        interval = MIN_WAIT
+        while interval <= MAX_WAIT:
+            expected_sleep.append(interval)
+            interval *= 2
+        self.assertEqual([c[0][0] for c in mock_time_sleep.call_args_list],
+                         expected_sleep)
+        mock_proc_call.assert_called_with(
+            ['/usr/bin/rados', '-p', 'test-cache', 'cache-flush-evict-all'])
+
+    @mock.patch('time.sleep')
+    @mock.patch('subprocess.check_call')
+    def test_df_objects_increase(self, mock_proc_call, mock_time_sleep):
+        self.ceph_api.osd_set_pool_param = mock.Mock()
+        self.ceph_api.osd_set_pool_param.return_value = (
+            mock.Mock(ok=True, status_code=200, reason='OK'),
+            {})
+        self.ceph_api.df = mock.Mock()
+        self.ceph_api.df.side_effect = [
+            (mock.Mock(ok=True, status_code=200, reason='OK'),
+             {'output': {
+                 'pools': [
+                     {'id': 0,
+                      'name': 'test-cache',
+                      'stats': {'bytes_used': 0,
+                                'kb_used': 0,
+                                'max_avail': 9588428800,
+                                'objects':
+                                    CACHE_FLUSH_OBJECTS_THRESHOLD + 1}}]},
+              'status': 'OK'}),
+            (mock.Mock(ok=True, status_code=200, reason='OK'),
+             {'output': {
+                 'pools': [
+                     {'id': 0,
+                      'name': 'test-cache',
+                      'stats': {'bytes_used': 0,
+                                'kb_used': 0,
+                                'max_avail': 9588428800,
+                                'objects':
+                                    CACHE_FLUSH_OBJECTS_THRESHOLD + 2}}]},
+              'status': 'OK'})]
+        with mock.patch.object(CT_LOG, 'warn') as mock_lw:
+            self.cache_tiering.cache_flush({'pool_name': 'test'})
+            for c in mock_lw.call_args_list:
+                if 'Unexpected increase' in c[0][0]:
+                    break
+            else:
+                self.fail('expected log warning')
+        self.ceph_api.df.assert_called_with(body='json')
+        mock_time_sleep.assert_called_once_with(MIN_WAIT)
+        self.ceph_api.osd_set_pool_param.assert_called_once_with(
+            'test-cache', 'target_max_objects', 1, force=None, body='json')
+        mock_proc_call.assert_called_with(
+            ['/usr/bin/rados', '-p', 'test-cache', 'cache-flush-evict-all'])
diff --git a/ceph-manager/ceph-manager/setup.py b/ceph-manager/ceph-manager/setup.py
new file mode 100644
index 00000000..40cf5012
--- /dev/null
+++ b/ceph-manager/ceph-manager/setup.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2013-2014, 2016 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+
+import setuptools
+
+setuptools.setup(
+    name='ceph_manager',
+    version='1.0.0',
+    description='CEPH manager',
+    license='Apache-2.0',
+    packages=['ceph_manager'],
+    entry_points={
+    }
+)
diff --git a/ceph-manager/ceph-manager/test-requirements.txt b/ceph-manager/ceph-manager/test-requirements.txt
new file mode 100644
index 00000000..1fdf2056
--- /dev/null
+++ b/ceph-manager/ceph-manager/test-requirements.txt
@@ -0,0 +1,10 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+
+mock
+flake8
+eventlet
+pytest
+oslo.log
+oslo.i18n
\ No newline at end of file
diff --git a/ceph-manager/ceph-manager/tox.ini b/ceph-manager/ceph-manager/tox.ini
new file mode 100644
index 00000000..41d3854b
--- /dev/null
+++ b/ceph-manager/ceph-manager/tox.ini
@@ -0,0 +1,29 @@
+# adapted from glance tox.ini
+
+[tox]
+minversion = 1.6
+envlist = py27,pep8
+skipsdist = True
+# tox does not work if the path to the workdir is too long, so move it to /tmp
+toxworkdir = /tmp/{env:USER}_ceph_manager_tox
+
+[testenv]
+setenv = VIRTUAL_ENV={envdir}
+usedevelop = True
+install_command = pip install --no-use-wheel -U --force-reinstall {opts} {packages}
+deps = -r{toxinidir}/test-requirements.txt
+commands = py.test {posargs}
+whitelist_externals = bash
+passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
+
+[testenv:py27]
+basepython = python2.7
+setenv =
+    PYTHONPATH={toxinidir}/../../../../sysinv/recipes-common/sysinv/sysinv:{toxinidir}/../../../../config/recipes-common/tsconfig/tsconfig
+
+[testenv:pep8]
+commands =
+    flake8 {posargs}
+
+[flake8]
+exclude = .venv,.git,.tox,dist,doc,etc,*glance/locale*,*lib/python*,*egg,build
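
With this tox.ini in place the checks wired up above can be run from the ceph-manager/ceph-manager directory (assuming tox and python2.7 are present on the build host):

    tox -e py27    # run the pytest suite defined in "commands"
    tox -e pep8    # flake8 style check
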
diff --git a/ceph-manager/files/ceph-manager.logrotate b/ceph-manager/files/ceph-manager.logrotate
new file mode 100644
index 00000000..8d7a16ab
--- /dev/null
+++ b/ceph-manager/files/ceph-manager.logrotate
@@ -0,0 +1,11 @@
+/var/log/ceph-manager.log {
+    nodateext
+    size 10M
+    start 1
+    rotate 10
+    missingok
+    notifempty
+    compress
+    delaycompress
+    copytruncate
+}
diff --git a/ceph-manager/files/ceph-manager.service b/ceph-manager/files/ceph-manager.service
new file mode 100644
index 00000000..e8bf26cf
--- /dev/null
+++ b/ceph-manager/files/ceph-manager.service
@@ -0,0 +1,17 @@
+[Unit]
+Description=Handle Ceph API calls and provide status updates via alarms
+After=ceph.target
+
+[Service]
+Type=forking
+Restart=no
+KillMode=process
+RemainAfterExit=yes
+ExecStart=/etc/rc.d/init.d/ceph-manager start
+ExecStop=/etc/rc.d/init.d/ceph-manager stop
+ExecReload=/etc/rc.d/init.d/ceph-manager reload
+PIDFile=/var/run/ceph/ceph-manager.pid
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/ceph-manager/scripts/bin/ceph-manager b/ceph-manager/scripts/bin/ceph-manager
new file mode 100644
index 00000000..9aa4330d
--- /dev/null
+++ b/ceph-manager/scripts/bin/ceph-manager
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2016 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+
+import sys
+
+try:
+    from ceph_manager.server import run_service
+except EnvironmentError as e:
+    print >> sys.stderr, "Error importing ceph_manager: ", str(e)
+    sys.exit(1)
+
+run_service()
diff --git a/ceph-manager/scripts/init.d/ceph-manager b/ceph-manager/scripts/init.d/ceph-manager
new file mode 100644
index 00000000..88bdddfb
--- /dev/null
+++ b/ceph-manager/scripts/init.d/ceph-manager
@@ -0,0 +1,103 @@
+#!/bin/sh
+#
+# Copyright (c) 2013-2014, 2016 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+
+### BEGIN INIT INFO
+# Provides:          ceph-manager
+# Required-Start:    $ceph
+# Required-Stop:     $ceph
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: Daemon for polling ceph status
+# Description:       Daemon for polling ceph status
+### END INIT INFO
+
+DESC="ceph-manager"
+DAEMON="/usr/bin/ceph-manager"
+RUNDIR="/var/run/ceph"
+PIDFILE=$RUNDIR/$DESC.pid
+
+CONFIGFILE="/etc/sysinv/sysinv.conf"
+LOGFILE="/var/log/ceph-manager.log"
+
+start()
+{
+    if [ -e $PIDFILE ]; then
+        PIDDIR=/proc/$(cat $PIDFILE)
+        if [ -d ${PIDDIR} ]; then
+            echo "$DESC already running."
+            exit 0
+        else
+            echo "Removing stale PID file $PIDFILE"
+            rm -f $PIDFILE
+        fi
+    fi
+
+    echo -n "Starting $DESC..."
+    mkdir -p $RUNDIR
+    start-stop-daemon --start --quiet \
+        --pidfile ${PIDFILE} --exec ${DAEMON} \
+        --make-pidfile --background \
+        -- --log-file=$LOGFILE --config-file=$CONFIGFILE
+
+    if [ $? -eq 0 ]; then
+        echo "done."
+    else
+        echo "failed."
+        exit 1
+    fi
+}
+
+stop()
+{
+    echo -n "Stopping $DESC..."
+    start-stop-daemon --stop --quiet --pidfile $PIDFILE --retry 60
+    if [ $? -eq 0 ]; then
+        echo "done."
+    else
+        echo "failed."
+    fi
+    rm -f $PIDFILE
+}
+
+status()
+{
+    pid=`cat $PIDFILE 2>/dev/null`
+    if [ -n "$pid" ]; then
+        if ps -p $pid &> /dev/null ; then
+            echo "$DESC is running"
+            exit 0
+        else
+            echo "$DESC is not running but has pid file"
+            exit 1
+        fi
+    fi
+    echo "$DESC is not running"
+    exit 3
+}
+
+case "$1" in
+    start)
+        start
+        ;;
+    stop)
+        stop
+        ;;
+    restart|force-reload|reload)
+        stop
+        start
+        ;;
+    status)
+        status
+        ;;
+    *)
+        echo "Usage: $0 {start|stop|force-reload|restart|reload|status}"
+        exit 1
+        ;;
+esac
+
+exit 0
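
The status action follows the usual LSB exit-code convention, so other tooling can probe the daemon cheaply; an illustrative invocation:

    /etc/init.d/ceph-manager status
    echo $?    # 0: running, 1: dead but pid file exists, 3: not running
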
diff --git a/ceph/centos/build_srpm.data b/ceph/centos/build_srpm.data
new file mode 100644
index 00000000..e65b1435
--- /dev/null
+++ b/ceph/centos/build_srpm.data
@@ -0,0 +1,5 @@
+SRC_DIR="$CGCS_BASE/git/ceph"
+TIS_BASE_SRCREV=fc689aa5ded5941b8ae86374c7124c7d91782973
+TIS_PATCH_VER=GITREVCOUNT
+BUILD_IS_BIG=40
+BUILD_IS_SLOW=26
diff --git a/ceph/centos/ceph.spec b/ceph/centos/ceph.spec
new file mode 120000
index 00000000..6aba2c9b
--- /dev/null
+++ b/ceph/centos/ceph.spec
@@ -0,0 +1 @@
+../../../git/ceph/ceph.spec
\ No newline at end of file
diff --git a/ceph/files/ceph-manage-journal.py b/ceph/files/ceph-manage-journal.py
new file mode 100644
index 00000000..b3312e0c
--- /dev/null
+++ b/ceph/files/ceph-manage-journal.py
@@ -0,0 +1,326 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2016 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+import ast
+import os
+import os.path
+import re
+import subprocess
+import sys
+
+
+#########
+# Utils #
+#########
+
+def command(arguments, **kwargs):
+    """Execute a command and capture stdout, stderr & return code."""
+    process = subprocess.Popen(
+        arguments,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        **kwargs)
+    out, err = process.communicate()
+    return out, err, process.returncode
+
+
+def get_input(arg, valid_keys):
+    """Convert the input to a dict and perform basic validation."""
+    json_string = arg.replace("\\n", "\n")
+    try:
+        input_dict = ast.literal_eval(json_string)
+        if not all(k in input_dict for k in valid_keys):
+            return None
+    except Exception:
+        return None
+
+    return input_dict
+
+
+def get_partition_uuid(dev):
+    output, _, _ = command(['blkid', dev])
+    try:
+        return re.search('PARTUUID=\"(.+?)\"', output).group(1)
+    except AttributeError:
+        return None
+
+
+def device_path_to_device_node(device_path):
+    try:
+        output, _, _ = command(["udevadm", "settle", "-E", device_path])
+        out, err, retcode = command(["readlink", "-f", device_path])
+        out = out.rstrip()
+    except Exception:
+        return None
+
+    return out
+
+
+###########################################
+# Manage Journal Disk Partitioning Scheme #
+###########################################
+
+DISK_BY_PARTUUID = "/dev/disk/by-partuuid/"
+JOURNAL_UUID = '45b0969e-9b03-4f30-b4c6-b4b80ceff106'  # Type of a journal partition
+
+
+def is_partitioning_correct(disk_path, partition_sizes):
+    """Validate the existence and size of journal partitions."""
+
+    # Obtain the device node from the device path.
+    disk_node = device_path_to_device_node(disk_path)
+
+    # Check that the partition table format is GPT
+    output, _, _ = command(["udevadm", "settle", "-E", disk_node])
+    output, _, _ = command(["parted", "-s", disk_node, "print"])
+    if not re.search('Partition Table: gpt', output):
+        print "Format of disk node %s is not GPT, zapping disk" % disk_node
+        return False
+
+    # Check each partition size
+    partition_index = 1
+    for size in partition_sizes:
+        # Check that each partition size matches the one in input
+        partition_node = disk_node + str(partition_index)
+        output, _, _ = command(["udevadm", "settle", "-E", partition_node])
+        cmd = ["parted", "-s", partition_node, "unit", "MiB", "print"]
+        output, _, _ = command(cmd)
+
+        regex = ("^Disk " + str(partition_node) + ":\\s*" +
+                 str(size) + "[\\.0]*MiB")
+        if not re.search(regex, output, re.MULTILINE):
+            print ("Journal partition %(node)s size is not %(size)s, "
+                   "zapping disk" % {"node": partition_node, "size": size})
+            return False
+
+        partition_index += 1
+
+    output, _, _ = command(["udevadm", "settle", "-t", "10"])
+    return True
+
+
+def create_partitions(disk_path, partition_sizes):
+    """Recreate partitions."""
+
+    # Obtain the device node from the device path.
+    disk_node = device_path_to_device_node(disk_path)
+
+    # Issue: after creating a new partition table on a device, udev does not
+    # always remove old symlinks (i.e. to previous partitions on that device).
+    # Also, even if the links are erased before zapping the disk, some of
+    # them will be recreated even though there is no partition to back them!
+    # Therefore we have to remove the links AFTER we erase the partition
+    # table.
+    # Issue: the DISK_BY_PARTUUID directory is not present at all if there
+    # are no GPT partitions on the storage node, so there is nothing to
+    # remove in that case.
+    links = []
+    if os.path.isdir(DISK_BY_PARTUUID):
+        links = [os.path.join(DISK_BY_PARTUUID, l)
+                 for l in os.listdir(DISK_BY_PARTUUID)
+                 if os.path.islink(os.path.join(DISK_BY_PARTUUID, l))]
+
+    # Erase all partitions on the current node by creating a new GPT table
+    _, err, ret = command(["parted", "-s", disk_node, "mktable", "gpt"])
+    if ret:
+        print ("Error erasing partition table of %(node)s\n"
+               "Return code: %(ret)s reason: %(reason)s" %
+               {"node": disk_node, "ret": ret, "reason": err})
+        exit(1)
+
+    # Erase old symlinks
+    for l in links:
+        if disk_node in os.path.realpath(l):
+            os.remove(l)
+
+    # Create partitions in order
+    used_space_mib = 1  # leave 1 MiB at the beginning of the disk
+    num = 1
+    for size in partition_sizes:
+        cmd = ['parted', '-s', disk_node, 'unit', 'mib',
+               'mkpart', 'primary',
+               str(used_space_mib), str(used_space_mib + size)]
+        _, err, ret = command(cmd)
+        parms = {"disk_node": disk_node,
+                 "start": used_space_mib,
+                 "end": used_space_mib + size,
+                 "reason": err}
+        print ("Created partition from start=%(start)s MiB to end=%(end)s MiB"
+               " on %(disk_node)s" % parms)
+        if ret:
+            print ("Failed to create partition with "
+                   "start=%(start)s, end=%(end)s "
+                   "on %(disk_node)s reason: %(reason)s" % parms)
+            exit(1)
+        # Set the partition type to ceph journal; this is a noncritical
+        # operation, it only makes 'ceph-disk list' output correct info
+        cmd = ['sgdisk',
+               '--change-name={num}:ceph journal'.format(num=num),
+               '--typecode={num}:{uuid}'.format(
+                   num=num,
+                   uuid=JOURNAL_UUID,
+               ),
+               disk_node]
+        _, err, ret = command(cmd)
+        if ret:
+            print ("WARNING: Failed to set partition name and typecode")
+        used_space_mib += size
+        num += 1
+
+###########################
+# Manage Journal Location #
+###########################
+
+OSD_PATH = "/var/lib/ceph/osd/"
+
+
+def mount_data_partition(data_path, osdid):
+    """Mount an OSD data partition and return the mounted path."""
+
+    # Obtain the device node from the device path.
+    data_node = device_path_to_device_node(data_path)
+
+    mount_path = OSD_PATH + "ceph-" + str(osdid)
+    output, _, _ = command(['mount'])
+    regex = "^" + data_node + ".*" + mount_path
+    if not re.search(regex, output, re.MULTILINE):
+        cmd = ['mount', '-t', 'xfs', data_node, mount_path]
+        _, _, ret = command(cmd)
+        params = {"node": data_node, "path": mount_path}
+        if ret:
+            print "Failed to mount %(node)s to %(path)s, aborting" % params
+            exit(1)
+        else:
+            print "Mounted %(node)s to %(path)s" % params
+    return mount_path
+
+
+def is_location_correct(path, journal_path, osdid):
+    """Check if the journal symlink points to the correct device."""
+
+    # Obtain the device node from the device path.
+    journal_node = device_path_to_device_node(journal_path)
+
+    cur_node = os.path.realpath(path + "/journal")
+    if cur_node == journal_node:
+        return True
+    else:
+        return False
+
+
+def fix_location(mount_point, journal_path, osdid):
+    """Move the journal to the new partition."""
+
+    # Obtain the device node from the device path.
+    journal_node = device_path_to_device_node(journal_path)
+
+    # Fix the symlink
+    path = mount_point + "/journal"  # 'journal' symlink path used by ceph-osd
+    journal_uuid = get_partition_uuid(journal_node)
+    new_target = DISK_BY_PARTUUID + journal_uuid
+    params = {"path": path, "target": new_target}
+    try:
+        if os.path.lexists(path):
+            os.unlink(path)  # delete the old symlink
+        os.symlink(new_target, path)
+        print "Symlink created: %(path)s -> %(target)s" % params
+    except Exception:
+        print "Failed to create symlink: %(path)s -> %(target)s" % params
+        exit(1)
+    # Fix journal_uuid
+    path = mount_point + "/journal_uuid"
+    try:
+        with open(path, 'w') as f:
+            f.write(journal_uuid)
+    except Exception as ex:
+        # The operation is noncritical, it only makes 'ceph-disk list'
+        # display complete output. We log and continue.
+        params = {"path": path, "uuid": journal_uuid}
+        print "WARNING: Failed to set uuid of %(path)s to %(uuid)s" % params
+
+    # Clean the journal partition: even after erasing the partition table,
+    # a journal that was previously present here would be reused. Journals
+    # are always bigger than 100MB, so zero out the first 100MB.
+    command(['dd', 'if=/dev/zero', 'of=%s' % journal_node,
+             'bs=1M', 'count=100'])
+
+    # Format the journal
+    cmd = ['/usr/bin/ceph-osd', '-i', str(osdid),
+           '--pid-file', '/var/run/ceph/osd.%s.pid' % osdid,
+           '-c', '/etc/ceph/ceph.conf',
+           '--cluster', 'ceph',
+           '--mkjournal']
+    out, err, ret = command(cmd)
+    params = {"journal_node": journal_node,
+              "osdid": osdid,
+              "ret": ret,
+              "reason": err}
+    if not ret:
+        print ("Prepared new journal partition: %(journal_node)s "
+               "for osd id: %(osdid)s" % params)
+    else:
+        print ("Error initializing journal node: "
+               "%(journal_node)s for osd id: %(osdid)s "
+               "ceph-osd return code: %(ret)s reason: %(reason)s" % params)
+
+
+########
+# Main #
+########
+
+def main(argv):
+    # Parse and validate arguments
+    err = False
+    partitions = None
+    location = None
+    if len(argv) != 2:
+        err = True
+    elif argv[0] == "partitions":
+        valid_keys = ['disk_path', 'journals']
+        partitions = get_input(argv[1], valid_keys)
+        if not partitions:
+            err = True
+        elif not isinstance(partitions['journals'], list):
+            err = True
+    elif argv[0] == "location":
+        valid_keys = ['data_path', 'journal_path', 'osdid']
+        location = get_input(argv[1], valid_keys)
+        if not location:
+            err = True
+        elif not isinstance(location['osdid'], int):
+            err = True
+    else:
+        err = True
+    if err:
+        print "Command intended for internal use only"
+        exit(-1)
+
+    if partitions:
+        # Recreate partitions only if the existing ones don't match input
+        if not is_partitioning_correct(partitions['disk_path'],
+                                       partitions['journals']):
+            create_partitions(partitions['disk_path'], partitions['journals'])
+        else:
+            print ("Partition table for %s is correct, "
+                   "no need to repartition" %
+                   device_path_to_device_node(partitions['disk_path']))
+    elif location:
+        # We need the data partition mounted, and we can leave it mounted
+        mount_point = mount_data_partition(location['data_path'],
+                                           location['osdid'])
+        # Update journal location only if the link points to another partition
+        if not is_location_correct(mount_point,
+                                   location['journal_path'],
+                                   location['osdid']):
+            print ("Fixing journal location for "
+                   "OSD id: %(id)s" % {"node": location['data_path'],
+                                       "id": location['osdid']})
+            fix_location(mount_point,
+                         location['journal_path'],
+                         location['osdid'])
+        else:
+            print ("Journal location for %s is correct, "
+                   "no need to change it" % location['data_path'])
+
+
+main(sys.argv[1:])
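
Both modes take a single Python-literal dict as their second argument, matching the valid_keys lists checked in get_input(). Illustrative invocations, with placeholder device paths and sizes (examples only, not values shipped by this patch):

    # create two 1024 MiB journal partitions on a journal disk
    python ceph-manage-journal.py partitions \
        "{'disk_path': '/dev/disk/by-path/pci-0000:00:1f.2-ata-2.0', 'journals': [1024, 1024]}"

    # point the journal of OSD 2 at an existing journal partition
    python ceph-manage-journal.py location \
        "{'data_path': '/dev/disk/by-path/pci-0000:00:1f.2-ata-1.0-part1', 'journal_path': '/dev/disk/by-path/pci-0000:00:1f.2-ata-2.0-part1', 'osdid': 2}"
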
diff --git a/mwa-perian.map b/mwa-perian.map
new file mode 100644
index 00000000..cc15b2ad
--- /dev/null
+++ b/mwa-perian.map
@@ -0,0 +1,3 @@
+cgcs/middleware/ceph/recipes-common/ceph-manager|ceph-manager
+cgcs/openstack/recipes-base|openstack
+cgcs/recipes-extended/ceph|ceph
diff --git a/openstack/cinder_conf_dummy/LICENSE b/openstack/cinder_conf_dummy/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/openstack/cinder_conf_dummy/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/openstack/distributedcloud-client/centos/build_srpm.data b/openstack/distributedcloud-client/centos/build_srpm.data new file mode 100644 index 00000000..bb5a8712 --- /dev/null +++ b/openstack/distributedcloud-client/centos/build_srpm.data @@ -0,0 +1,5 @@ +TAR_NAME="distributedcloud-client" +SRC_DIR="$CGCS_BASE/git/distributedcloud-client" + +TIS_BASE_SRCREV=078b0eed0d9e9de5d5b0f5d82b3f13e7bcfb5d10 +TIS_PATCH_VER=1 diff --git a/openstack/distributedcloud-client/centos/distributedcloud-client.spec b/openstack/distributedcloud-client/centos/distributedcloud-client.spec new file mode 100644 index 00000000..f3ee0c8f --- /dev/null +++ b/openstack/distributedcloud-client/centos/distributedcloud-client.spec @@ -0,0 +1,81 @@ +%global pypi_name distributedcloud-client + +%{!?upstream_version: %global upstream_version %{version}%{?milestone}} + +%if 0%{?fedora} +%global with_python3 1 +%{!?python3_shortver: %global python3_shortver %(%{__python3} -c 'import sys; print(str(sys.version_info.major) + "." + str(sys.version_info.minor))')} +%endif + +Name: %{pypi_name} +Version: 1.0.0 +Release: 1%{?_tis_dist}.%{tis_patch_ver} +Summary: Client Library for Distributed Cloud Services + +License: ASL 2.0 +URL: unknown +Source0: %{pypi_name}-%{version}.tar.gz + +BuildArch: noarch + +BuildRequires: python2-devel +BuildRequires: python-setuptools +BuildRequires: python-jsonschema >= 2.0.0 +BuildRequires: python-keystonemiddleware +BuildRequires: python-oslo-concurrency +BuildRequires: python-oslo-config +BuildRequires: python-oslo-context +BuildRequires: python-oslo-db +BuildRequires: python-oslo-i18n +BuildRequires: python-oslo-log +BuildRequires: python-oslo-messaging +BuildRequires: python-oslo-middleware +BuildRequires: python-oslo-policy +BuildRequires: python-oslo-rootwrap +BuildRequires: python-oslo-serialization +BuildRequires: python-oslo-service +BuildRequires: python-oslo-utils +BuildRequires: python-oslo-versionedobjects +BuildRequires: python-pbr >= 1.8 +BuildRequires: python-routes >= 1.12.3 +BuildRequires: python-sphinx +BuildRequires: python-sphinxcontrib-httpdomain +BuildRequires: pyOpenSSL +BuildRequires: systemd +# Required to compile translation files +BuildRequires: python-babel + +%description +Client library for Distributed Cloud built on the Distributed Cloud API. It +provides a command-line tool (dcmanager). 
+ +Distributed Cloud provides configuration and management of distributed clouds + +# DC Manager +%package dcmanagerclient +Summary: DC Manager Client + +%description dcmanagerclient +Distributed Cloud Manager Client + +%prep +%autosetup -n %{pypi_name}-%{version} + +# Remove the requirements file so that pbr hooks don't add it +# to distutils requires_dist config +rm -rf {test-,}requirements.txt tools/{pip,test}-requires + +%build +export PBR_VERSION=%{version} +%{__python2} setup.py build + +%install +export PBR_VERSION=%{version} +%{__python2} setup.py install --skip-build --root %{buildroot} + +%files dcmanagerclient +%license LICENSE +%{python2_sitelib}/dcmanagerclient* +%{python2_sitelib}/distributedcloud_client-*.egg-info +%exclude %{python2_sitelib}/dcmanagerclient/tests +%{_bindir}/dcmanager* diff --git a/openstack/distributedcloud/centos/build_srpm.data b/openstack/distributedcloud/centos/build_srpm.data new file mode 100644 index 00000000..f1572767 --- /dev/null +++ b/openstack/distributedcloud/centos/build_srpm.data @@ -0,0 +1,6 @@ +TAR_NAME="distributedcloud" +SRC_DIR="$CGCS_BASE/git/distributedcloud" +COPY_LIST="$FILES_BASE/*" + +TIS_BASE_SRCREV=ea7caa8567120384a0b6a7abbb567fcc7d22188b +TIS_PATCH_VER=7 diff --git a/openstack/distributedcloud/centos/distributedcloud.spec b/openstack/distributedcloud/centos/distributedcloud.spec new file mode 100644 index 00000000..249fc59c --- /dev/null +++ b/openstack/distributedcloud/centos/distributedcloud.spec @@ -0,0 +1,168 @@ +%global pypi_name distributedcloud + +%global with_doc %{!?_without_doc:1}%{?_without_doc:0} +%{!?upstream_version: %global upstream_version %{version}%{?milestone}} + +%if 0%{?fedora} +%global with_python3 1 +%{!?python3_shortver: %global python3_shortver %(%{__python3} -c 'import sys; print(str(sys.version_info.major) + "." 
+ str(sys.version_info.minor))')} +%endif + +Name: %{pypi_name} +Version: 1.0.0 +Release: 1%{?_tis_dist}.%{tis_patch_ver} +Summary: Distributed Cloud Services + +License: ASL 2.0 +URL: unknown +Source0: %{pypi_name}-%{version}.tar.gz +Source1: dcmanager-api.service +Source2: dcmanager-manager.service +Source3: dcorch-api.service +Source4: dcorch-engine.service +Source5: dcorch-nova-api-proxy.service +Source6: dcorch-sysinv-api-proxy.service +Source7: dcorch-snmp.service +Source8: dcorch-cinder-api-proxy.service +Source9: dcorch-neutron-api-proxy.service + +BuildArch: noarch + +BuildRequires: python-crypto +BuildRequires: python-cryptography +BuildRequires: python2-devel +BuildRequires: python-eventlet +BuildRequires: python-setuptools +BuildRequires: python-jsonschema >= 2.0.0 +BuildRequires: python-keyring +BuildRequires: python-keystonemiddleware +BuildRequires: python-keystoneauth1 >= 3.1.0 +BuildRequires: python-netaddr +BuildRequires: python-oslo-concurrency +BuildRequires: python-oslo-config +BuildRequires: python-oslo-context +BuildRequires: python-oslo-db +BuildRequires: python-oslo-i18n +BuildRequires: python-oslo-log +BuildRequires: python-oslo-messaging +BuildRequires: python-oslo-middleware +BuildRequires: python-oslo-policy +BuildRequires: python-oslo-rootwrap +BuildRequires: python-oslo-serialization +BuildRequires: python-oslo-service +BuildRequires: python-oslo-utils +BuildRequires: python-oslo-versionedobjects +BuildRequires: python-pbr >= 1.8 +BuildRequires: python-pecan >= 1.0.0 +BuildRequires: python-routes >= 1.12.3 +BuildRequires: python-sphinx +BuildRequires: python-sphinxcontrib-httpdomain +BuildRequires: pyOpenSSL +BuildRequires: systemd +# Required to compile translation files +BuildRequires: python-babel + +%description +Distributed Cloud provides configuration and management of distributed clouds + +# DC Manager +%package dcmanager +Summary: DC Manager + +%description dcmanager +Distributed Cloud Manager + +%package dcorch +Summary: DC Orchestrator +# TODO(John): should we add Requires lines? + +%description dcorch +Distributed Cloud Orchestrator + +%prep +%autosetup -n %{pypi_name}-%{version} + +# Remove the requirements file so that pbr hooks don't add it +# to distutils requires_dist config +rm -rf {test-,}requirements.txt tools/{pip,test}-requires + +%build +export PBR_VERSION=%{version} +%{__python2} setup.py build +# Generate sample config and add the current directory to PYTHONPATH so +# oslo-config-generator doesn't skip heat's entry points. +PYTHONPATH=. oslo-config-generator --config-file=./dcmanager/config-generator.conf +PYTHONPATH=. 
oslo-config-generator --config-file=./dcorch/config-generator.conf + + +%install +export PBR_VERSION=%{version} +%{__python2} setup.py install -O1 --skip-build --root %{buildroot} \ + --single-version-externally-managed +mkdir -p %{buildroot}/var/log/dcmanager +mkdir -p %{buildroot}/var/cache/dcmanager +mkdir -p %{buildroot}/var/run/dcmanager +mkdir -p %{buildroot}/etc/dcmanager/ +# install systemd unit files +install -p -D -m 644 %{SOURCE1} %{buildroot}%{_unitdir}/dcmanager-api.service +install -p -D -m 644 %{SOURCE2} %{buildroot}%{_unitdir}/dcmanager-manager.service +# install default config files +cd %{_builddir}/%{pypi_name}-%{version} && oslo-config-generator --config-file ./dcmanager/config-generator.conf --output-file %{_builddir}/%{pypi_name}-%{version}/etc/dcmanager/dcmanager.conf.sample +install -p -D -m 640 %{_builddir}/%{pypi_name}-%{version}/etc/dcmanager/dcmanager.conf.sample %{buildroot}%{_sysconfdir}/dcmanager/dcmanager.conf + + +mkdir -p %{buildroot}/var/log/dcorch +mkdir -p %{buildroot}/var/cache/dcorch +mkdir -p %{buildroot}/var/run/dcorch +mkdir -p %{buildroot}/etc/dcorch/ +# install systemd unit files +install -p -D -m 644 %{SOURCE3} %{buildroot}%{_unitdir}/dcorch-api.service +install -p -D -m 644 %{SOURCE4} %{buildroot}%{_unitdir}/dcorch-engine.service +install -p -D -m 644 %{SOURCE5} %{buildroot}%{_unitdir}/dcorch-nova-api-proxy.service +install -p -D -m 644 %{SOURCE6} %{buildroot}%{_unitdir}/dcorch-sysinv-api-proxy.service +install -p -D -m 644 %{SOURCE7} %{buildroot}%{_unitdir}/dcorch-snmp.service +install -p -D -m 644 %{SOURCE8} %{buildroot}%{_unitdir}/dcorch-cinder-api-proxy.service +install -p -D -m 644 %{SOURCE9} %{buildroot}%{_unitdir}/dcorch-neutron-api-proxy.service +# install default config files +cd %{_builddir}/%{pypi_name}-%{version} && oslo-config-generator --config-file ./dcorch/config-generator.conf --output-file %{_builddir}/%{pypi_name}-%{version}/etc/dcorch/dcorch.conf.sample +install -p -D -m 640 %{_builddir}/%{pypi_name}-%{version}/etc/dcorch/dcorch.conf.sample %{buildroot}%{_sysconfdir}/dcorch/dcorch.conf + +%files dcmanager +%license LICENSE +%{python2_sitelib}/dcmanager* +%{python2_sitelib}/distributedcloud-*.egg-info +%exclude %{python2_sitelib}/dcmanager/tests +%{_bindir}/dcmanager-api +%{_unitdir}/dcmanager-api.service +%{_bindir}/dcmanager-manager +%{_unitdir}/dcmanager-manager.service +%{_bindir}/dcmanager-manage +%dir %attr(0755,root,root) %{_localstatedir}/log/dcmanager +%dir %attr(0755,root,root) %{_localstatedir}/run/dcmanager +%dir %attr(0755,root,root) %{_localstatedir}/cache/dcmanager +%dir %attr(0755,root,root) %{_sysconfdir}/dcmanager +%config(noreplace) %attr(-, root, root) %{_sysconfdir}/dcmanager/dcmanager.conf + + +%files dcorch +%license LICENSE +%{python2_sitelib}/dcorch* +%{python2_sitelib}/distributedcloud-*.egg-info +%exclude %{python2_sitelib}/dcorch/tests +%{_bindir}/dcorch-api +%{_unitdir}/dcorch-api.service +%{_bindir}/dcorch-engine +%{_unitdir}/dcorch-engine.service +%{_bindir}/dcorch-api-proxy +%{_unitdir}/dcorch-nova-api-proxy.service +%{_unitdir}/dcorch-sysinv-api-proxy.service +%{_unitdir}/dcorch-cinder-api-proxy.service +%{_unitdir}/dcorch-neutron-api-proxy.service +%{_bindir}/dcorch-manage +%{_bindir}/dcorch-snmp +%{_unitdir}/dcorch-snmp.service +%dir %attr(0755,root,root) %{_localstatedir}/log/dcorch +%dir %attr(0755,root,root) %{_localstatedir}/run/dcorch +%dir %attr(0755,root,root) %{_localstatedir}/cache/dcorch +%dir %attr(0755,root,root) %{_sysconfdir}/dcorch +%config(noreplace) %attr(-, root, root) 
%{_sysconfdir}/dcorch/dcorch.conf diff --git a/openstack/distributedcloud/centos/files/dcmanager-api.service b/openstack/distributedcloud/centos/files/dcmanager-api.service new file mode 100644 index 00000000..6aca790c --- /dev/null +++ b/openstack/distributedcloud/centos/files/dcmanager-api.service @@ -0,0 +1,13 @@ +[Unit] +Description=DC Manager API Service +After=syslog.target network.target mysqld.service + +[Service] +Type=simple +# TODO(Bart): what user to use? +User=root +ExecStart=/usr/bin/dcmanager-api --config-file /etc/dcmanager/dcmanager.conf +Restart=on-failure + +[Install] +WantedBy=multi-user.target diff --git a/openstack/distributedcloud/centos/files/dcmanager-manager.service b/openstack/distributedcloud/centos/files/dcmanager-manager.service new file mode 100644 index 00000000..d2575bd5 --- /dev/null +++ b/openstack/distributedcloud/centos/files/dcmanager-manager.service @@ -0,0 +1,13 @@ +[Unit] +Description=DC Manager Service +After=syslog.target network.target mysqld.service openstack-keystone.service + +[Service] +Type=simple +# TODO(Bart): What user? +User=root +ExecStart=/usr/bin/dcmanager-manager --config-file /etc/dcmanager/dcmanager.conf +Restart=on-failure + +[Install] +WantedBy=multi-user.target diff --git a/openstack/distributedcloud/centos/files/dcorch-api.service b/openstack/distributedcloud/centos/files/dcorch-api.service new file mode 100644 index 00000000..898ec0b9 --- /dev/null +++ b/openstack/distributedcloud/centos/files/dcorch-api.service @@ -0,0 +1,13 @@ +[Unit] +Description=DC Manager API Service +After=syslog.target network.target mysqld.service + +[Service] +Type=simple +# TODO(Bart): what user to use? +User=root +ExecStart=/usr/bin/dcorch-api --config-file /etc/dcorch/dcorch.conf +Restart=on-failure + +[Install] +WantedBy=multi-user.target diff --git a/openstack/distributedcloud/centos/files/dcorch-cinder-api-proxy.service b/openstack/distributedcloud/centos/files/dcorch-cinder-api-proxy.service new file mode 100644 index 00000000..f583a509 --- /dev/null +++ b/openstack/distributedcloud/centos/files/dcorch-cinder-api-proxy.service @@ -0,0 +1,13 @@ +[Unit] +Description=DC Orchestrator API Proxy Service +After=syslog.target network.target mysqld.service + +[Service] +Type=simple +# TODO(Bart): what user to use? +User=root +ExecStart=/usr/bin/dcorch-api-proxy --config-file /etc/dcorch/dcorch.conf --type volume +Restart=on-failure + +[Install] +WantedBy=multi-user.target diff --git a/openstack/distributedcloud/centos/files/dcorch-engine.service b/openstack/distributedcloud/centos/files/dcorch-engine.service new file mode 100644 index 00000000..9c809954 --- /dev/null +++ b/openstack/distributedcloud/centos/files/dcorch-engine.service @@ -0,0 +1,13 @@ +[Unit] +Description=DC Manager Service +After=syslog.target network.target mysqld.service openstack-keystone.service + +[Service] +Type=simple +# TODO(Bart): What user? +User=root +ExecStart=/usr/bin/dcorch-engine --config-file /etc/dcorch/dcorch.conf +Restart=on-failure + +[Install] +WantedBy=multi-user.target diff --git a/openstack/distributedcloud/centos/files/dcorch-neutron-api-proxy.service b/openstack/distributedcloud/centos/files/dcorch-neutron-api-proxy.service new file mode 100644 index 00000000..086896c5 --- /dev/null +++ b/openstack/distributedcloud/centos/files/dcorch-neutron-api-proxy.service @@ -0,0 +1,13 @@ +[Unit] +Description=DC Orchestrator API Proxy Service +After=syslog.target network.target mysqld.service + +[Service] +Type=simple +# TODO(Bart): what user to use? 
+User=root +ExecStart=/usr/bin/dcorch-api-proxy --config-file /etc/dcorch/dcorch.conf --type network +Restart=on-failure + +[Install] +WantedBy=multi-user.target diff --git a/openstack/distributedcloud/centos/files/dcorch-nova-api-proxy.service b/openstack/distributedcloud/centos/files/dcorch-nova-api-proxy.service new file mode 100644 index 00000000..0a6d32e3 --- /dev/null +++ b/openstack/distributedcloud/centos/files/dcorch-nova-api-proxy.service @@ -0,0 +1,13 @@ +[Unit] +Description=DC Orchestrator API Proxy Service +After=syslog.target network.target mysqld.service + +[Service] +Type=simple +# TODO(Bart): what user to use? +User=root +ExecStart=/usr/bin/dcorch-api-proxy --config-file /etc/dcorch/dcorch.conf --type compute +Restart=on-failure + +[Install] +WantedBy=multi-user.target diff --git a/openstack/distributedcloud/centos/files/dcorch-snmp.service b/openstack/distributedcloud/centos/files/dcorch-snmp.service new file mode 100644 index 00000000..bf30d223 --- /dev/null +++ b/openstack/distributedcloud/centos/files/dcorch-snmp.service @@ -0,0 +1,14 @@ +[Unit] +Description=DC Manager SNMP Service +After=syslog.target network.target mysqld.service + +[Service] +Type=simple +# TODO(Bart): what user to use? +User=root +ExecStart=/usr/bin/dcorch-snmp --config-file /etc/dcorch/dcorch.conf +Restart=on-failure + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/distributedcloud/centos/files/dcorch-sysinv-api-proxy.service b/openstack/distributedcloud/centos/files/dcorch-sysinv-api-proxy.service new file mode 100644 index 00000000..92b0b235 --- /dev/null +++ b/openstack/distributedcloud/centos/files/dcorch-sysinv-api-proxy.service @@ -0,0 +1,13 @@ +[Unit] +Description=DC Orchestrator API Proxy Service +After=syslog.target network.target mysqld.service + +[Service] +Type=simple +# TODO(Bart): what user to use? 
+User=root +ExecStart=/usr/bin/dcorch-api-proxy --config-file /etc/dcorch/dcorch.conf --type platform +Restart=on-failure + +[Install] +WantedBy=multi-user.target diff --git a/openstack/openstack-aodh/centos/build_srpm.data b/openstack/openstack-aodh/centos/build_srpm.data new file mode 100644 index 00000000..8429863c --- /dev/null +++ b/openstack/openstack-aodh/centos/build_srpm.data @@ -0,0 +1 @@ +TIS_PATCH_VER=6 diff --git a/openstack/openstack-aodh/centos/meta_patches/0001-Modify-service-files-and-create-expirer-cron-script.patch b/openstack/openstack-aodh/centos/meta_patches/0001-Modify-service-files-and-create-expirer-cron-script.patch new file mode 100644 index 00000000..8df006c1 --- /dev/null +++ b/openstack/openstack-aodh/centos/meta_patches/0001-Modify-service-files-and-create-expirer-cron-script.patch @@ -0,0 +1,245 @@ +From 7662bc5ed71f6704ffc90c7ad8ea040e6872e190 Mon Sep 17 00:00:00 2001 +From: Scott Little +Date: Mon, 2 Oct 2017 14:28:46 -0400 +Subject: [PATCH 1/5] WRS: + 0001-Modify-service-files-and-create-expirer-cron-script.patch + +Conflicts: + SPECS/openstack-aodh.spec +--- + SOURCES/aodh-expirer-active | 28 ++++++++++++++++++++++++++++ + SOURCES/openstack-aodh-api.service | 5 ++--- + SOURCES/openstack-aodh-evaluator.service | 5 ++--- + SOURCES/openstack-aodh-expirer.service | 5 ++--- + SOURCES/openstack-aodh-listener.service | 5 ++--- + SOURCES/openstack-aodh-notifier.service | 5 ++--- + SPECS/openstack-aodh.spec | 25 +++++++++++-------------- + 7 files changed, 49 insertions(+), 29 deletions(-) + create mode 100644 SOURCES/aodh-expirer-active + +diff --git a/SOURCES/aodh-expirer-active b/SOURCES/aodh-expirer-active +new file mode 100644 +index 0000000..373fa5d +--- /dev/null ++++ b/SOURCES/aodh-expirer-active +@@ -0,0 +1,61 @@ ++#!/bin/bash ++ ++# ++# Wrapper script to run aodh-expirer when on active controller only ++# ++AODH_EXPIRER_INFO="/var/run/aodh-expirer.info" ++AODH_EXPIRER_CMD="/usr/bin/nice -n 2 /usr/bin/aodh-expirer" ++ ++function is_active_pgserver() ++{ ++ # Determine whether we're running on the same controller as the service. ++ local service=postgres ++ local enabledactive=$(/usr/bin/sm-query service $service| grep enabled-active) ++ if [ "x$enabledactive" == "x" ] ++ then ++ # enabled-active not found for that service on this controller ++ return 1 ++ else ++ # enabled-active found for that resource ++ return 0 ++ fi ++} ++ ++if is_active_pgserver ++then ++ if [ ! -f ${AODH_EXPIRER_INFO} ] ++ then ++ echo delay_count=0 > ${AODH_EXPIRER_INFO} ++ fi ++ ++ source ${AODH_EXPIRER_INFO} ++ sudo -u postgres psql -d sysinv -c "SELECT alarm_id, entity_instance_id from i_alarm;" | grep -P "^(?=.*100.101)(?=.*${HOSTNAME})" &>/dev/null ++ if [ $? 
-eq 0 ] ++ then ++ source /etc/platform/platform.conf ++ if [ "${system_type}" = "All-in-one" ] ++ then ++ source /etc/init.d/task_affinity_functions.sh ++ idle_core=$(get_most_idle_core) ++ if [ "$idle_core" -ne "0" ] ++ then ++ sh -c "exec taskset -c $idle_core ${AODH_EXPIRER_CMD}" ++ sed -i "/delay_count/s/=.*/=0/" ${AODH_EXPIRER_INFO} ++ exit 0 ++ fi ++ fi ++ ++ if [ "$delay_count" -lt "3" ] ++ then ++ newval=$(($delay_count+1)) ++ sed -i "/delay_count/s/=.*/=$newval/" ${AODH_EXPIRER_INFO} ++ (sleep 3600; /usr/bin/aodh-expirer-active) & ++ exit 0 ++ fi ++ fi ++ ++ eval ${AODH_EXPIRER_CMD} ++ sed -i "/delay_count/s/=.*/=0/" ${AODH_EXPIRER_INFO} ++fi ++ ++exit 0 +diff --git a/SOURCES/openstack-aodh-api.service b/SOURCES/openstack-aodh-api.service +index 2224261..b8b2921 100644 +--- a/SOURCES/openstack-aodh-api.service ++++ b/SOURCES/openstack-aodh-api.service +@@ -4,9 +4,8 @@ After=syslog.target network.target + + [Service] + Type=simple +-User=aodh +-ExecStart=/usr/bin/aodh-api --logfile /var/log/aodh/api.log +-Restart=on-failure ++User=root ++ExecStart=/usr/bin/aodh-api + + [Install] + WantedBy=multi-user.target +diff --git a/SOURCES/openstack-aodh-evaluator.service b/SOURCES/openstack-aodh-evaluator.service +index 4f70431..795ef0c 100644 +--- a/SOURCES/openstack-aodh-evaluator.service ++++ b/SOURCES/openstack-aodh-evaluator.service +@@ -4,9 +4,8 @@ After=syslog.target network.target + + [Service] + Type=simple +-User=aodh +-ExecStart=/usr/bin/aodh-evaluator --logfile /var/log/aodh/evaluator.log +-Restart=on-failure ++User=root ++ExecStart=/usr/bin/aodh-evaluator + + [Install] + WantedBy=multi-user.target +diff --git a/SOURCES/openstack-aodh-expirer.service b/SOURCES/openstack-aodh-expirer.service +index cc68b1b..0185d63 100644 +--- a/SOURCES/openstack-aodh-expirer.service ++++ b/SOURCES/openstack-aodh-expirer.service +@@ -4,9 +4,8 @@ After=syslog.target network.target + + [Service] + Type=simple +-User=aodh +-ExecStart=/usr/bin/aodh-expirer --logfile /var/log/aodh/expirer.log +-Restart=on-failure ++User=root ++ExecStart=/usr/bin/aodh-expirer + + [Install] + WantedBy=multi-user.target +diff --git a/SOURCES/openstack-aodh-listener.service b/SOURCES/openstack-aodh-listener.service +index a024fe3..40e20d2 100644 +--- a/SOURCES/openstack-aodh-listener.service ++++ b/SOURCES/openstack-aodh-listener.service +@@ -4,9 +4,8 @@ After=syslog.target network.target + + [Service] + Type=simple +-User=aodh +-ExecStart=/usr/bin/aodh-listener --logfile /var/log/aodh/listener.log +-Restart=on-failure ++User=root ++ExecStart=/usr/bin/aodh-listener + + [Install] + WantedBy=multi-user.target +diff --git a/SOURCES/openstack-aodh-notifier.service b/SOURCES/openstack-aodh-notifier.service +index d6135d7..68a96dd 100644 +--- a/SOURCES/openstack-aodh-notifier.service ++++ b/SOURCES/openstack-aodh-notifier.service +@@ -4,9 +4,8 @@ After=syslog.target network.target + + [Service] + Type=simple +-User=aodh +-ExecStart=/usr/bin/aodh-notifier --logfile /var/log/aodh/notifier.log +-Restart=on-failure ++User=root ++ExecStart=/usr/bin/aodh-notifier + + [Install] + WantedBy=multi-user.target +diff --git a/SPECS/openstack-aodh.spec b/SPECS/openstack-aodh.spec +index 203f2f0..5d0dedd 100644 +--- a/SPECS/openstack-aodh.spec ++++ b/SPECS/openstack-aodh.spec +@@ -18,8 +18,13 @@ Source12: %{name}-notifier.service + Source13: %{name}-expirer.service + Source14: %{name}-listener.service + ++#WRS ++Source20: aodh-expirer-active ++ + BuildArch: noarch + ++ ++ + BuildRequires: python-setuptools + BuildRequires: python2-devel + 
BuildRequires: systemd +@@ -263,7 +268,7 @@ install -p -D -m 640 etc/aodh/api_paste.ini %{buildroot}%{_sysconfdir}/aodh/api_ + # Setup directories + install -d -m 755 %{buildroot}%{_sharedstatedir}/aodh + install -d -m 755 %{buildroot}%{_sharedstatedir}/aodh/tmp +-install -d -m 750 %{buildroot}%{_localstatedir}/log/aodh ++install -d -m 755 %{buildroot}%{_localstatedir}/log/aodh + + # Install logrotate + install -p -D -m 644 %{SOURCE2} %{buildroot}%{_sysconfdir}/logrotate.d/%{name} +@@ -284,6 +289,9 @@ mv %{buildroot}%{python2_sitelib}/%{pypi_name}/locale %{buildroot}%{_datadir}/lo + # Find language files + %find_lang %{pypi_name} --all-name + ++# WRS ++install -p -D -m 750 %{SOURCE20} %{buildroot}%{_bindir}/aodh-expirer-active ++ + # Remove unused files + rm -fr %{buildroot}/usr/etc + +@@ -346,13 +354,13 @@ exit 0 + %config(noreplace) %attr(-, root, aodh) %{_sysconfdir}/aodh/api_paste.ini + %config(noreplace) %{_sysconfdir}/logrotate.d/%{name} + %dir %attr(0755, aodh, root) %{_localstatedir}/log/aodh +-%{_bindir}/aodh-dbsync + + %defattr(-, aodh, aodh, -) + %dir %{_sharedstatedir}/aodh + %dir %{_sharedstatedir}/aodh/tmp + + %files api ++%{_bindir}/aodh-dbsync + %{_bindir}/aodh-api + %{_bindir}/aodh-data-migration + %{_bindir}/aodh-combination-alarm-conversion +@@ -373,22 +381,11 @@ exit 0 + + %files expirer + %{_bindir}/aodh-expirer ++%{_bindir}/aodh-expirer-active + %{_unitdir}/%{name}-expirer.service + + + %changelog +-* Mon Aug 28 2017 rdo-trunk 3.0.4-1 +-- Update to 3.0.4 +- +-* Mon Jul 24 2017 Pradeep Kilambi 3.0.3-2 +-- Move aodh-dbsync to openstack-aodh-common +- +-* Thu Jul 13 2017 Mehdi Abaakouk 3.0.3-1 +-- Update to 3.0.3 +- +-* Tue Feb 28 2017 Alfredo Moralejo 3.0.2-1 +-- Update to 3.0.2 +- + * Thu Oct 06 2016 Haikel Guemar 3.0.0-1 + - Update to 3.0.0 + +-- +1.9.1 + diff --git a/openstack/openstack-aodh/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch b/openstack/openstack-aodh/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch new file mode 100644 index 00000000..d6563bc2 --- /dev/null +++ b/openstack/openstack-aodh/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch @@ -0,0 +1,27 @@ +From 4639ba8ff40214558ac25394ff2a3f4aaebe437a Mon Sep 17 00:00:00 2001 +From: Scott Little +Date: Mon, 2 Oct 2017 14:28:46 -0400 +Subject: [PATCH 2/5] WRS: 0001-Update-package-versioning-for-TIS-format.patch + +Conflicts: + SPECS/openstack-aodh.spec +--- + SPECS/openstack-aodh.spec | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/SPECS/openstack-aodh.spec b/SPECS/openstack-aodh.spec +index 5d0dedd..c844a28 100644 +--- a/SPECS/openstack-aodh.spec ++++ b/SPECS/openstack-aodh.spec +@@ -4,7 +4,7 @@ + + Name: openstack-aodh + Version: 3.0.4 +-Release: 1%{?dist} ++Release: 1.el7%{?_tis_dist}.%{tis_patch_ver} + Summary: OpenStack Telemetry Alarming + License: ASL 2.0 + URL: https://github.com/openstack/aodh.git +-- +1.9.1 + diff --git a/openstack/openstack-aodh/centos/meta_patches/0001-meta-modify-aodh-api.patch b/openstack/openstack-aodh/centos/meta_patches/0001-meta-modify-aodh-api.patch new file mode 100644 index 00000000..3db7e3dc --- /dev/null +++ b/openstack/openstack-aodh/centos/meta_patches/0001-meta-modify-aodh-api.patch @@ -0,0 +1,72 @@ +From c4f387dbc34568caedd13e6c782a601cbdfcf707 Mon Sep 17 00:00:00 2001 +From: Scott Little +Date: Mon, 2 Oct 2017 14:28:46 -0400 +Subject: [PATCH 4/5] WRS: 0001-meta-modify-aodh-api.patch + +Conflicts: + SPECS/openstack-aodh.spec +--- + SOURCES/openstack-aodh-api.service | 2 
+- + SPECS/openstack-aodh.spec | 11 +++++++++-- + 2 files changed, 10 insertions(+), 3 deletions(-) + +diff --git a/SOURCES/openstack-aodh-api.service b/SOURCES/openstack-aodh-api.service +index b8b2921..06bcd12 100644 +--- a/SOURCES/openstack-aodh-api.service ++++ b/SOURCES/openstack-aodh-api.service +@@ -5,7 +5,7 @@ After=syslog.target network.target + [Service] + Type=simple + User=root +-ExecStart=/usr/bin/aodh-api ++ExecStart=/bin/python /usr/bin/gunicorn --bind 192.168.204.2:8042 --pythonpath /usr/share/aodh aodh-api + + [Install] + WantedBy=multi-user.target +diff --git a/SPECS/openstack-aodh.spec b/SPECS/openstack-aodh.spec +index b52931c..217dd14 100644 +--- a/SPECS/openstack-aodh.spec ++++ b/SPECS/openstack-aodh.spec +@@ -20,9 +20,10 @@ Source14: %{name}-listener.service + #WRS + Source20: aodh-expirer-active + +-BuildArch: noarch +- ++#WRS: Include patches here: ++Patch1: 0001-modify-aodh-api.patch + ++BuildArch: noarch + + BuildRequires: python-setuptools + BuildRequires: python2-devel +@@ -221,6 +222,9 @@ This package contains the Aodh test files. + %prep + %setup -q -n %{pypi_name}-%{upstream_version} + ++#WRS: Apply patches here ++%patch1 -p1 ++ + find . \( -name .gitignore -o -name .placeholder \) -delete + + find aodh -name \*.py -exec sed -i '/\/usr\/bin\/env python/{d;q}' {} + +@@ -263,6 +267,8 @@ install -p -D -m 640 %{SOURCE1} %{buildroot}%{_datadir}/aodh/aodh-dist.conf + install -p -D -m 640 etc/aodh/aodh.conf %{buildroot}%{_sysconfdir}/aodh/aodh.conf + install -p -D -m 640 etc/aodh/policy.json %{buildroot}%{_sysconfdir}/aodh/policy.json + install -p -D -m 640 etc/aodh/api_paste.ini %{buildroot}%{_sysconfdir}/aodh/api_paste.ini ++#WRS ++install -p -D -m 640 aodh/api/aodh-api.py %{buildroot}%{_datadir}/aodh/aodh-api.py + + # Setup directories + install -d -m 755 %{buildroot}%{_sharedstatedir}/aodh +@@ -344,6 +350,7 @@ exit 0 + %files common -f %{pypi_name}.lang + %doc README.rst + %dir %{_sysconfdir}/aodh ++%{_datadir}/aodh/aodh-api.* + %attr(-, root, aodh) %{_datadir}/aodh/aodh-dist.conf + %config(noreplace) %attr(-, root, aodh) %{_sysconfdir}/aodh/aodh.conf + %config(noreplace) %attr(-, root, aodh) %{_sysconfdir}/aodh/policy.json +-- +1.9.1 + diff --git a/openstack/openstack-aodh/centos/meta_patches/0001-meta-pass-aodh-api-config.patch b/openstack/openstack-aodh/centos/meta_patches/0001-meta-pass-aodh-api-config.patch new file mode 100644 index 00000000..898c326a --- /dev/null +++ b/openstack/openstack-aodh/centos/meta_patches/0001-meta-pass-aodh-api-config.patch @@ -0,0 +1,25 @@ +From 98503ae07f4a3b6753c9c1dfc1cf7ed6573ca8e8 Mon Sep 17 00:00:00 2001 +From: Scott Little +Date: Mon, 2 Oct 2017 14:28:46 -0400 +Subject: [PATCH 5/5] WRS: 0001-meta-pass-aodh-api-config.patch + +--- + SOURCES/openstack-aodh-api.service | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/SOURCES/openstack-aodh-api.service b/SOURCES/openstack-aodh-api.service +index 06bcd12..a78eb32 100644 +--- a/SOURCES/openstack-aodh-api.service ++++ b/SOURCES/openstack-aodh-api.service +@@ -5,7 +5,7 @@ After=syslog.target network.target + [Service] + Type=simple + User=root +-ExecStart=/bin/python /usr/bin/gunicorn --bind 192.168.204.2:8042 --pythonpath /usr/share/aodh aodh-api ++ExecStart=/bin/python /usr/bin/gunicorn --config /usr/share/aodh/aodh-api.conf --pythonpath /usr/share/aodh aodh-api + + [Install] + WantedBy=multi-user.target +-- +1.9.1 + diff --git a/openstack/openstack-aodh/centos/meta_patches/0006-add-drivername-for-postgresql.patch 
b/openstack/openstack-aodh/centos/meta_patches/0006-add-drivername-for-postgresql.patch new file mode 100644 index 00000000..26f76a89 --- /dev/null +++ b/openstack/openstack-aodh/centos/meta_patches/0006-add-drivername-for-postgresql.patch @@ -0,0 +1,32 @@ +From 0563ba710bf274a50ee16df75017dd0092cd2d31 Mon Sep 17 00:00:00 2001 +From: Al Bailey +Date: Thu, 21 Dec 2017 14:15:48 -0600 +Subject: [PATCH] add drivername for postgresql + +--- + SPECS/openstack-aodh.spec | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/SPECS/openstack-aodh.spec b/SPECS/openstack-aodh.spec +index 217dd14..2fa77d0 100644 +--- a/SPECS/openstack-aodh.spec ++++ b/SPECS/openstack-aodh.spec +@@ -22,6 +22,7 @@ Source20: aodh-expirer-active + + #WRS: Include patches here: + Patch1: 0001-modify-aodh-api.patch ++Patch2: 0002-Add-drivername-support-for-postgresql-connection-set.patch + + BuildArch: noarch + +@@ -224,6 +225,7 @@ This package contains the Aodh test files. + + #WRS: Apply patches here + %patch1 -p1 ++%patch2 -p1 + + find . \( -name .gitignore -o -name .placeholder \) -delete + +-- +1.8.3.1 + diff --git a/openstack/openstack-aodh/centos/meta_patches/PATCH_ORDER b/openstack/openstack-aodh/centos/meta_patches/PATCH_ORDER new file mode 100644 index 00000000..020c68d3 --- /dev/null +++ b/openstack/openstack-aodh/centos/meta_patches/PATCH_ORDER @@ -0,0 +1,6 @@ +0001-Modify-service-files-and-create-expirer-cron-script.patch +0001-Update-package-versioning-for-TIS-format.patch +meta-remove-default-logrotate.patch +0001-meta-modify-aodh-api.patch +0001-meta-pass-aodh-api-config.patch +0006-add-drivername-for-postgresql.patch diff --git a/openstack/openstack-aodh/centos/meta_patches/meta-remove-default-logrotate.patch b/openstack/openstack-aodh/centos/meta_patches/meta-remove-default-logrotate.patch new file mode 100644 index 00000000..f5600bfa --- /dev/null +++ b/openstack/openstack-aodh/centos/meta_patches/meta-remove-default-logrotate.patch @@ -0,0 +1,42 @@ +From bc92f8743ede901522e6b19af208a4e5d038fa2f Mon Sep 17 00:00:00 2001 +From: Scott Little +Date: Mon, 2 Oct 2017 14:28:46 -0400 +Subject: [PATCH 3/5] WRS: meta-remove-default-logrotate.patch + +--- + SPECS/openstack-aodh.spec | 5 ----- + 1 file changed, 5 deletions(-) + +diff --git a/SPECS/openstack-aodh.spec b/SPECS/openstack-aodh.spec +index c844a28..b52931c 100644 +--- a/SPECS/openstack-aodh.spec ++++ b/SPECS/openstack-aodh.spec +@@ -11,7 +11,6 @@ URL: https://github.com/openstack/aodh.git + Source0: https://tarballs.openstack.org/%{pypi_name}/%{pypi_name}-%{upstream_version}.tar.gz + + Source1: %{pypi_name}-dist.conf +-Source2: %{pypi_name}.logrotate + Source10: %{name}-api.service + Source11: %{name}-evaluator.service + Source12: %{name}-notifier.service +@@ -270,9 +269,6 @@ install -d -m 755 %{buildroot}%{_sharedstatedir}/aodh + install -d -m 755 %{buildroot}%{_sharedstatedir}/aodh/tmp + install -d -m 755 %{buildroot}%{_localstatedir}/log/aodh + +-# Install logrotate +-install -p -D -m 644 %{SOURCE2} %{buildroot}%{_sysconfdir}/logrotate.d/%{name} +- + # Install systemd unit services + install -p -D -m 644 %{SOURCE10} %{buildroot}%{_unitdir}/%{name}-api.service + install -p -D -m 644 %{SOURCE11} %{buildroot}%{_unitdir}/%{name}-evaluator.service +@@ -352,7 +348,6 @@ exit 0 + %config(noreplace) %attr(-, root, aodh) %{_sysconfdir}/aodh/aodh.conf + %config(noreplace) %attr(-, root, aodh) %{_sysconfdir}/aodh/policy.json + %config(noreplace) %attr(-, root, aodh) %{_sysconfdir}/aodh/api_paste.ini +-%config(noreplace) %{_sysconfdir}/logrotate.d/%{name} + %dir 
%attr(0755, aodh, root) %{_localstatedir}/log/aodh + + %defattr(-, aodh, aodh, -) +-- +1.9.1 + diff --git a/openstack/openstack-aodh/centos/patches/0001-modify-aodh-api.patch b/openstack/openstack-aodh/centos/patches/0001-modify-aodh-api.patch new file mode 100644 index 00000000..031e9336 --- /dev/null +++ b/openstack/openstack-aodh/centos/patches/0001-modify-aodh-api.patch @@ -0,0 +1,65 @@ +From ea7f6013ffd1eb525943f4d7ae1bfdef6ecf6c22 Mon Sep 17 00:00:00 2001 +From: Angie Wang +Date: Wed, 15 Feb 2017 15:59:26 -0500 +Subject: [PATCH 1/1] modify-aodh-api + +--- + aodh/api/aodh-api.py | 7 +++++++ + aodh/api/app.py | 14 ++++++++++---- + 2 files changed, 17 insertions(+), 4 deletions(-) + create mode 100644 aodh/api/aodh-api.py + +diff --git a/aodh/api/aodh-api.py b/aodh/api/aodh-api.py +new file mode 100644 +index 0000000..565f2e3 +--- /dev/null ++++ b/aodh/api/aodh-api.py +@@ -0,0 +1,7 @@ ++from aodh.api import app as build_wsgi_app ++import sys ++ ++sys.argv = sys.argv[:1] ++args = {'config_file' : 'etc/aodh/aodh.conf', } ++application = build_wsgi_app.build_wsgi_app(None, args) ++ +diff --git a/aodh/api/app.py b/aodh/api/app.py +index 5cecb83..652856e 100644 +--- a/aodh/api/app.py ++++ b/aodh/api/app.py +@@ -60,7 +60,7 @@ def setup_app(pecan_config=PECAN_CONFIG, conf=None): + return app + + +-def load_app(conf): ++def load_app(conf, args): + # Build the WSGI app + cfg_file = None + cfg_path = conf.api.paste_config +@@ -68,15 +68,21 @@ def load_app(conf): + cfg_file = conf.find_file(cfg_path) + elif os.path.exists(cfg_path): + cfg_file = cfg_path +- + if not cfg_file: + raise cfg.ConfigFilesNotFoundError([conf.api.paste_config]) ++ ++ config = dict([(key, value) for key, value in args.iteritems() ++ if key in conf and value is not None]) ++ for key, value in config.iteritems(): ++ if key == 'config_file': ++ conf.config_file = value ++ + LOG.info(_LI("Full WSGI config used: %s"), cfg_file) + return deploy.loadapp("config:" + cfg_file) + + +-def build_wsgi_app(argv=None): +- return load_app(service.prepare_service(argv=argv)) ++def build_wsgi_app(argv=None, args=None): ++ return load_app(service.prepare_service(argv=argv), args) + + + def _app(): +-- +1.8.3.1 + diff --git a/openstack/openstack-aodh/centos/patches/0002-Add-drivername-support-for-postgresql-connection-set.patch b/openstack/openstack-aodh/centos/patches/0002-Add-drivername-support-for-postgresql-connection-set.patch new file mode 100644 index 00000000..42705e4d --- /dev/null +++ b/openstack/openstack-aodh/centos/patches/0002-Add-drivername-support-for-postgresql-connection-set.patch @@ -0,0 +1,65 @@ +From c8afec630be24345ccae50db739949f964e9c580 Mon Sep 17 00:00:00 2001 +From: Al Bailey +Date: Thu, 21 Dec 2017 13:38:09 -0600 +Subject: [PATCH] Add drivername support for postgresql connection settings + +--- + aodh/api/aodh-api.py | 3 +-- + aodh/cmd/data_migration.py | 2 +- + aodh/storage/__init__.py | 2 +- + setup.cfg | 1 + + 4 files changed, 4 insertions(+), 4 deletions(-) + +diff --git a/aodh/api/aodh-api.py b/aodh/api/aodh-api.py +index 565f2e3..7c413d6 100644 +--- a/aodh/api/aodh-api.py ++++ b/aodh/api/aodh-api.py +@@ -2,6 +2,5 @@ from aodh.api import app as build_wsgi_app + import sys + + sys.argv = sys.argv[:1] +-args = {'config_file' : 'etc/aodh/aodh.conf', } ++args = {'config_file': 'etc/aodh/aodh.conf', } + application = build_wsgi_app.build_wsgi_app(None, args) +- +diff --git a/aodh/cmd/data_migration.py b/aodh/cmd/data_migration.py +index 6a9ea49..1a8df28 100644 +--- a/aodh/cmd/data_migration.py ++++ 
b/aodh/cmd/data_migration.py +@@ -94,7 +94,7 @@ def _validate_conn_options(args): + ), nosql_scheme) + sys.exit(1) + if sql_scheme not in ('mysql', 'mysql+pymysql', 'postgresql', +- 'sqlite'): ++ 'postgresql+psycopg2', 'sqlite'): + root_logger.error(_LE('Invalid destination DB type %s, the destination' + ' database connection should be one of: ' + '[mysql, postgresql, sqlite]'), sql_scheme) +diff --git a/aodh/storage/__init__.py b/aodh/storage/__init__.py +index e1d1048..d8fcd54 100644 +--- a/aodh/storage/__init__.py ++++ b/aodh/storage/__init__.py +@@ -59,7 +59,7 @@ def get_connection_from_config(conf): + url = conf.database.connection + connection_scheme = urlparse.urlparse(url).scheme + if connection_scheme not in ('mysql', 'mysql+pymysql', 'postgresql', +- 'sqlite'): ++ 'postgresql+psycopg2', 'sqlite'): + msg = ('Storage backend %s is deprecated, and all the NoSQL backends ' + 'will be removed in Aodh 4.0, please use SQL backend.' % + connection_scheme) +diff --git a/setup.cfg b/setup.cfg +index 76f5362..ca67a16 100644 +--- a/setup.cfg ++++ b/setup.cfg +@@ -80,6 +80,7 @@ aodh.storage = + mysql = aodh.storage.impl_sqlalchemy:Connection + mysql+pymysql = aodh.storage.impl_sqlalchemy:Connection + postgresql = aodh.storage.impl_sqlalchemy:Connection ++ postgresql+psycopg2 = aodh.storage.impl_sqlalchemy:Connection + sqlite = aodh.storage.impl_sqlalchemy:Connection + hbase = aodh.storage.impl_hbase:Connection + aodh.alarm.rule = +-- +1.8.3.1 + diff --git a/openstack/openstack-aodh/centos/srpm_path b/openstack/openstack-aodh/centos/srpm_path new file mode 100644 index 00000000..10af5c5e --- /dev/null +++ b/openstack/openstack-aodh/centos/srpm_path @@ -0,0 +1 @@ +mirror:Source/openstack-aodh-3.0.4-1.el7.src.rpm diff --git a/openstack/openstack-ironic/centos/build_srpm.data b/openstack/openstack-ironic/centos/build_srpm.data new file mode 100644 index 00000000..08fb79a1 --- /dev/null +++ b/openstack/openstack-ironic/centos/build_srpm.data @@ -0,0 +1,6 @@ +TAR_NAME="ironic" +SRC_DIR="$CGCS_BASE/git/ironic" +COPY_LIST="$FILES_BASE/*" + +TIS_BASE_SRCREV=47179d9fca337f32324f8e8a68541358fdac8649 +TIS_PATCH_VER=GITREVCOUNT diff --git a/openstack/openstack-ironic/centos/files/ironic-dist.conf b/openstack/openstack-ironic/centos/files/ironic-dist.conf new file mode 100644 index 00000000..1e9334b6 --- /dev/null +++ b/openstack/openstack-ironic/centos/files/ironic-dist.conf @@ -0,0 +1,4 @@ +[DEFAULT] +log_dir = /var/log/ironic +state_path = /var/lib/ironic +use_stderr = True diff --git a/openstack/openstack-ironic/centos/files/ironic-rootwrap-sudoers b/openstack/openstack-ironic/centos/files/ironic-rootwrap-sudoers new file mode 100644 index 00000000..f03c027b --- /dev/null +++ b/openstack/openstack-ironic/centos/files/ironic-rootwrap-sudoers @@ -0,0 +1,2 @@ +Defaults:ironic !requiretty +ironic ALL = (root) NOPASSWD: /usr/bin/ironic-rootwrap /etc/ironic/rootwrap.conf * diff --git a/openstack/openstack-ironic/centos/files/openstack-ironic-api.service b/openstack/openstack-ironic/centos/files/openstack-ironic-api.service new file mode 100644 index 00000000..32b3251f --- /dev/null +++ b/openstack/openstack-ironic/centos/files/openstack-ironic-api.service @@ -0,0 +1,12 @@ +[Unit] +Description=OpenStack Ironic API service +After=syslog.target network.target + +[Service] +Type=simple +User=ironic +ExecStart=/usr/bin/ironic-api + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/openstack-ironic/centos/files/openstack-ironic-conductor.service 
b/openstack/openstack-ironic/centos/files/openstack-ironic-conductor.service new file mode 100644 index 00000000..d0e6f38c --- /dev/null +++ b/openstack/openstack-ironic/centos/files/openstack-ironic-conductor.service @@ -0,0 +1,12 @@ +[Unit] +Description=OpenStack Ironic Conductor service +After=syslog.target network.target + +[Service] +Type=simple +User=ironic +ExecStart=/usr/bin/ironic-conductor + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/openstack-ironic/centos/openstack-ironic.spec b/openstack/openstack-ironic/centos/openstack-ironic.spec new file mode 100644 index 00000000..fa8783ad --- /dev/null +++ b/openstack/openstack-ironic/centos/openstack-ironic.spec @@ -0,0 +1,284 @@ +%global full_release ironic-%{version} + +%{!?upstream_version: %global upstream_version %{version}%{?milestone}} + +Name: openstack-ironic +# Liberty semver reset +# https://review.openstack.org/#/q/I1a161b2c1d1e27268065b6b4be24c8f7a5315afb,n,z +Epoch: 1 +Summary: OpenStack Baremetal Hypervisor API (ironic) +Version: 9.1.2 +Release: 0%{?_tis_dist}.%{tis_patch_ver} +License: ASL 2.0 +URL: http://www.openstack.org +Source0: https://tarballs.openstack.org/ironic/ironic-%{version}.tar.gz + +Source1: openstack-ironic-api.service +Source2: openstack-ironic-conductor.service +Source3: ironic-rootwrap-sudoers +Source4: ironic-dist.conf + +BuildArch: noarch +BuildRequires: openstack-macros +BuildRequires: python-setuptools +BuildRequires: python2-devel +BuildRequires: python-pbr +BuildRequires: openssl-devel +BuildRequires: libxml2-devel +BuildRequires: libxslt-devel +BuildRequires: gmp-devel +BuildRequires: python-sphinx +BuildRequires: systemd +# Required to compile translation files +BuildRequires: python-babel +# Required to run unit tests +BuildRequires: pysendfile +BuildRequires: python-alembic +BuildRequires: python-automaton +BuildRequires: python-cinderclient +BuildRequires: python-dracclient +BuildRequires: python-eventlet +BuildRequires: python-futurist +BuildRequires: python-glanceclient +BuildRequires: python-ironic-inspector-client +BuildRequires: python-ironic-lib +BuildRequires: python-jinja2 +BuildRequires: python-jsonpatch +BuildRequires: python-jsonschema +BuildRequires: python-keystoneauth1 +BuildRequires: python-keystonemiddleware +BuildRequires: python-mock +BuildRequires: python-neutronclient +BuildRequires: python-oslo-concurrency +BuildRequires: python-oslo-config +BuildRequires: python-oslo-context +BuildRequires: python-oslo-db +BuildRequires: python-oslo-db-tests +BuildRequires: python-oslo-i18n +BuildRequires: python-oslo-log +BuildRequires: python-oslo-messaging +BuildRequires: python-oslo-middleware +BuildRequires: python-oslo-policy +BuildRequires: python-oslo-reports +BuildRequires: python-oslo-rootwrap +BuildRequires: python-oslo-serialization +BuildRequires: python-oslo-service +BuildRequires: python-oslo-utils +BuildRequires: python-oslo-versionedobjects +BuildRequires: python-oslotest +BuildRequires: python-osprofiler +BuildRequires: python-os-testr +BuildRequires: python-pbr +BuildRequires: python-pecan +BuildRequires: python-proliantutils +BuildRequires: python-psutil +BuildRequires: python-requests +BuildRequires: python-retrying +BuildRequires: python-scciclient +BuildRequires: python-six +BuildRequires: python-sqlalchemy +BuildRequires: python-stevedore +BuildRequires: python-sushy +BuildRequires: python-swiftclient +BuildRequires: python-testresources +BuildRequires: python-tooz +BuildRequires: python-UcsSdk +BuildRequires: python-webob +BuildRequires: 
python-wsme +BuildRequires: pysnmp +BuildRequires: pytz + +%prep +%setup -q -n ironic-%{upstream_version} +rm requirements.txt test-requirements.txt + +%build +export PBR_VERSION=%{version} +%{__python2} setup.py build +# Generate i18n files +%{__python2} setup.py compile_catalog -d build/lib/ironic/locale + +%install +export PBR_VERSION=%{version} +%{__python2} setup.py install -O1 --skip-build --root=%{buildroot} + +# Create fake egg-info for the tempest plugin +# TODO switch to %{service} everywhere as in openstack-example.spec +%global service ironic +%py2_entrypoint %{service} %{service} + + +# install systemd scripts +mkdir -p %{buildroot}%{_unitdir} +install -p -D -m 644 %{SOURCE1} %{buildroot}%{_unitdir} +install -p -D -m 644 %{SOURCE2} %{buildroot}%{_unitdir} + +# install sudoers file +mkdir -p %{buildroot}%{_sysconfdir}/sudoers.d +install -p -D -m 440 %{SOURCE3} %{buildroot}%{_sysconfdir}/sudoers.d/ironic + +mkdir -p %{buildroot}%{_sharedstatedir}/ironic/ +mkdir -p %{buildroot}%{_localstatedir}/log/ironic/ +mkdir -p %{buildroot}%{_sysconfdir}/ironic/rootwrap.d + +#Populate the conf dir +install -p -D -m 640 etc/ironic/ironic.conf.sample %{buildroot}/%{_sysconfdir}/ironic/ironic.conf +install -p -D -m 640 etc/ironic/policy.json %{buildroot}/%{_sysconfdir}/ironic/policy.json +install -p -D -m 640 etc/ironic/rootwrap.conf %{buildroot}/%{_sysconfdir}/ironic/rootwrap.conf +install -p -D -m 640 etc/ironic/rootwrap.d/* %{buildroot}/%{_sysconfdir}/ironic/rootwrap.d/ + +# Install distribution config +install -p -D -m 640 %{SOURCE4} %{buildroot}/%{_datadir}/ironic/ironic-dist.conf + +# Install i18n .mo files (.po and .pot are not required) +install -d -m 755 %{buildroot}%{_datadir} +rm -f %{buildroot}%{python2_sitelib}/ironic/locale/*/LC_*/ironic*po +rm -f %{buildroot}%{python2_sitelib}/ironic/locale/*pot +mv %{buildroot}%{python2_sitelib}/ironic/locale %{buildroot}%{_datadir}/locale + +# Find language files +%find_lang ironic --all-name + + +%description +Ironic provides an API for management and provisioning of physical machines + +%package common +Summary: Ironic common + +Requires: ipmitool +Requires: pysendfile +Requires: python-alembic +Requires: python-automaton >= 0.5.0 +Requires: python-cinderclient >= 3.1.0 +Requires: python-dracclient >= 1.3.0 +Requires: python-eventlet +Requires: python-futurist >= 0.11.0 +Requires: python-glanceclient >= 1:2.7.0 +Requires: python-ironic-inspector-client >= 1.5.0 +Requires: python-ironic-lib >= 2.5.0 +Requires: python-jinja2 +Requires: python-jsonpatch +Requires: python-jsonschema +Requires: python-keystoneauth1 >= 3.1.0 +Requires: python-keystonemiddleware >= 4.12.0 +Requires: python-neutronclient >= 6.3.0 +Requires: python-oslo-concurrency >= 3.8.0 +Requires: python-oslo-config >= 2:4.0.0 +Requires: python-oslo-context >= 2.14.0 +Requires: python-oslo-db >= 4.24.0 +Requires: python-oslo-i18n >= 2.1.0 +Requires: python-oslo-log >= 3.22.0 +Requires: python-oslo-messaging >= 5.24.2 +Requires: python-oslo-middleware >= 3.27.0 +Requires: python-oslo-policy >= 1.23.0 +Requires: python-oslo-reports >= 0.6.0 +Requires: python-oslo-rootwrap >= 5.0.0 +Requires: python-oslo-serialization >= 1.10.0 +Requires: python-oslo-service >= 1.10.0 +Requires: python-oslo-utils >= 3.20.0 +Requires: python-oslo-versionedobjects >= 1.17.0 +Requires: python-osprofiler >= 1.4.0 +Requires: python-pbr +Requires: python-pecan +Requires: python-proliantutils >= 2.4.0 +Requires: python-psutil +Requires: python-requests +Requires: python-retrying +Requires: python-rfc3986 >= 
0.3.1
+Requires: python-scciclient >= 0.5.0
+Requires: python-six
+Requires: python-sqlalchemy
+Requires: python-stevedore >= 1.20.0
+Requires: python-sushy
+Requires: python-swiftclient >= 3.2.0
+Requires: python-tooz >= 1.47.0
+Requires: python-UcsSdk >= 0.8.2.2
+Requires: python-webob >= 1.7.1
+Requires: python-wsme
+Requires: pysnmp
+Requires: pytz
+
+
+Requires(pre): shadow-utils
+
+%description common
+Components common to all OpenStack Ironic services
+
+
+%files common -f ironic.lang
+%doc README.rst
+%license LICENSE
+%{_bindir}/ironic-dbsync
+%{_bindir}/ironic-rootwrap
+%{python2_sitelib}/ironic
+%{python2_sitelib}/ironic-*.egg-info
+%exclude %{python2_sitelib}/ironic/tests
+%exclude %{python2_sitelib}/ironic_tempest_plugin
+%{_sysconfdir}/sudoers.d/ironic
+%config(noreplace) %attr(-,root,ironic) %{_sysconfdir}/ironic
+%attr(-,ironic,ironic) %{_sharedstatedir}/ironic
+%attr(0755,ironic,ironic) %{_localstatedir}/log/ironic
+%attr(-, root, ironic) %{_datadir}/ironic/ironic-dist.conf
+%exclude %{python2_sitelib}/ironic_tests.egg-info
+
+%package api
+Summary: The Ironic API
+
+Requires: %{name}-common = %{epoch}:%{version}-%{release}
+
+Requires(post): systemd
+Requires(preun): systemd
+Requires(postun): systemd
+
+%description api
+Ironic API for management and provisioning of physical machines
+
+
+%files api
+%{_bindir}/ironic-api
+%{_unitdir}/openstack-ironic-api.service
+
+%package conductor
+Summary: The Ironic Conductor
+
+Requires: %{name}-common = %{epoch}:%{version}-%{release}
+
+Requires(post): systemd
+Requires(preun): systemd
+Requires(postun): systemd
+
+%description conductor
+Ironic Conductor for management and provisioning of physical machines
+
+%files conductor
+%{_bindir}/ironic-conductor
+%{_unitdir}/openstack-ironic-conductor.service
+
+
+%package -n python-ironic-tests
+Summary: Ironic tests
+Requires: %{name}-common = %{epoch}:%{version}-%{release}
+Requires: python-mock
+Requires: python-oslotest
+Requires: python-os-testr
+Requires: python-testresources
+
+%description -n python-ironic-tests
+This package contains the Ironic test files.
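The api and conductor subpackages each carry a single daemon binary together with the unit file installed above, so day-two management is plain systemd, and the sudoers fragment confines the ironic user to the rootwrap wrapper. A minimal post-install sketch, assuming the freshly built subpackages are installed on a systemd host:

# enable and start the split services (unit names match the files packaged above)
systemctl enable openstack-ironic-api.service openstack-ironic-conductor.service
systemctl start openstack-ironic-api.service openstack-ironic-conductor.service
# confirm the rootwrap-only sudo rule from /etc/sudoers.d/ironic is active
sudo -l -U ironic | grep ironic-rootwrap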
+ +%files -n python-ironic-tests +%{python2_sitelib}/ironic/tests +%{python2_sitelib}/ironic_tempest_plugin +%{python2_sitelib}/%{service}_tests.egg-info + +%changelog +* Fri Nov 03 2017 RDO 1:9.1.2-1 +- Update to 9.1.2 + +* Mon Sep 25 2017 rdo-trunk 1:9.1.1-1 +- Update to 9.1.1 + +* Thu Aug 24 2017 Alfredo Moralejo 1:9.1.0-1 +- Update to 9.1.0 + diff --git a/openstack/openstack-magnum-ui/centos/build_srpm.data b/openstack/openstack-magnum-ui/centos/build_srpm.data new file mode 100644 index 00000000..66c36443 --- /dev/null +++ b/openstack/openstack-magnum-ui/centos/build_srpm.data @@ -0,0 +1,6 @@ +TAR_NAME="magnum-ui" +SRC_DIR="$CGCS_BASE/git/magnum-ui" + +TIS_BASE_SRCREV=0b9fc50aada1a3e214acaad1204b48c96a549e5f +TIS_PATCH_VER=1 + diff --git a/openstack/openstack-magnum-ui/centos/openstack-magnum-ui.spec b/openstack/openstack-magnum-ui/centos/openstack-magnum-ui.spec new file mode 100644 index 00000000..2219ce5b --- /dev/null +++ b/openstack/openstack-magnum-ui/centos/openstack-magnum-ui.spec @@ -0,0 +1,93 @@ +%{!?upstream_version: %global upstream_version %{version}%{?milestone}} + +%global library magnum-ui +%global module magnum_ui + +Name: openstack-%{library} +Version: 3.0.0 +Release: 1%{?_tis_dist}.%{tis_patch_ver} +Summary: OpenStack Magnum UI Horizon plugin +License: ASL 2.0 +URL: http://launchpad.net/%{library}/ + +Source0: https://tarballs.openstack.org/%{library}/%{library}-%{upstream_version}.tar.gz + +BuildArch: noarch + +BuildRequires: python2-devel +BuildRequires: python-pbr +BuildRequires: python-setuptools +BuildRequires: git + +Requires: python-pbr +Requires: python-babel +Requires: python-magnumclient >= 2.0.0 +Requires: openstack-dashboard >= 8.0.0 +Requires: python-django >= 1.8 +Requires: python-django-babel +Requires: python-django-compressor >= 2.0 +Requires: python-django-openstack-auth >= 3.5.0 +Requires: python-django-pyscss >= 2.0.2 + +%description +OpenStack Magnum UI Horizon plugin + +# Documentation package +%package -n python-%{library}-doc +Summary: OpenStack example library documentation + +BuildRequires: python-sphinx +BuildRequires: python-django +BuildRequires: python-django-nose +BuildRequires: openstack-dashboard +BuildRequires: python-openstackdocstheme +BuildRequires: python-magnumclient +BuildRequires: python-mock +BuildRequires: python-mox3 + +%description -n python-%{library}-doc +OpenStack Magnum UI Horizon plugin documentation + +This package contains the documentation. 
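Horizon discovers the Magnum panels purely through the _137*.py hook files that the %install step below copies into openstack_dashboard/local/enabled; no Horizon code is modified. A hedged sketch of verifying the wiring after installation, assuming the default CentOS dashboard layout:

# the three hook files installed by the spec below
ls /usr/share/openstack-dashboard/openstack_dashboard/local/enabled/_137*.py
# regenerate static assets and restart Apache so Horizon re-reads its settings
python /usr/share/openstack-dashboard/manage.py collectstatic --noinput
systemctl restart httpd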
+
+%prep
+%autosetup -n %{library}-%{upstream_version} -S git
+# Let's handle dependencies ourselves
+rm -f *requirements.txt
+
+
+%build
+export PBR_VERSION=%{version}
+%{__python2} setup.py build
+
+# generate html docs
+export PYTHONPATH=/usr/share/openstack-dashboard
+#%{__python2} setup.py build_sphinx -b html
+# remove the sphinx-build leftovers
+#rm -rf doc/build/html/.{doctrees,buildinfo}
+
+%install
+export PBR_VERSION=%{version}
+%{__python2} setup.py install --skip-build --root %{buildroot}
+
+# Move config to horizon
+install -p -D -m 640 %{module}/enabled/_1370_project_container_infra_panel_group.py %{buildroot}%{_datadir}/openstack-dashboard/openstack_dashboard/local/enabled/_1370_project_container_infra_panel_group.py
+install -p -D -m 640 %{module}/enabled/_1371_project_container_infra_clusters_panel.py %{buildroot}%{_datadir}/openstack-dashboard/openstack_dashboard/local/enabled/_1371_project_container_infra_clusters_panel.py
+install -p -D -m 640 %{module}/enabled/_1372_project_container_infra_cluster_templates_panel.py %{buildroot}%{_datadir}/openstack-dashboard/openstack_dashboard/local/enabled/_1372_project_container_infra_cluster_templates_panel.py
+
+
+%files
+%license LICENSE
+%{python2_sitelib}/%{module}
+%{python2_sitelib}/*.egg-info
+%{_datadir}/openstack-dashboard/openstack_dashboard/local/enabled/_137*
+
+%files -n python-%{library}-doc
+%license LICENSE
+#%doc doc/build/html README.rst
+
+
+%changelog
+* Thu Aug 24 2017 Alfredo Moralejo 3.0.0-1
+- Update to 3.0.0
+
diff --git a/openstack/openstack-magnum/centos/build_srpm.data b/openstack/openstack-magnum/centos/build_srpm.data
new file mode 100644
index 00000000..ca221269
--- /dev/null
+++ b/openstack/openstack-magnum/centos/build_srpm.data
@@ -0,0 +1,6 @@
+TAR_NAME="magnum"
+SRC_DIR="$CGCS_BASE/git/magnum"
+COPY_LIST="$FILES_BASE/*"
+
+TIS_BASE_SRCREV=ca4b29087a4af00060870519e5897348ccc61161
+TIS_PATCH_VER=1
diff --git a/openstack/openstack-magnum/centos/files/openstack-magnum-api.service b/openstack/openstack-magnum/centos/files/openstack-magnum-api.service
new file mode 100644
index 00000000..d4275f0e
--- /dev/null
+++ b/openstack/openstack-magnum/centos/files/openstack-magnum-api.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack Magnum API Service
+After=syslog.target network.target
+
+[Service]
+Type=simple
+User=magnum
+ExecStart=/usr/bin/magnum-api
+PrivateTmp=true
+NotifyAccess=all
+KillMode=process
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/openstack-magnum/centos/files/openstack-magnum-conductor.service b/openstack/openstack-magnum/centos/files/openstack-magnum-conductor.service
new file mode 100644
index 00000000..c6848e5b
--- /dev/null
+++ b/openstack/openstack-magnum/centos/files/openstack-magnum-conductor.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=OpenStack Magnum Conductor Service
+After=syslog.target network.target qpidd.service mysqld.service tgtd.service
+
+[Service]
+Type=simple
+User=magnum
+ExecStart=/usr/bin/magnum-conductor
+PrivateTmp=true
+NotifyAccess=all
+KillMode=process
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/openstack-magnum/centos/openstack-magnum.spec b/openstack/openstack-magnum/centos/openstack-magnum.spec
new file mode 100644
index 00000000..96323267
--- /dev/null
+++ b/openstack/openstack-magnum/centos/openstack-magnum.spec
@@ -0,0 +1,325 @@
+%{!?upstream_version: %global upstream_version %{version}%{?milestone}}
+%global with_doc %{!?_without_doc:1}%{?_without_doc:0}
+%global
service magnum + +Name: openstack-%{service} +Summary: Container Management project for OpenStack +Version: 5.0.1 +Release: 1%{?_tis_dist}.%{tis_patch_ver} +License: ASL 2.0 +URL: https://github.com/openstack/magnum.git + +Source0: https://tarballs.openstack.org/%{service}/%{service}-%{version}.tar.gz + +Source2: %{name}-api.service +Source3: %{name}-conductor.service + +BuildArch: noarch + +BuildRequires: git +BuildRequires: python2-devel +BuildRequires: python-pbr +BuildRequires: python-setuptools +BuildRequires: python-werkzeug +BuildRequires: systemd-units +# Required for config file generation +BuildRequires: python-pycadf +BuildRequires: python-osprofiler + +Requires: %{name}-common = %{version}-%{release} +Requires: %{name}-conductor = %{version}-%{release} +Requires: %{name}-api = %{version}-%{release} + +%description +Magnum is an OpenStack project which offers container orchestration engines +for deploying and managing containers as first class resources in OpenStack. + +%package -n python-%{service} +Summary: Magnum Python libraries + +Requires: python-pbr +Requires: python-babel +Requires: PyYAML +Requires: python-sqlalchemy +Requires: python-wsme +Requires: python-webob +Requires: python-alembic +Requires: python-decorator +Requires: python-docker >= 2.0.0 +Requires: python-enum34 +Requires: python-eventlet +Requires: python-iso8601 +Requires: python-jsonpatch +Requires: python-keystonemiddleware >= 4.12.0 +Requires: python-netaddr + +Requires: python-oslo-concurrency >= 3.8.0 +Requires: python-oslo-config >= 2:4.0.0 +Requires: python-oslo-context >= 2.14.0 +Requires: python-oslo-db >= 4.24.0 +Requires: python-oslo-i18n >= 2.1.0 +Requires: python-oslo-log >= 3.22.0 +Requires: python-oslo-messaging >= 5.24.2 +Requires: python-oslo-middleware >= 3.27.0 +Requires: python-oslo-policy >= 1.23.0 +Requires: python-oslo-serialization >= 1.10.0 +Requires: python-oslo-service >= 1.10.0 +Requires: python-oslo-utils >= 3.20.0 +Requires: python-oslo-versionedobjects >= 1.17.0 +Requires: python-oslo-reports >= 0.6.0 +Requires: python-osprofiler + +Requires: python-pycadf +Requires: python-pecan + +Requires: python-barbicanclient >= 4.0.0 +Requires: python-glanceclient >= 1:2.8.0 +Requires: python-heatclient >= 1.6.1 +Requires: python-neutronclient >= 6.3.0 +Requires: python-novaclient >= 1:9.0.0 +Requires: python-kubernetes +Requires: python-keystoneclient >= 1:3.8.0 +Requires: python-keystoneauth1 >= 3.1.0 + +Requires: python-cliff >= 2.8.0 +Requires: python-requests +Requires: python-six +Requires: python-stevedore >= 1.20.0 +Requires: python-taskflow +Requires: python-cryptography +Requires: python-werkzeug +Requires: python-marathon + + +%description -n python-%{service} +Magnum is an OpenStack project which offers container orchestration engines +for deploying and managing containers as first class resources in OpenStack. 
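Several of the constraints above carry an RPM epoch (the "1:" in python-glanceclient >= 1:2.8.0); epoch takes precedence over version and release when RPM compares candidates, so it has to be stated explicitly in the spec even though the upstream requirements only pin versions. A quick inspection sketch, assuming the client packages and the built python-magnum package are installed:

# show the full epoch:version-release string RPM actually compares
rpm -q --qf '%{EPOCH}:%{VERSION}-%{RELEASE}\n' python-glanceclient
# list the versioned dependencies recorded for the built library package
rpm -q --requires python-magnum | grep '>='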
+ +%package common +Summary: Magnum common + +Requires: python-%{service} = %{version}-%{release} + +Requires(pre): shadow-utils + +%description common +Components common to all OpenStack Magnum services + +%package conductor +Summary: The Magnum conductor + +Requires: %{name}-common = %{version}-%{release} + +Requires(post): systemd +Requires(preun): systemd +Requires(postun): systemd + +%description conductor +OpenStack Magnum Conductor + +%package api +Summary: The Magnum API + +Requires: %{name}-common = %{version}-%{release} + +Requires(post): systemd +Requires(preun): systemd +Requires(postun): systemd + +%description api +OpenStack-native ReST API to the Magnum Engine + +%if 0%{?with_doc} +%package -n %{name}-doc +Summary: Documentation for OpenStack Magnum + +Requires: python-%{service} = %{version}-%{release} + +BuildRequires: python-sphinx +BuildRequires: python-openstackdocstheme +BuildRequires: python-stevedore +BuildRequires: graphviz + +%description -n %{name}-doc +Magnum is an OpenStack project which offers container orchestration engines +for deploying and managing containers as first class resources in OpenStack. + +This package contains documentation files for Magnum. +%endif + +# tests +%package -n python-%{service}-tests +Summary: Tests for OpenStack Magnum + +Requires: python-%{service} = %{version}-%{release} + +BuildRequires: python-fixtures +BuildRequires: python-hacking +BuildRequires: python-mock +BuildRequires: python-oslotest +BuildRequires: python-os-testr +BuildRequires: python-subunit +BuildRequires: python-testrepository +BuildRequires: python-testscenarios +BuildRequires: python-testtools +BuildRequires: python-tempest +BuildRequires: openstack-macros + +# copy-paste from runtime Requires +BuildRequires: python-babel +BuildRequires: PyYAML +BuildRequires: python-sqlalchemy +BuildRequires: python-wsme +BuildRequires: python-webob +BuildRequires: python-alembic +BuildRequires: python-decorator +BuildRequires: python-docker >= 2.0.0 +BuildRequires: python-enum34 +BuildRequires: python-eventlet +BuildRequires: python-iso8601 +BuildRequires: python-jsonpatch +BuildRequires: python-keystonemiddleware +BuildRequires: python-netaddr + +BuildRequires: python-oslo-concurrency +BuildRequires: python-oslo-config +BuildRequires: python-oslo-context +BuildRequires: python-oslo-db +BuildRequires: python-oslo-i18n +BuildRequires: python-oslo-log +BuildRequires: python-oslo-messaging +BuildRequires: python-oslo-middleware +BuildRequires: python-oslo-policy +BuildRequires: python-oslo-serialization +BuildRequires: python-oslo-service +BuildRequires: python-oslo-utils +BuildRequires: python-oslo-versionedobjects +BuildRequires: python2-oslo-versionedobjects-tests +BuildRequires: python-oslo-reports + +BuildRequires: python-pecan + +BuildRequires: python-barbicanclient +BuildRequires: python-glanceclient +BuildRequires: python-heatclient +BuildRequires: python-neutronclient +BuildRequires: python-novaclient +BuildRequires: python-kubernetes +BuildRequires: python-keystoneclient + +BuildRequires: python-requests +BuildRequires: python-six +BuildRequires: python-stevedore +BuildRequires: python-taskflow +BuildRequires: python-cryptography +BuildRequires: python-marathon + +%description -n python-%{service}-tests +Magnum is an OpenStack project which offers container orchestration engines +for deploying and managing containers as first class resources in OpenStack. 
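This spec does not ship a static sample configuration; the %install section further down generates /etc/magnum/magnum.conf with oslo-config-generator from the generator config in the source tree. The same step can be reproduced standalone; a sketch, assuming a magnum source checkout as the working directory:

# regenerate the sample configuration exactly as %install does below
oslo-config-generator \
    --config-file etc/magnum/magnum-config-generator.conf \
    --output-file /tmp/magnum.conf.sample
chmod 640 /tmp/magnum.conf.sample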
+ +%prep +%autosetup -n %{service}-%{upstream_version} -S git + +# Let's handle dependencies ourselves +rm -rf {test-,}requirements{-bandit,}.txt tools/{pip,test}-requires + +# Remove tests in contrib +find contrib -name tests -type d | xargs rm -rf + +%build +export PBR_VERSION=%{version} +%{__python2} setup.py build + +%install +export PBR_VERSION=%{version} +%{__python2} setup.py install -O1 --skip-build --root=%{buildroot} + +# Create fake egg-info for the tempest plugin +%py2_entrypoint %{service} %{service} + +# docs generation requires everything to be installed first +%if 0%{?with_doc} +%{__python2} setup.py build_sphinx -b html +# Fix hidden-file-or-dir warnings +rm -fr doc/build/html/.doctrees doc/build/html/.buildinfo +%endif + +mkdir -p %{buildroot}%{_localstatedir}/log/%{service}/ +mkdir -p %{buildroot}%{_localstatedir}/run/%{service}/ + +# install systemd unit files +install -p -D -m 644 %{SOURCE2} %{buildroot}%{_unitdir}/%{name}-api.service +install -p -D -m 644 %{SOURCE3} %{buildroot}%{_unitdir}/%{name}-conductor.service + +mkdir -p %{buildroot}%{_sharedstatedir}/%{service}/ +mkdir -p %{buildroot}%{_sharedstatedir}/%{service}/certificates/ +mkdir -p %{buildroot}%{_sysconfdir}/%{service}/ + +oslo-config-generator --config-file etc/magnum/magnum-config-generator.conf --output-file %{buildroot}%{_sysconfdir}/%{service}/magnum.conf +chmod 640 %{buildroot}%{_sysconfdir}/%{service}/magnum.conf +install -p -D -m 640 etc/magnum/policy.json %{buildroot}%{_sysconfdir}/%{service} +install -p -D -m 640 etc/magnum/api-paste.ini %{buildroot}%{_sysconfdir}/%{service} + +%check +%{__python2} setup.py test || true + +%files -n python-%{service} +%license LICENSE +%{python2_sitelib}/%{service} +%{python2_sitelib}/%{service}-*.egg-info +%exclude %{python2_sitelib}/%{service}/tests + + +%files common +%{_bindir}/magnum-db-manage +%{_bindir}/magnum-driver-manage +%license LICENSE +%dir %attr(0750,%{service},root) %{_localstatedir}/log/%{service} +%dir %attr(0755,%{service},root) %{_localstatedir}/run/%{service} +%dir %attr(0755,%{service},root) %{_sharedstatedir}/%{service} +%dir %attr(0755,%{service},root) %{_sharedstatedir}/%{service}/certificates +%dir %attr(0755,%{service},root) %{_sysconfdir}/%{service} +%config(noreplace) %attr(-, root, %{service}) %{_sysconfdir}/%{service}/magnum.conf +%config(noreplace) %attr(-, root, %{service}) %{_sysconfdir}/%{service}/policy.json +%config(noreplace) %attr(-, root, %{service}) %{_sysconfdir}/%{service}/api-paste.ini +%pre common +# 1870:1870 for magnum - rhbz#845078 +getent group %{service} >/dev/null || groupadd -r --gid 1870 %{service} +getent passwd %{service} >/dev/null || \ +useradd --uid 1870 -r -g %{service} -d %{_sharedstatedir}/%{service} -s /sbin/nologin \ +-c "OpenStack Magnum Daemons" %{service} +exit 0 + + +%files conductor +%doc README.rst +%license LICENSE +%{_bindir}/magnum-conductor +%{_unitdir}/%{name}-conductor.service + + +%files api +%doc README.rst +%license LICENSE +%{_bindir}/magnum-api +%{_unitdir}/%{name}-api.service + + +%if 0%{?with_doc} +%files -n %{name}-doc +%license LICENSE +%doc doc/build/html +%endif + +%files -n python-%{service}-tests +%license LICENSE +%{python2_sitelib}/%{service}/tests +%{python2_sitelib}/%{service}_tests.egg-info + +%changelog +* Mon Aug 28 2017 rdo-trunk 5.0.1-1 +- Update to 5.0.1 + +* Thu Aug 24 2017 Alfredo Moralejo 5.0.0-1 +- Update to 5.0.0 diff --git a/openstack/openstack-murano-ui/centos/build_srpm.data b/openstack/openstack-murano-ui/centos/build_srpm.data new file mode 100644 index 
00000000..322d1eed
--- /dev/null
+++ b/openstack/openstack-murano-ui/centos/build_srpm.data
@@ -0,0 +1,5 @@
+TAR_NAME="murano-dashboard"
+SRC_DIR="$CGCS_BASE/git/murano-dashboard"
+
+TIS_BASE_SRCREV=c950e248c2dfdc7a040d6984d84ed19c82a04e7d
+TIS_PATCH_VER=1
diff --git a/openstack/openstack-murano-ui/centos/openstack-murano-ui.spec b/openstack/openstack-murano-ui/centos/openstack-murano-ui.spec
new file mode 100644
index 00000000..723cdaa4
--- /dev/null
+++ b/openstack/openstack-murano-ui/centos/openstack-murano-ui.spec
@@ -0,0 +1,147 @@
+%{!?upstream_version: %global upstream_version %{version}%{?milestone}}
+%global pypi_name murano-dashboard
+%global mod_name muranodashboard
+
+Name: openstack-murano-ui
+Version: 4.0.0
+Release: 1%{?_tis_dist}.%{tis_patch_ver}
+Summary: The UI component for the OpenStack murano service
+Group: Applications/Communications
+License: ASL 2.0
+URL: https://github.com/openstack/%{pypi_name}
+Source0: https://tarballs.openstack.org/%{pypi_name}/%{pypi_name}-%{upstream_version}.tar.gz
+#
+
+BuildRequires: gettext
+BuildRequires: git
+BuildRequires: openstack-dashboard
+BuildRequires: python-beautifulsoup4
+BuildRequires: python-castellan
+BuildRequires: python-devel
+BuildRequires: python-django-formtools
+BuildRequires: python-django-nose
+BuildRequires: python-mock
+BuildRequires: python-mox3
+BuildRequires: python-muranoclient
+BuildRequires: python-nose
+BuildRequires: python-openstack-nose-plugin
+BuildRequires: python-oslo-config >= 2:3.14.0
+BuildRequires: python-pbr >= 1.6
+BuildRequires: python-semantic-version
+BuildRequires: python-setuptools
+BuildRequires: python-testtools
+BuildRequires: python-yaql >= 1.1.0
+BuildRequires: tsconfig
+Requires: openstack-dashboard
+Requires: PyYAML >= 3.10
+Requires: python-babel >= 2.3.4
+Requires: python-beautifulsoup4
+Requires: python-castellan >= 0.7.0
+Requires: python-django >= 1.8
+Requires: python-django-babel
+Requires: python-django-formtools
+Requires: python-iso8601 >= 0.1.11
+Requires: python-muranoclient >= 0.8.2
+Requires: python-oslo-log >= 3.22.0
+Requires: python-pbr
+Requires: python-semantic-version
+Requires: python-six >= 1.9.0
+Requires: python-yaql >= 1.1.0
+Requires: pytz
+BuildArch: noarch
+
+%description
+Murano Dashboard
+System package - murano-dashboard
+Python package - murano-dashboard
+Murano Dashboard is an extension for OpenStack Dashboard that provides a UI
+for Murano. With murano-dashboard, a user is able to easily manage and control
+an application catalog, running applications and created environments along
+with all other OpenStack resources.
+
+%package doc
+Summary: Documentation for OpenStack murano dashboard
+BuildRequires: python-sphinx
+BuildRequires: python-openstackdocstheme
+BuildRequires: python-reno
+
+%description doc
+Murano Dashboard is an extension for OpenStack Dashboard that provides a UI
+for Murano. With murano-dashboard, a user is able to easily manage and control
+an application catalog, running applications and created environments along
+with all other OpenStack resources.
+This package contains the documentation.
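The %post scriptlet later in this spec points Horizon's METADATA_CACHE_DIR at /var/cache/murano-dashboard with a grep-then-edit pattern, so repeated upgrades never append the setting twice. The same pattern, reduced to a standalone sketch:

SETTINGS='/etc/openstack-dashboard/local_settings'
if grep -Eq '^METADATA_CACHE_DIR=' "$SETTINGS"; then
    # the setting exists: rewrite it in place
    sed -i 's#^METADATA_CACHE_DIR=.*#METADATA_CACHE_DIR="/var/cache/murano-dashboard"#' "$SETTINGS"
else
    # first install: append it
    echo 'METADATA_CACHE_DIR="/var/cache/murano-dashboard"' >> "$SETTINGS"
fi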
+
+%prep
+%autosetup -n %{pypi_name}-%{upstream_version} -S git
+# Let RPM handle the dependencies
+rm -rf {test-,}requirements.txt tools/{pip,test}-requires
+
+# disable warning-is-error; this project uses intersphinx in its docs,
+# so some warnings are generated in a network-isolated build environment
+# such as koji
+sed -i 's/^warning-is-error.*/warning-is-error = 0/g' setup.cfg
+
+%build
+export PBR_VERSION=%{version}
+%py2_build
+# Generate i18n files
+pushd build/lib/%{mod_name}
+django-admin compilemessages
+popd
+# generate html docs
+export OSLO_PACKAGE_VERSION=%{upstream_version}
+%{__python2} setup.py build_sphinx -b html
+# remove the sphinx-build leftovers
+rm -rf doc/build/html/.{doctrees,buildinfo}
+
+%install
+export PBR_VERSION=%{version}
+%py2_install
+mkdir -p %{buildroot}%{_datadir}/openstack-dashboard/openstack_dashboard/local/enabled
+mkdir -p %{buildroot}%{_datadir}/openstack-dashboard/openstack_dashboard/local/local_settings.d
+mkdir -p %{buildroot}/var/cache/murano-dashboard
+# Enable Horizon plugin for murano-dashboard
+cp %{_builddir}/%{pypi_name}-%{upstream_version}/muranodashboard/local/local_settings.d/_50_murano.py %{buildroot}%{_datadir}/openstack-dashboard/openstack_dashboard/local/local_settings.d/
+cp %{_builddir}/%{pypi_name}-%{upstream_version}/muranodashboard/local/enabled/_*.py %{buildroot}%{_datadir}/openstack-dashboard/openstack_dashboard/local/enabled/
+
+# install the policy file; without it the Horizon side bar disappears (a page refresh fixes it, but that is annoying)
+install -p -D -m 644 muranodashboard/conf/murano_policy.json %{buildroot}%{_sysconfdir}/openstack-dashboard/murano_policy.json
+
+%check
+export PYTHONPATH="%{_datadir}/openstack-dashboard:%{python2_sitearch}:%{python2_sitelib}:%{buildroot}%{python2_sitelib}"
+#%{__python2} manage.py test muranodashboard --settings=muranodashboard.tests.settings
+
+%post
+HORIZON_SETTINGS='/etc/openstack-dashboard/local_settings'
+if grep -Eq '^METADATA_CACHE_DIR=' $HORIZON_SETTINGS; then
+ sed -i '/^METADATA_CACHE_DIR=/{s#.*#METADATA_CACHE_DIR="/var/cache/murano-dashboard"#}' $HORIZON_SETTINGS
+else
+ sed -i '$aMETADATA_CACHE_DIR="/var/cache/murano-dashboard"' $HORIZON_SETTINGS
+fi
+%systemd_postun_with_restart httpd.service
+
+%postun
+%systemd_postun_with_restart httpd.service
+
+%files
+%license LICENSE
+%doc README.rst
+%{python2_sitelib}/muranodashboard
+%{python2_sitelib}/murano_dashboard*.egg-info
+%{_datadir}/openstack-dashboard/openstack_dashboard/local/local_settings.d/*
+%{_datadir}/openstack-dashboard/openstack_dashboard/local/enabled/*
+%dir %attr(755, apache, apache) /var/cache/murano-dashboard
+%{_sysconfdir}/openstack-dashboard/murano_policy.json
+
+%files doc
+%license LICENSE
+%doc doc/build/html
+
+%changelog
+* Wed Aug 30 2017 rdo-trunk 4.0.0-1
+- Update to 4.0.0
+
+* Thu Aug 24 2017 Alfredo Moralejo 4.0.0-0.1.0rc2
+- Update to 4.0.0.0rc2
+
diff --git a/openstack/openstack-murano/centos/build_srpm.data b/openstack/openstack-murano/centos/build_srpm.data
new file mode 100644
index 00000000..f55b5c88
--- /dev/null
+++ b/openstack/openstack-murano/centos/build_srpm.data
@@ -0,0 +1,6 @@
+TAR_NAME="murano"
+SRC_DIR="$CGCS_BASE/git/murano"
+COPY_LIST="$FILES_BASE/*"
+
+TIS_BASE_SRCREV=de53ba8f9a97ad30c492063d9cc497ca56093e38
+TIS_PATCH_VER=1
diff --git a/openstack/openstack-murano/centos/files/openstack-murano-api.service b/openstack/openstack-murano/centos/files/openstack-murano-api.service
new file mode 100644
index 00000000..dc7f88f2
--- /dev/null
+++
b/openstack/openstack-murano/centos/files/openstack-murano-api.service @@ -0,0 +1,12 @@ +[Unit] +Description=OpenStack Murano API Service +After=syslog.target network.target mysqld.service + +[Service] +Type=simple +User=murano +ExecStart=/usr/bin/murano-api --config-file /etc/murano/murano.conf +Restart=on-failure + +[Install] +WantedBy=multi-user.target diff --git a/openstack/openstack-murano/centos/files/openstack-murano-cf-api.service b/openstack/openstack-murano/centos/files/openstack-murano-cf-api.service new file mode 100644 index 00000000..d406c5fb --- /dev/null +++ b/openstack/openstack-murano/centos/files/openstack-murano-cf-api.service @@ -0,0 +1,12 @@ +[Unit] +Description=OpenStack Murano Cloud Foundry API Service +After=syslog.target network.target mysqld.service + +[Service] +Type=simple +User=murano +ExecStart=/usr/bin/murano-cfapi --config-file /etc/murano/murano.conf +Restart=on-failure + +[Install] +WantedBy=multi-user.target diff --git a/openstack/openstack-murano/centos/files/openstack-murano-engine.service b/openstack/openstack-murano/centos/files/openstack-murano-engine.service new file mode 100644 index 00000000..132a0693 --- /dev/null +++ b/openstack/openstack-murano/centos/files/openstack-murano-engine.service @@ -0,0 +1,12 @@ +[Unit] +Description=Openstack Murano Engine Service +After=syslog.target network.target mysqld.service openstack-keystone.service + +[Service] +Type=simple +User=murano +ExecStart=/usr/bin/murano-engine --config-file /etc/murano/murano.conf +Restart=on-failure + +[Install] +WantedBy=multi-user.target diff --git a/openstack/openstack-murano/centos/openstack-murano.spec b/openstack/openstack-murano/centos/openstack-murano.spec new file mode 100644 index 00000000..273ca840 --- /dev/null +++ b/openstack/openstack-murano/centos/openstack-murano.spec @@ -0,0 +1,290 @@ +%global pypi_name murano + +%global with_doc %{!?_without_doc:1}%{?_without_doc:0} +%{!?upstream_version: %global upstream_version %{version}%{?milestone}} + +%if 0%{?fedora} +%global with_python3 1 +%{!?python3_shortver: %global python3_shortver %(%{__python3} -c 'import sys; print(str(sys.version_info.major) + "." 
+ str(sys.version_info.minor))')} +%endif + +Name: openstack-%{pypi_name} +Version: 4.0.0 +Release: 1%{?_tis_dist}.%{tis_patch_ver} +Summary: OpenStack Murano Service + +License: ASL 2.0 +URL: https://pypi.python.org/pypi/murano +Source0: https://tarballs.openstack.org/%{pypi_name}/%{pypi_name}-%{upstream_version}.tar.gz +# + +Source1: openstack-murano-api.service +Source2: openstack-murano-engine.service +Source4: openstack-murano-cf-api.service + +BuildArch: noarch + +BuildRequires: git +BuildRequires: python2-devel +BuildRequires: python-setuptools +BuildRequires: python-jsonschema >= 2.0.0 +BuildRequires: python-keystonemiddleware +BuildRequires: python-oslo-config +BuildRequires: python-oslo-db +BuildRequires: python-oslo-i18n +BuildRequires: python-oslo-log +BuildRequires: python-oslo-messaging +BuildRequires: python-oslo-middleware +BuildRequires: python-oslo-policy +BuildRequires: python-oslo-serialization +BuildRequires: python-oslo-service +BuildRequires: python-openstackdocstheme +BuildRequires: python-pbr >= 2.0.0 +BuildRequires: python-routes >= 2.3.1 +BuildRequires: python-sphinx +BuildRequires: python-sphinxcontrib-httpdomain +BuildRequires: python-castellan +BuildRequires: pyOpenSSL +BuildRequires: systemd +BuildRequires: openstack-macros +# Required to compile translation files +BuildRequires: python-babel + +%description +Murano Project introduces an application catalog service + +# MURANO-COMMON +%package common +Summary: Murano common +Requires: python-alembic >= 0.8.7 +Requires: python-babel >= 2.3.4 +Requires: python-debtcollector >= 1.2.0 +Requires: python-eventlet >= 0.18.2 +Requires: python-iso8601 >= 0.1.9 +Requires: python-jsonpatch >= 1.1 +Requires: python-jsonschema >= 2.0.0 +Requires: python-keystonemiddleware >= 4.12.0 +Requires: python-keystoneauth1 >= 3.1.0 +Requires: python-kombu >= 1:4.0.0 +Requires: python-netaddr >= 0.7.13 +Requires: python-oslo-concurrency >= 3.8.0 +Requires: python-oslo-config >= 2:4.0.0 +Requires: python-oslo-context >= 2.14.0 +Requires: python-oslo-db >= 4.24.0 +Requires: python-oslo-i18n >= 2.1.0 +Requires: python-oslo-log >= 3.22.0 +Requires: python-oslo-messaging >= 5.24.2 +Requires: python-oslo-middleware >= 3.27.0 +Requires: python-oslo-policy >= 1.23.0 +Requires: python-oslo-serialization >= 1.10.0 +Requires: python-oslo-service >= 1.10.0 +Requires: python-oslo-utils >= 3.20.0 +Requires: python-paste +Requires: python-paste-deploy >= 1.5.0 +Requires: python-pbr >= 2.0.0 +Requires: python-psutil >= 3.2.2 +Requires: python-congressclient >= 1.3.0 +Requires: python-heatclient >= 1.6.1 +Requires: python-keystoneclient >= 1:3.8.0 +Requires: python-mistralclient >= 3.1.0 +Requires: python-muranoclient >= 0.8.2 +Requires: python-neutronclient >= 6.3.0 +Requires: PyYAML >= 3.10 +Requires: python-routes >= 2.3.1 +Requires: python-semantic_version >= 2.3.1 +Requires: python-six >= 1.9.0 +Requires: python-stevedore >= 1.20.0 +Requires: python-sqlalchemy >= 1.0.10 +Requires: python-tenacity >= 3.2.1 +Requires: python-webob >= 1.7.1 +Requires: python-yaql >= 1.1.0 +Requires: python-castellan >= 0.7.0 +Requires: %{name}-doc = %{version}-%{release} + +%description common +Components common to all OpenStack Murano services + +# MURANO-ENGINE +%package engine +Summary: The Murano engine +Group: Applications/System +Requires: %{name}-common = %{version}-%{release} + +%description engine +OpenStack Murano Engine daemon + +# MURANO-API +%package api +Summary: The Murano API +Group: Applications/System +Requires: %{name}-common = 
%{version}-%{release} + +%description api +OpenStack REST API to the Murano Engine + +# MURANO-CF-API +%package cf-api +Summary: The Murano Cloud Foundry API +Group: System Environment/Base +Requires: %{name}-common = %{version}-%{release} + +%description cf-api +OpenStack REST API for the Murano Cloud Foundry integration + +%if 0%{?with_doc} +%package doc +Summary: Documentation for OpenStack Murano services + +%description doc +This package contains documentation files for Murano. +%endif + +%package -n python-murano-tests +Summary: Murano tests +Requires: %{name}-common = %{version}-%{release} + +%description -n python-murano-tests +This package contains the murano test files. + +%prep +%autosetup -S git -n %{pypi_name}-%{upstream_version} + +# Remove the requirements file so that pbr hooks don't add it +# to distutils requires_dist config +rm -rf {test-,}requirements.txt tools/{pip,test}-requires + +%build +export PBR_VERSION=%{version} +%{__python2} setup.py build +# Generate i18n files +%{__python2} setup.py compile_catalog -d build/lib/%{pypi_name}/locale +# Generate sample config and add the current directory to PYTHONPATH so +# oslo-config-generator doesn't skip murano's entry points. +PYTHONPATH=. oslo-config-generator --config-file=./etc/oslo-config-generator/murano.conf +PYTHONPATH=. oslo-config-generator --config-file=./etc/oslo-config-generator/murano-cfapi.conf + +%install +export PBR_VERSION=%{version} +%{__python2} setup.py install -O1 --skip-build --root %{buildroot} + +# Create fake egg-info for the tempest plugin +# TODO switch to %{service} everywhere as in openstack-example.spec +%global service murano +%py2_entrypoint %{service} %{service} + +# DOCs + +pushd doc + +%if 0%{?with_doc} +SPHINX_DEBUG=1 sphinx-build -b html source build/html +# Fix hidden-file-or-dir warnings +rm -fr build/html/.doctrees build/html/.buildinfo +%endif + +popd + +mkdir -p %{buildroot}/var/log/murano +mkdir -p %{buildroot}/var/run/murano +mkdir -p %{buildroot}/var/cache/murano/meta +mkdir -p %{buildroot}/etc/murano/ +# install systemd unit files +install -p -D -m 644 %{SOURCE1} %{buildroot}%{_unitdir}/murano-api.service +install -p -D -m 644 %{SOURCE2} %{buildroot}%{_unitdir}/murano-engine.service +install -p -D -m 644 %{SOURCE4} %{buildroot}%{_unitdir}/murano-cf-api.service +# install default config files +cd %{_builddir}/%{pypi_name}-%{upstream_version} && oslo-config-generator --config-file ./etc/oslo-config-generator/murano.conf --output-file %{_builddir}/%{pypi_name}-%{upstream_version}/etc/murano/murano.conf.sample +install -p -D -m 640 %{_builddir}/%{pypi_name}-%{upstream_version}/etc/murano/murano.conf.sample %{buildroot}%{_sysconfdir}/murano/murano.conf +install -p -D -m 640 %{_builddir}/%{pypi_name}-%{upstream_version}/etc/murano/netconfig.yaml.sample %{buildroot}%{_sysconfdir}/murano/netconfig.yaml.sample +install -p -D -m 640 %{_builddir}/%{pypi_name}-%{upstream_version}/etc/murano/murano-paste.ini %{buildroot}%{_sysconfdir}/murano/murano-paste.ini +install -p -D -m 640 %{_builddir}/%{pypi_name}-%{upstream_version}/etc/murano/logging.conf.sample %{buildroot}%{_sysconfdir}/murano/logging.conf +install -p -D -m 640 %{_builddir}/%{pypi_name}-%{upstream_version}/etc/murano/murano-cfapi.conf.sample %{buildroot}%{_sysconfdir}/murano/murano-cfapi.conf +install -p -D -m 640 %{_builddir}/%{pypi_name}-%{upstream_version}/etc/murano/murano-cfapi-paste.ini %{buildroot}%{_sysconfdir}/murano/murano-cfapi-paste.ini + +# Creating murano core library archive (murano meta packages written in muranoPL with
execution plan main minimal logic) +pushd meta/io.murano +zip -r %{buildroot}%{_localstatedir}/cache/murano/meta/io.murano.zip . +popd +# Creating murano core library archive(murano meta packages written in muranoPL with execution plan main minimal logic) +pushd meta/io.murano.applications +zip -r %{buildroot}%{_localstatedir}/cache/murano/meta/io.murano.applications.zip . +popd +# Install i18n .mo files (.po and .pot are not required) +install -d -m 755 %{buildroot}%{_datadir} +rm -f %{buildroot}%{python2_sitelib}/%{pypi_name}/locale/*/LC_*/%{pypi_name}*po +rm -f %{buildroot}%{python2_sitelib}/%{pypi_name}/locale/*pot +mv %{buildroot}%{python2_sitelib}/%{pypi_name}/locale %{buildroot}%{_datadir}/locale + +# Find language files +%find_lang %{pypi_name} --all-name + +%files common -f %{pypi_name}.lang +%license LICENSE +%{python2_sitelib}/murano +%{python2_sitelib}/murano-*.egg-info +%exclude %{python2_sitelib}/murano/tests +%exclude %{python2_sitelib}/murano_tempest_tests +%exclude %{python2_sitelib}/%{service}_tests.egg-info +%{_bindir}/murano-manage +%{_bindir}/murano-db-manage +%{_bindir}/murano-test-runner +%{_bindir}/murano-cfapi-db-manage +%dir %attr(0750,murano,root) %{_localstatedir}/log/murano +%dir %attr(0755,murano,root) %{_localstatedir}/run/murano +%dir %attr(0755,murano,root) %{_localstatedir}/cache/murano +%dir %attr(0755,murano,root) %{_sysconfdir}/murano +%config(noreplace) %attr(-, root, murano) %{_sysconfdir}/murano/murano.conf +%config(noreplace) %attr(-, root, murano) %{_sysconfdir}/murano/murano-paste.ini +%config(noreplace) %attr(-, root, murano) %{_sysconfdir}/murano/netconfig.yaml.sample +%config(noreplace) %attr(-, root, murano) %{_sysconfdir}/murano/logging.conf +%config(noreplace) %attr(-, root, murano) %{_sysconfdir}/murano/murano-cfapi.conf +%config(noreplace) %attr(-, root, murano) %{_sysconfdir}/murano/murano-cfapi-paste.ini + +%files engine +%doc README.rst +%license LICENSE +%{_bindir}/murano-engine +%{_unitdir}/murano-engine.service + +%post engine +%systemd_post murano-engine.service + +%preun engine +%systemd_preun murano-engine.service + +%postun engine +%systemd_postun_with_restart murano-engine.service + +%files api +%doc README.rst +%license LICENSE +%{_localstatedir}/cache/murano/* +%{_bindir}/murano-api +%{_bindir}/murano-wsgi-api +%{_unitdir}/murano-api.service + +%files cf-api +%doc README.rst +%license LICENSE +%{_bindir}/murano-cfapi +%{_unitdir}/murano-cf-api.service + +%files doc +%doc doc/build/html + +%files -n python-murano-tests +%license LICENSE +%{python2_sitelib}/murano/tests +%{python2_sitelib}/murano_tempest_tests +%{python2_sitelib}/%{service}_tests.egg-info + +%changelog +* Wed Aug 30 2017 rdo-trunk 4.0.0-1 +- Update to 4.0.0 + +* Fri Aug 25 2017 Alfredo Moralejo 4.0.0-0.2.0rc2 +- Update to 4.0.0.0rc2 + +* Mon Aug 21 2017 Alfredo Moralejo 4.0.0-0.1.0rc1 +- Update to 4.0.0.0rc1 + diff --git a/openstack/openstack-panko/centos/build_srpm.data b/openstack/openstack-panko/centos/build_srpm.data new file mode 100644 index 00000000..024e3e13 --- /dev/null +++ b/openstack/openstack-panko/centos/build_srpm.data @@ -0,0 +1 @@ +TIS_PATCH_VER=5 diff --git a/openstack/openstack-panko/centos/meta_patches/0001-panko-config.patch b/openstack/openstack-panko/centos/meta_patches/0001-panko-config.patch new file mode 100644 index 00000000..d422bbe1 --- /dev/null +++ b/openstack/openstack-panko/centos/meta_patches/0001-panko-config.patch @@ -0,0 +1,171 @@ +From 39121ea596ec8137f2d56b8a35ebba73feb6b5c8 Mon Sep 17 00:00:00 2001 +From: Angie Wang +Date: 
Fri, 20 Oct 2017 10:07:03 -0400 +Subject: [PATCH 1/1] panko config + +--- + SOURCES/panko-dist.conf | 2 +- + SOURCES/panko-expirer-active | 27 +++++++++++++++++++++++++++ + SPECS/openstack-panko.spec | 22 +++++++++++++++++----- + 3 files changed, 45 insertions(+), 6 deletions(-) + create mode 100644 SOURCES/panko-expirer-active + +diff --git a/SOURCES/panko-dist.conf b/SOURCES/panko-dist.conf +index c33a2ee..ac6f79f 100644 +--- a/SOURCES/panko-dist.conf ++++ b/SOURCES/panko-dist.conf +@@ -1,4 +1,4 @@ + [DEFAULT] +-log_dir = /var/log/panko ++#log_dir = /var/log/panko + use_stderr = False + +diff --git a/SOURCES/panko-expirer-active b/SOURCES/panko-expirer-active +new file mode 100644 +index 0000000..7d526e0 +--- /dev/null ++++ b/SOURCES/panko-expirer-active +@@ -0,0 +1,60 @@ ++#!/bin/bash ++ ++# ++# Wrapper script to run panko-expirer when on active controller only ++# ++PANKO_EXPIRER_INFO="/var/run/panko-expirer.info" ++PANKO_EXPIRER_CMD="/usr/bin/nice -n 2 /usr/bin/panko-expirer" ++ ++function is_active_pgserver() ++{ ++ # Determine whether we're running on the same controller as the service. ++ local service=postgres ++ local enabledactive=$(/usr/bin/sm-query service $service| grep enabled-active) ++ if [ "x$enabledactive" == "x" ] ++ then ++ # enabled-active not found for that service on this controller ++ return 1 ++ else ++ # enabled-active found for that resource ++ return 0 ++ fi ++} ++ ++if is_active_pgserver ++then ++ if [ ! -f ${PANKO_EXPIRER_INFO} ] ++ then ++ echo skip_count=0 > ${PANKO_EXPIRER_INFO} ++ fi ++ ++ source ${PANKO_EXPIRER_INFO} ++ sudo -u postgres psql -d sysinv -c "SELECT alarm_id, entity_instance_id from i_alarm;" | grep -P "^(?=.*100.101)(?=.*${HOSTNAME})" &>/dev/null ++ if [ $? -eq 0 ] ++ then ++ source /etc/platform/platform.conf ++ if [ "${system_type}" = "All-in-one" ] ++ then ++ source /etc/init.d/task_affinity_functions.sh ++ idle_core=$(get_most_idle_core) ++ if [ "$idle_core" -ne "0" ] ++ then ++ sh -c "exec taskset -c $idle_core ${PANKO_EXPIRER_CMD}" ++ sed -i "/skip_count/s/=.*/=0/" ${PANKO_EXPIRER_INFO} ++ exit 0 ++ fi ++ fi ++ ++ if [ "$skip_count" -lt "3" ] ++ then ++ newval=$(($skip_count+1)) ++ sed -i "/skip_count/s/=.*/=$newval/" ${PANKO_EXPIRER_INFO} ++ exit 0 ++ fi ++ fi ++ ++ eval ${PANKO_EXPIRER_CMD} ++ sed -i "/skip_count/s/=.*/=0/" ${PANKO_EXPIRER_INFO} ++fi ++ ++exit 0 +diff --git a/SPECS/openstack-panko.spec b/SPECS/openstack-panko.spec +index d12da57..90471d9 100644 +--- a/SPECS/openstack-panko.spec ++++ b/SPECS/openstack-panko.spec +@@ -4,20 +4,26 @@ + + Name: openstack-panko + Version: 3.0.0 +-Release: 1%{?dist} ++Release: 1%{?_tis_dist}.%{tis_patch_ver} + Summary: Panko provides Event storage and REST API + + License: ASL 2.0 + URL: http://github.com/openstack/panko + Source0: https://tarballs.openstack.org/%{pypi_name}/%{pypi_name}-%{upstream_version}.tar.gz + Source1: %{pypi_name}-dist.conf +-Source2: %{pypi_name}.logrotate ++# WRS ++Source2: panko-expirer-active ++ ++# WRS: Include patches here ++Patch1: 0001-modify-panko-api.patch ++ + BuildArch: noarch + + BuildRequires: python-setuptools + BuildRequires: python-pbr + BuildRequires: python2-devel + BuildRequires: openstack-macros ++BuildRequires: python-tenacity >= 3.1.0 + + %description + HTTP API to store events. +@@ -116,6 +122,9 @@ This package contains documentation files for panko. + %prep + %setup -q -n %{pypi_name}-%{upstream_version} + ++# WRS: Apply patches here ++%patch1 -p1 ++ + find . 
\( -name .gitignore -o -name .placeholder \) -delete + + find panko -name \*.py -exec sed -i '/\/usr\/bin\/env python/{d;q}' {} + +@@ -158,6 +167,8 @@ mkdir -p %{buildroot}/%{_var}/log/%{name} + install -p -D -m 640 %{SOURCE1} %{buildroot}%{_datadir}/panko/panko-dist.conf + install -p -D -m 640 etc/panko/panko.conf %{buildroot}%{_sysconfdir}/panko/panko.conf + install -p -D -m 640 etc/panko/api_paste.ini %{buildroot}%{_sysconfdir}/panko/api_paste.ini ++# WRS ++install -p -D -m 640 panko/api/panko-api.py %{buildroot}%{_datadir}/panko/panko-api.py + + #TODO(prad): build the docs at run time, once the we get rid of postgres setup dependency + +@@ -169,8 +180,8 @@ install -d -m 755 %{buildroot}%{_sharedstatedir}/panko + install -d -m 755 %{buildroot}%{_sharedstatedir}/panko/tmp + install -d -m 755 %{buildroot}%{_localstatedir}/log/panko + +-# Install logrotate +-install -p -D -m 644 %{SOURCE2} %{buildroot}%{_sysconfdir}/logrotate.d/%{name} ++# WRS ++install -p -D -m 755 %{SOURCE2} %{buildroot}%{_bindir}/panko-expirer-active + + # Remove all of the conf files that are included in the buildroot/usr/etc dir since we installed them above + rm -f %{buildroot}/usr/etc/panko/* +@@ -201,14 +212,15 @@ exit 0 + %{_bindir}/panko-api + %{_bindir}/panko-dbsync + %{_bindir}/panko-expirer ++%{_bindir}/panko-expirer-active + + %files common + %dir %{_sysconfdir}/panko ++%{_datadir}/panko/panko-api.* + %attr(-, root, panko) %{_datadir}/panko/panko-dist.conf + %config(noreplace) %attr(-, root, panko) %{_sysconfdir}/panko/policy.json + %config(noreplace) %attr(-, root, panko) %{_sysconfdir}/panko/panko.conf + %config(noreplace) %attr(-, root, panko) %{_sysconfdir}/panko/api_paste.ini +-%config(noreplace) %attr(-, root, panko) %{_sysconfdir}/logrotate.d/%{name} + %dir %attr(0755, panko, root) %{_localstatedir}/log/panko + + %defattr(-, panko, panko, -) +-- +1.8.3.1 + diff --git a/openstack/openstack-panko/centos/meta_patches/0002-spec-change-event-list-descending.patch b/openstack/openstack-panko/centos/meta_patches/0002-spec-change-event-list-descending.patch new file mode 100644 index 00000000..259df01e --- /dev/null +++ b/openstack/openstack-panko/centos/meta_patches/0002-spec-change-event-list-descending.patch @@ -0,0 +1,32 @@ +From 4e791be412662ae1f97cfd4ff5a90ea6337e49a4 Mon Sep 17 00:00:00 2001 +From: Angie Wang +Date: Thu, 16 Nov 2017 15:25:08 -0500 +Subject: [PATCH 1/1] spec change event list descending + +--- + SPECS/openstack-panko.spec | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/SPECS/openstack-panko.spec b/SPECS/openstack-panko.spec +index 90471d9..95497b4 100644 +--- a/SPECS/openstack-panko.spec ++++ b/SPECS/openstack-panko.spec +@@ -16,6 +16,7 @@ Source2: panko-expirer-active + + # WRS: Include patches here + Patch1: 0001-modify-panko-api.patch ++Patch2: 0002-Change-event-list-descending.patch + + BuildArch: noarch + +@@ -124,6 +125,7 @@ This package contains documentation files for panko. + + # WRS: Apply patches here + %patch1 -p1 ++%patch2 -p1 + + find . 
\( -name .gitignore -o -name .placeholder \) -delete + +-- +1.8.3.1 + diff --git a/openstack/openstack-panko/centos/meta_patches/0003-spec-fix-event-query-to-sqlalchemy-with-non-admin-us.patch b/openstack/openstack-panko/centos/meta_patches/0003-spec-fix-event-query-to-sqlalchemy-with-non-admin-us.patch new file mode 100644 index 00000000..133901e7 --- /dev/null +++ b/openstack/openstack-panko/centos/meta_patches/0003-spec-fix-event-query-to-sqlalchemy-with-non-admin-us.patch @@ -0,0 +1,32 @@ +From aad89aa79de1e9f0b35afa1ba587c10591a889e0 Mon Sep 17 00:00:00 2001 +From: Angie Wang +Date: Mon, 11 Dec 2017 16:29:23 -0500 +Subject: [PATCH 1/1] spec fix event query to sqlalchemy with non admin user + +--- + SPECS/openstack-panko.spec | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/SPECS/openstack-panko.spec b/SPECS/openstack-panko.spec +index 95497b4..87a6a5a 100644 +--- a/SPECS/openstack-panko.spec ++++ b/SPECS/openstack-panko.spec +@@ -17,6 +17,7 @@ Source2: panko-expirer-active + # WRS: Include patches here + Patch1: 0001-modify-panko-api.patch + Patch2: 0002-Change-event-list-descending.patch ++Patch3: 0003-Fix-event-query-to-sqlalchemy-with-non-admin-user.patch + + BuildArch: noarch + +@@ -126,6 +127,7 @@ This package contains documentation files for panko. + # WRS: Apply patches here + %patch1 -p1 + %patch2 -p1 ++%patch3 -p1 + + find . \( -name .gitignore -o -name .placeholder \) -delete + +-- +1.8.3.1 + diff --git a/openstack/openstack-panko/centos/meta_patches/PATCH_ORDER b/openstack/openstack-panko/centos/meta_patches/PATCH_ORDER new file mode 100644 index 00000000..af3579d2 --- /dev/null +++ b/openstack/openstack-panko/centos/meta_patches/PATCH_ORDER @@ -0,0 +1,3 @@ +0001-panko-config.patch +0002-spec-change-event-list-descending.patch +0003-spec-fix-event-query-to-sqlalchemy-with-non-admin-us.patch diff --git a/openstack/openstack-panko/centos/patches/0001-modify-panko-api.patch b/openstack/openstack-panko/centos/patches/0001-modify-panko-api.patch new file mode 100644 index 00000000..23ed7b51 --- /dev/null +++ b/openstack/openstack-panko/centos/patches/0001-modify-panko-api.patch @@ -0,0 +1,63 @@ +From 3583e2afbae8748f05dc12c51eefc4983358759c Mon Sep 17 00:00:00 2001 +From: Angie Wang +Date: Mon, 6 Nov 2017 17:32:46 -0500 +Subject: [PATCH 1/1] modify panko api + +--- + panko/api/app.py | 12 +++++++++--- + panko/api/panko-api.py | 6 ++++++ + 2 files changed, 15 insertions(+), 3 deletions(-) + create mode 100644 panko/api/panko-api.py + +diff --git a/panko/api/app.py b/panko/api/app.py +index 9867e18..4eedaea 100644 +--- a/panko/api/app.py ++++ b/panko/api/app.py +@@ -51,7 +51,7 @@ global APPCONFIGS + APPCONFIGS = {} + + +-def load_app(conf, appname='panko+keystone'): ++def load_app(conf, args, appname='panko+keystone'): + global APPCONFIGS + + # Build the WSGI app +@@ -62,6 +62,12 @@ def load_app(conf, appname='panko+keystone'): + if cfg_path is None or not os.path.exists(cfg_path): + raise cfg.ConfigFilesNotFoundError([conf.api_paste_config]) + ++ config_args = dict([(key, value) for key, value in args.iteritems() ++ if key in conf and value is not None]) ++ for key, value in config_args.iteritems(): ++ if key == 'config_file': ++ conf.config_file = value ++ + config = dict(conf=conf) + configkey = str(uuid.uuid4()) + APPCONFIGS[configkey] = config +@@ -71,8 +77,8 @@ def load_app(conf, appname='panko+keystone'): + global_conf={'configkey': configkey}) + + +-def build_wsgi_app(argv=None): +- return load_app(service.prepare_service(argv=argv)) ++def build_wsgi_app(argv=None, 
args=None): ++ return load_app(service.prepare_service(argv=argv), args) + + + def app_factory(global_config, **local_conf): +diff --git a/panko/api/panko-api.py b/panko/api/panko-api.py +new file mode 100644 +index 0000000..87d917d +--- /dev/null ++++ b/panko/api/panko-api.py +@@ -0,0 +1,6 @@ ++from panko.api import app as build_wsgi_app ++import sys ++ ++sys.argv = sys.argv[:1] ++args = {'config_file' : 'etc/panko/panko.conf', } ++application = build_wsgi_app.build_wsgi_app(args=args) +-- +1.8.3.1 + diff --git a/openstack/openstack-panko/centos/patches/0002-Change-event-list-descending.patch b/openstack/openstack-panko/centos/patches/0002-Change-event-list-descending.patch new file mode 100644 index 00000000..7d8b8ad2 --- /dev/null +++ b/openstack/openstack-panko/centos/patches/0002-Change-event-list-descending.patch @@ -0,0 +1,27 @@ +From 05b89c2f78357ad39b0cd9eb74903e14d1f56758 Mon Sep 17 00:00:00 2001 +From: Angie Wang +Date: Thu, 16 Nov 2017 15:14:17 -0500 +Subject: [PATCH 1/1] Change event list descending + +--- + panko/storage/models.py | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/panko/storage/models.py b/panko/storage/models.py +index 9c578c8..ed4c9a8 100644 +--- a/panko/storage/models.py ++++ b/panko/storage/models.py +@@ -35,8 +35,8 @@ class Event(base.Model): + + SUPPORT_DIRS = ('asc', 'desc') + SUPPORT_SORT_KEYS = ('message_id', 'generated') +- DEFAULT_DIR = 'asc' +- DEFAULT_SORT = [('generated', 'asc'), ('message_id', 'asc')] ++ DEFAULT_DIR = 'desc' ++ DEFAULT_SORT = [('generated', 'desc'), ('message_id', 'desc')] + PRIMARY_KEY = 'message_id' + + def __init__(self, message_id, event_type, generated, traits, raw): +-- +1.8.3.1 + diff --git a/openstack/openstack-panko/centos/patches/0003-Fix-event-query-to-sqlalchemy-with-non-admin-user.patch b/openstack/openstack-panko/centos/patches/0003-Fix-event-query-to-sqlalchemy-with-non-admin-user.patch new file mode 100644 index 00000000..3cb09256 --- /dev/null +++ b/openstack/openstack-panko/centos/patches/0003-Fix-event-query-to-sqlalchemy-with-non-admin-user.patch @@ -0,0 +1,101 @@ +From c390a3bc6920728806f581b85d46f02d75eb651c Mon Sep 17 00:00:00 2001 +From: Angie Wang +Date: Mon, 11 Dec 2017 16:21:42 -0500 +Subject: [PATCH 1/1] Fix event query to sqlalchemy with non admin user + +This is an upstream fix. +https://github.com/openstack/panko/commit/99d591df950271594ee049caa3ff22304437a228 + +Do not port this patch in the next panko rebase. 
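+
+The crux of the upstream change: each trait filter must query its own
+aliased() copy of the trait table, with all aliases tied together on
+event_id, because a single trait row can never satisfy two different
+keys. Below is a minimal, self-contained sketch of that join pattern,
+not code from this patch; the model and trait names are illustrative
+only.
+
+    from sqlalchemy import Column, Integer, String, create_engine
+    from sqlalchemy.ext.declarative import declarative_base
+    from sqlalchemy.orm import aliased, sessionmaker
+
+    Base = declarative_base()
+
+    class TraitText(Base):
+        __tablename__ = 'trait_text'
+        id = Column(Integer, primary_key=True)
+        event_id = Column(Integer)
+        key = Column(String)
+        value = Column(String)
+
+    engine = create_engine('sqlite://')
+    Base.metadata.create_all(engine)
+    session = sessionmaker(bind=engine)()
+    session.add_all([
+        TraitText(event_id=1, key='trait_A', value='my_Foo_text'),
+        TraitText(event_id=1, key='trait_B', value='1'),
+        TraitText(event_id=2, key='trait_A', value='my_Foo_text'),
+    ])
+    session.commit()
+
+    # One alias per filter, joined on event_id. A single un-aliased
+    # model cannot express "the same event has trait_A AND trait_B"
+    # in one SELECT, which is what multi-trait filters require.
+    a, b = aliased(TraitText), aliased(TraitText)
+    q = (session.query(a.event_id)
+         .filter(a.key == 'trait_A', a.value == 'my_Foo_text')
+         .filter(b.event_id == a.event_id,
+                 b.key == 'trait_B', b.value == '1'))
+    print([row.event_id for row in q])   # -> [1]; event 2 is excluded
+
+This also explains the reordered trait_filters in the test: with the
+aliased join the filter order no longer affects the result.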
+--- + panko/storage/impl_sqlalchemy.py | 34 +++++++++++++++------- + .../functional/storage/test_storage_scenarios.py | 4 +-- + 2 files changed, 25 insertions(+), 13 deletions(-) + +diff --git a/panko/storage/impl_sqlalchemy.py b/panko/storage/impl_sqlalchemy.py +index 670c8d7..29b5b97 100644 +--- a/panko/storage/impl_sqlalchemy.py ++++ b/panko/storage/impl_sqlalchemy.py +@@ -24,6 +24,7 @@ from oslo_log import log + from oslo_utils import timeutils + import sqlalchemy as sa + from sqlalchemy.engine import url as sqlalchemy_url ++from sqlalchemy.orm import aliased + + from panko import storage + from panko.storage import base +@@ -61,8 +62,8 @@ trait_models_dict = {'string': models.TraitText, + 'float': models.TraitFloat} + + +-def _build_trait_query(session, trait_type, key, value, op='eq'): +- trait_model = trait_models_dict[trait_type] ++def _get_model_and_conditions(trait_type, key, value, op='eq'): ++ trait_model = aliased(trait_models_dict[trait_type]) + op_dict = {'eq': (trait_model.value == value), + 'lt': (trait_model.value < value), + 'le': (trait_model.value <= value), +@@ -70,8 +71,7 @@ def _build_trait_query(session, trait_type, key, value, op='eq'): + 'ge': (trait_model.value >= value), + 'ne': (trait_model.value != value)} + conditions = [trait_model.key == key, op_dict[op]] +- return (session.query(trait_model.event_id.label('ev_id')) +- .filter(*conditions)) ++ return (trait_model, conditions) + + + class Connection(base.Connection): +@@ -274,16 +274,28 @@ class Connection(base.Connection): + key = trait_filter.pop('key') + op = trait_filter.pop('op', 'eq') + trait_type, value = list(trait_filter.items())[0] +- trait_subq = _build_trait_query(session, trait_type, +- key, value, op) +- for trait_filter in filters: ++ ++ trait_model, conditions = _get_model_and_conditions( ++ trait_type, key, value, op) ++ trait_subq = (session ++ .query(trait_model.event_id.label('ev_id')) ++ .filter(*conditions)) ++ ++ first_model = trait_model ++ for label_num, trait_filter in enumerate(filters): + key = trait_filter.pop('key') + op = trait_filter.pop('op', 'eq') + trait_type, value = list(trait_filter.items())[0] +- q = _build_trait_query(session, trait_type, +- key, value, op) +- trait_subq = trait_subq.filter( +- trait_subq.subquery().c.ev_id == q.subquery().c.ev_id) ++ trait_model, conditions = _get_model_and_conditions( ++ trait_type, key, value, op) ++ trait_subq = ( ++ trait_subq ++ .add_columns( ++ trait_model.event_id.label('l%d' % label_num)) ++ .filter( ++ first_model.event_id == trait_model.event_id, ++ *conditions)) ++ + trait_subq = trait_subq.subquery() + + query = (session.query(models.Event.id) +diff --git a/panko/tests/functional/storage/test_storage_scenarios.py b/panko/tests/functional/storage/test_storage_scenarios.py +index 3af76b4..9af75c8 100644 +--- a/panko/tests/functional/storage/test_storage_scenarios.py ++++ b/panko/tests/functional/storage/test_storage_scenarios.py +@@ -340,8 +340,8 @@ class GetEventTest(EventTestBase): + + def test_get_event_multiple_trait_filter(self): + trait_filters = [{'key': 'trait_B', 'integer': 1}, +- {'key': 'trait_A', 'string': 'my_Foo_text'}, +- {'key': 'trait_C', 'float': 0.123456}] ++ {'key': 'trait_C', 'float': 0.123456}, ++ {'key': 'trait_A', 'string': 'my_Foo_text'}] + event_filter = storage.EventFilter(self.start, self.end, + traits_filter=trait_filters) + events = [event for event in self.conn.get_events(event_filter)] +-- +1.8.3.1 + diff --git a/openstack/openstack-panko/centos/srpm_path 
b/openstack/openstack-panko/centos/srpm_path new file mode 100644 index 00000000..b33279f7 --- /dev/null +++ b/openstack/openstack-panko/centos/srpm_path @@ -0,0 +1 @@ +mirror:Source/openstack-panko-3.0.0-1.el7.src.rpm diff --git a/openstack/openstack-ras/centos/build_srpm.data b/openstack/openstack-ras/centos/build_srpm.data new file mode 100644 index 00000000..e0d2c13d --- /dev/null +++ b/openstack/openstack-ras/centos/build_srpm.data @@ -0,0 +1,5 @@ +SRC_DIR="$CGCS_BASE/git/openstack-ras" +#COPY_LIST="$FILES_BASE/*" +TIS_BASE_SRCREV=a54e652dd2f404de8e125370445a1225b3678894 +TIS_PATCH_VER=GITREVCOUNT + diff --git a/openstack/openstack-ras/centos/openstack-ras.spec b/openstack/openstack-ras/centos/openstack-ras.spec new file mode 100644 index 00000000..123cfb0f --- /dev/null +++ b/openstack/openstack-ras/centos/openstack-ras.spec @@ -0,0 +1,80 @@ +%define local_dir /usr/local + +Summary: openstack-ras +Name: openstack-ras +Version: 1.0.0 +Release: 0%{?_tis_dist}.%{tis_patch_ver} +License: Apache-2.0 +Group: base +Packager: Wind River +URL: https://github.com/madkiss/openstack-resource-agents/tree/stable-grizzly +# Note: when upgrading, new upstream URL will be: +# https://git.openstack.org/cgit/openstack/openstack-resource-agents + +Requires: /usr/bin/env +Requires: /bin/sh + +Source: %{name}-%{version}.tar.gz + +%description +OpenStack Resource Agents from Madkiss + +%prep +%autosetup -p 1 + +%install +%make_install +rm -rf ${RPM_BUILD_ROOT}/usr/lib/ocf/resource.d/openstack/ceilometer-agent-central +rm -rf ${RPM_BUILD_ROOT}/usr/lib/ocf/resource.d/openstack/ceilometer-alarm-evaluator +rm -rf ${RPM_BUILD_ROOT}/usr/lib/ocf/resource.d/openstack/ceilometer-alarm-notifier + +%files +%defattr(-,root,root,-) +%dir "/usr/lib/ocf/resource.d/openstack" +"/usr/lib/ocf/resource.d/openstack/aodh-api" +"/usr/lib/ocf/resource.d/openstack/aodh-evaluator" +"/usr/lib/ocf/resource.d/openstack/aodh-listener" +"/usr/lib/ocf/resource.d/openstack/aodh-notifier" +"/usr/lib/ocf/resource.d/openstack/murano-engine" +"/usr/lib/ocf/resource.d/openstack/murano-api" +"/usr/lib/ocf/resource.d/openstack/magnum-conductor" +"/usr/lib/ocf/resource.d/openstack/magnum-api" +"/usr/lib/ocf/resource.d/openstack/ironic-conductor" +"/usr/lib/ocf/resource.d/openstack/ironic-api" +"/usr/lib/ocf/resource.d/openstack/nova-compute" +"/usr/lib/ocf/resource.d/openstack/heat-api" +"/usr/lib/ocf/resource.d/openstack/glance-registry" +"/usr/lib/ocf/resource.d/openstack/nova-network" +"/usr/lib/ocf/resource.d/openstack/keystone" +"/usr/lib/ocf/resource.d/openstack/heat-engine" +"/usr/lib/ocf/resource.d/openstack/nova-novnc" +"/usr/lib/ocf/resource.d/openstack/nova-serialproxy" +"/usr/lib/ocf/resource.d/openstack/heat-api-cfn" +"/usr/lib/ocf/resource.d/openstack/cinder-api" +"/usr/lib/ocf/resource.d/openstack/neutron-agent-dhcp" +"/usr/lib/ocf/resource.d/openstack/cinder-volume" +"/usr/lib/ocf/resource.d/openstack/neutron-agent-l3" +"/usr/lib/ocf/resource.d/openstack/cinder-schedule" +"/usr/lib/ocf/resource.d/openstack/nova-consoleauth" +"/usr/lib/ocf/resource.d/openstack/ceilometer-api" +"/usr/lib/ocf/resource.d/openstack/nova-scheduler" +"/usr/lib/ocf/resource.d/openstack/nova-conductor" +"/usr/lib/ocf/resource.d/openstack/neutron-server" +"/usr/lib/ocf/resource.d/openstack/validation" +"/usr/lib/ocf/resource.d/openstack/heat-api-cloudwatch" +"/usr/lib/ocf/resource.d/openstack/ceilometer-agent-notification" +"/usr/lib/ocf/resource.d/openstack/glance-api" +"/usr/lib/ocf/resource.d/openstack/nova-api" 
+"/usr/lib/ocf/resource.d/openstack/neutron-metadata-agent" +"/usr/lib/ocf/resource.d/openstack/ceilometer-collector" +"/usr/lib/ocf/resource.d/openstack/panko-api" +"/usr/lib/ocf/resource.d/openstack/nova-placement-api" +"/usr/lib/ocf/resource.d/openstack/dcorch-snmp" +"/usr/lib/ocf/resource.d/openstack/dcmanager-manager" +"/usr/lib/ocf/resource.d/openstack/dcorch-nova-api-proxy" +"/usr/lib/ocf/resource.d/openstack/dcorch-sysinv-api-proxy" +"/usr/lib/ocf/resource.d/openstack/dcmanager-api" +"/usr/lib/ocf/resource.d/openstack/dcorch-engine" +"/usr/lib/ocf/resource.d/openstack/dcorch-neutron-api-proxy" +"/usr/lib/ocf/resource.d/openstack/dcorch-cinder-api-proxy" +"/usr/lib/ocf/resource.d/openstack/dcorch-patch-api-proxy" diff --git a/openstack/openstack-ras/openstack-ras/CGCSkeyringsupport.patch b/openstack/openstack-ras/openstack-ras/CGCSkeyringsupport.patch new file mode 100644 index 00000000..e0e5cdd1 --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/CGCSkeyringsupport.patch @@ -0,0 +1,221 @@ +Index: git/ocf/cinder-api +=================================================================== +--- git.orig/ocf/cinder-api 2014-09-17 13:13:09.768471050 -0400 ++++ git/ocf/cinder-api 2014-09-23 10:22:33.294302829 -0400 +@@ -244,18 +244,27 @@ + fi + + # Check detailed information about this specific version of the API. +- if [ -n "$OCF_RESKEY_os_username" ] && [ -n "$OCF_RESKEY_os_password" ] \ +- && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_keystone_get_token_url" ]; then +- token=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$OCF_RESKEY_os_username\", \ +- \"password\": \"$OCF_RESKEY_os_password\"}, \"tenantName\": \"$OCF_RESKEY_os_tenant_name\"}}" \ +- -H "Content-type: application/json" $OCF_RESKEY_keystone_get_token_url | tr ',' '\n' | grep '"id":' \ +- | cut -d'"' -f4 | head --lines 1` +- http_code=`curl --write-out %{http_code} --output /dev/null -sH "X-Auth-Token: $token" $OCF_RESKEY_url` +- rc=$? +- if [ $rc -ne 0 ] || [ $http_code -ne 200 ]; then +- ocf_log err "Failed to connect to the OpenStack Cinder API (cinder-api): $rc and $http_code" +- return $OCF_NOT_RUNNING +- fi ++# if [ -n "$OCF_RESKEY_os_username" ] && [ -n "$OCF_RESKEY_os_password" ] \ ++# && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_keystone_get_token_url" ]; then ++# token=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$OCF_RESKEY_os_username\", \ ++# \"password\": \"$OCF_RESKEY_os_password\"}, \"tenantName\": \"$OCF_RESKEY_os_tenant_name\"}}" \ ++# -H "Content-type: application/json" $OCF_RESKEY_keystone_get_token_url | tr ',' '\n' | grep '"id":' \ ++# | cut -d'"' -f4 | head --lines 1` ++# http_code=`curl --write-out %{http_code} --output /dev/null -sH "X-Auth-Token: $token" $OCF_RESKEY_url` ++# rc=$? ++# if [ $rc -ne 0 ] || [ $http_code -ne 200 ]; then ++# ocf_log err "Failed to connect to the OpenStack Cinder API (cinder-api): $rc and $http_code" ++# return $OCF_NOT_RUNNING ++# fi ++# fi ++ #suppress the information displayed while checking detailed information about this specific version of the API ++ if [ -n "$OCF_RESKEY_os_username"] && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_keystone_get_token_url" ]; then ++ ./validation $OCF_RESKEY_keystone_get_token_url $OCF_RESKEY_os_username $OCF_RESKEY_os_tenant_name ++ rc=$? 
++ if [ $rc -ne 0 ]; then ++ ocf_log err "Failed to connect to the OpenStack Cinder API (cinder-api): $rc and $http_code" ++ return $OCF_NOT_RUNNING ++ fi + fi + + ocf_log debug "OpenStack Cinder API (cinder-api) monitor succeeded" +Index: git/ocf/glance-api +=================================================================== +--- git.orig/ocf/glance-api 2014-09-17 13:13:09.768471050 -0400 ++++ git/ocf/glance-api 2014-09-23 10:16:35.903826295 -0400 +@@ -236,11 +236,9 @@ + fi + + # Monitor the RA by retrieving the image list +- if [ -n "$OCF_RESKEY_os_username" ] && [ -n "$OCF_RESKEY_os_password" ] \ +- && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_os_auth_url" ]; then ++ if [ -n "$OCF_RESKEY_os_username" ] && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_os_auth_url" ]; then + ocf_run -q $OCF_RESKEY_client_binary \ + --os_username "$OCF_RESKEY_os_username" \ +- --os_password "$OCF_RESKEY_os_password" \ + --os_tenant_name "$OCF_RESKEY_os_tenant_name" \ + --os_auth_url "$OCF_RESKEY_os_auth_url" \ + index > /dev/null 2>&1 +Index: git/ocf/glance-registry +=================================================================== +--- git.orig/ocf/glance-registry 2014-09-17 13:13:09.768471050 -0400 ++++ git/ocf/glance-registry 2014-09-23 10:22:58.078475044 -0400 +@@ -246,18 +246,27 @@ + + # Check whether we are supposed to monitor by logging into glance-registry + # and do it if that's the case. +- if [ -n "$OCF_RESKEY_os_username" ] && [ -n "$OCF_RESKEY_os_password" ] \ +- && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_keystone_get_token_url" ]; then +- token=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$OCF_RESKEY_os_username\", \ +- \"password\": \"$OCF_RESKEY_os_password\"}, \"tenantName\": \"$OCF_RESKEY_os_tenant_name\"}}" \ +- -H "Content-type: application/json" $OCF_RESKEY_keystone_get_token_url | tr ',' '\n' | grep '"id":' \ +- | cut -d'"' -f4 | head --lines 1` +- http_code=`curl --write-out %{http_code} --output /dev/null -sH "X-Auth-Token: $token" $OCF_RESKEY_url` +- rc=$? +- if [ $rc -ne 0 ] || [ $http_code -ne 200 ]; then +- ocf_log err "Failed to connect to the OpenStack ImageService (glance-registry): $rc and $http_code" +- return $OCF_NOT_RUNNING +- fi ++# if [ -n "$OCF_RESKEY_os_username" ] && [ -n "$OCF_RESKEY_os_password" ] \ ++# && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_keystone_get_token_url" ]; then ++# token=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$OCF_RESKEY_os_username\", \ ++# \"password\": \"$OCF_RESKEY_os_password\"}, \"tenantName\": \"$OCF_RESKEY_os_tenant_name\"}}" \ ++# -H "Content-type: application/json" $OCF_RESKEY_keystone_get_token_url | tr ',' '\n' | grep '"id":' \ ++# | cut -d'"' -f4 | head --lines 1` ++# http_code=`curl --write-out %{http_code} --output /dev/null -sH "X-Auth-Token: $token" $OCF_RESKEY_url` ++# rc=$? ++# if [ $rc -ne 0 ] || [ $http_code -ne 200 ]; then ++# ocf_log err "Failed to connect to the OpenStack ImageService (glance-registry): $rc and $http_code" ++# return $OCF_NOT_RUNNING ++# fi ++# fi ++ #suppress the information displayed while checking detailed information about this specific version of the API ++ if [ -n "$OCF_RESKEY_os_username"] && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_keystone_get_token_url" ]; then ++ ./validation $OCF_RESKEY_keystone_get_token_url $OCF_RESKEY_os_username $OCF_RESKEY_os_tenant_name ++ rc=$? 
++ if [ $rc -ne 0 ]; then ++ ocf_log err "Failed to connect to the OpenStack Cinder API (cinder-api): $rc and $http_code" ++ return $OCF_NOT_RUNNING ++ fi + fi + + ocf_log debug "OpenStack ImageService (glance-registry) monitor succeeded" +Index: git/ocf/keystone +=================================================================== +--- git.orig/ocf/keystone 2014-09-17 13:13:09.768471050 -0400 ++++ git/ocf/keystone 2014-09-23 10:18:30.736618732 -0400 +@@ -237,12 +237,10 @@ + + # Check whether we are supposed to monitor by logging into Keystone + # and do it if that's the case. +- if [ -n "$OCF_RESKEY_client_binary" ] && [ -n "$OCF_RESKEY_os_username" ] \ +- && [ -n "$OCF_RESKEY_os_password" ] && [ -n "$OCF_RESKEY_os_tenant_name" ] \ ++ if [ -n "$OCF_RESKEY_client_binary" ] && [ -n "$OCF_RESKEY_os_password" ] && [ -n "$OCF_RESKEY_os_tenant_name" ] \ + && [ -n "$OCF_RESKEY_os_auth_url" ]; then + ocf_run -q $OCF_RESKEY_client_binary \ + --os-username "$OCF_RESKEY_os_username" \ +- --os-password "$OCF_RESKEY_os_password" \ + --os-tenant-name "$OCF_RESKEY_os_tenant_name" \ + --os-auth-url "$OCF_RESKEY_os_auth_url" \ + user-list > /dev/null 2>&1 +Index: git/ocf/neutron-server +=================================================================== +--- git.orig/ocf/neutron-server 2014-09-17 13:13:13.872502871 -0400 ++++ git/ocf/neutron-server 2014-09-23 10:23:39.358761926 -0400 +@@ -256,18 +256,27 @@ + fi + + # Check detailed information about this specific version of the API. +- if [ -n "$OCF_RESKEY_os_username" ] && [ -n "$OCF_RESKEY_os_password" ] \ +- && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_keystone_get_token_url" ]; then +- token=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$OCF_RESKEY_os_username\", \ +- \"password\": \"$OCF_RESKEY_os_password\"}, \"tenantName\": \"$OCF_RESKEY_os_tenant_name\"}}" \ +- -H "Content-type: application/json" $OCF_RESKEY_keystone_get_token_url | tr ',' '\n' | grep '"id":' \ +- | cut -d'"' -f4 | head --lines 1` +- http_code=`curl --write-out %{http_code} --output /dev/null -sH "X-Auth-Token: $token" $OCF_RESKEY_url` +- rc=$? +- if [ $rc -ne 0 ] || [ $http_code -ne 200 ]; then +- ocf_log err "Failed to connect to the OpenStack Neutron API (neutron-server): $rc and $http_code" +- return $OCF_NOT_RUNNING +- fi ++# if [ -n "$OCF_RESKEY_os_username" ] && [ -n "$OCF_RESKEY_os_password" ] \ ++# && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_keystone_get_token_url" ]; then ++# token=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$OCF_RESKEY_os_username\", \ ++# \"password\": \"$OCF_RESKEY_os_password\"}, \"tenantName\": \"$OCF_RESKEY_os_tenant_name\"}}" \ ++# -H "Content-type: application/json" $OCF_RESKEY_keystone_get_token_url | tr ',' '\n' | grep '"id":' \ ++# | cut -d'"' -f4 | head --lines 1` ++# http_code=`curl --write-out %{http_code} --output /dev/null -sH "X-Auth-Token: $token" $OCF_RESKEY_url` ++# rc=$? ++# if [ $rc -ne 0 ] || [ $http_code -ne 200 ]; then ++# ocf_log err "Failed to connect to the OpenStack Neutron API (neutron-server): $rc and $http_code" ++# return $OCF_NOT_RUNNING ++# fi ++# fi ++ #suppress the information displayed while checking detailed information about this specific version of the API ++ if [ -n "$OCF_RESKEY_os_username"] && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_keystone_get_token_url" ]; then ++ ./validation $OCF_RESKEY_keystone_get_token_url $OCF_RESKEY_os_username $OCF_RESKEY_os_tenant_name ++ rc=$? 
++ if [ $rc -ne 0 ]; then ++ ocf_log err "Failed to connect to the OpenStack Cinder API (cinder-api): $rc and $http_code" ++ return $OCF_NOT_RUNNING ++ fi + fi + + ocf_log debug "OpenStack Neutron Server (neutron-server) monitor succeeded" +Index: git/ocf/nova-api +=================================================================== +--- git.orig/ocf/nova-api 2014-09-17 13:13:15.240513478 -0400 ++++ git/ocf/nova-api 2014-09-23 10:23:20.454630543 -0400 +@@ -244,18 +244,27 @@ + fi + + # Check detailed information about this specific version of the API. +- if [ -n "$OCF_RESKEY_os_username" ] && [ -n "$OCF_RESKEY_os_password" ] \ +- && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_keystone_get_token_url" ]; then +- token=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$OCF_RESKEY_os_username\", \ +- \"password\": \"$OCF_RESKEY_os_password\"}, \"tenantName\": \"$OCF_RESKEY_os_tenant_name\"}}" \ +- -H "Content-type: application/json" $OCF_RESKEY_keystone_get_token_url | tr ',' '\n' | grep '"id":' \ +- | cut -d'"' -f4 | head --lines 1` +- http_code=`curl --write-out %{http_code} --output /dev/null -sH "X-Auth-Token: $token" $OCF_RESKEY_url` +- rc=$? +- if [ $rc -ne 0 ] || [ $http_code -ne 200 ]; then +- ocf_log err "Failed to connect to the OpenStack Nova API (nova-api): $rc and $http_code" +- return $OCF_NOT_RUNNING +- fi ++# if [ -n "$OCF_RESKEY_os_username" ] && [ -n "$OCF_RESKEY_os_password" ] \ ++# && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_keystone_get_token_url" ]; then ++# token=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$OCF_RESKEY_os_username\", \ ++# \"password\": \"$OCF_RESKEY_os_password\"}, \"tenantName\": \"$OCF_RESKEY_os_tenant_name\"}}" \ ++# -H "Content-type: application/json" $OCF_RESKEY_keystone_get_token_url | tr ',' '\n' | grep '"id":' \ ++# | cut -d'"' -f4 | head --lines 1` ++# http_code=`curl --write-out %{http_code} --output /dev/null -sH "X-Auth-Token: $token" $OCF_RESKEY_url` ++# rc=$? ++# if [ $rc -ne 0 ] || [ $http_code -ne 200 ]; then ++# ocf_log err "Failed to connect to the OpenStack Nova API (nova-api): $rc and $http_code" ++# return $OCF_NOT_RUNNING ++# fi ++# fi ++ #suppress the information displayed while checking detailed information about this specific version of the API ++ if [ -n "$OCF_RESKEY_os_username"] && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_keystone_get_token_url" ]; then ++ ./validation $OCF_RESKEY_keystone_get_token_url $OCF_RESKEY_os_username $OCF_RESKEY_os_tenant_name ++ rc=$? 
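++# NOTE: '[ -n "$OCF_RESKEY_os_username"]' above is missing the space
++# before the closing ']', so with a configured username the test errors
++# out ("[: missing ]") and this validation block is silently skipped.
++# The same typo appears in the cinder-api, glance-registry and
++# neutron-server hunks above, and the ocf_log err text below was
++# copy-pasted from cinder-api into each non-cinder agent.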
++ if [ $rc -ne 0 ]; then ++ ocf_log err "Failed to connect to the OpenStack Cinder API (cinder-api): $rc and $http_code" ++ return $OCF_NOT_RUNNING ++ fi + fi + + ocf_log debug "OpenStack Nova API (nova-api) monitor succeeded" +Index: git/ocf/validation +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ git/ocf/validation 2014-09-23 10:06:37.011706573 -0400 +@@ -0,0 +1,5 @@ ++#!/usr/bin/env python ++ ++from keystoneclient import probe ++ ++probe.main() diff --git a/openstack/openstack-ras/openstack-ras/aodh.patch b/openstack/openstack-ras/openstack-ras/aodh.patch new file mode 100644 index 00000000..6a7f32a1 --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/aodh.patch @@ -0,0 +1,1467 @@ +--- + ocf/aodh-api | 368 +++++++++++++++++++++++++++++++++++++++++++++++++++++ + ocf/aodh-evaluator | 360 +++++++++++++++++++++++++++++++++++++++++++++++++++ + ocf/aodh-listener | 360 +++++++++++++++++++++++++++++++++++++++++++++++++++ + ocf/aodh-notifier | 360 +++++++++++++++++++++++++++++++++++++++++++++++++++ + 4 files changed, 1448 insertions(+) + +--- /dev/null ++++ b/ocf/aodh-api +@@ -0,0 +1,368 @@ ++#!/bin/sh ++# ++# ++# OpenStack Alarming API Service (aodh-api) ++# ++# Description: Manages an OpenStack Alarming API Service (aodh-api) process as an HA resource ++# ++# Authors: Emilien Macchi ++# ++# Support: openstack@lists.launchpad.net ++# License: Apache Software License (ASL) 2.0 ++# ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# ++# ++# See usage() function below for more details ... ++# ++# OCF instance parameters: ++# OCF_RESKEY_binary ++# OCF_RESKEY_config ++# OCF_RESKEY_user ++# OCF_RESKEY_pid ++# OCF_RESKEY_monitor_binary ++# OCF_RESKEY_server_port ++# OCF_RESKEY_additional_parameters ++####################################################################### ++# Initialization: ++ ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++ ++####################################################################### ++ ++# Fill in some defaults if no values are specified ++ ++OCF_RESKEY_binary_default="aodh-api" ++OCF_RESKEY_config_default="/etc/aodh/aodh.conf" ++OCF_RESKEY_user_default="root" ++OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" ++OCF_RESKEY_server_port_default="8042" ++ ++: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} ++: ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} ++: ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} ++: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} ++: ${OCF_RESKEY_server_port=${OCF_RESKEY_server_port_default}} ++ ++####################################################################### ++ ++usage() { ++ cat < ++ ++ ++1.0 ++ ++ ++Resource agent for the OpenStack Alarming API Service (aodh-api) ++May manage a aodh-api instance or a clone set that ++creates a distributed aodh-api cluster. 
++ ++Manages the OpenStack Alarming API Service (aodh-api) ++ ++ ++ ++ ++Location of the OpenStack Alarming API server binary (aodh-api) ++ ++OpenStack Alarming API server binary (aodh-api) ++ ++ ++ ++ ++ ++Location of the OpenStack Alarming API Service (aodh-api) configuration file ++ ++OpenStack Alarming API (aodh-api) config file ++ ++ ++ ++ ++ ++User running OpenStack Alarming API Service (aodh-api) ++ ++OpenStack Alarming API Service (aodh-api) user ++ ++ ++ ++ ++ ++The pid file to use for this OpenStack Alarming API Service (aodh-api) instance ++ ++OpenStack Alarming API Service (aodh-api) pid file ++ ++ ++ ++ ++ ++The listening port number of the aodh-api server. ++ ++ ++aodh-api listening port ++ ++ ++ ++ ++ ++Additional parameters to pass on to the OpenStack Alarming API Service (aodh-api) ++ ++Additional parameters for aodh-api ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++####################################################################### ++# Functions invoked by resource manager actions ++ ++aodh_api_check_port() { ++# This function has been taken from the squid RA and improved a bit ++# The length of the integer must be 4 ++# Examples of valid port: "1080", "0080" ++# Examples of invalid port: "1080bad", "0", "0000", "" ++ ++ local int ++ local cnt ++ ++ int="$1" ++ cnt=${#int} ++ echo $int |egrep -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*' ++ ++ if [ $? -ne 0 ] || [ $cnt -ne 4 ]; then ++ ocf_log err "Invalid port number: $1" ++ exit $OCF_ERR_CONFIGURED ++ fi ++} ++ ++aodh_api_validate() { ++ local rc ++ ++ check_binary $OCF_RESKEY_binary ++ check_binary netstat ++ aodh_api_check_port $OCF_RESKEY_server_port ++ ++ # A config file on shared storage that is not available ++ # during probes is OK. ++ if [ ! -f $OCF_RESKEY_config ]; then ++ if ! ocf_is_probe; then ++ ocf_log err "Config $OCF_RESKEY_config doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" ++ fi ++ ++ getent passwd $OCF_RESKEY_user >/dev/null 2>&1 ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "User $OCF_RESKEY_user doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ++ true ++} ++ ++aodh_api_status() { ++ local pid ++ local rc ++ ++ if [ ! -f $OCF_RESKEY_pid ]; then ++ ocf_log info "OpenStack Alarming API (aodh-api) is not running" ++ return $OCF_NOT_RUNNING ++ else ++ pid=`cat $OCF_RESKEY_pid` ++ fi ++ ++ ocf_run -warn kill -s 0 $pid ++ rc=$? ++ if [ $rc -eq 0 ]; then ++ return $OCF_SUCCESS ++ else ++ ocf_log info "Old PID file found, but OpenStack Alarming API (aodh-api) is not running" ++ rm -f $OCF_RESKEY_pid ++ return $OCF_NOT_RUNNING ++ fi ++} ++ ++aodh_api_monitor() { ++ local rc ++ local pid ++ local rc_db ++ local engine_db_check ++ ++ aodh_api_status ++ rc=$? ++ ++ # If status returned anything but success, return that immediately ++ if [ $rc -ne $OCF_SUCCESS ]; then ++ return $rc ++ fi ++ ++ # Check the server is listening on the server port ++ engine_db_check=`netstat -an | grep -s "$OCF_RESKEY_console_port" | grep -qs "LISTEN"` ++ rc_db=$? ++ if [ $rc_db -ne 0 ]; then ++ ocf_log err "aodh-api is not listening on $OCF_RESKEY_console_port: $rc_db" ++ return $OCF_NOT_RUNNING ++ fi ++ ++ ocf_log debug "OpenStack Alarming API (aodh-api) monitor succeeded" ++ return $OCF_SUCCESS ++} ++ ++aodh_api_start() { ++ local rc ++ ++ aodh_api_status ++ rc=$? ++ if [ $rc -eq $OCF_SUCCESS ]; then ++ ocf_log info "OpenStack Alarming API (aodh-api) already running" ++ return $OCF_SUCCESS ++ fi ++ ++ # run the actual aodh-api daemon. 
Don't use ocf_run as we're sending the tool's output ++ # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. ++ su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ ++ $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid ++ ++ # Spin waiting for the server to come up. ++ while true; do ++ aodh_api_monitor ++ rc=$? ++ [ $rc -eq $OCF_SUCCESS ] && break ++ if [ $rc -ne $OCF_NOT_RUNNING ]; then ++ ocf_log err "OpenStack Alarming API (aodh-api) start failed" ++ exit $OCF_ERR_GENERIC ++ fi ++ sleep 1 ++ done ++ ++ ocf_log info "OpenStack Alarming API (aodh-api) started" ++ return $OCF_SUCCESS ++} ++ ++aodh_api_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ ++aodh_api_stop() { ++ local rc ++ local pid ++ ++ aodh_api_status ++ rc=$? ++ if [ $rc -eq $OCF_NOT_RUNNING ]; then ++ ocf_log info "OpenStack Alarming API (aodh-api) already stopped" ++ aodh_api_confirm_stop ++ return $OCF_SUCCESS ++ fi ++ ++ # Try SIGTERM ++ pid=`cat $OCF_RESKEY_pid` ++ ocf_run kill -s TERM $pid ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "OpenStack Alarming API (aodh-api) couldn't be stopped" ++ aodh_api_confirm_stop ++ exit $OCF_ERR_GENERIC ++ fi ++ ++ # stop waiting ++ shutdown_timeout=15 ++ if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then ++ shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) ++ fi ++ count=0 ++ while [ $count -lt $shutdown_timeout ]; do ++ aodh_api_status ++ rc=$? ++ if [ $rc -eq $OCF_NOT_RUNNING ]; then ++ break ++ fi ++ count=`expr $count + 1` ++ sleep 1 ++ ocf_log debug "OpenStack Alarming API (aodh-api) still hasn't stopped yet. Waiting ..." ++ done ++ ++ aodh_api_status ++ rc=$? ++ if [ $rc -ne $OCF_NOT_RUNNING ]; then ++ # SIGTERM didn't help either, try SIGKILL ++ ocf_log info "OpenStack Alarming API (aodh-api) failed to stop after ${shutdown_timeout}s \ ++ using SIGTERM. Trying SIGKILL ..." ++ ocf_run kill -s KILL $pid ++ fi ++ aodh_api_confirm_stop ++ ++ ocf_log info "OpenStack Alarming API (aodh-api) stopped" ++ ++ rm -f $OCF_RESKEY_pid ++ ++ return $OCF_SUCCESS ++} ++ ++####################################################################### ++ ++case "$1" in ++ meta-data) meta_data ++ exit $OCF_SUCCESS;; ++ usage|help) usage ++ exit $OCF_SUCCESS;; ++esac ++ ++# Anything except meta-data and help must pass validation ++aodh_api_validate || exit $? ++ ++# What kind of method was invoked? ++case "$1" in ++ start) aodh_api_start;; ++ stop) aodh_api_stop;; ++ status) aodh_api_status;; ++ monitor) aodh_api_monitor;; ++ validate-all) ;; ++ *) usage ++ exit $OCF_ERR_UNIMPLEMENTED;; ++esac ++ +--- /dev/null ++++ b/ocf/aodh-evaluator +@@ -0,0 +1,360 @@ ++#!/bin/sh ++# ++# ++# OpenStack Alarming Evaluator Service (aodh-evaluator) ++# ++# Description: Manages an OpenStack Alarming Evaluator Service (aodh-evaluator) process as an HA resource ++# ++# Authors: Emilien Macchi ++# Mainly inspired by the Nova Scheduler resource agent written by Sebastien Han ++# ++# Support: openstack@lists.launchpad.net ++# License: Apache Software License (ASL) 2.0 ++# ++# Copyright (c) 2014-2016 Wind River Systems, Inc. 
++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# ++# ++# See usage() function below for more details ... ++# ++# OCF instance parameters: ++# OCF_RESKEY_binary ++# OCF_RESKEY_config ++# OCF_RESKEY_user ++# OCF_RESKEY_pid ++# OCF_RESKEY_monitor_binary ++# OCF_RESKEY_amqp_server_port ++# OCF_RESKEY_additional_parameters ++####################################################################### ++# Initialization: ++ ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++. /usr/bin/tsconfig ++ ++####################################################################### ++ ++# Fill in some defaults if no values are specified ++ ++OCF_RESKEY_binary_default="aodh-evaluator" ++OCF_RESKEY_config_default="/etc/aodh/aodh.conf" ++OCF_RESKEY_user_default="root" ++OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" ++OCF_RESKEY_amqp_server_port_default="5672" ++ ++: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} ++: ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} ++: ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} ++: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} ++: ${OCF_RESKEY_amqp_server_port=${OCF_RESKEY_amqp_server_port_default}} ++ ++####################################################################### ++ ++usage() { ++ cat < ++ ++ ++1.0 ++ ++ ++Resource agent for the OpenStack Alarming Evaluator Service (aodh-evaluator) ++May manage a aodh-evaluator instance or a clone set that ++creates a distributed aodh-evaluator cluster. ++ ++Manages the OpenStack Alarming Evaluator Service (aodh-evaluator) ++ ++ ++ ++ ++Location of the OpenStack Alarming Evaluator server binary (aodh-evaluator) ++ ++OpenStack Alarming Evaluator server binary (aodh-evaluator) ++ ++ ++ ++ ++ ++Location of the OpenStack Alarming Evaluator Service (aodh-evaluator) configuration file ++ ++OpenStack Alarming Evaluator (aodh-evaluator registry) config file ++ ++ ++ ++ ++ ++User running OpenStack Alarming Evaluator Service (aodh-evaluator) ++ ++OpenStack Alarming Evaluator Service (aodh-evaluator) user ++ ++ ++ ++ ++ ++The pid file to use for this OpenStack Alarming Evaluator Service (aodh-evaluator) instance ++ ++OpenStack Alarming Evaluator Service (aodh-evaluator) pid file ++ ++ ++ ++ ++ ++The listening port number of the AMQP server. Use for monitoring purposes ++ ++AMQP listening port ++ ++ ++ ++ ++ ++ ++Additional parameters to pass on to the OpenStack Alarming Evaluator Service (aodh-evaluator) ++ ++Additional parameters for aodh-evaluator ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++####################################################################### ++# Functions invoked by resource manager actions ++ ++aodh_evaluator_check_port() { ++# This function has been taken from the squid RA and improved a bit ++# The length of the integer must be 4 ++# Examples of valid port: "1080", "0080" ++# Examples of invalid port: "1080bad", "0", "0000", "" ++ ++ local int ++ local cnt ++ ++ int="$1" ++ cnt=${#int} ++ echo $int |egrep -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*' ++ ++ if [ $? -ne 0 ] || [ $cnt -ne 4 ]; then ++ ocf_log err "Invalid port number: $1" ++ exit $OCF_ERR_CONFIGURED ++ fi ++} ++ ++aodh_evaluator_validate() { ++ local rc ++ ++ check_binary $OCF_RESKEY_binary ++ check_binary netstat ++ aodh_evaluator_check_port $OCF_RESKEY_amqp_server_port ++ ++ # A config file on shared storage that is not available ++ # during probes is OK. ++ if [ ! -f $OCF_RESKEY_config ]; then ++ if ! 
ocf_is_probe; then ++ ocf_log err "Config $OCF_RESKEY_config doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" ++ fi ++ ++ getent passwd $OCF_RESKEY_user >/dev/null 2>&1 ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "User $OCF_RESKEY_user doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ++ true ++} ++ ++aodh_evaluator_status() { ++ local pid ++ local rc ++ ++ if [ ! -f $OCF_RESKEY_pid ]; then ++ ocf_log info "OpenStack Alarming Evaluator (aodh-evaluator) is not running" ++ return $OCF_NOT_RUNNING ++ else ++ pid=`cat $OCF_RESKEY_pid` ++ fi ++ ++ ocf_run -warn kill -s 0 $pid ++ rc=$? ++ if [ $rc -eq 0 ]; then ++ return $OCF_SUCCESS ++ else ++ ocf_log info "Old PID file found, but OpenStack Alarming Evaluator (aodh-evaluator) is not running" ++ rm -f $OCF_RESKEY_pid ++ return $OCF_NOT_RUNNING ++ fi ++} ++ ++aodh_evaluator_monitor() { ++ local rc ++ local pid ++ local scheduler_amqp_check ++ ++ aodh_evaluator_status ++ rc=$? ++ ++ # If status returned anything but success, return that immediately ++ if [ $rc -ne $OCF_SUCCESS ]; then ++ return $rc ++ fi ++ ++ ocf_log debug "OpenStack Alarming Evaluator (aodh-evaluator) monitor succeeded" ++ return $OCF_SUCCESS ++} ++ ++aodh_evaluator_start() { ++ local rc ++ ++ aodh_evaluator_status ++ rc=$? ++ if [ $rc -eq $OCF_SUCCESS ]; then ++ ocf_log info "OpenStack Alarming Evaluator (aodh-evaluator) already running" ++ return $OCF_SUCCESS ++ fi ++ ++ # run the actual aodh-evaluator daemon. Don't use ocf_run as we're sending the tool's output ++ # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. ++ su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ ++ $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid ++ ++ # Spin waiting for the server to come up. ++ while true; do ++ aodh_evaluator_monitor ++ rc=$? ++ [ $rc -eq $OCF_SUCCESS ] && break ++ if [ $rc -ne $OCF_NOT_RUNNING ]; then ++ ocf_log err "OpenStack Alarming Evaluator (aodh-evaluator) start failed" ++ exit $OCF_ERR_GENERIC ++ fi ++ sleep 1 ++ done ++ ++ ocf_log info "OpenStack Alarming Evaluator (aodh-evaluator) started" ++ return $OCF_SUCCESS ++} ++ ++aodh_evaluator_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ ++aodh_evaluator_stop() { ++ local rc ++ local pid ++ ++ aodh_evaluator_status ++ rc=$? ++ if [ $rc -eq $OCF_NOT_RUNNING ]; then ++ ocf_log info "OpenStack Alarming Evaluator (aodh-evaluator) already stopped" ++ aodh_evaluator_confirm_stop ++ return $OCF_SUCCESS ++ fi ++ ++ # Try SIGTERM ++ pid=`cat $OCF_RESKEY_pid` ++ ocf_run kill -s TERM $pid ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "OpenStack Alarming Evaluator (aodh-evaluator) couldn't be stopped" ++ aodh_evaluator_confirm_stop ++ exit $OCF_ERR_GENERIC ++ fi ++ ++ # stop waiting ++ shutdown_timeout=2 ++ if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then ++ shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) ++ fi ++ count=0 ++ while [ $count -lt $shutdown_timeout ]; do ++ aodh_evaluator_status ++ rc=$? 
++ if [ $rc -eq $OCF_NOT_RUNNING ]; then ++ break ++ fi ++ count=`expr $count + 1` ++ sleep 1 ++ ocf_log debug "OpenStack Alarming Evaluator (aodh-evaluator) still hasn't stopped yet. Waiting ..." ++ done ++ ++ aodh_evaluator_status ++ rc=$? ++ if [ $rc -ne $OCF_NOT_RUNNING ]; then ++ # SIGTERM didn't help either, try SIGKILL ++ ocf_log info "OpenStack Alarming Evaluator (aodh-evaluator) failed to stop after ${shutdown_timeout}s \ ++ using SIGTERM. Trying SIGKILL ..." ++ ocf_run kill -s KILL $pid ++ fi ++ aodh_evaluator_confirm_stop ++ ++ ocf_log info "OpenStack Alarming Evaluator (aodh-evaluator) stopped" ++ ++ rm -f $OCF_RESKEY_pid ++ ++ return $OCF_SUCCESS ++} ++ ++####################################################################### ++ ++case "$1" in ++ meta-data) meta_data ++ exit $OCF_SUCCESS;; ++ usage|help) usage ++ exit $OCF_SUCCESS;; ++esac ++ ++# Anything except meta-data and help must pass validation ++aodh_evaluator_validate || exit $? ++ ++# What kind of method was invoked? ++case "$1" in ++ start) aodh_evaluator_start;; ++ stop) aodh_evaluator_stop;; ++ status) aodh_evaluator_status;; ++ monitor) aodh_evaluator_monitor;; ++ validate-all) ;; ++ *) usage ++ exit $OCF_ERR_UNIMPLEMENTED;; ++esac +--- /dev/null ++++ b/ocf/aodh-listener +@@ -0,0 +1,360 @@ ++#!/bin/sh ++# ++# ++# OpenStack Alarming Listener Service (aodh-listener) ++# ++# Description: Manages an OpenStack Alarming Listener Service (aodh-listener) process as an HA resource ++# ++# Authors: Emilien Macchi ++# Mainly inspired by the Nova Scheduler resource agent written by Sebastien Han ++# ++# Support: openstack@lists.launchpad.net ++# License: Apache Software License (ASL) 2.0 ++# ++# Copyright (c) 2014-2016 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# ++# ++# See usage() function below for more details ... ++# ++# OCF instance parameters: ++# OCF_RESKEY_binary ++# OCF_RESKEY_config ++# OCF_RESKEY_user ++# OCF_RESKEY_pid ++# OCF_RESKEY_monitor_binary ++# OCF_RESKEY_amqp_server_port ++# OCF_RESKEY_additional_parameters ++####################################################################### ++# Initialization: ++ ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++. /usr/bin/tsconfig ++ ++####################################################################### ++ ++# Fill in some defaults if no values are specified ++ ++OCF_RESKEY_binary_default="aodh-listener" ++OCF_RESKEY_config_default="/etc/aodh/aodh.conf" ++OCF_RESKEY_user_default="root" ++OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" ++OCF_RESKEY_amqp_server_port_default="5672" ++ ++: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} ++: ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} ++: ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} ++: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} ++: ${OCF_RESKEY_amqp_server_port=${OCF_RESKEY_amqp_server_port_default}} ++ ++####################################################################### ++ ++usage() { ++ cat < ++ ++ ++1.0 ++ ++ ++Resource agent for the OpenStack Alarming Listener Service (aodh-listener) ++May manage a aodh-listener instance or a clone set that ++creates a distributed aodh-listener cluster. 
++ ++Manages the OpenStack Alarming Listener Service (aodh-listener) ++ ++ ++ ++ ++Location of the OpenStack Alarming Listener server binary (aodh-listener) ++ ++OpenStack Alarming Listener server binary (aodh-listener) ++ ++ ++ ++ ++ ++Location of the OpenStack Alarming Listener Service (aodh-listener) configuration file ++ ++OpenStack Alarming Listener (aodh-listener registry) config file ++ ++ ++ ++ ++ ++User running OpenStack Alarming Listener Service (aodh-listener) ++ ++OpenStack Alarming Listener Service (aodh-listener) user ++ ++ ++ ++ ++ ++The pid file to use for this OpenStack Alarming Listener Service (aodh-listener) instance ++ ++OpenStack Alarming Listener Service (aodh-listener) pid file ++ ++ ++ ++ ++ ++The listening port number of the AMQP server. Use for monitoring purposes ++ ++AMQP listening port ++ ++ ++ ++ ++ ++ ++Additional parameters to pass on to the OpenStack Alarming Listener Service (aodh-listener) ++ ++Additional parameters for aodh-listener ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++####################################################################### ++# Functions invoked by resource manager actions ++ ++aodh_listener_check_port() { ++# This function has been taken from the squid RA and improved a bit ++# The length of the integer must be 4 ++# Examples of valid port: "1080", "0080" ++# Examples of invalid port: "1080bad", "0", "0000", "" ++ ++ local int ++ local cnt ++ ++ int="$1" ++ cnt=${#int} ++ echo $int |egrep -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*' ++ ++ if [ $? -ne 0 ] || [ $cnt -ne 4 ]; then ++ ocf_log err "Invalid port number: $1" ++ exit $OCF_ERR_CONFIGURED ++ fi ++} ++ ++aodh_listener_validate() { ++ local rc ++ ++ check_binary $OCF_RESKEY_binary ++ check_binary netstat ++ aodh_listener_check_port $OCF_RESKEY_amqp_server_port ++ ++ # A config file on shared storage that is not available ++ # during probes is OK. ++ if [ ! -f $OCF_RESKEY_config ]; then ++ if ! ocf_is_probe; then ++ ocf_log err "Config $OCF_RESKEY_config doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" ++ fi ++ ++ getent passwd $OCF_RESKEY_user >/dev/null 2>&1 ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "User $OCF_RESKEY_user doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ++ true ++} ++ ++aodh_listener_status() { ++ local pid ++ local rc ++ ++ if [ ! -f $OCF_RESKEY_pid ]; then ++ ocf_log info "OpenStack Alarming Listener (aodh-listener) is not running" ++ return $OCF_NOT_RUNNING ++ else ++ pid=`cat $OCF_RESKEY_pid` ++ fi ++ ++ ocf_run -warn kill -s 0 $pid ++ rc=$? ++ if [ $rc -eq 0 ]; then ++ return $OCF_SUCCESS ++ else ++ ocf_log info "Old PID file found, but OpenStack Alarming Listener (aodh-listener) is not running" ++ rm -f $OCF_RESKEY_pid ++ return $OCF_NOT_RUNNING ++ fi ++} ++ ++aodh_listener_monitor() { ++ local rc ++ local pid ++ local scheduler_amqp_check ++ ++ aodh_listener_status ++ rc=$? ++ ++ # If status returned anything but success, return that immediately ++ if [ $rc -ne $OCF_SUCCESS ]; then ++ return $rc ++ fi ++ ++ ocf_log debug "OpenStack Alarming Listener (aodh-listener) monitor succeeded" ++ return $OCF_SUCCESS ++} ++ ++aodh_listener_start() { ++ local rc ++ ++ aodh_listener_status ++ rc=$? ++ if [ $rc -eq $OCF_SUCCESS ]; then ++ ocf_log info "OpenStack Alarming Listener (aodh-listener) already running" ++ return $OCF_SUCCESS ++ fi ++ ++ # run the actual aodh-listener daemon. 
++    # Don't use ocf_run as we're sending the tool's output
++    # straight to /dev/null anyway and using ocf_run would break stdout-redirection here.
++    su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \
++        $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid
++
++    # Spin waiting for the server to come up.
++    while true; do
++        aodh_listener_monitor
++        rc=$?
++        [ $rc -eq $OCF_SUCCESS ] && break
++        if [ $rc -ne $OCF_NOT_RUNNING ]; then
++            ocf_log err "OpenStack Alarming Listener (aodh-listener) start failed"
++            exit $OCF_ERR_GENERIC
++        fi
++        sleep 1
++    done
++
++    ocf_log info "OpenStack Alarming Listener (aodh-listener) started"
++    return $OCF_SUCCESS
++}
++
++aodh_listener_confirm_stop() {
++    local my_binary
++    local my_processes
++
++    my_binary=`which ${OCF_RESKEY_binary}`
++    my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"`
++
++    if [ -n "${my_processes}" ]
++    then
++        ocf_log info "About to SIGKILL the following: ${my_processes}"
++        pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"
++    fi
++}
++
++aodh_listener_stop() {
++    local rc
++    local pid
++
++    aodh_listener_status
++    rc=$?
++    if [ $rc -eq $OCF_NOT_RUNNING ]; then
++        ocf_log info "OpenStack Alarming Listener (aodh-listener) already stopped"
++        aodh_listener_confirm_stop
++        return $OCF_SUCCESS
++    fi
++
++    # Try SIGTERM
++    pid=`cat $OCF_RESKEY_pid`
++    ocf_run kill -s TERM $pid
++    rc=$?
++    if [ $rc -ne 0 ]; then
++        ocf_log err "OpenStack Alarming Listener (aodh-listener) couldn't be stopped"
++        aodh_listener_confirm_stop
++        exit $OCF_ERR_GENERIC
++    fi
++
++    # wait for the process to stop
++    shutdown_timeout=2
++    if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then
++        shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5))
++    fi
++    count=0
++    while [ $count -lt $shutdown_timeout ]; do
++        aodh_listener_status
++        rc=$?
++        if [ $rc -eq $OCF_NOT_RUNNING ]; then
++            break
++        fi
++        count=`expr $count + 1`
++        sleep 1
++        ocf_log debug "OpenStack Alarming Listener (aodh-listener) still hasn't stopped yet. Waiting ..."
++    done
++
++    aodh_listener_status
++    rc=$?
++    if [ $rc -ne $OCF_NOT_RUNNING ]; then
++        # SIGTERM didn't help either, try SIGKILL
++        ocf_log info "OpenStack Alarming Listener (aodh-listener) failed to stop after ${shutdown_timeout}s \
++            using SIGTERM. Trying SIGKILL ..."
++        ocf_run kill -s KILL $pid
++    fi
++    aodh_listener_confirm_stop
++
++    ocf_log info "OpenStack Alarming Listener (aodh-listener) stopped"
++
++    rm -f $OCF_RESKEY_pid
++
++    return $OCF_SUCCESS
++}
++
++#######################################################################
++
++case "$1" in
++    meta-data) meta_data
++        exit $OCF_SUCCESS;;
++    usage|help) usage
++        exit $OCF_SUCCESS;;
++esac
++
++# Anything except meta-data and help must pass validation
++aodh_listener_validate || exit $?
++
++# What kind of method was invoked?
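++# Pacemaker invokes this agent with a single action argument, passing
++# parameters as OCF_RESKEY_* environment variables. A rough manual smoke
++# test (illustrative only; assumes ocf-shellfuncs and /usr/bin/tsconfig
++# exist on the host):
++#   OCF_ROOT=/usr/lib/ocf OCF_RESOURCE_INSTANCE=aodh-listener \
++#       ./aodh-listener monitor; echo "rc=$?"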
++case "$1" in ++ start) aodh_listener_start;; ++ stop) aodh_listener_stop;; ++ status) aodh_listener_status;; ++ monitor) aodh_listener_monitor;; ++ validate-all) ;; ++ *) usage ++ exit $OCF_ERR_UNIMPLEMENTED;; ++esac +--- /dev/null ++++ b/ocf/aodh-notifier +@@ -0,0 +1,360 @@ ++#!/bin/sh ++# ++# ++# OpenStack Alarming Notifier Service (aodh-notifier) ++# ++# Description: Manages an OpenStack Alarming Notifier Service (aodh-notifier) process as an HA resource ++# ++# Authors: Emilien Macchi ++# Mainly inspired by the Nova Scheduler resource agent written by Sebastien Han ++# ++# Support: openstack@lists.launchpad.net ++# License: Apache Software License (ASL) 2.0 ++# ++# Copyright (c) 2014-2016 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# ++# ++# See usage() function below for more details ... ++# ++# OCF instance parameters: ++# OCF_RESKEY_binary ++# OCF_RESKEY_config ++# OCF_RESKEY_user ++# OCF_RESKEY_pid ++# OCF_RESKEY_monitor_binary ++# OCF_RESKEY_amqp_server_port ++# OCF_RESKEY_additional_parameters ++####################################################################### ++# Initialization: ++ ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++. /usr/bin/tsconfig ++ ++####################################################################### ++ ++# Fill in some defaults if no values are specified ++ ++OCF_RESKEY_binary_default="aodh-notifier" ++OCF_RESKEY_config_default="/etc/aodh/aodh.conf" ++OCF_RESKEY_user_default="root" ++OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" ++OCF_RESKEY_amqp_server_port_default="5672" ++ ++: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} ++: ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} ++: ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} ++: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} ++: ${OCF_RESKEY_amqp_server_port=${OCF_RESKEY_amqp_server_port_default}} ++ ++####################################################################### ++ ++usage() { ++ cat < ++ ++ ++1.0 ++ ++ ++Resource agent for the OpenStack Alarming Notifier Service (aodh-notifier) ++May manage a aodh-notifier instance or a clone set that ++creates a distributed aodh-notifier cluster. ++ ++Manages the OpenStack Alarming Notifier Service (aodh-notifier) ++ ++ ++ ++ ++Location of the OpenStack Alarming Notifier server binary (aodh-notifier) ++ ++OpenStack Alarming Notifier server binary (aodh-notifier) ++ ++ ++ ++ ++ ++Location of the OpenStack Alarming Notifier Service (aodh-notifier) configuration file ++ ++OpenStack Alarming Notifier (aodh-notifier registry) config file ++ ++ ++ ++ ++ ++User running OpenStack Alarming Notifier Service (aodh-notifier) ++ ++OpenStack Alarming Notifier Service (aodh-notifier) user ++ ++ ++ ++ ++ ++The pid file to use for this OpenStack Alarming Notifier Service (aodh-notifier) instance ++ ++OpenStack Alarming Notifier Service (aodh-notifier) pid file ++ ++ ++ ++ ++ ++The listening port number of the AMQP server. 
Use for monitoring purposes ++ ++AMQP listening port ++ ++ ++ ++ ++ ++ ++Additional parameters to pass on to the OpenStack Alarming Notifier Service (aodh-notifier) ++ ++Additional parameters for aodh-notifier ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++####################################################################### ++# Functions invoked by resource manager actions ++ ++aodh_notifier_check_port() { ++# This function has been taken from the squid RA and improved a bit ++# The length of the integer must be 4 ++# Examples of valid port: "1080", "0080" ++# Examples of invalid port: "1080bad", "0", "0000", "" ++ ++ local int ++ local cnt ++ ++ int="$1" ++ cnt=${#int} ++ echo $int |egrep -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*' ++ ++ if [ $? -ne 0 ] || [ $cnt -ne 4 ]; then ++ ocf_log err "Invalid port number: $1" ++ exit $OCF_ERR_CONFIGURED ++ fi ++} ++ ++aodh_notifier_validate() { ++ local rc ++ ++ check_binary $OCF_RESKEY_binary ++ check_binary netstat ++ aodh_notifier_check_port $OCF_RESKEY_amqp_server_port ++ ++ # A config file on shared storage that is not available ++ # during probes is OK. ++ if [ ! -f $OCF_RESKEY_config ]; then ++ if ! ocf_is_probe; then ++ ocf_log err "Config $OCF_RESKEY_config doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" ++ fi ++ ++ getent passwd $OCF_RESKEY_user >/dev/null 2>&1 ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "User $OCF_RESKEY_user doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ++ true ++} ++ ++aodh_notifier_status() { ++ local pid ++ local rc ++ ++ if [ ! -f $OCF_RESKEY_pid ]; then ++ ocf_log info "OpenStack Alarming Notifier (aodh-notifier) is not running" ++ return $OCF_NOT_RUNNING ++ else ++ pid=`cat $OCF_RESKEY_pid` ++ fi ++ ++ ocf_run -warn kill -s 0 $pid ++ rc=$? ++ if [ $rc -eq 0 ]; then ++ return $OCF_SUCCESS ++ else ++ ocf_log info "Old PID file found, but OpenStack Alarming Notifier (aodh-notifier) is not running" ++ rm -f $OCF_RESKEY_pid ++ return $OCF_NOT_RUNNING ++ fi ++} ++ ++aodh_notifier_monitor() { ++ local rc ++ local pid ++ local scheduler_amqp_check ++ ++ aodh_notifier_status ++ rc=$? ++ ++ # If status returned anything but success, return that immediately ++ if [ $rc -ne $OCF_SUCCESS ]; then ++ return $rc ++ fi ++ ++ ocf_log debug "OpenStack Alarming Notifier (aodh-notifier) monitor succeeded" ++ return $OCF_SUCCESS ++} ++ ++aodh_notifier_start() { ++ local rc ++ ++ aodh_notifier_status ++ rc=$? ++ if [ $rc -eq $OCF_SUCCESS ]; then ++ ocf_log info "OpenStack Alarming Notifier (aodh-notifier) already running" ++ return $OCF_SUCCESS ++ fi ++ ++ # run the actual aodh-notifier daemon. Don't use ocf_run as we're sending the tool's output ++ # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. ++ su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ ++ $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid ++ ++ # Spin waiting for the server to come up. ++ while true; do ++ aodh_notifier_monitor ++ rc=$? 
++        [ $rc -eq $OCF_SUCCESS ] && break
++        if [ $rc -ne $OCF_NOT_RUNNING ]; then
++            ocf_log err "OpenStack Alarming Notifier (aodh-notifier) start failed"
++            exit $OCF_ERR_GENERIC
++        fi
++        sleep 1
++    done
++
++    ocf_log info "OpenStack Alarming Notifier (aodh-notifier) started"
++    return $OCF_SUCCESS
++}
++
++aodh_notifier_confirm_stop() {
++    local my_binary
++    local my_processes
++
++    my_binary=`which ${OCF_RESKEY_binary}`
++    my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"`
++
++    if [ -n "${my_processes}" ]
++    then
++        ocf_log info "About to SIGKILL the following: ${my_processes}"
++        pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"
++    fi
++}
++
++aodh_notifier_stop() {
++    local rc
++    local pid
++
++    aodh_notifier_status
++    rc=$?
++    if [ $rc -eq $OCF_NOT_RUNNING ]; then
++        ocf_log info "OpenStack Alarming Notifier (aodh-notifier) already stopped"
++        aodh_notifier_confirm_stop
++        return $OCF_SUCCESS
++    fi
++
++    # Try SIGTERM
++    pid=`cat $OCF_RESKEY_pid`
++    ocf_run kill -s TERM $pid
++    rc=$?
++    if [ $rc -ne 0 ]; then
++        ocf_log err "OpenStack Alarming Notifier (aodh-notifier) couldn't be stopped"
++        aodh_notifier_confirm_stop
++        exit $OCF_ERR_GENERIC
++    fi
++
++    # wait for the process to stop
++    shutdown_timeout=2
++    if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then
++        shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5))
++    fi
++    count=0
++    while [ $count -lt $shutdown_timeout ]; do
++        aodh_notifier_status
++        rc=$?
++        if [ $rc -eq $OCF_NOT_RUNNING ]; then
++            break
++        fi
++        count=`expr $count + 1`
++        sleep 1
++        ocf_log debug "OpenStack Alarming Notifier (aodh-notifier) still hasn't stopped yet. Waiting ..."
++    done
++
++    aodh_notifier_status
++    rc=$?
++    if [ $rc -ne $OCF_NOT_RUNNING ]; then
++        # SIGTERM didn't help either, try SIGKILL
++        ocf_log info "OpenStack Alarming Notifier (aodh-notifier) failed to stop after ${shutdown_timeout}s \
++            using SIGTERM. Trying SIGKILL ..."
++        ocf_run kill -s KILL $pid
++    fi
++    aodh_notifier_confirm_stop
++
++    ocf_log info "OpenStack Alarming Notifier (aodh-notifier) stopped"
++
++    rm -f $OCF_RESKEY_pid
++
++    return $OCF_SUCCESS
++}
++
++#######################################################################
++
++case "$1" in
++    meta-data) meta_data
++        exit $OCF_SUCCESS;;
++    usage|help) usage
++        exit $OCF_SUCCESS;;
++esac
++
++# Anything except meta-data and help must pass validation
++aodh_notifier_validate || exit $?
++
++# What kind of method was invoked?
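++# The exit status of the action function chosen below falls through as the
++# exit status of the whole script, which is what the cluster manager acts
++# on (OCF_SUCCESS=0, OCF_ERR_GENERIC=1, OCF_NOT_RUNNING=7).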
++case "$1" in ++ start) aodh_notifier_start;; ++ stop) aodh_notifier_stop;; ++ status) aodh_notifier_status;; ++ monitor) aodh_notifier_monitor;; ++ validate-all) ;; ++ *) usage ++ exit $OCF_ERR_UNIMPLEMENTED;; ++esac diff --git a/openstack/openstack-ras/openstack-ras/ceilometer-mem-db.patch b/openstack/openstack-ras/openstack-ras/ceilometer-mem-db.patch new file mode 100644 index 00000000..09fc6239 --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/ceilometer-mem-db.patch @@ -0,0 +1,374 @@ +Index: git/ocf/ceilometer-mem-db +=================================================================== +--- /dev/null ++++ git/ocf/ceilometer-mem-db +@@ -0,0 +1,369 @@ ++#!/bin/sh ++# ++# ++# OpenStack Ceilometer Mem DB Service (ceilometer-mem-db) ++# ++# Description: Manages an OpenStack Ceilometer Mem DB Service (ceilometer-mem-db) process as an HA resource ++# ++# Authors: Emilien Macchi ++# Mainly inspired by the Nova Scheduler resource agent written by Sebastien Han ++# ++# Support: openstack@lists.launchpad.net ++# License: Apache Software License (ASL) 2.0 ++# ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# ++# ++# See usage() function below for more details ... ++# ++# OCF instance parameters: ++# OCF_RESKEY_binary ++# OCF_RESKEY_config ++# OCF_RESKEY_user ++# OCF_RESKEY_pid ++# OCF_RESKEY_monitor_binary ++# OCF_RESKEY_amqp_server_port ++# OCF_RESKEY_additional_parameters ++####################################################################### ++# Initialization: ++ ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++ ++####################################################################### ++ ++# Fill in some defaults if no values are specified ++ ++OCF_RESKEY_binary_default="ceilometer-mem-db" ++OCF_RESKEY_config_default="/etc/ceilometer/ceilometer.conf" ++OCF_RESKEY_user_default="root" ++OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" ++OCF_RESKEY_amqp_server_port_default="5672" ++ ++: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} ++: ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} ++: ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} ++: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} ++: ${OCF_RESKEY_amqp_server_port=${OCF_RESKEY_amqp_server_port_default}} ++ ++####################################################################### ++ ++usage() { ++ cat < ++ ++ ++1.0 ++ ++ ++Resource agent for the OpenStack Ceilometer Mem DB Service (ceilometer-mem-db) ++May manage a ceilometer-mem-db instance or a clone set that ++creates a distributed ceilometer-mem-db cluster. ++ ++Manages the OpenStack Ceilometer Mem DB Service (ceilometer-mem-db) ++ ++ ++ ++ ++Location of the OpenStack Ceilometer Mem DB server binary (ceilometer-mem-db) ++ ++OpenStack Ceilometer Mem DB server binary (ceilometer-mem-db) ++ ++ ++ ++ ++ ++Location of the OpenStack Ceilometer Mem DB Service (ceilometer-mem-db) configuration file ++ ++OpenStack Ceilometer Mem DB (ceilometer-mem-db registry) config file ++ ++ ++ ++ ++ ++User running OpenStack Ceilometer Mem DB Service (ceilometer-mem-db) ++ ++OpenStack Ceilometer Mem DB Service (ceilometer-mem-db) user ++ ++ ++ ++ ++ ++The pid file to use for this OpenStack Ceilometer Mem DB Service (ceilometer-mem-db) instance ++ ++OpenStack Ceilometer Mem DB Service (ceilometer-mem-db) pid file ++ ++ ++ ++ ++ ++The listening port number of the AMQP server. 
Use for monitoring purposes ++ ++AMQP listening port ++ ++ ++ ++ ++ ++ ++Additional parameters to pass on to the OpenStack Ceilometer Mem DB Service (ceilometer-mem-db) ++ ++Additional parameters for ceilometer-mem-db ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++####################################################################### ++# Functions invoked by resource manager actions ++ ++ceilometer_mem_db_check_port() { ++# This function has been taken from the squid RA and improved a bit ++# The length of the integer must be 4 ++# Examples of valid port: "1080", "0080" ++# Examples of invalid port: "1080bad", "0", "0000", "" ++ ++ local int ++ local cnt ++ ++ int="$1" ++ cnt=${#int} ++ echo $int |egrep -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*' ++ ++ if [ $? -ne 0 ] || [ $cnt -ne 4 ]; then ++ ocf_log err "Invalid port number: $1" ++ exit $OCF_ERR_CONFIGURED ++ fi ++} ++ ++ceilometer_mem_db_validate() { ++ local rc ++ ++ check_binary $OCF_RESKEY_binary ++ check_binary netstat ++ ceilometer_mem_db_check_port $OCF_RESKEY_amqp_server_port ++ ++ # A config file on shared storage that is not available ++ # during probes is OK. ++ if [ ! -f $OCF_RESKEY_config ]; then ++ if ! ocf_is_probe; then ++ ocf_log err "Config $OCF_RESKEY_config doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" ++ fi ++ ++ getent passwd $OCF_RESKEY_user >/dev/null 2>&1 ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "User $OCF_RESKEY_user doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ++ true ++} ++ ++ceilometer_mem_db_status() { ++ local pid ++ local rc ++ ++ if [ ! -f $OCF_RESKEY_pid ]; then ++ ocf_log info "OpenStack Ceilometer Mem DB (ceilometer-mem-db) is not running" ++ return $OCF_NOT_RUNNING ++ else ++ pid=`cat $OCF_RESKEY_pid` ++ fi ++ ++ ocf_run -warn kill -s 0 $pid ++ rc=$? ++ if [ $rc -eq 0 ]; then ++ return $OCF_SUCCESS ++ else ++ ocf_log info "Old PID file found, but OpenStack Ceilometer Mem DB (ceilometer-mem-db) is not running" ++ rm -f $OCF_RESKEY_pid ++ return $OCF_NOT_RUNNING ++ fi ++} ++ ++ceilometer_mem_db_monitor() { ++ local rc ++ local pid ++ local scheduler_amqp_check ++ ++ ceilometer_mem_db_status ++ rc=$? ++ ++ # If status returned anything but success, return that immediately ++ if [ $rc -ne $OCF_SUCCESS ]; then ++ return $rc ++ fi ++ ++ # Check the connections according to the PID. ++ # We are sure to hit the scheduler process and not other Cinder process with the same connection behavior (for example cinder-api) ++ pid=`cat $OCF_RESKEY_pid` ++ scheduler_amqp_check=`netstat -punt | grep -s "$OCF_RESKEY_amqp_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "Mem DB is not connected to the AMQP server : $rc" ++ return $OCF_NOT_RUNNING ++ fi ++ ++ ocf_log debug "OpenStack Ceilometer Mem DB (ceilometer-mem-db) monitor succeeded" ++ return $OCF_SUCCESS ++} ++ ++ceilometer_mem_db_start() { ++ local rc ++ ++ ceilometer_mem_db_status ++ rc=$? ++ if [ $rc -eq $OCF_SUCCESS ]; then ++ ocf_log info "OpenStack Ceilometer Mem DB (ceilometer-mem-db) already running" ++ return $OCF_SUCCESS ++ fi ++ ++ # run the actual ceilometer-mem-db daemon. Don't use ocf_run as we're sending the tool's output ++ # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. ++ su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ ++ $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' 
> $OCF_RESKEY_pid
++
++    # Spin waiting for the server to come up.
++    while true; do
++        ceilometer_mem_db_monitor
++        rc=$?
++        [ $rc -eq $OCF_SUCCESS ] && break
++        if [ $rc -ne $OCF_NOT_RUNNING ]; then
++            ocf_log err "OpenStack Ceilometer Mem DB (ceilometer-mem-db) start failed"
++            exit $OCF_ERR_GENERIC
++        fi
++        sleep 1
++    done
++
++    ocf_log info "OpenStack Ceilometer Mem DB (ceilometer-mem-db) started"
++    return $OCF_SUCCESS
++}
++
++ceilometer_mem_db_confirm_stop() {
++    local my_binary
++    local my_processes
++
++    my_binary=`which ${OCF_RESKEY_binary}`
++    my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"`
++
++    if [ -n "${my_processes}" ]
++    then
++        ocf_log info "About to SIGKILL the following: ${my_processes}"
++        pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"
++    fi
++}
++
++ceilometer_mem_db_stop() {
++    local rc
++    local pid
++
++    ceilometer_mem_db_status
++    rc=$?
++    if [ $rc -eq $OCF_NOT_RUNNING ]; then
++        ocf_log info "OpenStack Ceilometer Mem DB (ceilometer-mem-db) already stopped"
++        ceilometer_mem_db_confirm_stop
++        return $OCF_SUCCESS
++    fi
++
++    # Try SIGTERM
++    pid=`cat $OCF_RESKEY_pid`
++    ocf_run kill -s TERM $pid
++    rc=$?
++    if [ $rc -ne 0 ]; then
++        ocf_log err "OpenStack Ceilometer Mem DB (ceilometer-mem-db) couldn't be stopped"
++        ceilometer_mem_db_confirm_stop
++        exit $OCF_ERR_GENERIC
++    fi
++
++    # wait for the process to stop
++    shutdown_timeout=15
++    if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then
++        shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5))
++    fi
++    count=0
++    while [ $count -lt $shutdown_timeout ]; do
++        ceilometer_mem_db_status
++        rc=$?
++        if [ $rc -eq $OCF_NOT_RUNNING ]; then
++            break
++        fi
++        count=`expr $count + 1`
++        sleep 1
++        ocf_log debug "OpenStack Ceilometer Mem DB (ceilometer-mem-db) still hasn't stopped yet. Waiting ..."
++    done
++
++    ceilometer_mem_db_status
++    rc=$?
++    if [ $rc -ne $OCF_NOT_RUNNING ]; then
++        # SIGTERM didn't help either, try SIGKILL
++        ocf_log info "OpenStack Ceilometer Mem DB (ceilometer-mem-db) failed to stop after ${shutdown_timeout}s \
++            using SIGTERM. Trying SIGKILL ..."
++        ocf_run kill -s KILL $pid
++    fi
++    ceilometer_mem_db_confirm_stop
++
++    ocf_log info "OpenStack Ceilometer Mem DB (ceilometer-mem-db) stopped"
++
++    rm -f $OCF_RESKEY_pid
++
++    return $OCF_SUCCESS
++}
++
++#######################################################################
++
++case "$1" in
++    meta-data) meta_data
++        exit $OCF_SUCCESS;;
++    usage|help) usage
++        exit $OCF_SUCCESS;;
++esac
++
++# Anything except meta-data and help must pass validation
++ceilometer_mem_db_validate || exit $?
++
++# What kind of method was invoked?
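++# validate-all is intentionally an empty case below: validation has already
++# run above for every action except meta-data and usage, so an explicit
++# validate-all request simply exits 0 ($OCF_SUCCESS).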
++case "$1" in ++ start) ceilometer_mem_db_start;; ++ stop) ceilometer_mem_db_stop;; ++ status) ceilometer_mem_db_status;; ++ monitor) ceilometer_mem_db_monitor;; ++ validate-all) ;; ++ *) usage ++ exit $OCF_ERR_UNIMPLEMENTED;; ++esac diff --git a/openstack/openstack-ras/openstack-ras/ceilometer-monitor-child-amqp-status.patch b/openstack/openstack-ras/openstack-ras/ceilometer-monitor-child-amqp-status.patch new file mode 100644 index 00000000..336f497e --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/ceilometer-monitor-child-amqp-status.patch @@ -0,0 +1,28 @@ +Index: git/ocf/ceilometer-collector +=================================================================== +--- git.orig/ocf/ceilometer-collector 2014-08-07 21:08:46.637211162 -0400 ++++ git/ocf/ceilometer-collector 2014-08-07 21:09:24.893475317 -0400 +@@ -223,15 +223,16 @@ + return $rc + fi + +- # Check the connections according to the PID. +- # We are sure to hit the scheduler process and not other Cinder process with the same connection behavior (for example cinder-api) +- pid=`cat $OCF_RESKEY_pid` +- scheduler_amqp_check=`netstat -punt | grep -s "$OCF_RESKEY_amqp_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` +- rc=$? +- if [ $rc -ne 0 ]; then ++ # Check the connections according to the PID of the child process since ++ # the parent is not the one with the AMQP connection ++ ppid=`cat $OCF_RESKEY_pid` ++ pid=`pgrep -P $ppid` ++ scheduler_amqp_check=`netstat -punt | grep -s "$OCF_RESKEY_amqp_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` ++ rc=$? ++ if [ $rc -ne 0 ]; then + ocf_log err "Collector is not connected to the AMQP server : $rc" + return $OCF_NOT_RUNNING +- fi ++ fi + + ocf_log debug "OpenStack Ceilometer Collector (ceilometer-collector) monitor succeeded" + return $OCF_SUCCESS diff --git a/openstack/openstack-ras/openstack-ras/ceilometer-monitor.patch b/openstack/openstack-ras/openstack-ras/ceilometer-monitor.patch new file mode 100644 index 00000000..bf3dc2fc --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/ceilometer-monitor.patch @@ -0,0 +1,22 @@ +Index: git/ocf/ceilometer-api +=================================================================== +--- git.orig/ocf/ceilometer-api ++++ git/ocf/ceilometer-api +@@ -183,7 +183,7 @@ ceilometer_api_validate() { + local rc + + check_binary $OCF_RESKEY_binary +- check_binary netstat ++ check_binary lsof + ceilometer_api_check_port $OCF_RESKEY_api_listen_port + + # A config file on shared storage that is not available +@@ -244,7 +244,7 @@ ceilometer_api_monitor() { + # Check the connections according to the PID. + # We are sure to hit the scheduler process and not other Cinder process with the same connection behavior (for example cinder-api) + pid=`cat $OCF_RESKEY_pid` +- scheduler_amqp_check=`netstat -apunt | grep -s "$OCF_RESKEY_api_listen_port" | grep -s "$pid" | grep -qs "LISTEN"` ++ scheduler_amqp_check=`lsof -nPp ${pid} | grep -s ":${OCF_RESKEY_api_listen_port}\s\+(LISTEN)"` + rc=$? 
+ if [ $rc -ne 0 ]; then + ocf_log err "API is not listening for connections: $rc" diff --git a/openstack/openstack-ras/openstack-ras/ceilometer-version-pipeline.patch b/openstack/openstack-ras/openstack-ras/ceilometer-version-pipeline.patch new file mode 100644 index 00000000..00dd313d --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/ceilometer-version-pipeline.patch @@ -0,0 +1,63 @@ +Index: git/ocf/ceilometer-agent-central +=================================================================== +--- git.orig/ocf/ceilometer-agent-central ++++ git/ocf/ceilometer-agent-central +@@ -34,6 +34,7 @@ + + : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++. /usr/bin/tsconfig + + ####################################################################### + +@@ -41,7 +42,7 @@ + + OCF_RESKEY_binary_default="ceilometer-agent-central" + OCF_RESKEY_config_default="/etc/ceilometer/ceilometer.conf" +-OCF_RESKEY_pipeline_default="/opt/cgcs/ceilometer/pipeline.yaml" ++OCF_RESKEY_pipeline_default="/opt/cgcs/ceilometer/${SW_VERSION}/pipeline.yaml" + OCF_RESKEY_user_default="root" + OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" + OCF_RESKEY_amqp_server_port_default="5672" +Index: git/ocf/ceilometer-agent-notification +=================================================================== +--- git.orig/ocf/ceilometer-agent-notification ++++ git/ocf/ceilometer-agent-notification +@@ -34,6 +34,7 @@ + + : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++. /usr/bin/tsconfig + + ####################################################################### + +@@ -41,7 +42,7 @@ + + OCF_RESKEY_binary_default="ceilometer-agent-notification" + OCF_RESKEY_config_default="/etc/ceilometer/ceilometer.conf" +-OCF_RESKEY_pipeline_default="/opt/cgcs/ceilometer/pipeline.yaml" ++OCF_RESKEY_pipeline_default="/opt/cgcs/ceilometer/${SW_VERSION}/pipeline.yaml" + OCF_RESKEY_user_default="root" + OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" + OCF_RESKEY_amqp_server_port_default="5672" +Index: git/ocf/ceilometer-api +=================================================================== +--- git.orig/ocf/ceilometer-api ++++ git/ocf/ceilometer-api +@@ -34,6 +34,7 @@ + + : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} + . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++. 
/usr/bin/tsconfig + + ####################################################################### + +@@ -41,7 +42,7 @@ + + OCF_RESKEY_binary_default="ceilometer-api" + OCF_RESKEY_config_default="/etc/ceilometer/ceilometer.conf" +-OCF_RESKEY_pipeline_default="/opt/cgcs/ceilometer/pipeline.yaml" ++OCF_RESKEY_pipeline_default="/opt/cgcs/ceilometer/${SW_VERSION}/pipeline.yaml" + OCF_RESKEY_user_default="root" + OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" + OCF_RESKEY_api_listen_port_default="8777" diff --git a/openstack/openstack-ras/openstack-ras/ceilometer.patch b/openstack/openstack-ras/openstack-ras/ceilometer.patch new file mode 100644 index 00000000..232b6f75 --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/ceilometer.patch @@ -0,0 +1,1726 @@ +--- + ocf/ceilometer-agent-central | 12 - + ocf/ceilometer-agent-notification | 335 ++++++++++++++++++++++++++++++++++++ + ocf/ceilometer-alarm-evaluator | 304 +++++++++++++++++++++++++++++++++ + ocf/ceilometer-alarm-notifier | 345 ++++++++++++++++++++++++++++++++++++++ + ocf/ceilometer-api | 345 ++++++++++++++++++++++++++++++++++++++ + ocf/ceilometer-collector | 345 ++++++++++++++++++++++++++++++++++++++ + 6 files changed, 1675 insertions(+), 11 deletions(-) + +--- /dev/null ++++ b/ocf/ceilometer-api +@@ -0,0 +1,345 @@ ++#!/bin/sh ++# ++# ++# OpenStack Ceilometer API Service (ceilometer-api) ++# ++# Description: Manages an OpenStack Ceilometer API Service (ceilometer-api) process as an HA resource ++# ++# Authors: Emilien Macchi ++# Mainly inspired by the Nova Scheduler resource agent written by Sebastien Han ++# ++# Support: openstack@lists.launchpad.net ++# License: Apache Software License (ASL) 2.0 ++# ++# ++# See usage() function below for more details ... ++# ++# OCF instance parameters: ++# OCF_RESKEY_binary ++# OCF_RESKEY_config ++# OCF_RESKEY_user ++# OCF_RESKEY_pid ++# OCF_RESKEY_monitor_binary ++# OCF_RESKEY_api_listen_port ++# OCF_RESKEY_additional_parameters ++####################################################################### ++# Initialization: ++ ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++ ++####################################################################### ++ ++# Fill in some defaults if no values are specified ++ ++OCF_RESKEY_binary_default="ceilometer-api" ++OCF_RESKEY_config_default="/etc/ceilometer/ceilometer.conf" ++OCF_RESKEY_user_default="root" ++OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" ++OCF_RESKEY_api_listen_port_default="8777" ++ ++: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} ++: ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} ++: ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} ++: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} ++: ${OCF_RESKEY_api_listen_port=${OCF_RESKEY_api_listen_port_default}} ++ ++####################################################################### ++ ++usage() { ++ cat < ++ ++ ++1.0 ++ ++ ++Resource agent for the OpenStack Ceilometer API Service (ceilometer-api) ++May manage a ceilometer-api instance or a clone set that ++creates a distributed ceilometer-api cluster. 
++ ++Manages the OpenStack Ceilometer API Service (ceilometer-api) ++ ++ ++ ++ ++Location of the OpenStack Ceilometer API server binary (ceilometer-api) ++ ++OpenStack Ceilometer API server binary (ceilometer-api) ++ ++ ++ ++ ++ ++Location of the OpenStack Ceilometer API Service (ceilometer-api) configuration file ++ ++OpenStack Ceilometer API (ceilometer-api registry) config file ++ ++ ++ ++ ++ ++User running OpenStack Ceilometer API Service (ceilometer-api) ++ ++OpenStack Ceilometer API Service (ceilometer-api) user ++ ++ ++ ++ ++ ++The pid file to use for this OpenStack Ceilometer API Service (ceilometer-api) instance ++ ++OpenStack Ceilometer API Service (ceilometer-api) pid file ++ ++ ++ ++ ++ ++The listening port number of the Ceilometer API. Use for monitoring purposes ++ ++Ceilometer API listening port ++ ++ ++ ++ ++ ++ ++Additional parameters to pass on to the OpenStack Ceilometer API Service (ceilometer-api) ++ ++Additional parameters for ceilometer-api ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++####################################################################### ++# Functions invoked by resource manager actions ++ ++ceilometer_api_check_port() { ++# This function has been taken from the squid RA and improved a bit ++# The length of the integer must be 4 ++# Examples of valid port: "1080", "0080" ++# Examples of invalid port: "1080bad", "0", "0000", "" ++ ++ local int ++ local cnt ++ ++ int="$1" ++ cnt=${#int} ++ echo $int |egrep -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*' ++ ++ if [ $? -ne 0 ] || [ $cnt -ne 4 ]; then ++ ocf_log err "Invalid port number: $1" ++ exit $OCF_ERR_CONFIGURED ++ fi ++} ++ ++ceilometer_api_validate() { ++ local rc ++ ++ check_binary $OCF_RESKEY_binary ++ check_binary netstat ++ ceilometer_api_check_port $OCF_RESKEY_api_listen_port ++ ++ # A config file on shared storage that is not available ++ # during probes is OK. ++ if [ ! -f $OCF_RESKEY_config ]; then ++ if ! ocf_is_probe; then ++ ocf_log err "Config $OCF_RESKEY_config doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" ++ fi ++ ++ getent passwd $OCF_RESKEY_user >/dev/null 2>&1 ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "User $OCF_RESKEY_user doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ++ true ++} ++ ++ceilometer_api_status() { ++ local pid ++ local rc ++ ++ if [ ! -f $OCF_RESKEY_pid ]; then ++ ocf_log info "OpenStack Ceilometer API (ceilometer-api) is not running" ++ return $OCF_NOT_RUNNING ++ else ++ pid=`cat $OCF_RESKEY_pid` ++ fi ++ ++ ocf_run -warn kill -s 0 $pid ++ rc=$? ++ if [ $rc -eq 0 ]; then ++ return $OCF_SUCCESS ++ else ++ ocf_log info "Old PID file found, but OpenStack Ceilometer API (ceilometer-api) is not running" ++ return $OCF_NOT_RUNNING ++ fi ++} ++ ++ceilometer_api_monitor() { ++ local rc ++ local pid ++ local scheduler_amqp_check ++ ++ ceilometer_api_status ++ rc=$? ++ ++ # If status returned anything but success, return that immediately ++ if [ $rc -ne $OCF_SUCCESS ]; then ++ return $rc ++ fi ++ ++ # Check the connections according to the PID. ++ # We are sure to hit the scheduler process and not other Cinder process with the same connection behavior (for example cinder-api) ++ pid=`cat $OCF_RESKEY_pid` ++ scheduler_amqp_check=`netstat -apunt | grep -s "$OCF_RESKEY_api_listen_port" | grep -s "$pid" | grep -qs "LISTEN"` ++ rc=$? 
++ if [ $rc -ne 0 ]; then ++ ocf_log err "API is not listening for connections: $rc" ++ return $OCF_NOT_RUNNING ++ fi ++ ++ ocf_log debug "OpenStack Ceilometer API (ceilometer-api) monitor succeeded" ++ return $OCF_SUCCESS ++} ++ ++ceilometer_api_start() { ++ local rc ++ ++ ceilometer_api_status ++ rc=$? ++ if [ $rc -eq $OCF_SUCCESS ]; then ++ ocf_log info "OpenStack Ceilometer API (ceilometer-api) already running" ++ return $OCF_SUCCESS ++ fi ++ ++ # run the actual ceilometer-api daemon. Don't use ocf_run as we're sending the tool's output ++ # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. ++ su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ ++ $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid ++ ++ # Spin waiting for the server to come up. ++ while true; do ++ ceilometer_api_monitor ++ rc=$? ++ [ $rc -eq $OCF_SUCCESS ] && break ++ if [ $rc -ne $OCF_NOT_RUNNING ]; then ++ ocf_log err "OpenStack Ceilometer API (ceilometer-api) start failed" ++ exit $OCF_ERR_GENERIC ++ fi ++ sleep 1 ++ done ++ ++ ocf_log info "OpenStack Ceilometer API (ceilometer-api) started" ++ return $OCF_SUCCESS ++} ++ ++ceilometer_api_stop() { ++ local rc ++ local pid ++ ++ ceilometer_api_status ++ rc=$? ++ if [ $rc -eq $OCF_NOT_RUNNING ]; then ++ ocf_log info "OpenStack Ceilometer API (ceilometer-api) already stopped" ++ return $OCF_SUCCESS ++ fi ++ ++ # Try SIGTERM ++ pid=`cat $OCF_RESKEY_pid` ++ ocf_run kill -s TERM $pid ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "OpenStack Ceilometer API (ceilometer-api) couldn't be stopped" ++ exit $OCF_ERR_GENERIC ++ fi ++ ++ # stop waiting ++ shutdown_timeout=15 ++ if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then ++ shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) ++ fi ++ count=0 ++ while [ $count -lt $shutdown_timeout ]; do ++ ceilometer_api_status ++ rc=$? ++ if [ $rc -eq $OCF_NOT_RUNNING ]; then ++ break ++ fi ++ count=`expr $count + 1` ++ sleep 1 ++ ocf_log debug "OpenStack Ceilometer API (ceilometer-api) still hasn't stopped yet. Waiting ..." ++ done ++ ++ ceilometer_api_status ++ rc=$? ++ if [ $rc -ne $OCF_NOT_RUNNING ]; then ++ # SIGTERM didn't help either, try SIGKILL ++ ocf_log info "OpenStack Ceilometer API (ceilometer-api) failed to stop after ${shutdown_timeout}s \ ++ using SIGTERM. Trying SIGKILL ..." ++ ocf_run kill -s KILL $pid ++ fi ++ ++ ocf_log info "OpenStack Ceilometer API (ceilometer-api) stopped" ++ ++ rm -f $OCF_RESKEY_pid ++ ++ return $OCF_SUCCESS ++} ++ ++####################################################################### ++ ++case "$1" in ++ meta-data) meta_data ++ exit $OCF_SUCCESS;; ++ usage|help) usage ++ exit $OCF_SUCCESS;; ++esac ++ ++# Anything except meta-data and help must pass validation ++ceilometer_api_validate || exit $? ++ ++# What kind of method was invoked? 
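++# Note: the netstat-based LISTEN probe in ceilometer_api_monitor above is
++# reworked into an lsof-based check by ceilometer-monitor.patch later in
++# this same patch series.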
++case "$1" in ++ start) ceilometer_api_start;; ++ stop) ceilometer_api_stop;; ++ status) ceilometer_api_status;; ++ monitor) ceilometer_api_monitor;; ++ validate-all) ;; ++ *) usage ++ exit $OCF_ERR_UNIMPLEMENTED;; ++esac +--- /dev/null ++++ b/ocf/ceilometer-collector +@@ -0,0 +1,345 @@ ++#!/bin/sh ++# ++# ++# OpenStack Ceilometer Collector Service (ceilometer-collector) ++# ++# Description: Manages an OpenStack Ceilometer Collector Service (ceilometer-collector) process as an HA resource ++# ++# Authors: Emilien Macchi ++# Mainly inspired by the Nova Scheduler resource agent written by Sebastien Han ++# ++# Support: openstack@lists.launchpad.net ++# License: Apache Software License (ASL) 2.0 ++# ++# ++# See usage() function below for more details ... ++# ++# OCF instance parameters: ++# OCF_RESKEY_binary ++# OCF_RESKEY_config ++# OCF_RESKEY_user ++# OCF_RESKEY_pid ++# OCF_RESKEY_monitor_binary ++# OCF_RESKEY_amqp_server_port ++# OCF_RESKEY_additional_parameters ++####################################################################### ++# Initialization: ++ ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++ ++####################################################################### ++ ++# Fill in some defaults if no values are specified ++ ++OCF_RESKEY_binary_default="ceilometer-collector" ++OCF_RESKEY_config_default="/etc/ceilometer/ceilometer.conf" ++OCF_RESKEY_user_default="root" ++OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" ++OCF_RESKEY_amqp_server_port_default="5672" ++ ++: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} ++: ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} ++: ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} ++: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} ++: ${OCF_RESKEY_amqp_server_port=${OCF_RESKEY_amqp_server_port_default}} ++ ++####################################################################### ++ ++usage() { ++ cat < ++ ++ ++1.0 ++ ++ ++Resource agent for the OpenStack Ceilometer Collector Service (ceilometer-collector) ++May manage a ceilometer-collector instance or a clone set that ++creates a distributed ceilometer-collector cluster. ++ ++Manages the OpenStack Ceilometer Collector Service (ceilometer-collector) ++ ++ ++ ++ ++Location of the OpenStack Ceilometer Collector server binary (ceilometer-collector) ++ ++OpenStack Ceilometer Collector server binary (ceilometer-collector) ++ ++ ++ ++ ++ ++Location of the OpenStack Ceilometer Collector Service (ceilometer-collector) configuration file ++ ++OpenStack Ceilometer Collector (ceilometer-collector registry) config file ++ ++ ++ ++ ++ ++User running OpenStack Ceilometer Collector Service (ceilometer-collector) ++ ++OpenStack Ceilometer Collector Service (ceilometer-collector) user ++ ++ ++ ++ ++ ++The pid file to use for this OpenStack Ceilometer Collector Service (ceilometer-collector) instance ++ ++OpenStack Ceilometer Collector Service (ceilometer-collector) pid file ++ ++ ++ ++ ++ ++The listening port number of the AMQP server. 
Use for monitoring purposes ++ ++AMQP listening port ++ ++ ++ ++ ++ ++ ++Additional parameters to pass on to the OpenStack Ceilometer Collector Service (ceilometer-collector) ++ ++Additional parameters for ceilometer-collector ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++####################################################################### ++# Functions invoked by resource manager actions ++ ++ceilometer_collector_check_port() { ++# This function has been taken from the squid RA and improved a bit ++# The length of the integer must be 4 ++# Examples of valid port: "1080", "0080" ++# Examples of invalid port: "1080bad", "0", "0000", "" ++ ++ local int ++ local cnt ++ ++ int="$1" ++ cnt=${#int} ++ echo $int |egrep -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*' ++ ++ if [ $? -ne 0 ] || [ $cnt -ne 4 ]; then ++ ocf_log err "Invalid port number: $1" ++ exit $OCF_ERR_CONFIGURED ++ fi ++} ++ ++ceilometer_collector_validate() { ++ local rc ++ ++ check_binary $OCF_RESKEY_binary ++ check_binary netstat ++ ceilometer_collector_check_port $OCF_RESKEY_amqp_server_port ++ ++ # A config file on shared storage that is not available ++ # during probes is OK. ++ if [ ! -f $OCF_RESKEY_config ]; then ++ if ! ocf_is_probe; then ++ ocf_log err "Config $OCF_RESKEY_config doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" ++ fi ++ ++ getent passwd $OCF_RESKEY_user >/dev/null 2>&1 ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "User $OCF_RESKEY_user doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ++ true ++} ++ ++ceilometer_collector_status() { ++ local pid ++ local rc ++ ++ if [ ! -f $OCF_RESKEY_pid ]; then ++ ocf_log info "OpenStack Ceilometer Collector (ceilometer-collector) is not running" ++ return $OCF_NOT_RUNNING ++ else ++ pid=`cat $OCF_RESKEY_pid` ++ fi ++ ++ ocf_run -warn kill -s 0 $pid ++ rc=$? ++ if [ $rc -eq 0 ]; then ++ return $OCF_SUCCESS ++ else ++ ocf_log info "Old PID file found, but OpenStack Ceilometer Collector (ceilometer-collector) is not running" ++ return $OCF_NOT_RUNNING ++ fi ++} ++ ++ceilometer_collector_monitor() { ++ local rc ++ local pid ++ local scheduler_amqp_check ++ ++ ceilometer_collector_status ++ rc=$? ++ ++ # If status returned anything but success, return that immediately ++ if [ $rc -ne $OCF_SUCCESS ]; then ++ return $rc ++ fi ++ ++ # Check the connections according to the PID. ++ # We are sure to hit the scheduler process and not other Cinder process with the same connection behavior (for example cinder-api) ++ pid=`cat $OCF_RESKEY_pid` ++ scheduler_amqp_check=`netstat -punt | grep -s "$OCF_RESKEY_amqp_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "Collector is not connected to the AMQP server : $rc" ++ return $OCF_NOT_RUNNING ++ fi ++ ++ ocf_log debug "OpenStack Ceilometer Collector (ceilometer-collector) monitor succeeded" ++ return $OCF_SUCCESS ++} ++ ++ceilometer_collector_start() { ++ local rc ++ ++ ceilometer_collector_status ++ rc=$? ++ if [ $rc -eq $OCF_SUCCESS ]; then ++ ocf_log info "OpenStack Ceilometer Collector (ceilometer-collector) already running" ++ return $OCF_SUCCESS ++ fi ++ ++ # run the actual ceilometer-collector daemon. Don't use ocf_run as we're sending the tool's output ++ # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. 
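++    # Quoting note: the double-quoted segment is the command su runs as
++    # $OCF_RESKEY_user; the single-quoted segment appended to it is expanded
++    # by the su shell instead, so the daemon is backgrounded and its PID ($!)
++    # is echoed into $OCF_RESKEY_pid. Schematically (names illustrative):
++    #   su user -s /bin/sh -c "daemon --config-file=cfg"' & echo $!' > pidfile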
++ su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ ++ $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid ++ ++ # Spin waiting for the server to come up. ++ while true; do ++ ceilometer_collector_monitor ++ rc=$? ++ [ $rc -eq $OCF_SUCCESS ] && break ++ if [ $rc -ne $OCF_NOT_RUNNING ]; then ++ ocf_log err "OpenStack Ceilometer Collector (ceilometer-collector) start failed" ++ exit $OCF_ERR_GENERIC ++ fi ++ sleep 1 ++ done ++ ++ ocf_log info "OpenStack Ceilometer Collector (ceilometer-collector) started" ++ return $OCF_SUCCESS ++} ++ ++ceilometer_collector_stop() { ++ local rc ++ local pid ++ ++ ceilometer_collector_status ++ rc=$? ++ if [ $rc -eq $OCF_NOT_RUNNING ]; then ++ ocf_log info "OpenStack Ceilometer Collector (ceilometer-collector) already stopped" ++ return $OCF_SUCCESS ++ fi ++ ++ # Try SIGTERM ++ pid=`cat $OCF_RESKEY_pid` ++ ocf_run kill -s TERM $pid ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "OpenStack Ceilometer Collector (ceilometer-collector) couldn't be stopped" ++ exit $OCF_ERR_GENERIC ++ fi ++ ++ # stop waiting ++ shutdown_timeout=15 ++ if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then ++ shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) ++ fi ++ count=0 ++ while [ $count -lt $shutdown_timeout ]; do ++ ceilometer_collector_status ++ rc=$? ++ if [ $rc -eq $OCF_NOT_RUNNING ]; then ++ break ++ fi ++ count=`expr $count + 1` ++ sleep 1 ++ ocf_log debug "OpenStack Ceilometer Collector (ceilometer-collector) still hasn't stopped yet. Waiting ..." ++ done ++ ++ ceilometer_collector_status ++ rc=$? ++ if [ $rc -ne $OCF_NOT_RUNNING ]; then ++ # SIGTERM didn't help either, try SIGKILL ++ ocf_log info "OpenStack Ceilometer Collector (ceilometer-collector) failed to stop after ${shutdown_timeout}s \ ++ using SIGTERM. Trying SIGKILL ..." ++ ocf_run kill -s KILL $pid ++ fi ++ ++ ocf_log info "OpenStack Ceilometer Collector (ceilometer-collector) stopped" ++ ++ rm -f $OCF_RESKEY_pid ++ ++ return $OCF_SUCCESS ++} ++ ++####################################################################### ++ ++case "$1" in ++ meta-data) meta_data ++ exit $OCF_SUCCESS;; ++ usage|help) usage ++ exit $OCF_SUCCESS;; ++esac ++ ++# Anything except meta-data and help must pass validation ++ceilometer_collector_validate || exit $? ++ ++# What kind of method was invoked? ++case "$1" in ++ start) ceilometer_collector_start;; ++ stop) ceilometer_collector_stop;; ++ status) ceilometer_collector_status;; ++ monitor) ceilometer_collector_monitor;; ++ validate-all) ;; ++ *) usage ++ exit $OCF_ERR_UNIMPLEMENTED;; ++esac +--- a/ocf/ceilometer-agent-central ++++ b/ocf/ceilometer-agent-central +@@ -34,7 +34,7 @@ + + OCF_RESKEY_binary_default="ceilometer-agent-central" + OCF_RESKEY_config_default="/etc/ceilometer/ceilometer.conf" +-OCF_RESKEY_user_default="ceilometer" ++OCF_RESKEY_user_default="root" + OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" + OCF_RESKEY_amqp_server_port_default="5672" + +@@ -223,16 +223,6 @@ ceilometer_agent_central_monitor() { + return $rc + fi + +- # Check the connections according to the PID. +- # We are sure to hit the scheduler process and not other Cinder process with the same connection behavior (for example cinder-api) +- pid=`cat $OCF_RESKEY_pid` +- scheduler_amqp_check=`netstat -punt | grep -s "$OCF_RESKEY_amqp_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` +- rc=$? 
+- if [ $rc -ne 0 ]; then +- ocf_log err "Central Agent is not connected to the AMQP server : $rc" +- return $OCF_NOT_RUNNING +- fi +- + ocf_log debug "OpenStack Ceilometer Central Agent (ceilometer-agent-central) monitor succeeded" + return $OCF_SUCCESS + } +--- /dev/null ++++ b/ocf/ceilometer-alarm-evaluator +@@ -0,0 +1,304 @@ ++#!/bin/sh ++# ++# ++# OpenStack Ceilometer Alarm Evaluator Service (ceilometer-alarm-evaluator) ++# ++# Description: Manages an OpenStack Ceilometer Alarm Evaluator Service (ceilometer-alarm-evaluator) process as an HA resource ++# ++# Authors: Emilien Macchi ++# Mainly inspired by the Nova Scheduler resource agent written by Sebastien Han ++# ++# Support: openstack@lists.launchpad.net ++# License: Apache Software License (ASL) 2.0 ++# ++# ++# See usage() function below for more details ... ++# ++# OCF instance parameters: ++# OCF_RESKEY_binary ++# OCF_RESKEY_config ++# OCF_RESKEY_user ++# OCF_RESKEY_pid ++# OCF_RESKEY_monitor_binary ++# OCF_RESKEY_additional_parameters ++####################################################################### ++# Initialization: ++ ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++ ++####################################################################### ++ ++# Fill in some defaults if no values are specified ++ ++OCF_RESKEY_binary_default="ceilometer-alarm-evaluator" ++OCF_RESKEY_config_default="/etc/ceilometer/ceilometer.conf" ++OCF_RESKEY_user_default="root" ++OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" ++ ++: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} ++: ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} ++: ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} ++: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} ++ ++####################################################################### ++ ++usage() { ++ cat < ++ ++ ++1.0 ++ ++ ++Resource agent for the OpenStack Ceilometer Alarm Evaluator Service (ceilometer-alarm-evaluator) ++May manage a ceilometer-alarm-evaluator instance or a clone set that ++creates a distributed ceilometer-alarm-evaluator cluster. 
++ ++Manages the OpenStack Ceilometer Alarm Evaluator Service (ceilometer-alarm-evaluator) ++ ++ ++ ++ ++Location of the OpenStack Ceilometer Alarm Evaluator server binary (ceilometer-alarm-evaluator) ++ ++OpenStack Ceilometer Alarm Evaluator server binary (ceilometer-alarm-evaluator) ++ ++ ++ ++ ++ ++Location of the OpenStack Ceilometer Alarm Evaluator Service (ceilometer-alarm-evaluator) configuration file ++ ++OpenStack Ceilometer Alarm Evaluator (ceilometer-alarm-evaluator registry) config file ++ ++ ++ ++ ++ ++User running OpenStack Ceilometer Alarm Evaluator Service (ceilometer-alarm-evaluator) ++ ++OpenStack Ceilometer Alarm Evaluator Service (ceilometer-alarm-evaluator) user ++ ++ ++ ++ ++ ++The pid file to use for this OpenStack Ceilometer Alarm Evaluator Service (ceilometer-alarm-evaluator) instance ++ ++OpenStack Ceilometer Alarm Evaluator Service (ceilometer-alarm-evaluator) pid file ++ ++ ++ ++ ++ ++ ++Additional parameters to pass on to the OpenStack Ceilometer Alarm Evaluator Service (ceilometer-alarm-evaluator) ++ ++Additional parameters for ceilometer-alarm-evaluator ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++####################################################################### ++# Functions invoked by resource manager actions ++ ++ceilometer_alarm_evaluator_validate() { ++ local rc ++ ++ check_binary $OCF_RESKEY_binary ++ check_binary netstat ++ ++ # A config file on shared storage that is not available ++ # during probes is OK. ++ if [ ! -f $OCF_RESKEY_config ]; then ++ if ! ocf_is_probe; then ++ ocf_log err "Config $OCF_RESKEY_config doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" ++ fi ++ ++ getent passwd $OCF_RESKEY_user >/dev/null 2>&1 ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "User $OCF_RESKEY_user doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ++ true ++} ++ ++ceilometer_alarm_evaluator_status() { ++ local pid ++ local rc ++ ++ if [ ! -f $OCF_RESKEY_pid ]; then ++ ocf_log info "OpenStack Ceilometer Alarm Evaluator (ceilometer-alarm-evaluator) is not running" ++ return $OCF_NOT_RUNNING ++ else ++ pid=`cat $OCF_RESKEY_pid` ++ fi ++ ++ ocf_run -warn kill -s 0 $pid ++ rc=$? ++ if [ $rc -eq 0 ]; then ++ return $OCF_SUCCESS ++ else ++ ocf_log info "Old PID file found, but OpenStack Ceilometer Alarm Evaluator (ceilometer-alarm-evaluator) is not running" ++ return $OCF_NOT_RUNNING ++ fi ++} ++ ++ceilometer_alarm_evaluator_monitor() { ++ local rc ++ local pid ++ local scheduler_amqp_check ++ ++ ceilometer_alarm_evaluator_status ++ rc=$? ++ ++ # If status returned anything but success, return that immediately ++ if [ $rc -ne $OCF_SUCCESS ]; then ++ return $rc ++ fi ++ ++ ocf_log debug "OpenStack Ceilometer Alarm Evaluator (ceilometer-alarm-evaluator) monitor succeeded" ++ return $OCF_SUCCESS ++} ++ ++ceilometer_alarm_evaluator_start() { ++ local rc ++ ++ ceilometer_alarm_evaluator_status ++ rc=$? ++ if [ $rc -eq $OCF_SUCCESS ]; then ++ ocf_log info "OpenStack Ceilometer Alarm Evaluator (ceilometer-alarm-evaluator) already running" ++ return $OCF_SUCCESS ++ fi ++ ++ # run the actual ceilometer-alarm-evaluator daemon. Don't use ocf_run as we're sending the tool's output ++ # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. ++ su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ ++ $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' 
> $OCF_RESKEY_pid ++ ++ # Spin waiting for the server to come up. ++ while true; do ++ ceilometer_alarm_evaluator_monitor ++ rc=$? ++ [ $rc -eq $OCF_SUCCESS ] && break ++ if [ $rc -ne $OCF_NOT_RUNNING ]; then ++ ocf_log err "OpenStack Ceilometer Alarm Evaluator (ceilometer-alarm-evaluator) start failed" ++ exit $OCF_ERR_GENERIC ++ fi ++ sleep 1 ++ done ++ ++ ocf_log info "OpenStack Ceilometer Alarm Evaluator (ceilometer-alarm-evaluator) started" ++ return $OCF_SUCCESS ++} ++ ++ceilometer_alarm_evaluator_stop() { ++ local rc ++ local pid ++ ++ ceilometer_alarm_evaluator_status ++ rc=$? ++ if [ $rc -eq $OCF_NOT_RUNNING ]; then ++ ocf_log info "OpenStack Ceilometer Alarm Evaluator (ceilometer-alarm-evaluator) already stopped" ++ return $OCF_SUCCESS ++ fi ++ ++ # Try SIGTERM ++ pid=`cat $OCF_RESKEY_pid` ++ ocf_run kill -s TERM $pid ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "OpenStack Ceilometer Alarm Evaluator (ceilometer-alarm-evaluator) couldn't be stopped" ++ exit $OCF_ERR_GENERIC ++ fi ++ ++ # stop waiting ++ shutdown_timeout=15 ++ if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then ++ shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) ++ fi ++ count=0 ++ while [ $count -lt $shutdown_timeout ]; do ++ ceilometer_alarm_evaluator_status ++ rc=$? ++ if [ $rc -eq $OCF_NOT_RUNNING ]; then ++ break ++ fi ++ count=`expr $count + 1` ++ sleep 1 ++ ocf_log debug "OpenStack Ceilometer Alarm Evaluator (ceilometer-alarm-evaluator) still hasn't stopped yet. Waiting ..." ++ done ++ ++ ceilometer_alarm_evaluator_status ++ rc=$? ++ if [ $rc -ne $OCF_NOT_RUNNING ]; then ++ # SIGTERM didn't help either, try SIGKILL ++ ocf_log info "OpenStack Ceilometer Alarm Evaluator (ceilometer-alarm-evaluator) failed to stop after ${shutdown_timeout}s \ ++ using SIGTERM. Trying SIGKILL ..." ++ ocf_run kill -s KILL $pid ++ fi ++ ++ ocf_log info "OpenStack Ceilometer Alarm Evaluator (ceilometer-alarm-evaluator) stopped" ++ ++ rm -f $OCF_RESKEY_pid ++ ++ return $OCF_SUCCESS ++} ++ ++####################################################################### ++ ++case "$1" in ++ meta-data) meta_data ++ exit $OCF_SUCCESS;; ++ usage|help) usage ++ exit $OCF_SUCCESS;; ++esac ++ ++# Anything except meta-data and help must pass validation ++ceilometer_alarm_evaluator_validate || exit $? ++ ++# What kind of method was invoked? ++case "$1" in ++ start) ceilometer_alarm_evaluator_start;; ++ stop) ceilometer_alarm_evaluator_stop;; ++ status) ceilometer_alarm_evaluator_status;; ++ monitor) ceilometer_alarm_evaluator_monitor;; ++ validate-all) ;; ++ *) usage ++ exit $OCF_ERR_UNIMPLEMENTED;; ++esac +--- /dev/null ++++ b/ocf/ceilometer-alarm-notifier +@@ -0,0 +1,345 @@ ++#!/bin/sh ++# ++# ++# OpenStack Ceilometer Alarm Notifier Service (ceilometer-alarm-notifier) ++# ++# Description: Manages an OpenStack Ceilometer Alarm Notifier Service (ceilometer-alarm-notifier) process as an HA resource ++# ++# Authors: Emilien Macchi ++# Mainly inspired by the Nova Scheduler resource agent written by Sebastien Han ++# ++# Support: openstack@lists.launchpad.net ++# License: Apache Software License (ASL) 2.0 ++# ++# ++# See usage() function below for more details ... ++# ++# OCF instance parameters: ++# OCF_RESKEY_binary ++# OCF_RESKEY_config ++# OCF_RESKEY_user ++# OCF_RESKEY_pid ++# OCF_RESKEY_monitor_binary ++# OCF_RESKEY_amqp_server_port ++# OCF_RESKEY_additional_parameters ++####################################################################### ++# Initialization: ++ ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. 
${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++ ++####################################################################### ++ ++# Fill in some defaults if no values are specified ++ ++OCF_RESKEY_binary_default="ceilometer-alarm-notifier" ++OCF_RESKEY_config_default="/etc/ceilometer/ceilometer.conf" ++OCF_RESKEY_user_default="root" ++OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" ++OCF_RESKEY_amqp_server_port_default="5672" ++ ++: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} ++: ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} ++: ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} ++: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} ++: ${OCF_RESKEY_amqp_server_port=${OCF_RESKEY_amqp_server_port_default}} ++ ++####################################################################### ++ ++usage() { ++ cat < ++ ++ ++1.0 ++ ++ ++Resource agent for the OpenStack Ceilometer Alarm Notifier Service (ceilometer-alarm-notifier) ++May manage a ceilometer-alarm-notifier instance or a clone set that ++creates a distributed ceilometer-alarm-notifier cluster. ++ ++Manages the OpenStack Ceilometer Alarm Notifier Service (ceilometer-alarm-notifier) ++ ++ ++ ++ ++Location of the OpenStack Ceilometer Alarm Notifier server binary (ceilometer-alarm-notifier) ++ ++OpenStack Ceilometer Alarm Notifier server binary (ceilometer-alarm-notifier) ++ ++ ++ ++ ++ ++Location of the OpenStack Ceilometer Alarm Notifier Service (ceilometer-alarm-notifier) configuration file ++ ++OpenStack Ceilometer Alarm Notifier (ceilometer-alarm-notifier registry) config file ++ ++ ++ ++ ++ ++User running OpenStack Ceilometer Alarm Notifier Service (ceilometer-alarm-notifier) ++ ++OpenStack Ceilometer Alarm Notifier Service (ceilometer-alarm-notifier) user ++ ++ ++ ++ ++ ++The pid file to use for this OpenStack Ceilometer Alarm Notifier Service (ceilometer-alarm-notifier) instance ++ ++OpenStack Ceilometer Alarm Notifier Service (ceilometer-alarm-notifier) pid file ++ ++ ++ ++ ++ ++The listening port number of the AMQP server. Use for monitoring purposes ++ ++AMQP listening port ++ ++ ++ ++ ++ ++ ++Additional parameters to pass on to the OpenStack Ceilometer Alarm Notifier Service (ceilometer-alarm-notifier) ++ ++Additional parameters for ceilometer-alarm-notifier ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++####################################################################### ++# Functions invoked by resource manager actions ++ ++ceilometer_alarm_notifier_check_port() { ++# This function has been taken from the squid RA and improved a bit ++# The length of the integer must be 4 ++# Examples of valid port: "1080", "0080" ++# Examples of invalid port: "1080bad", "0", "0000", "" ++ ++ local int ++ local cnt ++ ++ int="$1" ++ cnt=${#int} ++ echo $int |egrep -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*' ++ ++ if [ $? -ne 0 ] || [ $cnt -ne 4 ]; then ++ ocf_log err "Invalid port number: $1" ++ exit $OCF_ERR_CONFIGURED ++ fi ++} ++ ++ceilometer_alarm_notifier_validate() { ++ local rc ++ ++ check_binary $OCF_RESKEY_binary ++ check_binary netstat ++ ceilometer_alarm_notifier_check_port $OCF_RESKEY_amqp_server_port ++ ++ # A config file on shared storage that is not available ++ # during probes is OK. ++ if [ ! -f $OCF_RESKEY_config ]; then ++ if ! ocf_is_probe; then ++ ocf_log err "Config $OCF_RESKEY_config doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" ++ fi ++ ++ getent passwd $OCF_RESKEY_user >/dev/null 2>&1 ++ rc=$? 
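++    # Note (assumption about the environment, not this repo): getent
++    # consults every configured NSS source (local files, LDAP, SSSD, ...),
++    # so non-local service accounts also pass this existence check.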
++ if [ $rc -ne 0 ]; then ++ ocf_log err "User $OCF_RESKEY_user doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ++ true ++} ++ ++ceilometer_alarm_notifier_status() { ++ local pid ++ local rc ++ ++ if [ ! -f $OCF_RESKEY_pid ]; then ++ ocf_log info "OpenStack Ceilometer Alarm Notifier (ceilometer-alarm-notifier) is not running" ++ return $OCF_NOT_RUNNING ++ else ++ pid=`cat $OCF_RESKEY_pid` ++ fi ++ ++ ocf_run -warn kill -s 0 $pid ++ rc=$? ++ if [ $rc -eq 0 ]; then ++ return $OCF_SUCCESS ++ else ++ ocf_log info "Old PID file found, but OpenStack Ceilometer Alarm Notifier (ceilometer-alarm-notifier) is not running" ++ return $OCF_NOT_RUNNING ++ fi ++} ++ ++ceilometer_alarm_notifier_monitor() { ++ local rc ++ local pid ++ local scheduler_amqp_check ++ ++ ceilometer_alarm_notifier_status ++ rc=$? ++ ++ # If status returned anything but success, return that immediately ++ if [ $rc -ne $OCF_SUCCESS ]; then ++ return $rc ++ fi ++ ++ # Check the connections according to the PID. ++ # We are sure to hit the scheduler process and not other Cinder process with the same connection behavior (for example cinder-api) ++ pid=`cat $OCF_RESKEY_pid` ++ scheduler_amqp_check=`netstat -punt | grep -s "$OCF_RESKEY_amqp_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "Alarm Notifier is not connected to the AMQP server : $rc" ++ return $OCF_NOT_RUNNING ++ fi ++ ++ ocf_log debug "OpenStack Ceilometer Alarm Notifier (ceilometer-alarm-notifier) monitor succeeded" ++ return $OCF_SUCCESS ++} ++ ++ceilometer_alarm_notifier_start() { ++ local rc ++ ++ ceilometer_alarm_notifier_status ++ rc=$? ++ if [ $rc -eq $OCF_SUCCESS ]; then ++ ocf_log info "OpenStack Ceilometer Alarm Notifier (ceilometer-alarm-notifier) already running" ++ return $OCF_SUCCESS ++ fi ++ ++ # run the actual ceilometer-alarm-notifier daemon. Don't use ocf_run as we're sending the tool's output ++ # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. ++ su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ ++ $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid ++ ++ # Spin waiting for the server to come up. ++ while true; do ++ ceilometer_alarm_notifier_monitor ++ rc=$? ++ [ $rc -eq $OCF_SUCCESS ] && break ++ if [ $rc -ne $OCF_NOT_RUNNING ]; then ++ ocf_log err "OpenStack Ceilometer Alarm Notifier (ceilometer-alarm-notifier) start failed" ++ exit $OCF_ERR_GENERIC ++ fi ++ sleep 1 ++ done ++ ++ ocf_log info "OpenStack Ceilometer Alarm Notifier (ceilometer-alarm-notifier) started" ++ return $OCF_SUCCESS ++} ++ ++ceilometer_alarm_notifier_stop() { ++ local rc ++ local pid ++ ++ ceilometer_alarm_notifier_status ++ rc=$? ++ if [ $rc -eq $OCF_NOT_RUNNING ]; then ++ ocf_log info "OpenStack Ceilometer Alarm Notifier (ceilometer-alarm-notifier) already stopped" ++ return $OCF_SUCCESS ++ fi ++ ++ # Try SIGTERM ++ pid=`cat $OCF_RESKEY_pid` ++ ocf_run kill -s TERM $pid ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "OpenStack Ceilometer Alarm Notifier (ceilometer-alarm-notifier) couldn't be stopped" ++ exit $OCF_ERR_GENERIC ++ fi ++ ++ # stop waiting ++ shutdown_timeout=15 ++ if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then ++ shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) ++ fi ++ count=0 ++ while [ $count -lt $shutdown_timeout ]; do ++ ceilometer_alarm_notifier_status ++ rc=$? 
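++        # Worked example of the timeout math above: a 60000 ms CRM stop
++        # timeout gives shutdown_timeout = (60000/1000)-5 = 55 one-second
++        # iterations of this loop before we escalate to SIGKILL below.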
++ if [ $rc -eq $OCF_NOT_RUNNING ]; then ++ break ++ fi ++ count=`expr $count + 1` ++ sleep 1 ++ ocf_log debug "OpenStack Ceilometer Alarm Notifier (ceilometer-alarm-notifier) still hasn't stopped yet. Waiting ..." ++ done ++ ++ ceilometer_alarm_notifier_status ++ rc=$? ++ if [ $rc -ne $OCF_NOT_RUNNING ]; then ++ # SIGTERM didn't help either, try SIGKILL ++ ocf_log info "OpenStack Ceilometer Alarm Notifier (ceilometer-alarm-notifier) failed to stop after ${shutdown_timeout}s \ ++ using SIGTERM. Trying SIGKILL ..." ++ ocf_run kill -s KILL $pid ++ fi ++ ++ ocf_log info "OpenStack Ceilometer Alarm Notifier (ceilometer-alarm-notifier) stopped" ++ ++ rm -f $OCF_RESKEY_pid ++ ++ return $OCF_SUCCESS ++} ++ ++####################################################################### ++ ++case "$1" in ++ meta-data) meta_data ++ exit $OCF_SUCCESS;; ++ usage|help) usage ++ exit $OCF_SUCCESS;; ++esac ++ ++# Anything except meta-data and help must pass validation ++ceilometer_alarm_notifier_validate || exit $? ++ ++# What kind of method was invoked? ++case "$1" in ++ start) ceilometer_alarm_notifier_start;; ++ stop) ceilometer_alarm_notifier_stop;; ++ status) ceilometer_alarm_notifier_status;; ++ monitor) ceilometer_alarm_notifier_monitor;; ++ validate-all) ;; ++ *) usage ++ exit $OCF_ERR_UNIMPLEMENTED;; ++esac +--- /dev/null ++++ b/ocf/ceilometer-agent-notification +@@ -0,0 +1,335 @@ ++#!/bin/sh ++# ++# ++# OpenStack Ceilometer Central Agent Service (ceilometer-agent-notification) ++# ++# Description: Manages an OpenStack Ceilometer Central Agent Service (ceilometer-agent-notification) process as an HA resource ++# ++# Authors: Emilien Macchi ++# Mainly inspired by the Nova Scheduler resource agent written by Sebastien Han ++# ++# Support: openstack@lists.launchpad.net ++# License: Apache Software License (ASL) 2.0 ++# ++# ++# See usage() function below for more details ... ++# ++# OCF instance parameters: ++# OCF_RESKEY_binary ++# OCF_RESKEY_config ++# OCF_RESKEY_user ++# OCF_RESKEY_pid ++# OCF_RESKEY_monitor_binary ++# OCF_RESKEY_amqp_server_port ++# OCF_RESKEY_additional_parameters ++####################################################################### ++# Initialization: ++ ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++ ++####################################################################### ++ ++# Fill in some defaults if no values are specified ++ ++OCF_RESKEY_binary_default="ceilometer-agent-notification" ++OCF_RESKEY_config_default="/etc/ceilometer/ceilometer.conf" ++OCF_RESKEY_user_default="root" ++OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" ++OCF_RESKEY_amqp_server_port_default="5672" ++ ++: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} ++: ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} ++: ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} ++: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} ++: ${OCF_RESKEY_amqp_server_port=${OCF_RESKEY_amqp_server_port_default}} ++ ++####################################################################### ++ ++usage() { ++ cat < ++ ++ ++1.0 ++ ++ ++Resource agent for the OpenStack Ceilometer Central Agent Service (ceilometer-agent-notification) ++May manage a ceilometer-agent-notification instance or a clone set that ++creates a distributed ceilometer-agent-notification cluster. 
++ ++Manages the OpenStack Ceilometer Central Agent Service (ceilometer-agent-notification) ++ ++ ++ ++ ++Location of the OpenStack Ceilometer Central Agent server binary (ceilometer-agent-notification) ++ ++OpenStack Ceilometer Central Agent server binary (ceilometer-agent-notification) ++ ++ ++ ++ ++ ++Location of the OpenStack Ceilometer Central Agent Service (ceilometer-agent-notification) configuration file ++ ++OpenStack Ceilometer Central Agent (ceilometer-agent-notification registry) config file ++ ++ ++ ++ ++ ++User running OpenStack Ceilometer Central Agent Service (ceilometer-agent-notification) ++ ++OpenStack Ceilometer Central Agent Service (ceilometer-agent-notification) user ++ ++ ++ ++ ++ ++The pid file to use for this OpenStack Ceilometer Central Agent Service (ceilometer-agent-notification) instance ++ ++OpenStack Ceilometer Central Agent Service (ceilometer-agent-notification) pid file ++ ++ ++ ++ ++ ++The listening port number of the AMQP server. Use for monitoring purposes ++ ++AMQP listening port ++ ++ ++ ++ ++ ++ ++Additional parameters to pass on to the OpenStack Ceilometer Central Agent Service (ceilometer-agent-notification) ++ ++Additional parameters for ceilometer-agent-notification ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++####################################################################### ++# Functions invoked by resource manager actions ++ ++ceilometer_agent_notification_check_port() { ++# This function has been taken from the squid RA and improved a bit ++# The length of the integer must be 4 ++# Examples of valid port: "1080", "0080" ++# Examples of invalid port: "1080bad", "0", "0000", "" ++ ++ local int ++ local cnt ++ ++ int="$1" ++ cnt=${#int} ++ echo $int |egrep -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*' ++ ++ if [ $? -ne 0 ] || [ $cnt -ne 4 ]; then ++ ocf_log err "Invalid port number: $1" ++ exit $OCF_ERR_CONFIGURED ++ fi ++} ++ ++ceilometer_agent_notification_validate() { ++ local rc ++ ++ check_binary $OCF_RESKEY_binary ++ check_binary netstat ++ ceilometer_agent_notification_check_port $OCF_RESKEY_amqp_server_port ++ ++ # A config file on shared storage that is not available ++ # during probes is OK. ++ if [ ! -f $OCF_RESKEY_config ]; then ++ if ! ocf_is_probe; then ++ ocf_log err "Config $OCF_RESKEY_config doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" ++ fi ++ ++ getent passwd $OCF_RESKEY_user >/dev/null 2>&1 ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "User $OCF_RESKEY_user doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ++ true ++} ++ ++ceilometer_agent_notification_status() { ++ local pid ++ local rc ++ ++ if [ ! -f $OCF_RESKEY_pid ]; then ++ ocf_log info "OpenStack Ceilometer Central Agent (ceilometer-agent-notification) is not running" ++ return $OCF_NOT_RUNNING ++ else ++ pid=`cat $OCF_RESKEY_pid` ++ fi ++ ++ ocf_run -warn kill -s 0 $pid ++ rc=$? ++ if [ $rc -eq 0 ]; then ++ return $OCF_SUCCESS ++ else ++ ocf_log info "Old PID file found, but OpenStack Ceilometer Central Agent (ceilometer-agent-notification) is not running" ++ return $OCF_NOT_RUNNING ++ fi ++} ++ ++ceilometer_agent_notification_monitor() { ++ local rc ++ local pid ++ local scheduler_amqp_check ++ ++ ceilometer_agent_notification_status ++ rc=$? 
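++    # Unlike ceilometer-alarm-notifier above, this monitor performs no
++    # AMQP connection check: a live PID is treated as a healthy service.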
++ ++ # If status returned anything but success, return that immediately ++ if [ $rc -ne $OCF_SUCCESS ]; then ++ return $rc ++ fi ++ ++ ocf_log debug "OpenStack Ceilometer Central Agent (ceilometer-agent-notification) monitor succeeded" ++ return $OCF_SUCCESS ++} ++ ++ceilometer_agent_notification_start() { ++ local rc ++ ++ ceilometer_agent_notification_status ++ rc=$? ++ if [ $rc -eq $OCF_SUCCESS ]; then ++ ocf_log info "OpenStack Ceilometer Central Agent (ceilometer-agent-notification) already running" ++ return $OCF_SUCCESS ++ fi ++ ++ # run the actual ceilometer-agent-notification daemon. Don't use ocf_run as we're sending the tool's output ++ # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. ++ su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ ++ $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid ++ ++ # Spin waiting for the server to come up. ++ while true; do ++ ceilometer_agent_notification_monitor ++ rc=$? ++ [ $rc -eq $OCF_SUCCESS ] && break ++ if [ $rc -ne $OCF_NOT_RUNNING ]; then ++ ocf_log err "OpenStack Ceilometer Central Agent (ceilometer-agent-notification) start failed" ++ exit $OCF_ERR_GENERIC ++ fi ++ sleep 1 ++ done ++ ++ ocf_log info "OpenStack Ceilometer Central Agent (ceilometer-agent-notification) started" ++ return $OCF_SUCCESS ++} ++ ++ceilometer_agent_notification_stop() { ++ local rc ++ local pid ++ ++ ceilometer_agent_notification_status ++ rc=$? ++ if [ $rc -eq $OCF_NOT_RUNNING ]; then ++ ocf_log info "OpenStack Ceilometer Central Agent (ceilometer-agent-notification) already stopped" ++ return $OCF_SUCCESS ++ fi ++ ++ # Try SIGTERM ++ pid=`cat $OCF_RESKEY_pid` ++ ocf_run kill -s TERM $pid ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "OpenStack Ceilometer Central Agent (ceilometer-agent-notification) couldn't be stopped" ++ exit $OCF_ERR_GENERIC ++ fi ++ ++ # stop waiting ++ shutdown_timeout=15 ++ if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then ++ shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) ++ fi ++ count=0 ++ while [ $count -lt $shutdown_timeout ]; do ++ ceilometer_agent_notification_status ++ rc=$? ++ if [ $rc -eq $OCF_NOT_RUNNING ]; then ++ break ++ fi ++ count=`expr $count + 1` ++ sleep 1 ++ ocf_log debug "OpenStack Ceilometer Central Agent (ceilometer-agent-notification) still hasn't stopped yet. Waiting ..." ++ done ++ ++ ceilometer_agent_notification_status ++ rc=$? ++ if [ $rc -ne $OCF_NOT_RUNNING ]; then ++ # SIGTERM didn't help either, try SIGKILL ++ ocf_log info "OpenStack Ceilometer Central Agent (ceilometer-agent-notification) failed to stop after ${shutdown_timeout}s \ ++ using SIGTERM. Trying SIGKILL ..." ++ ocf_run kill -s KILL $pid ++ fi ++ ++ ocf_log info "OpenStack Ceilometer Central Agent (ceilometer-agent-notification) stopped" ++ ++ rm -f $OCF_RESKEY_pid ++ ++ return $OCF_SUCCESS ++} ++ ++####################################################################### ++ ++case "$1" in ++ meta-data) meta_data ++ exit $OCF_SUCCESS;; ++ usage|help) usage ++ exit $OCF_SUCCESS;; ++esac ++ ++# Anything except meta-data and help must pass validation ++ceilometer_agent_notification_validate || exit $? ++ ++# What kind of method was invoked? 
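++# Illustrative Pacemaker (crm shell) configuration for this agent; the
++# resource name and operation values here are examples only, not taken
++# from this repository:
++#   primitive p-ceilo-agent-notification \
++#     ocf:openstack:ceilometer-agent-notification \
++#     params user="root" config="/etc/ceilometer/ceilometer.conf" \
++#     op monitor interval="30s" timeout="30s"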
++case "$1" in ++ start) ceilometer_agent_notification_start;; ++ stop) ceilometer_agent_notification_stop;; ++ status) ceilometer_agent_notification_status;; ++ monitor) ceilometer_agent_notification_monitor;; ++ validate-all) ;; ++ *) usage ++ exit $OCF_ERR_UNIMPLEMENTED;; ++esac diff --git a/openstack/openstack-ras/openstack-ras/ceilometer_pipeline.patch b/openstack/openstack-ras/openstack-ras/ceilometer_pipeline.patch new file mode 100644 index 00000000..e8b5fcd3 --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/ceilometer_pipeline.patch @@ -0,0 +1,150 @@ +Index: git/ocf/ceilometer-agent-central +=================================================================== +--- git.orig/ocf/ceilometer-agent-central ++++ git/ocf/ceilometer-agent-central +@@ -23,6 +23,7 @@ + # OCF instance parameters: + # OCF_RESKEY_binary + # OCF_RESKEY_config ++# OCF_RESKEY_pipeline + # OCF_RESKEY_user + # OCF_RESKEY_pid + # OCF_RESKEY_monitor_binary +@@ -40,12 +41,14 @@ + + OCF_RESKEY_binary_default="ceilometer-agent-central" + OCF_RESKEY_config_default="/etc/ceilometer/ceilometer.conf" ++OCF_RESKEY_pipeline_default="/opt/cgcs/ceilometer/pipeline.yaml" + OCF_RESKEY_user_default="root" + OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" + OCF_RESKEY_amqp_server_port_default="5672" + + : ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} + : ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} ++: ${OCF_RESKEY_pipeline=${OCF_RESKEY_pipeline_default}} + : ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} + : ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} + : ${OCF_RESKEY_amqp_server_port=${OCF_RESKEY_amqp_server_port_default}} +@@ -99,6 +102,14 @@ Location of the OpenStack Ceilometer Cen + + + ++ ++ ++Location of the OpenStack Ceilometer Central Agent Service (ceilometer-agent-central) pipeline file ++ ++OpenStack Ceilometer Central Agent (ceilometer-agent-central registry) pipeline file ++ ++ ++ + + + User running OpenStack Ceilometer Central Agent Service (ceilometer-agent-central) +@@ -247,6 +258,7 @@ ceilometer_agent_central_start() { + # run the actual ceilometer-agent-central daemon. Don't use ocf_run as we're sending the tool's output + # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. + su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ ++ --pipeline_cfg_file=$OCF_RESKEY_pipeline \ + $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid + + # Spin waiting for the server to come up. 
+Index: git/ocf/ceilometer-agent-notification +=================================================================== +--- git.orig/ocf/ceilometer-agent-notification ++++ git/ocf/ceilometer-agent-notification +@@ -23,6 +23,7 @@ + # OCF instance parameters: + # OCF_RESKEY_binary + # OCF_RESKEY_config ++# OCF_RESKEY_pipeline + # OCF_RESKEY_user + # OCF_RESKEY_pid + # OCF_RESKEY_monitor_binary +@@ -40,12 +41,14 @@ + + OCF_RESKEY_binary_default="ceilometer-agent-notification" + OCF_RESKEY_config_default="/etc/ceilometer/ceilometer.conf" ++OCF_RESKEY_pipeline_default="/opt/cgcs/ceilometer/pipeline.yaml" + OCF_RESKEY_user_default="root" + OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" + OCF_RESKEY_amqp_server_port_default="5672" + + : ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} + : ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} ++: ${OCF_RESKEY_pipeline=${OCF_RESKEY_pipeline_default}} + : ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} + : ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} + : ${OCF_RESKEY_amqp_server_port=${OCF_RESKEY_amqp_server_port_default}} +@@ -99,6 +102,14 @@ Location of the OpenStack Ceilometer Cen + + + ++ ++ ++Location of the OpenStack Ceilometer Central Agent Service (ceilometer-agent-notification) pipeline file ++ ++OpenStack Ceilometer Central Agent (ceilometer-agent-notification registry) pipeline file ++ ++ ++ + + + User running OpenStack Ceilometer Central Agent Service (ceilometer-agent-notification) +@@ -247,6 +258,7 @@ ceilometer_agent_notification_start() { + # run the actual ceilometer-agent-notification daemon. Don't use ocf_run as we're sending the tool's output + # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. + su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ ++ --pipeline_cfg_file=$OCF_RESKEY_pipeline \ + $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid + + # Spin waiting for the server to come up. +Index: git/ocf/ceilometer-api +=================================================================== +--- git.orig/ocf/ceilometer-api ++++ git/ocf/ceilometer-api +@@ -23,6 +23,7 @@ + # OCF instance parameters: + # OCF_RESKEY_binary + # OCF_RESKEY_config ++# OCF_RESKEY_pipeline + # OCF_RESKEY_user + # OCF_RESKEY_pid + # OCF_RESKEY_monitor_binary +@@ -40,12 +41,14 @@ + + OCF_RESKEY_binary_default="ceilometer-api" + OCF_RESKEY_config_default="/etc/ceilometer/ceilometer.conf" ++OCF_RESKEY_pipeline_default="/opt/cgcs/ceilometer/pipeline.yaml" + OCF_RESKEY_user_default="root" + OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" + OCF_RESKEY_api_listen_port_default="8777" + + : ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} + : ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} ++: ${OCF_RESKEY_pipeline=${OCF_RESKEY_pipeline_default}} + : ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} + : ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} + : ${OCF_RESKEY_api_listen_port=${OCF_RESKEY_api_listen_port_default}} +@@ -99,6 +102,14 @@ Location of the OpenStack Ceilometer API + + + ++ ++ ++Location of the OpenStack Ceilometer API Service (ceilometer-api) pipeline file ++ ++OpenStack Ceilometer API (ceilometer-api registry) pipeline file ++ ++ ++ + + + User running OpenStack Ceilometer API Service (ceilometer-api) +@@ -257,6 +268,7 @@ ceilometer_api_start() { + # run the actual ceilometer-api daemon. Don't use ocf_run as we're sending the tool's output + # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. 
+ su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ ++ --pipeline_cfg_file=$OCF_RESKEY_pipeline \ + $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid + + # Spin waiting for the server to come up. diff --git a/openstack/openstack-ras/openstack-ras/cgts-4061-cinder-volume-service-down.patch b/openstack/openstack-ras/openstack-ras/cgts-4061-cinder-volume-service-down.patch new file mode 100644 index 00000000..12b06cdd --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/cgts-4061-cinder-volume-service-down.patch @@ -0,0 +1,141 @@ +--- a/ocf/cinder-volume ++++ b/ocf/cinder-volume +@@ -221,10 +221,73 @@ cinder_volume_status() { + fi + } + ++cinder_volume_get_service_status() { ++ source /etc/nova/openrc ++ python - <<'EOF' ++from __future__ import print_function ++ ++from cinderclient import client as cinder_client ++import keyring ++from keystoneclient import session as keystone_session ++from keystoneclient.auth.identity import v3 ++import os ++import sys ++ ++DEFAULT_OS_VOLUME_API_VERSION = 2 ++CINDER_CLIENT_TIMEOUT_SEC = 3 ++ ++def create_cinder_client(): ++ password = keyring.get_password('CGCS', os.environ['OS_USERNAME']) ++ auth = v3.Password( ++ user_domain_name=os.environ['OS_USER_DOMAIN_NAME'], ++ username = os.environ['OS_USERNAME'], ++ password = password, ++ project_domain_name = os.environ['OS_PROJECT_DOMAIN_NAME'], ++ project_name = os.environ['OS_PROJECT_NAME'], ++ auth_url = os.environ['OS_AUTH_URL']) ++ session = keystone_session.Session(auth=auth) ++ return cinder_client.Client( ++ DEFAULT_OS_VOLUME_API_VERSION, ++ username = os.environ['OS_USERNAME'], ++ auth_url = os.environ['OS_AUTH_URL'], ++ region_name=os.environ['OS_REGION_NAME'], ++ session = session, timeout = CINDER_CLIENT_TIMEOUT_SEC) ++ ++def service_is_up(s): ++ return s.state == 'up' ++ ++def cinder_volume_service_status(cc): ++ services = cc.services.list( ++ host='controller', ++ binary='cinder-volume') ++ if not len(services): ++ return (False, False) ++ exists, is_up = (True, service_is_up(services[0])) ++ for s in services[1:]: ++ # attempt to merge statuses ++ if is_up != service_is_up(s): ++ raise Exception(('Found multiple cinder-volume ' ++ 'services with different ' ++ 'statuses: {}').format( ++ [s.to_dict() for s in services])) ++ return (exists, is_up) ++ ++try: ++ status = cinder_volume_service_status( ++ create_cinder_client()) ++ print(('exists={0[0]}\n' ++ 'is_up={0[1]}').format(status)) ++except Exception as e: ++ print(str(e), file=sys.stderr) ++ sys.exit(1) ++EOF ++} ++ + cinder_volume_monitor() { + local rc + local pid + local volume_amqp_check ++ local check_service_status=$1; shift + + cinder_volume_status + rc=$? +@@ -279,6 +342,46 @@ cinder_volume_monitor() { + + touch $VOLUME_FAIL_ON_AMQP_CHECK_FILE >> /dev/null 2>&1 + ++ if [ $check_service_status == "check-service-status" ]; then ++ local retries_left ++ local retry_interval ++ ++ retries_left=3 ++ retry_interval=3 ++ while [ $retries_left -gt 0 ]; do ++ retries_left=`expr $retries_left - 1` ++ status=$(cinder_volume_get_service_status) ++ rc=$? 
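++            # Up to 3 attempts, 3 s apart: transient client errors are
++            # retried; a service still reported down on the last attempt
++            # triggers a guru meditation report (SIGUSR2) before failing.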
++            if [ $rc -ne 0 ]; then
++                ocf_log err "Unable to get Cinder Volume status"
++                if [ $retries_left -gt 0 ]; then
++                    sleep $retry_interval
++                    continue
++                else
++                    return $OCF_ERR_GENERIC
++                fi
++            fi
++
++            local exists
++            local is_up
++            eval $status
++
++            if [ "$exists" == "True" ] && [ "$is_up" == "False" ]; then
++                ocf_log err "Cinder Volume service status is down"
++                if [ $retries_left -gt 0 ]; then
++                    sleep $retry_interval
++                    continue
++                else
++                    ocf_log info "Trigger Cinder Volume guru meditation report"
++                    ocf_run kill -s USR2 $pid
++                    return $OCF_ERR_GENERIC
++                fi
++            fi
++
++            break
++        done
++    fi
++
+     ocf_log debug "OpenStack Cinder Volume (cinder-volume) monitor succeeded"
+     return $OCF_SUCCESS
+ }
+@@ -386,7 +489,7 @@ cinder_volume_stop() {
+         # SIGTERM didn't help either, try SIGKILL
+         ocf_log info "OpenStack Cinder Volume (cinder-volume) failed to stop after ${shutdown_timeout}s \
+   using SIGTERM. Trying SIGKILL ..."
+-        ocf_run kill -s KILL $pid
++        ocf_run kill -s KILL -$pid
+     fi
+     cinder_volume_confirm_stop
+
+@@ -414,7 +517,7 @@ case "$1" in
+     start) cinder_volume_start;;
+     stop) cinder_volume_stop;;
+     status) cinder_volume_status;;
+-    monitor) cinder_volume_monitor;;
++    monitor) cinder_volume_monitor "check-service-status";;
+     validate-all) ;;
+     *) usage
+        exit $OCF_ERR_UNIMPLEMENTED;;
diff --git a/openstack/openstack-ras/openstack-ras/cinder-volume-enable-fix.patch b/openstack/openstack-ras/openstack-ras/cinder-volume-enable-fix.patch
new file mode 100644
index 00000000..91e1f61e
--- /dev/null
+++ b/openstack/openstack-ras/openstack-ras/cinder-volume-enable-fix.patch
@@ -0,0 +1,18 @@
+Index: git/ocf/cinder-volume
+===================================================================
+--- git.orig/ocf/cinder-volume
++++ git/ocf/cinder-volume
+@@ -224,6 +224,13 @@ cinder_volume_monitor() {
+     pid=`cat $OCF_RESKEY_pid`
+
+     if ocf_is_true "$OCF_RESKEY_multibackend"; then
++        pids=`ps -o pid --no-headers --ppid $pid`
++        rc=$?
++        if [ $rc -ne 0 ]; then
++            ocf_log err "No child processes from Cinder Volume (yet...): $rc"
++            return $OCF_NOT_RUNNING
++        fi
++
+         # Grab the child's PIDs
+         for i in `ps -o pid --no-headers --ppid $pid`
+         do
diff --git a/openstack/openstack-ras/openstack-ras/cinder-volume-fail-amqp-check.patch b/openstack/openstack-ras/openstack-ras/cinder-volume-fail-amqp-check.patch
new file mode 100644
index 00000000..4d32f0f5
--- /dev/null
+++ b/openstack/openstack-ras/openstack-ras/cinder-volume-fail-amqp-check.patch
@@ -0,0 +1,93 @@
+Index: git/ocf/cinder-volume
+===================================================================
+--- git.orig/ocf/cinder-volume
++++ git/ocf/cinder-volume
+@@ -55,6 +55,20 @@ OCF_RESKEY_multibackend_default="false"
+
+ #######################################################################
+
++#######################################################################
++
++#
++# The following file is used to determine if Cinder-Volume should be
++# failed if the AMQP check does not pass. Cinder-Volume initializes
++# its backend before connecting to Rabbit. In Ceph configurations,
++# Cinder-Volume will not connect to Rabbit until the storage blades
++# are provisioned (this can take a long time, so there is no need to
++# restart the process over and over again).
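++# The monitor touches this file once the first AMQP check passes, and
++# start/stop remove it again, so (as far as the hunks below show) only a
++# connection that was seen up and later dropped fails the monitor.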
++VOLUME_FAIL_ON_AMQP_CHECK_FILE="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.fail_on_amqp_check" ++ ++####################################################################### ++ ++ + usage() { + cat <> /dev/null 2>&1 ++ + ocf_log debug "OpenStack Cinder Volume (cinder-volume) monitor succeeded" + return $OCF_SUCCESS + } +@@ -260,6 +286,10 @@ cinder_volume_monitor() { + cinder_volume_start() { + local rc + ++ if [ -e "$VOLUME_FAIL_ON_AMQP_CHECK_FILE" ] ; then ++ rm $VOLUME_FAIL_ON_AMQP_CHECK_FILE >> /dev/null 2>&1 ++ fi ++ + cinder_volume_status + rc=$? + if [ $rc -eq $OCF_SUCCESS ]; then +@@ -293,6 +323,10 @@ cinder_volume_confirm_stop() { + local my_bin + local my_processes + ++ if [ -e "$VOLUME_FAIL_ON_AMQP_CHECK_FILE" ] ; then ++ rm $VOLUME_FAIL_ON_AMQP_CHECK_FILE >> /dev/null 2>&1 ++ fi ++ + my_binary=`which ${OCF_RESKEY_binary}` + my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` + +@@ -307,6 +341,10 @@ cinder_volume_stop() { + local rc + local pid + ++ if [ -e "$VOLUME_FAIL_ON_AMQP_CHECK_FILE" ] ; then ++ rm $VOLUME_FAIL_ON_AMQP_CHECK_FILE >> /dev/null 2>&1 ++ fi ++ + cinder_volume_status + rc=$? + if [ $rc -eq $OCF_NOT_RUNNING ]; then diff --git a/openstack/openstack-ras/openstack-ras/enable-multiple-nova-conductor-workers.patch b/openstack/openstack-ras/openstack-ras/enable-multiple-nova-conductor-workers.patch new file mode 100644 index 00000000..ed20887a --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/enable-multiple-nova-conductor-workers.patch @@ -0,0 +1,95 @@ +From 3ba260dbc2d69a797c8deb55ff0871e752dddebd Mon Sep 17 00:00:00 2001 +From: Chris Friesen +Date: Tue, 11 Aug 2015 18:48:45 -0400 +Subject: [PATCH] CGTS-1851: enable multiple nova-conductor workers + +Enable multiple nova-conductor workers by properly handling +the fact that when there are multiple workers the first one just +coordinates the others and doesn't itself connect to AMQP or the DB. + +This also fixes up a bunch of whitespace issues, replacing a number +of hard tabs with spaces to make it easier to follow the code. +--- + ocf/nova-conductor | 58 ++++++++++++++++++++++++++++++++++++++---------------- + 1 file changed, 41 insertions(+), 17 deletions(-) + +diff --git a/ocf/nova-conductor b/ocf/nova-conductor +index aa1ee2a..25e5f8f 100644 +--- a/ocf/nova-conductor ++++ b/ocf/nova-conductor +@@ -239,6 +239,18 @@ nova_conductor_status() { + fi + } + ++check_port() { ++ local port=$1 ++ local pid=$2 ++ netstat -punt | grep -s "$port" | grep -s "$pid" | grep -qs "ESTABLISHED" ++ rc=$? ++ if [ $rc -eq 0 ]; then ++ return 0 ++ else ++ return 1 ++ fi ++} ++ + nova_conductor_monitor() { + local rc + local pid +@@ -258,24 +270,36 @@ nova_conductor_monitor() { + # Check the connections according to the PID. + # We are sure to hit the conductor process and not other nova process with the same connection behavior (for example nova-cert) + if ocf_is_true "$OCF_RESKEY_zeromq"; then +- pid=`cat $OCF_RESKEY_pid` +- conductor_db_check=`netstat -punt | grep -s "$OCF_RESKEY_database_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` +- rc_db=$? +- if [ $rc_db -ne 0 ]; then +- ocf_log err "Nova Conductor is not connected to the database server: $rc_db" +- return $OCF_NOT_RUNNING +- fi +- else + pid=`cat $OCF_RESKEY_pid` +- conductor_db_check=`netstat -punt | grep -s "$OCF_RESKEY_database_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` +- rc_db=$? 
+- conductor_amqp_check=`netstat -punt | grep -s "$OCF_RESKEY_amqp_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` +- rc_amqp=$? +- if [ $rc_amqp -ne 0 ] || [ $rc_db -ne 0 ]; then +- ocf_log err "Nova Conductor is not connected to the AMQP server and/or the database server: AMQP connection test returned $rc_amqp and database connection test returned $rc_db" +- return $OCF_NOT_RUNNING +- fi +- fi ++ rc_db=`check_port $OCF_RESKEY_database_server_port $pid` ++ if [ $rc_db -ne 0 ]; then ++ ocf_log err "Nova Conductor is not connected to the database server: $rc_db" ++ return $OCF_NOT_RUNNING ++ fi ++ else ++ pid=`cat $OCF_RESKEY_pid` ++ rc_db=`check_port $OCF_RESKEY_database_server_port $pid` ++ rc_amqp=`check_port $OCF_RESKEY_amqp_server_port $pid` ++ if [ $rc_amqp -ne 0 ] || [ $rc_db -ne 0 ]; then ++ # may have multiple workers, in which case $pid is the parent and we want to check the children ++ # If there are no children or at least one child is not connected to both DB and AMQP then we fail. ++ KIDPIDS=`pgrep -P $pid -f nova-conductor` ++ if [ ! -z "$KIDPIDS" ]; then ++ for pid in $KIDPIDS ++ do ++ rc_db=`check_port $OCF_RESKEY_database_server_port $pid` ++ rc_amqp=`check_port $OCF_RESKEY_amqp_server_port $pid` ++ if [ $rc_amqp -ne 0 ] || [ $rc_db -ne 0 ]; then ++ ocf_log err "Nova Conductor pid $pid is not connected to the AMQP server and/or the database server: AMQP connection test returned $rc_amqp and database connection test returned $rc_db" ++ return $OCF_NOT_RUNNING ++ fi ++ done ++ else ++ ocf_log err "Nova Conductor pid $pid is not connected to the AMQP server and/or the database server: AMQP connection test returned $rc_amqp and database connection test returned $rc_db" ++ return $OCF_NOT_RUNNING ++ fi ++ fi ++ fi + + ocf_log debug "OpenStack Nova Conductor (nova-conductor) monitor succeeded" + return $OCF_SUCCESS +-- +1.9.1 + diff --git a/openstack/openstack-ras/openstack-ras/glance-api-bypass-monitor.patch b/openstack/openstack-ras/openstack-ras/glance-api-bypass-monitor.patch new file mode 100644 index 00000000..723641a9 --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/glance-api-bypass-monitor.patch @@ -0,0 +1,16 @@ +--- + ocf/glance-api | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/ocf/glance-api ++++ b/ocf/glance-api +@@ -243,6 +243,9 @@ glance_api_monitor() { + return $rc + fi + ++ ### DPENNEY: Bypass monitor until keyring functionality is ported ++ return $OCF_SUCCESS ++ + # Monitor the RA by retrieving the image list + if [ -n "$OCF_RESKEY_os_username" ] && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_os_auth_url" ]; then + ocf_run -q $OCF_RESKEY_client_binary \ diff --git a/openstack/openstack-ras/openstack-ras/glance-api-juno.patch b/openstack/openstack-ras/openstack-ras/glance-api-juno.patch new file mode 100644 index 00000000..ce0a5f45 --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/glance-api-juno.patch @@ -0,0 +1,13 @@ +Index: git/ocf/glance-api +=================================================================== +--- git.orig/ocf/glance-api ++++ git/ocf/glance-api +@@ -249,7 +249,7 @@ glance_api_monitor() { + --os_username "$OCF_RESKEY_os_username" \ + --os_tenant_name "$OCF_RESKEY_os_tenant_name" \ + --os_auth_url "$OCF_RESKEY_os_auth_url" \ +- index > /dev/null 2>&1 ++ image-list > /dev/null 2>&1 + rc=$? 
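++        # ('index' was dropped from the glance client CLI around the
++        # Juno cycle; 'image-list' is the closest replacement.)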
+ if [ $rc -ne 0 ]; then + ocf_log err "Failed to connect to the OpenStack ImageService (glance-api): $rc" diff --git a/openstack/openstack-ras/openstack-ras/heat-cloudwatch.patch b/openstack/openstack-ras/openstack-ras/heat-cloudwatch.patch new file mode 100644 index 00000000..0df6ca5c --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/heat-cloudwatch.patch @@ -0,0 +1,349 @@ +Index: git/ocf/heat-api-cloudwatch +=================================================================== +--- /dev/null ++++ git/ocf/heat-api-cloudwatch +@@ -0,0 +1,344 @@ ++#!/bin/sh ++# ++# ++# OpenStack Orchestration Engine Service (heat-api-cloudwatch) ++# ++# Description: Manages an OpenStack Orchestration Engine Service (heat-api-cloudwatch) process as an HA resource ++# ++# Authors: Emilien Macchi ++# ++# Support: openstack@lists.launchpad.net ++# License: Apache Software License (ASL) 2.0 ++# ++# ++# See usage() function below for more details ... ++# ++# OCF instance parameters: ++# OCF_RESKEY_binary ++# OCF_RESKEY_config ++# OCF_RESKEY_user ++# OCF_RESKEY_pid ++# OCF_RESKEY_monitor_binary ++# OCF_RESKEY_server_port ++# OCF_RESKEY_additional_parameters ++####################################################################### ++# Initialization: ++ ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++ ++####################################################################### ++ ++# Fill in some defaults if no values are specified ++ ++OCF_RESKEY_binary_default="heat-api-cloudwatch" ++OCF_RESKEY_config_default="/etc/heat/heat.conf" ++OCF_RESKEY_user_default="heat" ++OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" ++OCF_RESKEY_server_port_default="8000" ++ ++: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} ++: ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} ++: ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} ++: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} ++: ${OCF_RESKEY_server_port=${OCF_RESKEY_server_port_default}} ++ ++####################################################################### ++ ++usage() { ++ cat < ++ ++ ++1.0 ++ ++ ++Resource agent for the OpenStack Orchestration Engine Service (heat-api-cloudwatch) ++May manage a heat-api-cloudwatch instance or a clone set that ++creates a distributed heat-api-cloudwatch cluster. ++ ++Manages the OpenStack Orchestration Engine Service (heat-api-cloudwatch) ++ ++ ++ ++ ++Location of the OpenStack Orchestration Engine server binary (heat-api-cloudwatch) ++ ++OpenStack Orchestration Engine server binary (heat-api-cloudwatch) ++ ++ ++ ++ ++ ++Location of the OpenStack Orchestration Engine Service (heat-api-cloudwatch) configuration file ++ ++OpenStack Orchestration Engine (heat-api-cloudwatch) config file ++ ++ ++ ++ ++ ++User running OpenStack Orchestration Engine Service (heat-api-cloudwatch) ++ ++OpenStack Orchestration Engine Service (heat-api-cloudwatch) user ++ ++ ++ ++ ++ ++The pid file to use for this OpenStack Orchestration Engine Service (heat-api-cloudwatch) instance ++ ++OpenStack Orchestration Engine Service (heat-api-cloudwatch) pid file ++ ++ ++ ++ ++ ++The listening port number of the heat-api-cloudwatch server. 
++ ++ ++heat-api-cloudwatch listening port ++ ++ ++ ++ ++ ++Additional parameters to pass on to the OpenStack Orchestration Engine Service (heat-api-cloudwatch) ++ ++Additional parameters for heat-api-cloudwatch ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++####################################################################### ++# Functions invoked by resource manager actions ++ ++heat_api_cloudwatch_check_port() { ++# This function has been taken from the squid RA and improved a bit ++# The length of the integer must be 4 ++# Examples of valid port: "1080", "0080" ++# Examples of invalid port: "1080bad", "0", "0000", "" ++ ++ local int ++ local cnt ++ ++ int="$1" ++ cnt=${#int} ++ echo $int |egrep -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*' ++ ++ if [ $? -ne 0 ] || [ $cnt -ne 4 ]; then ++ ocf_log err "Invalid port number: $1" ++ exit $OCF_ERR_CONFIGURED ++ fi ++} ++ ++heat_api_cloudwatch_validate() { ++ local rc ++ ++ check_binary $OCF_RESKEY_binary ++ check_binary netstat ++ heat_api_cloudwatch_check_port $OCF_RESKEY_server_port ++ ++ # A config file on shared storage that is not available ++ # during probes is OK. ++ if [ ! -f $OCF_RESKEY_config ]; then ++ if ! ocf_is_probe; then ++ ocf_log err "Config $OCF_RESKEY_config doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" ++ fi ++ ++ getent passwd $OCF_RESKEY_user >/dev/null 2>&1 ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "User $OCF_RESKEY_user doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ++ true ++} ++ ++heat_api_cloudwatch_status() { ++ local pid ++ local rc ++ ++ if [ ! -f $OCF_RESKEY_pid ]; then ++ ocf_log info "OpenStack Orchestration Engine (heat-api-cloudwatch) is not running" ++ return $OCF_NOT_RUNNING ++ else ++ pid=`cat $OCF_RESKEY_pid` ++ fi ++ ++ ocf_run -warn kill -s 0 $pid ++ rc=$? ++ if [ $rc -eq 0 ]; then ++ return $OCF_SUCCESS ++ else ++ ocf_log info "Old PID file found, but OpenStack Orchestration Engine (heat-api-cloudwatch) is not running" ++ return $OCF_NOT_RUNNING ++ fi ++} ++ ++heat_api_cloudwatch_monitor() { ++ local rc ++ local pid ++ local rc_db ++ local engine_db_check ++ ++ heat_api_cloudwatch_status ++ rc=$? ++ ++ # If status returned anything but success, return that immediately ++ if [ $rc -ne $OCF_SUCCESS ]; then ++ return $rc ++ fi ++ ++ # Check the server is listening on the server port ++ engine_db_check=`netstat -an | grep -s "$OCF_RESKEY_console_port" | grep -qs "LISTEN"` ++ rc_db=$? ++ if [ $rc_db -ne 0 ]; then ++ ocf_log err "heat-api-cloudwatch is not listening on $OCF_RESKEY_console_port: $rc_db" ++ return $OCF_NOT_RUNNING ++ fi ++ ++ ocf_log debug "OpenStack Orchestration Engine (heat-api-cloudwatch) monitor succeeded" ++ return $OCF_SUCCESS ++} ++ ++heat_api_cloudwatch_start() { ++ local rc ++ ++ heat_api_cloudwatch_status ++ rc=$? ++ if [ $rc -eq $OCF_SUCCESS ]; then ++ ocf_log info "OpenStack Orchestration Engine (heat-api-cloudwatch) already running" ++ return $OCF_SUCCESS ++ fi ++ ++ # run the actual heat-api-cloudwatch daemon. Don't use ocf_run as we're sending the tool's output ++ # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. ++ su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ ++ $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid ++ ++ # Spin waiting for the server to come up. ++ while true; do ++ heat_api_cloudwatch_monitor ++ rc=$? 
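++        # Note: heat_api_cloudwatch_monitor above greps for
++        # $OCF_RESKEY_console_port, which this agent never sets; with the
++        # variable empty, the LISTEN grep matches any listening socket.
++        # The same pattern recurs in heat-api and heat-api-cfn below.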
++ [ $rc -eq $OCF_SUCCESS ] && break ++ if [ $rc -ne $OCF_NOT_RUNNING ]; then ++ ocf_log err "OpenStack Orchestration Engine (heat-api-cloudwatch) start failed" ++ exit $OCF_ERR_GENERIC ++ fi ++ sleep 1 ++ done ++ ++ ocf_log info "OpenStack Orchestration Engine (heat-api-cloudwatch) started" ++ return $OCF_SUCCESS ++} ++ ++heat_api_cloudwatch_stop() { ++ local rc ++ local pid ++ ++ heat_api_cloudwatch_status ++ rc=$? ++ if [ $rc -eq $OCF_NOT_RUNNING ]; then ++ ocf_log info "OpenStack Orchestration Engine (heat-api-cloudwatch) already stopped" ++ return $OCF_SUCCESS ++ fi ++ ++ # Try SIGTERM ++ pid=`cat $OCF_RESKEY_pid` ++ ocf_run kill -s TERM $pid ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "OpenStack Orchestration Engine (heat-api-cloudwatch) couldn't be stopped" ++ exit $OCF_ERR_GENERIC ++ fi ++ ++ # stop waiting ++ shutdown_timeout=15 ++ if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then ++ shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) ++ fi ++ count=0 ++ while [ $count -lt $shutdown_timeout ]; do ++ heat_api_cloudwatch_status ++ rc=$? ++ if [ $rc -eq $OCF_NOT_RUNNING ]; then ++ break ++ fi ++ count=`expr $count + 1` ++ sleep 1 ++ ocf_log debug "OpenStack Orchestration Engine (heat-api-cloudwatch) still hasn't stopped yet. Waiting ..." ++ done ++ ++ heat_api_cloudwatch_status ++ rc=$? ++ if [ $rc -ne $OCF_NOT_RUNNING ]; then ++ # SIGTERM didn't help either, try SIGKILL ++ ocf_log info "OpenStack Orchestration Engine (heat-api-cloudwatch) failed to stop after ${shutdown_timeout}s \ ++ using SIGTERM. Trying SIGKILL ..." ++ ocf_run kill -s KILL $pid ++ fi ++ ++ ocf_log info "OpenStack Orchestration Engine (heat-api-cloudwatch) stopped" ++ ++ rm -f $OCF_RESKEY_pid ++ ++ return $OCF_SUCCESS ++} ++ ++####################################################################### ++ ++case "$1" in ++ meta-data) meta_data ++ exit $OCF_SUCCESS;; ++ usage|help) usage ++ exit $OCF_SUCCESS;; ++esac ++ ++# Anything except meta-data and help must pass validation ++heat_api_cloudwatch_validate || exit $? ++ ++# What kind of method was invoked? ++case "$1" in ++ start) heat_api_cloudwatch_start;; ++ stop) heat_api_cloudwatch_stop;; ++ status) heat_api_cloudwatch_status;; ++ monitor) heat_api_cloudwatch_monitor;; ++ validate-all) ;; ++ *) usage ++ exit $OCF_ERR_UNIMPLEMENTED;; ++esac ++ diff --git a/openstack/openstack-ras/openstack-ras/heat-engine-support-workers.patch b/openstack/openstack-ras/openstack-ras/heat-engine-support-workers.patch new file mode 100644 index 00000000..3a7eeddb --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/heat-engine-support-workers.patch @@ -0,0 +1,52 @@ +--- + ocf/heat-engine | 24 +++++++++++++++++++++--- + 1 file changed, 21 insertions(+), 3 deletions(-) + +--- a/ocf/heat-engine ++++ b/ocf/heat-engine +@@ -238,6 +238,24 @@ heat_engine_status() { + fi + } + ++# Function to check a process for port usage, as well as children ++check_port() { ++ local port=$1 ++ local pid=$2 ++ ++ local children=`ps -ef | awk -v ppid=$pid '$3 == ppid { print $2}'` ++ ++ for p in $pid $children ++ do ++ netstat -punt | grep -s "$port" | grep -s "$p" | grep -qs "ESTABLISHED" ++ if [ $? 
-eq 0 ] ++ then ++ return 0 ++ fi ++ done ++ return 1 ++} ++ + heat_engine_monitor() { + local rc + local pid +@@ -258,7 +276,7 @@ heat_engine_monitor() { + # We are sure to hit the heat-engine process and not other heat process with the same connection behavior (for example heat-api) + if ocf_is_true "$OCF_RESKEY_zeromq"; then + pid=`cat $OCF_RESKEY_pid` +- engine_db_check=`netstat -punt | grep -s "$OCF_RESKEY_database_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` ++ engine_db_check=`check_port "$OCF_RESKEY_database_server_port" "$pid"` + rc_db=$? + if [ $rc_db -ne 0 ]; then + ocf_log err "heat-engine is not connected to the database server: $rc_db" +@@ -266,9 +284,9 @@ heat_engine_monitor() { + fi + else + pid=`cat $OCF_RESKEY_pid` +- engine_db_check=`netstat -punt | grep -s "$OCF_RESKEY_database_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` ++ engine_db_check=`check_port "$OCF_RESKEY_database_server_port" "$pid"` + rc_db=$? +- engine_amqp_check=`netstat -punt | grep -s "$OCF_RESKEY_amqp_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` ++ engine_amqp_check=`check_port "$OCF_RESKEY_amqp_server_port" "$pid"` + rc_amqp=$? + if [ $rc_amqp -ne 0 ] || [ $rc_db -ne 0 ]; then + ocf_log err "Heat Engine is not connected to the AMQP server and/or the database server: AMQP connection test returned $rc_amqp and database connection test returned $rc_db" diff --git a/openstack/openstack-ras/openstack-ras/heat.patch b/openstack/openstack-ras/openstack-ras/heat.patch new file mode 100644 index 00000000..7d4a2725 --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/heat.patch @@ -0,0 +1,698 @@ +Index: git/ocf/heat-api +=================================================================== +--- /dev/null ++++ git/ocf/heat-api +@@ -0,0 +1,344 @@ ++#!/bin/sh ++# ++# ++# OpenStack Orchestration Engine Service (heat-api) ++# ++# Description: Manages an OpenStack Orchestration Engine Service (heat-api) process as an HA resource ++# ++# Authors: Emilien Macchi ++# ++# Support: openstack@lists.launchpad.net ++# License: Apache Software License (ASL) 2.0 ++# ++# ++# See usage() function below for more details ... ++# ++# OCF instance parameters: ++# OCF_RESKEY_binary ++# OCF_RESKEY_config ++# OCF_RESKEY_user ++# OCF_RESKEY_pid ++# OCF_RESKEY_monitor_binary ++# OCF_RESKEY_server_port ++# OCF_RESKEY_additional_parameters ++####################################################################### ++# Initialization: ++ ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++ ++####################################################################### ++ ++# Fill in some defaults if no values are specified ++ ++OCF_RESKEY_binary_default="heat-api" ++OCF_RESKEY_config_default="/etc/heat/heat.conf" ++OCF_RESKEY_user_default="heat" ++OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" ++OCF_RESKEY_server_port_default="8004" ++ ++: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} ++: ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} ++: ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} ++: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} ++: ${OCF_RESKEY_server_port=${OCF_RESKEY_server_port_default}} ++ ++####################################################################### ++ ++usage() { ++ cat < ++ ++ ++1.0 ++ ++ ++Resource agent for the OpenStack Orchestration Engine Service (heat-api) ++May manage a heat-api instance or a clone set that ++creates a distributed heat-api cluster. 
++ ++Manages the OpenStack Orchestration Engine Service (heat-api) ++ ++ ++ ++ ++Location of the OpenStack Orchestration Engine server binary (heat-api) ++ ++OpenStack Orchestration Engine server binary (heat-api) ++ ++ ++ ++ ++ ++Location of the OpenStack Orchestration Engine Service (heat-api) configuration file ++ ++OpenStack Orchestration Engine (heat-api) config file ++ ++ ++ ++ ++ ++User running OpenStack Orchestration Engine Service (heat-api) ++ ++OpenStack Orchestration Engine Service (heat-api) user ++ ++ ++ ++ ++ ++The pid file to use for this OpenStack Orchestration Engine Service (heat-api) instance ++ ++OpenStack Orchestration Engine Service (heat-api) pid file ++ ++ ++ ++ ++ ++The listening port number of the heat-api server. ++ ++ ++heat-api listening port ++ ++ ++ ++ ++ ++Additional parameters to pass on to the OpenStack Orchestration Engine Service (heat-api) ++ ++Additional parameters for heat-api ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++####################################################################### ++# Functions invoked by resource manager actions ++ ++heat_api_check_port() { ++# This function has been taken from the squid RA and improved a bit ++# The length of the integer must be 4 ++# Examples of valid port: "1080", "0080" ++# Examples of invalid port: "1080bad", "0", "0000", "" ++ ++ local int ++ local cnt ++ ++ int="$1" ++ cnt=${#int} ++ echo $int |egrep -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*' ++ ++ if [ $? -ne 0 ] || [ $cnt -ne 4 ]; then ++ ocf_log err "Invalid port number: $1" ++ exit $OCF_ERR_CONFIGURED ++ fi ++} ++ ++heat_api_validate() { ++ local rc ++ ++ check_binary $OCF_RESKEY_binary ++ check_binary netstat ++ heat_api_check_port $OCF_RESKEY_server_port ++ ++ # A config file on shared storage that is not available ++ # during probes is OK. ++ if [ ! -f $OCF_RESKEY_config ]; then ++ if ! ocf_is_probe; then ++ ocf_log err "Config $OCF_RESKEY_config doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" ++ fi ++ ++ getent passwd $OCF_RESKEY_user >/dev/null 2>&1 ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "User $OCF_RESKEY_user doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ++ true ++} ++ ++heat_api_status() { ++ local pid ++ local rc ++ ++ if [ ! -f $OCF_RESKEY_pid ]; then ++ ocf_log info "OpenStack Orchestration Engine (heat-api) is not running" ++ return $OCF_NOT_RUNNING ++ else ++ pid=`cat $OCF_RESKEY_pid` ++ fi ++ ++ ocf_run -warn kill -s 0 $pid ++ rc=$? ++ if [ $rc -eq 0 ]; then ++ return $OCF_SUCCESS ++ else ++ ocf_log info "Old PID file found, but OpenStack Orchestration Engine (heat-api) is not running" ++ return $OCF_NOT_RUNNING ++ fi ++} ++ ++heat_api_monitor() { ++ local rc ++ local pid ++ local rc_db ++ local engine_db_check ++ ++ heat_api_status ++ rc=$? ++ ++ # If status returned anything but success, return that immediately ++ if [ $rc -ne $OCF_SUCCESS ]; then ++ return $rc ++ fi ++ ++ # Check the server is listening on the server port ++ engine_db_check=`netstat -an | grep -s "$OCF_RESKEY_console_port" | grep -qs "LISTEN"` ++ rc_db=$? ++ if [ $rc_db -ne 0 ]; then ++ ocf_log err "heat-api is not listening on $OCF_RESKEY_console_port: $rc_db" ++ return $OCF_NOT_RUNNING ++ fi ++ ++ ocf_log debug "OpenStack Orchestration Engine (heat-api) monitor succeeded" ++ return $OCF_SUCCESS ++} ++ ++heat_api_start() { ++ local rc ++ ++ heat_api_status ++ rc=$? 
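++    # Start is idempotent: an already-running daemon short-circuits to
++    # success rather than spawning a second instance.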
++ if [ $rc -eq $OCF_SUCCESS ]; then ++ ocf_log info "OpenStack Orchestration Engine (heat-api) already running" ++ return $OCF_SUCCESS ++ fi ++ ++ # run the actual heat-api daemon. Don't use ocf_run as we're sending the tool's output ++ # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. ++ su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ ++ $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid ++ ++ # Spin waiting for the server to come up. ++ while true; do ++ heat_api_monitor ++ rc=$? ++ [ $rc -eq $OCF_SUCCESS ] && break ++ if [ $rc -ne $OCF_NOT_RUNNING ]; then ++ ocf_log err "OpenStack Orchestration Engine (heat-api) start failed" ++ exit $OCF_ERR_GENERIC ++ fi ++ sleep 1 ++ done ++ ++ ocf_log info "OpenStack Orchestration Engine (heat-api) started" ++ return $OCF_SUCCESS ++} ++ ++heat_api_stop() { ++ local rc ++ local pid ++ ++ heat_api_status ++ rc=$? ++ if [ $rc -eq $OCF_NOT_RUNNING ]; then ++ ocf_log info "OpenStack Orchestration Engine (heat-api) already stopped" ++ return $OCF_SUCCESS ++ fi ++ ++ # Try SIGTERM ++ pid=`cat $OCF_RESKEY_pid` ++ ocf_run kill -s TERM $pid ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "OpenStack Orchestration Engine (heat-api) couldn't be stopped" ++ exit $OCF_ERR_GENERIC ++ fi ++ ++ # stop waiting ++ shutdown_timeout=15 ++ if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then ++ shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) ++ fi ++ count=0 ++ while [ $count -lt $shutdown_timeout ]; do ++ heat_api_status ++ rc=$? ++ if [ $rc -eq $OCF_NOT_RUNNING ]; then ++ break ++ fi ++ count=`expr $count + 1` ++ sleep 1 ++ ocf_log debug "OpenStack Orchestration Engine (heat-api) still hasn't stopped yet. Waiting ..." ++ done ++ ++ heat_api_status ++ rc=$? ++ if [ $rc -ne $OCF_NOT_RUNNING ]; then ++ # SIGTERM didn't help either, try SIGKILL ++ ocf_log info "OpenStack Orchestration Engine (heat-api) failed to stop after ${shutdown_timeout}s \ ++ using SIGTERM. Trying SIGKILL ..." ++ ocf_run kill -s KILL $pid ++ fi ++ ++ ocf_log info "OpenStack Orchestration Engine (heat-api) stopped" ++ ++ rm -f $OCF_RESKEY_pid ++ ++ return $OCF_SUCCESS ++} ++ ++####################################################################### ++ ++case "$1" in ++ meta-data) meta_data ++ exit $OCF_SUCCESS;; ++ usage|help) usage ++ exit $OCF_SUCCESS;; ++esac ++ ++# Anything except meta-data and help must pass validation ++heat_api_validate || exit $? ++ ++# What kind of method was invoked? ++case "$1" in ++ start) heat_api_start;; ++ stop) heat_api_stop;; ++ status) heat_api_status;; ++ monitor) heat_api_monitor;; ++ validate-all) ;; ++ *) usage ++ exit $OCF_ERR_UNIMPLEMENTED;; ++esac ++ +Index: git/ocf/heat-api-cfn +=================================================================== +--- /dev/null ++++ git/ocf/heat-api-cfn +@@ -0,0 +1,344 @@ ++#!/bin/sh ++# ++# ++# OpenStack Orchestration Engine Service (heat-api-cfn) ++# ++# Description: Manages an OpenStack Orchestration Engine Service (heat-api-cfn) process as an HA resource ++# ++# Authors: Emilien Macchi ++# ++# Support: openstack@lists.launchpad.net ++# License: Apache Software License (ASL) 2.0 ++# ++# ++# See usage() function below for more details ... 
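++# The defaults below assume the CloudFormation-compatible API on its
++# usual port 8000; override OCF_RESKEY_server_port if deployed elsewhere.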
++# ++# OCF instance parameters: ++# OCF_RESKEY_binary ++# OCF_RESKEY_config ++# OCF_RESKEY_user ++# OCF_RESKEY_pid ++# OCF_RESKEY_monitor_binary ++# OCF_RESKEY_server_port ++# OCF_RESKEY_additional_parameters ++####################################################################### ++# Initialization: ++ ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++ ++####################################################################### ++ ++# Fill in some defaults if no values are specified ++ ++OCF_RESKEY_binary_default="heat-api-cfn" ++OCF_RESKEY_config_default="/etc/heat/heat.conf" ++OCF_RESKEY_user_default="heat" ++OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" ++OCF_RESKEY_server_port_default="8000" ++ ++: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} ++: ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} ++: ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} ++: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} ++: ${OCF_RESKEY_server_port=${OCF_RESKEY_server_port_default}} ++ ++####################################################################### ++ ++usage() { ++ cat < ++ ++ ++1.0 ++ ++ ++Resource agent for the OpenStack Orchestration Engine Service (heat-api-cfn) ++May manage a heat-api-cfn instance or a clone set that ++creates a distributed heat-api-cfn cluster. ++ ++Manages the OpenStack Orchestration Engine Service (heat-api-cfn) ++ ++ ++ ++ ++Location of the OpenStack Orchestration Engine server binary (heat-api-cfn) ++ ++OpenStack Orchestration Engine server binary (heat-api-cfn) ++ ++ ++ ++ ++ ++Location of the OpenStack Orchestration Engine Service (heat-api-cfn) configuration file ++ ++OpenStack Orchestration Engine (heat-api-cfn) config file ++ ++ ++ ++ ++ ++User running OpenStack Orchestration Engine Service (heat-api-cfn) ++ ++OpenStack Orchestration Engine Service (heat-api-cfn) user ++ ++ ++ ++ ++ ++The pid file to use for this OpenStack Orchestration Engine Service (heat-api-cfn) instance ++ ++OpenStack Orchestration Engine Service (heat-api-cfn) pid file ++ ++ ++ ++ ++ ++The listening port number of the heat-api-cfn server. ++ ++ ++heat-api-cfn listening port ++ ++ ++ ++ ++ ++Additional parameters to pass on to the OpenStack Orchestration Engine Service (heat-api-cfn) ++ ++Additional parameters for heat-api-cfn ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++####################################################################### ++# Functions invoked by resource manager actions ++ ++heat_api_cfn_check_port() { ++# This function has been taken from the squid RA and improved a bit ++# The length of the integer must be 4 ++# Examples of valid port: "1080", "0080" ++# Examples of invalid port: "1080bad", "0", "0000", "" ++ ++ local int ++ local cnt ++ ++ int="$1" ++ cnt=${#int} ++ echo $int |egrep -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*' ++ ++ if [ $? -ne 0 ] || [ $cnt -ne 4 ]; then ++ ocf_log err "Invalid port number: $1" ++ exit $OCF_ERR_CONFIGURED ++ fi ++} ++ ++heat_api_cfn_validate() { ++ local rc ++ ++ check_binary $OCF_RESKEY_binary ++ check_binary netstat ++ heat_api_cfn_check_port $OCF_RESKEY_server_port ++ ++ # A config file on shared storage that is not available ++ # during probes is OK. ++ if [ ! -f $OCF_RESKEY_config ]; then ++ if ! ocf_is_probe; then ++ ocf_log err "Config $OCF_RESKEY_config doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" ++ fi ++ ++ getent passwd $OCF_RESKEY_user >/dev/null 2>&1 ++ rc=$? 
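++    # Note: check_binary from ocf-shellfuncs exits the script itself when
++    # a binary is missing; only the config and user checks use returns.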
++ if [ $rc -ne 0 ]; then ++ ocf_log err "User $OCF_RESKEY_user doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ++ true ++} ++ ++heat_api_cfn_status() { ++ local pid ++ local rc ++ ++ if [ ! -f $OCF_RESKEY_pid ]; then ++ ocf_log info "OpenStack Orchestration Engine (heat-api-cfn) is not running" ++ return $OCF_NOT_RUNNING ++ else ++ pid=`cat $OCF_RESKEY_pid` ++ fi ++ ++ ocf_run -warn kill -s 0 $pid ++ rc=$? ++ if [ $rc -eq 0 ]; then ++ return $OCF_SUCCESS ++ else ++ ocf_log info "Old PID file found, but OpenStack Orchestration Engine (heat-api-cfn) is not running" ++ return $OCF_NOT_RUNNING ++ fi ++} ++ ++heat_api_cfn_monitor() { ++ local rc ++ local pid ++ local rc_db ++ local engine_db_check ++ ++ heat_api_cfn_status ++ rc=$? ++ ++ # If status returned anything but success, return that immediately ++ if [ $rc -ne $OCF_SUCCESS ]; then ++ return $rc ++ fi ++ ++ # Check the server is listening on the server port ++ engine_db_check=`netstat -an | grep -s "$OCF_RESKEY_console_port" | grep -qs "LISTEN"` ++ rc_db=$? ++ if [ $rc_db -ne 0 ]; then ++ ocf_log err "heat-api-cfn is not listening on $OCF_RESKEY_console_port: $rc_db" ++ return $OCF_NOT_RUNNING ++ fi ++ ++ ocf_log debug "OpenStack Orchestration Engine (heat-api-cfn) monitor succeeded" ++ return $OCF_SUCCESS ++} ++ ++heat_api_cfn_start() { ++ local rc ++ ++ heat_api_cfn_status ++ rc=$? ++ if [ $rc -eq $OCF_SUCCESS ]; then ++ ocf_log info "OpenStack Orchestration Engine (heat-api-cfn) already running" ++ return $OCF_SUCCESS ++ fi ++ ++ # run the actual heat-api-cfn daemon. Don't use ocf_run as we're sending the tool's output ++ # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. ++ su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ ++ $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid ++ ++ # Spin waiting for the server to come up. ++ while true; do ++ heat_api_cfn_monitor ++ rc=$? ++ [ $rc -eq $OCF_SUCCESS ] && break ++ if [ $rc -ne $OCF_NOT_RUNNING ]; then ++ ocf_log err "OpenStack Orchestration Engine (heat-api-cfn) start failed" ++ exit $OCF_ERR_GENERIC ++ fi ++ sleep 1 ++ done ++ ++ ocf_log info "OpenStack Orchestration Engine (heat-api-cfn) started" ++ return $OCF_SUCCESS ++} ++ ++heat_api_cfn_stop() { ++ local rc ++ local pid ++ ++ heat_api_cfn_status ++ rc=$? ++ if [ $rc -eq $OCF_NOT_RUNNING ]; then ++ ocf_log info "OpenStack Orchestration Engine (heat-api-cfn) already stopped" ++ return $OCF_SUCCESS ++ fi ++ ++ # Try SIGTERM ++ pid=`cat $OCF_RESKEY_pid` ++ ocf_run kill -s TERM $pid ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "OpenStack Orchestration Engine (heat-api-cfn) couldn't be stopped" ++ exit $OCF_ERR_GENERIC ++ fi ++ ++ # stop waiting ++ shutdown_timeout=15 ++ if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then ++ shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) ++ fi ++ count=0 ++ while [ $count -lt $shutdown_timeout ]; do ++ heat_api_cfn_status ++ rc=$? ++ if [ $rc -eq $OCF_NOT_RUNNING ]; then ++ break ++ fi ++ count=`expr $count + 1` ++ sleep 1 ++ ocf_log debug "OpenStack Orchestration Engine (heat-api-cfn) still hasn't stopped yet. Waiting ..." ++ done ++ ++ heat_api_cfn_status ++ rc=$? ++ if [ $rc -ne $OCF_NOT_RUNNING ]; then ++ # SIGTERM didn't help either, try SIGKILL ++ ocf_log info "OpenStack Orchestration Engine (heat-api-cfn) failed to stop after ${shutdown_timeout}s \ ++ using SIGTERM. Trying SIGKILL ..." 
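
One oddity in heat_api_cfn_monitor above: it greps netstat output for $OCF_RESKEY_console_port, a parameter this agent never declares (it defines server_port, default 8000); the name appears to be carried over from the nova-novnc agent. With the variable unset, grep "" matches every line, so the check passes whenever anything at all is listening. Capturing the output of grep -qs into engine_db_check is likewise a no-op, since -q prints nothing and only $? is consulted. A sketch of the presumably intended test:

    # Use the parameter the agent actually defines and rely on the exit
    # status alone.
    if netstat -an | grep "$OCF_RESKEY_server_port" | grep -q "LISTEN"; then
        echo "heat-api-cfn is listening on port $OCF_RESKEY_server_port"
    fi
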
++ ocf_run kill -s KILL $pid ++ fi ++ ++ ocf_log info "OpenStack Orchestration Engine (heat-api-cfn) stopped" ++ ++ rm -f $OCF_RESKEY_pid ++ ++ return $OCF_SUCCESS ++} ++ ++####################################################################### ++ ++case "$1" in ++ meta-data) meta_data ++ exit $OCF_SUCCESS;; ++ usage|help) usage ++ exit $OCF_SUCCESS;; ++esac ++ ++# Anything except meta-data and help must pass validation ++heat_api_cfn_validate || exit $? ++ ++# What kind of method was invoked? ++case "$1" in ++ start) heat_api_cfn_start;; ++ stop) heat_api_cfn_stop;; ++ status) heat_api_cfn_status;; ++ monitor) heat_api_cfn_monitor;; ++ validate-all) ;; ++ *) usage ++ exit $OCF_ERR_UNIMPLEMENTED;; ++esac ++ diff --git a/openstack/openstack-ras/openstack-ras/neutron-logrotate.patch b/openstack/openstack-ras/openstack-ras/neutron-logrotate.patch new file mode 100644 index 00000000..54f0c4ba --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/neutron-logrotate.patch @@ -0,0 +1,15 @@ +--- + ocf/neutron-server | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/ocf/neutron-server ++++ b/ocf/neutron-server +@@ -288,7 +288,7 @@ neutron_server_start() { + # Don't use ocf_run as we're sending the tool's output + # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. + su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ +- --config-file=$OCF_RESKEY_plugin_config --log-file=/var/log/neutron/server.log $OCF_RESKEY_additional_parameters"' >> \ ++ --config-file=$OCF_RESKEY_plugin_config $OCF_RESKEY_additional_parameters"' >> \ + /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid + + # Spin waiting for the server to come up. diff --git a/openstack/openstack-ras/openstack-ras/neutron-server-sriov-config.patch b/openstack/openstack-ras/openstack-ras/neutron-server-sriov-config.patch new file mode 100644 index 00000000..098529fd --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/neutron-server-sriov-config.patch @@ -0,0 +1,52 @@ +Index: openstack-resource-agents-git-64e633d/ocf/neutron-server +=================================================================== +--- openstack-resource-agents-git-64e633d.orig/ocf/neutron-server 2016-08-09 19:09:49.981633000 -0400 ++++ openstack-resource-agents-git-64e633d/ocf/neutron-server 2016-08-10 09:31:41.221558000 -0400 +@@ -25,6 +25,7 @@ + # OCF_RESKEY_binary + # OCF_RESKEY_config + # OCF_RESKEY_plugin_config ++# OCF_RESKEY_sriov_plugin_config + # OCF_RESKEY_user + # OCF_RESKEY_pid + # OCF_RESKEY_os_username +@@ -45,6 +46,7 @@ + OCF_RESKEY_binary_default="neutron-server" + OCF_RESKEY_config_default="/etc/neutron/neutron.conf" + OCF_RESKEY_plugin_config_default="/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini" ++OCF_RESKEY_sriov_plugin_config_default="/etc/neutron/plugins/ml2/ml2_conf_sriov.ini" + OCF_RESKEY_user_default="neutron" + OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" + OCF_RESKEY_url_default="http://127.0.0.1:9696" +@@ -53,6 +55,7 @@ + : ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} + : ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} + : ${OCF_RESKEY_plugin_config=${OCF_RESKEY_plugin_config_default}} ++: ${OCF_RESKEY_sriov_plugin_config=${OCF_RESKEY_sriov_plugin_config_default}} + : ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} + : ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} + : ${OCF_RESKEY_url=${OCF_RESKEY_url_default}} +@@ -115,6 +118,14 @@ + + + ++ ++ ++Location of the OpenStack sriov plugin configuration file ++ ++OpenStack neutron 
sriov config file ++ ++ ++ + + + User running OpenStack Neutron Server (neutron-server) +@@ -288,7 +299,7 @@ + # Don't use ocf_run as we're sending the tool's output + # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. + su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ +- --config-file=$OCF_RESKEY_plugin_config $OCF_RESKEY_additional_parameters"' >> \ ++ --config-file=$OCF_RESKEY_plugin_config --config-file=$OCF_RESKEY_sriov_plugin_config $OCF_RESKEY_additional_parameters"' >> \ + /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid + + # Spin waiting for the server to come up. diff --git a/openstack/openstack-ras/openstack-ras/nova_novnc_kill_children.patch b/openstack/openstack-ras/openstack-ras/nova_novnc_kill_children.patch new file mode 100644 index 00000000..a7ca3079 --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/nova_novnc_kill_children.patch @@ -0,0 +1,64 @@ +--- + ocf/nova-novnc | 23 ++++++++++++++++++++++- + 1 file changed, 22 insertions(+), 1 deletion(-) + +--- a/ocf/nova-novnc ++++ b/ocf/nova-novnc +@@ -139,7 +139,7 @@ Additional parameters to pass on to the + + + +- ++ + + + +@@ -260,6 +260,23 @@ nova_vnc_console_start() { + return $OCF_SUCCESS + } + ++nova_vnc_console_stop_all() { ++ # Make sure nova-novncproxy and all the children are stopped. ++ for sig in TERM KILL ++ do ++ for pid in $(ps -eo pid,cmd | grep python |\ ++ grep "nova-novncproxy" | \ ++ grep -v grep | awk '{print $1}') ++ do ++ ocf_log info "Manually killing $pid with $sig" ++ kill -$sig $pid ++ done ++ sleep 1 ++ done ++ ++ return $OCF_SUCCESS ++} ++ + nova_vnc_console_stop() { + local rc + local pid +@@ -268,6 +285,7 @@ nova_vnc_console_stop() { + rc=$? + if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack Nova VNC Console (nova-novncproxy) already stopped" ++ nova_vnc_console_stop_all + return $OCF_SUCCESS + fi + +@@ -277,6 +295,7 @@ nova_vnc_console_stop() { + rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "OpenStack Nova VNC Console (nova-novncproxy) couldn't be stopped" ++ nova_vnc_console_stop_all + exit $OCF_ERR_GENERIC + fi + +@@ -310,6 +329,8 @@ nova_vnc_console_stop() { + + rm -f $OCF_RESKEY_pid + ++ nova_vnc_console_stop_all ++ + return $OCF_SUCCESS + } + diff --git a/openstack/openstack-ras/openstack-ras/nova_prestart_hooks.patch b/openstack/openstack-ras/openstack-ras/nova_prestart_hooks.patch new file mode 100644 index 00000000..c58aed2e --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/nova_prestart_hooks.patch @@ -0,0 +1,42 @@ +diff --git a/ocf/nova-api b/ocf/nova-api +index 5764adc..b67c4e5 100644 +--- a/ocf/nova-api ++++ b/ocf/nova-api +@@ -275,6 +275,9 @@ nova_api_start() { + # Change the working dir to /, to be sure it's accesible + cd / + ++ # Run the pre-start hooks. This can be used to trigger a nova database sync, for example. ++ /usr/bin/nova-controller-runhooks ++ + # run the actual nova-api daemon. Don't use ocf_run as we're sending the tool's output + # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. + su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ +diff --git a/ocf/nova-conductor b/ocf/nova-conductor +index dfcff97..aa1ee2a 100644 +--- a/ocf/nova-conductor ++++ b/ocf/nova-conductor +@@ -294,6 +294,9 @@ nova_conductor_start() { + # Change the working dir to /, to be sure it's accesible + cd / + ++ # Run the pre-start hooks. This can be used to trigger a nova database sync, for example. 
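++ # (nova-controller-runhooks itself is shipped by the packaging, not by
++ # this patch; presumably it walks a drop-in directory and runs whatever
++ # executables it finds there, e.g. a "nova-manage db sync" wrapper.)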
++ /usr/bin/nova-controller-runhooks ++ + # run the actual nova-conductor daemon. Don't use ocf_run as we're sending the tool's output + # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. + su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ +diff --git a/ocf/nova-scheduler b/ocf/nova-scheduler +index afaf8e9..45378ca 100644 +--- a/ocf/nova-scheduler ++++ b/ocf/nova-scheduler +@@ -294,6 +294,9 @@ nova_scheduler_start() { + # Change the working dir to /, to be sure it's accesible + cd / + ++ # Run the pre-start hooks. This can be used to trigger a nova database sync, for example. ++ /usr/bin/nova-controller-runhooks ++ + # run the actual nova-scheduler daemon. Don't use ocf_run as we're sending the tool's output + # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. + su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ diff --git a/openstack/openstack-ras/openstack-ras/nova_set_cwd.patch b/openstack/openstack-ras/openstack-ras/nova_set_cwd.patch new file mode 100644 index 00000000..5a650743 --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/nova_set_cwd.patch @@ -0,0 +1,94 @@ +--- + ocf/nova-api | 3 +++ + ocf/nova-cert | 3 +++ + ocf/nova-conductor | 3 +++ + ocf/nova-consoleauth | 3 +++ + ocf/nova-network | 3 +++ + ocf/nova-novnc | 3 +++ + ocf/nova-scheduler | 3 +++ + 7 files changed, 21 insertions(+) + +--- a/ocf/nova-api ++++ b/ocf/nova-api +@@ -272,6 +272,9 @@ nova_api_start() { + return $OCF_SUCCESS + fi + ++ # Change the working dir to /, to be sure it's accesible ++ cd / ++ + # run the actual nova-api daemon. Don't use ocf_run as we're sending the tool's output + # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. + su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ +--- a/ocf/nova-cert ++++ b/ocf/nova-cert +@@ -285,6 +285,9 @@ nova_cert_start() { + return $OCF_SUCCESS + fi + ++ # Change the working dir to /, to be sure it's accesible ++ cd / ++ + # run the actual nova-cert daemon. Don't use ocf_run as we're sending the tool's output + # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. + su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ +--- a/ocf/nova-conductor ++++ b/ocf/nova-conductor +@@ -284,6 +284,9 @@ nova_conductor_start() { + return $OCF_SUCCESS + fi + ++ # Change the working dir to /, to be sure it's accesible ++ cd / ++ + # run the actual nova-conductor daemon. Don't use ocf_run as we're sending the tool's output + # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. + su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ +--- a/ocf/nova-consoleauth ++++ b/ocf/nova-consoleauth +@@ -285,6 +285,9 @@ nova_consoleauth_start() { + return $OCF_SUCCESS + fi + ++ # Change the working dir to /, to be sure it's accesible ++ cd / ++ + # run the actual nova-consoleauth daemon. Don't use ocf_run as we're sending the tool's output + # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. 
+ su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ +--- a/ocf/nova-network ++++ b/ocf/nova-network +@@ -264,6 +264,9 @@ nova_network_start() { + return $OCF_SUCCESS + fi + ++ # Change the working dir to /, to be sure it's accesible ++ cd / ++ + # run the actual nova-network daemon. Don't use ocf_run as we're sending the tool's output + # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. + su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ +--- a/ocf/nova-novnc ++++ b/ocf/nova-novnc +@@ -235,6 +235,9 @@ nova_vnc_console_start() { + return $OCF_SUCCESS + fi + ++ # Change the working dir to /, to be sure it's accesible ++ cd / ++ + # run the actual nova-novncproxy daemon. Don't use ocf_run as we're sending the tool's output + # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. + su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config --web /usr/share/novnc/ \ +--- a/ocf/nova-scheduler ++++ b/ocf/nova-scheduler +@@ -284,6 +284,9 @@ nova_scheduler_start() { + return $OCF_SUCCESS + fi + ++ # Change the working dir to /, to be sure it's accesible ++ cd / ++ + # run the actual nova-scheduler daemon. Don't use ocf_run as we're sending the tool's output + # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. + su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ diff --git a/openstack/openstack-ras/openstack-ras/openstack-ras.patch b/openstack/openstack-ras/openstack-ras/openstack-ras.patch new file mode 100644 index 00000000..328f3a7e --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/openstack-ras.patch @@ -0,0 +1,405 @@ +--- + ocf/nova-conductor | 383 +++++++++++++++++++++++++++++++++++++++++++++++++++++ + ocf/nova-novnc | 5 + 2 files changed, 387 insertions(+), 1 deletion(-) + +--- /dev/null ++++ b/ocf/nova-conductor +@@ -0,0 +1,383 @@ ++#!/bin/sh ++# ++# ++# OpenStack Conductor Service (nova-conductor) ++# ++# Description: Manages an OpenStack Conductor Service (nova-conductor) process as an HA resource ++# ++# Authors: Sébastien Han ++# Mainly inspired by the Glance API resource agent written by Martin Gerhard Loschwitz from Hastexo: http://goo.gl/whLpr ++# ++# Support: openstack@lists.launchpad.net ++# License: Apache Software License (ASL) 2.0 ++# ++# ++# See usage() function below for more details ... ++# ++# OCF instance parameters: ++# OCF_RESKEY_binary ++# OCF_RESKEY_config ++# OCF_RESKEY_user ++# OCF_RESKEY_pid ++# OCF_RESKEY_monitor_binary ++# OCF_RESKEY_database_server_port ++# OCF_RESKEY_amqp_server_port ++# OCF_RESKEY_zeromq ++# OCF_RESKEY_additional_parameters ++####################################################################### ++# Initialization: ++ ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. 
${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++ ++####################################################################### ++ ++# Fill in some defaults if no values are specified ++ ++OCF_RESKEY_binary_default="nova-conductor" ++OCF_RESKEY_config_default="/etc/nova/nova.conf" ++OCF_RESKEY_user_default="nova" ++OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" ++OCF_RESKEY_database_server_port_default="3306" ++OCF_RESKEY_amqp_server_port_default="5672" ++OCF_RESKEY_zeromq_default="false" ++ ++: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} ++: ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} ++: ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} ++: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} ++: ${OCF_RESKEY_database_server_port=${OCF_RESKEY_database_server_port_default}} ++: ${OCF_RESKEY_amqp_server_port=${OCF_RESKEY_amqp_server_port_default}} ++: ${OCF_RESKEY_zeromq=${OCF_RESKEY_zeromq_default}} ++ ++####################################################################### ++ ++usage() { ++ cat < ++ ++ ++1.0 ++ ++ ++Resource agent for the OpenStack Nova Conductor Service (nova-conductor) ++May manage a nova-conductor instance or a clone set that ++creates a distributed nova-conductor cluster. ++ ++Manages the OpenStack Conductor Service (nova-conductor) ++ ++ ++ ++ ++Location of the OpenStack Nova Conductor server binary (nova-conductor) ++ ++OpenStack Nova Conductor server binary (nova-conductor) ++ ++ ++ ++ ++ ++Location of the OpenStack Conductor Service (nova-conductor) configuration file ++ ++OpenStack Nova Conductor (nova-conductor) config file ++ ++ ++ ++ ++ ++User running OpenStack Conductor Service (nova-conductor) ++ ++OpenStack Conductor Service (nova-conductor) user ++ ++ ++ ++ ++ ++The pid file to use for this OpenStack Conductor Service (nova-conductor) instance ++ ++OpenStack Conductor Service (nova-conductor) pid file ++ ++ ++ ++ ++ ++The listening port number of the database server. Use for monitoring purposes ++ ++Database listening port ++ ++ ++ ++ ++ ++The listening port number of the AMQP server. Use for monitoring purposes ++ ++AMQP listening port ++ ++ ++ ++ ++ ++If zeromq is used, this will disable the connection test to the AMQP server. Use for monitoring purposes ++ ++Zero-MQ usage ++ ++ ++ ++ ++ ++Additional parameters to pass on to the OpenStack Conductor Service (nova-conductor) ++ ++Additional parameters for nova-conductor ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++} ++ ++####################################################################### ++# Functions invoked by resource manager actions ++ ++nova_conductor_check_port() { ++# This function has been taken from the squid RA and improved a bit ++# The length of the integer must be 4 ++# Examples of valid port: "1080", "0080" ++# Examples of invalid port: "1080bad", "0", "0000", "" ++ ++ local int ++ local cnt ++ ++ int="$1" ++ cnt=${#int} ++ echo $int |egrep -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*' ++ ++ if [ $? -ne 0 ] || [ $cnt -ne 4 ]; then ++ ocf_log err "Invalid port number: $1" ++ exit $OCF_ERR_CONFIGURED ++ fi ++} ++ ++nova_conductor_validate() { ++ local rc ++ ++ check_binary $OCF_RESKEY_binary ++ check_binary netstat ++ nova_conductor_check_port $OCF_RESKEY_database_server_port ++ nova_conductor_check_port $OCF_RESKEY_amqp_server_port ++ ++ # A config file on shared storage that is not available ++ # during probes is OK. ++ if [ ! -f $OCF_RESKEY_config ]; then ++ if ! 
ocf_is_probe; then ++ ocf_log err "Config $OCF_RESKEY_config doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" ++ fi ++ ++ getent passwd $OCF_RESKEY_user >/dev/null 2>&1 ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "User $OCF_RESKEY_user doesn't exist" ++ return $OCF_ERR_INSTALLED ++ fi ++ ++ true ++} ++ ++nova_conductor_status() { ++ local pid ++ local rc ++ ++ if [ ! -f $OCF_RESKEY_pid ]; then ++ ocf_log info "OpenStack Nova Conductor (nova-conductor) is not running" ++ return $OCF_NOT_RUNNING ++ else ++ pid=`cat $OCF_RESKEY_pid` ++ fi ++ ++ ocf_run -warn kill -s 0 $pid ++ rc=$? ++ if [ $rc -eq 0 ]; then ++ return $OCF_SUCCESS ++ else ++ ocf_log info "Old PID file found, but OpenStack Nova Conductor (nova-conductor) is not running" ++ return $OCF_NOT_RUNNING ++ fi ++} ++ ++nova_conductor_monitor() { ++ local rc ++ local pid ++ local rc_db ++ local rc_amqp ++ local conductor_db_check ++ local conductor_amqp_check ++ ++ nova_conductor_status ++ rc=$? ++ ++ # If status returned anything but success, return that immediately ++ if [ $rc -ne $OCF_SUCCESS ]; then ++ return $rc ++ fi ++ ++ # Check the connections according to the PID. ++ # We are sure to hit the conductor process and not other nova process with the same connection behavior (for example nova-cert) ++ if ocf_is_true "$OCF_RESKEY_zeromq"; then ++ pid=`cat $OCF_RESKEY_pid` ++ conductor_db_check=`netstat -punt | grep -s "$OCF_RESKEY_database_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` ++ rc_db=$? ++ if [ $rc_db -ne 0 ]; then ++ ocf_log err "Nova Conductor is not connected to the database server: $rc_db" ++ return $OCF_NOT_RUNNING ++ fi ++ else ++ pid=`cat $OCF_RESKEY_pid` ++ conductor_db_check=`netstat -punt | grep -s "$OCF_RESKEY_database_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` ++ rc_db=$? ++ conductor_amqp_check=`netstat -punt | grep -s "$OCF_RESKEY_amqp_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` ++ rc_amqp=$? ++ if [ $rc_amqp -ne 0 ] || [ $rc_db -ne 0 ]; then ++ ocf_log err "Nova Conductor is not connected to the AMQP server and/or the database server: AMQP connection test returned $rc_amqp and database connection test returned $rc_db" ++ return $OCF_NOT_RUNNING ++ fi ++ fi ++ ++ ocf_log debug "OpenStack Nova Conductor (nova-conductor) monitor succeeded" ++ return $OCF_SUCCESS ++} ++ ++nova_conductor_start() { ++ local rc ++ ++ nova_conductor_status ++ rc=$? ++ if [ $rc -eq $OCF_SUCCESS ]; then ++ ocf_log info "OpenStack Nova Conductor (nova-conductor) already running" ++ return $OCF_SUCCESS ++ fi ++ ++ # run the actual nova-conductor daemon. Don't use ocf_run as we're sending the tool's output ++ # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. ++ su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ ++ $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid ++ ++ # Spin waiting for the server to come up. ++ while true; do ++ nova_conductor_monitor ++ rc=$? ++ [ $rc -eq $OCF_SUCCESS ] && break ++ if [ $rc -ne $OCF_NOT_RUNNING ]; then ++ ocf_log err "OpenStack Nova Conductor (nova-conductor) start failed" ++ exit $OCF_ERR_GENERIC ++ fi ++ sleep 1 ++ done ++ ++ ocf_log info "OpenStack Nova Conductor (nova-conductor) started" ++ return $OCF_SUCCESS ++} ++ ++nova_conductor_stop() { ++ local rc ++ local pid ++ ++ nova_conductor_status ++ rc=$? 
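
The monitor logic earlier in this agent checks liveness by socket state: the recorded PID must own an ESTABLISHED connection to the database port and, unless zeromq is enabled, to the AMQP port as well (note the zeromq branch still performs the database check, matching the parameter description). The backtick captures are again no-ops -- grep -qs prints nothing -- so only the exit status matters. Reduced to its essentials:

    # Does the conductor PID hold an established AMQP connection?
    # (Run as root so netstat -p can attribute sockets to PIDs.)
    pid=$(cat "$OCF_RESKEY_pid")
    if netstat -punt 2>/dev/null | grep "$OCF_RESKEY_amqp_server_port" \
            | grep "$pid" | grep -q "ESTABLISHED"; then
        echo "nova-conductor ($pid) is connected to the AMQP server"
    fi
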
++ if [ $rc -eq $OCF_NOT_RUNNING ]; then ++ ocf_log info "OpenStack Nova Conductor (nova-conductor) already stopped" ++ return $OCF_SUCCESS ++ fi ++ ++ # Try SIGTERM ++ pid=`cat $OCF_RESKEY_pid` ++ ocf_run kill -s TERM $pid ++ rc=$? ++ if [ $rc -ne 0 ]; then ++ ocf_log err "OpenStack Nova Conductor (nova-conductor) couldn't be stopped" ++ exit $OCF_ERR_GENERIC ++ fi ++ ++ # stop waiting ++ shutdown_timeout=15 ++ if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then ++ shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) ++ fi ++ count=0 ++ while [ $count -lt $shutdown_timeout ]; do ++ nova_conductor_status ++ rc=$? ++ if [ $rc -eq $OCF_NOT_RUNNING ]; then ++ break ++ fi ++ count=`expr $count + 1` ++ sleep 1 ++ ocf_log debug "OpenStack Nova Conductor (nova-conductor) still hasn't stopped yet. Waiting ..." ++ done ++ ++ nova_conductor_status ++ rc=$? ++ if [ $rc -ne $OCF_NOT_RUNNING ]; then ++ # SIGTERM didn't help either, try SIGKILL ++ ocf_log info "OpenStack Nova Conductor (nova-conductor) failed to stop after ${shutdown_timeout}s \ ++ using SIGTERM. Trying SIGKILL ..." ++ ocf_run kill -s KILL $pid ++ fi ++ ++ ocf_log info "OpenStack Nova Conductor (nova-conductor) stopped" ++ ++ rm -f $OCF_RESKEY_pid ++ ++ return $OCF_SUCCESS ++} ++ ++####################################################################### ++ ++case "$1" in ++ meta-data) meta_data ++ exit $OCF_SUCCESS;; ++ usage|help) usage ++ exit $OCF_SUCCESS;; ++esac ++ ++# Anything except meta-data and help must pass validation ++nova_conductor_validate || exit $? ++ ++# What kind of method was invoked? ++case "$1" in ++ start) nova_conductor_start;; ++ stop) nova_conductor_stop;; ++ status) nova_conductor_status;; ++ monitor) nova_conductor_monitor;; ++ validate-all) ;; ++ *) usage ++ exit $OCF_ERR_UNIMPLEMENTED;; ++esac ++ +--- a/ocf/nova-novnc ++++ b/ocf/nova-novnc +@@ -214,7 +214,10 @@ nova_vnc_console_monitor() { + # Check whether we are supposed to monitor by logging into nova-novncproxy + # and do it if that's the case. + vnc_list_check=`netstat -a | grep -s "$OCF_RESKEY_console_port" | grep -qs "LISTEN"` +- rc=$? ++ #rc=$? ++ # not sure why grep is returning 1 .. should root cause at some point. 
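++ # (One plausible root cause, unverified: without -n, netstat resolves
++ # well-known ports to service names, so a grep for the numeric console
++ # port can come up empty; "netstat -an" keeps the output numeric, as the
++ # other agents in this series do.)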
++ # return success for now since service and port are both up ++ rc=0 + if [ $rc -ne 0 ]; then + ocf_log err "Nova VNC Console doesn't seem to listen on his default port: $rc" + return $OCF_NOT_RUNNING diff --git a/openstack/openstack-ras/openstack-ras/pkill_orphaned_processes.patch b/openstack/openstack-ras/openstack-ras/pkill_orphaned_processes.patch new file mode 100644 index 00000000..ed478d87 --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/pkill_orphaned_processes.patch @@ -0,0 +1,2056 @@ +--- + ocf/ceilometer-agent-central | 24 +++++++++++++++++++++ + ocf/ceilometer-agent-notification | 24 +++++++++++++++++++++ + ocf/ceilometer-alarm-evaluator | 24 +++++++++++++++++++++ + ocf/ceilometer-alarm-notifier | 24 +++++++++++++++++++++ + ocf/ceilometer-api | 24 +++++++++++++++++++++ + ocf/ceilometer-collector | 24 +++++++++++++++++++++ + ocf/cinder-api | 42 ++++++++++++++++++++++--------------- + ocf/cinder-schedule | 24 +++++++++++++++++++++ + ocf/cinder-volume | 26 ++++++++++++++++++++++ + ocf/glance-api | 25 ++++++++++++++++++++++ + ocf/glance-registry | 43 ++++++++++++++++++++++---------------- + ocf/heat-api | 24 +++++++++++++++++++++ + ocf/heat-api-cfn | 24 +++++++++++++++++++++ + ocf/heat-api-cloudwatch | 24 +++++++++++++++++++++ + ocf/heat-engine | 24 +++++++++++++++++++++ + ocf/keystone | 25 ++++++++++++++++++++++ + ocf/neutron-agent-dhcp | 24 +++++++++++++++++++++ + ocf/neutron-agent-l3 | 24 +++++++++++++++++++++ + ocf/neutron-metadata-agent | 25 ++++++++++++++++++++++ + ocf/neutron-server | 42 ++++++++++++++++++++++--------------- + ocf/nova-api | 42 ++++++++++++++++++++++--------------- + ocf/nova-cert | 24 +++++++++++++++++++++ + ocf/nova-conductor | 24 +++++++++++++++++++++ + ocf/nova-consoleauth | 24 +++++++++++++++++++++ + ocf/nova-network | 24 +++++++++++++++++++++ + ocf/nova-novnc | 41 +++++++++++++++++++----------------- + ocf/nova-scheduler | 24 +++++++++++++++++++++ + ocf/validation | 7 ++++++ + 28 files changed, 663 insertions(+), 87 deletions(-) + +--- a/ocf/glance-registry ++++ b/ocf/glance-registry +@@ -12,6 +12,13 @@ + # + # (c) 2012 hastexo Professional Services GmbH + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# ++# + # See usage() function below for more details ... + # + # OCF instance parameters: +@@ -227,14 +234,13 @@ glance_registry_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack ImageService (glance-registry) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } + + glance_registry_monitor() { + local rc +- local token +- local http_code + + glance_registry_status + rc=$? +@@ -246,25 +252,11 @@ glance_registry_monitor() { + + # Check whether we are supposed to monitor by logging into glance-registry + # and do it if that's the case. +-# if [ -n "$OCF_RESKEY_os_username" ] && [ -n "$OCF_RESKEY_os_password" ] \ +-# && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_keystone_get_token_url" ]; then +-# token=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$OCF_RESKEY_os_username\", \ +-# \"password\": \"$OCF_RESKEY_os_password\"}, \"tenantName\": \"$OCF_RESKEY_os_tenant_name\"}}" \ +-# -H "Content-type: application/json" $OCF_RESKEY_keystone_get_token_url | tr ',' '\n' | grep '"id":' \ +-# | cut -d'"' -f4 | head --lines 1` +-# http_code=`curl --write-out %{http_code} --output /dev/null -sH "X-Auth-Token: $token" $OCF_RESKEY_url` +-# rc=$? 
+-# if [ $rc -ne 0 ] || [ $http_code -ne 200 ]; then +-# ocf_log err "Failed to connect to the OpenStack ImageService (glance-registry): $rc and $http_code" +-# return $OCF_NOT_RUNNING +-# fi +-# fi +- #suppress the information displayed while checking detailed information about this specific version of the API + if [ -n "$OCF_RESKEY_os_username"] && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_keystone_get_token_url" ]; then + ./validation $OCF_RESKEY_keystone_get_token_url $OCF_RESKEY_os_username $OCF_RESKEY_os_tenant_name + rc=$? + if [ $rc -ne 0 ]; then +- ocf_log err "Failed to connect to the OpenStack Cinder API (cinder-api): $rc and $http_code" ++ ocf_log err "Failed to connect to the OpenStack ImageService (glance-registry): $rc" + return $OCF_NOT_RUNNING + fi + fi +@@ -305,6 +297,20 @@ glance_registry_start() { + return $OCF_SUCCESS + } + ++glance_registry_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + glance_registry_stop() { + local rc + local pid +@@ -313,6 +319,7 @@ glance_registry_stop() { + rc=$? + if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack ImageService (glance-registry) already stopped" ++ glance_registry_confirm_stop + return $OCF_SUCCESS + fi + +@@ -322,6 +329,7 @@ glance_registry_stop() { + rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "OpenStack ImageService (glance-registry) couldn't be stopped" ++ glance_registry_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -350,6 +358,7 @@ glance_registry_stop() { + using SIGTERM. Trying SIGKILL ..." + ocf_run kill -s KILL $pid + fi ++ glance_registry_confirm_stop + + ocf_log info "OpenStack ImageService (glance-registry) stopped" + +--- a/ocf/glance-api ++++ b/ocf/glance-api +@@ -12,6 +12,13 @@ + # + # (c) 2012 hastexo Professional Services GmbH + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# ++# + # See usage() function below for more details ... + # + # OCF instance parameters: +@@ -220,6 +227,7 @@ glance_api_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack ImageService (glance-api) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } +@@ -253,6 +261,20 @@ glance_api_monitor() { + return $OCF_SUCCESS + } + ++glance_api_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + glance_api_start() { + local rc + +@@ -293,6 +315,7 @@ glance_api_stop() { + rc=$? + if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack ImageService (glance-api) already stopped" ++ glance_api_confirm_stop + return $OCF_SUCCESS + fi + +@@ -302,6 +325,7 @@ glance_api_stop() { + rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "OpenStack ImageService (glance-api) couldn't be stopped" ++ glance_api_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -330,6 +354,7 @@ glance_api_stop() { + using SIGTERM. 
Trying SIGKILL ..." + ocf_run kill -s KILL $pid + fi ++ glance_api_confirm_stop + + ocf_log info "OpenStack ImageService (glance-api) stopped" + +--- a/ocf/cinder-api ++++ b/ocf/cinder-api +@@ -11,6 +11,12 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + # + # See usage() function below for more details ... + # +@@ -226,14 +232,13 @@ cinder_api_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack Cinder API (cinder-api) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } + + cinder_api_monitor() { + local rc +- local token +- local http_code + + cinder_api_status + rc=$? +@@ -244,25 +249,11 @@ cinder_api_monitor() { + fi + + # Check detailed information about this specific version of the API. +-# if [ -n "$OCF_RESKEY_os_username" ] && [ -n "$OCF_RESKEY_os_password" ] \ +-# && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_keystone_get_token_url" ]; then +-# token=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$OCF_RESKEY_os_username\", \ +-# \"password\": \"$OCF_RESKEY_os_password\"}, \"tenantName\": \"$OCF_RESKEY_os_tenant_name\"}}" \ +-# -H "Content-type: application/json" $OCF_RESKEY_keystone_get_token_url | tr ',' '\n' | grep '"id":' \ +-# | cut -d'"' -f4 | head --lines 1` +-# http_code=`curl --write-out %{http_code} --output /dev/null -sH "X-Auth-Token: $token" $OCF_RESKEY_url` +-# rc=$? +-# if [ $rc -ne 0 ] || [ $http_code -ne 200 ]; then +-# ocf_log err "Failed to connect to the OpenStack Cinder API (cinder-api): $rc and $http_code" +-# return $OCF_NOT_RUNNING +-# fi +-# fi +- #suppress the information displayed while checking detailed information about this specific version of the API + if [ -n "$OCF_RESKEY_os_username"] && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_keystone_get_token_url" ]; then + ./validation $OCF_RESKEY_keystone_get_token_url $OCF_RESKEY_os_username $OCF_RESKEY_os_tenant_name + rc=$? + if [ $rc -ne 0 ]; then +- ocf_log err "Failed to connect to the OpenStack Cinder API (cinder-api): $rc and $http_code" ++ ocf_log err "Failed to connect to the OpenStack Cinder API (cinder-api): $rc" + return $OCF_NOT_RUNNING + fi + fi +@@ -303,6 +294,20 @@ cinder_api_start() { + return $OCF_SUCCESS + } + ++cinder_api_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + cinder_api_stop() { + local rc + local pid +@@ -311,6 +316,7 @@ cinder_api_stop() { + rc=$? + if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack Cinder API (cinder-api) already stopped" ++ cinder_api_confirm_stop + return $OCF_SUCCESS + fi + +@@ -320,6 +326,7 @@ cinder_api_stop() { + rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "OpenStack Cinder API (cinder-api) couldn't be stopped" ++ cinder_api_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -348,6 +355,7 @@ cinder_api_stop() { + using SIGTERM. Trying SIGKILL ..." 
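
A shell nit in the monitor hunks above (the glance-registry and cinder-api replacements alike): [ -n "$OCF_RESKEY_os_username"] is missing the space before the closing bracket, so the test aborts with "[: missing `]'" and the keystone validation is silently skipped on every monitor pass. The intended condition:

    # Each word, including the closing ], must be a separate argument to [.
    if [ -n "$OCF_RESKEY_os_username" ] && [ -n "$OCF_RESKEY_os_tenant_name" ] \
            && [ -n "$OCF_RESKEY_keystone_get_token_url" ]; then
        echo "would run the validation helper against keystone"
    fi
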
+ ocf_run kill -s KILL $pid + fi ++ cinder_api_confirm_stop + + ocf_log info "OpenStack Cinder API (cinder-api) stopped" + +--- a/ocf/cinder-schedule ++++ b/ocf/cinder-schedule +@@ -11,6 +11,12 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + # + # See usage() function below for more details ... + # +@@ -206,6 +212,7 @@ cinder_scheduler_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack Cinder Scheduler (cinder-schedule) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } +@@ -268,6 +275,20 @@ cinder_scheduler_start() { + return $OCF_SUCCESS + } + ++cinder_scheduler_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + cinder_scheduler_stop() { + local rc + local pid +@@ -276,6 +297,7 @@ cinder_scheduler_stop() { + rc=$? + if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack Cinder Scheduler (cinder-schedule) already stopped" ++ cinder_scheduler_confirm_stop + return $OCF_SUCCESS + fi + +@@ -285,6 +307,7 @@ cinder_scheduler_stop() { + rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "OpenStack Cinder Scheduler (cinder-schedule) couldn't be stopped" ++ cinder_scheduler_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -313,6 +336,7 @@ cinder_scheduler_stop() { + using SIGTERM. Trying SIGKILL ..." + ocf_run kill -s KILL $pid + fi ++ cinder_scheduler_confirm_stop + + ocf_log info "OpenStack Cinder Scheduler (cinder-schedule) stopped" + +--- a/ocf/cinder-volume ++++ b/ocf/cinder-volume +@@ -11,6 +11,14 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # ++# (c) 2012 hastexo Professional Services GmbH ++# ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + # + # See usage() function below for more details ... + # +@@ -194,6 +202,7 @@ cinder_volume_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack Cinder Volume (cinder-volume) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } +@@ -273,6 +282,20 @@ cinder_volume_start() { + return $OCF_SUCCESS + } + ++cinder_volume_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + cinder_volume_stop() { + local rc + local pid +@@ -281,6 +304,7 @@ cinder_volume_stop() { + rc=$? + if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack Cinder Volume (cinder-volume) already stopped" ++ cinder_volume_confirm_stop + return $OCF_SUCCESS + fi + +@@ -290,6 +314,7 @@ cinder_volume_stop() { + rc=$? 
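
Another change this series applies uniformly: every *_status helper now removes the pid file when the recorded PID no longer answers signal 0, so a stale file left by a crash stops generating "Old PID file found" noise on every subsequent probe. The pattern in isolation:

    # Drop a pid file whose process is gone; kill -0 only tests existence.
    pid=$(cat "$OCF_RESKEY_pid" 2>/dev/null)
    if [ -n "$pid" ] && ! kill -0 "$pid" 2>/dev/null; then
        rm -f "$OCF_RESKEY_pid"
    fi
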
+ if [ $rc -ne 0 ]; then + ocf_log err "OpenStack Cinder Volume (cinder-volume) couldn't be stopped" ++ cinder_volume_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -318,6 +343,7 @@ cinder_volume_stop() { + using SIGTERM. Trying SIGKILL ..." + ocf_run kill -s KILL $pid + fi ++ cinder_volume_confirm_stop + + ocf_log info "OpenStack Cinder Volume (cinder-volume) stopped" + +--- a/ocf/ceilometer-agent-central ++++ b/ocf/ceilometer-agent-central +@@ -11,6 +11,12 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + # + # See usage() function below for more details ... + # +@@ -206,6 +212,7 @@ ceilometer_agent_central_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack Ceilometer Central Agent (ceilometer-agent-central) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } +@@ -258,6 +265,20 @@ ceilometer_agent_central_start() { + return $OCF_SUCCESS + } + ++ceilometer_agent_central_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + ceilometer_agent_central_stop() { + local rc + local pid +@@ -266,6 +287,7 @@ ceilometer_agent_central_stop() { + rc=$? + if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack Ceilometer Central Agent (ceilometer-agent-central) already stopped" ++ ceilometer_agent_central_confirm_stop + return $OCF_SUCCESS + fi + +@@ -275,6 +297,7 @@ ceilometer_agent_central_stop() { + rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "OpenStack Ceilometer Central Agent (ceilometer-agent-central) couldn't be stopped" ++ ceilometer_agent_central_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -303,6 +326,7 @@ ceilometer_agent_central_stop() { + using SIGTERM. Trying SIGKILL ..." + ocf_run kill -s KILL $pid + fi ++ ceilometer_agent_central_confirm_stop + + ocf_log info "OpenStack Ceilometer Central Agent (ceilometer-agent-central) stopped" + +--- a/ocf/ceilometer-agent-notification ++++ b/ocf/ceilometer-agent-notification +@@ -11,6 +11,12 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + # + # See usage() function below for more details ... 
+ # +@@ -206,6 +212,7 @@ ceilometer_agent_notification_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack Ceilometer Central Agent (ceilometer-agent-notification) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } +@@ -258,6 +265,20 @@ ceilometer_agent_notification_start() { + return $OCF_SUCCESS + } + ++ceilometer_agent_notification_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + ceilometer_agent_notification_stop() { + local rc + local pid +@@ -266,6 +287,7 @@ ceilometer_agent_notification_stop() { + rc=$? + if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack Ceilometer Central Agent (ceilometer-agent-notification) already stopped" ++ ceilometer_agent_notification_confirm_stop + return $OCF_SUCCESS + fi + +@@ -275,6 +297,7 @@ ceilometer_agent_notification_stop() { + rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "OpenStack Ceilometer Central Agent (ceilometer-agent-notification) couldn't be stopped" ++ ceilometer_agent_notification_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -303,6 +326,7 @@ ceilometer_agent_notification_stop() { + using SIGTERM. Trying SIGKILL ..." + ocf_run kill -s KILL $pid + fi ++ ceilometer_agent_notification_confirm_stop + + ocf_log info "OpenStack Ceilometer Central Agent (ceilometer-agent-notification) stopped" + +--- a/ocf/ceilometer-alarm-evaluator ++++ b/ocf/ceilometer-alarm-evaluator +@@ -11,6 +11,12 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + # + # See usage() function below for more details ... + # +@@ -175,6 +181,7 @@ ceilometer_alarm_evaluator_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack Ceilometer Alarm Evaluator (ceilometer-alarm-evaluator) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } +@@ -227,6 +234,20 @@ ceilometer_alarm_evaluator_start() { + return $OCF_SUCCESS + } + ++ceilometer_alarm_evaluator_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + ceilometer_alarm_evaluator_stop() { + local rc + local pid +@@ -235,6 +256,7 @@ ceilometer_alarm_evaluator_stop() { + rc=$? + if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack Ceilometer Alarm Evaluator (ceilometer-alarm-evaluator) already stopped" ++ ceilometer_alarm_evaluator_confirm_stop + return $OCF_SUCCESS + fi + +@@ -244,6 +266,7 @@ ceilometer_alarm_evaluator_stop() { + rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "OpenStack Ceilometer Alarm Evaluator (ceilometer-alarm-evaluator) couldn't be stopped" ++ ceilometer_alarm_evaluator_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -272,6 +295,7 @@ ceilometer_alarm_evaluator_stop() { + using SIGTERM. Trying SIGKILL ..." 
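
The *_confirm_stop helpers repeated throughout this patch all follow the same shape: resolve the service's absolute binary path, match a python interpreter running that path on the full command line, and SIGKILL any survivors. (A small nit visible above: each helper declares "local my_bin" but assigns "my_binary", so the variable it actually uses is global.) The core of it, with one service name picked for illustration:

    # List, then kill, leftover interpreter processes running this binary.
    my_binary=$(which ceilometer-collector)    # any of the managed services
    pattern="^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"
    pgrep -l -f "$pattern"
    pkill -KILL -f "$pattern"
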
+ ocf_run kill -s KILL $pid + fi ++ ceilometer_alarm_evaluator_confirm_stop + + ocf_log info "OpenStack Ceilometer Alarm Evaluator (ceilometer-alarm-evaluator) stopped" + +--- a/ocf/ceilometer-alarm-notifier ++++ b/ocf/ceilometer-alarm-notifier +@@ -11,6 +11,12 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + # + # See usage() function below for more details ... + # +@@ -206,6 +212,7 @@ ceilometer_alarm_notifier_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack Ceilometer Alarm Notifier (ceilometer-alarm-notifier) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } +@@ -268,6 +275,20 @@ ceilometer_alarm_notifier_start() { + return $OCF_SUCCESS + } + ++ceilometer_alarm_notifier_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + ceilometer_alarm_notifier_stop() { + local rc + local pid +@@ -276,6 +297,7 @@ ceilometer_alarm_notifier_stop() { + rc=$? + if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack Ceilometer Alarm Notifier (ceilometer-alarm-notifier) already stopped" ++ ceilometer_alarm_notifier_confirm_stop + return $OCF_SUCCESS + fi + +@@ -285,6 +307,7 @@ ceilometer_alarm_notifier_stop() { + rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "OpenStack Ceilometer Alarm Notifier (ceilometer-alarm-notifier) couldn't be stopped" ++ ceilometer_alarm_notifier_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -313,6 +336,7 @@ ceilometer_alarm_notifier_stop() { + using SIGTERM. Trying SIGKILL ..." + ocf_run kill -s KILL $pid + fi ++ ceilometer_alarm_notifier_confirm_stop + + ocf_log info "OpenStack Ceilometer Alarm Notifier (ceilometer-alarm-notifier) stopped" + +--- a/ocf/ceilometer-api ++++ b/ocf/ceilometer-api +@@ -11,6 +11,12 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + # + # See usage() function below for more details ... + # +@@ -206,6 +212,7 @@ ceilometer_api_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack Ceilometer API (ceilometer-api) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } +@@ -268,6 +275,20 @@ ceilometer_api_start() { + return $OCF_SUCCESS + } + ++ceilometer_api_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + ceilometer_api_stop() { + local rc + local pid +@@ -276,6 +297,7 @@ ceilometer_api_stop() { + rc=$? + if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack Ceilometer API (ceilometer-api) already stopped" ++ ceilometer_api_confirm_stop + return $OCF_SUCCESS + fi + +@@ -285,6 +307,7 @@ ceilometer_api_stop() { + rc=$? 
+ if [ $rc -ne 0 ]; then + ocf_log err "OpenStack Ceilometer API (ceilometer-api) couldn't be stopped" ++ ceilometer_api_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -313,6 +336,7 @@ ceilometer_api_stop() { + using SIGTERM. Trying SIGKILL ..." + ocf_run kill -s KILL $pid + fi ++ ceilometer_api_confirm_stop + + ocf_log info "OpenStack Ceilometer API (ceilometer-api) stopped" + +--- a/ocf/ceilometer-collector ++++ b/ocf/ceilometer-collector +@@ -11,6 +11,12 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + # + # See usage() function below for more details ... + # +@@ -206,6 +212,7 @@ ceilometer_collector_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack Ceilometer Collector (ceilometer-collector) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } +@@ -269,6 +276,20 @@ ceilometer_collector_start() { + return $OCF_SUCCESS + } + ++ceilometer_collector_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + ceilometer_collector_stop() { + local rc + local pid +@@ -277,6 +298,7 @@ ceilometer_collector_stop() { + rc=$? + if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack Ceilometer Collector (ceilometer-collector) already stopped" ++ ceilometer_collector_confirm_stop + return $OCF_SUCCESS + fi + +@@ -286,6 +308,7 @@ ceilometer_collector_stop() { + rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "OpenStack Ceilometer Collector (ceilometer-collector) couldn't be stopped" ++ ceilometer_collector_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -314,6 +337,7 @@ ceilometer_collector_stop() { + using SIGTERM. Trying SIGKILL ..." + ocf_run kill -s KILL $pid + fi ++ ceilometer_collector_confirm_stop + + ocf_log info "OpenStack Ceilometer Collector (ceilometer-collector) stopped" + +--- a/ocf/heat-api ++++ b/ocf/heat-api +@@ -10,6 +10,12 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + # + # See usage() function below for more details ... + # +@@ -205,6 +211,7 @@ heat_api_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack Orchestration Engine (heat-api) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } +@@ -266,6 +273,20 @@ heat_api_start() { + return $OCF_SUCCESS + } + ++heat_api_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + heat_api_stop() { + local rc + local pid +@@ -274,6 +295,7 @@ heat_api_stop() { + rc=$? 
+ if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack Orchestration Engine (heat-api) already stopped" ++ heat_api_confirm_stop + return $OCF_SUCCESS + fi + +@@ -283,6 +305,7 @@ heat_api_stop() { + rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "OpenStack Orchestration Engine (heat-api) couldn't be stopped" ++ heat_api_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -311,6 +334,7 @@ heat_api_stop() { + using SIGTERM. Trying SIGKILL ..." + ocf_run kill -s KILL $pid + fi ++ heat_api_confirm_stop + + ocf_log info "OpenStack Orchestration Engine (heat-api) stopped" + +--- a/ocf/heat-api-cfn ++++ b/ocf/heat-api-cfn +@@ -10,6 +10,12 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + # + # See usage() function below for more details ... + # +@@ -205,6 +211,7 @@ heat_api_cfn_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack Orchestration Engine (heat-api-cfn) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } +@@ -266,6 +273,20 @@ heat_api_cfn_start() { + return $OCF_SUCCESS + } + ++heat_api_cfn_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + heat_api_cfn_stop() { + local rc + local pid +@@ -274,6 +295,7 @@ heat_api_cfn_stop() { + rc=$? + if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack Orchestration Engine (heat-api-cfn) already stopped" ++ heat_api_cfn_confirm_stop + return $OCF_SUCCESS + fi + +@@ -283,6 +305,7 @@ heat_api_cfn_stop() { + rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "OpenStack Orchestration Engine (heat-api-cfn) couldn't be stopped" ++ heat_api_cfn_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -311,6 +334,7 @@ heat_api_cfn_stop() { + using SIGTERM. Trying SIGKILL ..." + ocf_run kill -s KILL $pid + fi ++ heat_api_cfn_confirm_stop + + ocf_log info "OpenStack Orchestration Engine (heat-api-cfn) stopped" + +--- a/ocf/heat-api-cloudwatch ++++ b/ocf/heat-api-cloudwatch +@@ -10,6 +10,12 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + # + # See usage() function below for more details ... + # +@@ -205,6 +211,7 @@ heat_api_cloudwatch_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack Orchestration Engine (heat-api-cloudwatch) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } +@@ -266,6 +273,20 @@ heat_api_cloudwatch_start() { + return $OCF_SUCCESS + } + ++heat_api_cloudwatch_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + heat_api_cloudwatch_stop() { + local rc + local pid +@@ -274,6 +295,7 @@ heat_api_cloudwatch_stop() { + rc=$? 
+ if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack Orchestration Engine (heat-api-cloudwatch) already stopped" ++ heat_api_cloudwatch_confirm_stop + return $OCF_SUCCESS + fi + +@@ -283,6 +305,7 @@ heat_api_cloudwatch_stop() { + rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "OpenStack Orchestration Engine (heat-api-cloudwatch) couldn't be stopped" ++ heat_api_cloudwatch_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -311,6 +334,7 @@ heat_api_cloudwatch_stop() { + using SIGTERM. Trying SIGKILL ..." + ocf_run kill -s KILL $pid + fi ++ heat_api_cloudwatch_confirm_stop + + ocf_log info "OpenStack Orchestration Engine (heat-api-cloudwatch) stopped" + +--- a/ocf/heat-engine ++++ b/ocf/heat-engine +@@ -10,6 +10,12 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + # + # See usage() function below for more details ... + # +@@ -227,6 +233,7 @@ heat_engine_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack Orchestration Engine (heat-engine) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } +@@ -304,6 +311,20 @@ heat_engine_start() { + return $OCF_SUCCESS + } + ++heat_engine_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + heat_engine_stop() { + local rc + local pid +@@ -312,6 +333,7 @@ heat_engine_stop() { + rc=$? + if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack Orchestration Engine (heat-engine) already stopped" ++ heat_engine_confirm_stop + return $OCF_SUCCESS + fi + +@@ -321,6 +343,7 @@ heat_engine_stop() { + rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "OpenStack Orchestration Engine (heat-engine) couldn't be stopped" ++ heat_engine_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -349,6 +372,7 @@ heat_engine_stop() { + using SIGTERM. Trying SIGKILL ..." + ocf_run kill -s KILL $pid + fi ++ heat_engine_confirm_stop + + ocf_log info "OpenStack Orchestration Engine (heat-engine) stopped" + +--- a/ocf/keystone ++++ b/ocf/keystone +@@ -12,6 +12,13 @@ + # + # (c) 2012 hastexo Professional Services GmbH + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# ++# + # See usage() function below for more details ... + # + # OCF instance parameters: +@@ -220,6 +227,7 @@ keystone_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack Identity (Keystone) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } +@@ -287,6 +295,20 @@ keystone_start() { + return $OCF_SUCCESS + } + ++keystone_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + keystone_stop() { + local rc + local pid +@@ -295,6 +317,7 @@ keystone_stop() { + rc=$? 
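
Taken together, the stop path for every agent in this series now runs the same ladder: SIGTERM, a polling loop bounded by the timeout derived from Pacemaker's CRM_meta_timeout (milliseconds, minus a 5-second safety margin), an escalation to SIGKILL, and finally the confirm-stop sweep. In outline (helper names stand in for the per-service functions):

    kill -TERM "$pid"
    shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5))
    count=0
    while [ $count -lt $shutdown_timeout ] && kill -0 "$pid" 2>/dev/null; do
        count=$((count+1))
        sleep 1
    done
    kill -0 "$pid" 2>/dev/null && kill -KILL "$pid"
    confirm_stop                   # stand-in for the *_confirm_stop helper
    rm -f "$OCF_RESKEY_pid"
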
+ if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack Identity (Keystone) already stopped" ++ keystone_confirm_stop + return $OCF_SUCCESS + fi + +@@ -304,6 +327,7 @@ keystone_stop() { + rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "OpenStack Identity (Keystone) couldn't be stopped" ++ keystone_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -332,6 +356,7 @@ keystone_stop() { + using SIGTERM. Trying SIGKILL ..." + ocf_run kill -s KILL $pid + fi ++ keystone_confirm_stop + + ocf_log info "OpenStack Identity (Keystone) stopped" + +--- a/ocf/neutron-agent-dhcp ++++ b/ocf/neutron-agent-dhcp +@@ -11,6 +11,12 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + # + # See usage() function below for more details ... + # +@@ -196,6 +202,7 @@ neutron_dhcp_agent_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack DHCP Server (neutron-dhcp-agent) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } +@@ -261,6 +268,20 @@ neutron_dhcp_agent_start() { + return $OCF_SUCCESS + } + ++neutron_dhcp_agent_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + neutron_dhcp_agent_stop() { + local rc + local pid +@@ -269,6 +290,7 @@ neutron_dhcp_agent_stop() { + rc=$? + if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack DHCP Server (neutron-dhcp-agent) already stopped" ++ neutron_dhcp_agent_confirm_stop + return $OCF_SUCCESS + fi + +@@ -279,6 +301,7 @@ neutron_dhcp_agent_stop() { + rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "OpenStack DHCP Server (neutron-dhcp-agent) couldn't be stopped" ++ neutron_dhcp_agent_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -307,6 +330,7 @@ neutron_dhcp_agent_stop() { + using SIGTERM. Trying SIGKILL ..." + ocf_run kill -s KILL $pid + fi ++ neutron_dhcp_agent_confirm_stop + + ocf_log info "OpenStack DHCP Server (neutron-dhcp-agent) stopped" + +--- a/ocf/neutron-agent-l3 ++++ b/ocf/neutron-agent-l3 +@@ -11,6 +11,12 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + # + # See usage() function below for more details ... + # +@@ -196,6 +202,7 @@ neutron_l3_agent_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack L3 Server (neutron-l3-agent) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } +@@ -261,6 +268,20 @@ neutron_l3_agent_start() { + return $OCF_SUCCESS + } + ++neutron_l3_agent_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + neutron_l3_agent_stop() { + local rc + local pid +@@ -269,6 +290,7 @@ neutron_l3_agent_stop() { + rc=$? 
+ if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack L3 Server (neutron-l3-agent) already stopped" ++ neutron_l3_agent_confirm_stop + return $OCF_SUCCESS + fi + +@@ -278,6 +300,7 @@ neutron_l3_agent_stop() { + rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "OpenStack L3 Server (neutron-l3-agent) couldn't be stopped" ++ neutron_l3_agent_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -306,6 +329,7 @@ neutron_l3_agent_stop() { + using SIGTERM. Trying SIGKILL ..." + ocf_run kill -s KILL $pid + fi ++ neutron_l3_agent_confirm_stop + + ocf_log info "OpenStack L3 Server (neutron-l3-agent) stopped" + +--- a/ocf/neutron-metadata-agent ++++ b/ocf/neutron-metadata-agent +@@ -11,6 +11,12 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + # + # See usage() function below for more details ... + # +@@ -193,6 +199,7 @@ neutron_metadata_agent_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack Neutron Metadata Agent (neutron-metadata-agent) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } +@@ -214,6 +221,7 @@ neutron_metadata_agent_monitor() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack Neutron Metadata Agent (neutron-metadata-agent) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } +@@ -252,6 +260,20 @@ neutron_metadata_agent_start() { + return $OCF_SUCCESS + } + ++neutron_metadata_agent_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + neutron_metadata_agent_stop() { + local rc + local pid +@@ -260,6 +282,7 @@ neutron_metadata_agent_stop() { + rc=$? + if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack Neutron Metadata Agent (neutron-metadata-agent) already stopped" ++ neutron_metadata_agent_confirm_stop + return $OCF_SUCCESS + fi + +@@ -269,6 +292,7 @@ neutron_metadata_agent_stop() { + rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "OpenStack Neutron Metadata Agent (neutron-metadata-agent) couldn't be stopped" ++ neutron_metadata_agent_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -297,6 +321,7 @@ neutron_metadata_agent_stop() { + using SIGTERM. Trying SIGKILL ..." + ocf_run kill -s KILL $pid + fi ++ neutron_metadata_agent_confirm_stop + + ocf_log info "OpenStack Neutron Metadata Agent (neutron-metadata-agent) stopped" + +--- a/ocf/neutron-server ++++ b/ocf/neutron-server +@@ -12,6 +12,12 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + # + # See usage() function below for more details ... + # +@@ -238,14 +244,13 @@ neutron_server_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack Neutron Server (neutron-server) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } + + neutron_server_monitor() { + local rc +- local token +- local http_code + + neutron_server_status + rc=$? 
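The `rm -f $OCF_RESKEY_pid` lines added to the status functions above all close the same gap: after an unclean exit the pidfile survives, and every later probe re-reads the dead PID and logs the same warning. Removing the file on the spot lets the next probe take the fast not-running path. A minimal standalone sketch of the pattern, assuming the heartbeat shell library (`ocf_log` and the `$OCF_*` status codes) is sourced as in these agents:

# Sketch of the stale-pidfile handling these hunks add; not a drop-in
# replacement for any one agent's status function.
example_status() {
    local pid
    [ -f "$OCF_RESKEY_pid" ] || return $OCF_NOT_RUNNING
    pid=$(cat "$OCF_RESKEY_pid")
    if kill -s 0 "$pid" 2>/dev/null; then
        return $OCF_SUCCESS
    fi
    ocf_log info "Old PID file found, but process $pid is not running"
    rm -f "$OCF_RESKEY_pid"   # drop the stale file so later probes start clean
    return $OCF_NOT_RUNNING
}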
+@@ -256,25 +261,11 @@ neutron_server_monitor() { + fi + + # Check detailed information about this specific version of the API. +-# if [ -n "$OCF_RESKEY_os_username" ] && [ -n "$OCF_RESKEY_os_password" ] \ +-# && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_keystone_get_token_url" ]; then +-# token=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$OCF_RESKEY_os_username\", \ +-# \"password\": \"$OCF_RESKEY_os_password\"}, \"tenantName\": \"$OCF_RESKEY_os_tenant_name\"}}" \ +-# -H "Content-type: application/json" $OCF_RESKEY_keystone_get_token_url | tr ',' '\n' | grep '"id":' \ +-# | cut -d'"' -f4 | head --lines 1` +-# http_code=`curl --write-out %{http_code} --output /dev/null -sH "X-Auth-Token: $token" $OCF_RESKEY_url` +-# rc=$? +-# if [ $rc -ne 0 ] || [ $http_code -ne 200 ]; then +-# ocf_log err "Failed to connect to the OpenStack Neutron API (neutron-server): $rc and $http_code" +-# return $OCF_NOT_RUNNING +-# fi +-# fi +- #suppress the information displayed while checking detailed information about this specific version of the API + if [ -n "$OCF_RESKEY_os_username"] && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_keystone_get_token_url" ]; then + ./validation $OCF_RESKEY_keystone_get_token_url $OCF_RESKEY_os_username $OCF_RESKEY_os_tenant_name + rc=$? + if [ $rc -ne 0 ]; then +- ocf_log err "Failed to connect to the OpenStack Cinder API (cinder-api): $rc and $http_code" ++ ocf_log err "Failed to connect to the OpenStack Neutron Server (neutron-server): $rc" + return $OCF_NOT_RUNNING + fi + fi +@@ -317,6 +308,20 @@ neutron_server_start() { + return $OCF_SUCCESS + } + ++neutron_server_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + neutron_server_stop() { + local rc + local pid +@@ -325,6 +330,7 @@ neutron_server_stop() { + rc=$? + if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack Neutron Server (neutron-server) already stopped" ++ neutron_server_confirm_stop + return $OCF_SUCCESS + fi + +@@ -334,6 +340,7 @@ neutron_server_stop() { + rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "OpenStack Neutron Server (neutron-server) couldn't be stopped" ++ neutron_server_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -362,6 +369,7 @@ neutron_server_stop() { + using SIGTERM. Trying SIGKILL ..." + ocf_run kill -s KILL $pid + fi ++ neutron_server_confirm_stop + + ocf_log info "OpenStack Neutron Server (neutron-server) stopped" + +--- a/ocf/nova-api ++++ b/ocf/nova-api +@@ -11,6 +11,12 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + # + # See usage() function below for more details ... + # +@@ -226,14 +232,13 @@ nova_api_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack Nova API (nova-api) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } + + nova_api_monitor() { + local rc +- local token +- local http_code + + nova_api_status + rc=$? +@@ -244,25 +249,11 @@ nova_api_monitor() { + fi + + # Check detailed information about this specific version of the API. 
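This neutron-server hunk, and the matching nova-api hunk below, do three things to the monitor action: drop the long-dead commented-out curl check instead of carrying it forward, keep the live check that goes through the keystoneclient-based ./validation helper shipped with these agents (its ocf/validation header is touched later in this same patch), and fix the error message, which still named the Cinder API from the script these blocks were copied from. For reference, the deleted comment block amounted to the following token round-trip; every variable here is a placeholder, not a value from this patch:

# Reconstruction of the deleted check: fetch a token with password
# credentials, then GET the service URL with it.
token=$(curl -s -d "{\"auth\":{\"passwordCredentials\":{\"username\":\"$OS_USERNAME\",\"password\":\"$OS_PASSWORD\"},\"tenantName\":\"$OS_TENANT_NAME\"}}" \
    -H "Content-type: application/json" "$KEYSTONE_TOKEN_URL" \
    | tr ',' '\n' | grep '"id":' | cut -d'"' -f4 | head -n 1)
http_code=$(curl --write-out '%{http_code}' --output /dev/null -s \
    -H "X-Auth-Token: $token" "$SERVICE_URL")
[ "$http_code" = "200" ] || echo "API check failed: HTTP $http_code" >&2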
+-# if [ -n "$OCF_RESKEY_os_username" ] && [ -n "$OCF_RESKEY_os_password" ] \ +-# && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_keystone_get_token_url" ]; then +-# token=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$OCF_RESKEY_os_username\", \ +-# \"password\": \"$OCF_RESKEY_os_password\"}, \"tenantName\": \"$OCF_RESKEY_os_tenant_name\"}}" \ +-# -H "Content-type: application/json" $OCF_RESKEY_keystone_get_token_url | tr ',' '\n' | grep '"id":' \ +-# | cut -d'"' -f4 | head --lines 1` +-# http_code=`curl --write-out %{http_code} --output /dev/null -sH "X-Auth-Token: $token" $OCF_RESKEY_url` +-# rc=$? +-# if [ $rc -ne 0 ] || [ $http_code -ne 200 ]; then +-# ocf_log err "Failed to connect to the OpenStack Nova API (nova-api): $rc and $http_code" +-# return $OCF_NOT_RUNNING +-# fi +-# fi +- #suppress the information displayed while checking detailed information about this specific version of the API + if [ -n "$OCF_RESKEY_os_username"] && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_keystone_get_token_url" ]; then + ./validation $OCF_RESKEY_keystone_get_token_url $OCF_RESKEY_os_username $OCF_RESKEY_os_tenant_name + rc=$? + if [ $rc -ne 0 ]; then +- ocf_log err "Failed to connect to the OpenStack Cinder API (cinder-api): $rc and $http_code" ++ ocf_log err "Failed to connect to the OpenStack Nova API (nova-api): $rc" + return $OCF_NOT_RUNNING + fi + fi +@@ -306,6 +297,20 @@ nova_api_start() { + return $OCF_SUCCESS + } + ++nova_api_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + nova_api_stop() { + local rc + local pid +@@ -314,6 +319,7 @@ nova_api_stop() { + rc=$? + if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack Nova API (nova-api) already stopped" ++ nova_api_confirm_stop + return $OCF_SUCCESS + fi + +@@ -323,6 +329,7 @@ nova_api_stop() { + rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "OpenStack Nova API (nova-api) couldn't be stopped" ++ nova_api_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -351,6 +358,7 @@ nova_api_stop() { + using SIGTERM. Trying SIGKILL ..." + ocf_run kill -s KILL $pid + fi ++ nova_api_confirm_stop + + ocf_log info "OpenStack Nova API (nova-api) stopped" + +--- a/ocf/nova-cert ++++ b/ocf/nova-cert +@@ -11,6 +11,12 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + # + # See usage() function below for more details ... 
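The confirm_stop helpers introduced throughout this patch are identical up to their function prefix. One nit they all share: each declares `local my_bin` but assigns `my_binary`, so the variable actually used is never the one declared local. A cleaned-up standalone rendering of the helper, with the declaration matching the assignment and the regex annotated (behavior otherwise unchanged):

example_confirm_stop() {
    local my_binary
    local my_processes

    my_binary=$(which "${OCF_RESKEY_binary}")
    # Anchored ERE over the full command line: an interpreter, one space, the
    # script's absolute path, then a boundary. Note that inside a POSIX
    # bracket expression '\w' is literal, so '[^\w-]' means "not backslash,
    # 'w', or '-'", which still admits the usual space-or-end-of-line boundary.
    my_processes=$(pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)")

    if [ -n "${my_processes}" ]; then
        ocf_log info "About to SIGKILL the following: ${my_processes}"
        pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"
    fi
}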
+ # +@@ -228,6 +234,7 @@ nova_cert_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack Nova Cert (nova-cert) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } +@@ -310,6 +317,20 @@ nova_cert_start() { + return $OCF_SUCCESS + } + ++nova_cert_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + nova_cert_stop() { + local rc + local pid +@@ -318,6 +339,7 @@ nova_cert_stop() { + rc=$? + if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack Nova Cert (nova-cert) already stopped" ++ nova_cert_confirm_stop + return $OCF_SUCCESS + fi + +@@ -327,6 +349,7 @@ nova_cert_stop() { + rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "OpenStack Nova Cert (nova-cert) couldn't be stopped" ++ nova_cert_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -355,6 +378,7 @@ nova_cert_stop() { + using SIGTERM. Trying SIGKILL ..." + ocf_run kill -s KILL $pid + fi ++ nova_cert_confirm_stop + + ocf_log info "OpenStack Nova Cert (nova-cert) stopped" + +--- a/ocf/nova-conductor ++++ b/ocf/nova-conductor +@@ -11,6 +11,12 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + # + # See usage() function below for more details ... + # +@@ -228,6 +234,7 @@ nova_conductor_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack Nova Conductor (nova-conductor) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } +@@ -308,6 +315,20 @@ nova_conductor_start() { + return $OCF_SUCCESS + } + ++nova_conductor_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + nova_conductor_stop() { + local rc + local pid +@@ -316,6 +337,7 @@ nova_conductor_stop() { + rc=$? + if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack Nova Conductor (nova-conductor) already stopped" ++ nova_conductor_confirm_stop + return $OCF_SUCCESS + fi + +@@ -325,6 +347,7 @@ nova_conductor_stop() { + rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "OpenStack Nova Conductor (nova-conductor) couldn't be stopped" ++ nova_conductor_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -353,6 +376,7 @@ nova_conductor_stop() { + using SIGTERM. Trying SIGKILL ..." + ocf_run kill -s KILL $pid + fi ++ nova_conductor_confirm_stop + + ocf_log info "OpenStack Nova Conductor (nova-conductor) stopped" + +--- a/ocf/nova-consoleauth ++++ b/ocf/nova-consoleauth +@@ -11,6 +11,12 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + # + # See usage() function below for more details ... 
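Every stop path in this patch, and the ceilometer stop-speedup patch later in the series, escalates the same way: derive a shutdown budget from Pacemaker's operation timeout, poll until the daemon exits, then fall back to SIGKILL and the confirm_stop sweep. Condensed to its core (the 5-second margin and the short default mirror the values used in these scripts):

# Budget: CRM meta timeout arrives in milliseconds; keep a 5s margin for
# our own cleanup, and use a small default when it is unset.
shutdown_timeout=2
if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then
    shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5))
fi

kill -s TERM "$pid"
count=0
while [ $count -lt $shutdown_timeout ] && kill -s 0 "$pid" 2>/dev/null; do
    count=$((count+1))
    sleep 1
done
# Still alive after the budget: escalate to SIGKILL.
kill -s 0 "$pid" 2>/dev/null && kill -s KILL "$pid"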
+ # +@@ -228,6 +234,7 @@ nova_consoleauth_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack Nova Console Auth (nova-consoleauth) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } +@@ -310,6 +317,20 @@ nova_consoleauth_start() { + return $OCF_SUCCESS + } + ++nova_consoleauth_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + nova_consoleauth_stop() { + local rc + local pid +@@ -318,6 +339,7 @@ nova_consoleauth_stop() { + rc=$? + if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack Nova ConsoleAuth (nova-consoleauth) already stopped" ++ nova_consoleauth_confirm_stop + return $OCF_SUCCESS + fi + +@@ -327,6 +349,7 @@ nova_consoleauth_stop() { + rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "OpenStack Nova ConsoleAuth (nova-consoleauth) couldn't be stopped" ++ nova_consoleauth_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -355,6 +378,7 @@ nova_consoleauth_stop() { + using SIGTERM. Trying SIGKILL ..." + ocf_run kill -s KILL $pid + fi ++ nova_consoleauth_confirm_stop + + ocf_log info "OpenStack Nova ConsoleAuth (nova-consoleauth) stopped" + +--- a/ocf/nova-network ++++ b/ocf/nova-network +@@ -12,6 +12,12 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + # + # See usage() function below for more details ... + # +@@ -207,6 +213,7 @@ nova_network_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack Nova Network (nova-network) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } +@@ -289,6 +296,20 @@ nova_network_start() { + return $OCF_SUCCESS + } + ++nova_network_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + nova_network_stop() { + local rc + local pid +@@ -297,6 +318,7 @@ nova_network_stop() { + rc=$? + if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack Nova Network (nova-network) already stopped" ++ nova_network_confirm_stop + return $OCF_SUCCESS + fi + +@@ -307,6 +329,7 @@ nova_network_stop() { + rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "OpenStack Nova Network (nova-network) couldn't be stopped" ++ nova_network_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -335,6 +358,7 @@ nova_network_stop() { + using SIGTERM. Trying SIGKILL ..." + ocf_run kill -s KILL $pid + fi ++ nova_network_confirm_stop + + ocf_log info "OpenStack Nova Network (nova-network) stopped" + +--- a/ocf/nova-novnc ++++ b/ocf/nova-novnc +@@ -11,6 +11,12 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + # + # See usage() function below for more details ... 
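The nova-novnc hunk below retires nova_vnc_console_stop_all, which swept for leftover children with a ps pipeline, in favor of the same pgrep/pkill helper used by the other agents. The difference is worth spelling out: the pipeline substring-matches whole command lines (hence the `grep -v grep` guard) and signals whatever it finds in two fixed TERM/KILL passes, while pgrep applies a single anchored regex and never reports its own process. Side by side, with the novncproxy path used purely as an illustration:

# Old sweep: substring matching over command lines, needs grep -v grep.
ps -eo pid,cmd | grep python | grep "nova-novncproxy" | grep -v grep | awk '{print $1}'

# New helper: one anchored ERE over the full command line.
pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) /usr/bin/nova-novncproxy([^\w-]|$)"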
+ # +@@ -195,6 +201,7 @@ nova_vnc_console_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack Nova VNC Console (nova-novncproxy) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } +@@ -260,21 +267,18 @@ nova_vnc_console_start() { + return $OCF_SUCCESS + } + +-nova_vnc_console_stop_all() { +- # Make sure nova-novncproxy and all the children are stopped. +- for sig in TERM KILL +- do +- for pid in $(ps -eo pid,cmd | grep python |\ +- grep "nova-novncproxy" | \ +- grep -v grep | awk '{print $1}') +- do +- ocf_log info "Manually killing $pid with $sig" +- kill -$sig $pid +- done +- sleep 1 +- done +- +- return $OCF_SUCCESS ++nova_vnc_console_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi + } + + nova_vnc_console_stop() { +@@ -285,7 +289,7 @@ nova_vnc_console_stop() { + rc=$? + if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack Nova VNC Console (nova-novncproxy) already stopped" +- nova_vnc_console_stop_all ++ nova_vnc_console_confirm_stop + return $OCF_SUCCESS + fi + +@@ -295,7 +299,7 @@ nova_vnc_console_stop() { + rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "OpenStack Nova VNC Console (nova-novncproxy) couldn't be stopped" +- nova_vnc_console_stop_all ++ nova_vnc_console_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -324,13 +328,12 @@ nova_vnc_console_stop() { + using SIGTERM. Trying SIGKILL ..." + ocf_run kill -s KILL $pid + fi ++ nova_vnc_console_confirm_stop + + ocf_log info "OpenStack Nova VNC Console (nova-novncproxy) stopped" + + rm -f $OCF_RESKEY_pid + +- nova_vnc_console_stop_all +- + return $OCF_SUCCESS + } + +--- a/ocf/nova-scheduler ++++ b/ocf/nova-scheduler +@@ -11,6 +11,12 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + # + # See usage() function below for more details ... + # +@@ -228,6 +234,7 @@ nova_scheduler_status() { + return $OCF_SUCCESS + else + ocf_log info "Old PID file found, but OpenStack Nova Scheduler (nova-scheduler) is not running" ++ rm -f $OCF_RESKEY_pid + return $OCF_NOT_RUNNING + fi + } +@@ -308,6 +315,20 @@ nova_scheduler_start() { + return $OCF_SUCCESS + } + ++nova_scheduler_confirm_stop() { ++ local my_bin ++ local my_processes ++ ++ my_binary=`which ${OCF_RESKEY_binary}` ++ my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` ++ ++ if [ -n "${my_processes}" ] ++ then ++ ocf_log info "About to SIGKILL the following: ${my_processes}" ++ pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" ++ fi ++} ++ + nova_scheduler_stop() { + local rc + local pid +@@ -316,6 +337,7 @@ nova_scheduler_stop() { + rc=$? + if [ $rc -eq $OCF_NOT_RUNNING ]; then + ocf_log info "OpenStack Nova Scheduler (nova-scheduler) already stopped" ++ nova_scheduler_confirm_stop + return $OCF_SUCCESS + fi + +@@ -325,6 +347,7 @@ nova_scheduler_stop() { + rc=$? 
+ if [ $rc -ne 0 ]; then + ocf_log err "OpenStack Nova Scheduler (nova-scheduler) couldn't be stopped" ++ nova_scheduler_confirm_stop + exit $OCF_ERR_GENERIC + fi + +@@ -353,6 +376,7 @@ nova_scheduler_stop() { + using SIGTERM. Trying SIGKILL ..." + ocf_run kill -s KILL $pid + fi ++ nova_scheduler_confirm_stop + + ocf_log info "OpenStack Nova Scheduler (nova-scheduler) stopped" + +--- a/ocf/validation ++++ b/ocf/validation +@@ -1,4 +1,11 @@ + #!/usr/bin/env python ++# ++# Copyright (c) 2014 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + + from keystoneclient import probe + diff --git a/openstack/openstack-ras/openstack-ras/plugin_config_parameter_name.patch b/openstack/openstack-ras/openstack-ras/plugin_config_parameter_name.patch new file mode 100644 index 00000000..c5ee9bf7 --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/plugin_config_parameter_name.patch @@ -0,0 +1,57 @@ +--- + ocf/nova-novnc | 8 +++----- + ocf/neutron-agent-dhcp | 2 +- + ocf/neutron-agent-l3 | 2 +- + ocf/neutron-server | 2 +- + 4 files changed, 6 insertions(+), 8 deletions(-) + +--- a/ocf/neutron-agent-dhcp ++++ b/ocf/neutron-agent-dhcp +@@ -95,7 +95,7 @@ Location of the OpenStack Quantum Servic + + + +- ++ + + Location of the OpenStack DHCP Service (neutron-dhcp-agent) configuration file + +--- a/ocf/neutron-agent-l3 ++++ b/ocf/neutron-agent-l3 +@@ -95,7 +95,7 @@ Location of the OpenStack Quantum Servic + + + +- ++ + + Location of the OpenStack L3 Service (neutron-l3-agent) configuration file + +--- a/ocf/neutron-server ++++ b/ocf/neutron-server +@@ -101,7 +101,7 @@ Location of the OpenStack Quantum Server + + + +- ++ + + Location of the OpenStack Default Plugin (Open-vSwitch) configuration file + +--- a/ocf/nova-novnc ++++ b/ocf/nova-novnc +@@ -213,11 +213,9 @@ nova_vnc_console_monitor() { + + # Check whether we are supposed to monitor by logging into nova-novncproxy + # and do it if that's the case. +- vnc_list_check=`netstat -a | grep -s "$OCF_RESKEY_console_port" | grep -qs "LISTEN"` +- #rc=$? +- # not sure why grep is returning 1 .. should root cause at some point. +- # return success for now since service and port are both up +- rc=0 ++ # Adding -n to netstat so that dns delays will not impact this. ++ vnc_list_check=`netstat -an | grep -s "$OCF_RESKEY_console_port" | grep -qs "LISTEN"` ++ rc=$? + if [ $rc -ne 0 ]; then + ocf_log err "Nova VNC Console doesn't seem to listen on his default port: $rc" + return $OCF_NOT_RUNNING diff --git a/openstack/openstack-ras/openstack-ras/rebase_workaround.patch b/openstack/openstack-ras/openstack-ras/rebase_workaround.patch new file mode 100644 index 00000000..d49230e6 --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/rebase_workaround.patch @@ -0,0 +1,20 @@ +--- + ocf/neutron-server | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) + +--- a/ocf/neutron-server ++++ b/ocf/neutron-server +@@ -287,8 +287,11 @@ neutron_server_start() { + # run the actual neutron-server daemon with correct configurations files (server + plugin) + # Don't use ocf_run as we're sending the tool's output + # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. 
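The su invocation rewritten just below is easy to misread because of its split quoting, which this workaround preserves: the double-quoted half is expanded by the resource agent (binary and config paths), while the single-quoted tail is passed through verbatim so that the redirection, the backgrounding `&`, and `$!` are all evaluated by the shell that su spawns; the child PID it echoes is what lands in the pidfile. A minimal standalone version with placeholder names:

# Double-quoted part expands here; single-quoted part expands inside su's sh.
su "$RUN_AS_USER" -s /bin/sh -c "$DAEMON --config-file=$CONF"' >> /dev/null 2>&1 & echo $!' > "$PIDFILE"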
+- su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ +- --config-file=$OCF_RESKEY_plugin_config $OCF_RESKEY_additional_parameters"' >> \ ++ ## DPENNEY: Removing plugin ref ++ ##su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ ++ ## --config-file=$OCF_RESKEY_plugin_config $OCF_RESKEY_additional_parameters"' >> \ ++ ## /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid ++ su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config"' >> \ + /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid + + # Spin waiting for the server to come up. diff --git a/openstack/openstack-ras/openstack-ras/remove-ceilometer-mem-db.patch b/openstack/openstack-ras/openstack-ras/remove-ceilometer-mem-db.patch new file mode 100644 index 00000000..5038277d --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/remove-ceilometer-mem-db.patch @@ -0,0 +1,388 @@ +From daaf82a9e83f28e1e1072fc6d77ca57d4eb22c5d Mon Sep 17 00:00:00 2001 +From: Angie Wang +Date: Mon, 14 Nov 2016 13:58:27 -0500 +Subject: [PATCH] remove-ceilometer-mem-db + +--- + ocf/ceilometer-mem-db | 369 -------------------------------------------------- + 1 file changed, 369 deletions(-) + delete mode 100644 ocf/ceilometer-mem-db + +diff --git a/ocf/ceilometer-mem-db b/ocf/ceilometer-mem-db +deleted file mode 100644 +index d7112d8..0000000 +--- a/ocf/ceilometer-mem-db ++++ /dev/null +@@ -1,369 +0,0 @@ +-#!/bin/sh +-# +-# +-# OpenStack Ceilometer Mem DB Service (ceilometer-mem-db) +-# +-# Description: Manages an OpenStack Ceilometer Mem DB Service (ceilometer-mem-db) process as an HA resource +-# +-# Authors: Emilien Macchi +-# Mainly inspired by the Nova Scheduler resource agent written by Sebastien Han +-# +-# Support: openstack@lists.launchpad.net +-# License: Apache Software License (ASL) 2.0 +-# +-# Copyright (c) 2014-2016 Wind River Systems, Inc. +-# SPDX-License-Identifier: Apache-2.0 +-# +-# +-# +-# +-# +-# See usage() function below for more details ... +-# +-# OCF instance parameters: +-# OCF_RESKEY_binary +-# OCF_RESKEY_config +-# OCF_RESKEY_user +-# OCF_RESKEY_pid +-# OCF_RESKEY_monitor_binary +-# OCF_RESKEY_amqp_server_port +-# OCF_RESKEY_additional_parameters +-####################################################################### +-# Initialization: +- +-: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} +-. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs +- +-####################################################################### +- +-# Fill in some defaults if no values are specified +- +-OCF_RESKEY_binary_default="ceilometer-mem-db" +-OCF_RESKEY_config_default="/etc/ceilometer/ceilometer.conf" +-OCF_RESKEY_user_default="root" +-OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" +-OCF_RESKEY_amqp_server_port_default="5672" +- +-: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} +-: ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} +-: ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} +-: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} +-: ${OCF_RESKEY_amqp_server_port=${OCF_RESKEY_amqp_server_port_default}} +- +-####################################################################### +- +-usage() { +- cat < +- +- +-1.0 +- +- +-Resource agent for the OpenStack Ceilometer Mem DB Service (ceilometer-mem-db) +-May manage a ceilometer-mem-db instance or a clone set that +-creates a distributed ceilometer-mem-db cluster. 
+- +-Manages the OpenStack Ceilometer Mem DB Service (ceilometer-mem-db) +- +- +- +- +-Location of the OpenStack Ceilometer Mem DB server binary (ceilometer-mem-db) +- +-OpenStack Ceilometer Mem DB server binary (ceilometer-mem-db) +- +- +- +- +- +-Location of the OpenStack Ceilometer Mem DB Service (ceilometer-mem-db) configuration file +- +-OpenStack Ceilometer Mem DB (ceilometer-mem-db registry) config file +- +- +- +- +- +-User running OpenStack Ceilometer Mem DB Service (ceilometer-mem-db) +- +-OpenStack Ceilometer Mem DB Service (ceilometer-mem-db) user +- +- +- +- +- +-The pid file to use for this OpenStack Ceilometer Mem DB Service (ceilometer-mem-db) instance +- +-OpenStack Ceilometer Mem DB Service (ceilometer-mem-db) pid file +- +- +- +- +- +-The listening port number of the AMQP server. Use for monitoring purposes +- +-AMQP listening port +- +- +- +- +- +- +-Additional parameters to pass on to the OpenStack Ceilometer Mem DB Service (ceilometer-mem-db) +- +-Additional parameters for ceilometer-mem-db +- +- +- +- +- +- +- +- +- +- +- +- +- +- +-END +-} +- +-####################################################################### +-# Functions invoked by resource manager actions +- +-ceilometer_mem_db_check_port() { +-# This function has been taken from the squid RA and improved a bit +-# The length of the integer must be 4 +-# Examples of valid port: "1080", "0080" +-# Examples of invalid port: "1080bad", "0", "0000", "" +- +- local int +- local cnt +- +- int="$1" +- cnt=${#int} +- echo $int |egrep -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*' +- +- if [ $? -ne 0 ] || [ $cnt -ne 4 ]; then +- ocf_log err "Invalid port number: $1" +- exit $OCF_ERR_CONFIGURED +- fi +-} +- +-ceilometer_mem_db_validate() { +- local rc +- +- check_binary $OCF_RESKEY_binary +- check_binary netstat +- ceilometer_mem_db_check_port $OCF_RESKEY_amqp_server_port +- +- # A config file on shared storage that is not available +- # during probes is OK. +- if [ ! -f $OCF_RESKEY_config ]; then +- if ! ocf_is_probe; then +- ocf_log err "Config $OCF_RESKEY_config doesn't exist" +- return $OCF_ERR_INSTALLED +- fi +- ocf_log_warn "Config $OCF_RESKEY_config not available during a probe" +- fi +- +- getent passwd $OCF_RESKEY_user >/dev/null 2>&1 +- rc=$? +- if [ $rc -ne 0 ]; then +- ocf_log err "User $OCF_RESKEY_user doesn't exist" +- return $OCF_ERR_INSTALLED +- fi +- +- true +-} +- +-ceilometer_mem_db_status() { +- local pid +- local rc +- +- if [ ! -f $OCF_RESKEY_pid ]; then +- ocf_log info "OpenStack Ceilometer Mem DB (ceilometer-mem-db) is not running" +- return $OCF_NOT_RUNNING +- else +- pid=`cat $OCF_RESKEY_pid` +- fi +- +- ocf_run -warn kill -s 0 $pid +- rc=$? +- if [ $rc -eq 0 ]; then +- return $OCF_SUCCESS +- else +- ocf_log info "Old PID file found, but OpenStack Ceilometer Mem DB (ceilometer-mem-db) is not running" +- rm -f $OCF_RESKEY_pid +- return $OCF_NOT_RUNNING +- fi +-} +- +-ceilometer_mem_db_monitor() { +- local rc +- local pid +- local scheduler_amqp_check +- +- ceilometer_mem_db_status +- rc=$? +- +- # If status returned anything but success, return that immediately +- if [ $rc -ne $OCF_SUCCESS ]; then +- return $rc +- fi +- +- # Check the connections according to the PID. +- # We are sure to hit the scheduler process and not other Cinder process with the same connection behavior (for example cinder-api) +- pid=`cat $OCF_RESKEY_pid` +- scheduler_amqp_check=`netstat -punt | grep -s "$OCF_RESKEY_amqp_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"` +- rc=$? 
+- if [ $rc -ne 0 ]; then +- ocf_log err "Mem DB is not connected to the AMQP server : $rc" +- return $OCF_NOT_RUNNING +- fi +- +- ocf_log debug "OpenStack Ceilometer Mem DB (ceilometer-mem-db) monitor succeeded" +- return $OCF_SUCCESS +-} +- +-ceilometer_mem_db_start() { +- local rc +- +- ceilometer_mem_db_status +- rc=$? +- if [ $rc -eq $OCF_SUCCESS ]; then +- ocf_log info "OpenStack Ceilometer Mem DB (ceilometer-mem-db) already running" +- return $OCF_SUCCESS +- fi +- +- # run the actual ceilometer-mem-db daemon. Don't use ocf_run as we're sending the tool's output +- # straight to /dev/null anyway and using ocf_run would break stdout-redirection here. +- su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ +- $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid +- +- # Spin waiting for the server to come up. +- while true; do +- ceilometer_mem_db_monitor +- rc=$? +- [ $rc -eq $OCF_SUCCESS ] && break +- if [ $rc -ne $OCF_NOT_RUNNING ]; then +- ocf_log err "OpenStack Ceilometer Mem DB (ceilometer-mem-db) start failed" +- exit $OCF_ERR_GENERIC +- fi +- sleep 1 +- done +- +- ocf_log info "OpenStack Ceilometer Mem DB (ceilometer-mem-db) started" +- return $OCF_SUCCESS +-} +- +-ceilometer_mem_db_confirm_stop() { +- local my_bin +- local my_processes +- +- my_binary=`which ${OCF_RESKEY_binary}` +- my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` +- +- if [ -n "${my_processes}" ] +- then +- ocf_log info "About to SIGKILL the following: ${my_processes}" +- pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" +- fi +-} +- +-ceilometer_mem_db_stop() { +- local rc +- local pid +- +- ceilometer_mem_db_status +- rc=$? +- if [ $rc -eq $OCF_NOT_RUNNING ]; then +- ocf_log info "OpenStack Ceilometer Mem DB (ceilometer-mem-db) already stopped" +- ceilometer_mem_db_confirm_stop +- return $OCF_SUCCESS +- fi +- +- # Try SIGTERM +- pid=`cat $OCF_RESKEY_pid` +- ocf_run kill -s TERM $pid +- rc=$? +- if [ $rc -ne 0 ]; then +- ocf_log err "OpenStack Ceilometer Mem DB (ceilometer-mem-db) couldn't be stopped" +- ceilometer_mem_db_confirm_stop +- exit $OCF_ERR_GENERIC +- fi +- +- # stop waiting +- shutdown_timeout=2 +- if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then +- shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) +- fi +- count=0 +- while [ $count -lt $shutdown_timeout ]; do +- ceilometer_mem_db_status +- rc=$? +- if [ $rc -eq $OCF_NOT_RUNNING ]; then +- break +- fi +- count=`expr $count + 1` +- sleep 1 +- ocf_log debug "OpenStack Ceilometer Mem DB (ceilometer-mem-db) still hasn't stopped yet. Waiting ..." +- done +- +- ceilometer_mem_db_status +- rc=$? +- if [ $rc -ne $OCF_NOT_RUNNING ]; then +- # SIGTERM didn't help either, try SIGKILL +- ocf_log info "OpenStack Ceilometer Mem DB (ceilometer-mem-db) failed to stop after ${shutdown_timeout}s \ +- using SIGTERM. Trying SIGKILL ..." +- ocf_run kill -s KILL $pid +- fi +- ceilometer_mem_db_confirm_stop +- +- ocf_log info "OpenStack Ceilometer Mem DB (ceilometer-mem-db) stopped" +- +- rm -f $OCF_RESKEY_pid +- +- return $OCF_SUCCESS +-} +- +-####################################################################### +- +-case "$1" in +- meta-data) meta_data +- exit $OCF_SUCCESS;; +- usage|help) usage +- exit $OCF_SUCCESS;; +-esac +- +-# Anything except meta-data and help must pass validation +-ceilometer_mem_db_validate || exit $? +- +-# What kind of method was invoked? 
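The dispatch deleted just below follows the standard OCF resource agent shape: meta-data and help must answer without validation, since the cluster manager queries them on nodes where the managed software may not even be installed, and every other action validates first. The skeleton, with hypothetical example_* functions standing in for an agent's real handlers:

case "$1" in
    meta-data)   meta_data; exit $OCF_SUCCESS;;
    usage|help)  usage; exit $OCF_SUCCESS;;
esac

example_validate || exit $?   # all remaining actions need a sane config

case "$1" in
    start)        example_start;;
    stop)         example_stop;;
    status)       example_status;;
    monitor)      example_monitor;;
    validate-all) ;;
    *)            usage; exit $OCF_ERR_UNIMPLEMENTED;;
esac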
+-case "$1" in +- start) ceilometer_mem_db_start;; +- stop) ceilometer_mem_db_stop;; +- status) ceilometer_mem_db_status;; +- monitor) ceilometer_mem_db_monitor;; +- validate-all) ;; +- *) usage +- exit $OCF_ERR_UNIMPLEMENTED;; +-esac +-- +1.8.3.1 + diff --git a/openstack/openstack-ras/openstack-ras/speed_up_respons_to_stop_ceilometer_svc.patch b/openstack/openstack-ras/openstack-ras/speed_up_respons_to_stop_ceilometer_svc.patch new file mode 100644 index 00000000..236cfcf8 --- /dev/null +++ b/openstack/openstack-ras/openstack-ras/speed_up_respons_to_stop_ceilometer_svc.patch @@ -0,0 +1,87 @@ +--- + ocf/ceilometer-agent-notification | 4 ++-- + ocf/ceilometer-api | 4 ++-- + ocf/ceilometer-collector | 4 ++-- + ocf/ceilometer-mem-db | 4 ++-- + 4 files changed, 8 insertions(+), 8 deletions(-) + +--- a/ocf/ceilometer-api ++++ b/ocf/ceilometer-api +@@ -11,7 +11,7 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # +-# Copyright (c) 2014 Wind River Systems, Inc. ++# Copyright (c) 2014-2016 Wind River Systems, Inc. + # + # SPDX-License-Identifier: Apache-2.0 + # +@@ -324,7 +324,7 @@ ceilometer_api_stop() { + fi + + # stop waiting +- shutdown_timeout=15 ++ shutdown_timeout=2 + if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then + shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) + fi +--- a/ocf/ceilometer-agent-notification ++++ b/ocf/ceilometer-agent-notification +@@ -11,7 +11,7 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # +-# Copyright (c) 2014 Wind River Systems, Inc. ++# Copyright (c) 2014-2016 Wind River Systems, Inc. + # + # SPDX-License-Identifier: Apache-2.0 + # +@@ -314,7 +314,7 @@ ceilometer_agent_notification_stop() { + fi + + # stop waiting +- shutdown_timeout=15 ++ shutdown_timeout=2 + if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then + shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) + fi +--- a/ocf/ceilometer-collector ++++ b/ocf/ceilometer-collector +@@ -11,7 +11,7 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # +-# Copyright (c) 2014 Wind River Systems, Inc. ++# Copyright (c) 2014-2016 Wind River Systems, Inc. + # + # SPDX-License-Identifier: Apache-2.0 + # +@@ -313,7 +313,7 @@ ceilometer_collector_stop() { + fi + + # stop waiting +- shutdown_timeout=15 ++ shutdown_timeout=2 + if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then + shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) + fi +--- a/ocf/ceilometer-mem-db ++++ b/ocf/ceilometer-mem-db +@@ -11,7 +11,7 @@ + # Support: openstack@lists.launchpad.net + # License: Apache Software License (ASL) 2.0 + # +-# Copyright (c) 2014 Wind River Systems, Inc. ++# Copyright (c) 2014-2016 Wind River Systems, Inc. 
+ # + # SPDX-License-Identifier: Apache-2.0 + # +@@ -312,7 +312,7 @@ ceilometer_mem_db_stop() { + fi + + # stop waiting +- shutdown_timeout=15 ++ shutdown_timeout=2 + if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then + shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) + fi diff --git a/openstack/python-ceilometer/centos/build_srpm.data b/openstack/python-ceilometer/centos/build_srpm.data new file mode 100644 index 00000000..3d41a778 --- /dev/null +++ b/openstack/python-ceilometer/centos/build_srpm.data @@ -0,0 +1,11 @@ +SRC_DIR="$CGCS_BASE/git/ceilometer" +TAR_NAME=ceilometer +COPY_LIST="$FILES_BASE/* \ + python-ceilometer/static/ceilometer-expirer-active \ + python-ceilometer/static/ceilometer-polling \ + python-ceilometer/static/ceilometer-polling.conf \ + python-ceilometer/static/ceilometer-polling.conf.pmon.centos \ + python-ceilometer/static/ceilometer-polling-compute.conf.pmon.centos \ + python-ceilometer/static/ceilometer-agent-compute" +TIS_BASE_SRCREV=105788514dadcd881fc86d4b9a03d0d10e2e0874 +TIS_PATCH_VER=GITREVCOUNT diff --git a/openstack/python-ceilometer/centos/files/ceilometer-dist.conf b/openstack/python-ceilometer/centos/files/ceilometer-dist.conf new file mode 100644 index 00000000..2f45dfb1 --- /dev/null +++ b/openstack/python-ceilometer/centos/files/ceilometer-dist.conf @@ -0,0 +1,6 @@ +[DEFAULT] +#log_dir = /var/log/ceilometer +use_stderr = False + +[database] +connection = mongodb://localhost:27017/ceilometer diff --git a/openstack/python-ceilometer/centos/files/ceilometer-rootwrap-sudoers b/openstack/python-ceilometer/centos/files/ceilometer-rootwrap-sudoers new file mode 100644 index 00000000..ba971665 --- /dev/null +++ b/openstack/python-ceilometer/centos/files/ceilometer-rootwrap-sudoers @@ -0,0 +1,2 @@ +Defaults:ceilometer !requiretty +ceilometer ALL = (root) NOPASSWD: /usr/bin/ceilometer-rootwrap /etc/ceilometer/rootwrap.conf * diff --git a/openstack/python-ceilometer/centos/files/ceilometer.conf.sample b/openstack/python-ceilometer/centos/files/ceilometer.conf.sample new file mode 100644 index 00000000..34bb6871 --- /dev/null +++ b/openstack/python-ceilometer/centos/files/ceilometer.conf.sample @@ -0,0 +1,1176 @@ +[DEFAULT] + +# +# Options defined in oslo.messaging +# + +# Use durable queues in amqp. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues=false + +# Auto-delete queues in amqp. (boolean value) +#amqp_auto_delete=false + +# Size of RPC connection pool. (integer value) +#rpc_conn_pool_size=30 + +# Qpid broker hostname. (string value) +#qpid_hostname=localhost + +# Qpid broker port. (integer value) +#qpid_port=5672 + +# Qpid HA cluster host:port pairs. (list value) +#qpid_hosts=$qpid_hostname:$qpid_port + +# Username for Qpid connection. (string value) +#qpid_username= + +# Password for Qpid connection. (string value) +#qpid_password= + +# Space separated list of SASL mechanisms to use for auth. +# (string value) +#qpid_sasl_mechanisms= + +# Seconds between connection keepalive heartbeats. (integer +# value) +#qpid_heartbeat=60 + +# Transport to use, either 'tcp' or 'ssl'. (string value) +#qpid_protocol=tcp + +# Whether to disable the Nagle algorithm. (boolean value) +#qpid_tcp_nodelay=true + +# The number of prefetched messages held by receiver. (integer +# value) +#qpid_receiver_capacity=1 + +# The qpid topology version to use. Version 1 is what was +# originally used by impl_qpid. Version 2 includes some +# backwards-incompatible changes that allow broker federation +# to work. 
Users should update to version 2 when they are +# able to take everything down, as it requires a clean break. +# (integer value) +#qpid_topology_version=1 + +# SSL version to use (valid only if SSL enabled). valid values +# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some +# distributions. (string value) +#kombu_ssl_version= + +# SSL key file (valid only if SSL enabled). (string value) +#kombu_ssl_keyfile= + +# SSL cert file (valid only if SSL enabled). (string value) +#kombu_ssl_certfile= + +# SSL certification authority file (valid only if SSL +# enabled). (string value) +#kombu_ssl_ca_certs= + +# How long to wait before reconnecting in response to an AMQP +# consumer cancel notification. (floating point value) +#kombu_reconnect_delay=1.0 + +# The RabbitMQ broker address where a single node is used. +# (string value) +#rabbit_host=localhost + +# The RabbitMQ broker port where a single node is used. +# (integer value) +#rabbit_port=5672 + +# RabbitMQ HA cluster host:port pairs. (list value) +#rabbit_hosts=$rabbit_host:$rabbit_port + +# Connect over SSL for RabbitMQ. (boolean value) +#rabbit_use_ssl=false + +# The RabbitMQ userid. (string value) +#rabbit_userid=guest + +# The RabbitMQ password. (string value) +#rabbit_password=guest + +# the RabbitMQ login method (string value) +#rabbit_login_method=AMQPLAIN + +# The RabbitMQ virtual host. (string value) +#rabbit_virtual_host=/ + +# How frequently to retry connecting with RabbitMQ. (integer +# value) +#rabbit_retry_interval=1 + +# How long to backoff for between retries when connecting to +# RabbitMQ. (integer value) +#rabbit_retry_backoff=2 + +# Maximum number of RabbitMQ connection retries. Default is 0 +# (infinite retry count). (integer value) +#rabbit_max_retries=0 + +# Use HA queues in RabbitMQ (x-ha-policy: all). If you change +# this option, you must wipe the RabbitMQ database. (boolean +# value) +#rabbit_ha_queues=false + +# If passed, use a fake RabbitMQ provider. (boolean value) +#fake_rabbit=false + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet +# interface, or IP. The "host" option should point or resolve +# to this address. (string value) +#rpc_zmq_bind_address=* + +# MatchMaker driver. (string value) +#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost + +# ZeroMQ receiver listening port. (integer value) +#rpc_zmq_port=9501 + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts=1 + +# Maximum number of ingress messages to locally buffer per +# topic. Default is unlimited. (integer value) +#rpc_zmq_topic_backlog= + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir=/var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP +# address. Must match "host" option, if running Nova. (string +# value) +#rpc_zmq_host=ceilometer + +# Seconds to wait before a cast expires (TTL). Only supported +# by impl_zmq. (integer value) +#rpc_cast_timeout=30 + +# Heartbeat frequency. (integer value) +#matchmaker_heartbeat_freq=300 + +# Heartbeat time-to-live. (integer value) +#matchmaker_heartbeat_ttl=600 + +# Size of RPC greenthread pool. (integer value) +#rpc_thread_pool_size=64 + +# Driver or drivers to handle sending notifications. (multi +# valued) +#notification_driver= + +# AMQP topic used for OpenStack notifications. (list value) +# Deprecated group/name - [rpc_notifier2]/topics +#notification_topics=notifications + +# Seconds to wait for a response from a call. 
(integer value) +#rpc_response_timeout=60 + +# A URL representing the messaging driver to use and its full +# configuration. If not set, we fall back to the rpc_backend +# option and driver specific configuration. (string value) +#transport_url= + +# The messaging driver to use, defaults to rabbit. Other +# drivers include qpid and zmq. (string value) +#rpc_backend=rabbit + +# The default exchange under which topics are scoped. May be +# overridden by an exchange name specified in the +# transport_url option. (string value) +#control_exchange=openstack + + +# +# Options defined in ceilometer.middleware +# + +# Exchange names to listen for notifications. (multi valued) +#http_control_exchanges=nova +#http_control_exchanges=glance +#http_control_exchanges=neutron +#http_control_exchanges=cinder + + +# +# Options defined in ceilometer.nova_client +# + +# Allow novaclient's debug log output. (boolean value) +#nova_http_log_debug=false + + +# +# Options defined in ceilometer.pipeline +# + +# Configuration file for pipeline definition. (string value) +#pipeline_cfg_file=pipeline.yaml + + +# +# Options defined in ceilometer.sample +# + +# Source for samples emitted on this instance. (string value) +# Deprecated group/name - [DEFAULT]/counter_source +#sample_source=openstack + + +# +# Options defined in ceilometer.service +# + +# Name of this node, which must be valid in an AMQP key. Can +# be an opaque identifier. For ZeroMQ only, must be a valid +# host name, FQDN, or IP address. (string value) +#host=ceilometer + +# Number of workers for collector service. A single collector +# is enabled by default. (integer value) +#collector_workers=1 + +# Number of workers for notification service. A single +# notification agent is enabled by default. (integer value) +#notification_workers=1 + + +# +# Options defined in ceilometer.utils +# + +# Path to the rootwrap configuration file to use for running +# commands as root (string value) +#rootwrap_config=/etc/ceilometer/rootwrap.conf + + +# +# Options defined in ceilometer.api.app +# + +# Configuration file for WSGI definition of API. (string +# value) +#api_paste_config=api_paste.ini + + +# +# Options defined in ceilometer.compute.notifications +# + +# Exchange name for Nova notifications. (string value) +#nova_control_exchange=nova + + +# +# Options defined in ceilometer.compute.util +# + +# List of metadata prefixes reserved for metering use. (list +# value) +#reserved_metadata_namespace=metering. + +# Limit on length of reserved metadata values. (integer value) +#reserved_metadata_length=256 + + +# +# Options defined in ceilometer.compute.virt.inspector +# + +# Inspector to use for inspecting the hypervisor layer. +# (string value) +#hypervisor_inspector=libvirt + + +# +# Options defined in ceilometer.compute.virt.libvirt.inspector +# + +# Libvirt domain type (valid options are: kvm, lxc, qemu, uml, +# xen). (string value) +#libvirt_type=kvm + +# Override the default libvirt URI (which is dependent on +# libvirt_type). (string value) +#libvirt_uri= + + +# +# Options defined in ceilometer.data_processing.notifications +# + +# Exchange name for Data Processing notifications (string +# value) +#sahara_control_exchange=sahara + + +# +# Options defined in ceilometer.dispatcher +# + +# Dispatcher to process data. (multi valued) +#dispatcher=database + + +# +# Options defined in ceilometer.identity.notifications +# + +# Exchange name for Keystone notifications. 
(string value) +#keystone_control_exchange=keystone + + +# +# Options defined in ceilometer.image.glance +# + +# Number of items to request in each paginated Glance API +# request (parameter used by glanceclient). If this is less +# than or equal to 0, page size is not specified (default +# value in glanceclient is used). (integer value) +#glance_page_size=0 + + +# +# Options defined in ceilometer.image.notifications +# + +# Exchange name for Glance notifications. (string value) +#glance_control_exchange=glance + + +# +# Options defined in ceilometer.ipmi.notifications.ironic +# + +# Exchange name for Ironic notifications. (string value) +#ironic_exchange=ironic + + +# +# Options defined in ceilometer.network.notifications +# + +# Exchange name for Neutron notifications. (string value) +# Deprecated group/name - [DEFAULT]/quantum_control_exchange +#neutron_control_exchange=neutron + + +# +# Options defined in ceilometer.objectstore.swift +# + +# Swift reseller prefix. Must be on par with reseller_prefix +# in proxy-server.conf. (string value) +#reseller_prefix=AUTH_ + + +# +# Options defined in ceilometer.openstack.common.eventlet_backdoor +# + +# Enable eventlet backdoor. Acceptable values are 0, <port>, +# and <start>:<end>, where 0 results in listening on a random +# tcp port number; <port> results in listening on the +# specified port number (and not enabling backdoor if that +# port is in use); and <start>:<end> results in listening on +# the smallest unused port number within the specified range +# of port numbers. The chosen port is displayed in the +# service's log file. (string value) +#backdoor_port= + + +# +# Options defined in ceilometer.openstack.common.lockutils +# + +# Enables or disables inter-process locks. (boolean value) +#disable_process_locking=false + +# Directory to use for lock files. (string value) +#lock_path= + + +# +# Options defined in ceilometer.openstack.common.log +# + +# Print debugging output (set logging level to DEBUG instead +# of default WARNING level). (boolean value) +#debug=false + +# Print more verbose output (set logging level to INFO instead +# of default WARNING level). (boolean value) +#verbose=false + +# Log output to standard error. (boolean value) +#use_stderr=true + +# Format string to use for log messages with context. (string +# value) +#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# Format string to use for log messages without context. +# (string value) +#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# Data to append to log format when level is DEBUG. (string +# value) +#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d + +# Prefix each line of exception output with this format. +# (string value) +#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s + +# List of logger=LEVEL pairs. (list value) +#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN + +# Enables or disables publication of error events. (boolean +# value) +#publish_errors=false + +# Enables or disables fatal status of deprecations. (boolean +# value) +#fatal_deprecations=false + +# The format for an instance that is passed with the log +# message. 
(string value) +#instance_format="[instance: %(uuid)s] " + +# The format for an instance UUID that is passed with the log +# message. (string value) +#instance_uuid_format="[instance: %(uuid)s] " + +# The name of a logging configuration file. This file is +# appended to any existing logging configuration files. For +# details about logging configuration files, see the Python +# logging module documentation. (string value) +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append= + +# DEPRECATED. A logging.Formatter log message format string +# which may use any of the available logging.LogRecord +# attributes. This option is deprecated. Please use +# logging_context_format_string and +# logging_default_format_string instead. (string value) +#log_format= + +# Format string for %%(asctime)s in log records. Default: +# %(default)s . (string value) +#log_date_format=%Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to output to. If no default is +# set, logging will go to stdout. (string value) +# Deprecated group/name - [DEFAULT]/logfile +#log_file= + +# (Optional) The base directory used for relative --log-file +# paths. (string value) +# Deprecated group/name - [DEFAULT]/logdir +#log_dir= + +# Use syslog for logging. Existing syslog format is DEPRECATED +# during I, and will change in J to honor RFC5424. (boolean +# value) +#use_syslog=false + +# (Optional) Enables or disables syslog rfc5424 format for +# logging. If enabled, prefixes the MSG part of the syslog +# message with APP-NAME (RFC5424). The format without the APP- +# NAME is deprecated in I, and will be removed in J. (boolean +# value) +#use_syslog_rfc_format=false + +# Syslog facility to receive log lines. (string value) +#syslog_log_facility=LOG_USER + + +# +# Options defined in ceilometer.openstack.common.policy +# + +# The JSON file that defines policies. (string value) +#policy_file=policy.json + +# Default rule. Enforced when a requested rule is not found. +# (string value) +#policy_default_rule=default + + +# +# Options defined in ceilometer.orchestration.notifications +# + +# Exchange name for Heat notifications (string value) +#heat_control_exchange=heat + + +# +# Options defined in ceilometer.profiler.notifications +# + +# Exchange name for DBaaS notifications (string value) +#trove_control_exchange=trove + + +# +# Options defined in ceilometer.storage +# + +# DEPRECATED - Database connection string. (string value) +#database_connection= + + +# +# Options defined in ceilometer.volume.notifications +# + +# Exchange name for Cinder notifications. (string value) +#cinder_control_exchange=cinder + + +[alarm] + +# +# Options defined in ceilometer.alarm.notifier.rest +# + +# SSL Client certificate for REST notifier. (string value) +#rest_notifier_certificate_file= + +# SSL Client private key for REST notifier. (string value) +#rest_notifier_certificate_key= + +# Whether to verify the SSL Server certificate when calling +# alarm action. (boolean value) +#rest_notifier_ssl_verify=true + +# Number of retries for REST notifier (integer value) +#rest_notifier_max_retries=0 + + +# +# Options defined in ceilometer.alarm.rpc +# + +# The topic that ceilometer uses for alarm notifier messages. +# (string value) +#notifier_rpc_topic=alarm_notifier + +# The topic that ceilometer uses for alarm partition +# coordination messages. DEPRECATED: RPC-based +# partitionedalarm evaluation service will be removed in Kilo +# in favour of the default alarm evaluation service using tooz +# for partitioning. 
(string value)
+#partition_rpc_topic=alarm_partition_coordination
+
+
+#
+# Options defined in ceilometer.alarm.service
+#
+
+# Period of evaluation cycle, should be >= the configured
+# pipeline interval for collection of underlying metrics.
+# (integer value)
+# Deprecated group/name - [alarm]/threshold_evaluation_interval
+#evaluation_interval=60
+
+
+#
+# Options defined in ceilometer.api.controllers.v2
+#
+
+# Record alarm change events. (boolean value)
+#record_history=true
+
+# Maximum number of alarms defined for a user. (integer value)
+#user_alarm_quota=
+
+# Maximum number of alarms defined for a project. (integer
+# value)
+#project_alarm_quota=
+
+
+#
+# Options defined in ceilometer.cmd.alarm
+#
+
+# Driver to use for alarm evaluation service. DEPRECATED:
+# "singleton" and "partitioned" alarm evaluator services will
+# be removed in Kilo in favour of the default alarm evaluation
+# service using tooz for partitioning. (string value)
+#evaluation_service=default
+
+
+[api]
+
+#
+# Options defined in ceilometer.api
+#
+
+# The port for the ceilometer API server. (integer value)
+# Deprecated group/name - [DEFAULT]/metering_api_port
+#port=8777
+
+# The listen IP for the ceilometer API server. (string value)
+#host=0.0.0.0
+
+# Set it to False if your environment does not need or have a
+# DNS server; otherwise it will delay the response from the
+# API. (boolean value)
+#enable_reverse_dns_lookup=false
+
+
+#
+# Options defined in ceilometer.api.app
+#
+
+# Toggle Pecan Debug Middleware. Defaults to global debug
+# value. (boolean value)
+#pecan_debug=false
+
+
+[central]
+
+#
+# Options defined in ceilometer.central.manager
+#
+
+# Work-load partitioning group prefix. Use only if you want to
+# run multiple central agents with different config files. For
+# each sub-group of the central agent pool with the same
+# partitioning_group_prefix a disjoint subset of pollsters
+# should be loaded. (string value)
+#partitioning_group_prefix=
+
+
+[collector]
+
+#
+# Options defined in ceilometer.collector
+#
+
+# Address to which the UDP socket is bound. Set to an empty
+# string to disable. (string value)
+#udp_address=0.0.0.0
+
+# Port to which the UDP socket is bound. (integer value)
+#udp_port=4952
+
+# Requeue the sample on the collector sample queue when the
+# collector fails to dispatch it. This is only valid if the
+# samples come from the notifier publisher. (boolean value)
+#requeue_sample_on_dispatcher_error=false
+
+
+[compute]
+
+#
+# Options defined in ceilometer.compute.discovery
+#
+
+# Enable work-load partitioning, allowing multiple compute
+# agents to be run simultaneously. (boolean value)
+#workload_partitioning=false
+
+
+[coordination]
+
+#
+# Options defined in ceilometer.coordination
+#
+
+# The backend URL to use for distributed coordination. If left
+# empty, per-deployment central agent and per-host compute
+# agent won't do workload partitioning and will only function
+# correctly if a single instance of that service is running.
+# (string value)
+#backend_url=
+
+# Number of seconds between heartbeats for distributed
+# coordination (float) (floating point value)
+#heartbeat=1.0
+
+
+[database]
+
+#
+# Options defined in ceilometer.storage
+#
+
+# Number of seconds that samples are kept in the database for
+# (<= 0 means forever). (integer value)
+#time_to_live=-1
+
+# The connection string used to connect to the metering
+# database.
(if unset, connection is used) (string value)
+#metering_connection=
+
+# The connection string used to connect to the alarm database.
+# (if unset, connection is used) (string value)
+#alarm_connection=
+
+
+[dispatcher_file]
+
+#
+# Options defined in ceilometer.dispatcher.file
+#
+
+# Name and the location of the file to record meters. (string
+# value)
+#file_path=
+
+# The max size of the file. (integer value)
+#max_bytes=0
+
+# The max number of the files to keep. (integer value)
+#backup_count=0
+
+
+[event]
+
+#
+# Options defined in ceilometer.event.converter
+#
+
+# Configuration file for event definitions. (string value)
+#definitions_cfg_file=event_definitions.yaml
+
+# Drop notifications if no event definition matches.
+# (Otherwise, we convert them with just the default traits)
+# (boolean value)
+#drop_unmatched_notifications=false
+
+
+[hardware]
+
+#
+# Options defined in ceilometer.hardware.discovery
+#
+
+# URL scheme to use for hardware nodes (string value)
+#url_scheme=snmp://
+
+# SNMPd user name of all nodes running in the cloud. (string
+# value)
+#readonly_user_name=ro_snmp_user
+
+# SNMPd password of all the nodes running in the cloud (string
+# value)
+#readonly_user_password=password
+
+
+[ipmi]
+
+#
+# Options defined in ceilometer.ipmi.platform.intel_node_manager
+#
+
+# Number of retries upon Intel Node Manager initialization
+# failure (integer value)
+#node_manager_init_retry=3
+
+
+[keystone_authtoken]
+
+#
+# Options defined in keystonemiddleware.auth_token
+#
+
+# Prefix to prepend at the beginning of the path. Deprecated,
+# use identity_uri. (string value)
+#auth_admin_prefix=
+
+# Host providing the admin Identity API endpoint. Deprecated,
+# use identity_uri. (string value)
+#auth_host=127.0.0.1
+
+# Port of the admin Identity API endpoint. Deprecated, use
+# identity_uri. (integer value)
+#auth_port=35357
+
+# Protocol of the admin Identity API endpoint (http or https).
+# Deprecated, use identity_uri. (string value)
+#auth_protocol=https
+
+# Complete public Identity API endpoint (string value)
+#auth_uri=
+
+# Complete admin Identity API endpoint. This should specify
+# the unversioned root endpoint e.g. https://localhost:35357/
+# (string value)
+#identity_uri=
+
+# API version of the admin Identity API endpoint (string
+# value)
+#auth_version=
+
+# Do not handle authorization requests within the middleware,
+# but delegate the authorization decision to downstream WSGI
+# components (boolean value)
+#delay_auth_decision=false
+
+# Request timeout value for communicating with Identity API
+# server. (integer value)
+#http_connect_timeout=
+
+# How many times are we trying to reconnect when communicating
+# with Identity API Server. (integer value)
+#http_request_max_retries=3
+
+# This option is deprecated and may be removed in a future
+# release. Single shared secret with the Keystone
+# configuration used for bootstrapping a Keystone
+# installation, or otherwise bypassing the normal
+# authentication process. This option should not be used, use
+# `admin_user` and `admin_password` instead.
(string value) +#admin_token= + +# Keystone account username (string value) +#admin_user= + +# Keystone account password (string value) +#admin_password= + +# Keystone service account tenant name to validate user tokens +# (string value) +#admin_tenant_name=admin + +# Env key for the swift cache (string value) +#cache= + +# Required if Keystone server requires client certificate +# (string value) +#certfile= + +# Required if Keystone server requires client certificate +# (string value) +#keyfile= + +# A PEM encoded Certificate Authority to use when verifying +# HTTPs connections. Defaults to system CAs. (string value) +#cafile= + +# Verify HTTPS connections. (boolean value) +#insecure=false + +# Directory used to cache files related to PKI tokens (string +# value) +#signing_dir= + +# Optionally specify a list of memcached server(s) to use for +# caching. If left undefined, tokens will instead be cached +# in-process. (list value) +# Deprecated group/name - [DEFAULT]/memcache_servers +#memcached_servers= + +# In order to prevent excessive effort spent validating +# tokens, the middleware caches previously-seen tokens for a +# configurable duration (in seconds). Set to -1 to disable +# caching completely. (integer value) +#token_cache_time=300 + +# Determines the frequency at which the list of revoked tokens +# is retrieved from the Identity service (in seconds). A high +# number of revocation events combined with a low cache +# duration may significantly reduce performance. (integer +# value) +#revocation_cache_time=10 + +# (optional) if defined, indicate whether token data should be +# authenticated or authenticated and encrypted. Acceptable +# values are MAC or ENCRYPT. If MAC, token data is +# authenticated (with HMAC) in the cache. If ENCRYPT, token +# data is encrypted and authenticated in the cache. If the +# value is not one of these options or empty, auth_token will +# raise an exception on initialization. (string value) +#memcache_security_strategy= + +# (optional, mandatory if memcache_security_strategy is +# defined) this string is used for key derivation. (string +# value) +#memcache_secret_key= + +# (optional) number of seconds memcached server is considered +# dead before it is tried again. (integer value) +#memcache_pool_dead_retry=300 + +# (optional) max total number of open connections to every +# memcached server. (integer value) +#memcache_pool_maxsize=10 + +# (optional) socket timeout in seconds for communicating with +# a memcache server. (integer value) +#memcache_pool_socket_timeout=3 + +# (optional) number of seconds a connection to memcached is +# held unused in the pool before it is closed. (integer value) +#memcache_pool_unused_timeout=60 + +# (optional) number of seconds that an operation will wait to +# get a memcache client connection from the pool. (integer +# value) +#memcache_pool_conn_get_timeout=10 + +# (optional) use the advanced (eventlet safe) memcache client +# pool. The advanced pool will only work under python 2.x. +# (boolean value) +#memcache_use_advanced_pool=false + +# (optional) indicate whether to set the X-Service-Catalog +# header. If False, middleware will not ask for service +# catalog on token validation and will not set the X-Service- +# Catalog header. (boolean value) +#include_service_catalog=true + +# Used to control the use and type of token binding. Can be +# set to: "disabled" to not check token binding. 
"permissive" +# (default) to validate binding information if the bind type +# is of a form known to the server and ignore it if not. +# "strict" like "permissive" but if the bind type is unknown +# the token will be rejected. "required" any form of token +# binding is needed to be allowed. Finally the name of a +# binding method that must be present in tokens. (string +# value) +#enforce_token_bind=permissive + +# If true, the revocation list will be checked for cached +# tokens. This requires that PKI tokens are configured on the +# Keystone server. (boolean value) +#check_revocations_for_cached=false + +# Hash algorithms to use for hashing PKI tokens. This may be a +# single algorithm or multiple. The algorithms are those +# supported by Python standard hashlib.new(). The hashes will +# be tried in the order given, so put the preferred one first +# for performance. The result of the first hash will be stored +# in the cache. This will typically be set to multiple values +# only while migrating from a less secure algorithm to a more +# secure one. Once all the old tokens are expired this option +# should be set to a single value for better performance. +# (list value) +#hash_algorithms=md5 + + +[matchmaker_redis] + +# +# Options defined in oslo.messaging +# + +# Host to locate redis. (string value) +#host=127.0.0.1 + +# Use this port to connect to redis host. (integer value) +#port=6379 + +# Password for Redis server (optional). (string value) +#password= + + +[matchmaker_ring] + +# +# Options defined in oslo.messaging +# + +# Matchmaker ring file (JSON). (string value) +# Deprecated group/name - [DEFAULT]/matchmaker_ringfile +#ringfile=/etc/oslo/matchmaker_ring.json + + +[notification] + +# +# Options defined in ceilometer.notification +# + +# Acknowledge message when event persistence fails. (boolean +# value) +#ack_on_event_error=true + +# Save event details. (boolean value) +#store_events=false + +# Messaging URLs to listen for notifications. Example: +# transport://user:pass@host1:port[,hostN:portN]/virtual_host +# (DEFAULT/transport_url is used if empty) (multi valued) +#messaging_urls= + + +[publisher] + +# +# Options defined in ceilometer.publisher.utils +# + +# Secret value for signing metering messages. (string value) +# Deprecated group/name - [DEFAULT]/metering_secret +# Deprecated group/name - [publisher_rpc]/metering_secret +#metering_secret=change this or be hacked + + +[publisher_notifier] + +# +# Options defined in ceilometer.publisher.messaging +# + +# The topic that ceilometer uses for metering notifications. +# (string value) +#metering_topic=metering + +# The driver that ceilometer uses for metering notifications. +# (string value) +#metering_driver=messagingv2 + + +[publisher_rpc] + +# +# Options defined in ceilometer.publisher.messaging +# + +# The topic that ceilometer uses for metering messages. +# (string value) +#metering_topic=metering + + +[service_credentials] + +# +# Options defined in ceilometer.service +# + +# User name to use for OpenStack service access. (string +# value) +#os_username=ceilometer + +# Password to use for OpenStack service access. (string value) +#os_password=admin + +# Tenant ID to use for OpenStack service access. (string +# value) +#os_tenant_id= + +# Tenant name to use for OpenStack service access. (string +# value) +#os_tenant_name=admin + +# Certificate chain for SSL validation. (string value) +#os_cacert= + +# Auth URL to use for OpenStack service access. 
(string value)
+#os_auth_url=http://localhost:5000/v2.0
+
+# Region name to use for OpenStack service endpoints. (string
+# value)
+#os_region_name=
+
+# Type of endpoint in Identity service catalog to use for
+# communication with OpenStack services. (string value)
+#os_endpoint_type=publicURL
+
+# Disables X.509 certificate validation when an SSL connection
+# to Identity Service is established. (boolean value)
+#insecure=false
+
+
+[service_types]
+
+#
+# Options defined in ceilometer.neutron_client
+#
+
+# Neutron service type. (string value)
+#neutron=network
+
+
+#
+# Options defined in ceilometer.nova_client
+#
+
+# Nova service type. (string value)
+#nova=compute
+
+
+#
+# Options defined in ceilometer.energy.kwapi
+#
+
+# Kwapi service type. (string value)
+#kwapi=energy
+
+
+#
+# Options defined in ceilometer.image.glance
+#
+
+# Glance service type. (string value)
+#glance=image
+
+
+#
+# Options defined in ceilometer.objectstore.swift
+#
+
+# Swift service type. (string value)
+#swift=object-store
+
+
+[vmware]
+
+#
+# Options defined in ceilometer.compute.virt.vmware.inspector
+#
+
+# IP address of the VMware vSphere host (string value)
+#host_ip=
+
+# Username of VMware vSphere (string value)
+#host_username=
+
+# Password of VMware vSphere (string value)
+#host_password=
+
+# Number of times a VMware vSphere API must be retried
+# (integer value)
+#api_retry_count=10
+
+# Sleep time in seconds for polling an ongoing async task
+# (floating point value)
+#task_poll_interval=0.5
+
+# Optional vim service WSDL location, e.g.
+# http://<server>/vimService.wsdl. Optional over-ride to
+# default location for bug work-arounds (string value)
+#wsdl_location=
+
+
+[xenapi]
+
+#
+# Options defined in ceilometer.compute.virt.xenapi.inspector
+#
+
+# URL for connection to XenServer/Xen Cloud Platform (string
+# value)
+#connection_url=
+
+# Username for connection to XenServer/Xen Cloud Platform
+# (string value)
+#connection_username=root
+
+# Password for connection to XenServer/Xen Cloud Platform
+# (string value)
+#connection_password=
+
+# Timeout in seconds for XenAPI login.
(integer value)
+#login_timeout=10
+
+
diff --git a/openstack/python-ceilometer/centos/files/ceilometer.logrotate b/openstack/python-ceilometer/centos/files/ceilometer.logrotate
new file mode 100644
index 00000000..4caa7112
--- /dev/null
+++ b/openstack/python-ceilometer/centos/files/ceilometer.logrotate
@@ -0,0 +1,9 @@
+compress
+
+/var/log/ceilometer/*.log {
+    rotate 14
+    size 10M
+    missingok
+    compress
+    copytruncate
+}
diff --git a/openstack/python-ceilometer/centos/files/openstack-ceilometer-api.service b/openstack/python-ceilometer/centos/files/openstack-ceilometer-api.service
new file mode 100644
index 00000000..f688a9fd
--- /dev/null
+++ b/openstack/python-ceilometer/centos/files/openstack-ceilometer-api.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=OpenStack ceilometer API service
+After=syslog.target network.target
+
+[Service]
+Type=simple
+User=root
+ExecStart=/bin/python /usr/bin/gunicorn --config /usr/share/ceilometer/ceilometer-api.conf --pythonpath /usr/share/ceilometer ceilometer-api
+#Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/openstack/python-ceilometer/centos/files/openstack-ceilometer-collector.service b/openstack/python-ceilometer/centos/files/openstack-ceilometer-collector.service
new file mode 100644
index 00000000..be98d51c
--- /dev/null
+++ b/openstack/python-ceilometer/centos/files/openstack-ceilometer-collector.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=OpenStack ceilometer collection service
+After=syslog.target network.target
+
+[Service]
+Type=simple
+User=root
+ExecStart=/usr/bin/ceilometer-collector --logfile /var/log/ceilometer/ceilometer-collector.log
+#Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/openstack/python-ceilometer/centos/files/openstack-ceilometer-ipmi.service b/openstack/python-ceilometer/centos/files/openstack-ceilometer-ipmi.service
new file mode 100644
index 00000000..c0950eff
--- /dev/null
+++ b/openstack/python-ceilometer/centos/files/openstack-ceilometer-ipmi.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=OpenStack ceilometer ipmi agent
+After=syslog.target network.target
+
+[Service]
+Type=simple
+User=root
+ExecStart=/usr/bin/ceilometer-polling --polling-namespaces ipmi --logfile /var/log/ceilometer/agent-ipmi.log
+#Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/openstack/python-ceilometer/centos/files/openstack-ceilometer-notification.service b/openstack/python-ceilometer/centos/files/openstack-ceilometer-notification.service
new file mode 100644
index 00000000..64ac2f80
--- /dev/null
+++ b/openstack/python-ceilometer/centos/files/openstack-ceilometer-notification.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=OpenStack ceilometer notification agent
+After=syslog.target network.target
+
+[Service]
+Type=simple
+User=root
+ExecStart=/usr/bin/ceilometer-agent-notification --logfile /var/log/ceilometer/ceilometer-agent-notification.log
+#Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/openstack/python-ceilometer/centos/files/openstack-ceilometer-polling b/openstack/python-ceilometer/centos/files/openstack-ceilometer-polling
new file mode 100644
index 00000000..c2fcada2
--- /dev/null
+++ b/openstack/python-ceilometer/centos/files/openstack-ceilometer-polling
@@ -0,0 +1 @@
+OPTIONS="--polling-namespaces 'central' 'compute'"
diff --git a/openstack/python-ceilometer/centos/files/openstack-ceilometer-polling.service b/openstack/python-ceilometer/centos/files/openstack-ceilometer-polling.service
new file mode 100644
index
00000000..6dc1b3fe --- /dev/null +++ b/openstack/python-ceilometer/centos/files/openstack-ceilometer-polling.service @@ -0,0 +1,16 @@ +[Unit] +Description=OpenStack ceilometer polling agent +After=syslog.target network.target + +[Service] +Type=forking +Restart=no +KillMode=process +RemainAfterExit=yes +ExecStart=/etc/rc.d/init.d/openstack-ceilometer-polling start +ExecStop=/etc/rc.d/init.d/openstack-ceilometer-polling stop +ExecReload=/etc/rc.d/init.d/openstack-ceilometer-polling reload + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/python-ceilometer/centos/openstack-ceilometer.spec b/openstack/python-ceilometer/centos/openstack-ceilometer.spec new file mode 100644 index 00000000..e2797eb9 --- /dev/null +++ b/openstack/python-ceilometer/centos/openstack-ceilometer.spec @@ -0,0 +1,699 @@ +%global _without_doc 1 +%global with_doc %{!?_without_doc:1}%{?_without_doc:0} +%global pypi_name ceilometer +%{!?upstream_version: %global upstream_version %{version}%{?milestone}} + +Name: openstack-ceilometer +# Liberty semver reset +# https://review.openstack.org/#/q/I6a35fa0dda798fad93b804d00a46af80f08d475c,n,z +Epoch: 1 +Version: 9.0.1 +Release: 1%{?_tis_dist}.%{tis_patch_ver} +Summary: OpenStack measurement collection service + +Group: Applications/System +License: ASL 2.0 +URL: https://wiki.openstack.org/wiki/Ceilometer +Source0: %{pypi_name}-%{upstream_version}.tar.gz +Source1: %{pypi_name}-dist.conf +Source4: ceilometer-rootwrap-sudoers + +Source7: ceilometer-expirer-active +Source8: ceilometer-polling +Source9: ceilometer-polling.conf + +Source10: %{name}-api.service +Source11: %{name}-collector.service + +%if 0%{?with_compute} +Source12: %{name}-compute.service +%endif +%if 0%{?with_central} +Source13: %{name}-central.service +%endif + +Source16: %{name}-notification.service +Source17: %{name}-ipmi.service +Source18: %{name}-polling.service + +Source20: ceilometer-polling.conf.pmon.centos +Source21: ceilometer-polling-compute.conf.pmon.centos + + +BuildArch: noarch +BuildRequires: intltool +BuildRequires: openstack-macros +BuildRequires: python-cotyledon +BuildRequires: python-sphinx +BuildRequires: python-setuptools +BuildRequires: python-pbr >= 1.10.0 +BuildRequires: git +BuildRequires: python-d2to1 +BuildRequires: python2-devel +# Required to compile translation files +BuildRequires: python-babel +BuildRequires: systemd-devel +BuildRequires: systemd +BuildRequires: systemd-units + +%description +OpenStack ceilometer provides services to measure and +collect metrics from OpenStack components. 
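
A note on the versioning in the preamble above: Release composes the RDO release number with two StarlingX build macros. The expansion can be previewed with plain rpm macro evaluation; a minimal sketch, with made-up values for _tis_dist and tis_patch_ver (the real build tooling defines both itself):

    rpm --define '_tis_dist .tis' --define 'tis_patch_ver 3' \
        --eval '1%{?_tis_dist}.%{tis_patch_ver}'
    # prints: 1.tis.3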
+ + +%package -n python-ceilometer +Summary: OpenStack ceilometer python libraries +Group: Applications/System + +Requires: python-babel +Requires: python-cachetools >= 1.1.0 +Requires: python-debtcollector >= 1.2.0 +Requires: python-eventlet +Requires: python-futurist >= 0.11.0 +Requires: python-cotyledon +Requires: python-dateutil +Requires: python-greenlet +Requires: python-iso8601 +Requires: python-keystoneauth1 >= 2.1.0 +Requires: python-lxml +Requires: python-anyjson +Requires: python-jsonpath-rw +Requires: python-jsonpath-rw-ext +Requires: python-stevedore >= 1.9.0 +Requires: python-msgpack >= 0.4.0 +Requires: python-pbr +Requires: python-six >= 1.9.0 +Requires: python-tenacity >= 3.2.1 + +Requires: python-sqlalchemy +Requires: python-alembic +Requires: python-migrate + +Requires: python-webob +Requires: python-oslo-config >= 2:3.22.0 +Requires: PyYAML +Requires: python-netaddr +Requires: python-oslo-rootwrap +Requires: python-oslo-vmware >= 0.6.0 +Requires: python-requests >= 2.8.1 + +Requires: pysnmp +Requires: pytz +Requires: python-croniter + +Requires: python-retrying +Requires: python-jsonschema +Requires: python-werkzeug + +Requires: python-oslo-context +Requires: python-oslo-concurrency >= 3.5.0 +Requires: python-oslo-i18n >= 2.1.0 +Requires: python-oslo-log >= 1.14.0 +Requires: python-oslo-middleware >= 3.0.0 +Requires: python-oslo-policy >= 0.5.0 +Requires: python-oslo-reports >= 0.6.0 +Requires: python-monotonic +Requires: python-futures + +%description -n python-ceilometer +OpenStack ceilometer provides services to measure and +collect metrics from OpenStack components. + +This package contains the ceilometer python library. + + +%package common +Summary: Components common to all OpenStack ceilometer services +Group: Applications/System + +Requires: python-ceilometer = %{epoch}:%{version}-%{release} +Requires: python-oslo-messaging >= 5.12.0 +Requires: python-oslo-serialization >= 1.10.0 +Requires: python-oslo-utils >= 3.5.0 +Requires: python-pecan >= 1.0.0 +Requires: python-posix_ipc +Requires: python-gnocchiclient +Requires: python-wsme >= 0.8 +Requires: python-os-xenapi >= 0.1.1 + +Requires(post): systemd-units +Requires(preun): systemd-units +Requires(postun): systemd-units +Requires(pre): shadow-utils + +# Config file generation +BuildRequires: python-os-xenapi +BuildRequires: python-oslo-config >= 2:3.7.0 +BuildRequires: python-oslo-concurrency +BuildRequires: python-oslo-db +BuildRequires: python-oslo-log +BuildRequires: python-oslo-messaging +BuildRequires: python-oslo-policy +BuildRequires: python-oslo-reports +BuildRequires: python-oslo-vmware >= 0.6.0 +BuildRequires: python-glanceclient >= 1:2.0.0 +BuildRequires: python-keystonemiddleware +BuildRequires: python-neutronclient +BuildRequires: python-novaclient >= 1:2.29.0 +BuildRequires: python-swiftclient +BuildRequires: python-croniter +BuildRequires: python-jsonpath-rw +BuildRequires: python-jsonpath-rw-ext +BuildRequires: python-lxml +BuildRequires: python-pecan >= 1.0.0 +BuildRequires: python-tooz +BuildRequires: python-werkzeug +BuildRequires: python-wsme >= 0.7 +BuildRequires: python-gnocchiclient +BuildRequires: python-cinderclient >= 1.7.1 + + +%description common +OpenStack ceilometer provides services to measure and +collect metrics from OpenStack components. + +This package contains components common to all OpenStack +ceilometer services. 
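
Pinning Requires: python-ceilometer to the full %{epoch}:%{version}-%{release} above keeps the shared python library in lock-step with every service subpackage. One way to confirm the generated dependency on a built binary RPM (the filename and versions here are illustrative):

    rpm -qp --requires openstack-ceilometer-common-9.0.1-1.tis.3.noarch.rpm \
        | grep '^python-ceilometer'
    # expected: python-ceilometer = 1:9.0.1-1.tis.3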
+
+
+%if 0%{?with_compute}
+%package compute
+Summary: OpenStack ceilometer compute agent
+Group: Applications/System
+
+Requires: %{name}-common = %{epoch}:%{version}-%{release}
+Requires: %{name}-polling = %{epoch}:%{version}-%{release}
+
+Requires: python-novaclient >= 1:2.29.0
+Requires: python-keystoneclient >= 1:1.6.0
+Requires: python-tooz
+Requires: libvirt-python
+
+%description compute
+OpenStack ceilometer provides services to measure and
+collect metrics from OpenStack components.
+
+This package contains the ceilometer agent for
+running on OpenStack compute nodes.
+
+%endif
+
+%if 0%{?with_central}
+%package central
+Summary: OpenStack ceilometer central agent
+Group: Applications/System
+
+Requires: %{name}-common = %{epoch}:%{version}-%{release}
+Requires: %{name}-polling = %{epoch}:%{version}-%{release}
+
+Requires: python-novaclient >= 1:2.29.0
+Requires: python-keystoneclient >= 1:1.6.0
+Requires: python-glanceclient >= 1:2.0.0
+Requires: python-swiftclient
+Requires: python-neutronclient >= 4.2.0
+Requires: python-tooz
+
+%description central
+OpenStack ceilometer provides services to measure and
+collect metrics from OpenStack components.
+
+This package contains the central ceilometer agent.
+
+%endif
+
+%package collector
+Summary: OpenStack ceilometer collector
+Group: Applications/System
+
+Requires: %{name}-common = %{epoch}:%{version}-%{release}
+
+# For compat with older provisioning tools.
+# Remove when all reference the notification package explicitly
+Requires: %{name}-notification
+
+Requires: python-oslo-db
+Requires: python-pymongo
+
+%description collector
+OpenStack ceilometer provides services to measure and
+collect metrics from OpenStack components.
+
+This package contains the ceilometer collector service
+which collects metrics from the various agents.
+
+
+%package notification
+Summary: OpenStack ceilometer notification agent
+Group: Applications/System
+
+Requires: %{name}-common = %{epoch}:%{version}-%{release}
+
+%description notification
+OpenStack ceilometer provides services to measure and
+collect metrics from OpenStack components.
+
+This package contains the ceilometer notification agent
+which pushes metrics to the collector service from the
+various OpenStack services.
+
+
+%package api
+Summary: OpenStack ceilometer API service
+Group: Applications/System
+
+Requires: %{name}-common = %{epoch}:%{version}-%{release}
+
+Requires: python-keystonemiddleware >= 4.0.0
+Requires: python-oslo-db >= 4.1.0
+Requires: python-pymongo
+Requires: python-paste-deploy
+Requires: python-tooz
+
+%description api
+OpenStack ceilometer provides services to measure and
+collect metrics from OpenStack components.
+
+This package contains the ceilometer API service.
+
+
+%package ipmi
+Summary: OpenStack ceilometer ipmi agent
+Group: Applications/System
+
+Requires: %{name}-common = %{epoch}:%{version}-%{release}
+Requires: %{name}-polling = %{epoch}:%{version}-%{release}
+
+Requires: python-novaclient >= 1:2.29.0
+Requires: python-keystoneclient >= 1:1.6.0
+Requires: python-neutronclient >= 4.2.0
+Requires: python-tooz
+Requires: python-oslo-rootwrap >= 2.0.0
+Requires: ipmitool
+
+%description ipmi
+OpenStack ceilometer provides services to measure and
+collect metrics from OpenStack components.
+
+This package contains the ipmi agent to be run on OpenStack
+nodes from which IPMI sensor data is to be collected directly,
+bypassing Ironic's management of bare metal.
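
As the openstack-ceilometer-ipmi.service unit added earlier in this patch shows, the ipmi agent is simply the polling binary restricted to the ipmi namespace; the equivalent manual invocation (same arguments as the unit file) would be:

    /usr/bin/ceilometer-polling --polling-namespaces ipmi \
        --logfile /var/log/ceilometer/agent-ipmi.log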
+
+
+%package polling
+Summary: OpenStack ceilometer polling agent
+Group: Applications/System
+
+Requires: %{name}-common = %{epoch}:%{version}-%{release}
+
+Requires: python-cinderclient >= 1.7.1
+Requires: python-novaclient >= 1:2.29.0
+Requires: python-keystoneclient >= 1:1.6.0
+Requires: python-glanceclient >= 1:2.0.0
+Requires: python-swiftclient >= 2.2.0
+Requires: libvirt-python
+Requires: python-neutronclient
+Requires: python-tooz
+Requires: /usr/bin/systemctl
+
+
+%description polling
+Ceilometer aims to deliver a unique point of contact for billing systems to
+acquire all counters they need to establish customer billing, across all
+current and future OpenStack components. The delivery of counters must
+be traceable and auditable, the counters must be easily extensible to support
+new projects, and agents doing data collections should be
+independent of the overall system.
+
+This package contains the polling service.
+
+%package -n python-ceilometer-tests
+Summary: Ceilometer tests
+Requires: python-ceilometer = %{epoch}:%{version}-%{release}
+Requires: python-gabbi >= 1.30.0
+
+%description -n python-ceilometer-tests
+OpenStack ceilometer provides services to measure and
+collect metrics from OpenStack components.
+
+This package contains the Ceilometer test files.
+
+%if 0%{?with_doc}
+%package doc
+Summary: Documentation for OpenStack ceilometer
+Group: Documentation
+
+# Required to build module documents
+BuildRequires: python-eventlet
+BuildRequires: python-sqlalchemy
+BuildRequires: python-webob
+BuildRequires: python-openstackdocstheme
+# while not strictly required, quiets the build down when building docs.
+BuildRequires: python-migrate, python-iso8601
+
+%description doc
+OpenStack ceilometer provides services to measure and
+collect metrics from OpenStack components.
+
+This package contains documentation files for ceilometer.
+%endif
+
+%prep
+%autosetup -n ceilometer-%{upstream_version} -S git
+
+find . \( -name .gitignore -o -name .placeholder \) -delete
+
+find ceilometer -name \*.py -exec sed -i '/\/usr\/bin\/env python/{d;q}' {} +
+
+# TODO: Have the following handle multi line entries
+sed -i '/setup_requires/d; /install_requires/d; /dependency_links/d' setup.py
+
+# Remove the requirements file so that pbr hooks don't add it
+# to distutils requires_dist config
+rm -rf {test-,}requirements.txt tools/{pip,test}-requires
+
+%build
+# Generate config file
+PYTHONPATH=. oslo-config-generator --config-file=etc/ceilometer/ceilometer-config-generator.conf
+
+export PBR_VERSION=%{version}
+%{__python2} setup.py build
+
+# Generate i18n files
+export PBR_VERSION=%{version}
+%{__python2} setup.py compile_catalog -d build/lib/%{pypi_name}/locale
+
+# Programmatically update defaults in sample config
+# which is installed at /etc/ceilometer/ceilometer.conf
+# TODO: Make this more robust
+# Note it only edits the first occurrence, so assumes a section ordering in sample
+# and also doesn't support multi-valued variables.
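+# Illustration (values made up): a dist.conf entry such as
+#     logdir = /var/log/ceilometer
+# is read as name="logdir", eq="=", value="/var/log/ceilometer", and the
+# first matching commented default in etc/ceilometer/ceilometer.conf,
+# e.g. "# logdir=<None>", is rewritten in place to
+# "#logdir=/var/log/ceilometer". Section headers such as [DEFAULT] have
+# no value and are skipped by the guard on the first line of the loop.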
+while read name eq value; do + test "$name" && test "$value" || continue + sed -i "0,/^# *$name=/{s!^# *$name=.*!#$name=$value!}" etc/ceilometer/ceilometer.conf +done < %{SOURCE1} + +%install +export PBR_VERSION=%{version} +%{__python2} setup.py install -O1 --skip-build --root %{buildroot} + +# WRS: Install sql migration cfg and sql files that were not installed by setup.py +install -m 644 ceilometer/storage/sqlalchemy/migrate_repo/migrate.cfg %{buildroot}%{python_sitelib}/ceilometer/storage/sqlalchemy/migrate_repo/migrate.cfg +install -m 644 ceilometer/storage/sqlalchemy/migrate_repo/versions/*.sql %{buildroot}%{python_sitelib}/ceilometer/storage/sqlalchemy/migrate_repo/versions/. + +# WRS Mitaka. Install non python files that were not installed by setup.py +install -m 755 -d %{buildroot}%{python_sitelib}/ceilometer/hardware/pollsters/data +install -m 644 ceilometer/hardware/pollsters/data/snmp.yaml %{buildroot}%{python_sitelib}/ceilometer/hardware/pollsters/data/snmp.yaml + + +# Create fake egg-info for the tempest plugin +# TODO switch to %{service} everywhere as in openstack-example.spec +%global service ceilometer +%py2_entrypoint %{service} %{service} + +# docs generation requires everything to be installed first + +pushd doc + +%if 0%{?with_doc} +export PBR_VERSION=%{version} +%{__python2} setup.py build_sphinx -b html +# Fix hidden-file-or-dir warnings +rm -fr doc/build/html/.doctrees doc/build/html/.buildinfo +%endif + +popd + +# Setup directories +install -d -m 755 %{buildroot}%{_sharedstatedir}/ceilometer +install -d -m 755 %{buildroot}%{_sharedstatedir}/ceilometer/tmp +install -d -m 750 %{buildroot}%{_localstatedir}/log/ceilometer + +# Install config files +install -d -m 755 %{buildroot}%{_sysconfdir}/ceilometer +install -d -m 755 %{buildroot}%{_sysconfdir}/ceilometer/rootwrap.d +install -d -m 755 %{buildroot}%{_sysconfdir}/sudoers.d +install -d -m 755 %{buildroot}%{_sysconfdir}/sysconfig +install -d -m 755 %{buildroot}%{_sysconfdir}/ceilometer/meters.d +install -p -D -m 640 %{SOURCE1} %{buildroot}%{_datadir}/ceilometer/ceilometer-dist.conf +install -p -D -m 440 %{SOURCE4} %{buildroot}%{_sysconfdir}/sudoers.d/ceilometer +install -p -D -m 640 etc/ceilometer/ceilometer.conf %{buildroot}%{_sysconfdir}/ceilometer/ceilometer.conf +install -p -D -m 640 etc/ceilometer/policy.json %{buildroot}%{_sysconfdir}/ceilometer/policy.json +install -p -D -m 640 ceilometer/pipeline/data/pipeline.yaml %{buildroot}%{_sysconfdir}/ceilometer/pipeline.yaml +install -p -D -m 640 etc/ceilometer/polling.yaml %{buildroot}%{_sysconfdir}/ceilometer/polling.yaml +install -p -D -m 640 ceilometer/pipeline/data/event_pipeline.yaml %{buildroot}%{_sysconfdir}/ceilometer/event_pipeline.yaml +install -p -D -m 640 ceilometer/pipeline/data/event_definitions.yaml %{buildroot}%{_sysconfdir}/ceilometer/event_definitions.yaml +install -p -D -m 640 etc/ceilometer/api_paste.ini %{buildroot}%{_sysconfdir}/ceilometer/api_paste.ini +install -p -D -m 640 etc/ceilometer/rootwrap.conf %{buildroot}%{_sysconfdir}/ceilometer/rootwrap.conf +install -p -D -m 640 etc/ceilometer/rootwrap.d/ipmi.filters %{buildroot}/%{_sysconfdir}/ceilometer/rootwrap.d/ipmi.filters +install -p -D -m 640 ceilometer/dispatcher/data/gnocchi_resources.yaml %{buildroot}%{_sysconfdir}/ceilometer/gnocchi_resources.yaml +install -p -D -m 640 ceilometer/data/meters.d/meters.yaml %{buildroot}%{_sysconfdir}/ceilometer/meters.d/meters.yaml +# WRS +install -p -D -m 640 etc/ceilometer/controller.yaml %{buildroot}%{_sysconfdir}/ceilometer/controller.yaml +install -p 
-D -m 640 ceilometer/api/ceilometer-api.py %{buildroot}%{_datadir}/ceilometer/ceilometer-api.py + + + +# Install initscripts for services +%if 0%{?rhel} && 0%{?rhel} <= 6 +install -p -D -m 755 %{SOURCE10} %{buildroot}%{_initrddir}/%{name}-api +install -p -D -m 755 %{SOURCE11} %{buildroot}%{_initrddir}/%{name}-collector +%if 0%{?with_compute} +install -p -D -m 755 %{SOURCE12} %{buildroot}%{_initrddir}/%{name}-compute +%endif +%if 0%{?with_central} +install -p -D -m 755 %{SOURCE13} %{buildroot}%{_initrddir}/%{name}-central +%endif +install -p -D -m 755 %{SOURCE16} %{buildroot}%{_initrddir}/%{name}-notification +install -p -D -m 755 %{SOURCE17} %{buildroot}%{_initrddir}/%{name}-ipmi +install -p -D -m 755 %{SOURCE18} %{buildroot}%{_initrddir}/%{name}-polling + +# Install upstart jobs examples +install -d -m 755 %{buildroot}%{_datadir}/ceilometer +install -p -m 644 %{SOURCE100} %{buildroot}%{_datadir}/ceilometer/ +install -p -m 644 %{SOURCE110} %{buildroot}%{_datadir}/ceilometer/ +install -p -m 644 %{SOURCE120} %{buildroot}%{_datadir}/ceilometer/ +install -p -m 644 %{SOURCE130} %{buildroot}%{_datadir}/ceilometer/ +install -p -m 644 %{SOURCE140} %{buildroot}%{_datadir}/ceilometer/ +install -p -m 644 %{SOURCE150} %{buildroot}%{_datadir}/ceilometer/ +install -p -m 644 %{SOURCE160} %{buildroot}%{_datadir}/ceilometer/ +install -p -m 644 %{SOURCE170} %{buildroot}%{_datadir}/ceilometer/ +install -p -m 644 %{SOURCE180} %{buildroot}%{_datadir}/ceilometer/ +%else +install -p -D -m 644 %{SOURCE10} %{buildroot}%{_unitdir}/%{name}-api.service +install -p -D -m 644 %{SOURCE11} %{buildroot}%{_unitdir}/%{name}-collector.service +%if 0%{?with_compute} +install -p -D -m 644 %{SOURCE12} %{buildroot}%{_unitdir}/%{name}-compute.service +%endif +%if 0%{?with_central} +install -p -D -m 644 %{SOURCE13} %{buildroot}%{_unitdir}/%{name}-central.service +%endif +install -p -D -m 644 %{SOURCE16} %{buildroot}%{_unitdir}/%{name}-notification.service +install -p -D -m 644 %{SOURCE17} %{buildroot}%{_unitdir}/%{name}-ipmi.service +install -p -D -m 644 %{SOURCE18} %{buildroot}%{_unitdir}/%{name}-polling.service +%endif + +install -p -D -m 755 %{SOURCE7} %{buildroot}%{_bindir}/ceilometer-expirer-active +install -p -D -m 755 %{SOURCE8} %{buildroot}%{_initrddir}/openstack-ceilometer-polling + +mkdir -p %{buildroot}/%{_sysconfdir}/ceilometer +install -p -D -m 644 %{SOURCE9} %{buildroot}%{_sysconfdir}/ceilometer/ceilometer-polling.conf +install -p -D -m 644 %{SOURCE20} %{buildroot}%{_sysconfdir}/ceilometer/ceilometer-polling.conf.pmon +install -p -D -m 644 %{SOURCE21} %{buildroot}%{_sysconfdir}/ceilometer/ceilometer-polling-compute.conf.pmon + +# Install i18n .mo files (.po and .pot are not required) +install -d -m 755 %{buildroot}%{_datadir} +rm -f %{buildroot}%{python2_sitelib}/%{pypi_name}/locale/*/LC_*/%{pypi_name}*po +rm -f %{buildroot}%{python2_sitelib}/%{pypi_name}/locale/*pot +mv %{buildroot}%{python2_sitelib}/%{pypi_name}/locale %{buildroot}%{_datadir}/locale + +# Find language files +%find_lang %{pypi_name} --all-name + +# Remove unneeded in production stuff +rm -f %{buildroot}/usr/share/doc/ceilometer/README* + +# Remove unused files +rm -fr %{buildroot}/usr/etc + +%pre common +getent group ceilometer >/dev/null || groupadd -r ceilometer --gid 166 +if ! 
getent passwd ceilometer >/dev/null; then
+  # Id reservation request: https://bugzilla.redhat.com/923891
+  useradd -u 166 -r -g ceilometer -G ceilometer,nobody -d %{_sharedstatedir}/ceilometer -s /sbin/nologin -c "OpenStack ceilometer Daemons" ceilometer
+fi
+exit 0
+
+
+%if 0%{?with_compute}
+%post compute
+%systemd_post %{name}-compute.service
+%endif
+
+%post collector
+%systemd_post %{name}-collector.service
+
+%post notification
+%systemd_post %{name}-notification.service
+
+%post api
+%systemd_post %{name}-api.service
+
+%if 0%{?with_central}
+%post central
+%systemd_post %{name}-central.service
+%endif
+
+%post ipmi
+%systemd_post %{name}-ipmi.service
+
+%post polling
+/usr/bin/systemctl disable %{name}-polling.service
+
+%if 0%{?with_compute}
+%preun compute
+%systemd_preun %{name}-compute.service
+%endif
+
+%preun collector
+%systemd_preun %{name}-collector.service
+
+%preun notification
+%systemd_preun %{name}-notification.service
+
+%preun api
+%systemd_preun %{name}-api.service
+
+%if 0%{?with_central}
+%preun central
+%systemd_preun %{name}-central.service
+%endif
+
+%preun ipmi
+%systemd_preun %{name}-ipmi.service
+
+%preun polling
+%systemd_preun %{name}-polling.service
+
+%if 0%{?with_compute}
+%postun compute
+%systemd_postun_with_restart %{name}-compute.service
+%endif
+
+%postun collector
+%systemd_postun_with_restart %{name}-collector.service
+
+%postun notification
+%systemd_postun_with_restart %{name}-notification.service
+
+%postun api
+%systemd_postun_with_restart %{name}-api.service
+
+%if 0%{?with_central}
+%postun central
+%systemd_postun_with_restart %{name}-central.service
+%endif
+
+%postun ipmi
+%systemd_postun_with_restart %{name}-ipmi.service
+
+
+%postun polling
+/usr/bin/systemctl disable %{name}-polling.service
+
+
+%files common -f %{pypi_name}.lang
+%license LICENSE
+%dir %{_sysconfdir}/ceilometer
+%{_datadir}/ceilometer/ceilometer-api.*
+%attr(-, root, ceilometer) %{_datadir}/ceilometer/ceilometer-dist.conf
+%config(noreplace) %attr(-, root, ceilometer) %{_sysconfdir}/ceilometer/ceilometer.conf
+%config(noreplace) %attr(-, root, ceilometer) %{_sysconfdir}/ceilometer/policy.json
+%config(noreplace) %attr(-, root, ceilometer) %{_sysconfdir}/ceilometer/pipeline.yaml
+%config(noreplace) %attr(-, root, ceilometer) %{_sysconfdir}/ceilometer/polling.yaml
+%config(noreplace) %attr(-, root, ceilometer) %{_sysconfdir}/ceilometer/api_paste.ini
+%config(noreplace) %attr(-, root, ceilometer) %{_sysconfdir}/ceilometer/gnocchi_resources.yaml
+
+%{_sysconfdir}/ceilometer/controller.yaml
+
+%dir %attr(0750, ceilometer, root) %{_localstatedir}/log/ceilometer
+
+%{_bindir}/ceilometer-db-legacy-clean
+%{_bindir}/ceilometer-expirer
+%{_bindir}/ceilometer-send-sample
+%{_bindir}/ceilometer-upgrade
+
+%defattr(-, ceilometer, ceilometer, -)
+%dir %{_sharedstatedir}/ceilometer
+%dir %{_sharedstatedir}/ceilometer/tmp
+
+
+%files -n python-ceilometer
+%{python2_sitelib}/ceilometer
+%{python2_sitelib}/ceilometer-*.egg-info
+%exclude %{python2_sitelib}/ceilometer/tests
+
+%files -n python-ceilometer-tests
+%license LICENSE
+%{python2_sitelib}/ceilometer/tests
+%{python2_sitelib}/%{service}_tests.egg-info
+
+%if 0%{?with_doc}
+%files doc
+%doc doc/build/html
+%endif
+
+%if 0%{?with_compute}
+%files compute
+%{_unitdir}/%{name}-compute.service
+%endif
+
+
+%files collector
+%{_bindir}/ceilometer-collector*
+%{_bindir}/ceilometer-expirer-active
+%{_unitdir}/%{name}-collector.service
+
+
+%files notification
+%config(noreplace) %attr(-, root, ceilometer)
%{_sysconfdir}/ceilometer/event_pipeline.yaml +%config(noreplace) %attr(-, root, ceilometer) %{_sysconfdir}/ceilometer/event_definitions.yaml +%dir %{_sysconfdir}/ceilometer/meters.d +%config(noreplace) %attr(-, root, ceilometer) %{_sysconfdir}/ceilometer/meters.d/meters.yaml +%{_bindir}/ceilometer-agent-notification +%{_unitdir}/%{name}-notification.service + + +%files api +%{_bindir}/ceilometer-api +%{_unitdir}/%{name}-api.service + + +%if 0%{?with_central} +%files central +%{_unitdir}/%{name}-central.service +%endif + + +%files ipmi +%config(noreplace) %attr(-, root, ceilometer) %{_sysconfdir}/ceilometer/rootwrap.conf +%config(noreplace) %attr(-, root, ceilometer) %{_sysconfdir}/ceilometer/rootwrap.d/ipmi.filters +%{_bindir}/ceilometer-rootwrap +%{_sysconfdir}/sudoers.d/ceilometer +%{_unitdir}/%{name}-ipmi.service + +%files polling +%{_bindir}/ceilometer-polling +%{_initrddir}/openstack-ceilometer-polling +%{_sysconfdir}/ceilometer/ceilometer-polling.conf +%{_sysconfdir}/ceilometer/ceilometer-polling.conf.pmon +%{_sysconfdir}/ceilometer/ceilometer-polling-compute.conf.pmon +%{_unitdir}/%{name}-polling.service + + +%changelog +* Tue Sep 12 2017 rdo-trunk 1:9.0.1-1 +- Update to 9.0.1 + +* Thu Aug 24 2017 Alfredo Moralejo 1:9.0.0-1 +- Update to 9.0.0 + diff --git a/openstack/python-ceilometer/python-ceilometer/static/ceilometer-agent-compute b/openstack/python-ceilometer/python-ceilometer/static/ceilometer-agent-compute new file mode 100644 index 00000000..d105989d --- /dev/null +++ b/openstack/python-ceilometer/python-ceilometer/static/ceilometer-agent-compute @@ -0,0 +1,125 @@ +#!/bin/sh + +### BEGIN INIT INFO +# Provides: +# Required-Start: $remote_fs $network $syslog +# Required-Stop: $remote_fs $syslog +# Default-Start: 3 5 +# Default-Stop: 0 1 2 6 +# Short-Description: Ceilometer Servers +# Description: OpenStack Monitoring Service (code-named Ceilometer) server(s) +### END INIT INFO + +SUFFIX=agent-compute +DESC="ceilometer-$SUFFIX" +DAEMON="/usr/bin/ceilometer-$SUFFIX" +CONFIG="/etc/ceilometer/ceilometer.conf" +PIDFILE="/var/run/ceilometer-$SUFFIX.pid" + +start() +{ + if [ -e $PIDFILE ]; then + PIDDIR=/proc/$(cat $PIDFILE) + if [ -d ${PIDDIR} ]; then + echo "$DESC already running." + exit 1 + else + echo "Removing stale PID file $PIDFILE" + rm -f $PIDFILE + fi + fi + + if [ ! -d /var/log/ceilometer ]; then + mkdir /var/log/ceilometer + fi + + # load up the platform info including nodetype and subfunction + source /etc/platform/platform.conf + + # We'll need special handling for controller with compute subfunction, + function=`echo "$subfunction" | cut -f 2 -d','` + if [ "$nodetype" != "compute" -a "$function" != "compute" ] ; then + logger -t $0 -p warn "exiting because this is not compute host" + exit 0 + fi + + echo -n "Starting $DESC..." + + start-stop-daemon --start --quiet --background \ + --pidfile ${PIDFILE} --make-pidfile --exec ${DAEMON} \ + -- --config-file $CONFIG --log-dir=/var/log/ceilometer + + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." + fi +} + +stop() +{ + echo -n "Stopping $DESC..." + start-stop-daemon --stop --quiet --pidfile $PIDFILE + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." + fi + rm -f $PIDFILE +} + +status() +{ + pid=`cat $PIDFILE 2>/dev/null` + if [ -n "$pid" ]; then + if ps -p $pid &>/dev/null ; then + echo "$DESC is running" + return + fi + fi + echo "$DESC is not running" +} + +reset() +{ + stop + + # This is to make sure postgres is configured and running + if ! 
pidof postmaster > /dev/null; then
+        /etc/init.d/postgresql-init
+        /etc/init.d/postgresql start
+        sleep 2
+    fi
+    [ ! -d /var/log/ceilometer ] && mkdir /var/log/ceilometer
+    sudo -u postgres dropdb ceilometer
+    sudo -u postgres createdb ceilometer
+    ceilometer-dbsync
+
+    start
+}
+
+case "$1" in
+    start)
+        start
+        ;;
+    stop)
+        stop
+        ;;
+    restart|force-reload|reload)
+        stop
+        start
+        ;;
+    status)
+        status
+        ;;
+    reset)
+        reset
+        ;;
+    *)
+        echo "Usage: $0 {start|stop|force-reload|restart|reload|status|reset}"
+        exit 1
+        ;;
+esac
+
+exit 0
+
diff --git a/openstack/python-ceilometer/python-ceilometer/static/ceilometer-expirer-active b/openstack/python-ceilometer/python-ceilometer/static/ceilometer-expirer-active
new file mode 100644
index 00000000..e800d36e
--- /dev/null
+++ b/openstack/python-ceilometer/python-ceilometer/static/ceilometer-expirer-active
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+#
+# Wrapper script to run ceilometer-expirer when on active controller only
+#
+CEILOMETER_EXPIRER_INFO="/var/run/ceilometer-expirer.info"
+CEILOMETER_EXPIRER_CMD="/usr/bin/nice -n 2 /usr/bin/ceilometer-expirer"
+
+function is_active_pgserver()
+{
+    # Determine whether we're running on the same controller as the service.
+    local service=postgres
+    local enabledactive=$(/usr/bin/sm-query service $service| grep enabled-active)
+    if [ "x$enabledactive" == "x" ]
+    then
+        # enabled-active not found for that service on this controller
+        return 1
+    else
+        # enabled-active found for that resource
+        return 0
+    fi
+}
+
+if is_active_pgserver
+then
+    if [ ! -f ${CEILOMETER_EXPIRER_INFO} ]
+    then
+        echo skip_count=0 > ${CEILOMETER_EXPIRER_INFO}
+    fi
+
+    source ${CEILOMETER_EXPIRER_INFO}
+    sudo -u postgres psql -d sysinv -c "SELECT alarm_id, entity_instance_id from i_alarm;" | grep -P "^(?=.*100.101)(?=.*${HOSTNAME})" &>/dev/null
+    if [ $? -eq 0 ]
+    then
+        source /etc/platform/platform.conf
+        if [ "${system_type}" = "All-in-one" ]
+        then
+            source /etc/init.d/task_affinity_functions.sh
+            idle_core=$(get_most_idle_core)
+            if [ "$idle_core" -ne "0" ]
+            then
+                sh -c "exec taskset -c $idle_core ${CEILOMETER_EXPIRER_CMD}"
+                sed -i "/skip_count/s/=.*/=0/" ${CEILOMETER_EXPIRER_INFO}
+                exit 0
+            fi
+        fi
+
+        if [ "$skip_count" -lt "3" ]
+        then
+            newval=$(($skip_count+1))
+            sed -i "/skip_count/s/=.*/=$newval/" ${CEILOMETER_EXPIRER_INFO}
+            exit 0
+        fi
+    fi
+
+    eval ${CEILOMETER_EXPIRER_CMD}
+    sed -i "/skip_count/s/=.*/=0/" ${CEILOMETER_EXPIRER_INFO}
+fi
+
+exit 0
diff --git a/openstack/python-ceilometer/python-ceilometer/static/ceilometer-polling b/openstack/python-ceilometer/python-ceilometer/static/ceilometer-polling
new file mode 100644
index 00000000..1b402485
--- /dev/null
+++ b/openstack/python-ceilometer/python-ceilometer/static/ceilometer-polling
@@ -0,0 +1,138 @@
+#!/bin/sh
+
+### BEGIN INIT INFO
+# Provides:
+# Required-Start: $remote_fs $network $syslog
+# Required-Stop: $remote_fs $syslog
+# Default-Start: 3 5
+# Default-Stop: 0 1 2 6
+# Short-Description: Ceilometer Servers
+# Description: OpenStack Monitoring Service (code-named Ceilometer) server(s)
+### END INIT INFO
+
+# Platform paths and flags
+. /usr/bin/tsconfig
+
+SUFFIX=polling
+DESC="ceilometer-$SUFFIX"
+DAEMON="/usr/bin/ceilometer-$SUFFIX"
+CONFIG="/etc/ceilometer/ceilometer.conf"
+PIDFILE="/var/run/ceilometer-$SUFFIX.pid"
+COMPLETED="/etc/platform/.initial_config_complete"
+
+start()
+{
+    if [ ! -f $COMPLETED ]; then
+        echo "Waiting for $COMPLETED"
+        exit 0
+    fi
+
+    .
$PLATFORM_CONF_FILE + if [[ "$nodetype" == "compute" || "$subfunction" == *"compute"* ]] ; then + if [ ! -f $VOLATILE_COMPUTE_CONFIG_COMPLETE ]; then + # Do not start polling until compute manifests have been applied + echo "Waiting for $VOLATILE_COMPUTE_CONFIG_COMPLETE" + exit 0 + elif [ -f $VOLATILE_DISABLE_COMPUTE_SERVICES ]; then + # Do not start polling if compute services are disabled. This can + # happen during an upgrade when controller-1 is running a newer + # load than controller-0. + echo "Waiting for $VOLATILE_DISABLE_COMPUTE_SERVICES" + exit 0 + fi + fi + + if [ -e $PIDFILE ]; then + PIDDIR=/proc/$(cat $PIDFILE) + if [ -d ${PIDDIR} ]; then + echo "$DESC already running." + exit 1 + else + echo "Removing stale PID file $PIDFILE" + rm -f $PIDFILE + fi + fi + + if [ ! -d /var/log/ceilometer ]; then + mkdir /var/log/ceilometer + fi + + echo -n "Starting $DESC..." + + start-stop-daemon --start --quiet --background \ + --pidfile ${PIDFILE} --make-pidfile --exec ${DAEMON} \ + -- --config-file $CONFIG --log-dir=/var/log/ceilometer + + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." + fi +} + +stop() +{ + echo -n "Stopping $DESC..." + start-stop-daemon --stop --quiet --pidfile $PIDFILE + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." + fi + rm -f $PIDFILE +} + +status() +{ + pid=`cat $PIDFILE 2>/dev/null` + if [ -n "$pid" ]; then + if ps -p $pid &>/dev/null ; then + echo "$DESC is running" + return + fi + fi + echo "$DESC is not running" +} + +reset() +{ + stop + + # This is to make sure postgres is configured and running + if ! pidof postmaster > /dev/null; then + /etc/init.d/postgresql-init + /etc/init.d/postgresql start + sleep 2 + fi + [ ! -d /var/log/ceilometer ] && mkdir /var/log/ceilometer + sudo -u postgres dropdb ceilometer + sudo -u postgres createdb ceilometer + ceilometer-dbsync + + start +} + +case "$1" in + start) + start + ;; + stop) + stop + ;; + restart|force-reload|reload) + stop + start + ;; + status) + status + ;; + reset) + reset + ;; + *) + echo "Usage: $0 {start|stop|force-reload|restart|reload|status|reset}" + exit 1 + ;; +esac + +exit 0 diff --git a/openstack/python-ceilometer/python-ceilometer/static/ceilometer-polling-compute.conf.pmon.centos b/openstack/python-ceilometer/python-ceilometer/static/ceilometer-polling-compute.conf.pmon.centos new file mode 100644 index 00000000..adafc727 --- /dev/null +++ b/openstack/python-ceilometer/python-ceilometer/static/ceilometer-polling-compute.conf.pmon.centos @@ -0,0 +1,26 @@ +[process] +process = ceilometer-polling +service = openstack-ceilometer-polling +pidfile = /var/run/ceilometer-polling.pid +script = /etc/init.d/openstack-ceilometer-polling +style = lsb ; ocf or lsb +severity = minor ; minor, major, critical +restarts = 5 ; restart retries before error assertion +interval = 10 ; number of seconds to wait between restarts +debounce = 20 ; number of seconds that a process needs to remain + ; running before degrade is removed and retry count + ; is cleared. + +startuptime = 5 ; Seconds to wait after process start before starting the debounce monitor +mode = passive ; Monitoring mode: passive (default) or active + ; passive: process death monitoring (default: always) + ; active : heartbeat monitoring, i.e. request / response messaging + ; ignore : do not monitor or stop monitoring +subfunction = compute ; Optional label. + ; Manage this process in the context of a combo host subfunction + ; Choices: compute or storage. 
+                ; when specified pmond will wait for
+                ; /var/run/.compute_config_complete or
+                ; /var/run/.storage_config_complete
+                ; ... before managing this process with the specified subfunction
+                ; Excluding this label will cause this process to be managed by default on startup
diff --git a/openstack/python-ceilometer/python-ceilometer/static/ceilometer-polling.conf b/openstack/python-ceilometer/python-ceilometer/static/ceilometer-polling.conf
new file mode 100644
index 00000000..23a5736c
--- /dev/null
+++ b/openstack/python-ceilometer/python-ceilometer/static/ceilometer-polling.conf
@@ -0,0 +1,19 @@
+[process]
+process = ceilometer-polling
+pidfile = /var/run/ceilometer-polling.pid
+script = /etc/init.d/ceilometer-polling
+style = lsb ; ocf or lsb
+severity = minor ; minor, major, critical
+restarts = 5 ; restart retries before error assertion
+interval = 10 ; number of seconds to wait between restarts
+debounce = 20 ; number of seconds that a process needs to remain
+              ; running before degrade is removed and retry count
+              ; is cleared.
+; These settings will generate a log only without attempting to restart
+; pmond will put the process into an ignore state after failure.
+
+startuptime = 5 ; Seconds to wait after process start before starting the debounce monitor
+mode = passive ; Monitoring mode: passive (default) or active
+               ; passive: process death monitoring (default: always)
+               ; active : heartbeat monitoring, i.e. request / response messaging
+               ; ignore : do not monitor or stop monitoring
diff --git a/openstack/python-ceilometer/python-ceilometer/static/ceilometer-polling.conf.pmon.centos b/openstack/python-ceilometer/python-ceilometer/static/ceilometer-polling.conf.pmon.centos
new file mode 100644
index 00000000..1f467b27
--- /dev/null
+++ b/openstack/python-ceilometer/python-ceilometer/static/ceilometer-polling.conf.pmon.centos
@@ -0,0 +1,18 @@
+[process]
+process = ceilometer-polling
+service = openstack-ceilometer-polling
+pidfile = /var/run/ceilometer-polling.pid
+script = /etc/init.d/openstack-ceilometer-polling
+style = lsb ; ocf or lsb
+severity = minor ; minor, major, critical
+restarts = 5 ; restart retries before error assertion
+interval = 10 ; number of seconds to wait between restarts
+debounce = 20 ; number of seconds that a process needs to remain
+              ; running before degrade is removed and retry count
+              ; is cleared.
+
+startuptime = 5 ; Seconds to wait after process start before starting the debounce monitor
+mode = passive ; Monitoring mode: passive (default) or active
+               ; passive: process death monitoring (default: always)
+               ; active : heartbeat monitoring, i.e. request / response messaging
+               ; ignore : do not monitor or stop monitoring
diff --git a/openstack/python-ceilometerclient/centos/build_srpm.data b/openstack/python-ceilometerclient/centos/build_srpm.data
new file mode 100644
index 00000000..c6cf79a9
--- /dev/null
+++ b/openstack/python-ceilometerclient/centos/build_srpm.data
@@ -0,0 +1,10 @@
+TAR_NAME=python-ceilometerclient
+SRC_DIR=$CGCS_BASE/git/python-ceilometerclient
+
+# Tar everything found in this subdirectory. Define this if source needs to be collected into a tarball in SOURCES.
+# Tar file name and version are derived from PKG-INFO. Alternatively you may define TAR_NAME and VERSION
+
+# A space-separated list of paths to copy to .distro/centos7/rpmbuild/SOURCES.
+#COPY_LIST="$CGCS_BASE/downloads/$CLIENT_NAME-$CLIENT_VER.tar.gz $PKG_BASE/$CLIENT_NAME/*" +TIS_BASE_SRCREV=d650f70d16b41b5f5bf65cc5685a30cae027d729 +TIS_PATCH_VER=GITREVCOUNT diff --git a/openstack/python-ceilometerclient/centos/python-ceilometerclient.spec b/openstack/python-ceilometerclient/centos/python-ceilometerclient.spec new file mode 100644 index 00000000..0ee1f99c --- /dev/null +++ b/openstack/python-ceilometerclient/centos/python-ceilometerclient.spec @@ -0,0 +1,193 @@ +%{!?upstream_version: %global upstream_version %{version}%{?milestone}} + +%if 0%{?fedora} +%global with_python3 1 +%endif + +%global sname ceilometerclient +%global sum Python API and CLI for OpenStack Ceilometer + +Name: python-ceilometerclient +Version: 2.9.0 +Release: 1%{?_tis_dist}.%{tis_patch_ver} +Summary: %{sum} + +License: ASL 2.0 +URL: https://github.com/openstack/%{name} +Source0: %{name}-%{upstream_version}.tar.gz + +BuildArch: noarch + +BuildRequires: git +BuildRequires: python-setuptools +BuildRequires: python2-devel +BuildRequires: python-pbr >= 1.6 +%if 0%{?with_python3} +BuildRequires: python3-devel +BuildRequires: python3-setuptools +BuildRequires: python3-pbr >= 1.6 +%endif + +%description +This is a client library for Ceilometer built on the Ceilometer API. It +provides a Python API (the ceilometerclient module) and a command-line tool +(ceilometer). + + +%package -n python2-%{sname} +Summary: %{sum} +# from requirements.txt +Requires: python-iso8601 +Requires: python-oslo-i18n >= 2.1.0 +Requires: python-oslo-serialization >= 1.10.0 +Requires: python-oslo-utils >= 3.17.0 +Requires: python-requests >= 2.8.1 +Requires: python-six >= 1.9.0 +Requires: python-stevedore +Requires: python-pbr +Requires: python-keystoneauth1 >= 2.1.0 +Requires: python-prettytable +%{?python_provide:%python_provide python2-%{sname}} + +%description -n python2-%{sname} +This is a client library for Ceilometer built on the Ceilometer API. It +provides a Python API (the ceilometerclient module) and a command-line tool +(ceilometer). + + +%if 0%{?with_python3} +%package -n python3-%{sname} +Summary: %{sum} +# from requirements.txt +Requires: python3-iso8601 +Requires: python3-oslo-i18n >= 2.1.0 +Requires: python3-oslo-serialization >= 1.10.0 +Requires: python3-oslo-utils >= 3.17.0 +Requires: python3-requests >= 2.8.1 +Requires: python3-six >= 1.9.0 +Requires: python3-stevedore +Requires: python3-pbr +Requires: python3-keystoneauth1 >= 2.1.0 +Requires: python3-prettytable +%{?python_provide:%python_provide python3-%{sname}} + +# WRS installs to usr/lib and not /usr/lib64 + +%description -n python3-%{sname} +This is a client library for Ceilometer built on the Ceilometer API. It +provides a Python API (the ceilometerclient module) and a command-line tool +(ceilometer). +%endif # with_python3 + + +%package doc +Summary: Documentation for OpenStack Ceilometer API Client + +BuildRequires: python-sphinx +# FIXME: remove following line when a new release including https://review.openstack.org/#/c/476759/ is in u-u +BuildRequires: python-oslo-sphinx +BuildRequires: python-openstackdocstheme + +%description doc +This is a client library for Ceilometer built on the Ceilometer API. It +provides a Python API (the ceilometerclient module) and a command-line tool +(ceilometer). + +This package contains auto-generated documentation. 
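
Since with_python3 is only defined when the fedora macro is set, the python3 subpackage above is skipped entirely on CentOS builds. A quick way to see how the guard resolves on a given host, using standard rpm macro expansion (output illustrative):

    rpm --eval '%{?fedora}'
    # Fedora: prints the release number, so %if 0%{?with_python3} is true;
    # CentOS/RHEL: prints nothing and the python3-ceilometerclient
    # subpackage is not built.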
+ + +%package sdk +Summary: SDK files for %{name} + +%description sdk +Contains SDK files for %{name} package + + +%prep +%autosetup -n %{name}-%{upstream_version} -S git + +# Remove bundled egg-info +rm -rf python_%{sname}.egg-info + +# Let RPM handle the requirements +rm -f test-requirements.txt + +%build +export PBR_VERSION=%{version} +%py2_build +%if 0%{?with_python3} +%py3_build +%endif + +%install +export PBR_VERSION=%{version} +%{__python2} setup.py install -O1 --skip-build --root %{buildroot} + +%if 0%{?with_python3} +%{__python3} setup.py install -O1 --skip-build --root %{buildroot} +%endif + +%if 0%{?with_python3} +%py3_install +mv %{buildroot}%{_bindir}/ceilometer %{buildroot}%{_bindir}/ceilometer-%{python3_version} +ln -s ./ceilometer-%{python3_version} %{buildroot}%{_bindir}/ceilometer-3 +%endif + +%py2_install +mv %{buildroot}%{_bindir}/ceilometer %{buildroot}%{_bindir}/ceilometer-%{python2_version} +ln -s ./ceilometer-%{python2_version} %{buildroot}%{_bindir}/ceilometer-2 + +ln -s ./ceilometer-2 %{buildroot}%{_bindir}/ceilometer + +# Delete tests +rm -fr %{buildroot}%{python2_sitelib}/%{sname}/tests +%if 0%{?with_python3} +rm -fr %{buildroot}%{python3_sitelib}/%{sname}/tests +%endif + +# Build HTML docs +%{__python2} setup.py build_sphinx -b html + +# Fix hidden-file-or-dir warnings +rm -rf doc/build/html/.doctrees doc/build/html/.buildinfo + +# WRS +install -d %{buildroot}/%{_sysconfdir}/bash_completion.d +install -m 664 tools/ceilometer.bash_completion %{buildroot}/%{_sysconfdir}/bash_completion.d/ceilometer.bash_completion + +# prep SDK package +mkdir -p %{buildroot}/usr/share/remote-clients +tar zcf %{buildroot}/usr/share/remote-clients/%{name}-%{version}.tgz --exclude='.gitignore' --exclude='.gitreview' -C .. %{name}-%{version} + +%files -n python2-%{sname} +%license LICENSE +%doc README.rst +%{python2_sitelib}/%{sname} +%{python2_sitelib}/*.egg-info +%{_bindir}/ceilometer +%{_bindir}/ceilometer-2 +%{_bindir}/ceilometer-%{python2_version} +%{_sysconfdir}/bash_completion.d/ceilometer.bash_completion + +%if 0%{?with_python3} +%files -n python3-%{sname} +%license LICENSE +%doc README.rst +%{python3_sitelib}/%{sname} +%{python3_sitelib}/*.egg-info +%{_bindir}/ceilometer-3 +%{_bindir}/ceilometer-%{python3_version} +%{_sysconfdir}/bash_completion.d/ceilometer.bash_completion +%endif # with_python3 + +%files doc +%license LICENSE +%doc doc/build/html + +%files sdk +/usr/share/remote-clients/%{name}-%{version}.tgz + +%changelog +* Fri Aug 11 2017 Alfredo Moralejo 2.9.0-1 +- Update to 2.9.0 + diff --git a/openstack/python-cinder/centos/build_srpm.data b/openstack/python-cinder/centos/build_srpm.data new file mode 100644 index 00000000..5d992563 --- /dev/null +++ b/openstack/python-cinder/centos/build_srpm.data @@ -0,0 +1,5 @@ +SRC_DIR="$CGCS_BASE/git/cinder" +COPY_LIST="$FILES_BASE/*" +TIS_BASE_SRCREV=90b64640126fd88e50b2f05841c393757f4faae7 +TIS_PATCH_VER=GITREVCOUNT +BUILD_IS_SLOW=5 diff --git a/openstack/python-cinder/centos/files/cinder-dist.conf b/openstack/python-cinder/centos/files/cinder-dist.conf new file mode 100644 index 00000000..5df5eaee --- /dev/null +++ b/openstack/python-cinder/centos/files/cinder-dist.conf @@ -0,0 +1,19 @@ +[DEFAULT] +logdir = /var/log/cinder +state_path = /var/lib/cinder +lock_path = /var/lib/cinder/tmp +volumes_dir = /etc/cinder/volumes +iscsi_helper = lioadm +rootwrap_config = /etc/cinder/rootwrap.conf +auth_strategy = keystone + +[database] +connection = mysql://cinder:cinder@localhost/cinder + +[keystone_authtoken] +admin_tenant_name = 
%SERVICE_TENANT_NAME% +admin_user = %SERVICE_USER% +admin_password = %SERVICE_PASSWORD% +auth_host = 127.0.0.1 +auth_port = 35357 +auth_protocol = http diff --git a/openstack/python-cinder/centos/files/cinder-purge-deleted-active b/openstack/python-cinder/centos/files/cinder-purge-deleted-active new file mode 100644 index 00000000..79747105 --- /dev/null +++ b/openstack/python-cinder/centos/files/cinder-purge-deleted-active @@ -0,0 +1,63 @@ +#!/bin/bash + +# +# Wrapper script to run cinder-manage to purge deleted records on the active controller only +# +CINDER_PURGE_INFO="/var/run/cinder-purge.info" +CINDER_PURGE_CMD="/usr/bin/nice -n 2 /usr/bin/cinder-manage db purge 1 >>/dev/null 2>&1" + +function is_active_pgserver() +{ + # Determine whether we're running on the same controller as the service. + local service=postgres + local enabledactive=$(/usr/bin/sm-query service $service | grep enabled-active) + if [ "x$enabledactive" == "x" ] + then + # enabled-active not found for that service on this controller + return 1 + else + # enabled-active found for that resource + return 0 + fi +} + +if is_active_pgserver +then + if [ ! -f ${CINDER_PURGE_INFO} ] + then + echo delay_count=0 > ${CINDER_PURGE_INFO} + fi + + source ${CINDER_PURGE_INFO} + # An alarm 100.101 raised against this host signals a busy platform; in that + # case offload the purge to an idle core or defer it. + sudo -u postgres psql -d sysinv -c "SELECT alarm_id, entity_instance_id from i_alarm;" | grep -P "^(?=.*100.101)(?=.*${HOSTNAME})" &>/dev/null + if [ $? -eq 0 ] + then + source /etc/platform/platform.conf + if [ "${system_type}" = "All-in-one" ] + then + source /etc/init.d/task_affinity_functions.sh + idle_core=$(get_most_idle_core) + if [ "$idle_core" -ne "0" ] + then + # Purge soft deleted records that are older than 1 day from cinder database. + sh -c "exec taskset -c $idle_core ${CINDER_PURGE_CMD}" + sed -i "/delay_count/s/=.*/=0/" ${CINDER_PURGE_INFO} + exit 0 + fi + fi + + if [ "$delay_count" -lt "3" ] + then + newval=$(($delay_count+1)) + sed -i "/delay_count/s/=.*/=$newval/" ${CINDER_PURGE_INFO} + (sleep 3600; /usr/bin/cinder-purge-deleted-active) & + exit 0 + fi + fi + + # Purge soft deleted records that are older than 1 day from cinder database. + eval ${CINDER_PURGE_CMD} + sed -i "/delay_count/s/=.*/=0/" ${CINDER_PURGE_INFO} +fi + +exit 0 diff --git a/openstack/python-cinder/centos/files/cinder-sudoers b/openstack/python-cinder/centos/files/cinder-sudoers new file mode 100644 index 00000000..ed5d479d --- /dev/null +++ b/openstack/python-cinder/centos/files/cinder-sudoers @@ -0,0 +1,3 @@ +Defaults:cinder !requiretty + +cinder ALL = (root) NOPASSWD: /usr/bin/cinder-rootwrap /etc/cinder/rootwrap.conf * diff --git a/openstack/python-cinder/centos/files/cinder.conf.sample b/openstack/python-cinder/centos/files/cinder.conf.sample new file mode 100644 index 00000000..52982660 --- /dev/null +++ b/openstack/python-cinder/centos/files/cinder.conf.sample @@ -0,0 +1,5154 @@ +[DEFAULT] + +# +# From cinder +# + +# The maximum number of items that a collection resource returns in a single +# response (integer value) +#osapi_max_limit = 1000 + +# DEPRECATED: Base URL that will be presented to users in links to the +# OpenStack Volume API (string value) +# Deprecated group/name - [DEFAULT]/osapi_compute_link_prefix +#osapi_volume_base_URL = + +# JSON file indicating user visible filter parameters for list queries. (string +# value) +# Deprecated group/name - [DEFAULT]/query_volume_filters +#resource_query_filters_file = /etc/cinder/resource_filters.json + +# DEPRECATED: Volume filter options which non-admin user could use to query +# volumes. 
Default values are: ['name', 'status', 'metadata', +# 'availability_zone', 'bootable', 'group_id'] (list value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#query_volume_filters = name,status,metadata,availability_zone,bootable,group_id + +# DEPRECATED: Allow the ability to modify the extra-spec settings of an in-use +# volume-type. (boolean value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#allow_inuse_volume_type_modification = false + +# Treat X-Forwarded-For as the canonical remote address. Only enable this if +# you have a sanitizing proxy. (boolean value) +#use_forwarded_for = false + +# Public url to use for versions endpoint. The default is None, which will use +# the request's host_url attribute to populate the URL base. If Cinder is +# operating behind a proxy, you will want to change this to represent the +# proxy's URL. (string value) +# Deprecated group/name - [DEFAULT]/osapi_volume_base_URL +#public_endpoint = + +# Backup services use same backend. (boolean value) +#backup_use_same_host = false + +# Compression algorithm (None to disable) (string value) +# Allowed values: none, off, no, zlib, gzip, bz2, bzip2 +#backup_compression_algorithm = zlib + +# Backup metadata version to be used when backing up volume metadata. If this +# number is bumped, make sure the service doing the restore supports the new +# version. (integer value) +#backup_metadata_version = 2 + +# The number of chunks or objects, for which one Ceilometer notification will +# be sent (integer value) +#backup_object_number_per_notification = 10 + +# Interval, in seconds, between two progress notifications reporting the backup +# status (integer value) +#backup_timer_interval = 120 + +# Ceph configuration file to use. (string value) +#backup_ceph_conf = /etc/ceph/ceph.conf + +# The Ceph user to connect with. Default here is to use the same user as for +# Cinder volumes. If not using cephx this should be set to None. (string value) +#backup_ceph_user = cinder + +# The chunk size, in bytes, that a backup is broken into before transfer to the +# Ceph object store. (integer value) +#backup_ceph_chunk_size = 134217728 + +# The Ceph pool where volume backups are stored. (string value) +#backup_ceph_pool = backups + +# RBD stripe unit to use when creating a backup image. (integer value) +#backup_ceph_stripe_unit = 0 + +# RBD stripe count to use when creating a backup image. (integer value) +#backup_ceph_stripe_count = 0 + +# If True, apply JOURNALING and EXCLUSIVE_LOCK feature bits to the backup RBD +# objects to allow mirroring (boolean value) +#backup_ceph_image_journals = false + +# If True, always discard excess bytes when restoring volumes i.e. pad with +# zeroes. (boolean value) +#restore_discard_excess_bytes = true + +# Base dir containing mount point for gluster share. (string value) +#glusterfs_backup_mount_point = $state_path/backup_mount + +# GlusterFS share in <hostname|ipv4addr|ipv6addr>:<gluster_vol_name> format. +# Eg: 1.2.3.4:backup_vol (string value) +#glusterfs_backup_share = + +# The GCS bucket to use. (string value) +#backup_gcs_bucket = + +# The size in bytes of GCS backup objects. (integer value) +#backup_gcs_object_size = 52428800 + +# The size in bytes that changes are tracked for incremental backups. +# backup_gcs_object_size has to be a multiple of backup_gcs_block_size. (integer +# value) +#backup_gcs_block_size = 32768 + +# GCS object will be downloaded in chunks of bytes. 
(integer value) +#backup_gcs_reader_chunk_size = 2097152 + +# GCS object will be uploaded in chunks of bytes. Pass in a value of -1 if the +# file is to be uploaded as a single chunk. (integer value) +#backup_gcs_writer_chunk_size = 2097152 + +# Number of times to retry. (integer value) +#backup_gcs_num_retries = 3 + +# List of GCS error codes. (list value) +#backup_gcs_retry_error_codes = 429 + +# Location of GCS bucket. (string value) +#backup_gcs_bucket_location = US + +# Storage class of GCS bucket. (string value) +#backup_gcs_storage_class = NEARLINE + +# Absolute path of GCS service account credential file. (string value) +#backup_gcs_credential_file = + +# Owner project id for GCS bucket. (string value) +#backup_gcs_project_id = + +# HTTP user-agent string for the GCS API. (string value) +#backup_gcs_user_agent = gcscinder + +# Enable or Disable the timer to send the periodic progress notifications to +# Ceilometer when backing up the volume to the GCS backend storage. The default +# value is True to enable the timer. (boolean value) +#backup_gcs_enable_progress_timer = true + +# URL for http proxy access. (uri value) +#backup_gcs_proxy_url = + +# Base dir containing mount point for NFS share. (string value) +#backup_mount_point_base = $state_path/backup_mount + +# NFS share in hostname:path, ipv4addr:path, or "[ipv6addr]:path" format. +# (string value) +#backup_share = + +# Mount options passed to the NFS client. See NFS man page for details. (string +# value) +#backup_mount_options = + +# The maximum size in bytes of the files used to hold backups. If the volume +# being backed up exceeds this size, then it will be backed up into multiple +# files. backup_file_size must be a multiple of backup_sha_block_size_bytes. +# (integer value) +#backup_file_size = 1999994880 + +# The size in bytes that changes are tracked for incremental backups. +# backup_file_size has to be a multiple of backup_sha_block_size_bytes. (integer +# value) +#backup_sha_block_size_bytes = 32768 + +# Enable or Disable the timer to send the periodic progress notifications to +# Ceilometer when backing up the volume to the backend storage. The default +# value is True to enable the timer. (boolean value) +#backup_enable_progress_timer = true + +# Path specifying where to store backups. (string value) +#backup_posix_path = $state_path/backup + +# Custom directory to use for backups. (string value) +#backup_container = + +# The URL of the Swift endpoint (uri value) +#backup_swift_url = + +# The URL of the Keystone endpoint (uri value) +#backup_swift_auth_url = + +# Info to match when looking for swift in the service catalog. Format is: +# separated values of the form: <service_type>:<service_name>:<endpoint_type> - +# Only used if backup_swift_url is unset (string value) +#swift_catalog_info = object-store:swift:publicURL + +# Info to match when looking for keystone in the service catalog. Format is: +# separated values of the form: <service_type>:<service_name>:<endpoint_type> - +# Only used if backup_swift_auth_url is unset (string value) +#keystone_catalog_info = identity:Identity Service:publicURL + +# Swift authentication mechanism. (string value) +# Allowed values: per_user, single_user +#backup_swift_auth = per_user + +# Swift authentication version. Specify "1" for auth 1.0, or "2" for auth 2.0 +# or "3" for auth 3.0 (string value) +#backup_swift_auth_version = 1 + +# Swift tenant/account name. Required when connecting to an auth 2.0 system +# (string value) +#backup_swift_tenant = + +# Swift user domain name. 
Required when connecting to an auth 3.0 system +# (string value) +#backup_swift_user_domain = + +# Swift project domain name. Required when connecting to an auth 3.0 system +# (string value) +#backup_swift_project_domain = + +# Swift project/account name. Required when connecting to an auth 3.0 system +# (string value) +#backup_swift_project = + +# Swift user name (string value) +#backup_swift_user = + +# Swift key for authentication (string value) +#backup_swift_key = + +# The default Swift container to use (string value) +#backup_swift_container = volumebackups + +# The size in bytes of Swift backup objects (integer value) +#backup_swift_object_size = 52428800 + +# The size in bytes that changes are tracked for incremental backups. +# backup_swift_object_size has to be a multiple of backup_swift_block_size. +# (integer value) +#backup_swift_block_size = 32768 + +# The number of retries to make for Swift operations (integer value) +#backup_swift_retry_attempts = 3 + +# The backoff time in seconds between Swift retries (integer value) +#backup_swift_retry_backoff = 2 + +# Enable or Disable the timer to send the periodic progress notifications to +# Ceilometer when backing up the volume to the Swift backend storage. The +# default value is True to enable the timer. (boolean value) +#backup_swift_enable_progress_timer = true + +# Location of the CA certificate file to use for swift client requests. (string +# value) +#backup_swift_ca_cert_file = + +# Bypass verification of server certificate when making SSL connection to +# Swift. (boolean value) +#backup_swift_auth_insecure = false + +# Volume prefix for the backup id when backing up to TSM (string value) +#backup_tsm_volume_prefix = backup + +# TSM password for the running username (string value) +#backup_tsm_password = password + +# Enable or Disable compression for backups (boolean value) +#backup_tsm_compression = true + +# Driver to use for backups. (string value) +#backup_driver = cinder.backup.drivers.swift + +# Offload pending backup delete during backup service startup. If false, the +# backup service will remain down until all pending backups are deleted. +# (boolean value) +#backup_service_inithost_offload = true + +# Name of this cluster. Used to group volume hosts that share the same backend +# configurations to work in HA Active-Active mode. Active-Active is not yet +# supported. (string value) +#cluster = + +# Top-level directory for maintaining cinder's state (string value) +# Deprecated group/name - [DEFAULT]/pybasedir +#state_path = /var/lib/cinder + +# IP address of this host (unknown value) +#my_ip = 10.10.1.23 + +# A list of the URLs of glance API servers available to cinder +# ([http[s]://][hostname|ip]:port). If protocol is not specified it defaults to +# http. (list value) +#glance_api_servers = + +# DEPRECATED: Version of the glance API to use (integer value) +# This option is deprecated for removal since 11.0.0. +# Its value may be silently ignored in the future. +# Reason: Glance v1 support will be removed in Queens +#glance_api_version = 2 + +# Number of retries when downloading an image from glance (integer value) +# Minimum value: 0 +#glance_num_retries = 0 + +# Allow to perform insecure SSL (https) requests to glance (https will be used +# but cert validation will not be performed). (boolean value) +#glance_api_insecure = false + +# Enables or disables negotiation of SSL layer compression. 
In some cases +# disabling compression can improve data throughput, such as when high network +# bandwidth is available and you use compressed image formats like qcow2. +# (boolean value) +#glance_api_ssl_compression = false + +# Location of ca certificates file to use for glance client requests. (string +# value) +#glance_ca_certificates_file = + +# http/https timeout value for glance operations. If no value (None) is +# supplied here, the glanceclient default value is used. (integer value) +#glance_request_timeout = + +# DEPRECATED: Deploy v1 of the Cinder API. (boolean value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#enable_v1_api = false + +# DEPRECATED: Deploy v2 of the Cinder API. (boolean value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#enable_v2_api = true + +# Deploy v3 of the Cinder API. (boolean value) +#enable_v3_api = true + +# Enables or disables rate limit of the API. (boolean value) +#api_rate_limit = true + +# Specify list of extensions to load when using osapi_volume_extension option +# with cinder.api.contrib.select_extensions (list value) +#osapi_volume_ext_list = + +# osapi volume extension to load (multi valued) +#osapi_volume_extension = cinder.api.contrib.standard_extensions + +# Full class name for the Manager for volume (string value) +#volume_manager = cinder.volume.manager.VolumeManager + +# Full class name for the Manager for volume backup (string value) +#backup_manager = cinder.backup.manager.BackupManager + +# Full class name for the Manager for scheduler (string value) +#scheduler_manager = cinder.scheduler.manager.SchedulerManager + +# Name of this node. This can be an opaque identifier. It is not necessarily a +# host name, FQDN, or IP address. (unknown value) +#host = tis-dev-30-20171005 + +# Availability zone of this node. Can be overridden per volume backend with the +# option "backend_availability_zone". (string value) +#storage_availability_zone = nova + +# Default availability zone for new volumes. If not set, the +# storage_availability_zone option value is used as the default for new +# volumes. (string value) +#default_availability_zone = + +# If the requested Cinder availability zone is unavailable, fall back to the +# value of default_availability_zone, then storage_availability_zone, instead +# of failing. (boolean value) +#allow_availability_zone_fallback = false + +# Default volume type to use (string value) +#default_volume_type = + +# Default group type to use (string value) +#default_group_type = + +# Time period for which to generate volume usages. The options are hour, day, +# month, or year. (string value) +#volume_usage_audit_period = month + +# Path to the rootwrap configuration file to use for running commands as root +# (string value) +#rootwrap_config = /etc/cinder/rootwrap.conf + +# Enable monkey patching (boolean value) +#monkey_patch = false + +# List of modules/decorators to monkey patch (list value) +#monkey_patch_modules = + +# Maximum time since last check-in for a service to be considered up (integer +# value) +#service_down_time = 60 + +# The full class name of the volume API class to use (string value) +#volume_api_class = cinder.volume.api.API + +# The full class name of the volume backup API class (string value) +#backup_api_class = cinder.backup.api.API + +# The strategy to use for auth. Supports noauth or keystone. 
(string value) +# Allowed values: noauth, keystone +#auth_strategy = keystone + +# A list of backend names to use. These backend names should be backed by a +# unique [CONFIG] group with its options (list value) +#enabled_backends = + +# Whether snapshots count against gigabyte quota (boolean value) +#no_snapshot_gb_quota = false + +# The full class name of the volume transfer API class (string value) +#transfer_api_class = cinder.transfer.api.API + +# The full class name of the consistencygroup API class (string value) +#consistencygroup_api_class = cinder.consistencygroup.api.API + +# The full class name of the group API class (string value) +#group_api_class = cinder.group.api.API + +# DEPRECATED: OpenStack privileged account username. Used for requests to other +# services (such as Nova) that require an account with special rights. (string +# value) +# This option is deprecated for removal since 11.0.0. +# Its value may be silently ignored in the future. +# Reason: Use the [nova] section for configuring Keystone authentication for a +# privileged user. +#os_privileged_user_name = + +# DEPRECATED: Password associated with the OpenStack privileged account. +# (string value) +# This option is deprecated for removal since 11.0.0. +# Its value may be silently ignored in the future. +# Reason: Use the [nova] section to configure Keystone authentication for a +# privileged user. +#os_privileged_user_password = + +# DEPRECATED: Tenant name associated with the OpenStack privileged account. +# (string value) +# This option is deprecated for removal since 11.0.0. +# Its value may be silently ignored in the future. +# Reason: Use the [nova] section to configure Keystone authentication for a +# privileged user. +#os_privileged_user_tenant = + +# DEPRECATED: Auth URL associated with the OpenStack privileged account. (uri +# value) +# This option is deprecated for removal since 11.0.0. +# Its value may be silently ignored in the future. +# Reason: Use the [nova] section to configure Keystone authentication for a +# privileged user. +#os_privileged_user_auth_url = + +# The full class name of the compute API class to use (string value) +#compute_api_class = cinder.compute.nova.API + +# DEPRECATED: Match this value when searching for nova in the service catalog. +# Format is: separated values of the form: +# <service_type>:<service_name>:<endpoint_type> (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#nova_catalog_info = compute:Compute Service:publicURL + +# DEPRECATED: Same as nova_catalog_info, but for admin endpoint. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#nova_catalog_admin_info = compute:Compute Service:publicURL + +# DEPRECATED: Override service catalog lookup with template for nova endpoint +# e.g. http://localhost:8774/v2/%(project_id)s (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#nova_endpoint_template = + +# DEPRECATED: Same as nova_endpoint_template, but for admin endpoint. (string +# value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#nova_endpoint_admin_template = + +# ID of the project which will be used as the Cinder internal tenant. (string +# value) +#cinder_internal_tenant_project_id = + +# ID of the user to be used in volume operations as the Cinder internal tenant. 
+# (string value) +#cinder_internal_tenant_user_id = + +# Services to be added to the available pool on create (boolean value) +#enable_new_services = true + +# Template string to be used to generate volume names (string value) +#volume_name_template = volume-%s + +# Template string to be used to generate snapshot names (string value) +#snapshot_name_template = snapshot-%s + +# Template string to be used to generate backup names (string value) +#backup_name_template = backup-%s + +# Driver to use for database access (string value) +#db_driver = cinder.db + +# Make exception message format errors fatal. (boolean value) +#fatal_exception_format_errors = false + +# A list of url schemes that can be downloaded directly via the direct_url. +# Currently supported schemes: [file, cinder]. (list value) +#allowed_direct_url_schemes = + +# Info to match when looking for glance in the service catalog. Format is: +# separated values of the form: <service_type>:<service_name>:<endpoint_type> - +# Only used if glance_api_servers are not provided. (string value) +#glance_catalog_info = image:glance:publicURL + +# All glance downloads in progress will call fdatasync() once this much image +# data has accumulated, in MiB. (integer value) +#glance_download_fdatasync_interval_mib = 225 + +# Default core properties of image (list value) +#glance_core_properties = checksum,container_format,disk_format,image_name,image_id,min_disk,min_ram,name,size + +# Directory used for temporary storage during image conversion (string value) +#image_conversion_dir = $state_path/conversion + +# Message minimum life in seconds. (integer value) +#message_ttl = 2592000 + +# Interval between periodic task runs to clean expired messages in seconds. +# (integer value) +#message_reap_interval = 86400 + +# Number of volumes allowed per project (integer value) +#quota_volumes = 10 + +# Number of volume snapshots allowed per project (integer value) +#quota_snapshots = 10 + +# Number of consistencygroups allowed per project (integer value) +#quota_consistencygroups = 10 + +# Number of groups allowed per project (integer value) +#quota_groups = 10 + +# Total amount of storage, in gigabytes, allowed for volumes and snapshots per +# project (integer value) +#quota_gigabytes = 1000 + +# Number of volume backups allowed per project (integer value) +#quota_backups = 10 + +# Total amount of storage, in gigabytes, allowed for backups per project +# (integer value) +#quota_backup_gigabytes = 1000 + +# Number of seconds until a reservation expires (integer value) +#reservation_expire = 86400 + +# Interval between periodic task runs to clean expired reservations in seconds. +# (integer value) +#reservation_clean_interval = $reservation_expire + +# Count of reservations until usage is refreshed (integer value) +#until_refresh = 0 + +# Number of seconds between subsequent usage refreshes (integer value) +#max_age = 0 + +# Default driver to use for quota checks (string value) +#quota_driver = cinder.quota.DbQuotaDriver + +# Enables or disables use of default quota class with default quota. (boolean +# value) +#use_default_quota_class = true + +# Max size allowed per volume, in gigabytes (integer value) +#per_volume_size_limit = -1 + +# The scheduler host manager class to use (string value) +#scheduler_host_manager = cinder.scheduler.host_manager.HostManager + +# Maximum number of attempts to schedule a volume (integer value) +#scheduler_max_attempts = 3 + +# Which filter class names to use for filtering hosts when not specified in the +# request. 
(list value) +#scheduler_default_filters = AvailabilityZoneFilter,CapacityFilter,CapabilitiesFilter + +# Which weigher class names to use for weighing hosts. (list value) +#scheduler_default_weighers = CapacityWeigher + +# Which handler to use for selecting the host/pool after weighing (string +# value) +#scheduler_weight_handler = cinder.scheduler.weights.OrderedHostWeightHandler + +# Default scheduler driver to use (string value) +#scheduler_driver = cinder.scheduler.filter_scheduler.FilterScheduler + +# Absolute path to scheduler configuration JSON file. (string value) +#scheduler_json_config_location = + +# Multiplier used for weighing free capacity. Negative numbers mean to stack vs +# spread. (floating point value) +#capacity_weight_multiplier = 1.0 + +# Multiplier used for weighing allocated capacity. Positive numbers mean to +# stack vs spread. (floating point value) +#allocated_capacity_weight_multiplier = -1.0 + +# Multiplier used for weighing volume number. Negative numbers mean to spread +# vs stack. (floating point value) +#volume_number_multiplier = -1.0 + +# Interval, in seconds, between nodes reporting state to datastore (integer +# value) +#report_interval = 10 + +# Interval, in seconds, between running periodic tasks (integer value) +#periodic_interval = 60 + +# Range, in seconds, to randomly delay when starting the periodic task +# scheduler to reduce stampeding. (Disable by setting to 0) (integer value) +#periodic_fuzzy_delay = 60 + +# IP address on which OpenStack Volume API listens (string value) +#osapi_volume_listen = 0.0.0.0 + +# Port on which OpenStack Volume API listens (port value) +# Minimum value: 0 +# Maximum value: 65535 +#osapi_volume_listen_port = 8776 + +# Number of workers for OpenStack Volume API service. The default is equal to +# the number of CPUs available. (integer value) +#osapi_volume_workers = + +# Wraps the socket in a SSL context if True is set. A certificate file and key +# file must be specified. (boolean value) +#osapi_volume_use_ssl = false + +# Option to enable strict host key checking. When set to "True" Cinder will +# only connect to systems with a host key present in the configured +# "ssh_hosts_key_file". When set to "False" the host key will be saved upon +# first connection and used for subsequent connections. Default=False (boolean +# value) +#strict_ssh_host_key_policy = false + +# File containing SSH host keys for the systems with which Cinder needs to +# communicate. OPTIONAL: Default=$state_path/ssh_known_hosts (string value) +#ssh_hosts_key_file = $state_path/ssh_known_hosts + +# The number of characters in the salt. (integer value) +#volume_transfer_salt_length = 8 + +# The number of characters in the autogenerated auth key. (integer value) +#volume_transfer_key_length = 16 + +# Enables the Force option on upload_to_image. This enables running +# upload_volume on in-use volumes for backends that support it. 
(boolean value) +#enable_force_upload = false + +# Create volume from snapshot at the host where snapshot resides (boolean +# value) +#snapshot_same_host = true + +# Ensure that the new volumes are the same AZ as snapshot or source volume +# (boolean value) +#cloned_volume_same_az = true + +# Cache volume availability zones in memory for the provided duration in +# seconds (integer value) +#az_cache_duration = 3600 + +# Number of times to attempt to run flakey shell commands (integer value) +#num_shell_tries = 3 + +# The percentage of backend capacity that is reserved (integer value) +# Minimum value: 0 +# Maximum value: 100 +#reserved_percentage = 0 + +# Prefix for iSCSI volumes (string value) +#iscsi_target_prefix = iqn.2010-10.org.openstack: + +# The IP address that the iSCSI daemon is listening on (string value) +#iscsi_ip_address = $my_ip + +# The list of secondary IP addresses of the iSCSI daemon (list value) +#iscsi_secondary_ip_addresses = + +# The port that the iSCSI daemon is listening on (port value) +# Minimum value: 0 +# Maximum value: 65535 +#iscsi_port = 3260 + +# The maximum number of times to rescan targets to find volume (integer value) +#num_volume_device_scan_tries = 3 + +# The backend name for a given driver implementation (string value) +#volume_backend_name = + +# Do we attach/detach volumes in cinder using multipath for volume to image and +# image to volume transfers? (boolean value) +#use_multipath_for_image_xfer = false + +# If this is set to True, attachment of volumes for image transfer will be +# aborted when multipathd is not running. Otherwise, it will fall back to single +# path. (boolean value) +#enforce_multipath_for_image_xfer = false + +# Method used to wipe old volumes (string value) +# Allowed values: none, zero +#volume_clear = zero + +# Size in MiB to wipe at start of old volumes. 1024 MiB at max. 0 => all +# (integer value) +# Maximum value: 1024 +#volume_clear_size = 0 + +# The flag to pass to ionice to alter the i/o priority of the process used to +# zero a volume after deletion, for example "-c3" for idle only priority. +# (string value) +#volume_clear_ionice = + +# iSCSI target user-land tool to use. tgtadm is default, use lioadm for LIO +# iSCSI support, scstadmin for SCST target support, ietadm for iSCSI Enterprise +# Target, iscsictl for Chelsio iSCSI Target or fake for testing. (string value) +# Allowed values: tgtadm, lioadm, scstadmin, iscsictl, ietadm, fake +#iscsi_helper = tgtadm + +# Volume configuration file storage directory (string value) +#volumes_dir = $state_path/volumes + +# IET configuration file (string value) +#iet_conf = /etc/iet/ietd.conf + +# Chiscsi (CXT) global defaults configuration file (string value) +#chiscsi_conf = /etc/chelsio-iscsi/chiscsi.conf + +# Sets the behavior of the iSCSI target to either perform blockio or fileio +# optionally, auto can be set and Cinder will autodetect type of backing device +# (string value) +# Allowed values: blockio, fileio, auto +#iscsi_iotype = fileio + +# The default block size used when copying/clearing volumes (string value) +#volume_dd_blocksize = 4M + +# The blkio cgroup name to be used to limit bandwidth of volume copy (string +# value) +#volume_copy_blkio_cgroup_name = cinder-volume-copy + +# The upper limit of bandwidth of volume copy. 0 => unlimited (integer value) +#volume_copy_bps_limit = 0 + +# Sets the behavior of the iSCSI target to either perform write-back(on) or +# write-through(off). This parameter is valid if iscsi_helper is set to tgtadm. 
+# (string value) +# Allowed values: on, off +#iscsi_write_cache = on + +# Sets the target-specific flags for the iSCSI target. Only used for tgtadm to +# specify backing device flags using bsoflags option. The specified string is +# passed as is to the underlying tool. (string value) +#iscsi_target_flags = + +# Determines the iSCSI protocol for new iSCSI volumes, created with tgtadm or +# lioadm target helpers. In order to enable RDMA, this parameter should be set +# with the value "iser". The supported iSCSI protocol values are "iscsi" and +# "iser". (string value) +# Allowed values: iscsi, iser +#iscsi_protocol = iscsi + +# The path to the client certificate key for verification, if the driver +# supports it. (string value) +#driver_client_cert_key = + +# The path to the client certificate for verification, if the driver supports +# it. (string value) +#driver_client_cert = + +# Tell driver to use SSL for connection to backend storage if the driver +# supports it. (boolean value) +#driver_use_ssl = false + +# Float representation of the over subscription ratio when thin provisioning is +# involved. Default ratio is 20.0, meaning provisioned capacity can be 20 times +# of the total physical capacity. If the ratio is 10.5, it means provisioned +# capacity can be 10.5 times of the total physical capacity. A ratio of 1.0 +# means provisioned capacity cannot exceed the total physical capacity. The +# ratio has to be a minimum of 1.0. (floating point value) +#max_over_subscription_ratio = 20.0 + +# Certain iSCSI targets have predefined target names, SCST target driver uses +# this name. (string value) +#scst_target_iqn_name = + +# SCST target implementation can choose from multiple SCST target drivers. +# (string value) +#scst_target_driver = iscsi + +# Option to enable/disable CHAP authentication for targets. (boolean value) +#use_chap_auth = false + +# CHAP user name. (string value) +#chap_username = + +# Password for specified CHAP account name. (string value) +#chap_password = + +# Namespace for driver private data values to be saved in. (string value) +#driver_data_namespace = + +# String representation for an equation that will be used to filter hosts. Only +# used when the driver filter is set to be used by the Cinder scheduler. +# (string value) +#filter_function = + +# String representation for an equation that will be used to determine the +# goodness of a host. Only used when the goodness weigher is set to be +# used by the Cinder scheduler. (string value) +#goodness_function = + +# If set to True the http client will validate the SSL certificate of the +# backend endpoint. (boolean value) +#driver_ssl_cert_verify = false + +# Can be used to specify a non default path to a CA_BUNDLE file or directory +# with certificates of trusted CAs, which will be used to validate the backend +# (string value) +#driver_ssl_cert_path = + +# List of options that control which trace info is written to the DEBUG log +# level to assist developers. Valid values are method and api. (list value) +#trace_flags = + +# Multi opt of dictionaries to represent a replication target device. This +# option may be specified multiple times in a single config section to specify +# multiple replication target devices. Each entry takes the standard dict +# config form: replication_device = +# target_device_id:<required>,key1:value1,key2:value2... 
(dict value) +#replication_device = + +# If set to True, upload-to-image in raw format will create a cloned volume and +# register its location to the image service, instead of uploading the volume +# content. The cinder backend and locations support must be enabled in the +# image service, and glance_api_version must be set to 2. (boolean value) +#image_upload_use_cinder_backend = false + +# If set to True, the image volume created by upload-to-image will be placed in +# the internal tenant. Otherwise, the image volume is created in the current +# context's tenant. (boolean value) +#image_upload_use_internal_tenant = false + +# Enable the image volume cache for this backend. (boolean value) +#image_volume_cache_enabled = false + +# Max size of the image volume cache for this backend in GB. 0 => unlimited. +# (integer value) +#image_volume_cache_max_size_gb = 0 + +# Max number of entries allowed in the image volume cache. 0 => unlimited. +# (integer value) +#image_volume_cache_max_count = 0 + +# Report to clients of Cinder that the backend supports discard (aka. +# trim/unmap). This will not actually change the behavior of the backend or the +# client directly, it will only notify that it can be used. (boolean value) +#report_discard_supported = false + +# Protocol for transferring data between host and storage back-end. (string +# value) +# Allowed values: iscsi, fc +#storage_protocol = iscsi + +# If this is set to True, the backup_use_temp_snapshot path will be used during +# the backup. Otherwise, it will use backup_use_temp_volume path. (boolean +# value) +#backup_use_temp_snapshot = false + +# Set this to True when you want to allow an unsupported driver to start. +# Drivers that haven't maintained a working CI system and testing are marked as +# unsupported until CI is working again. This also marks a driver as +# deprecated and may be removed in the next release. (boolean value) +#enable_unsupported_driver = false + +# Availability zone for this volume backend. If not set, the +# storage_availability_zone option value is used as the default for all +# backends. (string value) +#backend_availability_zone = + +# Volume backup directory (string value) +#backup_dir = /opt/backups + +# The maximum number of times to rescan iSER target to find volume (integer +# value) +#num_iser_scan_tries = 3 + +# Prefix for iSER volumes (string value) +#iser_target_prefix = iqn.2010-10.org.openstack: + +# The IP address that the iSER daemon is listening on (string value) +#iser_ip_address = $my_ip + +# The port that the iSER daemon is listening on (port value) +# Minimum value: 0 +# Maximum value: 65535 +#iser_port = 3260 + +# The name of the iSER target user-land tool to use (string value) +#iser_helper = tgtadm + +# Timeout for creating the volume to migrate to when performing volume +# migration (seconds) (integer value) +#migration_create_volume_timeout_secs = 300 + +# Offload pending volume delete during volume service startup (boolean value) +#volume_service_inithost_offload = false + +# File used for detecting if we are doing in-service patching (string value) +#in_service_marker = /run/patching/patch-flags/cinder.restarting + +# Sets the value of TCP_KEEPALIVE (True/False) for each server socket. (boolean +# value) +#tcp_keepalive = true + +# Sets the value of TCP_KEEPINTVL in seconds for each server socket. Not +# supported on OS X. (integer value) +#tcp_keepalive_interval = + +# Sets the value of TCP_KEEPCNT for each server socket. Not supported on OS X. 
+# (integer value) +#tcp_keepalive_count = + +# +# From oslo.config +# + +# Path to a config file to use. Multiple config files can be specified, with +# values in later files taking precedence. Defaults to %(default)s. (unknown +# value) +#config_file = ~/.project/project.conf,~/project.conf,/etc/project/project.conf,/etc/project.conf + +# Path to a config directory to pull `*.conf` files from. This file set is +# sorted, so as to provide a predictable parse order if individual options are +# over-ridden. The set is parsed after the file(s) specified via previous +# --config-file arguments, hence over-ridden options in the directory take +# precedence. (list value) +#config_dir = ~/.project/project.conf.d/,~/project.conf.d/,/etc/project/project.conf.d/,/etc/project.conf.d/ + +# +# From oslo.log +# + +# If set to true, the logging level will be set to DEBUG instead of the default +# INFO level. (boolean value) +# Note: This option can be changed without restarting. +#debug = false + +# The name of a logging configuration file. This file is appended to any +# existing logging configuration files. For details about logging configuration +# files, see the Python logging module documentation. Note that when logging +# configuration files are used then all logging configuration is set in the +# configuration file and other logging configuration options are ignored (for +# example, logging_context_format_string). (string value) +# Note: This option can be changed without restarting. +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append = + +# Defines the format string for %%(asctime)s in log records. Default: +# %(default)s . This option is ignored if log_config_append is set. (string +# value) +#log_date_format = %Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to send logging output to. If no default is set, +# logging will go to stderr as defined by use_stderr. This option is ignored if +# log_config_append is set. (string value) +# Deprecated group/name - [DEFAULT]/logfile +#log_file = + +# (Optional) The base directory used for relative log_file paths. This option +# is ignored if log_config_append is set. (string value) +# Deprecated group/name - [DEFAULT]/logdir +#log_dir = + +# Uses logging handler designed to watch file system. When log file is moved or +# removed this handler will open a new log file with specified path +# instantaneously. It makes sense only if log_file option is specified and +# Linux platform is used. This option is ignored if log_config_append is set. +# (boolean value) +#watch_log_file = false + +# Use syslog for logging. Existing syslog format is DEPRECATED and will be +# changed later to honor RFC5424. This option is ignored if log_config_append +# is set. (boolean value) +#use_syslog = false + +# Enable journald for logging. If running in a systemd environment you may wish +# to enable journal support. Doing so will use the journal native protocol +# which includes structured metadata in addition to log messages. This option is +# ignored if log_config_append is set. (boolean value) +#use_journal = false + +# Syslog facility to receive log lines. This option is ignored if +# log_config_append is set. (string value) +#syslog_log_facility = LOG_USER + +# Log output to standard error. This option is ignored if log_config_append is +# set. (boolean value) +#use_stderr = false + +# Format string to use for log messages with context. 
(string value) +#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# Format string to use for log messages when context is undefined. (string +# value) +#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# Additional data to append to log message when logging level for the message +# is DEBUG. (string value) +#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d + +# Prefix each line of exception output with this format. (string value) +#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s + +# Defines the format string for %(user_identity)s that is used in +# logging_context_format_string. (string value) +#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s + +# List of package logging levels in logger=LEVEL pairs. This option is ignored +# if log_config_append is set. (list value) +#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO + +# Enables or disables publication of error events. (boolean value) +#publish_errors = false + +# The format for an instance that is passed with the log message. (string +# value) +#instance_format = "[instance: %(uuid)s] " + +# The format for an instance UUID that is passed with the log message. (string +# value) +#instance_uuid_format = "[instance: %(uuid)s] " + +# Interval, number of seconds, of log rate limiting. (integer value) +#rate_limit_interval = 0 + +# Maximum number of logged messages per rate_limit_interval. (integer value) +#rate_limit_burst = 0 + +# Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG +# or empty string. Logs with level greater or equal to rate_limit_except_level +# are not filtered. An empty string means that all levels are filtered. (string +# value) +#rate_limit_except_level = CRITICAL + +# Enables or disables fatal status of deprecations. (boolean value) +#fatal_deprecations = false + +# +# From oslo.messaging +# + +# Size of RPC connection pool. (integer value) +#rpc_conn_pool_size = 30 + +# The pool size limit for connections expiration policy (integer value) +#conn_pool_min_size = 2 + +# The time-to-live in sec of idle connections in the pool (integer value) +#conn_pool_ttl = 1200 + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. +# The "host" option should point or resolve to this address. (string value) +#rpc_zmq_bind_address = * + +# MatchMaker driver. (string value) +# Allowed values: redis, sentinel, dummy +#rpc_zmq_matchmaker = redis + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts = 1 + +# Maximum number of ingress messages to locally buffer per topic. Default is +# unlimited. (integer value) +#rpc_zmq_topic_backlog = + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir = /var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match +# "host" option, if running Nova. 
(string value) +#rpc_zmq_host = localhost + +# Number of seconds to wait before all pending messages will be sent after +# closing a socket. The default value of -1 specifies an infinite linger +# period. The value of 0 specifies no linger period. Pending messages shall be +# discarded immediately when the socket is closed. Positive values specify an +# upper bound for the linger period. (integer value) +# Deprecated group/name - [DEFAULT]/rpc_cast_timeout +#zmq_linger = -1 + +# The default number of seconds that poll should wait. Poll raises timeout +# exception when timeout expired. (integer value) +#rpc_poll_timeout = 1 + +# Expiration timeout in seconds of a name service record about existing target +# ( < 0 means no timeout). (integer value) +#zmq_target_expire = 300 + +# Update period in seconds of a name service record about existing target. +# (integer value) +#zmq_target_update = 180 + +# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. (boolean +# value) +#use_pub_sub = false + +# Use ROUTER remote proxy. (boolean value) +#use_router_proxy = false + +# This option makes direct connections dynamic or static. It makes sense only +# with use_router_proxy=False which means to use direct connections for direct +# message types (ignored otherwise). (boolean value) +#use_dynamic_connections = false + +# How many additional connections to a host will be made for failover reasons. +# This option is actual only in dynamic connections mode. (integer value) +#zmq_failover_connections = 2 + +# Minimal port number for random ports range. (port value) +# Minimum value: 0 +# Maximum value: 65535 +#rpc_zmq_min_port = 49153 + +# Maximal port number for random ports range. (integer value) +# Minimum value: 1 +# Maximum value: 65536 +#rpc_zmq_max_port = 65536 + +# Number of retries to find free port number before fail with ZMQBindError. +# (integer value) +#rpc_zmq_bind_port_retries = 100 + +# Default serialization mechanism for serializing/deserializing +# outgoing/incoming messages (string value) +# Allowed values: json, msgpack +#rpc_zmq_serialization = json + +# This option configures round-robin mode in zmq socket. True means not keeping +# a queue when server side disconnects. False means to keep queue and messages +# even if server is disconnected, when the server appears we send all +# accumulated messages to it. (boolean value) +#zmq_immediate = true + +# Enable/disable TCP keepalive (KA) mechanism. The default value of -1 (or any +# other negative value) means to skip any overrides and leave it to OS default; +# 0 and 1 (or any other positive value) mean to disable and enable the option +# respectively. (integer value) +#zmq_tcp_keepalive = -1 + +# The duration between two keepalive transmissions in idle condition. The unit +# is platform dependent, for example, seconds in Linux, milliseconds in Windows +# etc. The default value of -1 (or any other negative value and 0) means to +# skip any overrides and leave it to OS default. (integer value) +#zmq_tcp_keepalive_idle = -1 + +# The number of retransmissions to be carried out before declaring that remote +# end is not available. The default value of -1 (or any other negative value +# and 0) means to skip any overrides and leave it to OS default. (integer +# value) +#zmq_tcp_keepalive_cnt = -1 + +# The duration between two successive keepalive retransmissions, if +# acknowledgement to the previous keepalive transmission is not received. 
The +# unit is platform dependent, for example, seconds in Linux, milliseconds in +# Windows etc. The default value of -1 (or any other negative value and 0) +# means to skip any overrides and leave it to OS default. (integer value) +#zmq_tcp_keepalive_intvl = -1 + +# Maximum number of (green) threads to work concurrently. (integer value) +#rpc_thread_pool_size = 100 + +# Expiration timeout in seconds of a sent/received message after which it is +# not tracked anymore by a client/server. (integer value) +#rpc_message_ttl = 300 + +# Wait for message acknowledgements from receivers. This mechanism works only +# via proxy without PUB/SUB. (boolean value) +#rpc_use_acks = false + +# Number of seconds to wait for an ack from a cast/call. After each retry +# attempt this timeout is multiplied by some specified multiplier. (integer +# value) +#rpc_ack_timeout_base = 15 + +# Number to multiply base ack timeout by after each retry attempt. (integer +# value) +#rpc_ack_timeout_multiplier = 2 + +# Default number of message sending attempts in case any problems occur: +# positive value N means at most N retries, 0 means no retries, None or -1 (or +# any other negative values) mean to retry forever. This option is used only if +# acknowledgments are enabled. (integer value) +#rpc_retry_attempts = 3 + +# List of publisher hosts SubConsumer can subscribe on. This option has higher +# priority than the default publishers list taken from the matchmaker. (list +# value) +#subscribe_on = + +# Size of executor thread pool when executor is threading or eventlet. (integer +# value) +# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size +#executor_thread_pool_size = 64 + +# Seconds to wait for a response from a call. (integer value) +#rpc_response_timeout = 60 + +# A URL representing the messaging driver to use and its full configuration. +# (string value) +#transport_url = + +# DEPRECATED: The messaging driver to use, defaults to rabbit. Other drivers +# include amqp and zmq. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url +#rpc_backend = rabbit + +# The default exchange under which topics are scoped. May be overridden by an +# exchange name specified in the transport_url option. (string value) +#control_exchange = openstack + +# +# From oslo.service.periodic_task +# + +# Some periodic tasks can be run in a separate process. Should we run them +# here? (boolean value) +#run_external_periodic_tasks = true + +# +# From oslo.service.service +# + +# Enable eventlet backdoor. Acceptable values are 0, <port>, and +# <start>:<end>, where 0 results in listening on a random tcp port number; +# <port> results in listening on the specified port number (and not enabling +# backdoor if that port is in use); and <start>:<end> results in listening on +# the smallest unused port number within the specified range of port numbers. +# The chosen port is displayed in the service's log file. (string value) +#backdoor_port = + +# Enable eventlet backdoor, using the provided path as a unix socket that can +# receive connections. This option is mutually exclusive with 'backdoor_port' +# in that only one should be provided. If both are provided then the existence +# of this option overrides the usage of that option. (string value) +#backdoor_socket = + +# Enables or disables logging values of all registered options when starting a +# service (at DEBUG level). 
(boolean value) +#log_options = true + +# Specify a timeout after which a gracefully shutdown server will exit. Zero +# value means endless wait. (integer value) +#graceful_shutdown_timeout = 60 + +# +# From oslo.service.wsgi +# + +# File name for the paste.deploy config for api service (string value) +#api_paste_config = api-paste.ini + +# A python format string that is used as the template to generate log lines. +# The following values can be formatted into it: client_ip, date_time, +# request_line, status_code, body_length, wall_seconds. (string value) +#wsgi_log_format = %(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f + +# Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not +# supported on OS X. (integer value) +#tcp_keepidle = 600 + +# Size of the pool of greenthreads used by wsgi (integer value) +#wsgi_default_pool_size = 100 + +# Maximum line size of message headers to be accepted. max_header_line may need +# to be increased when using large tokens (typically those generated when +# keystone is configured to use PKI tokens with big service catalogs). (integer +# value) +#max_header_line = 16384 + +# If False, closes the client socket connection explicitly. (boolean value) +#wsgi_keep_alive = true + +# Timeout for client connections' socket operations. If an incoming connection +# is idle for this number of seconds it will be closed. A value of '0' means +# wait forever. (integer value) +#client_socket_timeout = 900 + + +[backend] + +# +# From cinder +# + +# Backend override of host value. (string value) +#backend_host = + + +[backend_defaults] + +# +# From cinder +# + +# Number of times to attempt to run flakey shell commands (integer value) +#num_shell_tries = 3 + +# The percentage of backend capacity that is reserved (integer value) +# Minimum value: 0 +# Maximum value: 100 +#reserved_percentage = 0 + +# Prefix for iSCSI volumes (string value) +#iscsi_target_prefix = iqn.2010-10.org.openstack: + +# The IP address that the iSCSI daemon is listening on (string value) +#iscsi_ip_address = $my_ip + +# The list of secondary IP addresses of the iSCSI daemon (list value) +#iscsi_secondary_ip_addresses = + +# The port that the iSCSI daemon is listening on (port value) +# Minimum value: 0 +# Maximum value: 65535 +#iscsi_port = 3260 + +# The maximum number of times to rescan targets to find volume (integer value) +#num_volume_device_scan_tries = 3 + +# The backend name for a given driver implementation (string value) +#volume_backend_name = + +# Do we attach/detach volumes in cinder using multipath for volume to image and +# image to volume transfers? (boolean value) +#use_multipath_for_image_xfer = false + +# If this is set to True, attachment of volumes for image transfer will be +# aborted when multipathd is not running. Otherwise, it will fall back to single +# path. (boolean value) +#enforce_multipath_for_image_xfer = false + +# Method used to wipe old volumes (string value) +# Allowed values: none, zero +#volume_clear = zero + +# Size in MiB to wipe at start of old volumes. 1024 MiB at max. 0 => all +# (integer value) +# Maximum value: 1024 +#volume_clear_size = 0 + +# The flag to pass to ionice to alter the i/o priority of the process used to +# zero a volume after deletion, for example "-c3" for idle only priority. +# (string value) +#volume_clear_ionice = + +# iSCSI target user-land tool to use. 
+
+# iSCSI target user-land tool to use. tgtadm is default, use lioadm for LIO
+# iSCSI support, scstadmin for SCST target support, ietadm for iSCSI Enterprise
+# Target, iscsictl for Chelsio iSCSI Target or fake for testing. (string value)
+# Allowed values: tgtadm, lioadm, scstadmin, iscsictl, ietadm, fake
+#iscsi_helper = tgtadm
+
+# Volume configuration file storage directory (string value)
+#volumes_dir = $state_path/volumes
+
+# IET configuration file (string value)
+#iet_conf = /etc/iet/ietd.conf
+
+# Chiscsi (CXT) global defaults configuration file (string value)
+#chiscsi_conf = /etc/chelsio-iscsi/chiscsi.conf
+
+# Sets the behavior of the iSCSI target to either perform blockio or fileio;
+# optionally, auto can be set and Cinder will autodetect the type of backing
+# device (string value)
+# Allowed values: blockio, fileio, auto
+#iscsi_iotype = fileio
+
+# The default block size used when copying/clearing volumes (string value)
+#volume_dd_blocksize = 4M
+
+# The blkio cgroup name to be used to limit bandwidth of volume copy (string
+# value)
+#volume_copy_blkio_cgroup_name = cinder-volume-copy
+
+# The upper limit of bandwidth of volume copy. 0 => unlimited (integer value)
+#volume_copy_bps_limit = 0
+
+# Sets the behavior of the iSCSI target to either perform write-back (on) or
+# write-through (off). This parameter is valid if iscsi_helper is set to
+# tgtadm. (string value)
+# Allowed values: on, off
+#iscsi_write_cache = on
+
+# Sets the target-specific flags for the iSCSI target. Only used for tgtadm to
+# specify backing device flags using bsoflags option. The specified string is
+# passed as is to the underlying tool. (string value)
+#iscsi_target_flags =
+
+# Determines the iSCSI protocol for new iSCSI volumes, created with tgtadm or
+# lioadm target helpers. In order to enable RDMA, this parameter should be set
+# with the value "iser". The supported iSCSI protocol values are "iscsi" and
+# "iser". (string value)
+# Allowed values: iscsi, iser
+#iscsi_protocol = iscsi
+
+# The path to the client certificate key for verification, if the driver
+# supports it. (string value)
+#driver_client_cert_key =
+
+# The path to the client certificate for verification, if the driver supports
+# it. (string value)
+#driver_client_cert =
+
+# Tell driver to use SSL for connection to backend storage if the driver
+# supports it. (boolean value)
+#driver_use_ssl = false
+
+# Float representation of the over subscription ratio when thin provisioning is
+# involved. Default ratio is 20.0, meaning provisioned capacity can be 20 times
+# the total physical capacity. If the ratio is 10.5, it means provisioned
+# capacity can be 10.5 times the total physical capacity. A ratio of 1.0 means
+# provisioned capacity cannot exceed the total physical capacity. The ratio has
+# to be a minimum of 1.0. (floating point value)
+#max_over_subscription_ratio = 20.0
+
+# Certain iSCSI targets have predefined target names; the SCST target driver
+# uses this name. (string value)
+#scst_target_iqn_name =
+
+# SCST target implementation can choose from multiple SCST target drivers.
+# (string value)
+#scst_target_driver = iscsi
+
+# Option to enable/disable CHAP authentication for targets. (boolean value)
+#use_chap_auth = false
+
+# CHAP user name. (string value)
+#chap_username =
+
+# Password for specified CHAP account name. (string value)
+#chap_password =
+
+# Namespace for driver private data values to be saved in. (string value)
+#driver_data_namespace =
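+
+# Illustrative example (not part of the generated sample): enabling CHAP
+# authentication for targets; the credentials are placeholders.
+#use_chap_auth = true
+#chap_username = cinder-chap
+#chap_password = CHAP_SECRET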
+
+# String representation for an equation that will be used to filter hosts.
+# Only used when the driver filter is set to be used by the Cinder scheduler.
+# (string value)
+#filter_function =
+
+# String representation for an equation that will be used to determine the
+# goodness of a host. Only used when the goodness weigher is set to be used by
+# the Cinder scheduler. (string value)
+#goodness_function =
+
+# If set to True the http client will validate the SSL certificate of the
+# backend endpoint. (boolean value)
+#driver_ssl_cert_verify = false
+
+# Can be used to specify a non-default path to a CA_BUNDLE file or directory
+# with certificates of trusted CAs, which will be used to validate the backend
+# (string value)
+#driver_ssl_cert_path =
+
+# List of options that control which trace info is written to the DEBUG log
+# level to assist developers. Valid values are method and api. (list value)
+#trace_flags =
+
+# Multi opt of dictionaries to represent a replication target device. This
+# option may be specified multiple times in a single config section to specify
+# multiple replication target devices. Each entry takes the standard dict
+# config form: replication_device =
+# target_device_id:<required>,key1:value1,key2:value2... (dict value)
+#replication_device =
+
+# If set to True, upload-to-image in raw format will create a cloned volume and
+# register its location to the image service, instead of uploading the volume
+# content. The cinder backend and locations support must be enabled in the
+# image service, and glance_api_version must be set to 2. (boolean value)
+#image_upload_use_cinder_backend = false
+
+# If set to True, the image volume created by upload-to-image will be placed in
+# the internal tenant. Otherwise, the image volume is created in the current
+# context's tenant. (boolean value)
+#image_upload_use_internal_tenant = false
+
+# Enable the image volume cache for this backend. (boolean value)
+#image_volume_cache_enabled = false
+
+# Max size of the image volume cache for this backend in GB. 0 => unlimited.
+# (integer value)
+#image_volume_cache_max_size_gb = 0
+
+# Max number of entries allowed in the image volume cache. 0 => unlimited.
+# (integer value)
+#image_volume_cache_max_count = 0
+
+# Report to clients of Cinder that the backend supports discard (a.k.a.
+# trim/unmap). This will not actually change the behavior of the backend or the
+# client directly, it will only notify that it can be used. (boolean value)
+#report_discard_supported = false
+
+# Protocol for transferring data between host and storage back-end. (string
+# value)
+# Allowed values: iscsi, fc
+#storage_protocol = iscsi
+
+# If this is set to True, the backup_use_temp_snapshot path will be used during
+# the backup. Otherwise, it will use backup_use_temp_volume path. (boolean
+# value)
+#backup_use_temp_snapshot = false
+
+# Set this to True when you want to allow an unsupported driver to start.
+# Drivers that haven't maintained a working CI system and testing are marked as
+# unsupported until CI is working again. This also marks a driver as
+# deprecated, and it may be removed in the next release. (boolean value)
+#enable_unsupported_driver = false
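+
+# Illustrative example (not part of the generated sample): scheduler filter
+# and goodness equations; the expressions are placeholders that only show the
+# expected syntax.
+#filter_function = "volume.size < 10"
+#goodness_function = "(capabilities.total_volumes < 250) * 100"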
+
+# Availability zone for this volume backend. If not set, the
+# storage_availability_zone option value is used as the default for all
+# backends. (string value)
+#backend_availability_zone =
+
+# Volume backup directory (string value)
+#backup_dir = /opt/backups
+
+# The maximum number of times to rescan iSER target to find volume (integer
+# value)
+#num_iser_scan_tries = 3
+
+# Prefix for iSER volumes (string value)
+#iser_target_prefix = iqn.2010-10.org.openstack:
+
+# The IP address that the iSER daemon is listening on (string value)
+#iser_ip_address = $my_ip
+
+# The port that the iSER daemon is listening on (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#iser_port = 3260
+
+# The name of the iSER target user-land tool to use (string value)
+#iser_helper = tgtadm
+
+# List of all available devices (list value)
+#available_devices =
+
+# IP address/hostname of Blockbridge API. (string value)
+#blockbridge_api_host =
+
+# Override HTTPS port to connect to Blockbridge API server. (integer value)
+#blockbridge_api_port =
+
+# Blockbridge API authentication scheme (token or password) (string value)
+# Allowed values: token, password
+#blockbridge_auth_scheme = token
+
+# Blockbridge API token (for auth scheme 'token') (string value)
+#blockbridge_auth_token =
+
+# Blockbridge API user (for auth scheme 'password') (string value)
+#blockbridge_auth_user =
+
+# Blockbridge API password (for auth scheme 'password') (string value)
+#blockbridge_auth_password =
+
+# Defines the set of exposed pools and their associated backend query strings
+# (dict value)
+#blockbridge_pools = OpenStack:+openstack
+
+# Default pool name if unspecified. (string value)
+#blockbridge_default_pool =
+
+# RPC port to connect to Coho Data MicroArray (integer value)
+#coho_rpc_port = 2049
+
+# Hostname for the CoprHD Instance (string value)
+#coprhd_hostname =
+
+# Port for the CoprHD Instance (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#coprhd_port = 4443
+
+# Username for accessing the CoprHD Instance (string value)
+#coprhd_username =
+
+# Password for accessing the CoprHD Instance (string value)
+#coprhd_password =
+
+# Tenant to utilize within the CoprHD Instance (string value)
+#coprhd_tenant =
+
+# Project to utilize within the CoprHD Instance (string value)
+#coprhd_project =
+
+# Virtual Array to utilize within the CoprHD Instance (string value)
+#coprhd_varray =
+
+# True | False to indicate if the storage array in CoprHD is VMAX or VPLEX
+# (boolean value)
+#coprhd_emulate_snapshot = false
+
+# Rest Gateway IP or FQDN for Scaleio (string value)
+#coprhd_scaleio_rest_gateway_host = None
+
+# Rest Gateway Port for Scaleio (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#coprhd_scaleio_rest_gateway_port = 4984
+
+# Username for Rest Gateway (string value)
+#coprhd_scaleio_rest_server_username =
+
+# Rest Gateway Password (string value)
+#coprhd_scaleio_rest_server_password =
+
+# Verify server certificate (boolean value)
+#scaleio_verify_server_certificate = false
+
+# Server certificate path (string value)
+#scaleio_server_certificate_path =
+
+# Datera API port. (string value)
+#datera_api_port = 7717
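+
+# Illustrative example (not part of the generated sample): token-based
+# authentication against the blockbridge_* options above; the host and token
+# are placeholders.
+#blockbridge_api_host = bb-api.example.com
+#blockbridge_auth_scheme = token
+#blockbridge_auth_token = BB_API_TOKEN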
+
+# DEPRECATED: Datera API version. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#datera_api_version = 2
+
+# Timeout for HTTP 503 retry messages (integer value)
+#datera_503_timeout = 120
+
+# Interval between 503 retries (integer value)
+#datera_503_interval = 5
+
+# True to set function arg and return logging (boolean value)
+#datera_debug = false
+
+# ONLY FOR DEBUG/TESTING PURPOSES
+# True to set replica_count to 1 (boolean value)
+#datera_debug_replica_count_override = false
+
+# If set to 'Map' --> OpenStack project ID will be mapped implicitly to Datera
+# tenant ID
+# If set to 'None' --> Datera tenant ID will not be used during volume
+# provisioning
+# If set to anything else --> Datera tenant ID will be the provided value
+# (string value)
+#datera_tenant_id =
+
+# Set to True to disable profiling in the Datera driver (boolean value)
+#datera_disable_profiler = false
+
+# Group name to use for creating volumes. Defaults to "group-0". (string value)
+#eqlx_group_name = group-0
+
+# Maximum retry count for reconnection. Default is 5. (integer value)
+# Minimum value: 0
+#eqlx_cli_max_retries = 5
+
+# Pool in which volumes will be created. Defaults to "default". (string value)
+#eqlx_pool = default
+
+# Storage Center System Serial Number (integer value)
+#dell_sc_ssn = 64702
+
+# Dell API port (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#dell_sc_api_port = 3033
+
+# Name of the server folder to use on the Storage Center (string value)
+#dell_sc_server_folder = openstack
+
+# Name of the volume folder to use on the Storage Center (string value)
+#dell_sc_volume_folder = openstack
+
+# Enable HTTPS SC certificate verification (boolean value)
+#dell_sc_verify_cert = false
+
+# IP address of secondary DSM controller (string value)
+#secondary_san_ip =
+
+# Secondary DSM user name (string value)
+#secondary_san_login = Admin
+
+# Secondary DSM user password (string value)
+#secondary_san_password =
+
+# Secondary Dell API port (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#secondary_sc_api_port = 3033
+
+# Domain IP to be excluded from iSCSI returns. (IP address value)
+#excluded_domain_ip =
+
+# Server OS type to use when creating a new server on the Storage Center.
+# (string value)
+#dell_server_os = Red Hat Linux 6.x
+
+# REST server port. (string value)
+#sio_rest_server_port = 443
+
+# Verify server certificate. (boolean value)
+#sio_verify_server_certificate = false
+
+# Server certificate path. (string value)
+#sio_server_certificate_path =
+
+# Round up volume capacity. (boolean value)
+#sio_round_volume_capacity = true
+
+# Unmap volume before deletion. (boolean value)
+#sio_unmap_volume_before_deletion = false
+
+# Storage Pools. (string value)
+#sio_storage_pools =
+
+# DEPRECATED: Protection Domain ID. (string value)
+# This option is deprecated for removal since Pike.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by sio_storage_pools option
+#sio_protection_domain_id =
+
+# DEPRECATED: Protection Domain name. (string value)
+# This option is deprecated for removal since Pike.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by sio_storage_pools option
+#sio_protection_domain_name =
+
+# DEPRECATED: Storage Pool name. (string value)
+# This option is deprecated for removal since Pike.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by sio_storage_pools option
+#sio_storage_pool_name =
+
+# DEPRECATED: Storage Pool ID. (string value)
+# This option is deprecated for removal since Pike.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by sio_storage_pools option
+#sio_storage_pool_id =
+
+# ScaleIO API version. (string value)
+#sio_server_api_version =
+
+# max_over_subscription_ratio setting for the ScaleIO driver. This replaces the
+# general max_over_subscription_ratio which has no effect in this driver.
+# Maximum value allowed for ScaleIO is 10.0. (floating point value)
+#sio_max_over_subscription_ratio = 10.0
+
+# A comma-separated list of storage pool names to be used. (list value)
+#unity_storage_pool_names =
+
+# A comma-separated list of iSCSI or FC ports to be used. Each port can be
+# Unix-style glob expressions. (list value)
+#unity_io_ports =
+
+# Use this file for cinder emc plugin config data. (string value)
+#cinder_dell_emc_config_file = /etc/cinder/cinder_dell_emc_config.xml
+
+# Use this value to specify the length of the interval in seconds. (string
+# value)
+#interval = 3
+
+# Use this value to specify the number of retries. (string value)
+#retries = 200
+
+# Use this value to enable the initiator_check. (boolean value)
+#initiator_check = false
+
+# VNX authentication scope type. By default, the value is global. (string
+# value)
+#storage_vnx_authentication_type = global
+
+# Directory path that contains the VNX security file. Make sure the security
+# file is generated first. (string value)
+#storage_vnx_security_file_dir =
+
+# Naviseccli Path. (string value)
+#naviseccli_path =
+
+# Comma-separated list of storage pool names to be used. (list value)
+#storage_vnx_pool_names =
+
+# Default timeout for CLI operations in minutes. For example, LUN migration is
+# a typical long running operation, which depends on the LUN size and the load
+# of the array. An upper bound in the specific deployment can be set to avoid
+# unnecessary long wait. By default, it is 365 days long. (integer value)
+#default_timeout = 31536000
+
+# Default max number of LUNs in a storage group. By default, the value is 255.
+# (integer value)
+#max_luns_per_storage_group = 255
+
+# To destroy storage group when the last LUN is removed from it. By default,
+# the value is False. (boolean value)
+#destroy_empty_storage_group = false
+
+# Mapping between hostname and its iSCSI initiator IP addresses. (string value)
+#iscsi_initiators =
+
+# Comma separated iSCSI or FC ports to be used in Nova or Cinder. (list value)
+#io_port_list =
+
+# Automatically register initiators. By default, the value is False. (boolean
+# value)
+#initiator_auto_registration = false
+
+# Automatically deregister initiators after the related storage group is
+# destroyed. By default, the value is False. (boolean value)
+#initiator_auto_deregistration = false
+
+# Report free_capacity_gb as 0 when the limit to maximum number of pool LUNs is
+# reached. By default, the value is False. (boolean value)
+#check_max_pool_luns_threshold = false
+
+# Delete a LUN even if it is in Storage Groups. By default, the value is False.
+# (boolean value)
+#force_delete_lun_in_storagegroup = false
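+
+# Illustrative example (not part of the generated sample): VNX pool selection
+# with automatic initiator registration; the pool names are placeholders.
+#storage_vnx_pool_names = Pool_01,Pool_02
+#storage_vnx_authentication_type = global
+#initiator_auto_registration = true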
+
+# Force LUN creation even if the full threshold of pool is reached. By default,
+# the value is False. (boolean value)
+#ignore_pool_full_threshold = false
+
+# XMS cluster id in multi-cluster environment (string value)
+#xtremio_cluster_name =
+
+# Number of retries in case array is busy (integer value)
+#xtremio_array_busy_retry_count = 5
+
+# Interval between retries in case array is busy (integer value)
+#xtremio_array_busy_retry_interval = 5
+
+# Number of volumes created from each cached glance image (integer value)
+#xtremio_volumes_per_glance_cache = 100
+
+# The IP of DMS client socket server (IP address value)
+#disco_client = 127.0.0.1
+
+# The port to connect DMS client socket server (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#disco_client_port = 9898
+
+# DEPRECATED: Path to the wsdl file to communicate with DISCO request manager
+# (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#disco_wsdl_path = /etc/cinder/DISCOService.wsdl
+
+# The IP address of the REST server (IP address value)
+# Deprecated group/name - [DEFAULT]/rest_ip
+#disco_rest_ip =
+
+# Use soap client or rest client for communicating with DISCO. Possible values
+# are "soap" or "rest". (string value)
+# Allowed values: soap, rest
+# Deprecated group/name - [DEFAULT]/choice_client
+#disco_choice_client =
+
+# The port of DISCO source API (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#disco_src_api_port = 8080
+
+# Prefix prepended to the volume name to differentiate DISCO volumes created
+# through OpenStack from other volumes (string value)
+# Deprecated group/name - [backend_defaults]/volume_name_prefix
+#disco_volume_name_prefix = openstack-
+
+# How long we check whether a snapshot is finished before we give up (integer
+# value)
+# Deprecated group/name - [backend_defaults]/snapshot_check_timeout
+#disco_snapshot_check_timeout = 3600
+
+# How long we check whether a restore is finished before we give up (integer
+# value)
+# Deprecated group/name - [backend_defaults]/restore_check_timeout
+#disco_restore_check_timeout = 3600
+
+# How long we check whether a clone is finished before we give up (integer
+# value)
+# Deprecated group/name - [backend_defaults]/clone_check_timeout
+#disco_clone_check_timeout = 3600
+
+# How long we wait before retrying to get an item detail (integer value)
+# Deprecated group/name - [backend_defaults]/retry_interval
+#disco_retry_interval = 1
+
+# Number of nodes that should replicate the data. (integer value)
+#drbdmanage_redundancy = 1
+
+# Resource deployment completion wait policy. (string value)
+#drbdmanage_resource_policy = {"ratio": "0.51", "timeout": "60"}
+
+# Disk options to set on new resources. See http://www.drbd.org/en/doc/users-
+# guide-90/re-drbdconf for all the details. (string value)
+#drbdmanage_disk_options = {"c-min-rate": "4M"}
+
+# Net options to set on new resources. See http://www.drbd.org/en/doc/users-
+# guide-90/re-drbdconf for all the details. (string value)
+#drbdmanage_net_options = {"connect-int": "4", "allow-two-primaries": "yes", "ko-count": "30", "max-buffers": "20000", "ping-timeout": "100"}
+
+# Resource options to set on new resources. See http://www.drbd.org/en/doc
+# /users-guide-90/re-drbdconf for all the details. (string value)
+#drbdmanage_resource_options = {"auto-promote-timeout": "300"}
+
+# Snapshot completion wait policy. (string value)
+#drbdmanage_snapshot_policy = {"count": "1", "timeout": "60"}
+
+# Volume resize completion wait policy. (string value)
+#drbdmanage_resize_policy = {"timeout": "60"}
+
+# Resource deployment completion wait plugin. (string value)
+#drbdmanage_resource_plugin = drbdmanage.plugins.plugins.wait_for.WaitForResource
+
+# Snapshot completion wait plugin. (string value)
+#drbdmanage_snapshot_plugin = drbdmanage.plugins.plugins.wait_for.WaitForSnapshot
+
+# Volume resize completion wait plugin. (string value)
+#drbdmanage_resize_plugin = drbdmanage.plugins.plugins.wait_for.WaitForVolumeSize
+
+# If set, the c-vol node will receive a usable
+# /dev/drbdX device, even if the actual data is stored on
+# other nodes only.
+# This is useful for debugging, maintenance, and to be
+# able to do the iSCSI export from the c-vol node. (boolean
+# value)
+#drbdmanage_devs_on_controller = true
+
+# DEPRECATED: FSS pool id in which FalconStor volumes are stored. (integer
+# value)
+#fss_pool =
+
+# FSS pool id list in which FalconStor volumes are stored. If you have only one
+# pool, use A:<pool-id>. You can also have up to two storage pools, P for
+# primary and O for all supporting devices. The usage is
+# P:<primary-pool-id>,O:<other-pool-id> (dict value)
+# Deprecated group/name - [backend_defaults]/fss_pool
+#fss_pools =
+
+# Specifies FSS secondary management IP to be used if san_ip is invalid or
+# becomes inaccessible. (string value)
+#fss_san_secondary_ip =
+
+# Enable HTTP debugging to FSS (boolean value)
+#fss_debug = false
+
+# FSS additional retry list, separated by ; (string value)
+#additional_retry_list =
+
+# Config file for cinder eternus_dx volume driver (string value)
+#cinder_eternus_config_file = /etc/cinder/cinder_fujitsu_eternus_dx.xml
+
+# The flag of thin storage allocation. (boolean value)
+#dsware_isthin = false
+
+# Fusionstorage manager ip addr for cinder-volume. (string value)
+#dsware_manager =
+
+# Fusionstorage agent ip addr range. (string value)
+#fusionstorageagent =
+
+# Pool type, like sata-2copy. (string value)
+#pool_type = default
+
+# Pool id permit to use. (list value)
+#pool_id_filter =
+
+# Create clone volume timeout. (integer value)
+#clone_volume_timeout = 680
+
+# Space network name to use for data transfer (string value)
+#hgst_net = Net 1 (IPv4)
+
+# Comma-separated list of Space storage servers:devices.
+# e.g. os1_stor:gbd0,os2_stor:gbd0 (string value)
+#hgst_storage_servers = os:gbd0
+
+# Should spaces be redundantly stored (1/0) (string value)
+#hgst_redundancy = 0
+
+# User to own created spaces (string value)
+#hgst_space_user = root
+
+# Group to own created spaces (string value)
+#hgst_space_group = disk
+
+# UNIX mode for created spaces (string value)
+#hgst_space_mode = 0600
+
+# Serial number of storage system (string value)
+#hitachi_serial_number =
+
+# Name of an array unit (string value)
+#hitachi_unit_name =
+
+# Pool ID of storage system (integer value)
+#hitachi_pool_id =
+
+# Thin pool ID of storage system (integer value)
+#hitachi_thin_pool_id =
+
+# Range of logical device of storage system (string value)
+#hitachi_ldev_range =
+
+# Default copy method of storage system (string value)
+#hitachi_default_copy_method = FULL
+
+# Copy speed of storage system (integer value)
+#hitachi_copy_speed = 3
+
+# Interval to check copy (integer value)
+#hitachi_copy_check_interval = 3
+
+# Interval to check copy asynchronously (integer value)
+#hitachi_async_copy_check_interval = 10
+
+# Control port names for HostGroup or iSCSI Target (string value)
+#hitachi_target_ports =
+
+# Range of group number (string value)
+#hitachi_group_range =
+
+# Request for creating HostGroup or iSCSI Target (boolean value)
+#hitachi_group_request = false
+
+# Request for FC Zone creating HostGroup (boolean value)
+#hitachi_zoning_request = false
+
+# Instance numbers for HORCM (string value)
+#hitachi_horcm_numbers = 200,201
+
+# Username of storage system for HORCM (string value)
+#hitachi_horcm_user =
+
+# Password of storage system for HORCM (string value)
+#hitachi_horcm_password =
+
+# Add to HORCM configuration (boolean value)
+#hitachi_horcm_add_conf = true
+
+# Timeout until a resource lock is released, in seconds. The value must be
+# between 0 and 7200. (integer value)
+#hitachi_horcm_resource_lock_timeout = 600
+
+# Add CHAP user (boolean value)
+#hitachi_add_chap_user = false
+
+# iSCSI authentication method (string value)
+#hitachi_auth_method =
+
+# iSCSI authentication username (string value)
+#hitachi_auth_user = HBSD-CHAP-user
+
+# iSCSI authentication password (string value)
+#hitachi_auth_password = HBSD-CHAP-password
+
+# DEPRECATED: Legacy configuration file for HNAS NFS Cinder plugin. This is not
+# needed if you fill in all configuration options in cinder.conf (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#hds_hnas_nfs_config_file = /opt/hds/hnas/cinder_nfs_conf.xml
+
+# Management IP address of HNAS. This can be any IP in the admin address on
+# HNAS or the SMU IP. (IP address value)
+#hnas_mgmt_ip0 =
+
+# Command to communicate to HNAS. (string value)
+#hnas_ssc_cmd = ssc
+
+# HNAS username. (string value)
+#hnas_username =
+
+# HNAS password. (string value)
+#hnas_password =
+
+# Port to be used for SSH authentication. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#hnas_ssh_port = 22
+
+# Path to the SSH private key used to authenticate in HNAS SMU. (string value)
+#hnas_ssh_private_key =
+
+# The IP of the HNAS cluster admin. Required only for HNAS multi-cluster
+# setups. (string value)
+#hnas_cluster_admin_ip0 =
+
+# Service 0 pool name (string value)
+# Deprecated group/name - [backend_defaults]/hnas_svc0_volume_type
+#hnas_svc0_pool_name =
+
+# Service 0 HDP (string value)
+#hnas_svc0_hdp =
+
+# Service 1 pool name (string value)
+# Deprecated group/name - [backend_defaults]/hnas_svc1_volume_type
+#hnas_svc1_pool_name =
+
+# Service 1 HDP (string value)
+#hnas_svc1_hdp =
+
+# Service 2 pool name (string value)
+# Deprecated group/name - [backend_defaults]/hnas_svc2_volume_type
+#hnas_svc2_pool_name =
+
+# Service 2 HDP (string value)
+#hnas_svc2_hdp =
+
+# Service 3 pool name (string value)
+# Deprecated group/name - [backend_defaults]/hnas_svc3_volume_type
+#hnas_svc3_pool_name =
+
+# Service 3 HDP (string value)
+#hnas_svc3_hdp =
+
+# Product number of the storage system. (string value)
+#vsp_storage_id =
+
+# Pool number or pool name of the DP pool. (string value)
+#vsp_pool =
+
+# Pool number or pool name of the Thin Image pool. (string value)
+#vsp_thin_pool =
+
+# Range of the LDEV numbers in the format of 'xxxx-yyyy' that can be used by
+# the driver. Values can be in decimal format (e.g. 1000) or in colon-separated
+# hexadecimal format (e.g. 00:03:E8). (string value)
+#vsp_ldev_range =
+
+# Method of volume copy. FULL indicates full data copy by Shadow Image and THIN
+# indicates differential data copy by Thin Image. (string value)
+# Allowed values: FULL, THIN
+#vsp_default_copy_method = FULL
+
+# Speed at which data is copied by Shadow Image. 1 or 2 indicates low speed, 3
+# indicates middle speed, and a value between 4 and 15 indicates high speed.
+# (integer value)
+# Minimum value: 1
+# Maximum value: 15
+#vsp_copy_speed = 3
+
+# Interval in seconds at which volume pair synchronization status is checked
+# when volume pairs are created. (integer value)
+# Minimum value: 1
+# Maximum value: 600
+#vsp_copy_check_interval = 3
+
+# Interval in seconds at which volume pair synchronization status is checked
+# when volume pairs are deleted. (integer value)
+# Minimum value: 1
+# Maximum value: 600
+#vsp_async_copy_check_interval = 10
+
+# IDs of the storage ports used to attach volumes to the controller node. To
+# specify multiple ports, connect them by commas (e.g. CL1-A,CL2-A). (list
+# value)
+#vsp_target_ports =
+
+# IDs of the storage ports used to attach volumes to compute nodes. To specify
+# multiple ports, connect them by commas (e.g. CL1-A,CL2-A). (list value)
+#vsp_compute_target_ports =
+
+# If True, the driver will create host groups or iSCSI targets on storage ports
+# as needed. (boolean value)
+#vsp_group_request = false
+
+# If True, the driver will configure FC zoning between the server and the
+# storage system provided that FC zoning manager is enabled. (boolean value)
+#vsp_zoning_request = false
+
+# Command Control Interface instance numbers in the format of 'xxx,yyy'. The
+# second one is for Shadow Image operation and the first one is for other
+# purposes. (list value)
+#vsp_horcm_numbers = 200,201
+
+# Name of the user on the storage system. (string value)
+#vsp_horcm_user =
+
+# Password corresponding to vsp_horcm_user. (string value)
+#vsp_horcm_password =
+
+# If True, the driver will create or update the Command Control Interface
+# configuration file as needed. (boolean value)
+#vsp_horcm_add_conf = true
+
+# IDs of the storage ports used to copy volumes by Shadow Image or Thin Image.
+# To specify multiple ports, connect them by commas (e.g. CL1-A,CL2-A).
+# (list value)
+#vsp_horcm_pair_target_ports =
+
+# If True, CHAP authentication will be applied to communication between hosts
+# and any of the iSCSI targets on the storage ports. (boolean value)
+#vsp_use_chap_auth = false
+
+# Name of the user used for CHAP authentication performed in communication
+# between hosts and iSCSI targets on the storage ports. (string value)
+#vsp_auth_user =
+
+# Password corresponding to vsp_auth_user. (string value)
+#vsp_auth_password =
+
+# 3PAR WSAPI Server Url like https://<3par ip>:8080/api/v1 (string value)
+# Deprecated group/name - [backend_defaults]/hp3par_api_url
+#hpe3par_api_url =
+
+# 3PAR username with the 'edit' role (string value)
+# Deprecated group/name - [backend_defaults]/hp3par_username
+#hpe3par_username =
+
+# 3PAR password for the user specified in hpe3par_username (string value)
+# Deprecated group/name - [backend_defaults]/hp3par_password
+#hpe3par_password =
+
+# List of the CPG(s) to use for volume creation (list value)
+# Deprecated group/name - [backend_defaults]/hp3par_cpg
+#hpe3par_cpg = OpenStack
+
+# The CPG to use for Snapshots for volumes. If empty, the userCPG will be used.
+# (string value)
+# Deprecated group/name - [backend_defaults]/hp3par_cpg_snap
+#hpe3par_cpg_snap =
+
+# The time in hours to retain a snapshot. You can't delete it before this
+# expires. (string value)
+# Deprecated group/name - [backend_defaults]/hp3par_snapshot_retention
+#hpe3par_snapshot_retention =
+
+# The time in hours when a snapshot expires and is deleted. This must be larger
+# than the retention time. (string value)
+# Deprecated group/name - [backend_defaults]/hp3par_snapshot_expiration
+#hpe3par_snapshot_expiration =
+
+# Enable HTTP debugging to 3PAR (boolean value)
+# Deprecated group/name - [backend_defaults]/hp3par_debug
+#hpe3par_debug = false
+
+# List of target iSCSI addresses to use. (list value)
+# Deprecated group/name - [backend_defaults]/hp3par_iscsi_ips
+#hpe3par_iscsi_ips =
+
+# Enable CHAP authentication for iSCSI connections. (boolean value)
+# Deprecated group/name - [backend_defaults]/hp3par_iscsi_chap_enabled
+#hpe3par_iscsi_chap_enabled = false
+
+# HPE LeftHand WSAPI Server Url like https://<LeftHand ip>:8081/lhos (uri
+# value)
+# Deprecated group/name - [backend_defaults]/hplefthand_api_url
+#hpelefthand_api_url =
+
+# HPE LeftHand Super user username (string value)
+# Deprecated group/name - [backend_defaults]/hplefthand_username
+#hpelefthand_username =
+
+# HPE LeftHand Super user password (string value)
+# Deprecated group/name - [backend_defaults]/hplefthand_password
+#hpelefthand_password =
+
+# HPE LeftHand cluster name (string value)
+# Deprecated group/name - [backend_defaults]/hplefthand_clustername
+#hpelefthand_clustername =
+
+# Configure CHAP authentication for iSCSI connections (Default: Disabled)
+# (boolean value)
+# Deprecated group/name - [backend_defaults]/hplefthand_iscsi_chap_enabled
+#hpelefthand_iscsi_chap_enabled = false
+
+# Enable HTTP debugging to LeftHand (boolean value)
+# Deprecated group/name - [backend_defaults]/hplefthand_debug
+#hpelefthand_debug = false
+
+# Port number of SSH service. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#hpelefthand_ssh_port = 16022
+
+# The configuration file for the Cinder Huawei driver. (string value)
+#cinder_huawei_conf_file = /etc/cinder/cinder_huawei_conf.xml
+
+# The remote device hypermetro will use. (string value)
+#hypermetro_devices =
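+
+# Illustrative example (not part of the generated sample): a minimal HPE 3PAR
+# iSCSI setup; the URL, credentials, CPG and IP addresses are placeholders.
+#hpe3par_api_url = https://3par.example.com:8080/api/v1
+#hpe3par_username = edituser
+#hpe3par_password = 3PAR_PASS
+#hpe3par_cpg = OpenStackCPG
+#hpe3par_iscsi_ips = 10.0.0.11,10.0.0.12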
+
+# The remote metro device san user. (string value)
+#metro_san_user =
+
+# The remote metro device san password. (string value)
+#metro_san_password =
+
+# The remote metro device domain name. (string value)
+#metro_domain_name =
+
+# The remote metro device request url. (string value)
+#metro_san_address =
+
+# The remote metro device pool names. (string value)
+#metro_storage_pools =
+
+# Connection protocol should be FC. (Default is FC.) (string value)
+#flashsystem_connection_protocol = FC
+
+# Allows vdisk to multi host mapping. (Default is True) (boolean value)
+#flashsystem_multihostmap_enabled = true
+
+# DEPRECATED: This option no longer has any effect. It is deprecated and will
+# be removed in the next release. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#flashsystem_multipath_enabled = false
+
+# Default iSCSI Port ID of FlashSystem. (Default port is 0.) (integer value)
+#flashsystem_iscsi_portid = 0
+
+# Specifies the path of the GPFS directory where Block Storage volume and
+# snapshot files are stored. (string value)
+#gpfs_mount_point_base =
+
+# Specifies the path of the Image service repository in GPFS. Leave undefined
+# if not storing images in GPFS. (string value)
+#gpfs_images_dir =
+
+# Specifies the type of image copy to be used. Set this when the Image service
+# repository also uses GPFS so that image files can be transferred efficiently
+# from the Image service to the Block Storage service. There are two valid
+# values: "copy" specifies that a full copy of the image is made;
+# "copy_on_write" specifies that copy-on-write optimization strategy is used
+# and unmodified blocks of the image file are shared efficiently. (string
+# value)
+# Allowed values: copy, copy_on_write, <None>
+#gpfs_images_share_mode =
+
+# Specifies an upper limit on the number of indirections required to reach a
+# specific block due to snapshots or clones. A lengthy chain of copy-on-write
+# snapshots or clones can have a negative impact on performance, but improves
+# space utilization. 0 indicates unlimited clone depth. (integer value)
+#gpfs_max_clone_depth = 0
+
+# Specifies that volumes are created as sparse files which initially consume no
+# space. If set to False, the volume is created as a fully allocated file, in
+# which case, creation may take a significantly longer time. (boolean value)
+#gpfs_sparse_volumes = true
+
+# Specifies the storage pool that volumes are assigned to. By default, the
+# system storage pool is used. (string value)
+#gpfs_storage_pool = system
+
+# Comma-separated list of IP addresses or hostnames of GPFS nodes. (list value)
+#gpfs_hosts =
+
+# Username for GPFS nodes. (string value)
+#gpfs_user_login = root
+
+# Password for GPFS node user. (string value)
+#gpfs_user_password =
+
+# Filename of private key to use for SSH authentication. (string value)
+#gpfs_private_key =
+
+# SSH port to use. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#gpfs_ssh_port = 22
+
+# File containing SSH host keys for the gpfs nodes with which driver needs to
+# communicate. Default=$state_path/ssh_known_hosts (string value)
+#gpfs_hosts_key_file = $state_path/ssh_known_hosts
+
+# Option to enable strict gpfs host key checking while connecting to gpfs
+# nodes. Default=False (boolean value)
+#gpfs_strict_host_key_policy = false
+
+# Mapping between IODevice address and unit address. (string value)
+#ds8k_devadd_unitadd_mapping =
+
+# Set the first two digits of SSID. (string value)
+#ds8k_ssid_prefix = FF
+
+# Reserve LSSs for consistency group. (string value)
+#lss_range_for_cg =
+
+# Set to zLinux if your OpenStack version is prior to Liberty and you're
+# connecting to zLinux systems. Otherwise set to auto. Valid values for this
+# parameter are: 'auto', 'AMDLinuxRHEL', 'AMDLinuxSuse', 'AppleOSX', 'Fujitsu',
+# 'Hp', 'HpTru64', 'HpVms', 'LinuxDT', 'LinuxRF', 'LinuxRHEL', 'LinuxSuse',
+# 'Novell', 'SGI', 'SVC', 'SanFsAIX', 'SanFsLinux', 'Sun', 'VMWare', 'Win2000',
+# 'Win2003', 'Win2008', 'Win2012', 'iLinux', 'nSeries', 'pLinux', 'pSeries',
+# 'pSeriesPowerswap', 'zLinux', 'iSeries'. (string value)
+#ds8k_host_type = auto
+
+# Proxy driver that connects to the IBM Storage Array (string value)
+#proxy = cinder.volume.drivers.ibm.ibm_storage.proxy.IBMStorageProxy
+
+# Connection type to the IBM Storage Array (string value)
+# Allowed values: fibre_channel, iscsi
+#connection_type = iscsi
+
+# CHAP authentication mode, effective only for iscsi (disabled|enabled) (string
+# value)
+# Allowed values: disabled, enabled
+#chap = disabled
+
+# List of Management IP addresses (separated by commas) (string value)
+#management_ips =
+
+# Comma separated list of storage system storage pools for volumes. (list
+# value)
+#storwize_svc_volpool_name = volpool
+
+# Storage system space-efficiency parameter for volumes (percentage) (integer
+# value)
+# Minimum value: -1
+# Maximum value: 100
+#storwize_svc_vol_rsize = 2
+
+# Storage system threshold for volume capacity warnings (percentage) (integer
+# value)
+# Minimum value: -1
+# Maximum value: 100
+#storwize_svc_vol_warning = 0
+
+# Storage system autoexpand parameter for volumes (True/False) (boolean value)
+#storwize_svc_vol_autoexpand = true
+
+# Storage system grain size parameter for volumes (32/64/128/256) (integer
+# value)
+#storwize_svc_vol_grainsize = 256
+
+# Storage system compression option for volumes (boolean value)
+#storwize_svc_vol_compression = false
+
+# Enable Easy Tier for volumes (boolean value)
+#storwize_svc_vol_easytier = true
+
+# The I/O group in which to allocate volumes. It can be a comma-separated list
+# in which case the driver will select an io_group based on least number of
+# volumes associated with the io_group. (string value)
+#storwize_svc_vol_iogrp = 0
+
+# Maximum number of seconds to wait for FlashCopy to be prepared. (integer
+# value)
+# Minimum value: 1
+# Maximum value: 600
+#storwize_svc_flashcopy_timeout = 120
+
+# DEPRECATED: This option no longer has any effect. It is deprecated and will
+# be removed in the next release. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#storwize_svc_multihostmap_enabled = true
+
+# Allow tenants to specify QOS on create (boolean value)
+#storwize_svc_allow_tenant_qos = false
+
+# If operating in stretched cluster mode, specify the name of the pool in which
+# mirrored copies are stored. Example: "pool2" (string value)
+#storwize_svc_stretched_cluster_partner =
+
+# Specifies secondary management IP or hostname to be used if san_ip is invalid
+# or becomes inaccessible. (string value)
+#storwize_san_secondary_ip =
+
+# Specifies that the volume not be formatted during creation. (boolean value)
+#storwize_svc_vol_nofmtdisk = false
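+
+# Illustrative example (not part of the generated sample): a Storwize/SVC
+# backend selecting a pool and keeping iSCSI CHAP enabled; the pool name is a
+# placeholder.
+#storwize_svc_volpool_name = cinder_pool1
+#storwize_svc_vol_iogrp = 0
+#storwize_svc_iscsi_chap_enabled = true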
+
+# Specifies the Storwize FlashCopy copy rate to be used when creating a full
+# volume copy. The default rate is 50, and the valid rates are 1-100. (integer
+# value)
+# Minimum value: 1
+# Maximum value: 100
+#storwize_svc_flashcopy_rate = 50
+
+# Specifies the name of the pool in which mirrored copy is stored. Example:
+# "pool2" (string value)
+#storwize_svc_mirror_pool =
+
+# This defines an optional cycle period that applies to Global Mirror
+# relationships with a cycling mode of multi. A Global Mirror relationship
+# using the multi cycling_mode performs a complete cycle at most once each
+# period. The default is 300 seconds, and the valid seconds are 60-86400.
+# (integer value)
+# Minimum value: 60
+# Maximum value: 86400
+#cycle_period_seconds = 300
+
+# Connect with multipath (FC only; iSCSI multipath is controlled by Nova)
+# (boolean value)
+#storwize_svc_multipath_enabled = false
+
+# Configure CHAP authentication for iSCSI connections (Default: Enabled)
+# (boolean value)
+#storwize_svc_iscsi_chap_enabled = true
+
+# Name of the pool from which volumes are allocated (string value)
+#infinidat_pool_name =
+
+# Protocol for transferring data between host and storage back-end. (string
+# value)
+# Allowed values: iscsi, fc
+#infinidat_storage_protocol = fc
+
+# List of names of network spaces to use for iSCSI connectivity (list value)
+#infinidat_iscsi_netspaces =
+
+# Specifies whether to turn on compression for newly created volumes. (boolean
+# value)
+#infinidat_use_compression = false
+
+# Infortrend raid pool name list, separated by commas. (string value)
+#infortrend_pools_name =
+
+# The Infortrend CLI absolute path. By default, it is at
+# /opt/bin/Infortrend/raidcmd_ESDS10.jar (string value)
+#infortrend_cli_path = /opt/bin/Infortrend/raidcmd_ESDS10.jar
+
+# Maximum retry time for cli. Default is 5. (integer value)
+#infortrend_cli_max_retries = 5
+
+# Default timeout for CLI copy operations in minutes. Support: migrate volume,
+# create cloned volume and create volume from snapshot. By default, it is 30
+# minutes. (integer value)
+#infortrend_cli_timeout = 30
+
+# Infortrend raid channel ID list on Slot A for OpenStack usage, separated by
+# commas. By default, it is channels 0~7. (string value)
+#infortrend_slots_a_channels_id = 0,1,2,3,4,5,6,7
+
+# Infortrend raid channel ID list on Slot B for OpenStack usage, separated by
+# commas. By default, it is channels 0~7. (string value)
+#infortrend_slots_b_channels_id = 0,1,2,3,4,5,6,7
+
+# Let the volume use specific provisioning. By default, it is the full
+# provisioning. The supported options are full or thin. (string value)
+#infortrend_provisioning = full
+
+# Let the volume use specific tiering level. By default, it is the level 0. The
+# supported levels are 0,2,3,4. (string value)
+#infortrend_tiering = 0
+
+# The K2 driver will calculate max_oversubscription_ratio when this option is
+# set to True. (boolean value)
+#auto_calc_max_oversubscription_ratio = false
+
+# Pool or Vdisk name to use for volume creation. (string value)
+#lenovo_backend_name = A
+
+# linear (for VDisk) or virtual (for Pool). (string value)
+# Allowed values: linear, virtual
+#lenovo_backend_type = virtual
+
+# Lenovo api interface protocol. (string value)
+# Allowed values: http, https
+#lenovo_api_protocol = https
+
+# Whether to verify Lenovo array SSL certificate. (boolean value)
+#lenovo_verify_certificate = false
+
+# Lenovo array SSL certificate path. (string value)
+#lenovo_verify_certificate_path =
+
+# List of comma-separated target iSCSI IP addresses.
(list value) +#lenovo_iscsi_ips = + +# Name for the VG that will contain exported volumes (string value) +#volume_group = cinder-volumes + +# If >0, create LVs with multiple mirrors. Note that this requires lvm_mirrors +# + 2 PVs with available space (integer value) +#lvm_mirrors = 0 + +# Type of LVM volumes to deploy; (default, thin, or auto). Auto defaults to +# thin if thin is supported. (string value) +# Allowed values: default, thin, auto +#lvm_type = auto + +# LVM conf file to use for the LVM driver in Cinder; this setting is ignored if +# the specified file does not exist (You can also specify 'None' to not use a +# conf file even if one exists). (string value) +#lvm_conf_file = /etc/cinder/lvm.conf + +# max_over_subscription_ratio setting for the LVM driver. This takes precedence +# over the general max_over_subscription_ratio by default. If set to None, the +# general max_over_subscription_ratio is used. (floating point value) +#lvm_max_over_subscription_ratio = 1.0 + +# Suppress leaked file descriptor warnings in LVM commands. (boolean value) +#lvm_suppress_fd_warnings = false + +# The storage family type used on the storage system; valid values are +# ontap_7mode for using Data ONTAP operating in 7-Mode, ontap_cluster for using +# clustered Data ONTAP, or eseries for using E-Series. (string value) +# Allowed values: ontap_7mode, ontap_cluster, eseries +#netapp_storage_family = ontap_cluster + +# The storage protocol to be used on the data path with the storage system. +# (string value) +# Allowed values: iscsi, fc, nfs +#netapp_storage_protocol = + +# The hostname (or IP address) for the storage system or proxy server. (string +# value) +#netapp_server_hostname = + +# The TCP port to use for communication with the storage system or proxy +# server. If not specified, Data ONTAP drivers will use 80 for HTTP and 443 for +# HTTPS; E-Series will use 8080 for HTTP and 8443 for HTTPS. (integer value) +#netapp_server_port = + +# The transport protocol used when communicating with the storage system or +# proxy server. (string value) +# Allowed values: http, https +#netapp_transport_type = http + +# Administrative user account name used to access the storage system or proxy +# server. (string value) +#netapp_login = + +# Password for the administrative user account specified in the netapp_login +# option. (string value) +#netapp_password = + +# This option specifies the virtual storage server (Vserver) name on the +# storage cluster on which provisioning of block storage volumes should occur. +# (string value) +#netapp_vserver = + +# The vFiler unit on which provisioning of block storage volumes will be done. +# This option is only used by the driver when connecting to an instance with a +# storage family of Data ONTAP operating in 7-Mode. Only use this option when +# utilizing the MultiStore feature on the NetApp storage system. (string value) +#netapp_vfiler = + +# The name of the config.conf stanza for a Data ONTAP (7-mode) HA partner. +# This option is only used by the driver when connecting to an instance with a +# storage family of Data ONTAP operating in 7-Mode, and it is required if the +# storage protocol selected is FC. (string value) +#netapp_partner_backend_name = + +# The quantity to be multiplied by the requested volume size to ensure enough +# space is available on the virtual storage server (Vserver) to fulfill the +# volume creation request. Note: this option is deprecated and will be removed +# in favor of "reserved_percentage" in the Mitaka release. 
(floating point +# value) +#netapp_size_multiplier = 1.2 + +# This option determines if storage space is reserved for LUN allocation. If +# enabled, LUNs are thick provisioned. If space reservation is disabled, +# storage space is allocated on demand. (string value) +# Allowed values: enabled, disabled +#netapp_lun_space_reservation = enabled + +# If the percentage of available space for an NFS share has dropped below the +# value specified by this option, the NFS image cache will be cleaned. (integer +# value) +#thres_avl_size_perc_start = 20 + +# When the percentage of available space on an NFS share has reached the +# percentage specified by this option, the driver will stop clearing files from +# the NFS image cache that have not been accessed in the last M minutes, where +# M is the value of the expiry_thres_minutes configuration option. (integer +# value) +#thres_avl_size_perc_stop = 60 + +# This option specifies the threshold for last access time for images in the +# NFS image cache. When a cache cleaning cycle begins, images in the cache that +# have not been accessed in the last M minutes, where M is the value of this +# parameter, will be deleted from the cache to create free space on the NFS +# share. (integer value) +#expiry_thres_minutes = 720 + +# This option is used to specify the path to the E-Series proxy application on +# a proxy server. The value is combined with the value of the +# netapp_transport_type, netapp_server_hostname, and netapp_server_port options +# to create the URL used by the driver to connect to the proxy application. +# (string value) +#netapp_webservice_path = /devmgr/v2 + +# This option is only utilized when the storage family is configured to +# eseries. This option is used to restrict provisioning to the specified +# controllers. Specify the value of this option to be a comma separated list of +# controller hostnames or IP addresses to be used for provisioning. (string +# value) +#netapp_controller_ips = + +# Password for the NetApp E-Series storage array. (string value) +#netapp_sa_password = + +# This option specifies whether the driver should allow operations that require +# multiple attachments to a volume. An example would be live migration of +# servers that have volumes attached. When enabled, this backend is limited to +# 256 total volumes in order to guarantee volumes can be accessed by more than +# one host. (boolean value) +#netapp_enable_multiattach = false + +# This option specifies the path of the NetApp copy offload tool binary. Ensure +# that the binary has execute permissions set which allow the effective user of +# the cinder-volume process to execute the file. (string value) +#netapp_copyoffload_tool_path = + +# This option defines the type of operating system that will access a LUN +# exported from Data ONTAP; it is assigned to the LUN at the time it is +# created. (string value) +#netapp_lun_ostype = + +# This option defines the type of operating system for all initiators that can +# access a LUN. This information is used when mapping LUNs to individual hosts +# or groups of hosts. (string value) +# Deprecated group/name - [backend_defaults]/netapp_eseries_host_type +#netapp_host_type = + +# This option is used to restrict provisioning to the specified pools. Specify +# the value of this option to be a regular expression which will be applied to +# the names of objects from the storage backend which represent pools in +# Cinder. This option is only utilized when the storage protocol is configured +# to use iSCSI or FC. 
(string value)
+# Deprecated group/name - [backend_defaults]/netapp_volume_list
+# Deprecated group/name - [backend_defaults]/netapp_storage_pools
+#netapp_pool_name_search_pattern = (.+)
+
+# Multi opt of dictionaries to represent the aggregate mapping between source
+# and destination back ends when using whole back end replication. For every
+# source aggregate associated with a cinder pool (NetApp FlexVol), you would
+# need to specify the destination aggregate on the replication target device. A
+# replication target device is configured with the configuration option
+# replication_device. Specify this option as many times as you have replication
+# devices. Each entry takes the standard dict config form:
+# netapp_replication_aggregate_map =
+# backend_id:<name_of_replication_device_section>,src_aggr_name1:dest_aggr_name1,src_aggr_name2:dest_aggr_name2,...
+# (dict value)
+#netapp_replication_aggregate_map =
+
+# The maximum time in seconds to wait for existing SnapMirror transfers to
+# complete before aborting during a failover. (integer value)
+# Minimum value: 0
+#netapp_snapmirror_quiesce_timeout = 3600
+
+# IP address of Nexenta SA (string value)
+#nexenta_host =
+
+# HTTP(S) port to connect to Nexenta REST API server. If it is zero, 8443 is
+# used for HTTPS and 8080 for HTTP (integer value)
+#nexenta_rest_port = 0
+
+# Use http or https for REST connection (default auto) (string value)
+# Allowed values: http, https, auto
+#nexenta_rest_protocol = auto
+
+# Use secure HTTP for REST connection (default True) (boolean value)
+#nexenta_use_https = true
+
+# User name to connect to Nexenta SA (string value)
+#nexenta_user = admin
+
+# Password to connect to Nexenta SA (string value)
+#nexenta_password = nexenta
+
+# Nexenta target portal port (integer value)
+#nexenta_iscsi_target_portal_port = 3260
+
+# SA Pool that holds all volumes (string value)
+#nexenta_volume = cinder
+
+# IQN prefix for iSCSI targets (string value)
+#nexenta_target_prefix = iqn.1986-03.com.sun:02:cinder-
+
+# Prefix for iSCSI target groups on SA (string value)
+#nexenta_target_group_prefix = cinder/
+
+# Volume group for ns5 (string value)
+#nexenta_volume_group = iscsi
+
+# Compression value for new ZFS folders. (string value)
+# Allowed values: on, off, gzip, gzip-1, gzip-2, gzip-3, gzip-4, gzip-5, gzip-6, gzip-7, gzip-8, gzip-9, lzjb, zle, lz4
+#nexenta_dataset_compression = on
+
+# Deduplication value for new ZFS folders. (string value)
+# Allowed values: on, off, sha256, verify, 'sha256,verify'
+#nexenta_dataset_dedup = off
+
+# Human-readable description for the folder. (string value)
+#nexenta_dataset_description =
+
+# Block size for datasets (integer value)
+#nexenta_blocksize = 4096
+
+# Block size for datasets (integer value)
+#nexenta_ns5_blocksize = 32
+
+# Enables or disables the creation of sparse datasets (boolean value)
+#nexenta_sparse = false
+
+# File with the list of available nfs shares (string value)
+#nexenta_shares_config = /etc/cinder/nfs_shares
+
+# Base directory that contains NFS share mount points (string value)
+#nexenta_mount_point_base = $state_path/mnt
+
+# Enables or disables the creation of volumes as sparsed files that take no
+# space. If disabled (False), volume is created as a regular file, which takes
+# a long time. (boolean value)
+#nexenta_sparsed_volumes = true
+
+# If set to True, cache the NexentaStor appliance volroot option value.
+# (boolean value)
+#nexenta_nms_cache_volroot = true
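+
+# Illustrative example (not part of the generated sample): a NexentaStor
+# iSCSI backend; the address and credentials are placeholders.
+#nexenta_host = 10.0.0.50
+#nexenta_user = admin
+#nexenta_password = NEXENTA_PASS
+#nexenta_volume = cinder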
+
+# Enable stream compression, level 1..9. 1 - gives best speed; 9 - gives best
+# compression. (integer value)
+#nexenta_rrmgr_compression = 0
+
+# TCP buffer size in kilobytes. (integer value)
+#nexenta_rrmgr_tcp_buf_size = 4096
+
+# Number of TCP connections. (integer value)
+#nexenta_rrmgr_connections = 2
+
+# NexentaEdge logical path of directory to store symbolic links to NBDs (string
+# value)
+#nexenta_nbd_symlinks_dir = /dev/disk/by-path
+
+# IP address of NexentaEdge management REST API endpoint (string value)
+#nexenta_rest_address =
+
+# User name to connect to NexentaEdge (string value)
+#nexenta_rest_user = admin
+
+# Password to connect to NexentaEdge (string value)
+#nexenta_rest_password = nexenta
+
+# NexentaEdge logical path of bucket for LUNs (string value)
+#nexenta_lun_container =
+
+# NexentaEdge iSCSI service name (string value)
+#nexenta_iscsi_service =
+
+# NexentaEdge iSCSI Gateway client address for non-VIP service (string value)
+#nexenta_client_address =
+
+# NexentaEdge iSCSI LUN object chunk size (integer value)
+#nexenta_chunksize = 32768
+
+# File with the list of available NFS shares. (string value)
+#nfs_shares_config = /etc/cinder/nfs_shares
+
+# Create volumes as sparsed files which take no space. If set to False, the
+# volume is created as a regular file; in that case volume creation takes a lot
+# of time. (boolean value)
+#nfs_sparsed_volumes = true
+
+# Create volumes as QCOW2 files rather than raw files. (boolean value)
+#nfs_qcow2_volumes = false
+
+# Base dir containing mount points for NFS shares. (string value)
+#nfs_mount_point_base = $state_path/mnt
+
+# Mount options passed to the NFS client. See the NFS man page for details.
+# (string value)
+#nfs_mount_options =
+
+# The number of attempts to mount NFS shares before raising an error. At least
+# one attempt will be made to mount an NFS share, regardless of the value
+# specified. (integer value)
+#nfs_mount_attempts = 3
+
+# Enable support for snapshots on the NFS driver. Platforms using libvirt
+# <1.2.7 will encounter issues with this feature. (boolean value)
+#nfs_snapshot_support = false
+
+# Nimble Controller pool name (string value)
+#nimble_pool_name = default
+
+# Nimble Subnet Label (string value)
+#nimble_subnet_label = *
+
+# Whether to verify Nimble SSL Certificate (boolean value)
+#nimble_verify_certificate = false
+
+# Path to Nimble Array SSL certificate (string value)
+#nimble_verify_cert_path =
+
+# DPL pool uuid in which DPL volumes are stored. (string value)
+#dpl_pool =
+
+# DPL port number. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#dpl_port = 8357
+
+# REST API authorization token. (string value)
+#pure_api_token =
+
+# Automatically determine an oversubscription ratio based on the current total
+# data reduction values. If used this calculated value will override the
+# max_over_subscription_ratio config option. (boolean value)
+#pure_automatic_max_oversubscription_ratio = true
+
+# Snapshot replication interval in seconds. (integer value)
+#pure_replica_interval_default = 3600
+
+# Retain all snapshots on target for this time (in seconds). (integer value)
+#pure_replica_retention_short_term_default = 14400
+
+# Retain how many snapshots for each day. (integer value)
+#pure_replica_retention_long_term_per_day_default = 3
+
+# Retain snapshots per day on target for this time (in days). (integer value)
+#pure_replica_retention_long_term_default = 7
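+
+# Illustrative example (not part of the generated sample): a generic NFS
+# backend using a shares file and QCOW2 volumes; the mount option is a
+# placeholder.
+#nfs_shares_config = /etc/cinder/nfs_shares
+#nfs_qcow2_volumes = true
+#nfs_mount_options = vers=4.1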
Data will NOT be recoverable
+# after a delete with this set to True! When disabled, volumes and snapshots
+# will go into pending eradication state and can be recovered. (boolean value)
+#pure_eradicate_on_delete = false
+
+# The URL to management QNAP Storage (uri value)
+#qnap_management_url =
+
+# The pool name in the QNAP Storage (string value)
+#qnap_poolname =
+
+# Communication protocol to access QNAP storage (string value)
+#qnap_storage_protocol = iscsi
+
+# Quobyte URL to the Quobyte volume e.g., quobyte://<DIR host1>, <DIR
+# host2>/<volume name> (string value)
+#quobyte_volume_url =
+
+# Path to a Quobyte Client configuration file. (string value)
+#quobyte_client_cfg =
+
+# Create volumes as sparse files which take no space. If set to False, volume
+# is created as a regular file. In such case volume creation takes a lot of
+# time. (boolean value)
+#quobyte_sparsed_volumes = true
+
+# Create volumes as QCOW2 files rather than raw files. (boolean value)
+#quobyte_qcow2_volumes = true
+
+# Base dir containing the mount point for the Quobyte volume. (string value)
+#quobyte_mount_point_base = $state_path/mnt
+
+# The name of ceph cluster (string value)
+#rbd_cluster_name = ceph
+
+# The RADOS pool where rbd volumes are stored (string value)
+#rbd_pool = rbd
+
+# The RADOS client name for accessing rbd volumes - only set when using cephx
+# authentication (string value)
+#rbd_user =
+
+# Path to the ceph configuration file (string value)
+#rbd_ceph_conf =
+
+# Path to the ceph keyring file (string value)
+#rbd_keyring_conf =
+
+# Flatten volumes created from snapshots to remove dependency from volume to
+# snapshot (boolean value)
+#rbd_flatten_volume_from_snapshot = false
+
+# The libvirt uuid of the secret for the rbd_user volumes (string value)
+#rbd_secret_uuid =
+
+# Maximum number of nested volume clones that are taken before a flatten
+# occurs. Set to 0 to disable cloning. (integer value)
+#rbd_max_clone_depth = 5
+
+# Volumes will be chunked into objects of this size (in megabytes). (integer
+# value)
+#rbd_store_chunk_size = 4
+
+# Timeout value (in seconds) used when connecting to ceph cluster. If value <
+# 0, no timeout is set and default librados value is used. (integer value)
+#rados_connect_timeout = -1
+
+# Number of retries if connection to ceph cluster failed. (integer value)
+#rados_connection_retries = 3
+
+# Interval value (in seconds) between connection retries to ceph cluster.
+# (integer value)
+#rados_connection_interval = 5
+
+# Timeout value (in seconds) used when connecting to ceph cluster to do a
+# demotion/promotion of volumes. If value < 0, no timeout is set and default
+# librados value is used. (integer value)
+#replication_connect_timeout = 5
+
+# Set to True for driver to report total capacity as a dynamic value -used +
+# current free- and to False to report a static value -quota max bytes if
+# defined and global size of cluster if not-. (boolean value)
+#report_dynamic_total_capacity = true
+
+# IP address or Hostname of NAS system. (string value)
+# Deprecated group/name - [backend_defaults]/nas_ip
+#nas_host =
+
+# User name to connect to NAS system. (string value)
+#nas_login = admin
+
+# Password to connect to NAS system. (string value)
+#nas_password =
+
+# SSH port to use to connect to NAS system. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#nas_ssh_port = 22
+
+# Filename of private key to use for SSH authentication.
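+
+# Example (illustrative only): a minimal Ceph RBD backend using the rbd_*
+# options above. The section name and the libvirt secret UUID are
+# placeholders.
+# [ceph-rbd]
+# volume_driver = cinder.volume.drivers.rbd.RBDDriver
+# rbd_pool = volumes
+# rbd_user = cinder
+# rbd_ceph_conf = /etc/ceph/ceph.conf
+# rbd_secret_uuid = 457eb676-33da-42ec-9a8c-9293d545c337
+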
(string value) +#nas_private_key = + +# Allow network-attached storage systems to operate in a secure environment +# where root level access is not permitted. If set to False, access is as the +# root user and insecure. If set to True, access is not as root. If set to +# auto, a check is done to determine if this is a new installation: True is +# used if so, otherwise False. Default is auto. (string value) +#nas_secure_file_operations = auto + +# Set more secure file permissions on network-attached storage volume files to +# restrict broad other/world access. If set to False, volumes are created with +# open permissions. If set to True, volumes are created with permissions for +# the cinder user and group (660). If set to auto, a check is done to determine +# if this is a new installation: True is used if so, otherwise False. Default +# is auto. (string value) +#nas_secure_file_permissions = auto + +# Path to the share to use for storing Cinder volumes. For example: +# "/srv/export1" for an NFS server export available at 10.0.5.10:/srv/export1 . +# (string value) +#nas_share_path = + +# Options used to mount the storage backend file system where Cinder volumes +# are stored. (string value) +#nas_mount_options = + +# Provisioning type that will be used when creating volumes. (string value) +# Allowed values: thin, thick +#nas_volume_prov_type = thin + +# Pool or Vdisk name to use for volume creation. (string value) +#hpmsa_backend_name = A + +# linear (for Vdisk) or virtual (for Pool). (string value) +# Allowed values: linear, virtual +#hpmsa_backend_type = virtual + +# HPMSA API interface protocol. (string value) +# Allowed values: http, https +#hpmsa_api_protocol = https + +# Whether to verify HPMSA array SSL certificate. (boolean value) +#hpmsa_verify_certificate = false + +# HPMSA array SSL certificate path. (string value) +#hpmsa_verify_certificate_path = + +# List of comma-separated target iSCSI IP addresses. (list value) +#hpmsa_iscsi_ips = + +# Use thin provisioning for SAN volumes? (boolean value) +#san_thin_provision = true + +# IP address of SAN controller (string value) +#san_ip = + +# Username for SAN controller (string value) +#san_login = admin + +# Password for SAN controller (string value) +#san_password = + +# Filename of private key to use for SSH authentication (string value) +#san_private_key = + +# Cluster name to use for creating volumes (string value) +#san_clustername = + +# SSH port to use with SAN (port value) +# Minimum value: 0 +# Maximum value: 65535 +#san_ssh_port = 22 + +# Execute commands locally instead of over SSH; use if the volume service is +# running on the SAN device (boolean value) +#san_is_local = false + +# SSH connection timeout in seconds (integer value) +#ssh_conn_timeout = 30 + +# Minimum ssh connections in the pool (integer value) +#ssh_min_pool_conn = 1 + +# Maximum ssh connections in the pool (integer value) +#ssh_max_pool_conn = 5 + +# IP address of sheep daemon. (string value) +#sheepdog_store_address = 127.0.0.1 + +# Port of sheep daemon. (port value) +# Minimum value: 0 +# Maximum value: 65535 +#sheepdog_store_port = 7000 + +# Set 512 byte emulation on volume creation; (boolean value) +#sf_emulate_512 = true + +# Allow tenants to specify QOS on create (boolean value) +#sf_allow_tenant_qos = false + +# Create SolidFire accounts with this prefix. Any string can be used here, but +# the string "hostname" is special and will create a prefix using the cinder +# node hostname (previous default behavior). The default is NO prefix. 
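+
+# Example (illustrative only): SAN access settings for a driver that manages
+# the array over SSH. The address and key path are placeholders; key-based
+# login is shown instead of san_password.
+# san_ip = 10.0.0.50
+# san_login = admin
+# san_private_key = /etc/cinder/ssh/san_key
+# san_ssh_port = 22
+# ssh_max_pool_conn = 5
+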
(string
+# value)
+#sf_account_prefix =
+
+# Create SolidFire volumes with this prefix. Volume names are of the form
+# <sf_volume_prefix><cinder-volume-id>. The default is to use a prefix of
+# 'UUID-'. (string value)
+#sf_volume_prefix = UUID-
+
+# Account name on the SolidFire Cluster to use as owner of template/cache
+# volumes (created if does not exist). (string value)
+#sf_template_account_name = openstack-vtemplate
+
+# Create an internal cache of copy of images when a bootable volume is created
+# to eliminate fetch from glance and qemu-conversion on subsequent calls.
+# (boolean value)
+#sf_allow_template_caching = true
+
+# Overrides default cluster SVIP with the one specified. This is required for
+# deployments that have implemented the use of VLANs for iSCSI networks in
+# their cloud. (string value)
+#sf_svip =
+
+# Create an internal mapping of volume IDs and account. Optimizes lookups and
+# performance at the expense of memory, very large deployments may want to
+# consider setting to False. (boolean value)
+#sf_enable_volume_mapping = true
+
+# SolidFire API port. Useful if the device api is behind a proxy on a different
+# port. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#sf_api_port = 443
+
+# Utilize volume access groups on a per-tenant basis. (boolean value)
+#sf_enable_vag = false
+
+# Volume on Synology storage to be used for creating lun. (string value)
+#synology_pool_name =
+
+# Management port for Synology storage. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#synology_admin_port = 5000
+
+# Administrator of Synology storage. (string value)
+#synology_username = admin
+
+# Password of administrator for logging in Synology storage. (string value)
+#synology_password =
+
+# Do certificate validation or not if $driver_use_ssl is True (boolean value)
+#synology_ssl_verify = true
+
+# One time password of administrator for logging in Synology storage if OTP is
+# enabled. (string value)
+#synology_one_time_pass =
+
+# Device id for skip one time password check for logging in Synology storage if
+# OTP is enabled. (string value)
+#synology_device_id =
+
+# Create volumes in this pool (string value)
+#tegile_default_pool =
+
+# Create volumes in this project (string value)
+#tegile_default_project =
+
+# The hostname (or IP address) for the storage system (string value)
+#tintri_server_hostname =
+
+# User name for the storage system (string value)
+#tintri_server_username =
+
+# Password for the storage system (string value)
+#tintri_server_password =
+
+# API version for the storage system (string value)
+#tintri_api_version = v310
+
+# Delete unused image snapshots older than mentioned days (integer value)
+#tintri_image_cache_expiry_days = 30
+
+# Path to image nfs shares file (string value)
+#tintri_image_shares_config =
+
+# Global backend request timeout, in seconds. (integer value)
+#violin_request_timeout = 300
+
+# Storage pools to be used to setup dedup luns only.(Comma separated list)
+# (list value)
+#violin_dedup_only_pools =
+
+# Storage pools capable of dedup and other luns.(Comma separated list) (list
+# value)
+#violin_dedup_capable_pools =
+
+# Method of choosing a storage pool for a lun. (string value)
+# Allowed values: random, largest, smallest
+#violin_pool_allocation_method = random
+
+# Target iSCSI addresses to use.(Comma separated list) (list value)
+#violin_iscsi_target_ips =
+
+# IP address for connecting to VMware vCenter server. (string value)
+#vmware_host_ip =
+
+# Port number for connecting to VMware vCenter server.
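+
+# Example (illustrative only): SolidFire settings combining the SAN and sf_*
+# options above. The addresses are placeholders.
+# volume_driver = cinder.volume.drivers.solidfire.SolidFireDriver
+# san_ip = 172.17.1.182
+# san_login = sfadmin
+# sf_account_prefix = hostname
+# sf_svip = 10.10.10.10
+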
(port value)
+# Minimum value: 0
+# Maximum value: 65535
+#vmware_host_port = 443
+
+# Username for authenticating with VMware vCenter server. (string value)
+#vmware_host_username =
+
+# Password for authenticating with VMware vCenter server. (string value)
+#vmware_host_password =
+
+# Optional VIM service WSDL Location e.g http://<server>/vimService.wsdl.
+# Optional over-ride to default location for bug work-arounds. (string value)
+#vmware_wsdl_location =
+
+# Number of times VMware vCenter server API must be retried upon connection
+# related issues. (integer value)
+#vmware_api_retry_count = 10
+
+# The interval (in seconds) for polling remote tasks invoked on VMware vCenter
+# server. (floating point value)
+#vmware_task_poll_interval = 2.0
+
+# Name of the vCenter inventory folder that will contain Cinder volumes. This
+# folder will be created under "OpenStack/<project_folder>", where
+# project_folder is of format "Project (<volume_project_id>)". (string value)
+#vmware_volume_folder = Volumes
+
+# Timeout in seconds for VMDK volume transfer between Cinder and Glance.
+# (integer value)
+#vmware_image_transfer_timeout_secs = 7200
+
+# Max number of objects to be retrieved per batch. Query results will be
+# obtained in batches from the server and not in one shot. Server may still
+# limit the count to something less than the configured value. (integer value)
+#vmware_max_objects_retrieval = 100
+
+# Optional string specifying the VMware vCenter server version. The driver
+# attempts to retrieve the version from VMware vCenter server. Set this
+# configuration only if you want to override the vCenter server version.
+# (string value)
+#vmware_host_version =
+
+# Directory where virtual disks are stored during volume backup and restore.
+# (string value)
+#vmware_tmp_dir = /tmp
+
+# CA bundle file to use in verifying the vCenter server certificate. (string
+# value)
+#vmware_ca_file =
+
+# If true, the vCenter server certificate is not verified. If false, then the
+# default CA truststore is used for verification. This option is ignored if
+# "vmware_ca_file" is set. (boolean value)
+#vmware_insecure = false
+
+# Name of a vCenter compute cluster where volumes should be created. (multi
+# valued)
+#vmware_cluster_name =
+
+# Maximum number of connections in http connection pool. (integer value)
+#vmware_connection_pool_size = 10
+
+# Default adapter type to be used for attaching volumes. (string value)
+# Allowed values: lsiLogic, busLogic, lsiLogicsas, paraVirtual, ide
+#vmware_adapter_type = lsiLogic
+
+# File with the list of available vzstorage shares. (string value)
+#vzstorage_shares_config = /etc/cinder/vzstorage_shares
+
+# Create volumes as sparsed files which take no space rather than regular files
+# when using raw format, in which case volume creation takes lot of time.
+# (boolean value)
+#vzstorage_sparsed_volumes = true
+
+# Percent of ACTUAL usage of the underlying volume before no new volumes can be
+# allocated to the volume destination. (floating point value)
+#vzstorage_used_ratio = 0.95
+
+# Base dir containing mount points for vzstorage shares. (string value)
+#vzstorage_mount_point_base = $state_path/mnt
+
+# Mount options passed to the vzstorage client. See section of the pstorage-
+# mount man page for details. (list value)
+#vzstorage_mount_options =
+
+# Default format that will be used when creating volumes if no volume format is
+# specified. (string value)
+#vzstorage_default_volume_format = raw
+
+# File with the list of available smbfs shares.
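+
+# Example (illustrative only): VMware VMDK backend settings using the options
+# above. Host name, credentials and cluster name are placeholders.
+# volume_driver = cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver
+# vmware_host_ip = vcenter.example.com
+# vmware_host_username = cinder-svc
+# vmware_host_password = ***
+# vmware_cluster_name = compute-cluster-1
+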
(string value) +#smbfs_shares_config = C:\OpenStack\smbfs_shares.txt + +# DEPRECATED: The path of the automatically generated file containing +# information about volume disk space allocation. (string value) +# This option is deprecated for removal since 11.0.0. +# Its value may be silently ignored in the future. +# Reason: This allocation file is no longer used. +#smbfs_allocation_info_file_path = C:\OpenStack\allocation_data.txt + +# Default format that will be used when creating volumes if no volume format is +# specified. (string value) +# Allowed values: vhd, vhdx +#smbfs_default_volume_format = vhd + +# Create volumes as sparsed files which take no space rather than regular files +# when using raw format, in which case volume creation takes lot of time. +# (boolean value) +#smbfs_sparsed_volumes = true + +# DEPRECATED: Percent of ACTUAL usage of the underlying volume before no new +# volumes can be allocated to the volume destination. (floating point value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#smbfs_used_ratio = + +# DEPRECATED: This will compare the allocated to available space on the volume +# destination. If the ratio exceeds this number, the destination will no +# longer be valid. (floating point value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#smbfs_oversub_ratio = + +# Base dir containing mount points for smbfs shares. (string value) +#smbfs_mount_point_base = C:\OpenStack\_mnt + +# Mappings between share locations and pool names. If not specified, the share +# names will be used as pool names. Example: +# //addr/share:pool_name,//addr/share2:pool_name2 (dict value) +#smbfs_pool_mappings = + +# Path to store VHD backed volumes (string value) +#windows_iscsi_lun_path = C:\iSCSIVirtualDisks + +# Default storage pool for volumes. (integer value) +#ise_storage_pool = 1 + +# Raid level for ISE volumes. (integer value) +#ise_raid = 1 + +# Number of retries (per port) when establishing connection to ISE management +# port. (integer value) +#ise_connection_retries = 5 + +# Interval (secs) between retries. (integer value) +#ise_retry_interval = 1 + +# Number on retries to get completion status after issuing a command to ISE. +# (integer value) +#ise_completion_retries = 30 + +# VPSA - Use ISER instead of iSCSI (boolean value) +#zadara_use_iser = true + +# VPSA - Management Host name or IP address (string value) +#zadara_vpsa_host = + +# VPSA - Port number (port value) +# Minimum value: 0 +# Maximum value: 65535 +#zadara_vpsa_port = + +# VPSA - Use SSL connection (boolean value) +#zadara_vpsa_use_ssl = false + +# If set to True the http client will validate the SSL certificate of the VPSA +# endpoint. (boolean value) +#zadara_ssl_cert_verify = true + +# VPSA - Username (string value) +#zadara_user = + +# VPSA - Password (string value) +#zadara_password = + +# VPSA - Storage Pool assigned for volumes (string value) +#zadara_vpsa_poolname = + +# VPSA - Default encryption policy for volumes (boolean value) +#zadara_vol_encrypt = false + +# VPSA - Default template for VPSA volume names (string value) +#zadara_vol_name_template = OS_%s + +# VPSA - Attach snapshot policy for volumes (boolean value) +#zadara_default_snap_policy = false + +# Storage pool name. (string value) +#zfssa_pool = + +# Project name. (string value) +#zfssa_project = + +# Block size. 
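+
+# Example (illustrative only): a Windows SMBFS backend using the smbfs_*
+# options above. The share path and pool name are placeholders.
+# volume_driver = cinder.volume.drivers.windows.smbfs.WindowsSmbfsDriver
+# smbfs_shares_config = C:\OpenStack\smbfs_shares.txt
+# smbfs_default_volume_format = vhdx
+# smbfs_pool_mappings = //10.0.0.5/cinder:pool1
+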
(string value) +# Allowed values: 512, 1k, 2k, 4k, 8k, 16k, 32k, 64k, 128k +#zfssa_lun_volblocksize = 8k + +# Flag to enable sparse (thin-provisioned): True, False. (boolean value) +#zfssa_lun_sparse = false + +# Data compression. (string value) +# Allowed values: off, lzjb, gzip-2, gzip, gzip-9 +#zfssa_lun_compression = off + +# Synchronous write bias. (string value) +# Allowed values: latency, throughput +#zfssa_lun_logbias = latency + +# iSCSI initiator group. (string value) +#zfssa_initiator_group = + +# iSCSI initiator IQNs. (comma separated) (string value) +#zfssa_initiator = + +# iSCSI initiator CHAP user (name). (string value) +#zfssa_initiator_user = + +# Secret of the iSCSI initiator CHAP user. (string value) +#zfssa_initiator_password = + +# iSCSI initiators configuration. (string value) +#zfssa_initiator_config = + +# iSCSI target group name. (string value) +#zfssa_target_group = tgt-grp + +# iSCSI target CHAP user (name). (string value) +#zfssa_target_user = + +# Secret of the iSCSI target CHAP user. (string value) +#zfssa_target_password = + +# iSCSI target portal (Data-IP:Port, w.x.y.z:3260). (string value) +#zfssa_target_portal = + +# Network interfaces of iSCSI targets. (comma separated) (string value) +#zfssa_target_interfaces = + +# REST connection timeout. (seconds) (integer value) +#zfssa_rest_timeout = + +# IP address used for replication data. (maybe the same as data ip) (string +# value) +#zfssa_replication_ip = + +# Flag to enable local caching: True, False. (boolean value) +#zfssa_enable_local_cache = true + +# Name of ZFSSA project where cache volumes are stored. (string value) +#zfssa_cache_project = os-cinder-cache + +# Driver policy for volume manage. (string value) +# Allowed values: loose, strict +#zfssa_manage_policy = loose + +# Data path IP address (string value) +#zfssa_data_ip = + +# HTTPS port number (string value) +#zfssa_https_port = 443 + +# Options to be passed while mounting share over nfs (string value) +#zfssa_nfs_mount_options = + +# Storage pool name. (string value) +#zfssa_nfs_pool = + +# Project name. (string value) +#zfssa_nfs_project = NFSProject + +# Share name. (string value) +#zfssa_nfs_share = nfs_share + +# Data compression. (string value) +# Allowed values: off, lzjb, gzip-2, gzip, gzip-9 +#zfssa_nfs_share_compression = off + +# Synchronous write bias-latency, throughput. (string value) +# Allowed values: latency, throughput +#zfssa_nfs_share_logbias = latency + +# Name of directory inside zfssa_nfs_share where cache volumes are stored. +# (string value) +#zfssa_cache_directory = os-cinder-cache + +# Main controller IP. (IP address value) +#zteControllerIP0 = + +# Slave controller IP. (IP address value) +#zteControllerIP1 = + +# Local IP. (IP address value) +#zteLocalIP = + +# User name. (string value) +#zteUserName = + +# User password. (string value) +#zteUserPassword = + +# Virtual block size of pool. Unit : KB. Valid value : 4, 8, 16, 32, 64, 128, +# 256, 512. (integer value) +#zteChunkSize = 4 + +# Cache readahead size. (integer value) +#zteAheadReadSize = 8 + +# Cache policy. 0, Write Back; 1, Write Through. (integer value) +#zteCachePolicy = 1 + +# SSD cache switch. 0, OFF; 1, ON. (integer value) +#zteSSDCacheSwitch = 1 + +# Pool name list. (list value) +#zteStoragePool = + +# Pool volume allocated policy. 0, Auto; 1, High Performance Tier First; 2, +# Performance Tier First; 3, Capacity Tier First. 
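+
+# Example (illustrative only): ZFSSA iSCSI backend settings using the zfssa_*
+# options above. The management address, pool, project and portal are
+# placeholders, and the driver path assumes the in-tree ZFSSA iSCSI driver.
+# volume_driver = cinder.volume.drivers.zfssa.zfssaiscsi.ZFSSAISCSIDriver
+# san_ip = zfssa-mgmt.example.com
+# zfssa_pool = pool-0
+# zfssa_project = cinder
+# zfssa_target_portal = 192.168.1.10:3260
+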
(integer value) +#ztePoolVoAllocatedPolicy = 0 + +# Pool volume move policy.0, Auto; 1, Highest Available; 2, Lowest Available; +# 3, No Relocation. (integer value) +#ztePoolVolMovePolicy = 0 + +# Whether it is a thin volume. (boolean value) +#ztePoolVolIsThin = false + +# Pool volume init allocated Capacity.Unit : KB. (integer value) +#ztePoolVolInitAllocatedCapacity = 0 + +# Pool volume alarm threshold. [0, 100] (integer value) +#ztePoolVolAlarmThreshold = 0 + +# Pool volume alarm stop allocated flag. (integer value) +#ztePoolVolAlarmStopAllocatedFlag = 0 + +# Driver to use for volume creation (string value) +#volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver + +# FC Zoning mode configured (string value) +#zoning_mode = + +# User defined capabilities, a JSON formatted string specifying key/value +# pairs. The key/value pairs can be used by the CapabilitiesFilter to select +# between backends when requests specify volume types. For example, specifying +# a service level or the geographical location of a backend, then creating a +# volume type to allow the user to select by these different properties. +# (string value) +#extra_capabilities = {} + +# Suppress requests library SSL certificate warnings. (boolean value) +#suppress_requests_ssl_warnings = false + + +[barbican] + +# +# From castellan.config +# + +# Use this endpoint to connect to Barbican, for example: +# "http://localhost:9311/" (string value) +#barbican_endpoint = + +# Version of the Barbican API, for example: "v1" (string value) +#barbican_api_version = + +# Use this endpoint to connect to Keystone (string value) +#auth_endpoint = http://localhost/identity/v3 + +# Number of seconds to wait before retrying poll for key creation completion +# (integer value) +#retry_delay = 1 + +# Number of times to retry poll for key creation completion (integer value) +#number_of_retries = 60 + +# Specifies if insecure TLS (https) requests. If False, the server's +# certificate will not be validated (boolean value) +#verify_ssl = true + + +[brcd_fabric_example] + +# +# From cinder +# + +# South bound connector for the fabric. (string value) +# Allowed values: SSH, HTTP, HTTPS +#fc_southbound_protocol = HTTP + +# Management IP of fabric. (string value) +#fc_fabric_address = + +# Fabric user ID. (string value) +#fc_fabric_user = + +# Password for user. (string value) +#fc_fabric_password = + +# Connecting port (port value) +# Minimum value: 0 +# Maximum value: 65535 +#fc_fabric_port = 22 + +# Local SSH certificate Path. (string value) +#fc_fabric_ssh_cert_path = + +# Overridden zoning policy. (string value) +#zoning_policy = initiator-target + +# Overridden zoning activation state. (boolean value) +#zone_activate = true + +# Overridden zone name prefix. (string value) +#zone_name_prefix = openstack + +# Virtual Fabric ID. 
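+
+# Example (illustrative only): credentials for one Brocade fabric, kept in a
+# section whose name (here "brcd_fab_1") would be listed in
+# [fc-zone-manager]/fc_fabric_names. Address and login are placeholders.
+# [brcd_fab_1]
+# fc_southbound_protocol = HTTPS
+# fc_fabric_address = 10.0.0.100
+# fc_fabric_user = zoneadmin
+# fc_fabric_password = ***
+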
(string value)
+#fc_virtual_fabric_id =
+
+
+[cisco_fabric_example]
+
+#
+# From cinder
+#
+
+# Management IP of fabric (string value)
+#cisco_fc_fabric_address =
+
+# Fabric user ID (string value)
+#cisco_fc_fabric_user =
+
+# Password for user (string value)
+#cisco_fc_fabric_password =
+
+# Connecting port (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#cisco_fc_fabric_port = 22
+
+# overridden zoning policy (string value)
+#cisco_zoning_policy = initiator-target
+
+# overridden zoning activation state (boolean value)
+#cisco_zone_activate = true
+
+# overridden zone name prefix (string value)
+#cisco_zone_name_prefix =
+
+# VSAN of the Fabric (string value)
+#cisco_zoning_vsan =
+
+
+[coordination]
+
+#
+# From cinder
+#
+
+# The backend URL to use for distributed coordination. (string value)
+#backend_url = file://$state_path
+
+# DEPRECATED: Number of seconds between heartbeats for distributed
+# coordination. No longer used since distributed coordination manages its
+# heartbeat internally. (floating point value)
+# This option is deprecated for removal since 11.0.0.
+# Its value may be silently ignored in the future.
+# Reason: This option is no longer used.
+#heartbeat = 1.0
+
+# DEPRECATED: Initial number of seconds to wait after failed reconnection. No
+# longer used since distributed coordination manages its heartbeat internally.
+# (floating point value)
+# This option is deprecated for removal since 11.0.0.
+# Its value may be silently ignored in the future.
+# Reason: This option is no longer used.
+#initial_reconnect_backoff = 0.1
+
+# DEPRECATED: Maximum number of seconds between sequential reconnection
+# retries. No longer used since distributed coordination manages its heartbeat
+# internally. (floating point value)
+# This option is deprecated for removal since 11.0.0.
+# Its value may be silently ignored in the future.
+# Reason: This option is no longer used.
+#max_reconnect_backoff = 60.0
+
+
+[cors]
+
+#
+# From oslo.middleware
+#
+
+# Indicate whether this resource may be shared with the domain received in the
+# requests "origin" header. Format: "<protocol>://<host>[:<port>]", no trailing
+# slash. Example: https://horizon.example.com (list value)
+#allowed_origin =
+
+# Indicate that the actual request can include user credentials (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
+# Headers. (list value)
+#expose_headers = X-Auth-Token,X-Subject-Token,X-Service-Token,X-OpenStack-Request-ID,OpenStack-API-Version
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list value)
+#allow_methods = GET,PUT,POST,DELETE,PATCH,HEAD
+
+# Indicate which header field names may be used during the actual request.
+# (list value)
+#allow_headers = X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-OpenStack-Request-ID,X-Trace-Info,X-Trace-HMAC,OpenStack-API-Version
+
+
+[database]
+
+#
+# From oslo.db
+#
+
+# If True, SQLite uses synchronous mode. (boolean value)
+#sqlite_synchronous = true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend = sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the database.
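+
+# Example (illustrative only): a MySQL connection string; host, user and
+# password are placeholders.
+# connection = mysql+pymysql://cinder:***@192.168.0.2/cinder
+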
(string +# value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection = + +# The SQLAlchemy connection string to use to connect to the slave database. +# (string value) +#slave_connection = + +# The SQL mode to be used for MySQL sessions. This option, including the +# default, overrides any server-set SQL mode. To use whatever SQL mode is set +# by the server configuration, set this to no value. Example: mysql_sql_mode= +# (string value) +#mysql_sql_mode = TRADITIONAL + +# If True, transparently enables support for handling MySQL Cluster (NDB). +# (boolean value) +#mysql_enable_ndb = false + +# Timeout before idle SQL connections are reaped. (integer value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout = 3600 + +# Minimum number of SQL connections to keep open in a pool. (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool. Setting a value of +# 0 indicates no limit. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size = 5 + +# Maximum number of database connection retries during startup. Set to -1 to +# specify an infinite retry count. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries = 10 + +# Interval between retries of opening a SQL connection. (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval = 10 + +# If set, use this value for max_overflow with SQLAlchemy. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow = 50 + +# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer +# value) +# Minimum value: 0 +# Maximum value: 100 +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug = 0 + +# Add Python stack traces to SQL as comment strings. (boolean value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace = false + +# If set, use this value for pool_timeout with SQLAlchemy. (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout = + +# Enable the experimental use of database reconnect on connection lost. +# (boolean value) +#use_db_reconnect = false + +# Seconds between retries of a database transaction. (integer value) +#db_retry_interval = 1 + +# If True, increases the interval between retries of a database operation up to +# db_max_retry_interval. (boolean value) +#db_inc_retry_interval = true + +# If db_inc_retry_interval is set, the maximum seconds between retries of a +# database operation. (integer value) +#db_max_retry_interval = 10 + +# Maximum retries in case of connection error or deadlock error before error is +# raised. Set to -1 to specify an infinite retry count. 
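+
+# Example (illustrative only): connection pool and retry tuning; the values
+# are arbitrary starting points, not recommendations.
+# max_pool_size = 20
+# max_overflow = 50
+# max_retries = -1
+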
(integer value) +#db_max_retries = 20 + + +[fc-zone-manager] + +# +# From cinder +# + +# South bound connector for zoning operation (string value) +#brcd_sb_connector = HTTP + +# Southbound connector for zoning operation (string value) +#cisco_sb_connector = cinder.zonemanager.drivers.cisco.cisco_fc_zone_client_cli.CiscoFCZoneClientCLI + +# FC Zone Driver responsible for zone management (string value) +#zone_driver = cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver + +# Zoning policy configured by user; valid values include "initiator-target" or +# "initiator" (string value) +#zoning_policy = initiator-target + +# Comma separated list of Fibre Channel fabric names. This list of names is +# used to retrieve other SAN credentials for connecting to each SAN fabric +# (string value) +#fc_fabric_names = + +# FC SAN Lookup Service (string value) +#fc_san_lookup_service = cinder.zonemanager.drivers.brocade.brcd_fc_san_lookup_service.BrcdFCSanLookupService + +# Set this to True when you want to allow an unsupported zone manager driver to +# start. Drivers that haven't maintained a working CI system and testing are +# marked as unsupported until CI is working again. This also marks a driver as +# deprecated and may be removed in the next release. (boolean value) +#enable_unsupported_driver = false + + +[healthcheck] + +# +# From oslo.middleware +# + +# DEPRECATED: The path to respond to healtcheck requests on. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#path = /healthcheck + +# Show more detailed information as part of the response (boolean value) +#detailed = false + +# Additional backends that can perform health checks and report that +# information back as part of a request. (list value) +#backends = + +# Check the presence of a file to determine if an application is running on a +# port. Used by DisableByFileHealthcheck plugin. (string value) +#disable_by_file_path = + +# Check the presence of a file based on a port to determine if an application +# is running on a port. Expects a "port:path" list of strings. Used by +# DisableByFilesPortsHealthcheck plugin. (list value) +#disable_by_file_paths = + + +[key_manager] + +# +# From castellan.config +# + +# The full class name of the key manager API class (string value) +#api_class = castellan.key_manager.barbican_key_manager.BarbicanKeyManager + +# The type of authentication credential to create. Possible values are 'token', +# 'password', 'keystone_token', and 'keystone_password'. Required if no context +# is passed to the credential factory. (string value) +#auth_type = + +# Token for authentication. Required for 'token' and 'keystone_token' auth_type +# if no context is passed to the credential factory. (string value) +#token = + +# Username for authentication. Required for 'password' auth_type. Optional for +# the 'keystone_password' auth_type. (string value) +#username = + +# Password for authentication. Required for 'password' and 'keystone_password' +# auth_type. (string value) +#password = + +# User ID for authentication. Optional for 'keystone_token' and +# 'keystone_password' auth_type. (string value) +#user_id = + +# User's domain ID for authentication. Optional for 'keystone_token' and +# 'keystone_password' auth_type. (string value) +#user_domain_id = + +# User's domain name for authentication. Optional for 'keystone_token' and +# 'keystone_password' auth_type. (string value) +#user_domain_name = + +# Trust ID for trust scoping. 
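+
+# Example (illustrative only): enabling the Brocade zone driver against the
+# fabric section shown earlier; "brcd_fab_1" is a placeholder and must match
+# a config section of the same name.
+# zone_driver = cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver
+# zoning_policy = initiator-target
+# fc_fabric_names = brcd_fab_1
+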
Optional for 'keystone_token' and +# 'keystone_password' auth_type. (string value) +#trust_id = + +# Domain ID for domain scoping. Optional for 'keystone_token' and +# 'keystone_password' auth_type. (string value) +#domain_id = + +# Domain name for domain scoping. Optional for 'keystone_token' and +# 'keystone_password' auth_type. (string value) +#domain_name = + +# Project ID for project scoping. Optional for 'keystone_token' and +# 'keystone_password' auth_type. (string value) +#project_id = + +# Project name for project scoping. Optional for 'keystone_token' and +# 'keystone_password' auth_type. (string value) +#project_name = + +# Project's domain ID for project. Optional for 'keystone_token' and +# 'keystone_password' auth_type. (string value) +#project_domain_id = + +# Project's domain name for project. Optional for 'keystone_token' and +# 'keystone_password' auth_type. (string value) +#project_domain_name = + +# Allow fetching a new token if the current one is going to expire. Optional +# for 'keystone_token' and 'keystone_password' auth_type. (boolean value) +#reauthenticate = true + +# +# From cinder +# + +# Fixed key returned by key manager, specified in hex (string value) +#fixed_key = + + +[keystone_authtoken] + +# +# From keystonemiddleware.auth_token +# + +# Complete "public" Identity API endpoint. This endpoint should not be an +# "admin" endpoint, as it should be accessible by all end users. +# Unauthenticated clients are redirected to this endpoint to authenticate. +# Although this endpoint should ideally be unversioned, client support in the +# wild varies. If you're using a versioned v2 endpoint here, then this should +# *not* be the same endpoint the service user utilizes for validating tokens, +# because normal end users may not be able to reach that endpoint. (string +# value) +#auth_uri = + +# API version of the admin Identity API endpoint. (string value) +#auth_version = + +# Do not handle authorization requests within the middleware, but delegate the +# authorization decision to downstream WSGI components. (boolean value) +#delay_auth_decision = false + +# Request timeout value for communicating with Identity API server. (integer +# value) +#http_connect_timeout = + +# How many times are we trying to reconnect when communicating with Identity +# API Server. (integer value) +#http_request_max_retries = 3 + +# Request environment key where the Swift cache object is stored. When +# auth_token middleware is deployed with a Swift cache, use this option to have +# the middleware share a caching backend with swift. Otherwise, use the +# ``memcached_servers`` option instead. (string value) +#cache = + +# Required if identity server requires client certificate (string value) +#certfile = + +# Required if identity server requires client certificate (string value) +#keyfile = + +# A PEM encoded Certificate Authority to use when verifying HTTPs connections. +# Defaults to system CAs. (string value) +#cafile = + +# Verify HTTPS connections. (boolean value) +#insecure = false + +# The region in which the identity server can be found. (string value) +#region_name = + +# DEPRECATED: Directory used to cache files related to PKI tokens. This option +# has been deprecated in the Ocata release and will be removed in the P +# release. (string value) +# This option is deprecated for removal since Ocata. +# Its value may be silently ignored in the future. +# Reason: PKI token format is no longer supported. 
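+
+# Example (illustrative only): minimal keystone_authtoken endpoint settings;
+# the URL and region are placeholders.
+# auth_uri = http://controller:5000
+# region_name = RegionOne
+# insecure = false
+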
+#signing_dir = + +# Optionally specify a list of memcached server(s) to use for caching. If left +# undefined, tokens will instead be cached in-process. (list value) +# Deprecated group/name - [keystone_authtoken]/memcache_servers +#memcached_servers = + +# In order to prevent excessive effort spent validating tokens, the middleware +# caches previously-seen tokens for a configurable duration (in seconds). Set +# to -1 to disable caching completely. (integer value) +#token_cache_time = 300 + +# DEPRECATED: Determines the frequency at which the list of revoked tokens is +# retrieved from the Identity service (in seconds). A high number of revocation +# events combined with a low cache duration may significantly reduce +# performance. Only valid for PKI tokens. This option has been deprecated in +# the Ocata release and will be removed in the P release. (integer value) +# This option is deprecated for removal since Ocata. +# Its value may be silently ignored in the future. +# Reason: PKI token format is no longer supported. +#revocation_cache_time = 10 + +# (Optional) If defined, indicate whether token data should be authenticated or +# authenticated and encrypted. If MAC, token data is authenticated (with HMAC) +# in the cache. If ENCRYPT, token data is encrypted and authenticated in the +# cache. If the value is not one of these options or empty, auth_token will +# raise an exception on initialization. (string value) +# Allowed values: None, MAC, ENCRYPT +#memcache_security_strategy = None + +# (Optional, mandatory if memcache_security_strategy is defined) This string is +# used for key derivation. (string value) +#memcache_secret_key = + +# (Optional) Number of seconds memcached server is considered dead before it is +# tried again. (integer value) +#memcache_pool_dead_retry = 300 + +# (Optional) Maximum total number of open connections to every memcached +# server. (integer value) +#memcache_pool_maxsize = 10 + +# (Optional) Socket timeout in seconds for communicating with a memcached +# server. (integer value) +#memcache_pool_socket_timeout = 3 + +# (Optional) Number of seconds a connection to memcached is held unused in the +# pool before it is closed. (integer value) +#memcache_pool_unused_timeout = 60 + +# (Optional) Number of seconds that an operation will wait to get a memcached +# client connection from the pool. (integer value) +#memcache_pool_conn_get_timeout = 10 + +# (Optional) Use the advanced (eventlet safe) memcached client pool. The +# advanced pool will only work under python 2.x. (boolean value) +#memcache_use_advanced_pool = false + +# (Optional) Indicate whether to set the X-Service-Catalog header. If False, +# middleware will not ask for service catalog on token validation and will not +# set the X-Service-Catalog header. (boolean value) +#include_service_catalog = true + +# Used to control the use and type of token binding. Can be set to: "disabled" +# to not check token binding. "permissive" (default) to validate binding +# information if the bind type is of a form known to the server and ignore it +# if not. "strict" like "permissive" but if the bind type is unknown the token +# will be rejected. "required" any form of token binding is needed to be +# allowed. Finally the name of a binding method that must be present in tokens. +# (string value) +#enforce_token_bind = permissive + +# DEPRECATED: If true, the revocation list will be checked for cached tokens. +# This requires that PKI tokens are configured on the identity server. 
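+
+# Example (illustrative only): cache validated tokens in memcached with
+# authenticated-and-encrypted entries. Addresses and key are placeholders.
+# memcached_servers = 192.168.0.5:11211,192.168.0.6:11211
+# memcache_security_strategy = ENCRYPT
+# memcache_secret_key = some-site-specific-secret
+# token_cache_time = 300
+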
(boolean +# value) +# This option is deprecated for removal since Ocata. +# Its value may be silently ignored in the future. +# Reason: PKI token format is no longer supported. +#check_revocations_for_cached = false + +# DEPRECATED: Hash algorithms to use for hashing PKI tokens. This may be a +# single algorithm or multiple. The algorithms are those supported by Python +# standard hashlib.new(). The hashes will be tried in the order given, so put +# the preferred one first for performance. The result of the first hash will be +# stored in the cache. This will typically be set to multiple values only while +# migrating from a less secure algorithm to a more secure one. Once all the old +# tokens are expired this option should be set to a single value for better +# performance. (list value) +# This option is deprecated for removal since Ocata. +# Its value may be silently ignored in the future. +# Reason: PKI token format is no longer supported. +#hash_algorithms = md5 + +# A choice of roles that must be present in a service token. Service tokens are +# allowed to request that an expired token can be used and so this check should +# tightly control that only actual services should be sending this token. Roles +# here are applied as an ANY check so any role in this list must be present. +# For backwards compatibility reasons this currently only affects the +# allow_expired check. (list value) +#service_token_roles = service + +# For backwards compatibility reasons we must let valid service tokens pass +# that don't pass the service_token_roles check as valid. Setting this true +# will become the default in a future release and should be enabled if +# possible. (boolean value) +#service_token_roles_required = false + +# Authentication type to load (string value) +# Deprecated group/name - [keystone_authtoken]/auth_plugin +#auth_type = + +# Config Section from which to load plugin specific options (string value) +#auth_section = + + +[matchmaker_redis] + +# +# From oslo.messaging +# + +# DEPRECATED: Host to locate redis. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url +#host = 127.0.0.1 + +# DEPRECATED: Use this port to connect to redis host. (port value) +# Minimum value: 0 +# Maximum value: 65535 +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url +#port = 6379 + +# DEPRECATED: Password for Redis server (optional). (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url +#password = + +# DEPRECATED: List of Redis Sentinel hosts (fault tolerance mode), e.g., +# [host:port, host1:port ... ] (list value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url +#sentinel_hosts = + +# Redis replica set name. (string value) +#sentinel_group_name = oslo-messaging-zeromq + +# Time in ms to wait between connection attempts. (integer value) +#wait_timeout = 2000 + +# Time in ms to wait before the transaction is killed. (integer value) +#check_timeout = 20000 + +# Timeout in ms on blocking socket operations. 
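+
+# Example (illustrative only): service credentials loaded through the
+# password auth plugin. auth_url and the names below come from the plugin
+# (keystoneauth), not from the option list above; all values are
+# placeholders.
+# auth_type = password
+# auth_url = http://controller:35357
+# project_name = services
+# username = cinder
+# password = ***
+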
(integer value) +#socket_timeout = 10000 + + +[nova_client] + +# +# From cinder +# + +# The max number of the timing objects to keep (integer value) +#max_timing_buffer = 200 + + +[nova_group] + +# +# From cinder +# + +# Name of nova region to use. Useful if keystone manages more than one region. +# (string value) +# Deprecated group/name - [DEFAULT]/os_region_name +#region_name = + +# Type of the nova endpoint to use. This endpoint will be looked up in the +# keystone catalog and should be one of public, internal or admin. (string +# value) +# Allowed values: public, admin, internal +#interface = public + +# The authentication URL for the nova connection when using the current users +# token (string value) +#token_auth_url = + +# PEM encoded Certificate Authority to use when verifying HTTPs connections. +# (string value) +# Deprecated group/name - [nova_group]/nova_ca_certificates_file +#cafile = + +# PEM encoded client certificate cert file (string value) +#certfile = + +# PEM encoded client certificate key file (string value) +#keyfile = + +# Verify HTTPS connections. (boolean value) +# Deprecated group/name - [nova_group]/nova_api_insecure +#insecure = false + +# Timeout value for http requests (integer value) +#timeout = + +# Authentication type to load (string value) +# Deprecated group/name - [nova_group]/auth_plugin +#auth_type = + +# Config Section from which to load plugin specific options (string value) +#auth_section = + + +[oslo_concurrency] + +# +# From oslo.concurrency +# + +# Enables or disables inter-process locks. (boolean value) +#disable_process_locking = false + +# Directory to use for lock files. For security, the specified directory +# should only be writable by the user running the processes that need locking. +# Defaults to environment variable OSLO_LOCK_PATH. If external locks are used, +# a lock path must be set. (string value) +#lock_path = + + +[oslo_messaging_amqp] + +# +# From oslo.messaging +# + +# Name for the AMQP container. must be globally unique. Defaults to a generated +# UUID (string value) +#container_name = + +# Timeout for inactive connections (in seconds) (integer value) +#idle_timeout = 0 + +# Debug: dump AMQP frames to stdout (boolean value) +#trace = false + +# Attempt to connect via SSL. If no other ssl-related parameters are given, it +# will use the system's CA-bundle to verify the server's certificate. (boolean +# value) +#ssl = false + +# CA certificate PEM file used to verify the server's certificate (string +# value) +#ssl_ca_file = + +# Self-identifying certificate PEM file for client authentication (string +# value) +#ssl_cert_file = + +# Private key PEM file used to sign ssl_cert_file certificate (optional) +# (string value) +#ssl_key_file = + +# Password for decrypting ssl_key_file (if encrypted) (string value) +#ssl_key_password = + +# By default SSL checks that the name in the server's certificate matches the +# hostname in the transport_url. In some configurations it may be preferable to +# use the virtual hostname instead, for example if the server uses the Server +# Name Indication TLS extension (rfc6066) to provide a certificate per virtual +# host. Set ssl_verify_vhost to True if the server's SSL certificate uses the +# virtual host name instead of the DNS name. (boolean value) +#ssl_verify_vhost = false + +# DEPRECATED: Accept clients using either SSL or plain TCP (boolean value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. 
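+
+# Example (illustrative only): TLS towards an AMQP 1.0 broker using the ssl_*
+# options above; certificate paths are placeholders.
+# ssl = true
+# ssl_ca_file = /etc/pki/tls/certs/msg-ca.pem
+# ssl_cert_file = /etc/pki/tls/certs/cinder.pem
+# ssl_key_file = /etc/pki/tls/private/cinder.key
+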
+# Reason: Not applicable - not a SSL server +#allow_insecure_clients = false + +# Space separated list of acceptable SASL mechanisms (string value) +#sasl_mechanisms = + +# Path to directory that contains the SASL configuration (string value) +#sasl_config_dir = + +# Name of configuration file (without .conf suffix) (string value) +#sasl_config_name = + +# SASL realm to use if no realm present in username (string value) +#sasl_default_realm = + +# DEPRECATED: User name for message broker authentication (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Should use configuration option transport_url to provide the +# username. +#username = + +# DEPRECATED: Password for message broker authentication (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Should use configuration option transport_url to provide the +# password. +#password = + +# Seconds to pause before attempting to re-connect. (integer value) +# Minimum value: 1 +#connection_retry_interval = 1 + +# Increase the connection_retry_interval by this many seconds after each +# unsuccessful failover attempt. (integer value) +# Minimum value: 0 +#connection_retry_backoff = 2 + +# Maximum limit for connection_retry_interval + connection_retry_backoff +# (integer value) +# Minimum value: 1 +#connection_retry_interval_max = 30 + +# Time to pause between re-connecting an AMQP 1.0 link that failed due to a +# recoverable error. (integer value) +# Minimum value: 1 +#link_retry_delay = 10 + +# The maximum number of attempts to re-send a reply message which failed due to +# a recoverable error. (integer value) +# Minimum value: -1 +#default_reply_retry = 0 + +# The deadline for an rpc reply message delivery. (integer value) +# Minimum value: 5 +#default_reply_timeout = 30 + +# The deadline for an rpc cast or call message delivery. Only used when caller +# does not provide a timeout expiry. (integer value) +# Minimum value: 5 +#default_send_timeout = 30 + +# The deadline for a sent notification message delivery. Only used when caller +# does not provide a timeout expiry. (integer value) +# Minimum value: 5 +#default_notify_timeout = 30 + +# The duration to schedule a purge of idle sender links. Detach link after +# expiry. (integer value) +# Minimum value: 1 +#default_sender_link_timeout = 600 + +# Indicates the addressing mode used by the driver. +# Permitted values: +# 'legacy' - use legacy non-routable addressing +# 'routable' - use routable addresses +# 'dynamic' - use legacy addresses if the message bus does not support routing +# otherwise use routable addressing (string value) +#addressing_mode = dynamic + +# Enable virtual host support for those message buses that do not natively +# support virtual hosting (such as qpidd). When set to true the virtual host +# name will be added to all message bus addresses, effectively creating a +# private 'subnet' per virtual host. Set to False if the message bus supports +# virtual hosting using the 'hostname' field in the AMQP 1.0 Open performative +# as the name of the virtual host. 
(boolean value) +#pseudo_vhost = true + +# address prefix used when sending to a specific server (string value) +#server_request_prefix = exclusive + +# address prefix used when broadcasting to all servers (string value) +#broadcast_prefix = broadcast + +# address prefix when sending to any server in group (string value) +#group_request_prefix = unicast + +# Address prefix for all generated RPC addresses (string value) +#rpc_address_prefix = openstack.org/om/rpc + +# Address prefix for all generated Notification addresses (string value) +#notify_address_prefix = openstack.org/om/notify + +# Appended to the address prefix when sending a fanout message. Used by the +# message bus to identify fanout messages. (string value) +#multicast_address = multicast + +# Appended to the address prefix when sending to a particular RPC/Notification +# server. Used by the message bus to identify messages sent to a single +# destination. (string value) +#unicast_address = unicast + +# Appended to the address prefix when sending to a group of consumers. Used by +# the message bus to identify messages that should be delivered in a round- +# robin fashion across consumers. (string value) +#anycast_address = anycast + +# Exchange name used in notification addresses. +# Exchange name resolution precedence: +# Target.exchange if set +# else default_notification_exchange if set +# else control_exchange if set +# else 'notify' (string value) +#default_notification_exchange = + +# Exchange name used in RPC addresses. +# Exchange name resolution precedence: +# Target.exchange if set +# else default_rpc_exchange if set +# else control_exchange if set +# else 'rpc' (string value) +#default_rpc_exchange = + +# Window size for incoming RPC Reply messages. (integer value) +# Minimum value: 1 +#reply_link_credit = 200 + +# Window size for incoming RPC Request messages (integer value) +# Minimum value: 1 +#rpc_server_credit = 100 + +# Window size for incoming Notification messages (integer value) +# Minimum value: 1 +#notify_server_credit = 100 + +# Send messages of this type pre-settled. +# Pre-settled messages will not receive acknowledgement +# from the peer. Note well: pre-settled messages may be +# silently discarded if the delivery fails. +# Permitted values: +# 'rpc-call' - send RPC Calls pre-settled +# 'rpc-reply'- send RPC Replies pre-settled +# 'rpc-cast' - Send RPC Casts pre-settled +# 'notify' - Send Notifications pre-settled +# (multi valued) +#pre_settled = rpc-cast +#pre_settled = rpc-reply + + +[oslo_messaging_kafka] + +# +# From oslo.messaging +# + +# DEPRECATED: Default Kafka broker Host (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url +#kafka_default_host = localhost + +# DEPRECATED: Default Kafka broker Port (port value) +# Minimum value: 0 +# Maximum value: 65535 +# This option is deprecated for removal. +# Its value may be silently ignored in the future. 
+# Reason: Replaced by [DEFAULT]/transport_url +#kafka_default_port = 9092 + +# Max fetch bytes of Kafka consumer (integer value) +#kafka_max_fetch_bytes = 1048576 + +# Default timeout(s) for Kafka consumers (floating point value) +#kafka_consumer_timeout = 1.0 + +# Pool Size for Kafka Consumers (integer value) +#pool_size = 10 + +# The pool size limit for connections expiration policy (integer value) +#conn_pool_min_size = 2 + +# The time-to-live in sec of idle connections in the pool (integer value) +#conn_pool_ttl = 1200 + +# Group id for Kafka consumer. Consumers in one group will coordinate message +# consumption (string value) +#consumer_group = oslo_messaging_consumer + +# Upper bound on the delay for KafkaProducer batching in seconds (floating +# point value) +#producer_batch_timeout = 0.0 + +# Size of batch for the producer async send (integer value) +#producer_batch_size = 16384 + + +[oslo_messaging_notifications] + +# +# From oslo.messaging +# + +# The Drivers(s) to handle sending notifications. Possible values are +# messaging, messagingv2, routing, log, test, noop (multi valued) +# Deprecated group/name - [DEFAULT]/notification_driver +#driver = + +# A URL representing the messaging driver to use for notifications. If not set, +# we fall back to the same configuration used for RPC. (string value) +# Deprecated group/name - [DEFAULT]/notification_transport_url +#transport_url = + +# AMQP topic used for OpenStack notifications. (list value) +# Deprecated group/name - [rpc_notifier2]/topics +# Deprecated group/name - [DEFAULT]/notification_topics +#topics = notifications + +# The maximum number of attempts to re-send a notification message which failed +# to be delivered due to a recoverable error. 0 - No retry, -1 - indefinite +# (integer value) +#retry = -1 + + +[oslo_messaging_rabbit] + +# +# From oslo.messaging +# + +# Use durable queues in AMQP. (boolean value) +# Deprecated group/name - [DEFAULT]/amqp_durable_queues +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues = false + +# Auto-delete queues in AMQP. (boolean value) +#amqp_auto_delete = false + +# Enable SSL (boolean value) +#ssl = + +# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and +# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some +# distributions. (string value) +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_version +#ssl_version = + +# SSL key file (valid only if SSL enabled). (string value) +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_keyfile +#ssl_key_file = + +# SSL cert file (valid only if SSL enabled). (string value) +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_certfile +#ssl_cert_file = + +# SSL certification authority file (valid only if SSL enabled). (string value) +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_ca_certs +#ssl_ca_file = + +# How long to wait before reconnecting in response to an AMQP consumer cancel +# notification. (floating point value) +#kombu_reconnect_delay = 1.0 + +# EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression will not +# be used. This option may not be available in future versions. (string value) +#kombu_compression = + +# How long to wait a missing client before abandoning to send it its replies. +# This value should not be longer than rpc_response_timeout. 
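+
+# Example (illustrative only): emit notifications using the 2.0 message
+# format over the RPC transport, on the default topic.
+# driver = messagingv2
+# topics = notifications
+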
(integer value) +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_reconnect_timeout +#kombu_missing_consumer_retry_timeout = 60 + +# Determines how the next RabbitMQ node is chosen in case the one we are +# currently connected to becomes unavailable. Takes effect only if more than +# one RabbitMQ node is provided in config. (string value) +# Allowed values: round-robin, shuffle +#kombu_failover_strategy = round-robin + +# DEPRECATED: The RabbitMQ broker address where a single node is used. (string +# value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url +#rabbit_host = localhost + +# DEPRECATED: The RabbitMQ broker port where a single node is used. (port +# value) +# Minimum value: 0 +# Maximum value: 65535 +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url +#rabbit_port = 5672 + +# DEPRECATED: RabbitMQ HA cluster host:port pairs. (list value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url +#rabbit_hosts = $rabbit_host:$rabbit_port + +# DEPRECATED: The RabbitMQ userid. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url +#rabbit_userid = guest + +# DEPRECATED: The RabbitMQ password. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url +#rabbit_password = guest + +# The RabbitMQ login method. (string value) +# Allowed values: PLAIN, AMQPLAIN, RABBIT-CR-DEMO +#rabbit_login_method = AMQPLAIN + +# DEPRECATED: The RabbitMQ virtual host. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Replaced by [DEFAULT]/transport_url +#rabbit_virtual_host = / + +# How frequently to retry connecting with RabbitMQ. (integer value) +#rabbit_retry_interval = 1 + +# How long to backoff for between retries when connecting to RabbitMQ. (integer +# value) +#rabbit_retry_backoff = 2 + +# Maximum interval of RabbitMQ connection retries. Default is 30 seconds. +# (integer value) +#rabbit_interval_max = 30 + +# DEPRECATED: Maximum number of RabbitMQ connection retries. Default is 0 +# (infinite retry count). (integer value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#rabbit_max_retries = 0 + +# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this +# option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring +# is no longer controlled by the x-ha-policy argument when declaring a queue. +# If you just want to make sure that all queues (except those with auto- +# generated names) are mirrored across all nodes, run: "rabbitmqctl set_policy +# HA '^(?!amq\.).*' '{"ha-mode": "all"}' " (boolean value) +#rabbit_ha_queues = false + +# Positive integer representing duration in seconds for queue TTL (x-expires). +# Queues which are unused for the duration of the TTL are automatically +# deleted. The parameter affects only reply and fanout queues. (integer value) +# Minimum value: 1 +#rabbit_transient_queues_ttl = 1800 + +# Specifies the number of messages to prefetch. Setting to zero allows +# unlimited messages. 
(integer value) +#rabbit_qos_prefetch_count = 0 + +# Number of seconds after which the Rabbit broker is considered down if +# heartbeat's keep-alive fails (0 disable the heartbeat). EXPERIMENTAL (integer +# value) +#heartbeat_timeout_threshold = 60 + +# How often times during the heartbeat_timeout_threshold we check the +# heartbeat. (integer value) +#heartbeat_rate = 2 + +# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value) +#fake_rabbit = false + +# Maximum number of channels to allow (integer value) +#channel_max = + +# The maximum byte size for an AMQP frame (integer value) +#frame_max = + +# How often to send heartbeats for consumer's connections (integer value) +#heartbeat_interval = 3 + +# Arguments passed to ssl.wrap_socket (dict value) +#ssl_options = + +# Set socket timeout in seconds for connection's socket (floating point value) +#socket_timeout = 0.25 + +# Set TCP_USER_TIMEOUT in seconds for connection's socket (floating point +# value) +#tcp_user_timeout = 0.25 + +# Set delay for reconnection to some host which has connection error (floating +# point value) +#host_connection_reconnect_delay = 0.25 + +# Connection factory implementation (string value) +# Allowed values: new, single, read_write +#connection_factory = single + +# Maximum number of connections to keep queued. (integer value) +#pool_max_size = 30 + +# Maximum number of connections to create above `pool_max_size`. (integer +# value) +#pool_max_overflow = 0 + +# Default number of seconds to wait for a connections to available (integer +# value) +#pool_timeout = 30 + +# Lifetime of a connection (since creation) in seconds or None for no +# recycling. Expired connections are closed on acquire. (integer value) +#pool_recycle = 600 + +# Threshold at which inactive (since release) connections are considered stale +# in seconds or None for no staleness. Stale connections are closed on acquire. +# (integer value) +#pool_stale = 60 + +# Default serialization mechanism for serializing/deserializing +# outgoing/incoming messages (string value) +# Allowed values: json, msgpack +#default_serializer_type = json + +# Persist notification messages. (boolean value) +#notification_persistence = false + +# Exchange name for sending notifications (string value) +#default_notification_exchange = ${control_exchange}_notification + +# Max number of not acknowledged message which RabbitMQ can send to +# notification listener. (integer value) +#notification_listener_prefetch_count = 100 + +# Reconnecting retry count in case of connectivity problem during sending +# notification, -1 means infinite retry. (integer value) +#default_notification_retry_attempts = -1 + +# Reconnecting retry delay in case of connectivity problem during sending +# notification message (floating point value) +#notification_retry_delay = 0.25 + +# Time to live for rpc queues without consumers in seconds. (integer value) +#rpc_queue_expiration = 60 + +# Exchange name for sending RPC messages (string value) +#default_rpc_exchange = ${control_exchange}_rpc + +# Exchange name for receiving RPC replies (string value) +#rpc_reply_exchange = ${control_exchange}_rpc_reply + +# Max number of not acknowledged message which RabbitMQ can send to rpc +# listener. (integer value) +#rpc_listener_prefetch_count = 100 + +# Max number of not acknowledged message which RabbitMQ can send to rpc reply +# listener. (integer value) +#rpc_reply_listener_prefetch_count = 100 + +# Reconnecting retry count in case of connectivity problem during sending +# reply. 
-1 means infinite retry during rpc_timeout (integer value) +#rpc_reply_retry_attempts = -1 + +# Reconnecting retry delay in case of connectivity problem during sending +# reply. (floating point value) +#rpc_reply_retry_delay = 0.25 + +# Reconnecting retry count in case of connectivity problem during sending RPC +# message, -1 means infinite retry. If actual retry attempts in not 0 the rpc +# request could be processed more than one time (integer value) +#default_rpc_retry_attempts = -1 + +# Reconnecting retry delay in case of connectivity problem during sending RPC +# message (floating point value) +#rpc_retry_delay = 0.25 + + +[oslo_messaging_zmq] + +# +# From oslo.messaging +# + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. +# The "host" option should point or resolve to this address. (string value) +#rpc_zmq_bind_address = * + +# MatchMaker driver. (string value) +# Allowed values: redis, sentinel, dummy +#rpc_zmq_matchmaker = redis + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts = 1 + +# Maximum number of ingress messages to locally buffer per topic. Default is +# unlimited. (integer value) +#rpc_zmq_topic_backlog = + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir = /var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match +# "host" option, if running Nova. (string value) +#rpc_zmq_host = localhost + +# Number of seconds to wait before all pending messages will be sent after +# closing a socket. The default value of -1 specifies an infinite linger +# period. The value of 0 specifies no linger period. Pending messages shall be +# discarded immediately when the socket is closed. Positive values specify an +# upper bound for the linger period. (integer value) +# Deprecated group/name - [DEFAULT]/rpc_cast_timeout +#zmq_linger = -1 + +# The default number of seconds that poll should wait. Poll raises timeout +# exception when timeout expired. (integer value) +#rpc_poll_timeout = 1 + +# Expiration timeout in seconds of a name service record about existing target +# ( < 0 means no timeout). (integer value) +#zmq_target_expire = 300 + +# Update period in seconds of a name service record about existing target. +# (integer value) +#zmq_target_update = 180 + +# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. (boolean +# value) +#use_pub_sub = false + +# Use ROUTER remote proxy. (boolean value) +#use_router_proxy = false + +# This option makes direct connections dynamic or static. It makes sense only +# with use_router_proxy=False which means to use direct connections for direct +# message types (ignored otherwise). (boolean value) +#use_dynamic_connections = false + +# How many additional connections to a host will be made for failover reasons. +# This option is actual only in dynamic connections mode. (integer value) +#zmq_failover_connections = 2 + +# Minimal port number for random ports range. (port value) +# Minimum value: 0 +# Maximum value: 65535 +#rpc_zmq_min_port = 49153 + +# Maximal port number for random ports range. (integer value) +# Minimum value: 1 +# Maximum value: 65536 +#rpc_zmq_max_port = 65536 + +# Number of retries to find free port number before fail with ZMQBindError. 
+# (integer value) +#rpc_zmq_bind_port_retries = 100 + +# Default serialization mechanism for serializing/deserializing +# outgoing/incoming messages (string value) +# Allowed values: json, msgpack +#rpc_zmq_serialization = json + +# This option configures round-robin mode in zmq socket. True means not keeping +# a queue when server side disconnects. False means to keep queue and messages +# even if server is disconnected, when the server appears we send all +# accumulated messages to it. (boolean value) +#zmq_immediate = true + +# Enable/disable TCP keepalive (KA) mechanism. The default value of -1 (or any +# other negative value) means to skip any overrides and leave it to OS default; +# 0 and 1 (or any other positive value) mean to disable and enable the option +# respectively. (integer value) +#zmq_tcp_keepalive = -1 + +# The duration between two keepalive transmissions in idle condition. The unit +# is platform dependent, for example, seconds in Linux, milliseconds in Windows +# etc. The default value of -1 (or any other negative value and 0) means to +# skip any overrides and leave it to OS default. (integer value) +#zmq_tcp_keepalive_idle = -1 + +# The number of retransmissions to be carried out before declaring that remote +# end is not available. The default value of -1 (or any other negative value +# and 0) means to skip any overrides and leave it to OS default. (integer +# value) +#zmq_tcp_keepalive_cnt = -1 + +# The duration between two successive keepalive retransmissions, if +# acknowledgement to the previous keepalive transmission is not received. The +# unit is platform dependent, for example, seconds in Linux, milliseconds in +# Windows etc. The default value of -1 (or any other negative value and 0) +# means to skip any overrides and leave it to OS default. (integer value) +#zmq_tcp_keepalive_intvl = -1 + +# Maximum number of (green) threads to work concurrently. (integer value) +#rpc_thread_pool_size = 100 + +# Expiration timeout in seconds of a sent/received message after which it is +# not tracked anymore by a client/server. (integer value) +#rpc_message_ttl = 300 + +# Wait for message acknowledgements from receivers. This mechanism works only +# via proxy without PUB/SUB. (boolean value) +#rpc_use_acks = false + +# Number of seconds to wait for an ack from a cast/call. After each retry +# attempt this timeout is multiplied by some specified multiplier. (integer +# value) +#rpc_ack_timeout_base = 15 + +# Number to multiply base ack timeout by after each retry attempt. (integer +# value) +#rpc_ack_timeout_multiplier = 2 + +# Default number of message sending attempts in case of any problems occurred: +# positive value N means at most N retries, 0 means no retries, None or -1 (or +# any other negative values) mean to retry forever. This option is used only if +# acknowledgments are enabled. (integer value) +#rpc_retry_attempts = 3 + +# List of publisher hosts SubConsumer can subscribe on. This option has higher +# priority then the default publishers list taken from the matchmaker. (list +# value) +#subscribe_on = + + +[oslo_middleware] + +# +# From oslo.middleware +# + +# The maximum body size for each request, in bytes. (integer value) +# Deprecated group/name - [DEFAULT]/osapi_max_request_body_size +# Deprecated group/name - [DEFAULT]/max_request_body_size +#max_request_body_size = 114688 + +# DEPRECATED: The HTTP Header that will be used to determine what the original +# request protocol scheme was, even if it was hidden by a SSL termination +# proxy. 
(string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#secure_proxy_ssl_header = X-Forwarded-Proto + +# Whether the application is behind a proxy or not. This determines if the +# middleware should parse the headers or not. (boolean value) +#enable_proxy_headers_parsing = false + + +[oslo_policy] + +# +# From oslo.policy +# + +# The file that defines policies. (string value) +#policy_file = policy.json + +# Default rule. Enforced when a requested rule is not found. (string value) +#policy_default_rule = default + +# Directories where policy configuration files are stored. They can be relative +# to any directory in the search path defined by the config_dir option, or +# absolute paths. The file defined by policy_file must exist for these +# directories to be searched. Missing or empty directories are ignored. (multi +# valued) +#policy_dirs = policy.d + + +[oslo_reports] + +# +# From oslo.reports +# + +# Path to a log directory where to create a file (string value) +#log_dir = + +# The path to a file to watch for changes to trigger the reports, instead of +# signals. Setting this option disables the signal trigger for the reports. If +# application is running as a WSGI application it is recommended to use this +# instead of signals. (string value) +#file_event_handler = + +# How many seconds to wait between polls when file_event_handler is set +# (integer value) +#file_event_handler_interval = 1 + + +[oslo_versionedobjects] + +# +# From oslo.versionedobjects +# + +# Make exception message format errors fatal (boolean value) +#fatal_exception_format_errors = false + + +[profiler] + +# +# From osprofiler +# + +# +# Enables the profiling for all services on this node. Default value is False +# (fully disable the profiling feature). +# +# Possible values: +# +# * True: Enables the feature +# * False: Disables the feature. The profiling cannot be started via this +# project +# operations. If the profiling is triggered by another project, this project +# part +# will be empty. +# (boolean value) +# Deprecated group/name - [profiler]/profiler_enabled +#enabled = false + +# +# Enables SQL requests profiling in services. Default value is False (SQL +# requests won't be traced). +# +# Possible values: +# +# * True: Enables SQL requests profiling. Each SQL query will be part of the +# trace and can the be analyzed by how much time was spent for that. +# * False: Disables SQL requests profiling. The spent time is only shown on a +# higher level of operations. Single SQL queries cannot be analyzed this +# way. +# (boolean value) +#trace_sqlalchemy = false + +# +# Secret key(s) to use for encrypting context data for performance profiling. +# This string value should have the following format: +# [,,...], +# where each key is some random string. A user who triggers the profiling via +# the REST API has to set one of these keys in the headers of the REST API call +# to include profiling results of this node for this particular project. +# +# Both "enabled" flag and "hmac_keys" config options should be set to enable +# profiling. Also, to generate correct profiling information across all +# services +# at least one key needs to be consistent between OpenStack projects. This +# ensures it can be used from client side to generate the trace, containing +# information from all possible resources. (string value) +#hmac_keys = SECRET_KEY + +# +# Connection string for a notifier backend. 
Default value is messaging:// which +# sets the notifier to oslo_messaging. +# +# Examples of possible values: +# +# * messaging://: use oslo_messaging driver for sending notifications. +# * mongodb://127.0.0.1:27017 : use mongodb driver for sending notifications. +# * elasticsearch://127.0.0.1:9200 : use elasticsearch driver for sending +# notifications. +# (string value) +#connection_string = messaging:// + +# +# Document type for notification indexing in elasticsearch. +# (string value) +#es_doc_type = notification + +# +# This parameter is a time value parameter (for example: es_scroll_time=2m), +# indicating for how long the nodes that participate in the search will +# maintain +# relevant resources in order to continue and support it. +# (string value) +#es_scroll_time = 2m + +# +# Elasticsearch splits large requests in batches. This parameter defines +# maximum size of each batch (for example: es_scroll_size=10000). +# (integer value) +#es_scroll_size = 10000 + +# +# Redissentinel provides a timeout option on the connections. +# This parameter defines that timeout (for example: socket_timeout=0.1). +# (floating point value) +#socket_timeout = 0.1 + +# +# Redissentinel uses a service name to identify a master redis service. +# This parameter defines the name (for example: +# sentinal_service_name=mymaster). +# (string value) +#sentinel_service_name = mymaster + + +[ssl] + +# +# From oslo.service.sslutils +# + +# CA certificate file to use to verify connecting clients. (string value) +# Deprecated group/name - [DEFAULT]/ssl_ca_file +#ca_file = + +# Certificate file to use when starting the server securely. (string value) +# Deprecated group/name - [DEFAULT]/ssl_cert_file +#cert_file = + +# Private key file to use when starting the server securely. (string value) +# Deprecated group/name - [DEFAULT]/ssl_key_file +#key_file = + +# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and +# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some +# distributions. (string value) +#version = + +# Sets the list of available ciphers. value should be a string in the OpenSSL +# cipher list format. 
(string value) +#ciphers = diff --git a/openstack/python-cinder/centos/files/cinder.logrotate b/openstack/python-cinder/centos/files/cinder.logrotate new file mode 100644 index 00000000..b31713ff --- /dev/null +++ b/openstack/python-cinder/centos/files/cinder.logrotate @@ -0,0 +1,11 @@ +compress + +/var/log/cinder/*.log { + weekly + rotate 4 + missingok + compress + minsize 100k + size 10M + copytruncate +} diff --git a/openstack/python-cinder/centos/files/openstack-cinder-api.service b/openstack/python-cinder/centos/files/openstack-cinder-api.service new file mode 100644 index 00000000..16a4f3ee --- /dev/null +++ b/openstack/python-cinder/centos/files/openstack-cinder-api.service @@ -0,0 +1,18 @@ +[Unit] +Description=OpenStack Cinder API Server +After=syslog.target network.target + +[Service] +Type=simple +# WRS - use root user +#User=cinder +User=root +ExecStart=/usr/bin/cinder-api --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf --logfile /var/log/cinder/cinder-api.log +# WRS - Managed by sm/OCF scripts +#Restart=on-failure +#KillMode=process +PIDFile=/var/run/resource-agents/cinder-api.pid + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/python-cinder/centos/files/openstack-cinder-backup.service b/openstack/python-cinder/centos/files/openstack-cinder-backup.service new file mode 100644 index 00000000..30b8dc89 --- /dev/null +++ b/openstack/python-cinder/centos/files/openstack-cinder-backup.service @@ -0,0 +1,16 @@ +[Unit] +Description=OpenStack Cinder Backup Server +After=syslog.target network.target + +[Service] +Type=simple +# WRS - use root user +#User=cinder +User=root +ExecStart=/usr/bin/cinder-backup --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf --logfile /var/log/cinder/cinder-backup.log +# WRS - Currently not used but would be also managed by sm +#Restart=on-failure + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/python-cinder/centos/files/openstack-cinder-scheduler.service b/openstack/python-cinder/centos/files/openstack-cinder-scheduler.service new file mode 100644 index 00000000..ccf8ade1 --- /dev/null +++ b/openstack/python-cinder/centos/files/openstack-cinder-scheduler.service @@ -0,0 +1,17 @@ +[Unit] +Description=OpenStack Cinder Scheduler Server +After=syslog.target network.target + +[Service] +Type=simple +# WRS - use root user +#User=cinder +User=root +ExecStart=/usr/bin/cinder-scheduler --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf --logfile /var/log/cinder/cinder-scheduler.log +# WRS - Managed by sm +#Restart=on-failure +Restart=on-failure + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/python-cinder/centos/files/openstack-cinder-volume.service b/openstack/python-cinder/centos/files/openstack-cinder-volume.service new file mode 100644 index 00000000..1183024a --- /dev/null +++ b/openstack/python-cinder/centos/files/openstack-cinder-volume.service @@ -0,0 +1,19 @@ +[Unit] +Description=OpenStack Cinder Volume Server +After=syslog.target network.target + +[Service] +LimitNOFILE=131072 +LimitNPROC=131072 +Type=simple +# WRS - use root user +#User=cinder +User=root +ExecStart=/usr/bin/cinder-volume --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf --logfile /var/log/cinder/cinder-volume.log +# WRS - Managed by sm +#Restart=on-failure +#KillMode=process + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/python-cinder/centos/files/restart-cinder 
b/openstack/python-cinder/centos/files/restart-cinder
new file mode 100644
index 00000000..da079608
--- /dev/null
+++ b/openstack/python-cinder/centos/files/restart-cinder
@@ -0,0 +1,164 @@
+#!/bin/bash
+#
+# Copyright (c) 2016 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+#
+# The patching subsystem provides a patch-functions bash source file
+# with useful function and variable definitions.
+#
+. /etc/patching/patch-functions
+
+#
+# We can now check to see what type of node we're on, if it's locked, etc,
+# and act accordingly
+#
+
+#
+# Declare an overall script return code
+#
+declare -i GLOBAL_RC=$PATCH_STATUS_OK
+
+#
+# Handle restarting Cinder services
+#
+
+# Syntax: ("<service_name> <timeout> <initialize_interval> <after_timeout>"\)
+SERVICES=("cinder-volume 30 30 kill"\
+          "cinder-scheduler 50 20 kill"\
+          "cinder-api 30 20 kill")
+# where:
+# <service_name> = name of executable file reported by ps
+# <timeout> = how long to wait for the process to gracefully shut down
+# <after_timeout> = either 'kill' the process with SIGKILL or 'leave' it running;
+#     the idea is to avoid leaving behind processes that are degraded, better have
+#     new ones re-spawn
+# <initialize_interval> = how long to wait before the new process can be
+#     considered available.
+# The processes are restarted by sm by running the ocf scripts at
+# /usr/lib/ocf/resource.d/openstack/cinder-*. These scripts have a very good service init
+# monitoring routine. We just have to make sure that they don't hang while restarting.
+# The values are taken from SM, but we don't wait for any retry.
+# Note: cinder-volume timeout is set to 180 secs in sm which is too much for our controlled
+# restart
+
+
+function get_pid {
+    local service=$1
+    PID=$(cat /var/run/resource-agents/$service.pid)
+    echo "$PID"
+}
+
+if is_controller
+then
+    # Cinder services only run on the controller
+
+    if [ ! -f $PATCH_FLAGDIR/cinder.restarted ]
+    then
+        touch $PATCH_FLAGDIR/cinder.restarted
+        # cinder-volume uses this to know that its new process was restarted by in-service patching
+        touch $PATCH_FLAGDIR/cinder.restarting
+        for s in "${SERVICES[@]}"; do
+            set -- $s
+            service="$1"
+            timeout="$2"
+            initialize_interval="$3"
+            after_timeout="$4"
+            new_process="false"
+
+            # Check SM to see if service is running
+            sm-query service $service | grep -q 'enabled-active'
+            if [ $? -eq 0 ]
+            then
+                loginfo "$0: Restarting $service"
+
+                # Get PID
+                PID=$(get_pid $service)
+
+                # Send restart signal to process
+                kill -s TERM $PID
+
+                # Wait up to $timeout seconds for service to gracefully recover
+                let -i UNTIL=$SECONDS+$timeout
+                while [ $UNTIL -ge $SECONDS ]
+                do
+                    # Check to see if we have a new process
+                    NEW_PID=$(get_pid $service)
+                    if [[ "$PID" != "$NEW_PID" ]]
+                    then
+                        # We have a new process
+                        new_process="true"
+                        break
+                    fi
+
+                    # Still old process? Let's wait 5 seconds and check again
+                    sleep 5
+                done
+
+                # Do a hard restart of the process if we still have the old one
+                NEW_PID=$(get_pid $service)
+                if [[ "$PID" == "$NEW_PID" ]]
+                then
+                    # we have the old process still running!
+                    if [[ "$after_timeout" == "kill" ]]
+                    then
+                        loginfo "$0: Old process of $service failed to gracefully terminate in $timeout, killing it!"
+                        # kill the old process
+                        kill -s KILL $PID
+                        # wait for a new process to be restarted by sm
+                        let -i UNTIL=$SECONDS+10
+                        while [ $UNTIL -ge $SECONDS ]
+                        do
+                            sleep 1
+                            # Check to see if we have a new process
+                            NEW_PID=$(get_pid $service)
+                            if [[ ! -z "$NEW_PID" ]] && [[ "$PID" != "$NEW_PID" ]]
-z "$NEW_PID" ]] && [[ "$PID" != "$NEW_PID" ]] + then + loginfo "$0: New process of $service started" + new_process="true" + break + fi + done + fi + fi + + # Wait for the new process to complete initialisation + if [[ "$new_process" == "true" ]] + then + let -i UNTIL=$SECONDS+$initialize_interval + while [ $UNTIL -ge $SECONDS ] + do + # Note: Services are restarted by sm which runs the ocf start script. + # Sm reports enabled-active only *after* those scripts return success + sm-query service $service | grep -q 'enabled-active' + if [ $? -eq 0 ] + then + loginfo "$0: New process of $service started correctly" + break + fi + sleep 1 + done + fi + + sm-query service $service | grep -q 'enabled-active' + if [ $? -ne 0 ] + then + # Still not running! Clear the flag and mark the RC as failed + loginfo "$0: Failed to restart $service" + rm -f $PATCH_FLAGDIR/$service.restarted + GLOBAL_RC=$PATCH_STATUS_FAILED + sm-query service $service + break + # Note: break if any process in the SERVICES list fails + fi + fi + done + fi +fi +# +# Exit the script with the overall return code +# +rm -f $PATCH_FLAGDIR/cinder.restarting +exit $GLOBAL_RC + diff --git a/openstack/python-cinder/centos/openstack-cinder.spec b/openstack/python-cinder/centos/openstack-cinder.spec new file mode 100644 index 00000000..973c9d6b --- /dev/null +++ b/openstack/python-cinder/centos/openstack-cinder.spec @@ -0,0 +1,464 @@ +%{!?upstream_version: %global upstream_version %{version}%{?milestone}} +%global with_doc %{!?_without_doc:1}%{?_without_doc:0} +%global pypi_name cinder + +# WRS: Keep service name - used by build scripts +#%global service cinder + +# WRS: remove docs - for now +%global with_doc 0 + +%global common_desc \ +OpenStack Volume (codename Cinder) provides services to manage and \ +access block storage volumes for use by Virtual Machine instances. + +Name: openstack-cinder +# Liberty semver reset +# https://review.openstack.org/#/q/I6a35fa0dda798fad93b804d00a46af80f08d475c,n,z +Epoch: 1 +Version: 11.0.0 +Release: 1%{?_tis_dist}.%{tis_patch_ver} +Summary: OpenStack Volume service + +License: ASL 2.0 +URL: http://www.openstack.org/software/openstack-storage/ +Source0: https://tarballs.openstack.org/%{pypi_name}/%{pypi_name}-%{upstream_version}.tar.gz + +# + +Source1: cinder-dist.conf +Source2: cinder.logrotate +# WRS: Adding pre-built config file (via: tox -egenconfig) as this is not +# getting generated correctly in our build system. Might be due to partial +# rebase env w/ mitaka+newton. We need to re-evaluate once rebase is +# complete. 
+Source3: cinder.conf.sample + +Source10: openstack-cinder-api.service +Source11: openstack-cinder-scheduler.service +Source12: openstack-cinder-volume.service +Source13: openstack-cinder-backup.service +Source20: cinder-sudoers + +Source21: restart-cinder +Source22: cinder-purge-deleted-active + +BuildArch: noarch +BuildRequires: intltool +BuildRequires: python-d2to1 +BuildRequires: python-openstackdocstheme +BuildRequires: python-pbr +BuildRequires: python-reno +BuildRequires: python-sphinx +BuildRequires: python2-devel +BuildRequires: python-setuptools +BuildRequires: python-netaddr +BuildRequires: systemd +BuildRequires: git +BuildRequires: openstack-macros +BuildRequires: os-brick +BuildRequires: pyparsing +BuildRequires: pytz +BuildRequires: python-decorator +BuildRequires: openstack-macros +# Required to build cinder.conf +BuildRequires: python-google-api-client >= 1.4.2 +BuildRequires: python-keystonemiddleware +BuildRequires: python-glanceclient >= 1:2.8.0 +#BuildRequires: python-novaclient >= 1:9.0.0 +BuildRequires: python-novaclient >= 2.29.0 +BuildRequires: python-swiftclient >= 3.2.0 +BuildRequires: python-oslo-db +BuildRequires: python-oslo-config >= 2:4.0.0 +BuildRequires: python-oslo-policy +BuildRequires: python-oslo-reports +BuildRequires: python-oslotest +BuildRequires: python-oslo-utils +BuildRequires: python-oslo-versionedobjects +BuildRequires: python-oslo-vmware +BuildRequires: python-os-win +BuildRequires: python-castellan +BuildRequires: python-cryptography +BuildRequires: python-lxml +BuildRequires: python-osprofiler +BuildRequires: python-paramiko +BuildRequires: python-suds +BuildRequires: python-taskflow +BuildRequires: python-tooz +BuildRequires: python-oslo-log +BuildRequires: python-oslo-i18n +BuildRequires: python-barbicanclient +BuildRequires: python-requests +BuildRequires: python-retrying + +# Required to compile translation files +BuildRequires: python-babel + +# Needed for unit tests +BuildRequires: python-ddt +BuildRequires: python-fixtures +BuildRequires: python-mock +BuildRequires: python-oslotest +BuildRequires: python-subunit +BuildRequires: python-testtools +BuildRequires: python-testrepository +BuildRequires: python-testresources +BuildRequires: python-testscenarios +BuildRequires: python-os-testr +BuildRequires: python-rtslib + +Requires: python-cinder = %{epoch}:%{version}-%{release} + +# we dropped the patch to remove PBR for Delorean +Requires: python-pbr + +# as convenience +Requires: python-cinderclient + +Requires(post): systemd +Requires(preun): systemd +Requires(postun): systemd +Requires(pre): shadow-utils + +Requires: lvm2 +Requires: python-osprofiler +Requires: python-rtslib + +# Required for EMC VNX driver +Requires: python2-storops + +%description +%{common_desc} + + +%package -n python-cinder +Summary: OpenStack Volume Python libraries +Group: Applications/System + +Requires: sudo + +Requires: qemu-img +Requires: sysfsutils +Requires: os-brick >= 1.15.2 +Requires: python-paramiko >= 2.0 +Requires: python-simplejson >= 2.2.0 + +Requires: python-castellan >= 0.7.0 +Requires: python-eventlet >= 0.18.2 +Requires: python-greenlet >= 0.3.2 +Requires: python-iso8601 >= 0.1.11 +Requires: python-lxml >= 2.3 +Requires: python-stevedore >= 1.20.0 +Requires: python-suds +Requires: python-tooz >= 1.47.0 + +Requires: python-sqlalchemy >= 1.0.10 +Requires: python-migrate >= 0.11.0 + +Requires: python-paste-deploy +Requires: python-routes >= 2.3.1 +Requires: python-webob >= 1.7.1 + +Requires: python-glanceclient >= 1:2.8.0 +Requires: 
python-swiftclient >= 3.2.0 +Requires: python-keystoneclient >= 3.8.0 +#Requires: python-novaclient >= 1:9.0.0 +Requires: python-novaclient >= 2.29.0 + +Requires: python-oslo-config >= 2:4.0.0 +Requires: python-six >= 1.9.0 +Requires: python-psutil >= 3.2.2 + +Requires: python-babel +Requires: python-google-api-client >= 1.4.2 + +Requires: python-oslo-rootwrap >= 5.0.0 +Requires: python-oslo-utils >= 3.20.0 +Requires: python-oslo-serialization >= 1.10.0 +Requires: python-oslo-db >= 4.24.0 +Requires: python-oslo-context >= 2.14.0 +Requires: python-oslo-concurrency >= 3.8.0 +Requires: python-oslo-middleware >= 3.27.0 +Requires: python-taskflow >= 2.7.0 +Requires: python-oslo-messaging >= 5.24.2 +Requires: python-oslo-policy >= 1.23.0 +Requires: python-oslo-reports >= 0.6.0 +Requires: python-oslo-service >= 1.10.0 +Requires: python-oslo-versionedobjects >= 1.19.0 + +Requires: iscsi-initiator-utils + +Requires: python-osprofiler >= 1.4.0 + +Requires: python-httplib2 >= 0.7.5 +Requires: python-oauth2client >= 1.5.0 + +Requires: python-oslo-log >= 3.22.0 +Requires: python-oslo-i18n >= 2.1.0 +Requires: python-barbicanclient >= 4.0.0 +Requires: python-requests >= 2.10.0 +Requires: python-retrying >= 1.2.3 +Requires: pyparsing >= 2.0.7 +Requires: pytz +Requires: python-decorator +Requires: python-enum34 +Requires: python-ipaddress + +Requires: python-keystonemiddleware >= 4.12.0 +Requires: python-keystoneauth1 >= 3.1.0 + +Requires: python-oslo-privsep >= 1.9.0 + +Requires: python-cryptography >= 1.6 + + +%description -n python-cinder +%{common_desc} + +This package contains the cinder Python library. + +%package -n python-cinder-tests +Summary: Cinder tests +Requires: openstack-cinder = %{epoch}:%{version}-%{release} + +# Added test requirements +Requires: python-hacking +Requires: python-anyjson +Requires: python-coverage +Requires: python-ddt +Requires: python-fixtures +Requires: python-mock +Requires: python-mox3 +Requires: python-oslotest +Requires: python-subunit +Requires: python-testtools +Requires: python-testrepository +Requires: python-testresources +Requires: python-testscenarios +Requires: python-os-testr +Requires: python-tempest + +%description -n python-cinder-tests +%{common_desc} + +This package contains the Cinder test files. + +%if 0%{?with_doc} +%package doc +Summary: Documentation for OpenStack Volume +Group: Documentation + +Requires: %{name} = %{epoch}:%{version}-%{release} + +BuildRequires: graphviz + +# Required to build module documents +BuildRequires: python-eventlet +BuildRequires: python-routes +BuildRequires: python-sqlalchemy +BuildRequires: python-webob +BuildRequires: python-stevedore +# while not strictly required, quiets the build down when building docs. +BuildRequires: python-migrate +BuildRequires: python-iso8601 >= 0.1.9 + +%description doc +%{common_desc} + +This package contains documentation files for cinder. +%endif + +%prep +%autosetup -n cinder-%{upstream_version} -S git + +find . \( -name .gitignore -o -name .placeholder \) -delete + +find cinder -name \*.py -exec sed -i '/\/usr\/bin\/env python/{d;q}' {} + + +#sed -i 's/%{version}.%{milestone}/%{version}/' PKG-INFO + +# Remove the requirements file so that pbr hooks don't add it +# to distutils requires_dist config +%py_req_cleanup + +%build +# Generate config file +PYTHONPATH=. 
oslo-config-generator --config-file=cinder/config/cinder-config-generator.conf +# WRS: Put this pre-built config file in place of the generated one as it's not +# being built correctly currently +cp %{SOURCE3} etc/cinder/cinder.conf.sample + +# Build +%{__python2} setup.py build + +# Generate i18n files +# (amoralej) we can remove '-D cinder' once https://review.openstack.org/#/c/439501/ is merged +%{__python2} setup.py compile_catalog -d build/lib/%{pypi_name}/locale -D cinder + +%install +%{__python2} setup.py install -O1 --skip-build --root %{buildroot} + +# Create fake egg-info for the tempest plugin +# TODO switch to %{service} everywhere as in openstack-example.spec +%global service cinder +%py2_entrypoint %{service} %{service} + +# docs generation requires everything to be installed first +export PYTHONPATH="$( pwd ):$PYTHONPATH" + +%if 0%{?with_doc} +%{__python2} setup.py build_sphinx --builder html +# Fix hidden-file-or-dir warnings +rm -fr doc/build/html/.buildinfo +%endif + +%{__python2} setup.py build_sphinx --builder man +mkdir -p %{buildroot}%{_mandir}/man1 +install -p -D -m 644 doc/build/man/*.1 %{buildroot}%{_mandir}/man1/ + +# Setup directories +install -d -m 755 %{buildroot}%{_sharedstatedir}/cinder +install -d -m 755 %{buildroot}%{_sharedstatedir}/cinder/tmp +install -d -m 755 %{buildroot}%{_localstatedir}/log/cinder + +# Install config files +install -d -m 755 %{buildroot}%{_sysconfdir}/cinder +install -p -D -m 640 %{SOURCE1} %{buildroot}%{_datadir}/cinder/cinder-dist.conf +install -d -m 755 %{buildroot}%{_sysconfdir}/cinder/volumes +install -p -D -m 640 etc/cinder/rootwrap.conf %{buildroot}%{_sysconfdir}/cinder/rootwrap.conf +install -p -D -m 640 etc/cinder/api-paste.ini %{buildroot}%{_sysconfdir}/cinder/api-paste.ini +install -p -D -m 640 etc/cinder/policy.json %{buildroot}%{_sysconfdir}/cinder/policy.json +install -p -D -m 640 etc/cinder/cinder.conf.sample %{buildroot}%{_sysconfdir}/cinder/cinder.conf + +# Install initscripts for services +install -p -D -m 644 %{SOURCE10} %{buildroot}%{_unitdir}/openstack-cinder-api.service +install -p -D -m 644 %{SOURCE11} %{buildroot}%{_unitdir}/openstack-cinder-scheduler.service +install -p -D -m 644 %{SOURCE12} %{buildroot}%{_unitdir}/openstack-cinder-volume.service +install -p -D -m 644 %{SOURCE13} %{buildroot}%{_unitdir}/openstack-cinder-backup.service + +# Install sudoers +install -p -D -m 440 %{SOURCE20} %{buildroot}%{_sysconfdir}/sudoers.d/cinder + +# Install pid directory +install -d -m 755 %{buildroot}%{_localstatedir}/run/cinder + +# Install rootwrap files in /usr/share/cinder/rootwrap +mkdir -p %{buildroot}%{_datarootdir}/cinder/rootwrap/ +install -p -D -m 644 etc/cinder/rootwrap.d/* %{buildroot}%{_datarootdir}/cinder/rootwrap/ + + +# Symlinks to rootwrap config files +mkdir -p %{buildroot}%{_sysconfdir}/cinder/rootwrap.d +for filter in %{_datarootdir}/os-brick/rootwrap/*.filters; do +ln -s $filter %{buildroot}%{_sysconfdir}/cinder/rootwrap.d/ +done + +# Install i18n .mo files (.po and .pot are not required) +install -d -m 755 %{buildroot}%{_datadir} +rm -f %{buildroot}%{python2_sitelib}/%{pypi_name}/locale/*/LC_*/%{pypi_name}*po +rm -f %{buildroot}%{python2_sitelib}/%{pypi_name}/locale/*pot +mv %{buildroot}%{python2_sitelib}/%{pypi_name}/locale %{buildroot}%{_datadir}/locale + +# Find language files +%find_lang %{pypi_name} --all-name + +# Remove unneeded in production stuff +rm -f %{buildroot}%{_bindir}/cinder-all +rm -f %{buildroot}%{_bindir}/cinder-debug +rm -fr %{buildroot}%{python2_sitelib}/run_tests.* +rm -f 
%{buildroot}/usr/share/doc/cinder/README* + +# FIXME(jpena): unit tests are taking too long in the current DLRN infra +# Until we have a better architecture, let's not run them when under DLRN +%if 0%{!?dlrn} +%check +OS_TEST_PATH=./cinder/tests/unit ostestr --concurrency=2 +%endif + +# WRS: in-service restarts +install -p -D -m 700 %{SOURCE21} %{buildroot}%{_bindir}/restart-cinder + +# WRS: purge cron +install -p -D -m 755 %{SOURCE22} %{buildroot}%{_bindir}/cinder-purge-deleted-active + +%pre +getent group cinder >/dev/null || groupadd -r cinder --gid 165 +if ! getent passwd cinder >/dev/null; then + useradd -u 165 -r -g cinder -G cinder,nobody -d %{_sharedstatedir}/cinder -s /sbin/nologin -c "OpenStack Cinder Daemons" cinder +fi +exit 0 + +%post +%systemd_post openstack-cinder-volume +%systemd_post openstack-cinder-api +%systemd_post openstack-cinder-scheduler +%systemd_post openstack-cinder-backup + +%preun +%systemd_preun openstack-cinder-volume +%systemd_preun openstack-cinder-api +%systemd_preun openstack-cinder-scheduler +%systemd_preun openstack-cinder-backup + +%postun +%systemd_postun_with_restart openstack-cinder-volume +%systemd_postun_with_restart openstack-cinder-api +%systemd_postun_with_restart openstack-cinder-scheduler +%systemd_postun_with_restart openstack-cinder-backup + +%files +%dir %{_sysconfdir}/cinder +%config(noreplace) %attr(-, root, cinder) %{_sysconfdir}/cinder/cinder.conf +%config(noreplace) %attr(-, root, cinder) %{_sysconfdir}/cinder/api-paste.ini +%config(noreplace) %attr(-, root, cinder) %{_sysconfdir}/cinder/rootwrap.conf +%config(noreplace) %attr(-, root, cinder) %{_sysconfdir}/cinder/policy.json +%config(noreplace) %{_sysconfdir}/sudoers.d/cinder +%{_sysconfdir}/cinder/rootwrap.d/ +%attr(-, root, cinder) %{_datadir}/cinder/cinder-dist.conf + +%dir %attr(0750, cinder, root) %{_localstatedir}/log/cinder +%dir %attr(0755, cinder, root) %{_localstatedir}/run/cinder +%dir %attr(0755, cinder, root) %{_sysconfdir}/cinder/volumes + +%{_bindir}/cinder-* +%{_unitdir}/*.service +%{_datarootdir}/cinder +%{_mandir}/man1/cinder*.1.gz + +#WRS: in-service patching +%{_bindir}/restart-cinder + +#WRS: purge cron +%{_bindir}/cinder-purge-deleted-active + +%defattr(-, cinder, cinder, -) +%dir %{_sharedstatedir}/cinder +%dir %{_sharedstatedir}/cinder/tmp + +%files -n python-cinder -f %{pypi_name}.lang +%{?!_licensedir: %global license %%doc} +%license LICENSE +%{python2_sitelib}/cinder +%{python2_sitelib}/cinder-*.egg-info +%exclude %{python2_sitelib}/cinder/tests + +%files -n python-cinder-tests +%license LICENSE +%{python2_sitelib}/cinder/tests +%{python2_sitelib}/%{service}_tests.egg-info + +%if 0%{?with_doc} +%files doc +%doc doc/build/html +%endif + +%changelog +* Wed Aug 30 2017 rdo-trunk 1:11.0.0-1 +- Update to 11.0.0 + +* Fri Aug 25 2017 Alfredo Moralejo 1:11.0.0-0.2.0rc2 +- Update to 11.0.0.0rc2 + +* Tue Aug 22 2017 Alfredo Moralejo 1:11.0.0-0.1.0rc1 +- Update to 11.0.0.0rc1 + diff --git a/openstack/python-cinderclient/centos/build_srpm.data b/openstack/python-cinderclient/centos/build_srpm.data new file mode 100644 index 00000000..6795d701 --- /dev/null +++ b/openstack/python-cinderclient/centos/build_srpm.data @@ -0,0 +1,10 @@ +TAR_NAME=python-cinderclient +SRC_DIR=$CGCS_BASE/git/python-cinderclient + +# Tar everything found in this subdirectory. Define this if source need to be collected into a tarball in SOURCES. +# Tar file name and version are derived from PKG-INFO. 
Alternatively you may define TAR_NAME and VERSION
+
+# A space-separated list of paths to copy to .distro/centos7/rpmbuild/SOURCES.
+#COPY_LIST="$CGCS_BASE/downloads/$CLIENT_NAME-$CLIENT_VER.tar.gz $PKG_BASE/$CLIENT_NAME/*"
+TIS_BASE_SRCREV=3640aeab6e11987288a2f149fbeedb1c026045e2
+TIS_PATCH_VER=GITREVCOUNT
diff --git a/openstack/python-cinderclient/centos/python-cinderclient.spec b/openstack/python-cinderclient/centos/python-cinderclient.spec
new file mode 100644
index 00000000..204450f3
--- /dev/null
+++ b/openstack/python-cinderclient/centos/python-cinderclient.spec
@@ -0,0 +1,177 @@
+%{!?upstream_version: %global upstream_version %{version}%{?milestone}}
+
+%global sname cinderclient
+%if 0%{?fedora}
+%global with_python3 1
+%endif
+
+Name: python-cinderclient
+Version: 3.1.0
+Release: 1%{?_tis_dist}.%{tis_patch_ver}
+Summary: Python API and CLI for OpenStack Cinder
+
+License: ASL 2.0
+URL: http://github.com/openstack/python-cinderclient
+Source0: https://tarballs.openstack.org/%{name}/%{name}-%{upstream_version}.tar.gz
+
+BuildArch: noarch
+
+BuildRequires: git
+
+%description
+Client library (cinderclient python module) and command line utility
+(cinder) for interacting with OpenStack Cinder (Block Storage) API.
+
+%package -n python2-%{sname}
+Summary: Python API and CLI for OpenStack Cinder
+%{?python_provide:%python_provide python2-%{sname}}
+
+BuildRequires: python2-devel
+BuildRequires: python-setuptools
+BuildRequires: python-pbr
+BuildRequires: python-d2to1
+
+Requires: python-babel
+Requires: python-pbr
+Requires: python-prettytable
+Requires: python-requests
+Requires: python-setuptools
+Requires: python-simplejson
+Requires: python-six
+Requires: python-keystoneauth1 >= 2.21.0
+Requires: python-oslo-i18n >= 3.9.0
+Requires: python-oslo-utils >= 3.20.0
+
+%description -n python2-%{sname}
+Client library (cinderclient python module) and command line utility
+(cinder) for interacting with OpenStack Cinder (Block Storage) API.
+
+
+%if 0%{?with_python3}
+%package -n python3-%{sname}
+Summary: Python API and CLI for OpenStack Cinder
+%{?python_provide:%python_provide python3-%{sname}}
+
+BuildRequires: python3-devel
+BuildRequires: python3-setuptools
+BuildRequires: python3-pbr
+BuildRequires: python3-d2to1
+
+Requires: python3-babel
+Requires: python3-pbr
+Requires: python3-prettytable
+Requires: python3-requests
+Requires: python3-setuptools
+Requires: python3-simplejson
+Requires: python3-six
+Requires: python3-keystoneauth1 >= 2.21.0
+Requires: python3-oslo-i18n >= 3.9.0
+Requires: python3-oslo-utils >= 3.20.0
+
+%description -n python3-%{sname}
+Client library (cinderclient python module) and command line utility
+(cinder) for interacting with OpenStack Cinder (Block Storage) API.
+%endif
+
+
+%package doc
+Summary: Documentation for OpenStack Cinder API Client
+Group: Documentation
+
+BuildRequires: python-reno
+BuildRequires: python-sphinx
+BuildRequires: python-openstackdocstheme
+
+%description doc
+Client library (cinderclient python module) and command line utility
+(cinder) for interacting with OpenStack Cinder (Block Storage) API.
+
+This package contains auto-generated documentation.
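+
+# Note: the sdk subpackage below bundles the full client source tree as a
+# tarball for rebuilding on remote hosts. A hypothetical consumer workflow,
+# with the tarball path taken from %install and an illustrative install step:
+#   tar zxf /usr/share/remote-clients/python-cinderclient-<version>.tgz
+#   cd python-cinderclient-<version> && python setup.py install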
+ +%package sdk +Summary: SDK files for %{name} + +%description sdk +Contains SDK files for %{name} package + +%prep +%autosetup -n %{name}-%{upstream_version} -S git + +# Remove bundled egg-info +rm -rf python_cinderclient.egg-info + +# Let RPM handle the requirements +rm -f {,test-}requirements.txt + +%build +export PBR_VERSION=%{version} +%py2_build +%if 0%{?with_python3} +%py3_build +%endif + +# FIXME (amoralej): following manual edit on conf.py is required for man page +# until https://review.openstack.org/#/c/489123 is merged +sed -i 's/man\/cinder/user\/cinder/' doc/source/conf.py + +%{__python2} setup.py build_sphinx -b html +%{__python2} setup.py build_sphinx -b man + +# Fix hidden-file-or-dir warnings +rm -fr doc/build/html/.doctrees doc/build/html/.buildinfo + +%install +export PBR_VERSION=%{version} +%if 0%{?with_python3} +%py3_install +mv %{buildroot}%{_bindir}/cinder %{buildroot}%{_bindir}/cinder-%{python3_version} +ln -s ./cinder-%{python3_version} %{buildroot}%{_bindir}/cinder-3 +# Delete tests +rm -fr %{buildroot}%{python3_sitelib}/cinderclient/tests +%endif + +%py2_install +mv %{buildroot}%{_bindir}/cinder %{buildroot}%{_bindir}/cinder-%{python2_version} +ln -s ./cinder-%{python2_version} %{buildroot}%{_bindir}/cinder-2 +# Delete tests +rm -fr %{buildroot}%{python2_sitelib}/cinderclient/tests + +ln -s ./cinder-2 %{buildroot}%{_bindir}/cinder + +install -p -D -m 644 tools/cinder.bash_completion %{buildroot}%{_sysconfdir}/bash_completion.d/cinder.bash_completion + +install -p -D -m 644 doc/build/man/cinder.1 %{buildroot}%{_mandir}/man1/cinder.1 + +# prep SDK package +mkdir -p %{buildroot}/usr/share/remote-clients +tar zcf %{buildroot}/usr/share/remote-clients/%{name}-%{version}.tgz --exclude='.gitignore' --exclude='.gitreview' -C .. 
%{name}-%{version} + +%files -n python2-%{sname} +%doc README.rst +%license LICENSE +%{_bindir}/cinder +%{_bindir}/cinder-2* +%{python2_sitelib}/cinderclient +%{python2_sitelib}/*.egg-info +%{_sysconfdir}/bash_completion.d/cinder.bash_completion +%{_mandir}/man1/cinder.1* + +%if 0%{?with_python3} +%files -n python3-%{sname} +%doc README.rst +%license LICENSE +%{_bindir}/cinder-3* +%{python3_sitelib}/cinderclient +%{python3_sitelib}/*.egg-info +%endif + +%files doc +%doc doc/build/html + +%files sdk +/usr/share/remote-clients/%{name}-%{version}.tgz + +%changelog +* Fri Aug 11 2017 Alfredo Moralejo 3.1.0-1 +- Update to 3.1.0 + diff --git a/openstack/python-django-openstack-auth/centos/build_srpm.data b/openstack/python-django-openstack-auth/centos/build_srpm.data new file mode 100755 index 00000000..d3f64f33 --- /dev/null +++ b/openstack/python-django-openstack-auth/centos/build_srpm.data @@ -0,0 +1 @@ +TIS_PATCH_VER=3 diff --git a/openstack/python-django-openstack-auth/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch b/openstack/python-django-openstack-auth/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch new file mode 100644 index 00000000..fa353f2d --- /dev/null +++ b/openstack/python-django-openstack-auth/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch @@ -0,0 +1,25 @@ +From bb0de73901801c5919041fa73699d6cc5e14495a Mon Sep 17 00:00:00 2001 +From: Scott Little +Date: Wed, 8 Nov 2017 13:52:34 -0500 +Subject: [PATCH] Update package versioning for TIS format + +--- + SPECS/python-django-openstack-auth.spec | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/SPECS/python-django-openstack-auth.spec b/SPECS/python-django-openstack-auth.spec +index 7ac6008..fd06bbc 100644 +--- a/SPECS/python-django-openstack-auth.spec ++++ b/SPECS/python-django-openstack-auth.spec +@@ -7,7 +7,7 @@ + + Name: python-django-openstack-auth + Version: 3.5.0 +-Release: 1%{?dist} ++Release: 1.el7%{?_tis_dist}.%{tis_patch_ver} + Summary: Django authentication backend for OpenStack Keystone + + License: BSD +-- +1.8.3.1 + diff --git a/openstack/python-django-openstack-auth/centos/meta_patches/0002-remove-rpm-build-time-TOX-tests.patch b/openstack/python-django-openstack-auth/centos/meta_patches/0002-remove-rpm-build-time-TOX-tests.patch new file mode 100644 index 00000000..65bacd60 --- /dev/null +++ b/openstack/python-django-openstack-auth/centos/meta_patches/0002-remove-rpm-build-time-TOX-tests.patch @@ -0,0 +1,36 @@ +From 70ecee3d9c45b338a03e32cf59485ada4804f24d Mon Sep 17 00:00:00 2001 +From: Kam Nasim +Date: Wed, 8 Nov 2017 14:14:54 -0500 +Subject: [PATCH] remove rpm build-time TOX tests + +--- + SPECS/python-django-openstack-auth.spec | 6 +----- + 1 file changed, 1 insertion(+), 5 deletions(-) + +diff --git a/SPECS/python-django-openstack-auth.spec b/SPECS/python-django-openstack-auth.spec +index fd06bbc..2ab88bc 100644 +--- a/SPECS/python-django-openstack-auth.spec ++++ b/SPECS/python-django-openstack-auth.spec +@@ -129,7 +129,7 @@ find . 
-name "django.po" -exec rm -f '{}' \; + %endif + + # generate html docs +-PYTHONPATH=.:$PYTHONPATH sphinx-build doc/source html ++#PYTHONPATH=.:$PYTHONPATH sphinx-build doc/source html + + %install + %{__python2} setup.py install --skip-build --root %{buildroot} +@@ -150,10 +150,6 @@ rm -rf %{buildroot}/%{python3_sitelib}/openstack_auth/tests + %endif + + +-%check +-export PYTHONPATH=$PYTHONPATH +-%{__python} openstack_auth/tests/run_tests.py +- + %files -n python2-django-openstack-auth -f django.lang + %license LICENSE + %dir %{python_sitelib}/openstack_auth +-- +1.8.3.1 + diff --git a/openstack/python-django-openstack-auth/centos/meta_patches/0003-meta-roll-in-TIS-patches.patch b/openstack/python-django-openstack-auth/centos/meta_patches/0003-meta-roll-in-TIS-patches.patch new file mode 100644 index 00000000..55d6995c --- /dev/null +++ b/openstack/python-django-openstack-auth/centos/meta_patches/0003-meta-roll-in-TIS-patches.patch @@ -0,0 +1,47 @@ +From 4e7ae98c2ffb058711338161a283ce04e47d6bc9 Mon Sep 17 00:00:00 2001 +From: Kam Nasim +Date: Wed, 8 Nov 2017 16:54:56 -0500 +Subject: [PATCH] meta to roll in pike rebase patches + +--- + SPECS/python-django-openstack-auth.spec | 10 +++++++++- + 1 file changed, 9 insertions(+), 1 deletion(-) + +diff --git a/SPECS/python-django-openstack-auth.spec b/SPECS/python-django-openstack-auth.spec +index 3c65c20..6bd7088 100644 +--- a/SPECS/python-django-openstack-auth.spec ++++ b/SPECS/python-django-openstack-auth.spec +@@ -14,6 +14,9 @@ License: BSD + URL: http://pypi.python.org/pypi/django_openstack_auth/ + Source0: https://tarballs.openstack.org/django_openstack_auth/django_openstack_auth-%{upstream_version}.tar.gz + ++# WRS Patches ++Patch0001: 0001-Pike-rebase-for-openstack-auth.patch ++ + BuildArch: noarch + + BuildRequires: git +@@ -45,6 +48,11 @@ BuildRequires: python-mox3 + BuildRequires: python-mock + BuildRequires: python-testscenarios + ++#WRS: Need these for build_sphinx ++BuildRequires: tsconfig ++BuildRequires: python2-pycodestyle ++BuildRequires: python2-oslo-concurrency ++ + Requires: python-django + BuildRequires: python-django + +@@ -107,7 +115,7 @@ Keystone V2 API. 
+ + + %prep +-%autosetup -n %{pypi_name}-%{upstream_version} -S git ++%autosetup -n %{pypi_name}-%{upstream_version} -S git -p1 + + + # Remove the requirements file so that pbr hooks don't add it +-- +1.8.3.1 + diff --git a/openstack/python-django-openstack-auth/centos/meta_patches/0004-meta-disable-token-validation-per-auth-req.patch b/openstack/python-django-openstack-auth/centos/meta_patches/0004-meta-disable-token-validation-per-auth-req.patch new file mode 100644 index 00000000..98358516 --- /dev/null +++ b/openstack/python-django-openstack-auth/centos/meta_patches/0004-meta-disable-token-validation-per-auth-req.patch @@ -0,0 +1,24 @@ +From 5dc546e81e87d78f97af2fd734d41119faa866fe Mon Sep 17 00:00:00 2001 +From: Kam Nasim +Date: Mon, 12 Feb 2018 11:04:17 -0500 +Subject: [PATCH] meta patch for disable-token-validation-per-auth-request + +--- + SPECS/python-django-openstack-auth.spec | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/SPECS/python-django-openstack-auth.spec b/SPECS/python-django-openstack-auth.spec +index 4960ab2..3a462de 100644 +--- a/SPECS/python-django-openstack-auth.spec ++++ b/SPECS/python-django-openstack-auth.spec +@@ -16,6 +16,7 @@ Source0: https://tarballs.openstack.org/django_openstack_auth/django_open + + # WRS Patches + Patch0001: 0001-Pike-rebase-for-openstack-auth.patch ++Patch0002: 0002-disable-token-validation-per-auth-request.patch + + BuildArch: noarch + +-- +1.8.3.1 + diff --git a/openstack/python-django-openstack-auth/centos/meta_patches/0005-meta-cache-authorized-tenants-in-cookie-to-improve-performance.patch b/openstack/python-django-openstack-auth/centos/meta_patches/0005-meta-cache-authorized-tenants-in-cookie-to-improve-performance.patch new file mode 100644 index 00000000..e7ff3661 --- /dev/null +++ b/openstack/python-django-openstack-auth/centos/meta_patches/0005-meta-cache-authorized-tenants-in-cookie-to-improve-performance.patch @@ -0,0 +1,25 @@ +From ac0df9c1b8cddc4bb5a06d7e1226144d3f85e83d Mon Sep 17 00:00:00 2001 +From: Kam Nasim +Date: Wed, 14 Feb 2018 10:39:33 -0500 +Subject: [PATCH] meta patch for + cache-authorized-tenants-in-cookie-to-improve-performance + +--- + SPECS/python-django-openstack-auth.spec | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/SPECS/python-django-openstack-auth.spec b/SPECS/python-django-openstack-auth.spec +index 3a462de..21e4bde 100644 +--- a/SPECS/python-django-openstack-auth.spec ++++ b/SPECS/python-django-openstack-auth.spec +@@ -17,6 +17,7 @@ Source0: https://tarballs.openstack.org/django_openstack_auth/django_open + # WRS Patches + Patch0001: 0001-Pike-rebase-for-openstack-auth.patch + Patch0002: 0002-disable-token-validation-per-auth-request.patch ++Patch0003: 0003-cache-authorized-tenants-in-cookie-to-improve-performance.patch + + BuildArch: noarch + +-- +1.8.3.1 + diff --git a/openstack/python-django-openstack-auth/centos/meta_patches/PATCH_ORDER b/openstack/python-django-openstack-auth/centos/meta_patches/PATCH_ORDER new file mode 100644 index 00000000..9df29a6a --- /dev/null +++ b/openstack/python-django-openstack-auth/centos/meta_patches/PATCH_ORDER @@ -0,0 +1,5 @@ +0001-Update-package-versioning-for-TIS-format.patch +0002-remove-rpm-build-time-TOX-tests.patch +0003-meta-roll-in-TIS-patches.patch +0004-meta-disable-token-validation-per-auth-req.patch +0005-meta-cache-authorized-tenants-in-cookie-to-improve-performance.patch diff --git a/openstack/python-django-openstack-auth/centos/patches/0001-Pike-rebase-for-openstack-auth.patch 
b/openstack/python-django-openstack-auth/centos/patches/0001-Pike-rebase-for-openstack-auth.patch new file mode 100644 index 00000000..e5708111 --- /dev/null +++ b/openstack/python-django-openstack-auth/centos/patches/0001-Pike-rebase-for-openstack-auth.patch @@ -0,0 +1,495 @@ +From b2e9d78e5385ad7c67b4ce2ef33b64206e5ccf6d Mon Sep 17 00:00:00 2001 +From: Kam Nasim +Date: Wed, 25 Jan 2017 11:58:56 -0500 +Subject: [PATCH] Pike rebase for openstack auth + +Squashes in the following Titanium patches (some patches have been +upstreamed and not mentioned here): + +- User lockout feature (Author: Aly Nathoo) +- Add client ip to login / lockout activies and logging for user lockout +(Author: David Balme) +- Optimize keystone authentication requests during user lockout +(Author: Kam Nasim) +- Stop user lockout increment when Keystone is unavailable +(Author: David Balme) +- Change default region for openstack auth to local Horizon's region +(Author: Tyler Smith) +- Refactor user lockout for multiple browser sessions, and fix horizon +session timeout issue (Author: Giao Le) +- Validate token before initiating session to prevent invalid token + issues (Author: Giao Le) +--- + openstack_auth/backend.py | 4 + + openstack_auth/forms.py | 35 ++++-- + openstack_auth/plugin/base.py | 7 +- + openstack_auth/user.py | 15 +++ + openstack_auth/utils.py | 242 +++++++++++++++++++++++++++++++++++++++++- + openstack_auth/views.py | 12 ++- + 6 files changed, 302 insertions(+), 13 deletions(-) + +diff --git a/openstack_auth/backend.py b/openstack_auth/backend.py +index dae603a..cd15ca8 100644 +--- a/openstack_auth/backend.py ++++ b/openstack_auth/backend.py +@@ -170,6 +170,10 @@ class KeystoneBackend(object): + region_name = id_endpoint['region'] + break + ++ local_region = getattr(settings, 'REGION_NAME', None) ++ if local_region: ++ region_name = local_region ++ + interface = getattr(settings, 'OPENSTACK_ENDPOINT_TYPE', 'public') + + endpoint, url_fixed = utils.fix_auth_url_version_prefix( +diff --git a/openstack_auth/forms.py b/openstack_auth/forms.py +index c7d0c51..90e281b 100644 +--- a/openstack_auth/forms.py ++++ b/openstack_auth/forms.py +@@ -24,7 +24,6 @@ from django.views.decorators.debug import sensitive_variables + from openstack_auth import exceptions + from openstack_auth import utils + +- + LOG = logging.getLogger(__name__) + + +@@ -117,6 +116,7 @@ class Login(django_auth_forms.AuthenticationForm): + + @sensitive_variables() + def clean(self): ++ global failedUserLogins + default_domain = getattr(settings, + 'OPENSTACK_KEYSTONE_DEFAULT_DOMAIN', + 'Default') +@@ -130,6 +130,14 @@ class Login(django_auth_forms.AuthenticationForm): + return self.cleaned_data + + try: ++ # assess the lockout status for this user. ++ # If this user is already locked out then ++ # do not attempt to authenticate as that is ++ # a redundant hit on Keystone and web proxy. 
++ lockedout = utils.check_user_lockout(username) ++ if lockedout: ++ raise forms.ValidationError("user currently locked out.") ++ + self.user_cache = authenticate(request=self.request, + username=username, + password=password, +@@ -141,14 +149,27 @@ class Login(django_auth_forms.AuthenticationForm): + 'remote_ip': utils.get_client_ip(self.request) + } + LOG.info(msg) ++ ++ # since this user logged in successfully, clear its ++ # lockout status ++ utils.clear_user_lockout(username) ++ ++ # handle user login ++ utils.handle_user_login(username, password) ++ + except exceptions.KeystoneAuthException as exc: +- msg = 'Login failed for user "%(username)s", remote address '\ +- '%(remote_ip)s.' % { +- 'username': username, +- 'remote_ip': utils.get_client_ip(self.request) +- } +- LOG.warning(msg) ++ if getattr(exc,"invalidCredentials", False): ++ msg = 'Login failed for user "%(username)s", remote address '\ ++ '%(remote_ip)s.' % { ++ 'username': username, ++ 'remote_ip': utils.get_client_ip(self.request) ++ } ++ LOG.warning(msg) ++ utils.add_user_lockout(username) ++ LOG.warning("Invalid password entered for User %s " ++ "in web service." % username) + raise forms.ValidationError(exc) ++ + if hasattr(self, 'check_for_test_cookie'): # Dropped in django 1.7 + self.check_for_test_cookie() + return self.cleaned_data +diff --git a/openstack_auth/plugin/base.py b/openstack_auth/plugin/base.py +index f1aff52..bbdaddc 100644 +--- a/openstack_auth/plugin/base.py ++++ b/openstack_auth/plugin/base.py +@@ -95,7 +95,7 @@ class BasePlugin(object): + + except (keystone_exceptions.ClientException, + keystone_exceptions.AuthorizationFailure): +- msg = _('Unable to retrieve authorized projects.') ++ msg = 'Unable to retrieve authorized projects.' + raise exceptions.KeystoneAuthException(msg) + + def list_domains(self, session, auth_plugin, auth_ref=None): +@@ -132,7 +132,10 @@ class BasePlugin(object): + keystone_exceptions.Forbidden, + keystone_exceptions.NotFound) as exc: + LOG.debug(str(exc)) +- raise exceptions.KeystoneAuthException(_('Invalid credentials.')) ++ authException = exceptions.KeystoneAuthException( ++ _('Invalid credentials.')) ++ authException.invalidCredentials = True ++ raise authException + except (keystone_exceptions.ClientException, + keystone_exceptions.AuthorizationFailure) as exc: + msg = _("An error occurred authenticating. " +diff --git a/openstack_auth/user.py b/openstack_auth/user.py +index 063648b..c6f616c 100644 +--- a/openstack_auth/user.py ++++ b/openstack_auth/user.py +@@ -253,6 +253,9 @@ class User(models.AbstractBaseUser, models.AnonymousUser): + # Required by AbstractBaseUser + self.password = None + ++ # WRS: additional check to validate token prior to using ++ self.validate_token = False ++ + def __unicode__(self): + return self.username + +@@ -305,6 +308,18 @@ class User(models.AbstractBaseUser, models.AnonymousUser): + A default margin can be set by the TOKEN_TIMEOUT_MARGIN in the + django settings. 
+         """
++        # WRS: validate token
++        if not self.validate_token:
++            try:
++                utils.validate_token(
++                    auth_url=self.endpoint,
++                    auth_token=self.unscoped_token,
++                    token=self.token)
++            except (keystone_exceptions.ClientException,
++                    keystone_exceptions.AuthorizationFailure):
++                self.token.expires = None
++            self.validate_token = True
++
+         return (self.token is not None and
+                 utils.is_token_valid(self.token, margin))
+
+diff --git a/openstack_auth/utils.py b/openstack_auth/utils.py
+index cac0d7a..1b35592 100644
+--- a/openstack_auth/utils.py
++++ b/openstack_auth/utils.py
+@@ -12,7 +12,13 @@
+ # limitations under the License.
+
+ import datetime
++import errno
++import fcntl
++import keyring
+ import logging
++import os
++import time
++import time
+ import re
+
+ from django.conf import settings
+@@ -25,6 +31,8 @@ from keystoneauth1 import session
+ from keystoneauth1 import token_endpoint
+ from keystoneclient.v2_0 import client as client_v2
+ from keystoneclient.v3 import client as client_v3
++from oslo_concurrency import lockutils
++from oslo_serialization import jsonutils
+ from six.moves.urllib import parse as urlparse
+
+
+@@ -357,6 +365,14 @@ def get_token_auth_plugin(auth_url, token, project_id=None, domain_name=None):
+                           reauthenticate=False)
+
+
++def validate_token(auth_url, auth_token, token):
++    auth_url, _ = fix_auth_url_version_prefix(auth_url)
++    auth = token_endpoint.Token(auth_url, auth_token)
++    sess = get_session()
++    client = get_keystone_client().Client(session=sess, auth=auth)
++    _ = client.tokens.validate(token)
++
++
+ def get_project_list(*args, **kwargs):
+     is_federated = kwargs.get('is_federated', False)
+     sess = kwargs.get('session') or get_session()
+@@ -489,9 +505,10 @@ def get_admin_permissions():
+     return {get_role_permission(role) for role in get_admin_roles()}
+
+
++
+ def get_client_ip(request):
+     """Return client ip address using SECURE_PROXY_ADDR_HEADER variable.
+-
++    If not present then consider using HTTP_X_FORWARDED_FOR.
+     If not present or not defined on settings then REMOTE_ADDR is used.
+
+     :param request: Django http request object.
+@@ -508,7 +525,12 @@
+             _SECURE_PROXY_ADDR_HEADER,
+             request.META.get('REMOTE_ADDR')
+         )
+-    return request.META.get('REMOTE_ADDR')
++    else:
++        x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
++        if x_forwarded_for:
++            return x_forwarded_for.split(',')[0]
++        else:
++            return request.META.get('REMOTE_ADDR')
+
+
+ def store_initial_k2k_session(auth_url, request, scoped_auth_ref,
+@@ -560,3 +582,219 @@
+         request.session['k2k_base_unscoped_token'] =\
+             unscoped_auth_ref.auth_token
+         request.session['k2k_auth_url'] = auth_url
++
++
++def __log_cache_error__(op, fn, ex):
++    msg = ("unable to %(op)s cache file %(fn)s due to %(ex)s") %\
++        {'op': op, 'fn': fn, 'ex': ex}
++    LOG.warning(msg)
++
++
++def __lock_cache__(cache_file):
++    try:
++        f = open(cache_file, "r+")
++    except Exception as ex:
++        __log_cache_error__("open", cache_file, ex)
++        raise
++
++    try:
++        fcntl.flock(f.fileno(), fcntl.LOCK_EX)
++    except Exception as ex:
++        __log_cache_error__("lock", f.name, ex)
++        raise
++
++    return f
++
++
++def __write_cache__(f, filedata):
++    try:
++        f.seek(0, 0)
++        f.truncate()
++        f.write(jsonutils.dumps(filedata))
++    except Exception as ex:
++        __log_cache_error__("write", f.name, ex)
++
++
++def __read_cache__(f, default=[]):
++    try:
++        f.seek(0, 0)
++        return jsonutils.loads(f.read())
++    except Exception as ex:
++        __log_cache_error__("read", f.name, ex)
++
++    return default
++
++
++def __create_cache__(fname, default=[]):
++    try:
++        with open(__file__, "r") as l:
++            fcntl.flock(l.fileno(), fcntl.LOCK_EX)
++            if not os.path.exists(fname):
++                with open(fname, "w") as f:
++                    __write_cache__(f, default)
++
++    except Exception as ex:
++        __log_cache_error__("create", fname, ex)
++
++
++# Track failed logins
++# Keep an array of failedUserLogins
++# failedUserLogins : username NumFailedLogins LockoutTime
++# if a login fails due to invalid password (username is ok),
++#    add username to array and increment NumFailedLogins
++# if a login succeeds
++#    look up username in failedUserLogins, if found
++#       if NumFailedLogins >= LOCKOUT_RETRIES_NUM, check LockoutTime
++#          if CurrentTime - LockoutTime < LOCKOUT_PERIOD_SEC then
++#             fail login
++#          else
++#             remove username from failedUserLogins
++#       else
++#          remove username from failedUserLogins, let user log in
++
++FAILED_LOGINS_CACHE_FILE = "/tmp/.hacache"
++FAILED_LOGINS_NAME_INDEX = 0
++FAILED_LOGINS_COUNT_INDEX = 1
++FAILED_LOGINS_TIME_INDEX = 2
++
++lockout_tuple = (settings.LOCKOUT_PERIOD_SEC, settings.LOCKOUT_RETRIES_NUM)
++
++if not os.path.exists(FAILED_LOGINS_CACHE_FILE):
++    __create_cache__(FAILED_LOGINS_CACHE_FILE)
++
++
++def check_user_lockout(username):
++    try:
++        with __lock_cache__(FAILED_LOGINS_CACHE_FILE) as f:
++            failedUserLogins = __read_cache__(f)
++            (lockout_period_sec, lockout_retries_num) = lockout_tuple
++            for i, user in enumerate(failedUserLogins):
++                if user[FAILED_LOGINS_NAME_INDEX] != username:
++                    continue
++                if user[FAILED_LOGINS_COUNT_INDEX] >= lockout_retries_num:
++                    delta = time.time() - user[FAILED_LOGINS_TIME_INDEX]
++                    if delta < lockout_period_sec:
++                        msg = ("Cannot login/authenticate -"
++                               " user \"%(username)s\" locked out." % \
++                               {'username': username})
++                        LOG.info(msg)
++                        return True
++                break
++    except:
++        pass
++
++    return False
++
++
++def add_user_lockout(username):
++    try:
++        with __lock_cache__(FAILED_LOGINS_CACHE_FILE) as f:
++            failedUserLogins = __read_cache__(f)
++            (lockout_period_sec, lockout_retries_num) = lockout_tuple
++            for user in failedUserLogins:
++                if user[FAILED_LOGINS_NAME_INDEX] == username:
++                    user[FAILED_LOGINS_COUNT_INDEX] += 1
++                    if user[FAILED_LOGINS_COUNT_INDEX] >= lockout_retries_num:
++                        # user is now locked out, record the new time
++                        user[FAILED_LOGINS_TIME_INDEX] = time.time()
++                        LOG.warning("User %s is locked out of web service"
++                                    " - attempted login." % (username))
++                    break
++            else:
++                failedUserLogins.append([username, 1, 0])
++            __write_cache__(f, failedUserLogins)
++    except:
++        pass
++
++
++def clear_user_lockout(username):
++    try:
++        with __lock_cache__(FAILED_LOGINS_CACHE_FILE) as f:
++            failedUserLogins = __read_cache__(f)
++            for i, user in enumerate(failedUserLogins):
++                if user[FAILED_LOGINS_NAME_INDEX] == username:
++                    # clear the entry for this user
++                    failedUserLogins[i] = None
++                    failedUserLogins = [elem for elem in failedUserLogins if elem]
++                    __write_cache__(f, failedUserLogins)
++                    break
++    except:
++        pass
++
++
++# Manage user password using keyring service
++# 1. add user password to keyring service once
++#    user logs in the first time
++# 2. delete user password from keyring service once
++#    user logs out all sessions
++
++USER_LOGINS_CACHE_FILE = '/tmp/.hacache1'
++USER_LOGINS_NAME_INDEX = 0
++USER_LOGINS_COUNT_INDEX = 1
++USER_LOGINS_KEYRING = 'hakeyring'
++
++if not os.path.exists(USER_LOGINS_CACHE_FILE):
++    __create_cache__(USER_LOGINS_CACHE_FILE)
++
++
++def get_user_password(username):
++    # use CGCS for admin
++    service = 'CGCS' if username == 'admin' else USER_LOGINS_KEYRING
++    return keyring.get_password(service, username)
++
++
++def handle_user_login(username, password):
++    if username == 'admin':
++        return
++
++    try:
++        with __lock_cache__(USER_LOGINS_CACHE_FILE) as f:
++            logined = False
++            userLogins = __read_cache__(f)
++            for i, user in enumerate(userLogins):
++                if user[USER_LOGINS_NAME_INDEX] == username:
++                    user[USER_LOGINS_COUNT_INDEX] += 1
++                    logined = True
++                    break
++            try:
++                old = keyring.get_password(USER_LOGINS_KEYRING, username)
++                if old and old != password:
++                    keyring.delete_password(USER_LOGINS_KEYRING, username)
++                    old = None
++                if not old:
++                    keyring.set_password(USER_LOGINS_KEYRING, username,
++                                         password)
++            except (keyring.errors.PasswordSetError, RuntimeError):
++                LOG.warning("Failed to update keyring password"
++                            " for user %s" % username)
++
++            if not logined:
++                userLogins.append([username, 1])
++            __write_cache__(f, userLogins)
++    except:
++        LOG.warning("Failed to handle login for user %s" % username)
++
++
++def handle_user_logout(username):
++    if username == 'admin':
++        return
++
++    try:
++        with __lock_cache__(USER_LOGINS_CACHE_FILE) as f:
++            userLogins = __read_cache__(f)
++            for i, user in enumerate(userLogins):
++                if user[USER_LOGINS_NAME_INDEX] != username:
++                    continue
++                user[USER_LOGINS_COUNT_INDEX] -= 1
++                if user[USER_LOGINS_COUNT_INDEX] == 0:
++                    userLogins[i] = None
++                    userLogins = [e for e in userLogins if e]
++                    try:
++                        keyring.delete_password(USER_LOGINS_KEYRING, username)
++                    except keyring.errors.PasswordDeleteError:
++                        LOG.warning("Failed to delete keyring password"
++                                    " for user %s" % username)
++                __write_cache__(f, userLogins)
++                break
++    except:
++        LOG.warning("Failed to handle logout for user %s" % username)
+diff --git a/openstack_auth/views.py b/openstack_auth/views.py
+index 7ae3063..bf4aa99 100644
+--- a/openstack_auth/views.py
++++ b/openstack_auth/views.py
+@@ -130,6 +130,10 @@ def login(request, template_name=None, extra_context=None, **kwargs):
+                        ' in %s minutes') %
+                        expiration_time).replace(':', ' Hours and ')
+             messages.warning(request, msg)
++
++    # WRS: add login user name to handle HORIZON session timeout
++    utils.set_response_cookie(res, 'login_user',
++                              request.user.username)
+     return res
+
+
+@@ -167,10 +171,14 @@ def logout(request, login_url=None, **kwargs):
+     see django.contrib.auth.views.logout_then_login extra parameters.
+
+     """
+-    msg = 'Logging out user "%(username)s".' % \
+-        {'username': request.user.username}
++    msg = 'Logging out user "%(username)s", remote address %(remote_ip)s.' % \
++        {'username': request.user.username,
++         'remote_ip': utils.get_client_ip(request)}
+     LOG.info(msg)
+
++    # handle user logout
++    utils.handle_user_logout(request.user.username)
++
+     """ Securely logs a user out. """
+     return django_auth_views.logout_then_login(request, login_url=login_url,
+                                                **kwargs)
+--
+1.8.3.1
+
diff --git a/openstack/python-django-openstack-auth/centos/patches/0002-disable-token-validation-per-auth-request.patch b/openstack/python-django-openstack-auth/centos/patches/0002-disable-token-validation-per-auth-request.patch new file mode 100644 index 00000000..c1a95ac6 --- /dev/null +++ b/openstack/python-django-openstack-auth/centos/patches/0002-disable-token-validation-per-auth-request.patch @@ -0,0 +1,28 @@ +From 3b6605b547bb27b272345b03797abc94a23469ab Mon Sep 17 00:00:00 2001 +From: rpm-build +Date: Mon, 12 Feb 2018 10:59:39 -0500 +Subject: [PATCH] disable token validation per auth request +
+To prevent invalid tokens from being used, each request would previously
This causes 2x AUTH requests to +Keystone which is affecting Host Swact/Lock and VM migration times +--- + openstack_auth/user.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/openstack_auth/user.py b/openstack_auth/user.py +index c6f616c..f486bfa 100644 +--- a/openstack_auth/user.py ++++ b/openstack_auth/user.py +@@ -254,7 +254,7 @@ class User(models.AbstractBaseUser, models.AnonymousUser): + self.password = None + + # WRS: additional check to validate token prior to using +- self.validate_token = False ++ self.validate_token = True + + def __unicode__(self): + return self.username +-- +1.8.3.1 + diff --git a/openstack/python-django-openstack-auth/centos/patches/0003-cache-authorized-tenants-in-cookie-to-improve-performance.patch b/openstack/python-django-openstack-auth/centos/patches/0003-cache-authorized-tenants-in-cookie-to-improve-performance.patch new file mode 100644 index 00000000..d8c8e98f --- /dev/null +++ b/openstack/python-django-openstack-auth/centos/patches/0003-cache-authorized-tenants-in-cookie-to-improve-performance.patch @@ -0,0 +1,69 @@ +From b6724faf6f485f08584a7965bcd881d1f3b78f7c Mon Sep 17 00:00:00 2001 +From: rpm-build +Date: Wed, 14 Feb 2018 16:55:29 -0500 +Subject: [PATCH] cache authorized tenants list in cookie to improve + performance + +--- + openstack_auth/utils.py | 9 +++++++++ + openstack_auth/views.py | 17 +++++++++++++++++ + 2 files changed, 26 insertions(+) + +diff --git a/openstack_auth/utils.py b/openstack_auth/utils.py +index 1b35592..9a55790 100644 +--- a/openstack_auth/utils.py ++++ b/openstack_auth/utils.py +@@ -438,6 +438,15 @@ def set_response_cookie(response, cookie_name, cookie_value): + response.set_cookie(cookie_name, cookie_value, expires=expire_date) + + ++def delete_response_cookie(response, cookie_name): ++ """ Common function for deleting a cookie from the response. ++ ++ Deletes the cookie of the given cookie_name. Fails silently ++ if cookie name doesn't exist ++ """ ++ response.delete_cookie(cookie_name) ++ ++ + def get_endpoint_region(endpoint): + """Common function for getting the region from endpoint. + +diff --git a/openstack_auth/views.py b/openstack_auth/views.py +index bf4aa99..a9d84df 100644 +--- a/openstack_auth/views.py ++++ b/openstack_auth/views.py +@@ -134,6 +134,15 @@ def login(request, template_name=None, extra_context=None, **kwargs): + # WRS: add login user name to handle HORIZON session timeout + utils.set_response_cookie(res, 'login_user', + request.user.username) ++ ++ # WRS: Store project list to handle frequent requests to ++ # Keystone. Since the cookie is not removed on logout, we will ++ # do it here. 
++ tenants = request.user.authorized_tenants ++ tenants = map(lambda x: x.to_dict(), tenants) ++ utils.delete_response_cookie(res, 'authorized_tenants') ++ utils.set_response_cookie(res, 'authorized_tenants', tenants) ++ + return res + + +@@ -237,6 +246,14 @@ def switch(request, tenant_id, redirect_field_name=auth.REDIRECT_FIELD_NAME): + response = shortcuts.redirect(redirect_to) + utils.set_response_cookie(response, 'recent_project', + request.user.project_id) ++ ++ # WRS: Refresh the project list stored in the cookie, along with ++ # the project switch event ++ tenants = request.user.authorized_tenants ++ tenants = map(lambda x: x.to_dict(), tenants) ++ utils.delete_response_cookie(response, 'authorized_tenants') ++ utils.set_response_cookie(response, 'authorized_tenants', tenants) ++ + return response + + +-- +1.8.3.1 + diff --git a/openstack/python-django-openstack-auth/centos/srpm_path b/openstack/python-django-openstack-auth/centos/srpm_path new file mode 100644 index 00000000..378a58ba --- /dev/null +++ b/openstack/python-django-openstack-auth/centos/srpm_path @@ -0,0 +1 @@ +mirror:Source/python-django-openstack-auth-3.5.0-1.el7.src.rpm diff --git a/openstack/python-glance-store/centos/build_srpm.data b/openstack/python-glance-store/centos/build_srpm.data new file mode 100644 index 00000000..1ca8cb84 --- /dev/null +++ b/openstack/python-glance-store/centos/build_srpm.data @@ -0,0 +1 @@ +TIS_PATCH_VER=0 diff --git a/openstack/python-glance-store/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch b/openstack/python-glance-store/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch new file mode 100644 index 00000000..870670f9 --- /dev/null +++ b/openstack/python-glance-store/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch @@ -0,0 +1,25 @@ +From 89c5d60116569e1446c62d652d30eb0a21130193 Mon Sep 17 00:00:00 2001 +From: Daniel Badea +Date: Thu, 2 Nov 2017 18:26:56 +0200 +Subject: [PATCH 1/2] Update package versioning for TIS format + +--- + SPECS/python-glance-store.spec | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/SPECS/python-glance-store.spec b/SPECS/python-glance-store.spec +index 831aba4..387a326 100644 +--- a/SPECS/python-glance-store.spec ++++ b/SPECS/python-glance-store.spec +@@ -7,7 +7,7 @@ + + Name: python-glance-store + Version: 0.22.0 +-Release: 1%{?dist} ++Release: 1%{?_tis_dist}.%{tis_patch_ver} + Summary: OpenStack Image Service Store Library + + License: ASL 2.0 +-- +2.7.4 + diff --git a/openstack/python-glance-store/centos/meta_patches/0002-meta-patch-Check-ceph-cluster-free-space.patch b/openstack/python-glance-store/centos/meta_patches/0002-meta-patch-Check-ceph-cluster-free-space.patch new file mode 100644 index 00000000..6742cf3a --- /dev/null +++ b/openstack/python-glance-store/centos/meta_patches/0002-meta-patch-Check-ceph-cluster-free-space.patch @@ -0,0 +1,34 @@ +From aef63b7fcf58613c233cbe519b814f5366522299 Mon Sep 17 00:00:00 2001 +From: Daniel Badea +Date: Thu, 2 Nov 2017 18:29:54 +0200 +Subject: [PATCH 2/2] Check ceph cluster free space + +--- + SPECS/python-glance-store.spec | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/SPECS/python-glance-store.spec b/SPECS/python-glance-store.spec +index 387a326..b60bd97 100644 +--- a/SPECS/python-glance-store.spec ++++ b/SPECS/python-glance-store.spec +@@ -13,6 +13,8 @@ Summary: OpenStack Image Service Store Library + License: ASL 2.0 + URL: https://github.com/openstack/%{upstream_name} + Source0: 
https://tarballs.openstack.org/%{upstream_name}/%{upstream_name}-%{upstream_version}.tar.gz ++# WRS ++Patch0001: 0001-Check-ceph-cluster-free-space-before-creating-image.patch + + BuildArch: noarch + +@@ -84,6 +86,8 @@ Requires: python3-oslo-privsep >= 1.9.0 + %prep + %setup -q -n %{upstream_name}-%{upstream_version} + ++# Apply WRS patches ++%patch0001 -p1 + + %build + %py2_build +-- +2.7.4 + diff --git a/openstack/python-glance-store/centos/meta_patches/0003-meta-patch-Glance-Driver.patch b/openstack/python-glance-store/centos/meta_patches/0003-meta-patch-Glance-Driver.patch new file mode 100644 index 00000000..7cd6d321 --- /dev/null +++ b/openstack/python-glance-store/centos/meta_patches/0003-meta-patch-Glance-Driver.patch @@ -0,0 +1,32 @@ +From ec249641ede0728731c263a51fdf44d5f97d596a Mon Sep 17 00:00:00 2001 +From: Stefan Dinescu +Date: Thu, 16 Nov 2017 17:44:10 +0000 +Subject: [PATCH 1/1] Meta Glance Driver + +--- + SPECS/python-glance-store.spec | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/SPECS/python-glance-store.spec b/SPECS/python-glance-store.spec +index f001803..be60094 100644 +--- a/SPECS/python-glance-store.spec ++++ b/SPECS/python-glance-store.spec +@@ -12,6 +12,7 @@ Source0: https://tarballs.openstack.org/%{upstream_name}/%{upstream_name} + + # WRS + Patch0001: 0001-Check-ceph-cluster-free-space-before-creating-image.patch ++Patch0002: 0002-Add-glance-driver.patch + + BuildArch: noarch + BuildRequires: python2-devel +@@ -44,6 +45,7 @@ OpenStack image service store library + + # Apply WRS patches + %patch0001 -p1 ++%patch0002 -p1 + + %build + %{__python2} setup.py build +-- +1.8.3.1 + diff --git a/openstack/python-glance-store/centos/meta_patches/PATCH_ORDER b/openstack/python-glance-store/centos/meta_patches/PATCH_ORDER new file mode 100644 index 00000000..f63468e9 --- /dev/null +++ b/openstack/python-glance-store/centos/meta_patches/PATCH_ORDER @@ -0,0 +1,3 @@ +0001-Update-package-versioning-for-TIS-format.patch +0002-meta-patch-Check-ceph-cluster-free-space.patch +0003-meta-patch-Glance-Driver.patch diff --git a/openstack/python-glance-store/centos/patches/0001-Check-ceph-cluster-free-space-before-creating-image.patch b/openstack/python-glance-store/centos/patches/0001-Check-ceph-cluster-free-space-before-creating-image.patch new file mode 100644 index 00000000..7c261a13 --- /dev/null +++ b/openstack/python-glance-store/centos/patches/0001-Check-ceph-cluster-free-space-before-creating-image.patch @@ -0,0 +1,298 @@ +From 8dbde864bbdd4302918e91ac696b0ae95f698b36 Mon Sep 17 00:00:00 2001 +From: Daniel Badea +Date: Thu, 2 Nov 2017 21:07:24 +0200 +Subject: [PATCH] Check available ceph space before creating image + +--- + glance_store/_drivers/rbd.py | 159 +++++++++++++++++++++++++++++- + glance_store/tests/unit/test_rbd_store.py | 17 +++- + tox.ini | 2 +- + 3 files changed, 170 insertions(+), 8 deletions(-) + +diff --git a/glance_store/_drivers/rbd.py b/glance_store/_drivers/rbd.py +index 7b803bc..9895472 100644 +--- a/glance_store/_drivers/rbd.py ++++ b/glance_store/_drivers/rbd.py +@@ -18,11 +18,15 @@ + from __future__ import absolute_import + from __future__ import with_statement + ++import ast + import contextlib + import hashlib ++import json + import logging + import math + ++from oslo_concurrency import lockutils ++from oslo_concurrency import processutils + from oslo_config import cfg + from oslo_utils import units + from six.moves import urllib +@@ -46,6 +50,10 @@ DEFAULT_CONFFILE = '/etc/ceph/ceph.conf' + DEFAULT_USER = None # let librados decide based 
on the Ceph conf file
+ DEFAULT_CHUNKSIZE = 8  # in MiB
+ DEFAULT_SNAPNAME = 'snap'
++DEFAULT_POOL_RESERVATION_FILE = '/var/run/glance-space-reservations'
++LOCK_DIR = "/tmp"
++LOCK_PREFIX = "glance_"
++LOCK_RBD_USAGE = "rbd_cluster_usage"
+
+ LOG = logging.getLogger(__name__)
+
+@@ -344,8 +352,117 @@ class Store(driver.Store):
+         LOG.debug(msg)
+         raise exceptions.NotFound(msg)
+
++    def validate_available_space(self, ioctx, image_name,
++                                 image_size, total_space=0,
++                                 reserved=0, ignore=[]):
++        """
++        Checks if there is sufficient free space in the
++        ceph cluster to store the whole image
++
++        :param image_size: the size of the new image
++        :param total_space: total cluster guaranteed space for images
++        :param reserved: space reserved for other uses
++        :param ignore: list of image names to ignore in space computation
++
++        Raises exception in case not enough space is found
++        """
++
++        pool_name = ioctx.name
++
++        # Get free space if there is no space guarantee (e.g. no quota set)
++        if total_space == 0:
++            cmd = ('env', 'LC_ALL=C', 'ceph', 'df',
++                   '--format', 'json')
++            out, err = processutils.execute(*cmd)
++            ceph_df_pools = json.loads(out).get('pools', [])
++            for pool in ceph_df_pools:
++                if pool_name == pool.get("name", "") and 'stats' in pool:
++                    # Leave space to avoid cluster filling up as some
++                    # other processes can write at the same time we import
++                    # our image.
++                    total_space = pool['stats'].get("max_avail", 0) * 0.99
++                    break
++            else:
++                msg = ("Query of max available space in %(pool)s failed. "
++                       "cmd: %(cmd)s, stdout: %(stdout)s, "
++                       "stderr: %(stderr)s" %
++                       {"pool": pool_name, "cmd": cmd,
++                        "stdout": out, "stderr": err})
++                LOG.error(msg)
++                raise exceptions.GlanceStoreException(message=msg)
++
++        # Get used space by all images in pool
++        # NOTE: There is no librbd python API for getting real space usage
++        cmd = ('env', 'LC_ALL=C', 'rbd', 'du', '-p', pool_name,
++               '--format', 'json')
++        out, err = processutils.execute(*cmd, check_exit_code=[0, 2])
++        if out:
++            image_list = json.loads(out).get("images", [])
++        else:
++            image_list = []
++
++        # Compute occupied space
++        # NOTE: RBD images can be sparse, in which case real disk usage is
++        # lower than provisioned. All glance images' real size equals
++        # provisioned space, while raw cache images are sparse. Moreover,
++        # the delta between RAW caching provisioned and real sizes is
++        # usually high. E.g. a CentOS cloud image uses approx. 900MB of
++        # real space yet provisions for 8GB. Therefore, we want the real
++        # usage and do not want to waste space on unused provisions.
++        occupied_space = 0
++        image_id = ""
++        for image in image_list:
++            image_id = image.get("name", "")
++            # Process ignores: skip images exempted from
++            # the space computation
++            if image_id in ignore:
++                continue
++            # Sanitize input: skip entries missing usage data
++            if "used_size" not in image or "provisioned_size" not in image:
++                LOG.error("Image disk usage query failure for image: "
++                          "%(id)s. cmd: %(cmd)s, stdout: %(stdout)s, "
++                          "stderr: %(stderr)s" % {"id": image_id,
++                          "cmd": cmd, "stdout": out, "stderr": err})
++                continue
++            # Get image usage
++            if "_raw" in image_id:
++                if image.get("snapshot", None) != "snap":
++                    # Each image is listed twice; after import, only
++                    # snapshots display 'used_size' correctly
++                    continue
++                # Get raw cached images real used space
++                size = image["used_size"]
++            else:
++                if image.get("snapshot", None) == "snap":
++                    # Before import, there is no snapshot and we also need
++                    # reserved space during glance image creation
++                    continue
++                # Get glance images provisioned space
++                size = image["provisioned_size"]
++            occupied_space += size
++            LOG.debug("Image %(id)s used RBD space is: %(used_size)s" %
++                      {"id": image_id, "used_size": size})
++
++        # Verify if there is enough space to proceed
++        data = {"image": image_id,
++                "pool": pool_name,
++                "used": occupied_space // 2 ** 20,
++                "needed": image_size // 2 ** 20,
++                "available": (total_space - occupied_space) // 2 ** 20,
++                "reserved": reserved // 2 ** 20}
++        LOG.info("Requesting %(needed)s MB for image %(image)s in "
++                 "Ceph %(pool)s pool. Used: %(used)s MB. Available: "
++                 "%(available)s MB (where %(reserved)s reserved)" % data)
++        if (total_space and image_size and
++                occupied_space + image_size + reserved > total_space):
++            msg = (_('Not enough cluster free space for image %s.') %
++                   image_name)
++            LOG.error(msg)
++            raise exceptions.StorageFull(message=msg)
++
++    @lockutils.synchronized(LOCK_RBD_USAGE, LOCK_PREFIX, True, LOCK_DIR)
+     def _create_image(self, fsid, conn, ioctx, image_name,
+-                      size, order, context=None):
++                      size, order, total_available_space, context=None):
+         """
+         Create an rbd image. If librbd supports it,
+         make it a cloneable snapshot, so that copy-on-write
+@@ -356,6 +473,34 @@ class Store(driver.Store):
+         :retval: `glance_store.rbd.StoreLocation` object
+         """
+         librbd = rbd.RBD()
++
++        # Get space reserved by the RAW Caching feature
++        # NOTE: Real space is updated on the fly while an image is added to
++        # RBD (i.e. with 'rbd import') so we will know how big an image is
++        # only after it's imported. Also, due to sparse mode, provisioned
++        # RBD space is higher than real usage. Therefore we need to get a
++        # better value, closer to what we will have as real usage in RBD,
++        # and this has to come from raw caching itself.
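++        # The reservation file is expected to hold a dict literal with
++        # optional "reserved" (bytes) and "image_id" keys; a missing
++        # file simply means raw caching has nothing reserved right now.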
++ try: ++ out = None ++ with open(DEFAULT_POOL_RESERVATION_FILE, "r") as f: ++ out = f.read() ++ data = ast.literal_eval(out) ++ reserved = data.get("reserved", 0) ++ img_under_caching = ([data["image_id"]] if ++ "image_id" in data else []) ++ except IOError: ++ # In case reservation file does not exist ++ reserved, img_under_caching = (0, []) ++ except Exception: ++ # In case of any other error ignore reservations ++ LOG.error("Failed parsing: %s" % out) ++ reserved, img_under_caching = (0, []) ++ ++ self.validate_available_space( ++ ioctx, image_name, size, total_available_space, ++ reserved, img_under_caching) ++ + features = conn.conf_get('rbd_default_features') + if ((features is None) or (int(features) == 0)): + features = rbd.RBD_FEATURE_LAYERING +@@ -464,9 +609,19 @@ class Store(driver.Store): + "resize-before-write for each chunk which " + "will be considerably slower than normal")) + ++ ceph_quota_output = json.loads( ++ conn.mon_command( ++ json.dumps({ ++ "prefix": "osd pool get-quota", ++ "pool": self.pool, ++ "format": "json-pretty"}), "")[1]) ++ ++ glance_ceph_quota = ceph_quota_output.get("quota_max_bytes", 0) ++ + try: + loc = self._create_image(fsid, conn, ioctx, image_name, +- image_size, order) ++ image_size, order, ++ glance_ceph_quota) + except rbd.ImageExists: + msg = _('RBD image %s already exists') % image_id + raise exceptions.Duplicate(message=msg) +diff --git a/glance_store/tests/unit/test_rbd_store.py b/glance_store/tests/unit/test_rbd_store.py +index 9765aa3..34ab7b4 100644 +--- a/glance_store/tests/unit/test_rbd_store.py ++++ b/glance_store/tests/unit/test_rbd_store.py +@@ -69,6 +69,9 @@ class MockRados(object): + def conf_get(self, *args, **kwargs): + pass + ++ def mon_command(self, *args, **kwargs): ++ return ["{}", "{}"] ++ + + class MockRBD(object): + +@@ -152,7 +155,7 @@ class MockRBD(object): + pass + + def list(self, *args, **kwargs): +- raise NotImplementedError() ++ return [] + + def clone(self, *args, **kwargs): + raise NotImplementedError() +@@ -184,7 +187,8 @@ class TestStore(base.StoreBaseTest, + self.data_len = 3 * units.Ki + self.data_iter = six.BytesIO(b'*' * self.data_len) + +- def test_add_w_image_size_zero(self): ++ @mock.patch.object(rbd_store.Store, 'validate_available_space') ++ def test_add_w_image_size_zero(self, validate_available_space): + """Assert that correct size is returned even though 0 was provided.""" + self.store.chunk_size = units.Ki + with mock.patch.object(rbd_store.rbd.Image, 'resize') as resize: +@@ -234,7 +238,8 @@ class TestStore(base.StoreBaseTest, + 'fake_image_id', self.data_iter, self.data_len) + self.called_commands_expected = ['create'] + +- def test_add_with_verifier(self): ++ @mock.patch.object(rbd_store.Store, 'validate_available_space') ++ def test_add_with_verifier(self, validate_available_space): + """Assert 'verifier.update' is called when verifier is provided.""" + self.store.chunk_size = units.Ki + verifier = mock.MagicMock(name='mock_verifier') +@@ -403,7 +408,8 @@ class TestStore(base.StoreBaseTest, + pass + self.assertRaises(exceptions.BackendException, test) + +- def test_create_image_conf_features(self): ++ @mock.patch.object(rbd_store.Store, 'validate_available_space') ++ def test_create_image_conf_features(self, validate_available_space): + # Tests that we use non-0 features from ceph.conf and cast to int. 
+ fsid = 'fake' + features = '3' +@@ -413,9 +419,10 @@ class TestStore(base.StoreBaseTest, + name = '1' + size = 1024 + order = 3 ++ ceph_size = 0 + with mock.patch.object(rbd_store.rbd.RBD, 'create') as create_mock: + location = self.store._create_image( +- fsid, conn, ioctxt, name, size, order) ++ fsid, conn, ioctxt, name, size, order, ceph_size) + self.assertEqual(fsid, location.specs['fsid']) + self.assertEqual(rbd_store.DEFAULT_POOL, location.specs['pool']) + self.assertEqual(name, location.specs['image']) +diff --git a/tox.ini b/tox.ini +index 2e5a2f8..426c024 100644 +--- a/tox.ini ++++ b/tox.ini +@@ -27,7 +27,7 @@ commands = + # B101 - Use of assert detected. + # B110 - Try, Except, Pass detected. + # B303 - Use of insecure MD2, MD4, or MD5 hash function. +- bandit -r glance_store -x tests --skip B101,B110,B303 ++ bandit -r glance_store -x tests --skip B101,B110,B303,B108 + + [testenv:bandit] + # NOTE(browne): This is required for the integration test job of the bandit +-- +2.7.4 + diff --git a/openstack/python-glance-store/centos/patches/0002-Add-glance-driver.patch b/openstack/python-glance-store/centos/patches/0002-Add-glance-driver.patch new file mode 100644 index 00000000..5382f134 --- /dev/null +++ b/openstack/python-glance-store/centos/patches/0002-Add-glance-driver.patch @@ -0,0 +1,250 @@ +From 6da11c584cab0e2ff396cc0208453a3e19b4dc2d Mon Sep 17 00:00:00 2001 +From: Stefan Dinescu +Date: Fri, 17 Nov 2017 15:50:23 +0000 +Subject: [PATCH 1/1] Add glance driver + +--- + glance_store/_drivers/glance.py | 210 ++++++++++++++++++++++++++++++++++++++++ + setup.cfg | 2 + + 2 files changed, 212 insertions(+) + create mode 100644 glance_store/_drivers/glance.py + +diff --git a/glance_store/_drivers/glance.py b/glance_store/_drivers/glance.py +new file mode 100644 +index 0000000..554a5a1 +--- /dev/null ++++ b/glance_store/_drivers/glance.py +@@ -0,0 +1,210 @@ ++# Copyright (c) 2013-2017 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# ++ ++# vim: tabstop=4 shiftwidth=4 softtabstop=4 ++ ++# All Rights Reserved. 
++#
++
++"""Storage backend for glance"""
++
++import contextlib
++import errno
++import hashlib
++import logging
++import math
++import os
++import socket
++import time
++
++from oslo_concurrency import processutils
++from oslo_config import cfg
++from oslo_utils import units
++
++from glance_store import capabilities
++from glance_store.common import utils
++import glance_store.driver
++from glance_store import exceptions
++from glance_store.i18n import _, _LE, _LW, _LI
++import glance_store.location
++from keystoneclient import exceptions as keystone_exc
++from keystoneclient import service_catalog as keystone_sc
++
++import keystoneauth1.loading
++import keystoneauth1.session
++
++from glanceclient import client as glance_client
++from cinderclient import exceptions as glance_exception
++
++CONF = cfg.CONF
++LOG = logging.getLogger(__name__)
++
++_GLANCE_OPTS = [
++    cfg.StrOpt('glance_endpoint_template',
++               default=None,
++               help=_("Glance Endpoint template")),
++    cfg.StrOpt('glance_catalog_info',
++               default='image:glance:internalURL',
++               help=_("Glance catalog info")),]
++
++def get_glanceclient(conf, remote_region, context=None):
++
++    glance_store = conf.glance_store
++
++    if glance_store.glance_endpoint_template:
++        url = glance_store.glance_endpoint_template % context.to_dict()
++    else:
++        info = glance_store.glance_catalog_info
++        service_type, service_name, endpoint_type = info.split(':')
++        sc = {'serviceCatalog': context.service_catalog}
++        try:
++            url = keystone_sc.ServiceCatalogV2(sc).url_for(
++                region_name=remote_region,
++                service_type=service_type,
++                service_name=service_name,
++                endpoint_type=endpoint_type)
++        except keystone_exc.EndpointNotFound:
++            reason = _("Failed to find Glance from a service catalog.")
++            raise exceptions.BadStoreConfiguration(store_name="glance",
++                                                   reason=reason)
++
++    c = glance_client.Client('2',
++                             endpoint=url,
++                             token=context.auth_token)
++
++    return c
++
++
++class StoreLocation(glance_store.location.StoreLocation):
++
++    """Class describing a Glance URI."""
++
++    def process_specs(self):
++        self.scheme = self.specs.get('scheme', 'glance')
++        self.image_id = self.specs.get('image_id')
++        self.remote_region = self.specs.get('remote_region')
++
++    def get_uri(self):
++        return "glance://%s/%s" % (self.remote_region, self.image_id)
++
++    def parse_uri(self, uri):
++
++        if not uri.startswith('glance://'):
++            reason = _("URI must start with 'glance://'")
++            LOG.info(reason)
++            raise exceptions.BadStoreUri(message=reason)
++
++        self.scheme = 'glance'
++
++        sp = uri.split('/')
++
++        self.image_id = sp[-1]
++        self.remote_region = sp[-2]
++
++        if not utils.is_uuid_like(self.image_id):
++            reason = _("URI contains invalid image ID")
++            LOG.info(reason)
++            raise exceptions.BadStoreUri(message=reason)
++
++
++
++class Store(glance_store.driver.Store):
++
++    """Glance backend store adapter."""
++
++    _CAPABILITIES = (capabilities.BitMasks.READ_ACCESS |
++                     capabilities.BitMasks.DRIVER_REUSABLE)
++    OPTIONS = _GLANCE_OPTS
++    EXAMPLE_URL = "glance:///"
++
++    def __init__(self, *args, **kargs):
++        super(Store, self).__init__(*args, **kargs)
++
++    def get_schemes(self):
++        return ('glance',)
++
++    def _check_context(self, context, require_tenant=False):
++
++        if context is None:
++            reason = _("Glance storage requires a context.")
++            raise exceptions.BadStoreConfiguration(store_name="glance",
++                                                   reason=reason)
++        if context.service_catalog is None:
++            reason = _("glance storage requires a service catalog.")
++            raise exceptions.BadStoreConfiguration(store_name="glance",
++                                                   reason=reason)
++
++
++    @capabilities.check
++    def get(self, location, offset=0, chunk_size=None, context=None):
++        """
++        Takes a `glance_store.location.Location` object that indicates
++        where to find the image file, and returns a tuple of generator
++        (for reading the image file) and image_size
++
++        :param location `glance_store.location.Location` object, supplied
++                        from glance_store.location.get_location_from_uri()
++        :param offset: offset to start reading
++        :param chunk_size: size to read, or None to get all the image
++        :param context: Request context
++        :raises `glance_store.exceptions.NotFound` if image does not exist
++        """
++
++        loc = location.store_location
++        self._check_context(context)
++
++        try:
++            gc = get_glanceclient(self.conf, loc.remote_region, context)
++            img = gc.images.get(loc.image_id)
++
++            size = int(img.size/(1024*1024))
++            iterator = gc.images.data(loc.image_id)
++            return (iterator, chunk_size or size)
++        except glance_exception.NotFound:
++            reason = _("Failed to get image data: image "
++                       "%s can not be found") % loc.image_id
++            LOG.error(reason)
++            raise exceptions.NotFound(reason)
++        except glance_exception.ClientException as e:
++            msg = (_('Failed to get image %(image_id)s: %(error)s')
++                   % {'image_id': loc.image_id, 'error': e})
++            LOG.error(msg)
++            raise exceptions.BackendException(msg)
++
++    def get_size(self, location, context=None):
++        """
++        Takes a `glance_store.location.Location` object that indicates
++        where to find the image file and returns the image size
++
++        :param location: `glance_store.location.Location` object, supplied
++                         from glance_store.location.get_location_from_uri()
++        :raises: `glance_store.exceptions.NotFound` if image does not exist
++        :rtype int
++        """
++
++        loc = location.store_location
++
++        try:
++            self._check_context(context)
++            img = get_glanceclient(self.conf, loc.remote_region,
++                                   context).images.get(loc.image_id)
++            return int(img.size/(1024*1024))
++        except glance_exception.NotFound:
++            raise exceptions.NotFound(image=loc.image_id)
++        except Exception:
++            LOG.exception(_LE("Failed to get image size due to "
++                              "internal error."))
++            return 0
++
++    @capabilities.check
++    def add(self, image_id, image_file, image_size, context=None,
++            verifier=None):
++        raise NotImplementedError
++
++    @capabilities.check
++    def delete(self, location, context=None):
++        raise NotImplementedError
+diff --git a/setup.cfg b/setup.cfg
+index b3054c4..8cc9fb7 100644
+--- a/setup.cfg
++++ b/setup.cfg
+@@ -32,6 +32,7 @@ glance_store.drivers =
+     sheepdog = glance_store._drivers.sheepdog:Store
+     cinder = glance_store._drivers.cinder:Store
+     vmware = glance_store._drivers.vmware_datastore:Store
++    glance = glance_store._drivers.glance:Store
+ # TESTS ONLY
+     no_conf = glance_store.tests.fakes:UnconfigurableStore
+ # Backwards compatibility
+@@ -42,6 +43,7 @@ glance_store.drivers =
+     glance.store.sheepdog.Store = glance_store._drivers.sheepdog:Store
+     glance.store.cinder.Store = glance_store._drivers.cinder:Store
+     glance.store.vmware_datastore.Store = glance_store._drivers.vmware_datastore:Store
++    glance.store.glance.Store = glance_store._drivers.glance:Store
+ oslo.config.opts =
+     glance.store = glance_store.backend:_list_opts
+ console_scripts =
+--
+1.8.3.1
+
diff --git a/openstack/python-glance-store/centos/srpm_path b/openstack/python-glance-store/centos/srpm_path new file mode 100644 index 00000000..62249bea --- /dev/null +++ b/openstack/python-glance-store/centos/srpm_path @@ -0,0 +1 @@
+mirror:Source/python-glance-store-0.22.0-1.el7.src.rpm diff --git a/openstack/python-glance/centos/build_srpm.data b/openstack/python-glance/centos/build_srpm.data new file mode 100644 index 00000000..359d02d8 --- /dev/null +++ b/openstack/python-glance/centos/build_srpm.data @@ -0,0 +1,4 @@ +SRC_DIR="$CGCS_BASE/git/glance" +COPY_LIST="$FILES_BASE/*" +TIS_BASE_SRCREV=06af2eb5abe0332f7035a7d7c2fbfd19fbc4dae7 +TIS_PATCH_VER=GITREVCOUNT diff --git a/openstack/python-glance/centos/files/glance-api-dist.conf b/openstack/python-glance/centos/files/glance-api-dist.conf new file mode 100644 index 00000000..fced82ff --- /dev/null +++ b/openstack/python-glance/centos/files/glance-api-dist.conf @@ -0,0 +1,20 @@ +[DEFAULT] +debug = False +verbose = True +use_stderr = False +log_file = /var/log/glance/api.log +filesystem_store_datadir = /var/lib/glance/images/ +scrubber_datadir = /var/lib/glance/scrubber +image_cache_dir = /var/lib/glance/image-cache/ + +[database] +connection = mysql://glance:glance@localhost/glance + +[keystone_authtoken] +auth_host = 127.0.0.1 +auth_port = 35357 +auth_protocol = http + +[paste_deploy] +config_file = /usr/share/glance/glance-api-dist-paste.ini + diff --git a/openstack/python-glance/centos/files/glance-cache-dist.conf b/openstack/python-glance/centos/files/glance-cache-dist.conf new file mode 100644 index 00000000..0daf5248 --- /dev/null +++ b/openstack/python-glance/centos/files/glance-cache-dist.conf @@ -0,0 +1,5 @@ +[DEFAULT] +debug = False +verbose = True +log_file = /var/log/glance/image-cache.log +image_cache_dir = /var/lib/glance/image-cache/ diff --git a/openstack/python-glance/centos/files/glance-purge-deleted-active b/openstack/python-glance/centos/files/glance-purge-deleted-active new file mode 100644 index 00000000..2c86c0d0 --- /dev/null +++ b/openstack/python-glance/centos/files/glance-purge-deleted-active @@ -0,0 +1,63 @@ +#!/bin/bash + +# +# Wrapper script to run glance-manage to purge soft deleted rows on active controller only +# +GLANCE_PURGE_INFO="/var/run/glance-purge.info" +GLANCE_PURGE_CMD="/usr/bin/nice -n 2 /usr/bin/glance-manage db purge --age_in_days 1 --max_rows 1000000 >> /dev/null 2>&1" + +function is_active_pgserver() +{ + # Determine whether we're running on the same controller as the service. + local service=postgres + local enabledactive=$(/usr/bin/sm-query service $service| grep enabled-active) + if [ "x$enabledactive" == "x" ] + then + # enabled-active not found for that service on this controller + return 1 + else + # enabled-active found for that resource + return 0 + fi +} + +if is_active_pgserver +then + if [ ! -f ${GLANCE_PURGE_INFO} ] + then + echo delay_count=0 > ${GLANCE_PURGE_INFO} + fi + + source ${GLANCE_PURGE_INFO} + sudo -u postgres psql -d sysinv -c "SELECT alarm_id, entity_instance_id from i_alarm;" | grep -P "^(?=.*100.101)(?=.*${HOSTNAME})" &>/dev/null + if [ $? -eq 0 ] + then + source /etc/platform/platform.conf + if [ "${system_type}" = "All-in-one" ] + then + source /etc/init.d/task_affinity_functions.sh + idle_core=$(get_most_idle_core) + if [ "$idle_core" -ne "0" ] + then + # Purge soft deleted records that are older than 1 day from glance database. 
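+                        # Pin the purge to the most idle core (as
+                        # reported by task_affinity_functions.sh) so it
+                        # does not compete with platform services.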
+ sh -c "exec taskset -c $idle_core ${GLANCE_PURGE_CMD}" + sed -i "/delay_count/s/=.*/=0/" ${GLANCE_PURGE_INFO} + exit 0 + fi + fi + + if [ "$delay_count" -lt "3" ] + then + newval=$(($delay_count+1)) + sed -i "/delay_count/s/=.*/=$newval/" ${GLANCE_PURGE_INFO} + (sleep 3600; /usr/bin/glance-purge-deleted-active) & + exit 0 + fi + fi + + # Purge soft deleted records that are older than 1 day from glance database. + eval ${GLANCE_PURGE_CMD} + sed -i "/delay_count/s/=.*/=0/" ${GLANCE_PURGE_INFO} +fi + +exit 0 diff --git a/openstack/python-glance/centos/files/glance-registry-dist.conf b/openstack/python-glance/centos/files/glance-registry-dist.conf new file mode 100644 index 00000000..baed0a54 --- /dev/null +++ b/openstack/python-glance/centos/files/glance-registry-dist.conf @@ -0,0 +1,20 @@ +[DEFAULT] +debug = False +verbose = True +use_stderr = False +log_file = /var/log/glance/registry.log + +[database] +connection = mysql://glance:glance@localhost/glance + +[keystone_authtoken] +admin_tenant_name = %SERVICE_TENANT_NAME% +admin_user = %SERVICE_USER% +admin_password = %SERVICE_PASSWORD% +auth_host = 127.0.0.1 +auth_port = 35357 +auth_protocol = http + +[paste_deploy] +config_file = /usr/share/glance/glance-registry-dist-paste.ini + diff --git a/openstack/python-glance/centos/files/glance-scrubber-dist.conf b/openstack/python-glance/centos/files/glance-scrubber-dist.conf new file mode 100644 index 00000000..9e16edef --- /dev/null +++ b/openstack/python-glance/centos/files/glance-scrubber-dist.conf @@ -0,0 +1,6 @@ +[DEFAULT] +debug = False +verbose = True +log_file = /var/log/glance/scrubber.log +scrubber_datadir = /var/lib/glance/scrubber + diff --git a/openstack/python-glance/centos/files/glance-sudoers b/openstack/python-glance/centos/files/glance-sudoers new file mode 100644 index 00000000..ebb61440 --- /dev/null +++ b/openstack/python-glance/centos/files/glance-sudoers @@ -0,0 +1,3 @@ +Defaults:glance !requiretty + +glance ALL = (root) NOPASSWD: /usr/bin/glance-rootwrap /etc/glance/rootwrap.conf * diff --git a/openstack/python-glance/centos/files/glance-swift.conf b/openstack/python-glance/centos/files/glance-swift.conf new file mode 100644 index 00000000..3c7e1d3b --- /dev/null +++ b/openstack/python-glance/centos/files/glance-swift.conf @@ -0,0 +1,25 @@ +# glance-swift.conf.sample +# +# This file is an example config file when +# multiple swift accounts/backing stores are enabled. +# +# Specify the reference name in [] +# For each section, specify the auth_address, user and key. 
+# +# WARNING: +# * If any of auth_address, user or key is not specified, +# the glance-api's swift store will fail to configure +# +# [ref1] +# user = tenant:user1 +# key = key1 +# auth_version = 2 +# auth_address = http://localhost:5000/v2.0 +# +# [ref2] +# user = project_name:user_name2 +# key = key2 +# user_domain_id = default +# project_domain_id = default +# auth_version = 3 +# auth_address = http://localhost:5000/v3 diff --git a/openstack/python-glance/centos/files/openstack-glance-api.service b/openstack/python-glance/centos/files/openstack-glance-api.service new file mode 100644 index 00000000..e4f22d30 --- /dev/null +++ b/openstack/python-glance/centos/files/openstack-glance-api.service @@ -0,0 +1,19 @@ +[Unit] +Description=OpenStack Image Service (code-named Glance) API server +After=syslog.target network.target + +[Service] +LimitNOFILE=131072 +LimitNPROC=131072 +Type=simple +# WRS - use root user +#User=glance +User=root +ExecStart=/usr/bin/glance-api +PrivateTmp=true +# WRS - managed by sm +#Restart=on-failure + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/python-glance/centos/files/openstack-glance-registry.service b/openstack/python-glance/centos/files/openstack-glance-registry.service new file mode 100644 index 00000000..f1fdd3e6 --- /dev/null +++ b/openstack/python-glance/centos/files/openstack-glance-registry.service @@ -0,0 +1,17 @@ +[Unit] +Description=OpenStack Image Service (code-named Glance) Registry server +After=syslog.target network.target + +[Service] +Type=simple +# WRS - use root user +#User=glance +User=root +ExecStart=/usr/bin/glance-registry +PrivateTmp=true +# WRS - managed by sm +#Restart=on-failure + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/python-glance/centos/files/openstack-glance-scrubber.service b/openstack/python-glance/centos/files/openstack-glance-scrubber.service new file mode 100644 index 00000000..972c7d38 --- /dev/null +++ b/openstack/python-glance/centos/files/openstack-glance-scrubber.service @@ -0,0 +1,17 @@ +[Unit] +Description=OpenStack Image Service deferred image deletion service +After=syslog.target network.target + +[Service] +Type=simple +# WRS - use root user +#User=glance +User=root +ExecStart=/usr/bin/glance-scrubber +PrivateTmp=true +# WRS - Not currently used - would be managed by sm +#Restart=on-failure + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/python-glance/centos/files/openstack-glance.logrotate b/openstack/python-glance/centos/files/openstack-glance.logrotate new file mode 100644 index 00000000..0a0900e4 --- /dev/null +++ b/openstack/python-glance/centos/files/openstack-glance.logrotate @@ -0,0 +1,8 @@ +/var/log/glance/*.log { + weekly + rotate 14 + size 10M + missingok + compress + minsize 100k +} diff --git a/openstack/python-glance/centos/files/restart-glance b/openstack/python-glance/centos/files/restart-glance new file mode 100644 index 00000000..167f8875 --- /dev/null +++ b/openstack/python-glance/centos/files/restart-glance @@ -0,0 +1,150 @@ +#!/bin/bash +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# The patching subsystem provides a patch-functions bash source file +# with useful function and variable definitions. +# +. 
/etc/patching/patch-functions
+
+#
+# We can now check to see what type of node we're on, if it's locked, etc,
+# and act accordingly
+#
+
+#
+# Declare an overall script return code
+#
+declare -i GLOBAL_RC=$PATCH_STATUS_OK
+
+#
+# Handle restarting Glance services
+#
+
+# Syntax: ("<process> <timeout> <after_timeout>" \)
+SERVICES=("glance-api 30 kill"\
+          "glance-registry 30 kill")
+# where:
+# <process> = name of executable file reported by ps
+# <timeout> = how much to wait for the process to gracefully shutdown
+# <after_timeout> = either 'kill' the process with SIGKILL or 'leave' it running;
+# the idea is to avoid leaving behind processes that are degraded, better to have
+# new ones re-spawned
+
+
+function get_pid {
+    local service=$1
+    PID=$(cat /var/run/resource-agents/$service.pid)
+    echo "$PID"
+}
+
+if is_controller
+then
+    # Glance services only run on the controller
+
+    if [ ! -f $PATCH_FLAGDIR/glance.restarted ]
+    then
+        touch $PATCH_FLAGDIR/glance.restarted
+        for s in "${SERVICES[@]}"; do
+            set -- $s
+            service="$1"
+            timeout="$2"
+            after_timeout="$3"
+            new_process="false"
+
+            # Check SM to see if the service is running
+            sm-query service $service | grep -q 'enabled-active'
+            if [ $? -eq 0 ]
+            then
+                loginfo "$0: Restarting $service"
+
+                # Get PID
+                PID=$(get_pid $service)
+
+                # Send restart signal to process
+                kill -s USR1 $PID
+
+                # Wait up to $timeout seconds for service to gracefully recover
+                let -i UNTIL=$SECONDS+$timeout
+                while [ $UNTIL -ge $SECONDS ]
+                do
+                    # Check to see if we have a new process
+                    NEW_PID=$(get_pid $service)
+                    if [[ "$PID" != "$NEW_PID" ]]
+                    then
+                        new_process="true"
+                        break
+                    fi
+
+                    # Still old process? Let's wait 5 seconds and check again
+                    sleep 5
+                done
+
+                # Do a hard restart of the process if we still have the old one
+                NEW_PID=$(get_pid $service)
+                if [[ "$PID" == "$NEW_PID" ]]
+                then
+                    # we have the old process still running!
+                    if [[ "$after_timeout" == "kill" ]]
+                    then
+                        loginfo "$0: Old process of $service failed to gracefully terminate in $timeout, killing it!"
+                        # kill the old process
+                        kill -s KILL -- -$PID
+                        # wait for a new process to be restarted by sm
+                        let -i UNTIL=$SECONDS+10
+                        while [ $UNTIL -ge $SECONDS ]
+                        do
+                            # Check to see if we have a new process
+                            NEW_PID=$(get_pid $service)
+                            if [[ ! -z "$NEW_PID" ]] && [[ "$PID" != "$NEW_PID" ]]
+                            then
+                                loginfo "$0: New process of $service started"
+                                new_process="true"
+                                break
+                            fi
+                        done
+                    fi
+                fi
+
+                # Wait for the new process to complete initialisation
+                if [[ "$new_process" == "true" ]]
+                then
+                    let -i UNTIL=$SECONDS+20
+                    while [ $UNTIL -ge $SECONDS ]
+                    do
+                        # Note: Services are restarted by sm which runs the ocf start script.
+                        # Sm reports enabled-active only *after* those scripts return success
+                        sm-query service $service | grep -q 'enabled-active'
+                        if [ $? -eq 0 ]
+                        then
+                            loginfo "$0: New process of $service started correctly"
+                            break
+                        fi
+                        sleep 1
+                    done
+                fi
+
+                sm-query service $service | grep -q 'enabled-active'
+                if [ $? -ne 0 ]
+                then
+                    # Still not running!
Clear the flag and mark the RC as failed + loginfo "$0: Failed to restart $service" + rm -f $PATCH_FLAGDIR/$service.restarted + GLOBAL_RC=$PATCH_STATUS_FAILED + sm-query service $service + break + # Note: break if any process in the SERVICES list fails + fi + fi + done + fi +fi +# +# Exit the script with the overall return code +# +exit $GLOBAL_RC + diff --git a/openstack/python-glance/centos/openstack-glance.spec b/openstack/python-glance/centos/openstack-glance.spec new file mode 100644 index 00000000..be793d53 --- /dev/null +++ b/openstack/python-glance/centos/openstack-glance.spec @@ -0,0 +1,406 @@ +%global service glance + +%{!?upstream_version: %global upstream_version %{version}%{?milestone}} +%global with_doc %{!?_without_doc:1}%{?_without_doc:0} + +# WRS: remove docs - for now +%global with_doc 0 + +%global common_desc \ +OpenStack Image Service (code-named Glance) provides discovery, registration, \ +and delivery services for virtual disk images. The Image Service API server \ +provides a standard REST interface for querying information about virtual disk \ +images stored in a variety of back-end stores, including OpenStack Object \ +Storage. Clients can register new virtual disk images with the Image Service, \ +query for information on publicly available disk images, and use the Image \ +Service's client library for streaming virtual disk images. + +Name: openstack-glance +# Liberty semver reset +# https://review.openstack.org/#/q/I6a35fa0dda798fad93b804d00a46af80f08d475c,n,z +Epoch: 1 +Version: 15.0.0 +Release: 1%{?_tis_dist}.%{tis_patch_ver} +Summary: OpenStack Image Service + +License: ASL 2.0 +URL: http://glance.openstack.org +Source0: https://tarballs.openstack.org/%{service}/%{service}-%{upstream_version}.tar.gz + +# + +Source001: openstack-glance-api.service +Source003: openstack-glance-registry.service +Source004: openstack-glance-scrubber.service +Source010: openstack-glance.logrotate + +Source021: glance-api-dist.conf +Source022: glance-cache-dist.conf +Source024: glance-registry-dist.conf +Source025: glance-scrubber-dist.conf +Source026: glance-swift.conf + +Source030: glance-sudoers +Source031: restart-glance +Source032: glance-purge-deleted-active + +BuildArch: noarch + +BuildRequires: git +BuildRequires: python2-devel +BuildRequires: python-setuptools +BuildRequires: python-pbr +BuildRequires: intltool +# Required for config generation +BuildRequires: openstack-macros +BuildRequires: python-alembic +BuildRequires: python-cursive +BuildRequires: python-crypto +BuildRequires: python-eventlet +BuildRequires: python-futurist +BuildRequires: python-glance-store >= 0.21.0 +BuildRequires: python-httplib2 +BuildRequires: python-oslo-config >= 2:4.0.0 +BuildRequires: python-oslo-log +BuildRequires: python-oslo-middleware >= 3.27.0 +BuildRequires: python-oslo-policy >= 1.23.0 +BuildRequires: python-oslo-utils >= 3.20.0 +BuildRequires: python-osprofiler +BuildRequires: python-paste-deploy +BuildRequires: python-requests +BuildRequires: python-routes +BuildRequires: python-oslo-messaging >= 5.24.2 +BuildRequires: python-taskflow >= 2.7.0 +BuildRequires: python-wsme >= 0.8 + +Requires(pre): shadow-utils +Requires: python-glance = %{epoch}:%{version}-%{release} +Requires: python-glanceclient >= 1:2.8.0 + +Requires(post): systemd +Requires(preun): systemd +Requires(postun): systemd +BuildRequires: systemd +BuildRequires: systemd-devel +BuildRequires: python-psutil +BuildRequires: python-mock + +%description +%{common_desc} + +This package contains the API and registry servers. 
+ +%package -n python-glance +Summary: Glance Python libraries + +Requires: pysendfile +Requires: python-cursive +Requires: python-cryptography >= 1.6 +Requires: python-debtcollector >= 1.2.0 +Requires: python-eventlet >= 0.18.2 +Requires: python-futurist >= 0.11.0 +Requires: python-glance-store >= 0.21.0 +Requires: python-httplib2 +Requires: python-iso8601 >= 0.1.11 +Requires: python-jsonschema +Requires: python-keystoneauth1 >= 3.1.0 +Requires: python-keystoneclient >= 1:3.8.0 +Requires: python-keystonemiddleware >= 4.12.0 +Requires: python-migrate >= 0.11.0 +Requires: python-monotonic >= 0.6 +Requires: python-oslo-concurrency >= 3.8.0 +Requires: python-oslo-config >= 2:4.0.0 +Requires: python-oslo-context >= 2.14.0 +Requires: python-oslo-db >= 4.24.0 +Requires: python-oslo-i18n >= 2.1.0 +Requires: python-oslo-log >= 3.22.0 +Requires: python-oslo-messaging >= 5.24.2 +Requires: python-oslo-middleware >= 3.27.0 +Requires: python-oslo-policy >= 1.23.0 +Requires: python-oslo-serialization >= 1.10.0 +Requires: python-oslo-utils >= 3.20.0 +Requires: python-oslo-vmware >= 0.11.1 +Requires: python-osprofiler +Requires: python-paste +Requires: python-paste-deploy +Requires: python-pbr +Requires: python-prettytable +Requires: python-retrying +Requires: python-routes +Requires: python-six >= 1.9.0 +Requires: python-sqlalchemy >= 1.0.10 +Requires: python-stevedore >= 1.20.0 +Requires: python-swiftclient >= 2.2.0 +Requires: python-taskflow >= 2.7.0 +Requires: python-webob >= 1.7.1 +Requires: python-wsme >= 0.8 +Requires: pyOpenSSL +Requires: pyxattr +Requires: python-os-brick >= 1.8.0 +Requires: python-alembic >= 0.8.7 +Requires: python-sqlparse + +#test deps: python-mox python-nose python-requests +#test and optional store: +#ceph - glance.store.rdb +#python-boto - glance.store.s3 +Requires: python-boto + +%description -n python-glance +%{common_desc} + +This package contains the glance Python library. + +%if 0%{?with_doc} +%package doc +Summary: Documentation for OpenStack Image Service + +Requires: %{name} = %{epoch}:%{version}-%{release} + +BuildRequires: systemd-units +BuildRequires: python-sphinx +BuildRequires: python-openstackdocstheme +BuildRequires: graphviz +# Required to build module documents +BuildRequires: python-boto +BuildRequires: python-cryptography >= 1.6 +BuildRequires: python-keystoneauth1 +BuildRequires: python-keystonemiddleware +BuildRequires: python-oslo-concurrency >= 3.5.0 +BuildRequires: python-oslo-context >= 0.2.0 +BuildRequires: python-oslo-db >= 4.1.0 +BuildRequires: python-sqlalchemy >= 1.0.10 +BuildRequires: python-stevedore +BuildRequires: python-webob >= 1.2.3 +BuildRequires: python-oslotest +BuildRequires: python-psutil +BuildRequires: python-testresources +BuildRequires: pyxattr +# Required to compile translation files +BuildRequires: python-babel + +%description doc +%{common_desc} + +This package contains documentation files for glance. +%endif + +%package -n python-%{service}-tests +Summary: Glance tests +Requires: openstack-%{service} = %{epoch}:%{version}-%{release} + +%description -n python-%{service}-tests +%{common_desc} + +This package contains the Glance test files. + + +%prep +%autosetup -n glance-%{upstream_version} -S git + +sed -i '/\/usr\/bin\/env python/d' glance/common/config.py glance/common/crypt.py glance/db/sqlalchemy/migrate_repo/manage.py + +# Remove the requirements file so that pbr hooks don't add it +# to distutils requiers_dist config +%py_req_cleanup + +%build +PYTHONPATH=. 
oslo-config-generator --config-dir=etc/oslo-config-generator/ +export PBR_VERSION=%{version} + +# Build +%{__python2} setup.py build + +# Generate i18n files +%{__python2} setup.py compile_catalog -d build/lib/%{service}/locale + +%install +export PBR_VERSION=%{version} +%{__python2} setup.py install -O1 --skip-build --root %{buildroot} + +export PYTHONPATH="$( pwd ):$PYTHONPATH" +%if 0%{?with_doc} +%{__python2} setup.py build_sphinx +%{__python2} setup.py build_sphinx --builder man +mkdir -p %{buildroot}%{_mandir}/man1 +install -p -D -m 644 doc/build/man/*.1 %{buildroot}%{_mandir}/man1/ +%endif + +# Fix hidden-file-or-dir warnings +%if 0%{?with_doc} +rm -fr doc/build/html/.doctrees doc/build/html/.buildinfo +%endif +rm -f %{buildroot}/usr/share/doc/glance/README.rst + +# Setup directories +install -d -m 755 %{buildroot}%{_datadir}/glance +install -d -m 755 %{buildroot}%{_sharedstatedir}/glance/images +install -d -m 755 %{buildroot}%{_sysconfdir}/glance/metadefs + +# WRS: +install -p -D -m 644 etc/glance-api-paste.ini %{buildroot}%{_sysconfdir}/glance/glance-api-paste.ini +install -p -D -m 644 etc/glance-registry-paste.ini %{buildroot}%{_sysconfdir}/glance/glance-registry-paste.ini + +# WRS in-service restarts +install -p -D -m 700 %{SOURCE31} %{buildroot}%{_bindir}/restart-glance + +# WRS purge cron +install -p -D -m 755 %{SOURCE32} %{buildroot}%{_bindir}/glance-purge-deleted-active + +# Config file +install -p -D -m 640 etc/glance-api.conf %{buildroot}%{_sysconfdir}/glance/glance-api.conf +install -p -D -m 644 %{SOURCE21} %{buildroot}%{_datadir}/glance/glance-api-dist.conf +install -p -D -m 644 etc/glance-api-paste.ini %{buildroot}%{_datadir}/glance/glance-api-dist-paste.ini +## +install -p -D -m 640 etc/glance-cache.conf %{buildroot}%{_sysconfdir}/glance/glance-cache.conf +install -p -D -m 644 %{SOURCE22} %{buildroot}%{_datadir}/glance/glance-cache-dist.conf +## +install -p -D -m 640 etc/glance-registry.conf %{buildroot}%{_sysconfdir}/glance/glance-registry.conf +install -p -D -m 644 %{SOURCE24} %{buildroot}%{_datadir}/glance/glance-registry-dist.conf +install -p -D -m 644 etc/glance-registry-paste.ini %{buildroot}%{_datadir}/glance/glance-registry-dist-paste.ini +## +install -p -D -m 640 etc/glance-scrubber.conf %{buildroot}%{_sysconfdir}/glance/glance-scrubber.conf +install -p -D -m 644 %{SOURCE25} %{buildroot}%{_datadir}/glance/glance-scrubber-dist.conf +## +install -p -D -m 644 %{SOURCE26} %{buildroot}%{_sysconfdir}/glance/glance-swift.conf + +install -p -D -m 640 etc/policy.json %{buildroot}%{_sysconfdir}/glance/policy.json +install -p -D -m 640 etc/rootwrap.conf %{buildroot}%{_sysconfdir}/glance/rootwrap.conf +install -p -D -m 640 etc/schema-image.json %{buildroot}%{_sysconfdir}/glance/schema-image.json + +# Move metadefs +install -p -D -m 640 etc/metadefs/*.json %{buildroot}%{_sysconfdir}/glance/metadefs/ + +# systemd services +install -p -D -m 644 %{SOURCE1} %{buildroot}%{_unitdir}/openstack-glance-api.service +install -p -D -m 644 %{SOURCE3} %{buildroot}%{_unitdir}/openstack-glance-registry.service +install -p -D -m 644 %{SOURCE4} %{buildroot}%{_unitdir}/openstack-glance-scrubber.service + +# Install pid directory +install -d -m 755 %{buildroot}%{_localstatedir}/run/glance + +# Install log directory +install -d -m 755 %{buildroot}%{_localstatedir}/log/glance + +# Install sudoers +install -p -D -m 440 %{SOURCE30} %{buildroot}%{_sysconfdir}/sudoers.d/glance + +# Symlinks to rootwrap config files +mkdir -p %{buildroot}%{_sysconfdir}/glance/rootwrap.d +for filter in 
%{_datarootdir}/os-brick/rootwrap/*.filters; do + ln -s $filter %{buildroot}%{_sysconfdir}/glance/rootwrap.d +done +for filter in %{_datarootdir}/glance_store/rootwrap/*.filters; do + test -f $filter && ln -s $filter %{buildroot}%{_sysconfdir}/glance/rootwrap.d +done + +# Install i18n .mo files (.po and .pot are not required) +install -d -m 755 %{buildroot}%{_datadir} +rm -f %{buildroot}%{python2_sitelib}/%{service}/locale/*/LC_*/%{service}*po +rm -f %{buildroot}%{python2_sitelib}/%{service}/locale/*pot +mv %{buildroot}%{python2_sitelib}/%{service}/locale %{buildroot}%{_datadir}/locale + +# Find language files +%find_lang %{service} --all-name + +# Cleanup +rm -rf %{buildroot}%{_prefix}%{_sysconfdir} + +%pre +getent group glance >/dev/null || groupadd -r glance -g 161 +getent passwd glance >/dev/null || \ +useradd -u 161 -r -g glance -d %{_sharedstatedir}/glance -s /sbin/nologin \ +-c "OpenStack Glance Daemons" glance +exit 0 + +%post +# Initial installation +%systemd_post openstack-glance-api.service +%systemd_post openstack-glance-registry.service +%systemd_post openstack-glance-scrubber.service + + +%preun +%systemd_preun openstack-glance-api.service +%systemd_preun openstack-glance-registry.service +%systemd_preun openstack-glance-scrubber.service + +%postun +%systemd_postun_with_restart openstack-glance-api.service +%systemd_postun_with_restart openstack-glance-registry.service +%systemd_postun_with_restart openstack-glance-scrubber.service + +%files +%doc README.rst +%{_bindir}/glance-api +%{_bindir}/glance-wsgi-api +%{_bindir}/glance-control +%{_bindir}/glance-manage +%{_bindir}/glance-registry +%{_bindir}/glance-cache-cleaner +%{_bindir}/glance-cache-manage +%{_bindir}/glance-cache-prefetcher +%{_bindir}/glance-cache-pruner +%{_bindir}/glance-scrubber +%{_bindir}/glance-replicator + +%{_datadir}/glance/glance-api-dist.conf +%{_datadir}/glance/glance-cache-dist.conf +%{_datadir}/glance/glance-registry-dist.conf +%{_datadir}/glance/glance-scrubber-dist.conf +%{_datadir}/glance/glance-api-dist-paste.ini +%{_datadir}/glance/glance-registry-dist-paste.ini + +%{_unitdir}/openstack-glance-api.service +%{_unitdir}/openstack-glance-registry.service +%{_unitdir}/openstack-glance-scrubber.service + +#WRS: in-service patching +%{_bindir}/restart-glance + +#WRS: purge cron +%{_bindir}/glance-purge-deleted-active + +%dir %attr(770, root, glance) %{_sysconfdir}/glance +%config(noreplace) %attr(-, root, glance) %{_sysconfdir}/glance/glance-api-paste.ini +%config(noreplace) %attr(-, root, glance) %{_sysconfdir}/glance/glance-registry-paste.ini + +%config(noreplace) %attr(-, root, glance) %{_sysconfdir}/glance/glance-api.conf +%config(noreplace) %attr(-, root, glance) %{_sysconfdir}/glance/glance-cache.conf +%config(noreplace) %attr(-, root, glance) %{_sysconfdir}/glance/glance-registry.conf +%config(noreplace) %attr(-, root, glance) %{_sysconfdir}/glance/glance-scrubber.conf +%config(noreplace) %attr(-, root, glance) %{_sysconfdir}/glance/glance-swift.conf +%config(noreplace) %attr(-, root, glance) %{_sysconfdir}/glance/policy.json +%config(noreplace) %attr(-, root, glance) %{_sysconfdir}/glance/rootwrap.conf +%config(noreplace) %attr(-, root, glance) %{_sysconfdir}/glance/schema-image.json +%config(noreplace) %attr(-, root, glance) %{_sysconfdir}/glance/metadefs/*.json +%{_sysconfdir}/glance/rootwrap.d/ +%dir %attr(0755, glance, nobody) %{_sharedstatedir}/glance +%dir %attr(0750, glance, glance) %{_localstatedir}/log/glance +%config(noreplace) %{_sysconfdir}/sudoers.d/glance + +%files -n 
python-glance -f %{service}.lang
+%doc README.rst
+%{python2_sitelib}/glance
+%{python2_sitelib}/glance-*.egg-info
+%exclude %{python2_sitelib}/glance/tests
+
+%files -n python-%{service}-tests
+%license LICENSE
+%{python2_sitelib}/%{service}/tests
+
+%if 0%{?with_doc}
+%files doc
+%doc doc/build/html
+%endif
+
+%changelog
+* Wed Aug 30 2017 rdo-trunk 1:15.0.0-1
+- Update to 15.0.0
+
+* Fri Aug 25 2017 Alfredo Moralejo 1:15.0.0-0.2.0rc2
+- Update to 15.0.0.0rc2
+
+* Mon Aug 21 2017 Alfredo Moralejo 1:15.0.0-0.1.0rc1
+- Update to 15.0.0.0rc1
+
diff --git a/openstack/python-glanceclient/centos/build_srpm.data b/openstack/python-glanceclient/centos/build_srpm.data
new file mode 100644
index 00000000..fa24455a
--- /dev/null
+++ b/openstack/python-glanceclient/centos/build_srpm.data
@@ -0,0 +1,5 @@
+TAR_NAME=python-glanceclient
+SRC_DIR="$CGCS_BASE/git/python-glanceclient"
+COPY_LIST="$FILES_BASE/*"
+TIS_BASE_SRCREV=13b25ff1fed908cfe7b4e719a97efd7121e3be96
+TIS_PATCH_VER=GITREVCOUNT
diff --git a/openstack/python-glanceclient/centos/files/image-backup.sh b/openstack/python-glanceclient/centos/files/image-backup.sh
new file mode 100644
index 00000000..e99ec038
--- /dev/null
+++ b/openstack/python-glanceclient/centos/files/image-backup.sh
@@ -0,0 +1,212 @@
+#!/bin/bash
+#
+# Copyright (c) 2016 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+backup_dir="/opt/backups"
+tmp_dir="${backup_dir}/image_temp"
+
+function usage {
+cat <<"EOF"
+Helper tool for backing up Glance images
+Usage:
+   image-backup export <uuid>           - export the image with <uuid> into the backup
+                                          file /opt/backups/image_<uuid>.tgz
+   image-backup import image_<uuid>.tgz - import the image from the backup source file
+                                          at /opt/backups/image_<uuid>.tgz into the
+                                          corresponding Glance image.
+
+Temporary files are stored in /opt/backups/image_temp
+
+Please consult the System Backups section of the Administration Guide.
+EOF
+}
+
+function create_tmp {
+    if [ ! -d ${backup_dir} ]; then
+        echo "Error: backup directory ${backup_dir} does not exist"
+        exit 1
+    fi
+    # Create temporary directory
+    if [ ! -d ${tmp_dir} ]; then
+        mkdir ${tmp_dir}
+    fi
+}
+
+function remove_tmp {
+    # Remove temporary files and directory if not empty
+    local uuid=$1
+    rm -f ${tmp_dir}/${uuid}*
+    rmdir --ignore-fail-on-non-empty ${tmp_dir} &>/dev/null
+}
+
+function export_file_from_rbd_image {
+    local file=$1
+    rbd export -p images ${file} ${tmp_dir}/${file}
+    if [ $? -ne 0 ]; then
+        echo "Error: Failed to export image ${file} from Ceph images pool, please check status of storage cluster"
+        remove_tmp; exit 1
+    fi
+}
+
+function export_image {
+    local uuid=$1
+
+    # Check if the corresponding image is present in the RBD pool
+    rbd -p images ls | grep -q -e "^${uuid}$"
+    if [ $? -ne 0 ]; then
+        echo "Error: Corresponding file for image with id: ${uuid} was not found in the RBD images pool"
+        remove_tmp; exit 1
+    fi
+
+    # Export original image
+    export_file_from_rbd_image ${uuid}
+
+    # Export raw cache if present
+    rbd -p images ls | grep -q ${uuid}_raw
+    if [ $? -eq 0 ]; then
+        export_file_from_rbd_image ${uuid}_raw
+        raw="${uuid}_raw"
+    fi
+
+    echo -n "Creating backup archive..."
+    archive="${backup_dir}/image_${uuid}.tgz"
+    tar czf ${archive} -C ${tmp_dir} ${uuid} ${raw}
+    if [ $? -ne 0 ]; then
+        echo "Error: Failed to create archive ${archive}"
+        remove_tmp; exit 1
+    else
+        echo "done"
+    fi
+
+    echo "Backup archive ${archive} created"
+}
+
+function import_file_to_rbd_image {
+    local file=$1
+    local snap="images/${file}@snap"
+    rbd import --image-format 2 ${tmp_dir}/${file} images/${file}
+    if [ $? -ne 0 ]; then
+        echo "Error: Failed to import image ${file} into Ceph images pool, please check status of storage cluster"
+        remove_tmp; exit 1
+    fi
+    rbd snap create ${snap} 1>/dev/null
+    if [ $? -ne 0 ]; then
+        echo "Error: Failed to create snapshot ${snap}, please check status of storage cluster"
+        remove_tmp; exit 1
+    fi
+    rbd snap protect ${snap} 1>/dev/null
+    if [ $? -ne 0 ]; then
+        echo "Error: Failed to protect snapshot ${snap}, please check status of storage cluster"
+        remove_tmp; exit 1
+    fi
+}
+
+function import_image {
+    local uuid=$1
+
+    # Storage cluster must be healthy before starting the import
+    if [ ! "$(ceph health)" = "HEALTH_OK" ]; then
+        echo "Error: The storage cluster health must be HEALTH_OK before proceeding"
+        remove_tmp; exit 1
+    fi
+
+    # Check if the corresponding image is already present in the RBD pool
+    rbd -p images ls | grep -q -e "^${uuid}$"
+    if [ $? -eq 0 ]; then
+        echo "Error: Image with id: ${uuid} is already imported"
+        remove_tmp; exit 1
+    fi
+
+    # Import original image
+    import_file_to_rbd_image ${uuid}
+
+    # Import raw cache
+    if [ -f "${tmp_dir}/${uuid}_raw" ]; then
+        import_file_to_rbd_image ${uuid}_raw
+    fi
+}
+
+if [ $EUID -ne 0 ]; then
+    echo "This script must be executed as root"
+    exit 1
+fi
+
+if [ $# -ne 2 ]; then
+    usage
+    exit 0
+fi
+
+source /etc/nova/openrc
+
+# Check that Glance is configured with the Ceph RBD backend
+grep -q -e "^stores.*=.*rbd" /etc/glance/glance-api.conf
+if [ $? -ne 0 ]; then
+    echo "Error: Glance is not configured to use the Ceph backend."
+    echo "This command should be used only on setups with configured Ceph storage."
+    exit 1
+fi
+
+if [ "$1" = "export" ]; then
+    # Check that the image is present in Glance
+    glance image-list | tail -n +3 | awk '{print $2}' | grep -q $2
+    if [ $? -ne 0 ]; then
+        echo "Error: Glance image with id: $2 not found. Please try with an existing image id."
+        remove_tmp; exit 1
+    fi
+
+    # Only allow backup of images that use RBD as the backend.
+    glance image-show $2 | grep 'direct_url' | awk '{print $4}' | grep -q '^rbd://'
+    if [ $? -ne 0 ]; then
+        echo "Image with id: $2 is not stored in Ceph RBD. Backup using the image-backup tool is not needed."
+        echo "Please consult the Software Management Manual for more details."
+        remove_tmp; exit 1
+    fi
+
+    create_tmp
+    export_image $2
+    remove_tmp
+
+elif [ "$1" = "import" ]; then
+    # Check that the input file name format is correct
+    if [[ ! $2 =~ ^image_.*\.tgz$ ]]; then
+        echo "Error: Source file name must conform to the image_<uuid>.tgz format"
+        exit 1
+    fi
+
+    # Check that the source file exists
+    if [ ! -f ${backup_dir}/$2 ]; then
+        echo "Error: File $2 does not exist in ${backup_dir}"
+        exit 1
+    fi
+
+    # Get the Glance uuid from the filename
+    uuid=$(echo $2 | sed "s/^image_\(.*\)\.tgz/\1/g")
+
+    # Check that Glance has this image in the database
+    glance image-list | grep -q $uuid
+    if [ $? -ne 0 ]; then
+        echo "Error: Glance image with id: ${uuid} not found. Please try with an existing image id."
+        exit 1
+    fi
+
+    create_tmp
+
+    # Extract the files that need to be imported into the temp directory
+    echo -n "Extracting files..."
+    tar xfz ${backup_dir}/$2 -C ${tmp_dir} 1>/dev/null
+    if [ $?
-ne 0 ]; then + echo "Error: Failed to extract archive ${backup_dir}/$2 into ${tmp_dir}." + remove_tmp; exit 1 + fi + echo "done" + + # Importing images into RBD + import_image $uuid + remove_tmp +else + usage +fi + diff --git a/openstack/python-glanceclient/centos/python-glanceclient.spec b/openstack/python-glanceclient/centos/python-glanceclient.spec new file mode 100644 index 00000000..a8f4b841 --- /dev/null +++ b/openstack/python-glanceclient/centos/python-glanceclient.spec @@ -0,0 +1,187 @@ +%{!?upstream_version: %global upstream_version %{version}%{?milestone}} + +%global sname glanceclient + +%if 0%{?fedora} +%global with_python3 1 +%endif + +Name: python-glanceclient +Epoch: 1 +Version: 2.8.0 +Release: 1%{?_tis_dist}.%{tis_patch_ver} +Summary: Python API and CLI for OpenStack Glance + +License: ASL 2.0 +URL: https://launchpad.net/python-glanceclient +Source0: https://tarballs.openstack.org/%{name}/%{name}-%{version}.tar.gz +#WRS +Source1: image-backup.sh + +BuildArch: noarch + +BuildRequires: git + +%description +This is a client for the OpenStack Glance API. There's a Python API (the +glanceclient module), and a command-line script (glance). Each implements +100% of the OpenStack Glance API. + +%package -n python2-%{sname} +Summary: Python API and CLI for OpenStack Glance +%{?python_provide:%python_provide python2-glanceclient} + +BuildRequires: python2-devel +BuildRequires: python-setuptools +BuildRequires: python-pbr + +Requires: python-keystoneauth1 >= 3.1.0 +Requires: python-oslo-i18n >= 2.1.0 +Requires: python-oslo-utils >= 3.20.0 +Requires: python-pbr +Requires: python-prettytable +Requires: pyOpenSSL >= 0.14 +Requires: python-requests +Requires: python-six >= 1.9.0 +Requires: python-warlock +Requires: python-wrapt + +%description -n python2-%{sname} +This is a client for the OpenStack Glance API. There's a Python API (the +glanceclient module), and a command-line script (glance). Each implements +100% of the OpenStack Glance API. + +%if 0%{?with_python3} +%package -n python3-%{sname} +Summary: Python API and CLI for OpenStack Glance +%{?python_provide:%python_provide python3-glanceclient} + +BuildRequires: python3-devel +BuildRequires: python3-setuptools +BuildRequires: python3-pbr + +Requires: python3-keystoneauth1 >= 3.1.0 +Requires: python3-oslo-i18n >= 2.1.0 +Requires: python3-oslo-utils >= 3.20.0 +Requires: python3-pbr +Requires: python3-prettytable +Requires: python3-pyOpenSSL >= 0.14 +Requires: python3-requests +Requires: python3-six >= 1.9.0 +Requires: python3-warlock +Requires: python3-wrapt + +%description -n python3-%{sname} +This is a client for the OpenStack Glance API. There's a Python API (the +glanceclient module), and a command-line script (glance). Each implements +100% of the OpenStack Glance API. +%endif + +%package doc +Summary: Documentation for OpenStack Glance API Client + +BuildRequires: python-sphinx +BuildRequires: python-openstackdocstheme +BuildRequires: python-keystoneauth1 +BuildRequires: python-oslo-utils +BuildRequires: python-prettytable +BuildRequires: python-warlock +BuildRequires: pyOpenSSL >= 0.14 + +%description doc +This is a client for the OpenStack Glance API. There's a Python API (the +glanceclient module), and a command-line script (glance). Each implements +100% of the OpenStack Glance API. + +This package contains auto-generated documentation. 
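+
+# Illustrative usage (not part of the packaging; the UUID is a placeholder):
+# the CLI and the WRS image-backup helper installed by this spec can be
+# exercised after installation with, e.g.:
+#   glance image-list
+#   image-backup export 00000000-0000-0000-0000-000000000000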
+ +%package sdk +Summary: SDK files for %{name} + +%description sdk +Contains SDK files for %{name} package + + +%prep +%autosetup -n %{name}-%{upstream_version} -S git + +rm -rf test-requirements.txt + +%build +export PBR_VERSION=%{version} +%py2_build +%if 0%{?with_python3} +%py3_build +%endif + +%install +export PBR_VERSION=%{version} +%if 0%{?with_python3} +%py3_install +mv %{buildroot}%{_bindir}/glance %{buildroot}%{_bindir}/glance-%{python3_version} +ln -s ./glance-%{python3_version} %{buildroot}%{_bindir}/glance-3 +# Delete tests +rm -fr %{buildroot}%{python3_sitelib}/glanceclient/tests +%endif + +%py2_install +mv %{buildroot}%{_bindir}/glance %{buildroot}%{_bindir}/glance-%{python2_version} +ln -s ./glance-%{python2_version} %{buildroot}%{_bindir}/glance-2 + +ln -s ./glance-2 %{buildroot}%{_bindir}/glance + +mkdir -p %{buildroot}%{_sysconfdir}/bash_completion.d +install -pm 644 tools/glance.bash_completion \ + %{buildroot}%{_sysconfdir}/bash_completion.d/glance +install -p -D -m 500 %{SOURCE1} %{buildroot}/sbin/image-backup + +# Delete tests +rm -fr %{buildroot}%{python2_sitelib}/glanceclient/tests + +export PYTHONPATH="$( pwd ):$PYTHONPATH" +sphinx-build -b html doc/source html + +# generate man page +sphinx-build -b man doc/source man +install -p -D -m 644 man/glance.1 %{buildroot}%{_mandir}/man1/glance.1 + +# prep SDK package +mkdir -p %{buildroot}/usr/share/remote-clients/%{name} +tar zcf %{buildroot}/usr/share/remote-clients/%{name}/%{name}-%{version}.tgz --exclude='.gitignore' --exclude='.gitreview' -C .. %{name}-%{version} + + +%files -n python2-%{sname} +%doc README.rst +%license LICENSE +%{python2_sitelib}/glanceclient +%{python2_sitelib}/*.egg-info +%{_sysconfdir}/bash_completion.d +%{_mandir}/man1/glance.1.gz +"/sbin/image-backup" +%{_bindir}/glance +%{_bindir}/glance-2 +%{_bindir}/glance-%{python2_version} + +%if 0%{?with_python3} +%files -n python3-%{sname} +%license LICENSE +%doc README.rst +%{python3_sitelib}/%{sname} +%{python3_sitelib}/*.egg-info +%{_sysconfdir}/bash_completion.d +%{_mandir}/man1/glance.1.gz +%{_bindir}/glance-3 +%{_bindir}/glance-%{python3_version} +%endif + +%files doc +%doc html +%license LICENSE + +%files sdk +/usr/share/remote-clients/%{name}/%{name}-%{version}.tgz + +%changelog +* Fri Aug 11 2017 Alfredo Moralejo 1:2.8.0-1 +- Update to 2.8.0 + diff --git a/openstack/python-heat/openstack-heat/centos/build_srpm.data b/openstack/python-heat/openstack-heat/centos/build_srpm.data new file mode 100644 index 00000000..3185498f --- /dev/null +++ b/openstack/python-heat/openstack-heat/centos/build_srpm.data @@ -0,0 +1,7 @@ +SRC_DIR="$CGCS_BASE/git/heat" +COPY_LIST="$FILES_BASE/*" +TAR_NAME=openstack-heat + +# SRCREV is the last upstream commit +TIS_BASE_SRCREV=be1e2e9fa5b44fb186d1705e0fefff4b8eec2233 +TIS_PATCH_VER=GITREVCOUNT diff --git a/openstack/python-heat/openstack-heat/centos/files/heat-dist.conf b/openstack/python-heat/openstack-heat/centos/files/heat-dist.conf new file mode 100644 index 00000000..714f7ae7 --- /dev/null +++ b/openstack/python-heat/openstack-heat/centos/files/heat-dist.conf @@ -0,0 +1,35 @@ +[DEFAULT] +sql_connection = mysql://heat:heat@localhost/heat +db_backend = heat.db.sqlalchemy.api +log_dir = /var/log/heat +use_stderr = False + +[keystone_authtoken] +auth_host = 127.0.0.1 +auth_port = 35357 +auth_protocol = http +auth_uri = http://127.0.0.1:5000/v2.0 +signing_dir = /tmp/keystone-signing-heat + +[ssl] + +[database] + +[paste_deploy] +api_paste_config = /usr/share/heat/api-paste-dist.ini + +[rpc_notifier2] + 
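+# Illustrative override (values are placeholders): deployments typically
+# replace the sql_connection default above through /etc/heat/heat.conf, e.g.:
+#   sql_connection = postgresql://heat:HEAT_DBPASS@localhost/heat
+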
+[ec2authtoken] + +[heat_api_cloudwatch] + +[heat_api] + +[heat_api_cfn] + +[auth_password] + +[matchmaker_ring] + +[matchmaker_redis] diff --git a/openstack/python-heat/openstack-heat/centos/files/heat-purge-deleted-active b/openstack/python-heat/openstack-heat/centos/files/heat-purge-deleted-active new file mode 100644 index 00000000..784d61e4 --- /dev/null +++ b/openstack/python-heat/openstack-heat/centos/files/heat-purge-deleted-active @@ -0,0 +1,69 @@ +#!/bin/bash + +# +# Wrapper script to run heat-manage purge_deleted on active controller only +# +HEAT_PURGE_INFO="/var/run/heat-purge.info" +HEAT_STACK_PURGE_CMD="/usr/bin/nice -n 2 /usr/bin/heat-manage --config-file=/etc/heat/heat.conf purge_deleted -g hours 1" +HEAT_EVENT_PURGE_CMD="/usr/bin/nice -n 2 /usr/bin/heat-manage --config-file=/etc/heat/heat.conf expire_events" + +function is_active_pgserver() +{ + # Determine whether we're running on the same controller as the service. + local service=postgres + local enabledactive=$(/usr/bin/sm-query service $service| grep enabled-active) + if [ "x$enabledactive" == "x" ] + then + # enabled-active not found for that service on this controller + return 1 + else + # enabled-active found for that resource + return 0 + fi +} + +if is_active_pgserver +then + if [ ! -f ${HEAT_PURGE_INFO} ] + then + echo delay_count=0 > ${HEAT_PURGE_INFO} + fi + + source ${HEAT_PURGE_INFO} + sudo -u postgres psql -d sysinv -c "SELECT alarm_id, entity_instance_id from i_alarm;" | grep -P "^(?=.*100.101)(?=.*${HOSTNAME})" &>/dev/null + if [ $? -eq 0 ] + then + source /etc/platform/platform.conf + if [ "${system_type}" = "All-in-one" ] + then + source /etc/init.d/task_affinity_functions.sh + idle_core=$(get_most_idle_core) + if [ "$idle_core" -ne "0" ] + then + # Purge soft deleted records that are older than one hour and events based on + # max_events in config from heat database. + sh -c "exec taskset -c $idle_core ${HEAT_STACK_PURGE_CMD}" + sh -c "exec taskset -c $idle_core ${HEAT_EVENT_PURGE_CMD}" + sed -i "/delay_count/s/=.*/=0/" ${HEAT_PURGE_INFO} + exit 0 + fi + fi + + if [ "$delay_count" -lt "3" ] + then + newval=$(($delay_count+1)) + sed -i "/delay_count/s/=.*/=$newval/" ${HEAT_PURGE_INFO} + (sleep 3600; /usr/bin/heat-purge-deleted-active) & + exit 0 + fi + + fi + + # Purge soft deleted records that are older than one hour and events based on max_events + # in config from heat database. + eval ${HEAT_STACK_PURGE_CMD} + eval ${HEAT_EVENT_PURGE_CMD} + sed -i "/delay_count/s/=.*/=0/" ${HEAT_PURGE_INFO} +fi + +exit 0 diff --git a/openstack/python-heat/openstack-heat/centos/files/heat.conf.sample b/openstack/python-heat/openstack-heat/centos/files/heat.conf.sample new file mode 100644 index 00000000..a15b7637 --- /dev/null +++ b/openstack/python-heat/openstack-heat/centos/files/heat.conf.sample @@ -0,0 +1,1375 @@ +[DEFAULT] + +# +# From heat.api.middleware.ssl +# + +# The HTTP Header that will be used to determine which the original request +# protocol scheme was, even if it was removed by an SSL terminator proxy. +# (string value) +#secure_proxy_ssl_header = X-Forwarded-Proto + +# +# From heat.common.config +# + +# Name of the engine node. This can be an opaque identifier. It is not +# necessarily a hostname, FQDN, or IP address. (string value) +#host = gig3 + +# +# From heat.common.config +# + +# The default user for new instances. This option is deprecated and will be +# removed in the Juno release. 
If it's empty, Heat will use the default user +# set up with your cloud image (for OS::Nova::Server) or 'ec2-user' (for +# AWS::EC2::Instance). (string value) +#instance_user = ec2-user + +# List of directories to search for plug-ins. (list value) +#plugin_dirs = /usr/lib64/heat,/usr/lib/heat,/usr/local/lib/heat,/usr/local/lib64/heat + +# The directory to search for environment files. (string value) +#environment_dir = /etc/heat/environment.d + +# Select deferred auth method, stored password or trusts. (string value) +# Allowed values: password, trusts +#deferred_auth_method = trusts + +# Subset of trustor roles to be delegated to heat. If left unset, all roles of +# a user will be delegated to heat when creating a stack. (list value) +#trusts_delegated_roles = + +# Maximum resources allowed per top-level stack. (integer value) +#max_resources_per_stack = 1000 + +# Maximum number of stacks any one tenant may have active at one time. (integer +# value) +#max_stacks_per_tenant = 100 + +# Number of times to retry to bring a resource to a non-error state. Set to 0 +# to disable retries. (integer value) +#action_retry_limit = 5 + +# Controls how many events will be pruned whenever a stack's events exceed +# max_events_per_stack. Set this lower to keep more events at the expense of +# more frequent purges. (integer value) +#event_purge_batch_size = 10 + +# Maximum events that will be available per stack. Older events will be deleted +# when this is reached. Set to 0 for unlimited events per stack. (integer +# value) +#max_events_per_stack = 1000 + +# Timeout in seconds for stack action (ie. create or update). (integer value) +#stack_action_timeout = 3600 + +# Error wait time in seconds for stack action (ie. create or update). (integer +# value) +#error_wait_time = 240 + +# RPC timeout for the engine liveness check that is used for stack locking. +# (integer value) +#engine_life_check_timeout = 2 + +# Enable the legacy OS::Heat::CWLiteAlarm resource. (boolean value) +#enable_cloud_watch_lite = true + +# Enable the preview Stack Abandon feature. (boolean value) +#enable_stack_abandon = false + +# Enable the preview Stack Adopt feature. (boolean value) +#enable_stack_adopt = false + +# Enables engine with convergence architecture. All stacks with this option +# will be created using convergence engine . (boolean value) +#convergence_engine = false + +# Template default for how the server should receive the metadata required for +# software configuration. POLL_SERVER_CFN will allow calls to the cfn API +# action DescribeStackResource authenticated with the provided keypair +# (requires enabled heat-api-cfn). POLL_SERVER_HEAT will allow calls to the +# Heat API resource-show using the provided keystone credentials (requires +# keystone v3 API, and configured stack_user_* config options). POLL_TEMP_URL +# will create and populate a Swift TempURL with metadata for polling (requires +# object-store endpoint which supports TempURL). (string value) +# Allowed values: POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL +#default_software_config_transport = POLL_SERVER_CFN + +# Template default for how the server should signal to heat with the deployment +# output values. CFN_SIGNAL will allow an HTTP POST to a CFN keypair signed URL +# (requires enabled heat-api-cfn). TEMP_URL_SIGNAL will create a Swift TempURL +# to be signaled via HTTP PUT (requires object-store endpoint which supports +# TempURL). 
HEAT_SIGNAL will allow calls to the Heat API resource-signal using +# the provided keystone credentials (string value) +# Allowed values: CFN_SIGNAL, TEMP_URL_SIGNAL, HEAT_SIGNAL +#default_deployment_signal_transport = CFN_SIGNAL + +# Deprecated. (string value) +#onready = + +# When this feature is enabled, scheduler hints identifying the heat stack +# context of a server resource are passed to the configured schedulers in nova, +# for server creates done using heat resource types OS::Nova::Server and +# AWS::EC2::Instance. heat_root_stack_id will be set to the id of the root +# stack of the resource, heat_stack_id will be set to the id of the resource's +# parent stack, heat_stack_name will be set to the name of the resource's +# parent stack, heat_path_in_stack will be set to a list of tuples, +# (stackresourcename, stackname) with list[0] being (None, rootstackname), and +# heat_resource_name will be set to the resource's name. (boolean value) +#stack_scheduler_hints = false + +# +# From heat.common.config +# + +# Seconds between running periodic tasks. (integer value) +#periodic_interval = 60 + +# URL of the Heat metadata server. (string value) +#heat_metadata_server_url = + +# URL of the Heat waitcondition server. (string value) +#heat_waitcondition_server_url = + +# URL of the Heat CloudWatch server. (string value) +#heat_watch_server_url = + +# Instance connection to CFN/CW API via https. (string value) +#instance_connection_is_secure = 0 + +# Instance connection to CFN/CW API validate certs if SSL is used. (string +# value) +#instance_connection_https_validate_certificates = 1 + +# Default region name used to get services endpoints. (string value) +#region_name_for_services = + +# Keystone role for heat template-defined users. (string value) +#heat_stack_user_role = heat_stack_user + +# Keystone domain ID which contains heat template-defined users. If this option +# is set, stack_user_domain_name option will be ignored. (string value) +# Deprecated group/name - [DEFAULT]/stack_user_domain +#stack_user_domain_id = + +# Keystone domain name which contains heat template-defined users. If +# `stack_user_domain_id` option is set, this option is ignored. (string value) +#stack_user_domain_name = + +# Keystone username, a user with roles sufficient to manage users and projects +# in the stack_user_domain. (string value) +#stack_domain_admin = + +# Keystone password for stack_domain_admin user. (string value) +#stack_domain_admin_password = + +# Maximum raw byte size of any template. (integer value) +#max_template_size = 524288 + +# Maximum depth allowed when using nested stacks. (integer value) +#max_nested_stack_depth = 5 + +# Number of heat-engine processes to fork and run. (integer value) +#num_engine_workers = 4 + +# +# From heat.common.crypt +# + +# Key used to encrypt authentication info in the database. Length of this key +# must be 16, 24 or 32 characters. (string value) +#auth_encryption_key = notgood but just long enough i t + +# +# From heat.common.heat_keystoneclient +# + +# Fully qualified class name to use as a keystone backend. (string value) +#keystone_backend = heat.common.heat_keystoneclient.KeystoneClientV3 + +# +# From heat.common.wsgi +# + +# Maximum raw byte size of JSON request body. Should be larger than +# max_template_size. (integer value) +#max_json_body_size = 1048576 + +# +# From heat.engine.clients +# + +# Fully qualified class name to use as a client backend. 
(string value)
+#cloud_backend = heat.engine.clients.OpenStackClients
+
+#
+# From heat.engine.notification
+#
+
+# Default notification level for outgoing notifications. (string value)
+#default_notification_level = INFO
+
+# Default publisher_id for outgoing notifications. (string value)
+#default_publisher_id =
+
+# List of drivers to send notifications (DEPRECATED). (multi valued)
+#list_notifier_drivers =
+
+#
+# From heat.engine.resources
+#
+
+# Custom template for the built-in loadbalancer nested stack. (string value)
+#loadbalancer_template =
+
+#
+# From heat.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>, and
+# <start>:<end>, where 0 results in listening on a random tcp port number;
+# <port> results in listening on the specified port number (and not enabling
+# backdoor if that port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range of port numbers.
+# The chosen port is displayed in the service's log file. (string value)
+#backdoor_port =
+
+#
+# From heat.openstack.common.policy
+#
+
+# The JSON file that defines policies. (string value)
+#policy_file = policy.json
+
+# Default rule. Enforced when a requested rule is not found. (string value)
+#policy_default_rule = default
+
+# Directories where policy configuration files are stored. They can be relative
+# to any directory in the search path defined by the config_dir option, or
+# absolute paths. The file defined by policy_file must exist for these
+# directories to be searched. (multi valued)
+#policy_dirs = policy.d
+
+#
+# From oslo.log
+#
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING
+# level). (boolean value)
+#debug = false
+
+# Print more verbose output (set logging level to INFO instead of default
+# WARNING level). (boolean value)
+#verbose = false
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append =
+
+# DEPRECATED. A logging.Formatter log message format string which may use any
+# of the available logging.LogRecord attributes. This option is deprecated.
+# Please use logging_context_format_string and logging_default_format_string
+# instead. (string value)
+#log_format =
+
+# Format string for %%(asctime)s in log records. Default: %(default)s . (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is set, logging will
+# go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file =
+
+# (Optional) The base directory used for relative --log-file paths. (string
+# value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir =
+
+# Use syslog for logging. Existing syslog format is DEPRECATED during I, and
+# will change in J to honor RFC5424. (boolean value)
+#use_syslog = false
+
+# (Optional) Enables or disables syslog rfc5424 format for logging. If enabled,
+# prefixes the MSG part of the syslog message with APP-NAME (RFC5424). The
+# format without the APP-NAME is deprecated in I, and will be removed in J.
+# (boolean value)
+#use_syslog_rfc_format = false
+
+# Syslog facility to receive log lines. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error.
(boolean value) +#use_stderr = true + +# Format string to use for log messages with context. (string value) +#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# Format string to use for log messages without context. (string value) +#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# Data to append to log format when level is DEBUG. (string value) +#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d + +# Prefix each line of exception output with this format. (string value) +#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s + +# List of logger=LEVEL pairs. (list value) +#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN + +# Enables or disables publication of error events. (boolean value) +#publish_errors = false + +# Enables or disables fatal status of deprecations. (boolean value) +#fatal_deprecations = false + +# The format for an instance that is passed with the log message. (string +# value) +#instance_format = "[instance: %(uuid)s] " + +# The format for an instance UUID that is passed with the log message. (string +# value) +#instance_uuid_format = "[instance: %(uuid)s] " + +# +# From oslo.messaging +# + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. +# The "host" option should point or resolve to this address. (string value) +#rpc_zmq_bind_address = * + +# MatchMaker driver. (string value) +#rpc_zmq_matchmaker = oslo_messaging._drivers.matchmaker.MatchMakerLocalhost + +# ZeroMQ receiver listening port. (integer value) +#rpc_zmq_port = 9501 + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts = 1 + +# Maximum number of ingress messages to locally buffer per topic. Default is +# unlimited. (integer value) +#rpc_zmq_topic_backlog = + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir = /var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match +# "host" option, if running Nova. (string value) +#rpc_zmq_host = localhost + +# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. +# (integer value) +#rpc_cast_timeout = 30 + +# Heartbeat frequency. (integer value) +#matchmaker_heartbeat_freq = 300 + +# Heartbeat time-to-live. (integer value) +#matchmaker_heartbeat_ttl = 600 + +# Size of RPC thread pool. (integer value) +#rpc_thread_pool_size = 64 + +# Driver or drivers to handle sending notifications. (multi valued) +#notification_driver = + +# AMQP topic used for OpenStack notifications. (list value) +# Deprecated group/name - [rpc_notifier2]/topics +#notification_topics = notifications + +# Seconds to wait for a response from a call. (integer value) +#rpc_response_timeout = 60 + +# A URL representing the messaging driver to use and its full configuration. If +# not set, we fall back to the rpc_backend option and driver specific +# configuration. (string value) +#transport_url = + +# The messaging driver to use, defaults to rabbit. Other drivers include qpid +# and zmq. 
(string value) +#rpc_backend = rabbit + +# The default exchange under which topics are scoped. May be overridden by an +# exchange name specified in the transport_url option. (string value) +#control_exchange = openstack + + +[auth_password] + +# +# From heat.common.config +# + +# Allow orchestration of multiple clouds. (boolean value) +#multi_cloud = false + +# Allowed keystone endpoints for auth_uri when multi_cloud is enabled. At least +# one endpoint needs to be specified. (list value) +#allowed_auth_uris = + + +[clients] + +# +# From heat.common.config +# + +# Type of endpoint in Identity service catalog to use for communication with +# the OpenStack service. (string value) +#endpoint_type = publicURL + +# Optional CA cert file to use in SSL connections. (string value) +#ca_file = + +# Optional PEM-formatted certificate chain file. (string value) +#cert_file = + +# Optional PEM-formatted file that contains the private key. (string value) +#key_file = + +# If set, then the server's certificate will not be verified. (boolean value) +#insecure = false + + +[clients_ceilometer] + +# +# From heat.common.config +# + +# Type of endpoint in Identity service catalog to use for communication with +# the OpenStack service. (string value) +#endpoint_type = + +# Optional CA cert file to use in SSL connections. (string value) +#ca_file = + +# Optional PEM-formatted certificate chain file. (string value) +#cert_file = + +# Optional PEM-formatted file that contains the private key. (string value) +#key_file = + +# If set, then the server's certificate will not be verified. (boolean value) +#insecure = + + +[clients_cinder] + +# +# From heat.common.config +# + +# Type of endpoint in Identity service catalog to use for communication with +# the OpenStack service. (string value) +#endpoint_type = + +# Optional CA cert file to use in SSL connections. (string value) +#ca_file = + +# Optional PEM-formatted certificate chain file. (string value) +#cert_file = + +# Optional PEM-formatted file that contains the private key. (string value) +#key_file = + +# If set, then the server's certificate will not be verified. (boolean value) +#insecure = + +# +# From heat.common.config +# + +# Allow client's debug log output. (boolean value) +#http_log_debug = false + + +[clients_glance] + +# +# From heat.common.config +# + +# Type of endpoint in Identity service catalog to use for communication with +# the OpenStack service. (string value) +#endpoint_type = + +# Optional CA cert file to use in SSL connections. (string value) +#ca_file = + +# Optional PEM-formatted certificate chain file. (string value) +#cert_file = + +# Optional PEM-formatted file that contains the private key. (string value) +#key_file = + +# If set, then the server's certificate will not be verified. (boolean value) +#insecure = + + +[clients_heat] + +# +# From heat.common.config +# + +# Type of endpoint in Identity service catalog to use for communication with +# the OpenStack service. (string value) +#endpoint_type = + +# Optional CA cert file to use in SSL connections. (string value) +#ca_file = + +# Optional PEM-formatted certificate chain file. (string value) +#cert_file = + +# Optional PEM-formatted file that contains the private key. (string value) +#key_file = + +# If set, then the server's certificate will not be verified. (boolean value) +#insecure = + +# +# From heat.common.config +# + +# Optional heat url in format like http://0.0.0.0:8004/v1/%(tenant_id)s. 
+# (string value) +#url = + + +[clients_keystone] + +# +# From heat.common.config +# + +# Type of endpoint in Identity service catalog to use for communication with +# the OpenStack service. (string value) +#endpoint_type = + +# Optional CA cert file to use in SSL connections. (string value) +#ca_file = + +# Optional PEM-formatted certificate chain file. (string value) +#cert_file = + +# Optional PEM-formatted file that contains the private key. (string value) +#key_file = + +# If set, then the server's certificate will not be verified. (boolean value) +#insecure = + + +[clients_neutron] + +# +# From heat.common.config +# + +# Type of endpoint in Identity service catalog to use for communication with +# the OpenStack service. (string value) +#endpoint_type = + +# Optional CA cert file to use in SSL connections. (string value) +#ca_file = + +# Optional PEM-formatted certificate chain file. (string value) +#cert_file = + +# Optional PEM-formatted file that contains the private key. (string value) +#key_file = + +# If set, then the server's certificate will not be verified. (boolean value) +#insecure = + + +[clients_nova] + +# +# From heat.common.config +# + +# Type of endpoint in Identity service catalog to use for communication with +# the OpenStack service. (string value) +#endpoint_type = + +# Optional CA cert file to use in SSL connections. (string value) +#ca_file = + +# Optional PEM-formatted certificate chain file. (string value) +#cert_file = + +# Optional PEM-formatted file that contains the private key. (string value) +#key_file = + +# If set, then the server's certificate will not be verified. (boolean value) +#insecure = + +# +# From heat.common.config +# + +# Allow client's debug log output. (boolean value) +#http_log_debug = false + + +[clients_sahara] + +# +# From heat.common.config +# + +# Type of endpoint in Identity service catalog to use for communication with +# the OpenStack service. (string value) +#endpoint_type = + +# Optional CA cert file to use in SSL connections. (string value) +#ca_file = + +# Optional PEM-formatted certificate chain file. (string value) +#cert_file = + +# Optional PEM-formatted file that contains the private key. (string value) +#key_file = + +# If set, then the server's certificate will not be verified. (boolean value) +#insecure = + + +[clients_swift] + +# +# From heat.common.config +# + +# Type of endpoint in Identity service catalog to use for communication with +# the OpenStack service. (string value) +#endpoint_type = + +# Optional CA cert file to use in SSL connections. (string value) +#ca_file = + +# Optional PEM-formatted certificate chain file. (string value) +#cert_file = + +# Optional PEM-formatted file that contains the private key. (string value) +#key_file = + +# If set, then the server's certificate will not be verified. (boolean value) +#insecure = + + +[clients_trove] + +# +# From heat.common.config +# + +# Type of endpoint in Identity service catalog to use for communication with +# the OpenStack service. (string value) +#endpoint_type = + +# Optional CA cert file to use in SSL connections. (string value) +#ca_file = + +# Optional PEM-formatted certificate chain file. (string value) +#cert_file = + +# Optional PEM-formatted file that contains the private key. (string value) +#key_file = + +# If set, then the server's certificate will not be verified. (boolean value) +#insecure = + + +[database] + +# +# From oslo.db +# + +# The file name to use with SQLite. 
(string value) +# Deprecated group/name - [DEFAULT]/sqlite_db +#sqlite_db = oslo.sqlite + +# If True, SQLite uses synchronous mode. (boolean value) +# Deprecated group/name - [DEFAULT]/sqlite_synchronous +#sqlite_synchronous = true + +# The back end to use for the database. (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend = sqlalchemy + +# The SQLAlchemy connection string to use to connect to the database. (string +# value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection = + +# The SQLAlchemy connection string to use to connect to the slave database. +# (string value) +#slave_connection = + +# The SQL mode to be used for MySQL sessions. This option, including the +# default, overrides any server-set SQL mode. To use whatever SQL mode is set +# by the server configuration, set this to no value. Example: mysql_sql_mode= +# (string value) +#mysql_sql_mode = TRADITIONAL + +# Timeout before idle SQL connections are reaped. (integer value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout = 3600 + +# Minimum number of SQL connections to keep open in a pool. (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size = + +# Maximum number of database connection retries during startup. Set to -1 to +# specify an infinite retry count. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries = 10 + +# Interval between retries of opening a SQL connection. (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval = 10 + +# If set, use this value for max_overflow with SQLAlchemy. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow = + +# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug = 0 + +# Add Python stack traces to SQL as comment strings. (boolean value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace = false + +# If set, use this value for pool_timeout with SQLAlchemy. (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout = + +# Enable the experimental use of database reconnect on connection lost. +# (boolean value) +#use_db_reconnect = false + +# Seconds between retries of a database transaction. (integer value) +#db_retry_interval = 1 + +# If True, increases the interval between retries of a database operation up to +# db_max_retry_interval. (boolean value) +#db_inc_retry_interval = true + +# If db_inc_retry_interval is set, the maximum seconds between retries of a +# database operation. (integer value) +#db_max_retry_interval = 10 + +# Maximum retries in case of connection error or deadlock error before error is +# raised. Set to -1 to specify an infinite retry count. 
(integer value) +#db_max_retries = 20 + + +[ec2authtoken] + +# +# From heat.api.aws.ec2token +# + +# Authentication Endpoint URI. (string value) +#auth_uri = + +# Allow orchestration of multiple clouds. (boolean value) +#multi_cloud = false + +# Allowed keystone endpoints for auth_uri when multi_cloud is enabled. At least +# one endpoint needs to be specified. (list value) +#allowed_auth_uris = + +# Optional PEM-formatted certificate chain file. (string value) +#cert_file = + +# Optional PEM-formatted file that contains the private key. (string value) +#key_file = + +# Optional CA cert file to use in SSL connections. (string value) +#ca_file = + +# If set, then the server's certificate will not be verified. (boolean value) +#insecure = false + + +[heat_api] + +# +# From heat.common.wsgi +# + +# Address to bind the server. Useful when selecting a particular network +# interface. (string value) +# Deprecated group/name - [DEFAULT]/bind_host +#bind_host = 0.0.0.0 + +# The port on which the server will listen. (integer value) +# Deprecated group/name - [DEFAULT]/bind_port +#bind_port = 8004 + +# Number of backlog requests to configure the socket with. (integer value) +# Deprecated group/name - [DEFAULT]/backlog +#backlog = 4096 + +# Location of the SSL certificate file to use for SSL mode. (string value) +# Deprecated group/name - [DEFAULT]/cert_file +#cert_file = + +# Location of the SSL key file to use for enabling SSL mode. (string value) +# Deprecated group/name - [DEFAULT]/key_file +#key_file = + +# Number of workers for Heat service. (integer value) +# Deprecated group/name - [DEFAULT]/workers +#workers = 0 + +# Maximum line size of message headers to be accepted. max_header_line may need +# to be increased when using large tokens (typically those generated by the +# Keystone v3 API with big service catalogs). (integer value) +#max_header_line = 16384 + + +[heat_api_cfn] + +# +# From heat.common.wsgi +# + +# Address to bind the server. Useful when selecting a particular network +# interface. (string value) +# Deprecated group/name - [DEFAULT]/bind_host +#bind_host = 0.0.0.0 + +# The port on which the server will listen. (integer value) +# Deprecated group/name - [DEFAULT]/bind_port +#bind_port = 8000 + +# Number of backlog requests to configure the socket with. (integer value) +# Deprecated group/name - [DEFAULT]/backlog +#backlog = 4096 + +# Location of the SSL certificate file to use for SSL mode. (string value) +# Deprecated group/name - [DEFAULT]/cert_file +#cert_file = + +# Location of the SSL key file to use for enabling SSL mode. (string value) +# Deprecated group/name - [DEFAULT]/key_file +#key_file = + +# Number of workers for Heat service. (integer value) +# Deprecated group/name - [DEFAULT]/workers +#workers = 0 + +# Maximum line size of message headers to be accepted. max_header_line may need +# to be increased when using large tokens (typically those generated by the +# Keystone v3 API with big service catalogs). (integer value) +#max_header_line = 16384 + + +[heat_api_cloudwatch] + +# +# From heat.common.wsgi +# + +# Address to bind the server. Useful when selecting a particular network +# interface. (string value) +# Deprecated group/name - [DEFAULT]/bind_host +#bind_host = 0.0.0.0 + +# The port on which the server will listen. (integer value) +# Deprecated group/name - [DEFAULT]/bind_port +#bind_port = 8003 + +# Number of backlog requests to configure the socket with. 
(integer value) +# Deprecated group/name - [DEFAULT]/backlog +#backlog = 4096 + +# Location of the SSL certificate file to use for SSL mode. (string value) +# Deprecated group/name - [DEFAULT]/cert_file +#cert_file = + +# Location of the SSL key file to use for enabling SSL mode. (string value) +# Deprecated group/name - [DEFAULT]/key_file +#key_file = + +# Number of workers for Heat service. (integer value) +# Deprecated group/name - [DEFAULT]/workers +#workers = 0 + +# Maximum line size of message headers to be accepted. max_header_line may need +# to be increased when using large tokens (typically those generated by the +# Keystone v3 API with big service catalogs.) (integer value) +#max_header_line = 16384 + + +[keystone_authtoken] + +# +# From keystonemiddleware.auth_token +# + +# Prefix to prepend at the beginning of the path. Deprecated, use identity_uri. +# (string value) +#auth_admin_prefix = + +# Host providing the admin Identity API endpoint. Deprecated, use identity_uri. +# (string value) +#auth_host = 127.0.0.1 + +# Port of the admin Identity API endpoint. Deprecated, use identity_uri. +# (integer value) +#auth_port = 35357 + +# Protocol of the admin Identity API endpoint (http or https). Deprecated, use +# identity_uri. (string value) +#auth_protocol = https + +# Complete public Identity API endpoint (string value) +#auth_uri = + +# Complete admin Identity API endpoint. This should specify the unversioned +# root endpoint e.g. https://localhost:35357/ (string value) +#identity_uri = + +# API version of the admin Identity API endpoint (string value) +#auth_version = + +# Do not handle authorization requests within the middleware, but delegate the +# authorization decision to downstream WSGI components (boolean value) +#delay_auth_decision = false + +# Request timeout value for communicating with Identity API server. (boolean +# value) +#http_connect_timeout = + +# How many times are we trying to reconnect when communicating with Identity +# API Server. (integer value) +#http_request_max_retries = 3 + +# This option is deprecated and may be removed in a future release. Single +# shared secret with the Keystone configuration used for bootstrapping a +# Keystone installation, or otherwise bypassing the normal authentication +# process. This option should not be used, use `admin_user` and +# `admin_password` instead. (string value) +#admin_token = + +# Keystone account username (string value) +#admin_user = + +# Keystone account password (string value) +#admin_password = + +# Keystone service account tenant name to validate user tokens (string value) +#admin_tenant_name = admin + +# Env key for the swift cache (string value) +#cache = + +# Required if Keystone server requires client certificate (string value) +#certfile = + +# Required if Keystone server requires client certificate (string value) +#keyfile = + +# A PEM encoded Certificate Authority to use when verifying HTTPs connections. +# Defaults to system CAs. (string value) +#cafile = + +# Verify HTTPS connections. (boolean value) +#insecure = false + +# Directory used to cache files related to PKI tokens (string value) +#signing_dir = + +# Optionally specify a list of memcached server(s) to use for caching. If left +# undefined, tokens will instead be cached in-process. (list value) +# Deprecated group/name - [DEFAULT]/memcache_servers +#memcached_servers = + +# In order to prevent excessive effort spent validating tokens, the middleware +# caches previously-seen tokens for a configurable duration (in seconds). 
Set +# to -1 to disable caching completely. (integer value) +#token_cache_time = 300 + +# Determines the frequency at which the list of revoked tokens is retrieved +# from the Identity service (in seconds). A high number of revocation events +# combined with a low cache duration may significantly reduce performance. +# (integer value) +#revocation_cache_time = 10 + +# (optional) if defined, indicate whether token data should be authenticated or +# authenticated and encrypted. Acceptable values are MAC or ENCRYPT. If MAC, +# token data is authenticated (with HMAC) in the cache. If ENCRYPT, token data +# is encrypted and authenticated in the cache. If the value is not one of these +# options or empty, auth_token will raise an exception on initialization. +# (string value) +#memcache_security_strategy = + +# (optional, mandatory if memcache_security_strategy is defined) this string is +# used for key derivation. (string value) +#memcache_secret_key = + +# (optional) number of seconds memcached server is considered dead before it is +# tried again. (integer value) +#memcache_pool_dead_retry = 300 + +# (optional) max total number of open connections to every memcached server. +# (integer value) +#memcache_pool_maxsize = 10 + +# (optional) socket timeout in seconds for communicating with a memcache +# server. (integer value) +#memcache_pool_socket_timeout = 3 + +# (optional) number of seconds a connection to memcached is held unused in the +# pool before it is closed. (integer value) +#memcache_pool_unused_timeout = 60 + +# (optional) number of seconds that an operation will wait to get a memcache +# client connection from the pool. (integer value) +#memcache_pool_conn_get_timeout = 10 + +# (optional) use the advanced (eventlet safe) memcache client pool. The +# advanced pool will only work under python 2.x. (boolean value) +#memcache_use_advanced_pool = false + +# (optional) indicate whether to set the X-Service-Catalog header. If False, +# middleware will not ask for service catalog on token validation and will not +# set the X-Service-Catalog header. (boolean value) +#include_service_catalog = true + +# Used to control the use and type of token binding. Can be set to: "disabled" +# to not check token binding. "permissive" (default) to validate binding +# information if the bind type is of a form known to the server and ignore it +# if not. "strict" like "permissive" but if the bind type is unknown the token +# will be rejected. "required" any form of token binding is needed to be +# allowed. Finally the name of a binding method that must be present in tokens. +# (string value) +#enforce_token_bind = permissive + +# If true, the revocation list will be checked for cached tokens. This requires +# that PKI tokens are configured on the Keystone server. (boolean value) +#check_revocations_for_cached = false + +# Hash algorithms to use for hashing PKI tokens. This may be a single algorithm +# or multiple. The algorithms are those supported by Python standard +# hashlib.new(). The hashes will be tried in the order given, so put the +# preferred one first for performance. The result of the first hash will be +# stored in the cache. This will typically be set to multiple values only while +# migrating from a less secure algorithm to a more secure one. Once all the old +# tokens are expired this option should be set to a single value for better +# performance. (list value) +#hash_algorithms = md5 + + +[matchmaker_redis] + +# +# From oslo.messaging +# + +# Host to locate redis. 
(string value)
+#host = 127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port = 6379
+
+# Password for Redis server (optional). (string value)
+#password =
+
+
+[matchmaker_ring]
+
+#
+# From oslo.messaging
+#
+
+# Matchmaker ring file (JSON). (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile = /etc/oslo/matchmaker_ring.json
+
+
+[oslo_messaging_amqp]
+
+#
+# From oslo.messaging
+#
+
+# address prefix used when sending to a specific server (string value)
+# Deprecated group/name - [amqp1]/server_request_prefix
+#server_request_prefix = exclusive
+
+# address prefix used when broadcasting to all servers (string value)
+# Deprecated group/name - [amqp1]/broadcast_prefix
+#broadcast_prefix = broadcast
+
+# address prefix when sending to any server in group (string value)
+# Deprecated group/name - [amqp1]/group_request_prefix
+#group_request_prefix = unicast
+
+# Name for the AMQP container (string value)
+# Deprecated group/name - [amqp1]/container_name
+#container_name =
+
+# Timeout for inactive connections (in seconds) (integer value)
+# Deprecated group/name - [amqp1]/idle_timeout
+#idle_timeout = 0
+
+# Debug: dump AMQP frames to stdout (boolean value)
+# Deprecated group/name - [amqp1]/trace
+#trace = false
+
+# CA certificate PEM file for verifying server certificate (string value)
+# Deprecated group/name - [amqp1]/ssl_ca_file
+#ssl_ca_file =
+
+# Identifying certificate PEM file to present to clients (string value)
+# Deprecated group/name - [amqp1]/ssl_cert_file
+#ssl_cert_file =
+
+# Private key PEM file used to sign cert_file certificate (string value)
+# Deprecated group/name - [amqp1]/ssl_key_file
+#ssl_key_file =
+
+# Password for decrypting ssl_key_file (if encrypted) (string value)
+# Deprecated group/name - [amqp1]/ssl_key_password
+#ssl_key_password =
+
+# Accept clients using either SSL or plain TCP (boolean value)
+# Deprecated group/name - [amqp1]/allow_insecure_clients
+#allow_insecure_clients = false
+
+
+[oslo_messaging_qpid]
+
+#
+# From oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues = false
+
+# Auto-delete queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_auto_delete
+#amqp_auto_delete = false
+
+# Size of RPC connection pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
+#rpc_conn_pool_size = 30
+
+# Qpid broker hostname. (string value)
+# Deprecated group/name - [DEFAULT]/qpid_hostname
+#qpid_hostname = localhost
+
+# Qpid broker port. (integer value)
+# Deprecated group/name - [DEFAULT]/qpid_port
+#qpid_port = 5672
+
+# Qpid HA cluster host:port pairs. (list value)
+# Deprecated group/name - [DEFAULT]/qpid_hosts
+#qpid_hosts = $qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+# Deprecated group/name - [DEFAULT]/qpid_username
+#qpid_username =
+
+# Password for Qpid connection. (string value)
+# Deprecated group/name - [DEFAULT]/qpid_password
+#qpid_password =
+
+# Space separated list of SASL mechanisms to use for auth. (string value)
+# Deprecated group/name - [DEFAULT]/qpid_sasl_mechanisms
+#qpid_sasl_mechanisms =
+
+# Seconds between connection keepalive heartbeats. (integer value)
+# Deprecated group/name - [DEFAULT]/qpid_heartbeat
+#qpid_heartbeat = 60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+# Deprecated group/name - [DEFAULT]/qpid_protocol
+#qpid_protocol = tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+# Deprecated group/name - [DEFAULT]/qpid_tcp_nodelay
+#qpid_tcp_nodelay = true
+
+# The number of prefetched messages held by the receiver. (integer value)
+# Deprecated group/name - [DEFAULT]/qpid_receiver_capacity
+#qpid_receiver_capacity = 1
+
+# The qpid topology version to use. Version 1 is what was originally used by
+# impl_qpid. Version 2 includes some backwards-incompatible changes that allow
+# broker federation to work. Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break. (integer value)
+# Deprecated group/name - [DEFAULT]/qpid_topology_version
+#qpid_topology_version = 1
+
+
+[oslo_messaging_rabbit]
+
+#
+# From oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues = false
+
+# Auto-delete queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_auto_delete
+#amqp_auto_delete = false
+
+# Size of RPC connection pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
+#rpc_conn_pool_size = 30
+
+# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
+# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
+# distributions. (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_version
+#kombu_ssl_version =
+
+# SSL key file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile
+#kombu_ssl_keyfile =
+
+# SSL cert file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile
+#kombu_ssl_certfile =
+
+# SSL certification authority file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs
+#kombu_ssl_ca_certs =
+
+# How long to wait before reconnecting in response to an AMQP consumer cancel
+# notification. (floating point value)
+# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay
+#kombu_reconnect_delay = 1.0
+
+# The RabbitMQ broker address where a single node is used. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_host
+#rabbit_host = localhost
+
+# The RabbitMQ broker port where a single node is used. (integer value)
+# Deprecated group/name - [DEFAULT]/rabbit_port
+#rabbit_port = 5672
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+# Deprecated group/name - [DEFAULT]/rabbit_hosts
+#rabbit_hosts = $rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_use_ssl
+#rabbit_use_ssl = false
+
+# The RabbitMQ userid. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_userid
+#rabbit_userid = guest
+
+# The RabbitMQ password. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_password
+#rabbit_password = guest
+
+# The RabbitMQ login method. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_login_method
+#rabbit_login_method = AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_virtual_host
+#rabbit_virtual_host = /
+
+# How frequently to retry connecting with RabbitMQ. (integer value)
+#rabbit_retry_interval = 1
+
+# How long to back off between retries when connecting to RabbitMQ. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff
+#rabbit_retry_backoff = 2
+
+# Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry
+# count). (integer value)
+# Deprecated group/name - [DEFAULT]/rabbit_max_retries
+#rabbit_max_retries = 0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you
+# must wipe the RabbitMQ database. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
+#rabbit_ha_queues = false
+
+# Number of seconds after which the Rabbit broker is considered down if
+# heartbeat's keep-alive fails (0 disables the heartbeat). (integer value)
+#heartbeat_timeout_threshold = 60
+
+# How many times during the heartbeat_timeout_threshold we check the
+# heartbeat. (integer value)
+#heartbeat_rate = 2
+
+# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value)
+# Deprecated group/name - [DEFAULT]/fake_rabbit
+#fake_rabbit = false
+
+
+[paste_deploy]
+
+#
+# From heat.common.config
+#
+
+# The flavor to use. (string value)
+#flavor =
+
+# The API paste config file to use. (string value)
+#api_paste_config = api-paste.ini
+
+
+[profiler]
+
+#
+# From heat.common.config
+#
+
+# If False, fully disable the profiling feature. (boolean value)
+#profiler_enabled = false
+
+# If False, do not trace SQL requests. (boolean value)
+#trace_sqlalchemy = false
+
+
+[revision]
+
+#
+# From heat.common.config
+#
+
+# Heat build revision. If you would prefer to manage your build revision
+# separately, you can move this section to a different file and add it as
+# another config option. (string value)
+#heat_revision = unknown
diff --git a/openstack/python-heat/openstack-heat/centos/files/heat.logrotate b/openstack/python-heat/openstack-heat/centos/files/heat.logrotate
new file mode 100644
index 00000000..22e6d5e7
--- /dev/null
+++ b/openstack/python-heat/openstack-heat/centos/files/heat.logrotate
@@ -0,0 +1,6 @@
+/var/log/heat/*.log {
+    rotate 14
+    size 10M
+    missingok
+    compress
+}
diff --git a/openstack/python-heat/openstack-heat/centos/files/openstack-heat-all.service b/openstack/python-heat/openstack-heat/centos/files/openstack-heat-all.service
new file mode 100644
index 00000000..2701a94c
--- /dev/null
+++ b/openstack/python-heat/openstack-heat/centos/files/openstack-heat-all.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=Combined OpenStack Heat Engine/API Service
+After=syslog.target network.target qpidd.service mysqld.service openstack-keystone.service tgtd.service openstack-glance-api.service openstack-glance-registry.service openstack-nova-api.service openstack-nova-objectstore.service openstack-nova-compute.service openstack-nova-network.service openstack-nova-volume.service openstack-nova-scheduler.service openstack-cinder-volume.service
+
+[Service]
+Type=simple
+User=root
+ExecStart=/usr/bin/heat-all --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/python-heat/openstack-heat/centos/files/openstack-heat-api-cfn.service b/openstack/python-heat/openstack-heat/centos/files/openstack-heat-api-cfn.service
new file mode 100644
index 00000000..6d9f1350
--- /dev/null
+++ b/openstack/python-heat/openstack-heat/centos/files/openstack-heat-api-cfn.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=OpenStack Heat CFN-compatible API Service
+After=syslog.target network.target
+
+[Service]
+Type=simple
+User=root
+ExecStart=/usr/bin/heat-api-cfn --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/python-heat/openstack-heat/centos/files/openstack-heat-api-cloudwatch.service b/openstack/python-heat/openstack-heat/centos/files/openstack-heat-api-cloudwatch.service
new file mode 100644
index 00000000..aa02172a
--- /dev/null
+++ b/openstack/python-heat/openstack-heat/centos/files/openstack-heat-api-cloudwatch.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=OpenStack Heat CloudWatch API Service
+After=syslog.target network.target
+
+[Service]
+Type=simple
+User=root
+ExecStart=/usr/bin/heat-api-cloudwatch --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/python-heat/openstack-heat/centos/files/openstack-heat-api.service b/openstack/python-heat/openstack-heat/centos/files/openstack-heat-api.service
new file mode 100644
index 00000000..ffb26a83
--- /dev/null
+++ b/openstack/python-heat/openstack-heat/centos/files/openstack-heat-api.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=OpenStack Heat API Service
+After=syslog.target network.target
+
+[Service]
+Type=simple
+User=root
+ExecStart=/usr/bin/heat-api --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/python-heat/openstack-heat/centos/files/openstack-heat-engine.service b/openstack/python-heat/openstack-heat/centos/files/openstack-heat-engine.service
new file mode 100644
index 00000000..7aa96389
--- /dev/null
+++ b/openstack/python-heat/openstack-heat/centos/files/openstack-heat-engine.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=OpenStack Heat Engine Service
+After=syslog.target network.target qpidd.service mysqld.service openstack-keystone.service tgtd.service openstack-glance-api.service openstack-glance-registry.service openstack-nova-api.service openstack-nova-objectstore.service openstack-nova-compute.service openstack-nova-network.service openstack-nova-volume.service openstack-nova-scheduler.service openstack-cinder-volume.service
+
+[Service]
+Type=simple
+User=root
+ExecStart=/usr/bin/heat-engine --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/python-heat/openstack-heat/centos/openstack-heat.spec b/openstack/python-heat/openstack-heat/centos/openstack-heat.spec
new file mode 100644
index 00000000..67935de2
--- /dev/null
+++ b/openstack/python-heat/openstack-heat/centos/openstack-heat.spec
@@ -0,0 +1,540 @@
+%{!?upstream_version: %global upstream_version %{version}%{?milestone}}
+%global with_doc %{!?_without_doc:1}%{?_without_doc:0}
+
+Name: openstack-heat
+Summary: OpenStack Orchestration (heat)
+# Liberty semver reset
+# https://review.openstack.org/#/q/I6a35fa0dda798fad93b804d00a46af80f08d475c,n,z
+Epoch: 1
+Version: 9.0.0
+Release: 1%{?_tis_dist}.%{tis_patch_ver}
+License: ASL 2.0
+URL: http://www.openstack.org
+Source0: openstack-heat-%{version}.tar.gz
+#
+
+Obsoletes: heat < 7-9
+Provides: heat
+
+Source2: openstack-heat-api.service
+Source3: openstack-heat-api-cfn.service
+Source4: openstack-heat-engine.service
+Source5: openstack-heat-api-cloudwatch.service
+Source6: openstack-heat-all.service
+
+Source20: heat-dist.conf
+Source22: heat-purge-deleted-active
+
+BuildArch: noarch
+BuildRequires: git
+BuildRequires: openstack-macros
+BuildRequires: python2-devel
+BuildRequires: python-stevedore >= 1.20.0
+BuildRequires: python-oslo-cache
+BuildRequires: python-oslo-context
+BuildRequires: python-oslo-middleware
+BuildRequires: python-oslo-policy
+BuildRequires: python-oslo-messaging
+BuildRequires: python-setuptools
+BuildRequires: python-openstackdocstheme
+BuildRequires: python-oslo-i18n
+BuildRequires: python-oslo-db
+BuildRequires: python-oslo-utils
+BuildRequires: python-oslo-log
+BuildRequires: python-oslo-versionedobjects
+BuildRequires: python-eventlet
+BuildRequires: python-kombu
+BuildRequires: python-lxml
+BuildRequires: python-netaddr
+BuildRequires: python-migrate
+BuildRequires: python-osprofiler
+BuildRequires: python-six
+BuildRequires: PyYAML
+BuildRequires: python-sphinx
+BuildRequires: m2crypto
+BuildRequires: python-paramiko
+BuildRequires: python-yaql
+# These are required to build due to the requirements check added
+BuildRequires: python-paste-deploy
+BuildRequires: python-routes
+BuildRequires: python-sqlalchemy
+BuildRequires: python-webob
+BuildRequires: python-pbr
+BuildRequires: python-d2to1
+BuildRequires: python-cryptography
+# These are required to build the config file
+BuildRequires: python-oslo-config
+BuildRequires: python-redis
+BuildRequires: crudini
+BuildRequires: python-crypto
+BuildRequires: python-keystoneauth1
+BuildRequires: python-keystoneclient
+# Required to compile translation files
+BuildRequires: python-babel
+
+BuildRequires: systemd-units
+BuildRequires: systemd-devel
+BuildRequires: cgts-client
+BuildRequires: python-keyring
+BuildRequires: tsconfig
+
+%if 0%{?with_doc}
+BuildRequires: python-cinderclient
+BuildRequires: python-novaclient
+BuildRequires: python-saharaclient
+BuildRequires: python-neutronclient
+BuildRequires: python-swiftclient
+BuildRequires: python-heatclient
+BuildRequires: python-ceilometerclient
+BuildRequires: python-glanceclient
+BuildRequires: python-troveclient
+BuildRequires: python-aodhclient
+BuildRequires: python-barbicanclient
+BuildRequires: python-designateclient
+BuildRequires: python-magnumclient
+BuildRequires: python-monascaclient
+BuildRequires: python-manilaclient
+BuildRequires: python-zaqarclient
+BuildRequires: python-croniter
+BuildRequires: python-gabbi
+BuildRequires: python-testscenarios
+BuildRequires: python-tempest
+BuildRequires: python2-pycodestyle
+
+# NOTE(ykarel) zunclient is not packaged yet.
+BuildRequires: python-senlinclient
+#BuildRequires: python-zunclient
+%endif
+
+Requires: %{name}-common = %{epoch}:%{version}-%{release}
+Requires: %{name}-engine = %{epoch}:%{version}-%{release}
+Requires: %{name}-api = %{epoch}:%{version}-%{release}
+Requires: %{name}-api-cfn = %{epoch}:%{version}-%{release}
+Requires: %{name}-api-cloudwatch = %{epoch}:%{version}-%{release}
+
+%package -n python-heat-tests
+Summary: Heat tests
+Requires: %{name}-common = %{epoch}:%{version}-%{release}
+
+Requires: python-mox3
+Requires: python-oslotest
+Requires: python-testresources
+Requires: python-oslo-log
+Requires: python-oslo-utils
+Requires: python-heatclient
+Requires: python-cinderclient
+Requires: python-zaqarclient
+Requires: python-keystoneclient
+Requires: python-swiftclient
+Requires: python-paramiko
+Requires: python-kombu
+Requires: python-oslo-config
+Requires: python-oslo-concurrency
+Requires: python-requests
+Requires: python-eventlet
+Requires: PyYAML
+Requires: python-six
+Requires: python-gabbi
+
+%description -n python-heat-tests
+Heat is a service to orchestrate composite cloud applications using a
+declarative template format through an OpenStack-native REST API.
+This package contains the Heat test files.
+
+%prep
+# WRS: The tarball is packaged as openstack-heat rather than heat
+%setup -q -n openstack-heat-%{version}
+
+# Remove the requirements file so that pbr hooks don't add it
+# to distutils requires_dist config
+rm -rf {test-,}requirements.txt tools/{pip,test}-requires
+
+# Remove tests in contrib
+find contrib -name tests -type d | xargs rm -r
+
+%build
+export PBR_VERSION=%{version}
+%{__python} setup.py build
+
+# Generate i18n files
+%{__python2} setup.py compile_catalog -d build/lib/heat/locale
+
+# Generate sample config and add the current directory to PYTHONPATH so
+# oslo-config-generator doesn't skip heat's entry points.
+PYTHONPATH=. oslo-config-generator --config-file=config-generator.conf
+
+%install
+export PBR_VERSION=%{version}
+%{__python} setup.py install -O1 --skip-build --root=%{buildroot}
+sed -i -e '/^#!/,1 d' %{buildroot}/%{python_sitelib}/heat/db/sqlalchemy/migrate_repo/manage.py
+
+# Create fake egg-info for the tempest plugin
+# TODO switch to %{service} everywhere as in openstack-example.spec
+%global service heat
+%py2_entrypoint %{service} %{service}
+
+mkdir -p %{buildroot}/%{_localstatedir}/log/heat/
+mkdir -p %{buildroot}/%{_localstatedir}/run/heat/
+
+# install systemd unit files
+install -p -D -m 644 %{SOURCE2} %{buildroot}%{_unitdir}/openstack-heat-api.service
+install -p -D -m 644 %{SOURCE3} %{buildroot}%{_unitdir}/openstack-heat-api-cfn.service
+install -p -D -m 644 %{SOURCE4} %{buildroot}%{_unitdir}/openstack-heat-engine.service
+install -p -D -m 644 %{SOURCE5} %{buildroot}%{_unitdir}/openstack-heat-api-cloudwatch.service
+install -p -D -m 644 %{SOURCE6} %{buildroot}%{_unitdir}/openstack-heat-all.service
+
+mkdir -p %{buildroot}/%{_sharedstatedir}/heat/
+mkdir -p %{buildroot}/%{_sysconfdir}/heat/
+
+%if 0%{?with_doc}
+export PBR_VERSION=%{version}
+%{__python2} setup.py build_sphinx -b html
+%{__python2} setup.py build_sphinx -b man
+mkdir -p %{buildroot}%{_mandir}/man1
+install -p -D -m 644 doc/build/man/*.1 %{buildroot}%{_mandir}/man1/
+%endif
+
+rm -f %{buildroot}/%{_bindir}/heat-db-setup
+rm -f %{buildroot}/%{_mandir}/man1/heat-db-setup.*
+rm -rf %{buildroot}/var/lib/heat/.dummy
+rm -f %{buildroot}/usr/bin/cinder-keystone-setup
+
+install -p -D -m 640 etc/heat/heat.conf.sample %{buildroot}/%{_sysconfdir}/heat/heat.conf
+install -p -D -m 640 %{SOURCE20} %{buildroot}%{_datadir}/heat/heat-dist.conf
+crudini --set %{buildroot}%{_datadir}/heat/heat-dist.conf revision heat_revision %{version}
+install -p -D -m 640 etc/heat/api-paste.ini %{buildroot}/%{_datadir}/heat/api-paste-dist.ini
+install -p -D -m 640 etc/heat/policy.json %{buildroot}/%{_sysconfdir}/heat
+
+# TODO: move this to setup.cfg
+cp -vr etc/heat/templates %{buildroot}/%{_sysconfdir}/heat
+cp -vr etc/heat/environment.d %{buildroot}/%{_sysconfdir}/heat
+
+# WRS Manually stage non-code files
+install -p -D -m 640 etc/heat/api-paste.ini %{buildroot}/%{_sysconfdir}/heat/api-paste.ini
+install -p -D -m 755 %{SOURCE22} %{buildroot}/%{_bindir}/heat-purge-deleted-active
+chmod 750 %{buildroot}/%{_sysconfdir}/heat
+install -p -D -m 644 heat/db/sqlalchemy/migrate_repo/migrate.cfg %{buildroot}%{python_sitelib}/heat/db/sqlalchemy/migrate_repo/migrate.cfg
+install -p -D -m 755 heat/cloudinit/boothook.sh %{buildroot}%{python_sitelib}/heat/cloudinit/boothook.sh
+install -p -D -m 644 heat/cloudinit/config %{buildroot}%{python_sitelib}/heat/cloudinit/config
+
+# Install i18n .mo files (.po and .pot are not required)
+install -d -m 755 %{buildroot}%{_datadir}
+rm -f %{buildroot}%{python2_sitelib}/heat/locale/*/LC_*/heat*po +rm -f %{buildroot}%{python2_sitelib}/heat/locale/*pot +mv %{buildroot}%{python2_sitelib}/heat/locale %{buildroot}%{_datadir}/locale + +# Find language files +%find_lang heat --all-name + +%description +Heat is a service to orchestrate composite cloud applications using a +declarative template format through an OpenStack-native REST API. + +%package common +Summary: Heat common +Group: System Environment/Base + +Requires: python-pbr +Requires: python-croniter +Requires: python-eventlet +Requires: python-stevedore >= 1.20.0 +Requires: python-lxml +Requires: python-netaddr +Requires: python-osprofiler +Requires: python-paste-deploy +Requires: python-requests +Requires: python-routes +Requires: python-sqlalchemy +Requires: python-migrate +Requires: python-webob +Requires: python-six >= 1.9.0 +Requires: PyYAML +Requires: python-paramiko +Requires: python-babel >= 2.3.4 +Requires: python-cryptography >= 1.6 +Requires: python-yaql >= 1.1.0 + +Requires: python-oslo-cache +Requires: python-oslo-concurrency +Requires: python-oslo-config +Requires: python-oslo-context +Requires: python-oslo-utils +Requires: python-oslo-db +Requires: python-oslo-i18n +Requires: python-oslo-middleware +Requires: python-oslo-messaging +Requires: python-oslo-policy +Requires: python-oslo-reports +Requires: python-oslo-serialization +Requires: python-oslo-service +Requires: python-oslo-log +Requires: python-oslo-versionedobjects + +Requires: python-ceilometerclient +Requires: python-cinderclient +Requires: python-glanceclient +Requires: python-heatclient +Requires: python-keystoneclient +Requires: python-keystonemiddleware +Requires: python-neutronclient +Requires: python-novaclient +#Requires: python-saharaclient +Requires: python-swiftclient +#Requires: python-troveclient + +Requires: python-debtcollector >= 1.2.0 +Requires: python-keystoneauth1 >= 3.1.0 +Requires: python-crypto >= 2.6 +#Requires: python-barbicanclient +#Requires: python-designateclient +#Requires: python-manilaclient +#Requires: python-mistralclient +Requires: python-openstackclient +Requires: python-zaqarclient +Requires: python-aodhclient +Requires: python-magnumclient +#Requires: python-senlinclient +Requires: python-openstacksdk +Requires: pytz +Requires: python-tenacity >= 3.2.1 + +Requires(pre): shadow-utils + +%description common +Components common to all OpenStack Heat services + +%files common -f heat.lang +%doc LICENSE +%{_bindir}/heat-manage +%{_bindir}/heat-keystone-setup +%{_bindir}/heat-keystone-setup-domain +%{_bindir}/heat-purge-deleted-active +%{python2_sitelib}/heat +%{python2_sitelib}/heat-%{upstream_version}-*.egg-info +%exclude %{python2_sitelib}/heat/tests +%attr(-, root, heat) %{_datadir}/heat/heat-dist.conf +%attr(-, root, heat) %{_datadir}/heat/api-paste-dist.ini +%dir %attr(0750,heat,root) %{_localstatedir}/log/heat +%dir %attr(0750,heat,root) %{_localstatedir}/run/heat +%dir %attr(0750,heat,root) %{_sharedstatedir}/heat +%dir %attr(0755,heat,root) %{_sysconfdir}/heat +%config(noreplace) %attr(-, root, heat) %{_sysconfdir}/heat/heat.conf +%config(noreplace) %attr(-, root, heat) %{_sysconfdir}/heat/api-paste.ini +%config(noreplace) %attr(-, root, heat) %{_sysconfdir}/heat/policy.json +%config(noreplace) %attr(-,root,heat) %{_sysconfdir}/heat/environment.d/* +%config(noreplace) %attr(-,root,heat) %{_sysconfdir}/heat/templates/* +%if 0%{?with_doc} +%{_mandir}/man1/heat-keystone-setup.1.gz +%{_mandir}/man1/heat-keystone-setup-domain.1.gz 
+%{_mandir}/man1/heat-manage.1.gz +%endif + +%files -n python-heat-tests +%license LICENSE +%{python2_sitelib}/heat/tests +%{python2_sitelib}/heat_integrationtests +%{python2_sitelib}/%{service}_tests.egg-info + +%pre common +# 187:187 for heat - rhbz#845078 +getent group heat >/dev/null || groupadd -r --gid 187 heat +getent passwd heat >/dev/null || \ +useradd --uid 187 -r -g heat -d %{_sharedstatedir}/heat -s /sbin/nologin \ +-c "OpenStack Heat Daemons" heat +exit 0 + +%package engine +Summary: The Heat engine + +Requires: %{name}-common = %{epoch}:%{version}-%{release} + +Requires(post): systemd +Requires(preun): systemd +Requires(postun): systemd + +%description engine +Heat is a service to orchestrate composite cloud applications using a +declarative template format through an OpenStack-native REST API. + +The heat-engine's main responsibility is to orchestrate the launching of +templates and provide events back to the API consumer. + +%files engine +%doc README.rst LICENSE +%if 0%{?with_doc} +%doc doc/build/html/man/heat-engine.html +%endif +%{_bindir}/heat-engine +%{_unitdir}/openstack-heat-engine.service +%if 0%{?with_doc} +%{_mandir}/man1/heat-engine.1.gz +%endif + +%post engine +%systemd_post openstack-heat-engine.service + +%preun engine +%systemd_preun openstack-heat-engine.service + +%postun engine +%systemd_postun_with_restart openstack-heat-engine.service + + +%package api +Summary: The Heat API + +Requires: %{name}-common = %{epoch}:%{version}-%{release} + +Requires(post): systemd +Requires(preun): systemd +Requires(postun): systemd + +%description api +Heat is a service to orchestrate composite cloud applications using a +declarative template format through an OpenStack-native REST API. + +The heat-api component provides an OpenStack-native REST API that processes API +requests by sending them to the heat-engine over RPC. + +%files api +%doc README.rst LICENSE +%if 0%{?with_doc} +%doc doc/build/html/man/heat-api.html +%endif +%{_bindir}/heat-api +%{_bindir}/heat-wsgi-api +%{_unitdir}/openstack-heat-api.service +%if 0%{?with_doc} +%{_mandir}/man1/heat-api.1.gz +%endif + +%post api +%systemd_post openstack-heat-api.service + +%preun api +%systemd_preun openstack-heat-api.service + +%postun api +%systemd_postun_with_restart openstack-heat-api.service + + +%package api-cfn +Summary: Heat CloudFormation API + +Requires: %{name}-common = %{epoch}:%{version}-%{release} + +Requires(post): systemd +Requires(preun): systemd +Requires(postun): systemd + +%description api-cfn +Heat is a service to orchestrate composite cloud applications using a +declarative template format through an OpenStack-native REST API. + +The heat-api-cfn component provides an AWS Query API that is compatible with +AWS CloudFormation and processes API requests by sending them to the +heat-engine over RPC. 
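A rough usage sketch for this sub-package (the unit name comes from the %files
list below; port 8000 is heat-api-cfn's upstream default and a deployment may
bind it elsewhere in heat.conf):

    systemctl enable openstack-heat-api-cfn.service
    systemctl start openstack-heat-api-cfn.service
    # Bare probe of the listener; real AWS Query calls carry signed
    # parameters such as Action=ListStacks.
    curl -s http://127.0.0.1:8000/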
+
+%files api-cfn
+%doc README.rst LICENSE
+%if 0%{?with_doc}
+%doc doc/build/html/man/heat-api-cfn.html
+%endif
+%{_bindir}/heat-api-cfn
+%{_bindir}/heat-wsgi-api-cfn
+%{_unitdir}/openstack-heat-api-cfn.service
+%if 0%{?with_doc}
+%{_mandir}/man1/heat-api-cfn.1.gz
+%endif
+
+%post api-cfn
+%systemd_post openstack-heat-api-cfn.service
+
+%preun api-cfn
+%systemd_preun openstack-heat-api-cfn.service
+
+%postun api-cfn
+%systemd_postun_with_restart openstack-heat-api-cfn.service
+
+
+%package api-cloudwatch
+Summary: Heat CloudWatch API
+
+Requires: %{name}-common = %{epoch}:%{version}-%{release}
+
+Requires(post): systemd
+Requires(preun): systemd
+Requires(postun): systemd
+
+%description api-cloudwatch
+Heat is a service to orchestrate composite cloud applications using a
+declarative template format through an OpenStack-native REST API.
+
+AWS CloudWatch-compatible API to the Heat Engine
+
+%files api-cloudwatch
+%doc README.rst LICENSE
+%if 0%{?with_doc}
+%doc doc/build/html/man/heat-api-cloudwatch.html
+%endif
+%{_bindir}/heat-api-cloudwatch
+%{_bindir}/heat-wsgi-api-cloudwatch
+%{_unitdir}/openstack-heat-api-cloudwatch.service
+%if 0%{?with_doc}
+%{_mandir}/man1/heat-api-cloudwatch.1.gz
+%endif
+
+%post api-cloudwatch
+%systemd_post openstack-heat-api-cloudwatch.service
+
+%preun api-cloudwatch
+%systemd_preun openstack-heat-api-cloudwatch.service
+
+%postun api-cloudwatch
+%systemd_postun_with_restart openstack-heat-api-cloudwatch.service
+
+
+%package monolith
+Summary: The combined Heat engine/API
+
+Requires: %{name}-common = %{epoch}:%{version}-%{release}
+
+Requires(post): systemd
+Requires(preun): systemd
+Requires(postun): systemd
+
+%description monolith
+Heat is a service to orchestrate composite cloud applications using a
+declarative template format through an OpenStack-native REST API.
+
+The heat-all process bundles together any (or all) of heat-engine, heat-api,
+heat-cfn-api, and heat-cloudwatch-api into a single process. This can be used
+to bootstrap a minimal TripleO deployment, but is not the recommended way of
+running the Heat service in general.
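For illustration, the monolith can be exercised either through its unit or
directly in the foreground; the command line below is taken verbatim from the
bundled openstack-heat-all.service file:

    systemctl start openstack-heat-all.service
    # or, in the foreground for debugging:
    /usr/bin/heat-all --config-file /usr/share/heat/heat-dist.conf \
        --config-file /etc/heat/heat.conf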
+ +%files monolith +%doc README.rst LICENSE +%{_bindir}/heat-all +%{_unitdir}/openstack-heat-all.service + +%post monolith +%systemd_post openstack-heat-all.service + +%preun monolith +%systemd_preun openstack-heat-all.service + +%postun monolith +%systemd_postun_with_restart openstack-heat-all.service + + +%changelog +* Wed Aug 30 2017 rdo-trunk 1:9.0.0-1 +- Update to 9.0.0 + +* Fri Aug 25 2017 rdo-trunk 1:9.0.0-0.2.0rc2 +- Update to 9.0.0.0rc2 + +* Tue Aug 22 2017 Alfredo Moralejo 1:9.0.0-0.1.0rc1 +- Update to 9.0.0.0rc1 + diff --git a/openstack/python-heat/python-heat/.yamllint b/openstack/python-heat/python-heat/.yamllint new file mode 100644 index 00000000..293c2bd9 --- /dev/null +++ b/openstack/python-heat/python-heat/.yamllint @@ -0,0 +1,10 @@ +--- +extends: default +rules: + braces: {min-spaces-inside: 0, max-spaces-inside: 1} + brackets: {min-spaces-inside: 0, max-spaces-inside: 1} + #comments: disable + #comments-indentation: disable + document-start: disable + #indentation: disable + #line-length: disable diff --git a/openstack/python-heat/python-heat/README.template b/openstack/python-heat/python-heat/README.template new file mode 100644 index 00000000..e69de29b diff --git a/openstack/python-heat/python-heat/templates/LICENSE b/openstack/python-heat/python-heat/templates/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/openstack/python-heat/python-heat/templates/README b/openstack/python-heat/python-heat/templates/README
new file mode 100644
index 00000000..6df71984
--- /dev/null
+++ b/openstack/python-heat/python-heat/templates/README
@@ -0,0 +1,213 @@
+Copyright © 2014-2017 Wind River Systems, Inc.
+
+SPDX-License-Identifier: Apache-2.0
+-----------------------------------------------------------------------
+
+
+DESCRIPTION
+===========
+This SDK Module provides example HEAT templates to assist Titanium Cloud
+users in writing their own HEAT templates. Simple examples of the
+more commonly used HEAT resources can be found in the hot/simple/ directory,
+in a YAML file named after the heat resource. More complex examples of
+heat templates, involving several different inter-related heat resources,
+can be found in the hot/scenarios/ directory.
+
+NOTE that the contents of this SDK Module can also be found on the
+controllers, at /etc/heat/templates/.
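As a quick usage sketch (assuming the tarball's hot/ layout is mirrored under
/etc/heat/templates/ on the controller; the stack name is arbitrary, and -P
parameters may be needed for templates that declare them; see USAGE below):

    ls /etc/heat/templates/hot/simple/
    heat stack-create -f /etc/heat/templates/hot/simple/OS_Cinder_Volume.yaml demo-volume
    heat stack-list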
+ + +BACKGROUND +========== +HEAT is an Orchestration service that is meant to simplify the launching and +basic management of complex applications or application 'Services' (or using +Heat terminology, a 'Stack'). An application Service consists of a set of related +resources (VMs, Flavors, Images, Volumes, Networks, Subnets, Routers, Load +Balancers, Firewalls, Scaling Policies, etc.) that collectively provide a higher +order Application Function (e.g. Evolved Packet Core, Class 5 Call Server, Signaling +and Media Session Border Controller, etc.). Nearly all of the individual OpenStack +commands that are executed to create the individual resources of an application service +can be described in a HEAT template. HEAT also provides a means of auto-scaling the +application service. VMs or other resource types can be scaled horizontally or +vertically based on measurements of cloud-platform-monitored metrics or +guest-application-monitored metrics. In regard to VM for example, scaling +horizontally (in/out) means decreasing/increasing number of VMs, whereas scaling +vertically (up/down) means resizing the VM by increasing or decreasing its resources +(e.g. vcpus). + +Titanium Cloud utilizes HEAT templates to enable customers to easily launch and +manage application Services. A set of Titanium Cloud templates are provided that can +be used as a reference to create application Service templates. + + +DELIVERABLE +=========== +The Heat Template SDK Module is delivered as source YAML files in a compressed +tarball called "wrs-heat-templates-#.#.#.tgz". + + +Simple examples of the more commonly used HEAT resources can be found in +the hot/simple/ directory, in a YAML file named after the heat resource. + +./hot/simple: + OS_Ceilometer_Alarm.yaml - Creates a Ceilometer Threshold Alarm; + specifying the meter name, the criteria of the + threshold ( > x or < x ), evaluation period, etc. + OS_Cinder_Volume.yaml - Creates a Cinder Volume of a particular size + (in GBytes). + OS_Cinder_VolumeAttachment.yaml - Given a VM Instance and a Cinder Volume, this + creates an attachment or a mount point within the + VM to the Cinder Volume. + OS_Glance_Image.yaml - Creates a Glance Image specifying the image file, + container format, disk format, etc. . + OS_Heat_AccessPolicy.yaml - Specifies which resources are shown on a + 'heat stack-show ' and a + 'heat resource-show '. + OS_Heat_AutoScalingGroup.yaml - Creates an In/Out Autoscaling group; specifying + the min/max size, cooldown period, etc., and + the resource being scaled (defined by referencing + another heat template / yaml file). + OS_Heat_Stack.yaml - Creates a HEAT Stack Resource, in place, within + a Heat template, specifying the version, parameters + and resources of the inner Heat Stack. + OS_Neutron_FloatingIP.yaml - Creates a source NAT type relationship with an + External IP (from a specified external tenant + network) and an Internal IP (from a specified + internal port). + WR_Neutron_Port_Forwarding.yaml - Creates a destination NAT type relationship + with an External IP and Port (of a router) and + an Internal IP and Port (of a VM). + OS_Neutron_Net.yaml - Creates a Tenant Network. Other resources such as + Port, Subnet and VM Instances can refer to a + Tenant Network. + OS_Neutron_Port.yaml - Creates a VM Instance Port on a particular Tenant + Network. VM Instance can use this object to create + its specific port / attachment to a particular + Tenant Network. + OS_Neutron_Router.yaml - Creates an IP Router for Tenant Networks. 
+ IP Interfaces on the Router to different Subnets + of particular Tenant Networks is done as shown + in OS_Neutron_RouterInterface.yaml and + OS_Neutron_RouterGateway.yaml. + OS_Neutron_RouterGateway.yaml - Creates the Router Gateway Interface for the + specified router and the specified external network. + The IP Address for the interface is allocated from + the external network's subnet and a default IP Route + is created using the gateway_ip of the subnet. + OS_Neutron_RouterInterface.yaml - Creates an IP Interface on an existing Router for + an existing IP Subnet. + OS_Neutron_SecurityGroup.yaml - Creates a Neutron Security Group, specifying the + ingress and/or egress ACL rules. Use + security_groups inside of OS::Nova::Server to assign + the security group to a VM. + OS_Neutron_Subnet.yaml - Creates an IP Subnet on a specified Tenant Network. + Requires the specification of the IP Subnet and mask, + and dhcp support. Can optionally specify DNS NameServers. + OS_Nova_Flavor.yaml - Creates a Nova Flavor describing the resource + requirements for a VM. I.e. specifying required RAM, + vCPUs, disk size, etc. . + OS_Nova_KeyPair.yaml - Creates a SSH key pair to enable secure initial login + to a launched VM. The created key pair should be + referenced in OS::Nova::Server. + OS_Nova_Server.yaml - Creates a VM Instance, specifying flavor, image and + network attachments. + OS_Nova_ServerGroup.yaml - Creates a Server Group; i.e. a container of multiple + VM Instances with group attributes such as + compute-node affinity or anti-affinity. + Use NovaSchedulerHints: + [{Key: 'group', Value: {Ref: }}] + inside of OS::Nova::Server to assign a VM Instance + to a ServerGroup. + WR_Neutron_ProviderNet.yaml - Creates a Provider Network specifying the name and + type (i.e. flat, vlan or vxlan). + WR_Neutron_ProviderNetRange.yaml - Creates a segment id range for a Provider Network; + specifying the providerNet, and the min and max values + of the segment id range. + WR_Neutron_QoSPolicy.yaml - Creates a Neutron QoS Policy which specifies a packet + scheduling weight. This QoS Policy can be referenced + by a Tenant Network to modify the scheduling weight of + AVS ingress traffic from VMs for this Tenant Network. + + + +More complex examples of heat templates, involving several different inter-related +heat resources can be found in hot/scenarios/ directory. + +./hot/scenarios: + BootFromCinder.yaml - A small single VM Instance scenario; intended to + illustrate the creation of a Cinder Volume and then + the launching / creation of a VM Instance using this + Cinder Volume as its boot device. + CFNPushStatsAutoScaling.yaml - An autoscaling stack of VMs that use cfn-push-stats + to emit custom meters which are used by autoscaling + policies. + CombinationAutoScaling.yaml - A single template that creates a simple VM In/Out + auto-scaling use case. A single Load Balancer VM is + created and an AutoScalingGroup of server VMs is + created which scales in or out based on link + utilization. + NestedAutoScale.yaml - A more complex In/Out autoscaling use case that + scales a Stack In/Out. A single Load Balancer VM is + created and an AutoScalingGroup of a Heat Stack + (containing a Cinder Volume and a VM) is created + which scales in or out based on link utilization. + NestedStack.yaml - The Heat Stack being scaled In/Out in the above + use case. A Stack containing a Cinder Volume and + a VM. 
+ Networking_and_Servers.yaml - Two different examples of creating VMs attached + to a Tenant Network containing two Subnets; + illustrating how to specify the exact subnet to + attach to, if required. + PortForwarding.yaml - An example of 3 VMs/Servers sharing 1 External IP + address, using a unique External Port to forward + traffic to the correct VM/Server. + VMAutoScaling.yaml - A template that creates a typical resource + Up/Down auto-scaling use case. The size of a VM in + terms of number of guest vcpus is scaled up or down + based on average cpu utilization. + LabSetup.yaml - A complex example of setting up a large virtual + LAB with a number of resources and resource types. + The Stack contains/creates 2 provider networks, + a keypair, 2 flavors, 4 networks, 5 subnets and + 2 routers with gateways setup. + +A demo illustrating a complex environment can be found in the hot/demo/ directory. + +./hot/demo: + scaleUpDown.yaml - A demo to illustrate scale up and down + +USAGE +===== +The HEAT template (YAML) files can be used to create a 'Stack' by using either the +HEAT CLI, Heat Rest APIs and/or the Heat Horizon panel (Orchestration). + +For example, to create a 'Stack' using the HEAT CLI: + heat stack-create -f + +Refer to the Titanium Cloud Administration Guide, Managing Stacks Section, for +additional information on using HEAT: + * Overview of YAML Template format, + * Description on how to pass 'parameters' to a HEAT stack, + * Description of TiS-specific Extensions to HEAT, + * Examples of how to launch a HEAT Stack using the CLI and Horizon, + * How to pass User Data to a VM/Server/Guest in a HEAT template, + * Overview of HEAT Autoscaling; both In/Out and Up/Down, + * Examples of using heat-cfntools in Guest to report Guest Metrics + to Ceilometer and optionally use for HEAT Autoscaling, + * and more. + + +CAVEATS +======= + +TiS-specific Extensions to HEAT are only provided for the Heat Orchestration Template +(HOT) format; NOT for the CloudFormatioN (CFN) format. HEAT in Titanium Cloud will still accept +and work with CFN-formatted heat templates; this CFN format is just simply not +extended with additional Titanium Cloud HEAT capabilities. + +Example CFN-formatted HEAT templates are no longer provided in this SDK Module. Example +CFN-formatted HEAT template files can be found at + https://github.com/openstack/heat-templates/tree/master/cfn + + + diff --git a/openstack/python-heat/python-heat/templates/hot/demo/README.txt b/openstack/python-heat/python-heat/templates/hot/demo/README.txt new file mode 100644 index 00000000..a76dbf2d --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/demo/README.txt @@ -0,0 +1,35 @@ +############################################################################## +# +# CPU Scaling UP / DOWN Demo +# +# +# Creates two VMs: +# +# 1. traffic-generator +# - iperf client sending traffic to network-appliance +# - pause / unpause this VM to control load on network-appliance VM +# - NOTE use ubuntu-cfntools.img +# (ubuntu 16.04 with cloud-init and cfn-tools installed) +# - NOTE cloud-init and cfn-init used to create required config files, and +# install required tools (i.e. iperf). +# +# 2. network-appliance +# - iperf server receiving and sending back traffic to iperf client +# - also starts 'dd ...' 
when traffic starts, to cause more load on system +# - this VM auto-scales cpu up and down based on cpu load cfn-pushed to Titanium +# - NOTE use ubuntu-cfntools.img +# (ubuntu 16.04 with cloud-init and cfn-tools installed) +# - NOTE cloud-init and cfn-init used to create required config files, and +# install required tools. +# ( i.e. iperf, Titanium Guest Scaling SDK Module, collectd, +# influxdb and grafana ) +# + +openstack stack create -t scaleUpDown.yaml demo + +watch "ceilometer sample-list -m net_appl_cpu_load -l 10; ceilometer alarm-list | fgrep net_appl" + +http://:3000 + +openstack stack delete demo + diff --git a/openstack/python-heat/python-heat/templates/hot/demo/cfn_cron b/openstack/python-heat/python-heat/templates/hot/demo/cfn_cron new file mode 100644 index 00000000..d40ab693 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/demo/cfn_cron @@ -0,0 +1 @@ +* * * * * root /etc/cfn/send_guest_metrics diff --git a/openstack/python-heat/python-heat/templates/hot/demo/gen-add-load-service b/openstack/python-heat/python-heat/templates/hot/demo/gen-add-load-service new file mode 100644 index 00000000..42e83ff5 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/demo/gen-add-load-service @@ -0,0 +1,105 @@ +#!/bin/sh +### BEGIN INIT INFO +# Provides: gen-add-load.sh +# Required-Start: $all +# Required-Stop: $remote_fs $syslog +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Start daemon at boot time +# Description: Enable service provided by daemon. +### END INIT INFO + +######################################################### +# +# Systemd file for 'gen-add-load.sh' +# +######################################################### + +dir="/usr/bin" +cmd="./gen-add-load.sh" +user="root" + +name=`basename $0` +pid_file="/var/run/$name.pid" +stdout_log="/var/log/$name.log" +stderr_log="/var/log/$name.err" + +get_pid() { + cat "$pid_file" +} + +is_running() { + [ -f "$pid_file" ] && ps -p `get_pid` > /dev/null 2>&1 +} + +case "$1" in + start) + if is_running; then + echo "Already started" + else + echo "Starting $name" + cd "$dir" + if [ -z "$user" ]; then + sudo $cmd >> "$stdout_log" 2>> "$stderr_log" & + else + sudo -u "$user" $cmd >> "$stdout_log" 2>> "$stderr_log" & + fi + echo $! > "$pid_file" + if ! is_running; then + echo "Unable to start, see $stdout_log and $stderr_log" + exit 1 + fi + fi + ;; + stop) + if is_running; then + echo -n "Stopping $name.." + kill `get_pid` + for i in 1 2 3 4 5 6 7 8 9 10 + # for i in `seq 10` + do + if ! is_running; then + break + fi + + echo -n "." 
+ sleep 1 + done + echo + + if is_running; then + echo "Not stopped; may still be shutting down or shutdown may have failed" + exit 1 + else + echo "Stopped" + if [ -f "$pid_file" ]; then + rm "$pid_file" + fi + fi + else + echo "Not running" + fi + ;; + restart) + $0 stop + if is_running; then + echo "Unable to stop, will not attempt to start" + exit 1 + fi + $0 start + ;; + status) + if is_running; then + echo "Running" + else + echo "Stopped" + exit 1 + fi + ;; + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 + ;; +esac + +exit 0 diff --git a/openstack/python-heat/python-heat/templates/hot/demo/gen-add-load.sh b/openstack/python-heat/python-heat/templates/hot/demo/gen-add-load.sh new file mode 100644 index 00000000..c71a3993 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/demo/gen-add-load.sh @@ -0,0 +1,48 @@ +#!/bin/sh + +# +# 'gen-add-load.sh' +# ----------------- +# +# Monitors incoming packets on ens3 interface with 'pkt-capture.sh' +# +# When incoming traffic goes above threshold of 1000 pkts/2seconds, +# starts a DD command to add more load than just handling the traffic. +# (i.e. mimicking doing some work on the traffic) +# +# When incoming traffic goes below threshold of 1000 pkts/2seconds, +# stops the DD command. +# + +command="dd if=/dev/zero of=/dev/null" +pid=0 + +addLoadRunning=false + +while true +do + nbPcks=`/usr/bin/pkt-capture.sh ens3 2` + echo $nbPcks + + if test $nbPcks -gt 1000 + then + if ( ! $addLoadRunning ) + then + echo "Starting DD command." + $command & + pid=$! + fi + echo "TRAFFIC RUNNING" + addLoadRunning=true + else + if ( $addLoadRunning ) + then + echo "Stopping DD command." + kill $pid + fi + echo "No Traffic" + addLoadRunning=false + fi + echo +done + diff --git a/openstack/python-heat/python-heat/templates/hot/demo/gen-traffic-service b/openstack/python-heat/python-heat/templates/hot/demo/gen-traffic-service new file mode 100644 index 00000000..860c11c7 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/demo/gen-traffic-service @@ -0,0 +1,105 @@ +#!/bin/sh +### BEGIN INIT INFO +# Provides: gen-add-load.sh +# Required-Start: $all +# Required-Stop: $remote_fs $syslog +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Start daemon at boot time +# Description: Enable service provided by daemon. +### END INIT INFO + +######################################################### +# +# Systemd file for 'gen-traffic.sh' +# +######################################################### + +dir="/usr/bin" +cmd="./gen-traffic.sh" +user="root" + +name=`basename $0` +pid_file="/var/run/$name.pid" +stdout_log="/var/log/$name.log" +stderr_log="/var/log/$name.err" + +get_pid() { + cat "$pid_file" +} + +is_running() { + [ -f "$pid_file" ] && ps -p `get_pid` > /dev/null 2>&1 +} + +case "$1" in + start) + if is_running; then + echo "Already started" + else + echo "Starting $name" + cd "$dir" + if [ -z "$user" ]; then + sudo $cmd >> "$stdout_log" 2>> "$stderr_log" & + else + sudo -u "$user" $cmd >> "$stdout_log" 2>> "$stderr_log" & + fi + echo $! > "$pid_file" + if ! is_running; then + echo "Unable to start, see $stdout_log and $stderr_log" + exit 1 + fi + fi + ;; + stop) + if is_running; then + echo -n "Stopping $name.." + kill `get_pid` + for i in 1 2 3 4 5 6 7 8 9 10 + # for i in `seq 10` + do + if ! is_running; then + break + fi + + echo -n "." 
+ sleep 1 + done + echo + + if is_running; then + echo "Not stopped; may still be shutting down or shutdown may have failed" + exit 1 + else + echo "Stopped" + if [ -f "$pid_file" ]; then + rm "$pid_file" + fi + fi + else + echo "Not running" + fi + ;; + restart) + $0 stop + if is_running; then + echo "Unable to stop, will not attempt to start" + exit 1 + fi + $0 start + ;; + status) + if is_running; then + echo "Running" + else + echo "Stopped" + exit 1 + fi + ;; + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 + ;; +esac + +exit 0 diff --git a/openstack/python-heat/python-heat/templates/hot/demo/gen-traffic.sh b/openstack/python-heat/python-heat/templates/hot/demo/gen-traffic.sh new file mode 100644 index 00000000..ffad1eca --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/demo/gen-traffic.sh @@ -0,0 +1,21 @@ +#!/bin/sh + +# +# 'gen-traffic.sh' +# ----------------- +# +# in a forever loop: +# call iperf client, sending to 10.10.10.50 (fixed ip of iperf server, network-appliance) +# for 600 seconds. +# ( iperf -c ... seems to sometimes get hung if using a longer time interval ) +# + +while true +do + date + echo "Starting traffic ..." + /usr/bin/iperf -c 10.10.10.50 -t 600 + date + echo "Traffic stopped." + echo +done diff --git a/openstack/python-heat/python-heat/templates/hot/demo/get_cpu_load b/openstack/python-heat/python-heat/templates/hot/demo/get_cpu_load new file mode 100644 index 00000000..ce5d8203 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/demo/get_cpu_load @@ -0,0 +1,204 @@ +#!/bin/bash +############################################################################### +# +# Copyright (c) 2018 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +# +############################################################################### +# +# Description: +# This displays total CPU occupancy based on hi-resolution timings. +# +############################################################################### +# Define minimal path +PATH=/bin:/usr/bin:/usr/local/bin + +# NOTE: Comment out LOG_DEBUG and DEBUG_METHODS in production version. +# Uncomment LOG_DEBUG to enable debug print statements +##LOG_DEBUG=1 + +# Uncomment DEBUG_METHODS to enable test of methods +##DEBUG_METHODS=1 + +SCHEDSTAT_VERSION=$(cat /proc/schedstat 2>/dev/null | awk '/version/ {print $2;}') +NPROCESSORS_ONLN=$(getconf _NPROCESSORS_ONLN) +ARCH=$(arch) + +# NOTE: we only support 64-bit math due to long integers of schedstat +SUPPORTED_SCHEDSTAT_VERSION=15 +SUPPORTED_ARCH='x86_64' + +# Customize sleep interval based on how responsive we want scaling to react. +# This is set small for demonstration purposes. +SLEEP_INTERVAL="1.0s" + +# Log if debug is enabled via LOG_DEBUG +function log_debug +{ + if [ ! 
-z "${LOG_DEBUG}" ]; then + logger -p debug -t "$0[${PPID}]" -s "$@" 2>&1 + fi +} + +# Log unconditionally to STDERR +function log_error +{ + logger -p error -t "$0[${PPID}]" -s "$@" +} + +# Log unconditionally to STDOUT +function log +{ + logger -p info -t "$0[${PPID}]" -s "$@" 2>&1 +} + +function read_proc_schedstat +{ + local _outvar=$1 + local _result # Use some naming convention to avoid OUTVARs to clash + local _cpu + local _cputime + _result=0 + while read -r line + do + # version 15: cputime is 7th field + if [[ $line =~ ^cpu([[:digit:]]+)[[:space:]]+[[:digit:]]+[[:space:]]+[[:digit:]]+[[:space:]]+[[:digit:]]+[[:space:]]+[[:digit:]]+[[:space:]]+[[:digit:]]+[[:space:]]+[[:digit:]]+[[:space:]]+([[:digit:]]+)[[:space:]]+ ]] + then + _cpu=${BASH_REMATCH[1]} + _cputime=${BASH_REMATCH[2]} + ((_result += _cputime)) + fi + done < "/proc/schedstat" + + eval $_outvar=\$_result # Instead of just =$_result +} + +function occupancy_loadavg() +{ + # NOTE: This method is not recommended, as the feedback is slow and + # based on the loadavg 1-minute decay. The loadavg also includes + # IoWait which isn't desired. This does not require large integers. + + # Calculate total CPU occupancy based on 1 minute loadavg. + ldavg_1m=$(cat /proc/loadavg 2>/dev/null | awk '{print $1}') + + # Calculate total CPU occupancy (%) + occ=$(awk -v ldavg=${ldavg_1m} -v N=${NPROCESSORS_ONLN} \ + 'BEGIN {printf "%.1f\n", 100.0 * ldavg / N;}' + ) + log_debug "CPU Occupancy(loadavg): ${occ}" + echo ${occ} +} + +function occupancy_jiffie() +{ + # NOTE: This method is not recommended, as the per-cpu stats are not + # properly updated by the kernel after scaling VM back up. + # This routine uses simple small integer math. + + # Calculate total CPU occupancy based on jiffie stats. + + read cpu user nice system idle iowait irq softirq steal guest < /proc/stat + j_occ_0=$((user+system+nice+irq+softirq+steal)) + j_tot_0=$((user+system+nice+irq+softirq+steal+idle+iowait)) + + sleep ${SLEEP_INTERVAL} + + read cpu user nice system idle iowait irq softirq steal guest < /proc/stat + j_occ_1=$((user+system+nice+irq+softirq+steal)) + j_tot_1=$((user+system+nice+irq+softirq+steal+idle+iowait)) + + # Calculate total CPU occupancy (%) + occ=$(( 100 * (j_occ_1 - j_occ_0) / (j_tot_1 - j_tot_0) )) + + log_debug "CPU Occupancy(jiffie): ${occ}" + echo ${occ} +} + +function occupancy_schedstat() +{ + # NOTE: This method is recommended as timings are high resolution. + # However the timings require large integers, so we are assuming + # we require 64-bit guest. + + # Calculate total CPU occupancy based on uptime stats + local cputime_0='' + local cputime_1='' + + read t_elapsed_0 t_idle_0 < /proc/uptime + read_proc_schedstat cputime_0 + + sleep ${SLEEP_INTERVAL} + + read t_elapsed_1 t_idle_1 < /proc/uptime + read_proc_schedstat cputime_1 + + # Calculate total CPU occupancy (%) + occ=$(awk -v te0=${t_elapsed_0} -v te1=${t_elapsed_1} \ + -v tc0=${cputime_0} -v tc1=${cputime_1} \ + -v N=${NPROCESSORS_ONLN} \ + 'BEGIN {dt_ms = N*(te1 - te0)*1E3; cputime_ms = (tc1 - tc0)/1.0E6; + occ = 100.0 * cputime_ms / dt_ms; + printf "%.1f\n", occ;}' + ) + log_debug "CPU Occupancy(schedstat): ${occ}" + echo ${occ} +} + +function occupancy_uptime() +{ + # NOTE: This method is is very similar to the loadavg method in that + # IoWait is treated as load, but the occupancy is instantaneous. + # This does not require large integers. 
+ + # Calculate total CPU occupancy based on uptime/idle stats + read t_elapsed_0 t_idle_0 < /proc/uptime + + sleep ${SLEEP_INTERVAL} + + read t_elapsed_1 t_idle_1 < /proc/uptime + + # Calculate total CPU occupancy (%) + occ=$(awk -v te0=${t_elapsed_0} -v ti0=${t_idle_0} \ + -v te1=${t_elapsed_1} -v ti1=${t_idle_1} \ + -v N=${NPROCESSORS_ONLN} \ + 'BEGIN {dt = N*(te1 - te0); di = ti1 - ti0; cputime = dt - di; + occ = 100.0 * cputime / dt; + printf "%.1f\n", occ;}' + ) + log_debug "CPU Occupancy(uptime): ${occ}" + echo ${occ} +} + +############################################################################### +# +# MAIN Program +# +############################################################################### + +if [ ! -z "${DEBUG_METHODS}" ] +then + log_debug "Testing occupancy_loadavg" + occupancy_loadavg + + log_debug "Testing occupancy_jiffie" + occupancy_jiffie + + log_debug "Testing occupancy_uptime" + occupancy_uptime + + log_debug "Testing occupancy_schedstat" + occupancy_schedstat +fi + +log_debug "Discovered arch=${ARCH}, schedstat version=${SCHEDSTAT_VERSION}." +if [[ ${ARCH} == ${SUPPORTED_ARCH} ]] && [[ ${SCHEDSTAT_VERSION} -eq ${SUPPORTED_SCHEDSTAT_VERSION} ]] +then + occupancy_schedstat +else + occupancy_uptime +fi + +exit 0 diff --git a/openstack/python-heat/python-heat/templates/hot/demo/iperf-server-service b/openstack/python-heat/python-heat/templates/hot/demo/iperf-server-service new file mode 100644 index 00000000..542ecaff --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/demo/iperf-server-service @@ -0,0 +1,105 @@ +#!/bin/sh +### BEGIN INIT INFO +# Provides: iperf +# Required-Start: $all +# Required-Stop: $remote_fs $syslog +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Start daemon at boot time +# Description: Enable service provided by daemon. +### END INIT INFO + +######################################################### +# +# Systemd file for 'iperf -s' on network-appliance VM +# +######################################################### + +dir="/usr/bin" +cmd="iperf -s" +user="root" + +name=`basename $0` +pid_file="/var/run/$name.pid" +stdout_log="/var/log/$name.log" +stderr_log="/var/log/$name.err" + +get_pid() { + cat "$pid_file" +} + +is_running() { + [ -f "$pid_file" ] && ps -p `get_pid` > /dev/null 2>&1 +} + +case "$1" in + start) + if is_running; then + echo "Already started" + else + echo "Starting $name" + cd "$dir" + if [ -z "$user" ]; then + sudo $cmd >> "$stdout_log" 2>> "$stderr_log" & + else + sudo -u "$user" $cmd >> "$stdout_log" 2>> "$stderr_log" & + fi + echo $! > "$pid_file" + if ! is_running; then + echo "Unable to start, see $stdout_log and $stderr_log" + exit 1 + fi + fi + ;; + stop) + if is_running; then + echo -n "Stopping $name.." + kill `get_pid` + for i in 1 2 3 4 5 6 7 8 9 10 + # for i in `seq 10` + do + if ! is_running; then + break + fi + + echo -n "." 
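+                # allow up to 10 seconds for iperf to exit before reporting failure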
+ sleep 1 + done + echo + + if is_running; then + echo "Not stopped; may still be shutting down or shutdown may have failed" + exit 1 + else + echo "Stopped" + if [ -f "$pid_file" ]; then + rm "$pid_file" + fi + fi + else + echo "Not running" + fi + ;; + restart) + $0 stop + if is_running; then + echo "Unable to stop, will not attempt to start" + exit 1 + fi + $0 start + ;; + status) + if is_running; then + echo "Running" + else + echo "Stopped" + exit 1 + fi + ;; + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 + ;; +esac + +exit 0 diff --git a/openstack/python-heat/python-heat/templates/hot/demo/make_load b/openstack/python-heat/python-heat/templates/hot/demo/make_load new file mode 100644 index 00000000..f2e37907 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/demo/make_load @@ -0,0 +1,4 @@ +#!/bin/sh +# Generate maximum CPU load for a core +# Launch this X times for X cores to get 100% utilization +dd if=/dev/urandom of=/dev/null & diff --git a/openstack/python-heat/python-heat/templates/hot/demo/network-appliance-install.sh b/openstack/python-heat/python-heat/templates/hot/demo/network-appliance-install.sh new file mode 100644 index 00000000..5a328ffc --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/demo/network-appliance-install.sh @@ -0,0 +1,215 @@ +#!/bin/bash -v + +######################################################### +# +# Install script for network-appliance VM +# called thru cloud-init +# +######################################################### + +echo "Starting setup of network appliance ..." >> /var/log/heat_setup.txt + +echo >> /var/log/heat_setup.txt +echo "Installing iperf ..." >> /var/log/heat_setup.txt +apt-get -y install iperf >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt + +echo >> /var/log/heat_setup.txt +echo "Installing python-pip ..." >> /var/log/heat_setup.txt +apt-get -y install gcc python-dev python-pip >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt + +echo >> /var/log/heat_setup.txt +echo "Installing psutil ..." >> /var/log/heat_setup.txt +pip install psutil >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt + + +# Create sym links to standard location for cfn tools in an aws environment +echo >> /var/log/heat_setup.txt +echo "Setting up symlinks" >> /var/log/heat_setup.txt +cfn-create-aws-symlinks --source /usr/bin >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt + +# invoke cfn-init which will extract the cloudformation metadata from the userdata +echo >> /var/log/heat_setup.txt +echo "Setting up cfn-init " >> /var/log/heat_setup.txt +/usr/bin/cfn-init >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt + +echo >> /var/log/heat_setup.txt +echo "Installing Guest SDK ..." 
>> /var/log/heat_setup.txt +git clone https://github.com/Wind-River/titanium-cloud.git >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt +cat > /lib/systemd/system/guest-agent.service << EOF +[Unit] +Description=Guest Agent +After=cloud-init.service + +[Service] +ExecStart=/usr/sbin/guest_agent +Type=simple +Restart=always +RestartSec=0 + +[Install] +WantedBy=guest-scale-agent.service +WantedBy=multi-user.target + +EOF +cd titanium-cloud/guest-API-SDK/17.06/ +apt-get -y install build-essential libjson0 libjson0-dev >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt +cd wrs-server-group-2.0.4/ +mkdir obj bin lib +make >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt +cp bin/* /usr/sbin +cp lib/libguesthostmsg.so.2.0.4 lib/libservergroup.so.2.0.4 /usr/lib/ +ldconfig +cd ../wrs-guest-scale-2.0.4/ +mkdir obj bin lib +make >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt +cp bin/guest_scale_agent /usr/sbin +cd scripts/ +cp app_scale_helper offline_cpus /usr/sbin +chmod 755 init_offline_cpus offline_cpus +cp init_offline_cpus /etc/init.d +cp guest-scale-agent.service offline-cpus.service /lib/systemd/system/ +systemctl enable guest-agent.service +systemctl enable guest-scale-agent.service +systemctl enable offline-cpus.service +systemctl start guest-agent.service +systemctl start guest-scale-agent.service + + +echo >> /var/log/heat_setup.txt +echo "Starting collectd and grafana install ..." >> /var/log/heat_setup.txt + +apt-get -y update >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt +apt-get -y dist-upgrade >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt +apt-get -y install openssh-server >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt + +echo >> /var/log/heat_setup.txt +echo "Setup gpg keys ..." >> /var/log/heat_setup.txt +gpg --recv-keys 3994D24FB8543576 >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt +gpg --recv-keys 3994D24FB8543576 >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt +gpg --recv-keys 3994D24FB8543576 >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt +sh -c 'gpg --export -a 3994D24FB8543576 | apt-key add -' >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt +sh -c 'gpg --export -a 3994D24FB8543576 | apt-key add -' >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt +sh -c 'gpg --export -a 3994D24FB8543576 | apt-key add -' >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt + +echo >> /var/log/heat_setup.txt +echo "Get influxdb key and packagecloud key ..." >> /var/log/heat_setup.txt +# don't use latest influxdb yet, it has bugs +# sh -c 'curl -sL https://repos.influxdata.com/influxdb.key | apt-key add -' >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt +sh -c 'curl https://packagecloud.io/gpg.key | apt-key add -' >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt + +echo >> /var/log/heat_setup.txt +echo "Setup collectd, influxdb and grafana .list files ..." >> /var/log/heat_setup.txt +echo "deb http://pkg.ci.collectd.org/deb xenial collectd-5.8" > /etc/apt/sources.list.d/collectd.list +# don't use latest influxdb yet, it has bugs +# echo "deb https://repos.influxdata.com/debian xenial stable" > /etc/apt/sources.list.d/influxdb.list +echo "deb https://packagecloud.io/grafana/stable/debian/ jessie main" > /etc/apt/sources.list.d/grafana.list + +echo >> /var/log/heat_setup.txt +echo "apt-get update ..." >> /var/log/heat_setup.txt +apt-get -y update >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt + +echo >> /var/log/heat_setup.txt +echo "apt-cache ..." 
>> /var/log/heat_setup.txt
+apt-cache madison collectd >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
+apt-cache madison influxdb >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
+apt-cache madison influxdb-client >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
+apt-cache madison grafana >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
+
+echo >> /var/log/heat_setup.txt
+echo "apt-get install collectd ..." >> /var/log/heat_setup.txt
+apt-get -y install collectd >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
+
+echo >> /var/log/heat_setup.txt
+echo "apt-get install influxdb ..." >> /var/log/heat_setup.txt
+apt-get -y install influxdb >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
+
+echo >> /var/log/heat_setup.txt
+echo "apt-get install influxdb-client ..." >> /var/log/heat_setup.txt
+apt-get -y install influxdb-client >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
+
+echo >> /var/log/heat_setup.txt
+echo "apt-get install grafana ..." >> /var/log/heat_setup.txt
+apt-get -y install grafana >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
+
+echo >> /var/log/heat_setup.txt
+echo "apt-get cleanup ..." >> /var/log/heat_setup.txt
+apt-get -y update >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
+apt-get -y dist-upgrade >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
+apt-get -y autoclean >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
+apt-get -y autoremove >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt
+
+mv /etc/collectd/collectd.conf /etc/collectd/collectd.conf.ORIG
+cat >> /etc/collectd/collectd.conf << EOF
+
+LoadPlugin network
+<Plugin network>
+  Server "127.0.0.1" "25826"
+</Plugin>
+
+<Plugin cpu>
+  ReportByCpu true
+  ReportByState false
+  ValuesPercentage false
+  ReportNumCpu true
+</Plugin>
+
+EOF
+
+
+cp /etc/influxdb/influxdb.conf /etc/influxdb/influxdb.conf.ORIG
+sed -i -e '/^\[collectd\].*/,/enabled = false/d' /etc/influxdb/influxdb.conf
+cat >> /etc/influxdb/influxdb.conf << EOF
+
+[collectd]
+  enabled = true
+  bind-address = ":25826"
+  database = "collectd"
+  typesdb = "/usr/share/collectd/types.db"
+EOF
+
+echo >> /var/log/heat_setup.txt
+echo "start grafana-server ..." >> /var/log/heat_setup.txt
+systemctl start grafana-server
+
+echo >> /var/log/heat_setup.txt
+echo "start influxdb ..." >> /var/log/heat_setup.txt
+systemctl start influxdb
+
+echo >> /var/log/heat_setup.txt
+echo "start collectd ..." >> /var/log/heat_setup.txt
+systemctl start collectd
+
+echo >> /var/log/heat_setup.txt
+echo "enable grafana-server ..." >> /var/log/heat_setup.txt
+systemctl enable grafana-server.service
+
+echo >> /var/log/heat_setup.txt
+echo "enable influxdb.service ..." >> /var/log/heat_setup.txt
+systemctl enable influxdb.service
+
+echo >> /var/log/heat_setup.txt
+echo "enable collectd.service ..." >> /var/log/heat_setup.txt
+systemctl enable collectd.service
+
+
+echo >> /var/log/heat_setup.txt
+echo "Starting network appliance server service ..." >> /var/log/heat_setup.txt
+update-rc.d iperf-server-service defaults 97 03 >> /var/log/heat_setup.txt
+service iperf-server-service start >> /var/log/heat_setup.txt
+
+echo >> /var/log/heat_setup.txt
+echo "Starting gen-add-load service ..." >> /var/log/heat_setup.txt
+update-rc.d gen-add-load-service defaults 97 03 >> /var/log/heat_setup.txt
+service gen-add-load-service start >> /var/log/heat_setup.txt
+
+sleep 5
+echo >> /var/log/heat_setup.txt
+echo "restart collectd ..." >> /var/log/heat_setup.txt
+systemctl restart collectd
+
+sleep 5
+echo >> /var/log/heat_setup.txt
+echo "restart influxdb ..."
>> /var/log/heat_setup.txt +systemctl restart influxdb + +echo "Finished user data setup" >> /var/log/heat_setup.txt diff --git a/openstack/python-heat/python-heat/templates/hot/demo/pkt-capture.sh b/openstack/python-heat/python-heat/templates/hot/demo/pkt-capture.sh new file mode 100644 index 00000000..fad8b4fc --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/demo/pkt-capture.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +######################################################### +# +# pkt-capture.sh +# +# Measures the received packets on specified interface +# for specified interval (in seconds). +# +######################################################### + +pcksFile="/sys/class/net/$1/statistics/rx_packets" +nbPcks=`cat $pcksFile` +sleep $2 +echo $(expr `cat $pcksFile` - $nbPcks) diff --git a/openstack/python-heat/python-heat/templates/hot/demo/scaleUpDown.yaml b/openstack/python-heat/python-heat/templates/hot/demo/scaleUpDown.yaml new file mode 100644 index 00000000..9317469d --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/demo/scaleUpDown.yaml @@ -0,0 +1,316 @@ +################################################################################ +## Copyright (c) 2018 Wind River Systems, Inc. +## +## SPDX-License-Identifier: Apache-2.0 +## +################################################################################# + +heat_template_version: 2013-05-23 + +################################################################################ +parameters: + + FLAVOR: + description: Nova flavor to use for traffic-generator VM. (nova flavor-list) + type: string + default: small.2c.2G.20G + constraints: + - custom_constraint: nova.flavor + + SCALING_FLAVOR: + description: Nova flavor to use for network-appliance VM. (nova flavor-list) + type: string + default: scalingFlavor + constraints: + - custom_constraint: nova.flavor + + IMAGE: + description: Name of the glance image to create a cinder volume for (glance image-list) + NOTE - this MUST be an ubuntu 16.04 image with cloud-init and cfn-init + type: string + default: ubuntu-cfntools.img + constraints: + - custom_constraint: glance.image + + EXT_NETWORK: + description: Name of the external network to use (neutron net-list) + type: string + default: external-net0 + constraints: + - custom_constraint: neutron.network + + INT_NETWORK: + description: Name of the internal network to use (neutron net-list) + type: string + default: admin-internal-net0 + constraints: + - custom_constraint: neutron.network + + METER_NAME: + description: Meter that VM will cfn-push, and + Ceilometer meter to query when determining autoscaling + type: string + default: net_appl_cpu_load + + METER_UNIT: + description: Unit for the meter + type: string + default: '%' + + HIGH_VALUE: + description: Metric value that will trigger a scale up if exceeded + type: string + default: '50' + + LOW_VALUE: + description: Metric value that will trigger a scale down if below + type: string + default: '20' + + +################################################################################ +resources: + + + ################################################################################ + # trafficGenerator VM + ################################################################################ + + TrafficGeneratorVolume: + type: OS::Cinder::Volume + properties: + name: traffic-generator-DISK + image: { get_param: IMAGE } + size: 20 + + TrafficGeneratorFloatingIP: + type: OS::Neutron::FloatingIP + properties: + floating_network: { get_param: EXT_NETWORK } + + 
TrafficGeneratorVM: + type: OS::Nova::Server + metadata: + wrs-groupindex-mode: true + AWS::CloudFormation::Init: + config: + files: + /usr/bin/gen-traffic.sh: + content: + get_file: ./gen-traffic.sh + mode: '000700' + owner: root + group: root + /etc/init.d/gen-traffic-service: + content: + get_file: ./gen-traffic-service + mode: '000700' + owner: root + group: root + properties: + name: traffic-generator + flavor: { get_param: FLAVOR } + block_device_mapping: + - device_name: vda + delete_on_termination: true + volume_id: { get_resource: TrafficGeneratorVolume } + networks: + - { network: { get_param: INT_NETWORK } , fixed_ip: 10.10.10.40, vif-model: virtio} + user_data_format: HEAT_CFNTOOLS + user_data: + get_file: ./traffic-generator-install.sh + + TrafficGeneratorFloatingIPAssoc: + type: OS::Neutron::FloatingIPAssociation + properties: + floatingip_id: { get_resource: TrafficGeneratorFloatingIP } + port_id: { get_attr: [TrafficGeneratorVM, addresses, { get_param: INT_NETWORK }, 0, port] } + + + + + + ################################################################################ + # network-appliance VM + ################################################################################ + + CfnUser: + type: AWS::IAM::User + + WebServerKeys: + type: AWS::IAM::AccessKey + properties: + UserName: { get_resource: CfnUser } + + NetworkApplianceVolume: + type: OS::Cinder::Volume + properties: + name: network-appliance-DISK + image: { get_param: IMAGE } + size: 20 + + NetworkApplianceFloatingIP: + type: OS::Neutron::FloatingIP + properties: + floating_network: { get_param: EXT_NETWORK } + + NetworkApplianceVM: + type: OS::Nova::Server + # Special Note: CFN related metadata is located at the resource level (not as a property) + metadata: + wrs-groupindex-mode: true + AWS::CloudFormation::Init: + config: + files: + /etc/cfn/cfn-credentials: + content: + str_replace: + template: | + AWSAccessKeyId=_keyid_ + AWSSecretKey=_secret_ + params: + _keyid_: { get_resource: WebServerKeys } + _secret_: { get_attr: [WebServerKeys, SecretAccessKey] } + mode: '000400' + owner: root + group: root + /etc/cfn/make_load: + content: + get_file: ./make_load + mode: '000700' + owner: root + group: root + /etc/cfn/get_cpu_load: + content: + get_file: ./get_cpu_load + mode: '000700' + owner: root + group: root + /etc/cfn/send_guest_metrics: + content: + str_replace: + template: | + #!/bin/sh + METRIC=`/etc/cfn/get_cpu_load` + /opt/aws/bin/cfn-push-stats --metric _metername_ --value ${METRIC} --units _unit_ + sleep 9 + METRIC=`/etc/cfn/get_cpu_load` + /opt/aws/bin/cfn-push-stats --metric _metername_ --value ${METRIC} --units _unit_ + sleep 9 + METRIC=`/etc/cfn/get_cpu_load` + /opt/aws/bin/cfn-push-stats --metric _metername_ --value ${METRIC} --units _unit_ + sleep 9 + METRIC=`/etc/cfn/get_cpu_load` + /opt/aws/bin/cfn-push-stats --metric _metername_ --value ${METRIC} --units _unit_ + sleep 9 + METRIC=`/etc/cfn/get_cpu_load` + /opt/aws/bin/cfn-push-stats --metric _metername_ --value ${METRIC} --units _unit_ + sleep 9 + METRIC=`/etc/cfn/get_cpu_load` + /opt/aws/bin/cfn-push-stats --metric _metername_ --value ${METRIC} --units _unit_ + params: + _metername_: { get_param: METER_NAME } + _unit_: { get_param: METER_UNIT } + mode: '000700' + owner: root + group: root + /etc/cron.d/cfn_cron: + content: + get_file: ./cfn_cron + mode: '000600' + owner: root + group: root + /usr/bin/pkt-capture.sh: + content: + get_file: ./pkt-capture.sh + mode: '000700' + owner: root + group: root + /usr/bin/gen-add-load.sh: + content: + 
get_file: ./gen-add-load.sh + mode: '000700' + owner: root + group: root + /etc/init.d/gen-add-load-service: + content: + get_file: ./gen-add-load-service + mode: '000700' + owner: root + group: root + /etc/init.d/iperf-server-service: + content: + get_file: iperf-server-service + mode: '000700' + owner: root + group: root + properties: + name: network-appliance + flavor: { get_param: SCALING_FLAVOR } + metadata: {"metering.stack_id": {get_param: "OS::stack_id"} } + block_device_mapping: + - device_name: vda + delete_on_termination: true + volume_id: { get_resource: NetworkApplianceVolume } + networks: + - { network: { get_param: INT_NETWORK } , fixed_ip: 10.10.10.50, vif-model: virtio} + user_data_format: HEAT_CFNTOOLS + user_data: + get_file: ./network-appliance-install.sh + + NetworkApplianceFloatingIPAssoc: + type: OS::Neutron::FloatingIPAssociation + properties: + floatingip_id: { get_resource: NetworkApplianceFloatingIP } + port_id: { get_attr: [NetworkApplianceVM, addresses, { get_param: INT_NETWORK }, 0, port] } + + + ################################################################################ + # SETUP SCALING POLICIES + + CPUAlarmHigh: + type: OS::Ceilometer::Alarm + properties: + description: Scale up if the 1 minute avg for the meter is above the threshold + meter_name: { get_param: METER_NAME } + statistic: avg + period: '60' + evaluation_periods: '1' + threshold: { get_param: HIGH_VALUE } + repeat_actions: True + alarm_actions: + - {get_attr: [NetworkApplianceVmScaleUpPolicy, AlarmUrl]} + comparison_operator: gt + matching_metadata: {'stack_id': {get_param: "OS::stack_id" }} + + CPUAlarmLow: + type: OS::Ceilometer::Alarm + properties: + description: Scale down if the 1 minute avg for the meter is below the threshold + meter_name: { get_param: METER_NAME } + statistic: avg + period: '60' + evaluation_periods: '1' + threshold: { get_param: LOW_VALUE } + repeat_actions: True + alarm_actions: + - {get_attr: [NetworkApplianceVmScaleDownPolicy, AlarmUrl]} + comparison_operator: lt + matching_metadata: {'stack_id': {get_param: "OS::stack_id" }} + + NetworkApplianceVmScaleUpPolicy: + type: OS::WR::ScalingPolicy + properties: + ServerName: {get_resource: NetworkApplianceVM} + ScalingResource: 'cpu' + ScalingDirection: 'up' + Cooldown: '60' + + NetworkApplianceVmScaleDownPolicy: + type: OS::WR::ScalingPolicy + properties: + ServerName: {get_resource: NetworkApplianceVM} + ScalingResource: 'cpu' + ScalingDirection: 'down' + Cooldown: '60' + diff --git a/openstack/python-heat/python-heat/templates/hot/demo/traffic-generator-install.sh b/openstack/python-heat/python-heat/templates/hot/demo/traffic-generator-install.sh new file mode 100644 index 00000000..b0c85076 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/demo/traffic-generator-install.sh @@ -0,0 +1,27 @@ +#!/bin/bash -v + +######################################################### +# +# Install script for traffic-generator VM +# called thru cloud-init +# +######################################################### + +echo "Starting setup of traffic generator ..." >> /var/log/heat_setup.txt + +echo "Installing iperf ..." 
>> /var/log/heat_setup.txt +apt-get -y install iperf >> /var/log/heat_setup.txt 2>> /var/log/heat_setup.txt + +# Create sym links to standard location for cfn tools in an aws environment +echo "Setting up symlinks" >> /var/log/heat_setup.txt +cfn-create-aws-symlinks --source /usr/bin + +# invoke cfn-init which will extract the cloudformation metadata from the userdata +echo "Setting up cfn-init " >> /var/log/heat_setup.txt +/usr/bin/cfn-init >> /var/log/heat_setup.txt + +echo "Starting gen-traffic service ..." >> /var/log/heat_setup.txt +update-rc.d gen-traffic-service defaults 97 03 >> /var/log/heat_setup.txt +service gen-traffic-service start >> /var/log/heat_setup.txt + +echo "Finished setup of traffic generator." >> /var/log/heat_setup.txt diff --git a/openstack/python-heat/python-heat/templates/hot/scenarios/BootFromCinder.yaml b/openstack/python-heat/python-heat/templates/hot/scenarios/BootFromCinder.yaml new file mode 100644 index 00000000..3346c188 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/scenarios/BootFromCinder.yaml @@ -0,0 +1,118 @@ +################################################################################ +# Copyright (c) 2013 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objectives: +# Create a cinder volume from a glance image +# Boot a nova server from the glance image +# +# Pre-Reqs: +# A keypair called: controller-0 (nova keypair-list) +# A nova flavor called: m1.tiny (nova flavor-list) +# A glance image called: wrl6 (glance image-list) +# 2 networks called: private-net0 and internal-net0 (neutron net-list) +# +# Mandatory Template Parameters: +# None +# +# Tenant Considerations: +# None +# +# Sample CLI syntax: +# heat stack-create -f BootFromCinder.yaml STACK +# +# Expected Outcome: +# A new 1GB cinder volume called: cinder-volume +# cinder list +# A new nova instance called: cinder-vm +# nova list +# +################################################################################ +heat_template_version: 2015-04-30 + +description: > + Demonstrate launching an instance from a cinder volume + +parameters: + + VOLUME_NAME: + description: name for the new cinder volume + type: string + default: cinder-volume + + VOLUME_SIZE: + description: size for the new cinder volume + type: number + default: 2 + + VM_NAME: + description: name for the new VM + type: string + default: cinder-vm + + KEYPAIR: + description: keypair to use. (nova keypair-list) + type: string + default: controller-0 + constraints: + - custom_constraint: nova.keypair + + KEYPAIR_ADMIN_USER: + type: string + description: Name of user account to inject ssh keys from keypair + default: 'ec2-user' + + FLAVOR: + description: Nova flavor to use. 
(nova flavor-list) + type: string + default: m1.tiny + constraints: + - custom_constraint: nova.flavor + + IMAGE: + description: Glance image to create cinder volume (glance image-list) + type: string + default: wrl6 + constraints: + - custom_constraint: glance.image + + PRIVATE_NET: + description: private network to use (neutron net-list) + type: string + default: private-net0 + constraints: + - custom_constraint: neutron.network + + INTERNAL_NET: + description: internal network to use (neutron net-list) + type: string + default: internal-net0 + constraints: + - custom_constraint: neutron.network + +resources: + + Cinder_Volume: + type: OS::Cinder::Volume + properties: + description: A bootable cinder image + name: { get_param: VOLUME_NAME } + image: { get_param: IMAGE} + size: { get_param: VOLUME_SIZE } + + Nova_Server: + type: OS::Nova::Server + properties: + name: { get_param: VM_NAME } + key_name: { get_param: KEYPAIR } + admin_user: { get_param: KEYPAIR_ADMIN_USER } + flavor: { get_param: FLAVOR } + block_device_mapping: + - device_name: vda + volume_id: { get_resource: Cinder_Volume } + networks: + - network: { get_param: PRIVATE_NET} + - network: { get_param: INTERNAL_NET} diff --git a/openstack/python-heat/python-heat/templates/hot/scenarios/CFNPushStatsAutoScaling.yaml b/openstack/python-heat/python-heat/templates/hot/scenarios/CFNPushStatsAutoScaling.yaml new file mode 100644 index 00000000..961171b2 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/scenarios/CFNPushStatsAutoScaling.yaml @@ -0,0 +1,305 @@ +# Copyright (c) 2013 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrates creating: +# An autoscaling stack of VMs that use cfn-push-stats to emit samples from +# within the guest. +# The name of the stack will prefix the custom meter +# This template requires OAM network setup properly to allow communication +# between the VMs to the controller +# +# Pre-Reqs: +# The VM must be able to communicate with the controller +# Normal lab setup. Capable of launching 3 VMs +# A keypair called: controller-0. (nova keypair-list) +# A flavor called: small (nova flavor-list) +# A glance image called: tis-centos-guest (glance image-list) +# A network called: internal-net0 (neutron net-list) +# A nested template file CFNPushStats.yaml in the same folder as this yaml. +# +# Optional Template Parameters: +# KEYPAIR: A keypair setup for the current user (nova keypair-list) +# KEYPAIR_ADMIN_USER: Name of user to inject ssh keys from keypair on the VM +# FLAVOR: A nova flavor name or UUID for the VMs (nova flavor-list) +# IMAGE: A glance image name or UUID for launching the VMs +# (glance image-list) +# PUBLIC_NETWORK: Name or UUID of the public network to use for the VMs +# (neutron net-list) +# INTERNAL_NETWORK: Name or UUID of the internal network to use for the VMs +# (neutron net-list) +# METER_NAME: Name of the new ceilometer meter to use to trigger autoscaling +# METER_UNIT: Unit of the new ceilometer meter to use to trigger autoscaling +# HIGH_VALUE: Value for the meter to trigger a scale up. +# LOW_VALUE: Value for the meter to trigger a scale down. 
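+#
+# With the defaults above, each VM pushes a meter named <stack_name>_vm_stat
+# once per minute (via /etc/cron.d/cfn_cron); to inspect it, for example:
+#   ceilometer sample-list -m <stack_name>_vm_stat -l 10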
+# +# Tenant Considerations: +# This template must be run as Admin +# +# Sample CLI syntax: +# heat stack-create -f CFNPushStatsAutoScale.yaml STACK +# +# Expected Outcome: +# VMs running the guest image (nova list) +# New ceilometer alarm triggers (ceilometer alarm-list) +# New ceilometer meters (ceilometer meter-list) created from within the VM +# +################################################################################ + +heat_template_version: 2015-04-30 + +description: > + Demonstrates autoscaling VMs that use cfn-push-stats + to emit ceilometer meters from within the VM + +parameters: + + KEYPAIR: + description: keypair to use. (nova keypair-list) + type: string + default: controller-0 + constraints: + - custom_constraint: nova.keypair + + KEYPAIR_ADMIN_USER: + description: Name of user account to inject ssh keys from keypair + type: string + default: 'ec2-user' + + FLAVOR: + description: Nova flavor to use. (nova flavor-list) + type: string + default: small + constraints: + - custom_constraint: nova.flavor + + IMAGE: + description: Glance image to create a cinder volume (glance image-list) + type: string + default: tis-centos-guest + constraints: + - custom_constraint: glance.image + + PUBLIC_NETWORK: + description: Name of public network to use for VMs (neutron net-list) + type: string + default: public-net0 + constraints: + - custom_constraint: neutron.network + + INTERNAL_NETWORK: + description: Name of internal network to use for VMs (neutron net-list) + type: string + default: internal-net0 + constraints: + - custom_constraint: neutron.network + + METER_NAME: + description: Ceilometer meter to query when determining autoscaling + type: string + default: vm_stat + + METER_UNIT: + description: Name for custom meter to be created using cfn-push-stats + type: string + default: '%' + + HIGH_VALUE: + description: Metric value that will trigger a scale up if exceeded + type: string + default: '80' + + LOW_VALUE: + description: Metric value that will trigger a scale down if below + type: string + default: '30' + +resources: + + CfnUser: + type: AWS::IAM::User + + WebKeys: + type: AWS::IAM::AccessKey + properties: + UserName: { get_resource: CfnUser } + + ScaleUpPolicy: + type: OS::Heat::ScalingPolicy + properties: + adjustment_type: change_in_capacity + auto_scaling_group_id: { get_resource: ScalingGroup } + cooldown: 60 + scaling_adjustment: 1 + + ScaleDownPolicy: + type: OS::Heat::ScalingPolicy + properties: + adjustment_type: change_in_capacity + auto_scaling_group_id: { get_resource: ScalingGroup } + cooldown: 60 + scaling_adjustment: -1 + + # Matching metadata is not compatible with cfn-push-stats + AlarmHigh: + type: OS::Ceilometer::Alarm + properties: + meter_name: + list_join: + - "_" + - - { get_param: 'OS::stack_name'} + - { get_param: METER_NAME } + statistic: avg + period: 60 + evaluation_periods: 1 + threshold: { get_param: HIGH_VALUE } + alarm_actions: + - {get_attr: [ScaleUpPolicy, alarm_url]} + comparison_operator: gt + + AlarmLow: + type: OS::Ceilometer::Alarm + properties: + meter_name: + list_join: + - "_" + - - { get_param: 'OS::stack_name'} + - { get_param: METER_NAME } + statistic: avg + period: 60 + evaluation_periods: 1 + threshold: { get_param: LOW_VALUE } + alarm_actions: + - {get_attr: [ScaleDownPolicy, alarm_url]} + comparison_operator: lt + + + ScalingGroup: + type: OS::Heat::AutoScalingGroup + properties: + cooldown: 60 + desired_capacity: 1 + max_size: 3 + min_size: 1 + resource: + type: OS::Nova::Server + # Special Note: CFN related metadata is 
located at the resource + # level (not as a property) + metadata: + wrs-groupindex-mode: true + AWS::CloudFormation::Init: + config: + files: + /etc/cfn/cfn-credentials: + content: + str_replace: + template: | + AWSAccessKeyId=_keyid_ + AWSSecretKey=_secret_ + params: + _keyid_: + get_resource: WebKeys + _secret_: + get_attr: + - WebKeys + - SecretAccessKey + mode: '000400' + owner: root + group: root + /etc/cfn/make_load: + content: | + #!/bin/sh + # Generate maximum CPU load for a core + # Launch this X times for X cores + # to get 100% utilization + dd if=/dev/urandom of=/dev/null & + mode: '000700' + owner: root + group: root + /etc/cfn/get_cpu_load: + content: | + #!/usr/bin/python + # Get the 1 minute CPU load average and + # divide by num cores + import os + cores = 1 + n = os.sysconf("SC_NPROCESSORS_ONLN") + if isinstance(n, int) and n > 0: + cores = n + l_avg = float(os.getloadavg()[0]) + # convert to a percentage + pct = (100 * l_avg) / float(cores) + print pct + mode: '000700' + owner: root + group: root + /etc/cfn/send_guest_metrics: + content: + str_replace: + template: | + #!/bin/sh + METRIC=`/etc/cfn/get_cpu_load` + /opt/aws/bin/cfn-push-stats --metric _metername_ --value ${METRIC} --units _unit_ + params: + _metername_: + list_join: + - "_" + - - { get_param: 'OS::stack_name' } + - { get_param: METER_NAME } + _unit_: + get_param: METER_UNIT + mode: '000700' + owner: root + group: root + /etc/cron.d/cfn_cron: + content: | + * * * * * root /etc/cfn/send_guest_metrics + mode: '000600' + owner: root + group: root + + properties: + name: + list_join: + - "_" + - - { get_param: 'OS::stack_name'} + - "vm" + - "" + key_name: { get_param: KEYPAIR } + admin_user: { get_param: KEYPAIR_ADMIN_USER } + flavor: { get_param: FLAVOR } + image: { get_param: IMAGE } + networks: + - network: { get_param: PUBLIC_NETWORK } + - network: { get_param: INTERNAL_NETWORK } + # HEAT_CFNTOOLS includes Resource Metadata in the user-data + # automatically and expects the format to comply with + # AWS::CloudFormation::Init + user_data_format: HEAT_CFNTOOLS + user_data: | + #!/bin/bash -v + # Create sym links to standard location for cfn tools + # in an aws environment + echo "Setting up symlinks" >> /var/log/heat_setup.txt + cfn-create-aws-symlinks --source /usr/bin + # invoke cfn-init which will extract cloudformation + # metadata from the userdata + echo "Running cfn-init " >> /var/log/heat_setup.txt + /usr/bin/cfn-init >> /var/log/heat_setup.txt + echo "Done cfn-init setup" >> /var/log/heat_setup.txt + + +outputs: + ceilometer_query: + value: + str_replace: + template: ceilometer statistics -m metername -p 60 -a avg + params: + metername: + list_join: + - "_" + - - { get_param: 'OS::stack_name' } + - { get_param: METER_NAME } diff --git a/openstack/python-heat/python-heat/templates/hot/scenarios/CombinationAutoScaling.yaml b/openstack/python-heat/python-heat/templates/hot/scenarios/CombinationAutoScaling.yaml new file mode 100644 index 00000000..00ca343b --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/scenarios/CombinationAutoScaling.yaml @@ -0,0 +1,234 @@ +################################################################################ +# Copyright (c) 2013-2014 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrates creating: +# A static nova instance that acts as a load balancer +# An autoscaling stack of nova instances +# Use of the WRS autoscaling naming enhancement +# +# Pre-Reqs: +# Normal lab setup. Capable of launching 3 VMs +# A keypair setup for the current user. (nova keypair-list) +# A flavor called: m1.small (nova flavor-list) +# A glance image called: wrl6 (glance image-list) +# +# Optional Template Parameters: +# KEYPAIR: A keypair setup for the current user (nova keypair-list) +# KEYPAIR_ADMIN_USER: The account to use when doing ssh to the VM (ssh keys +# injected from the keypair) +# FLAVOR: A nova flavor to use in launching the VMs and load balancer +# (nova flavor-list) +# IMAGE: A glance image to use in launching the VMs (glance image-list) +# LB_IMAGE: A glance image to use in launching the load balancer +# (glance image-list) +# LB_NETWORK: name of the network to use for the load balancer VM +# (neutron net-list) +# VM_NETWORK: name of the network to use for the scaling VMs +# (neutron net-list) +# PUBLIC_NETWORK: name of public network to use for all VMs +# (neutron net-list) +# INTERNAL_NETWORK: name of the internal network to use for all VMs +# (neutron net-list) +# METER_NAME: name of the ceilometer meter to trigger autoscaling +# (ceilometer meter-list) +# +# Tenant Considerations: +# The default meters (vswitch) are not accessible to tenants +# +# Sample CLI syntax: +# heat stack-create -f CombinationAutoScaling.yaml STACK +# +# Expected Outcome: +# A VM running the load balancer (nova list) +# An auto-scaling stack of server VMs (nova list) +# New ceilometer alarm triggers (ceilometer alarm-list) +# +# Note: there is no communication between the load balancer and the VMs +# +################################################################################ + +heat_template_version: 2015-04-30 + +description: Demonstrate mixture of regular and autoscaling VMs + +parameters: + + KEYPAIR: + description: Name of an existing KeyPair for the VMs + type: string + default: controller-0 + constraints: + - custom_constraint: nova.keypair + + KEYPAIR_ADMIN_USER: + type: string + description: Name of user account to inject ssh keys from keypair + default: 'ec2-user' + + FLAVOR: + description: Instance flavor + type: string + default: m1.small + constraints: + - custom_constraint: nova.flavor + + LB_IMAGE: + description: the name or uuid of the loadbalancer glance image + type: string + default: wrl6 + constraints: + - custom_constraint: glance.image + + IMAGE: + description: the name or uuid of the server image in glance + type: string + default: wrl6 + constraints: + - custom_constraint: glance.image + + PUBLIC_NETWORK: + type: string + description: Public network name + default: public-net0 + constraints: + - custom_constraint: neutron.network + + INTERNAL_NETWORK: + type: string + description: Internal network name + default: internal-net0 + constraints: + - custom_constraint: neutron.network + + VM_NETWORK: + type: string + description: Server network name + default: private-net0 + constraints: + - custom_constraint: neutron.network + + LB_NETWORK: + type: string + description: Load Balancer network name + default: internal-net0 + constraints: + - custom_constraint: neutron.network + + METER_NAME: + type: string + description: ceilometer meter to trigger autoscaling + default: vswitch.port.receive.util + + MinClusterSize: + 
type: string + default: '1' + description: Minimum number of servers in the scaling group + + MaxClusterSize: + type: string + default: '2' + description: Maximum number of servers in the scaling group + +resources: + + LoadBalancer: + type: OS::Nova::Server + properties: + name: + list_join: + - "-" + - [{get_param: 'OS::stack_name'}, 'LoadBalancer'] + image: { get_param: LB_IMAGE} + flavor: {get_param: FLAVOR } + key_name: {get_param: KEYPAIR} + admin_user: {get_param: KEYPAIR_ADMIN_USER } + networks: + - network: { get_param: PUBLIC_NETWORK } + vif-model: 'virtio' + - network: { get_param: LB_NETWORK } + - network: { get_param: INTERNAL_NETWORK } + user_data_format: 'RAW' + user_data: | + #wrs-config + DEMO_PERSONALITY="lb" + FUNCTIONS="hugepages,demo," + + + SrvScaleOutPolicy: + type: OS::Heat::ScalingPolicy + properties: + auto_scaling_group_id: { get_resource: ServerGroup } + adjustment_type: change_in_capacity + scaling_adjustment: 1 + cooldown: 30 + + SrvScaleInPolicy: + type: OS::Heat::ScalingPolicy + properties: + auto_scaling_group_id: { get_resource: ServerGroup } + adjustment_type: change_in_capacity + scaling_adjustment: -1 + cooldown: 30 + + LINKAlarmHigh: + type: OS::Ceilometer::Alarm + properties: + description: Scale-out if the max link util > 50% for 30 secs + meter_name: { get_param: METER_NAME } + statistic: max + period: '30' + evaluation_periods: '1' + threshold: '50' + alarm_actions: + - {get_attr: [SrvScaleOutPolicy, alarm_url]} + comparison_operator: gt + + LINKAlarmLow: + type: OS::Ceilometer::Alarm + properties: + description: Scale-in if the max link util < 20% for 30 secs + meter_name: { get_param: METER_NAME } + statistic: max + period: '30' + evaluation_periods: '1' + threshold: '20' + alarm_actions: + - {get_attr: [SrvScaleInPolicy, alarm_url]} + comparison_operator: lt + + ServerGroup: + type: OS::Heat::AutoScalingGroup + properties: + cooldown: 60 + desired_capacity: 1 + min_size: { get_param: MinClusterSize } + max_size: { get_param: MaxClusterSize } + resource: + type: OS::Nova::Server + metadata: + wrs-groupindex-mode: true + properties: + name: + list_join: + - "-" + - [{get_param: 'OS::stack_name'}, 'srv', ''] + flavor: { get_param: FLAVOR } + image: { get_param: IMAGE } + key_name: { get_param: KEYPAIR } + admin_user: {get_param: KEYPAIR_ADMIN_USER } + metadata: {"metering.stack": {get_param: "OS::stack_id"}} + networks: + - network: { get_param: PUBLIC_NETWORK } + vif-model: 'virtio' + - network: { get_param: VM_NETWORK } + - network: { get_param: INTERNAL_NETWORK } + user_data_format: 'RAW' + user_data: | + #wrs-config + DEMO_PERSONALITY="srv" + FUNCTIONS="hugepages,demo," diff --git a/openstack/python-heat/python-heat/templates/hot/scenarios/LabSetup.yaml b/openstack/python-heat/python-heat/templates/hot/scenarios/LabSetup.yaml new file mode 100644 index 00000000..cd9b89e1 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/scenarios/LabSetup.yaml @@ -0,0 +1,379 @@ +################################################################################ +# Copyright (c) 2013-2015 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrates a simple lab setup +# Sets up networks, routers, glance images, flavors, keypairs +# +# Pre-Reqs: +# Image file located at /home/wrsroot/images/tis-centos-guest.img +# This is an R2 HEAT template version (2015-04-30) +# +# Mandatory Template Parameters: +# TENANT_ID: The tenant ID (keystone tenant-list) +# +# Tenant Considerations: +# Should be run as admin. +# +# Sample CLI syntax: +# heat stack-create -f LabSetup.yaml +# -P TENANT_ID=812b639cd3714d389a4e2662b114b72b SETUP +# +# Expected Outcome: +# 2 provider networks created called: physnet0 and physnet1 with ranges +# setup (neutron providernet-list) +# A new keypair called: controller-0 (nova keypair-list) +# A new glance image called: wrl6 (glance image-list) +# 2 flavors created called: m1.tiny and m1.small (nova flavor-list) +# 4 networks created called: external-net0, internal-net0, private-net0, +# public-net0 (neutron net-list) +# 5 subnets created called: public-subnet0, external-subnet0, +# private-subnet0, internal-subnet0, tagged-subnet0 (neutron subnet-list) +# 2 routers with gateways setup called: public-router0, +# private-router0 (neutron router-list) +# +################################################################################ + +heat_template_version: 2015-04-30 + +description: > + Demonstrate a simple Lab Setup + +parameters: + + TENANT_ID: + description: Tenant ID for the network. (openstack project list) + type: string + default: admin + + IMAGE_NAME: + description: Name of the new glance image + type: string + default: wrl6 + + KEYPAIR_NAME: + description: Name of the new keypair + type: string + default: controller-0 + + PHYSNET0: + description: First physical network + type: string + default: physnet0 + + PHYSNET1: + description: Second physical network + type: string + default: physnet1 + + # Network names + PUBLICNET: + description: Name of the Public Network + type: string + default: public-net0 + + PRIVATENET: + description: Name of the Private Network + type: string + default: private-net0 + + INTERNALNET: + description: Name of the Internal Network + type: string + default: internal-net0 + + EXTERNALNET: + description: Name of the External Network + type: string + default: external-net0 + + # Subnet names + PUBLICSUBNET: + description: Name of the Public Subnet + type: string + default: public-subnet0 + + PRIVATESUBNET: + description: Name of the Private Subnet + type: string + default: private-subnet0 + + INTERNALSUBNET: + description: Name of the Internal Subnet + type: string + default: internal-subnet0 + + TAGGEDSUBNET: + description: Name of the Tagged Subnet + type: string + default: tagged-subnet0 + + EXTERNALSUBNET: + description: Name of the External Subnet + type: string + default: external-subnet0 + + PUBLIC_CIDR: + description: Cidr of the Public Subnet + type: string + default: '192.168.101.0/24' + + PUBLIC_EXTERNAL_GWY: + description: External IP of public router on the external network + type: string + default: 192.168.1.2 + + PRIVATE_CIDR: + description: Cidr of the Private Subnet + type: string + default: '192.168.201.0/24' + + PRIVATE_EXTERNAL_GWY: + description: External IP of private router on the external network + type: string + default: 192.168.1.3 + + INTERNAL_CIDR: + description: Cidr of the Internal Subnet + type: string + default: '10.10.0.0/24' + + TAGGED_CIDR: + description: Cidr of the Tagged Subnet + type: string 
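+    # a second subnet on the internal network (see the tagged_subnet resource below)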
+ default: '10.10.1.0/24' + + EXTERNAL_CIDR: + description: Cidr of the External Subnet + type: string + default: '192.168.1.0/24' + + EXTERNAL_GATEWAY: + description: Cidr of the External Subnet + type: string + default: '192.168.1.1' + + # Router names + PUBLICROUTER: + description: Name of the Public Router + type: string + default: public-router0 + + PRIVATEROUTER: + description: Name of the Private Router + type: string + default: private-router0 + + +resources: + + ProviderNet0: + type: WR::Neutron::ProviderNet + properties: + name: { get_param: PHYSNET0 } + type: vlan + + ProviderNet1: + type: WR::Neutron::ProviderNet + properties: + name: { get_param: PHYSNET1 } + type: vlan + + ProviderNetRange0A: + type: WR::Neutron::ProviderNetRange + properties: + providernet_id: { get_resource: ProviderNet0 } + name: { list_join: [ "-", [{ get_param: PHYSNET0}, "a"]]} + minimum: 400 + maximum: 499 + tenant_id: { get_param: TENANT_ID } + + ProviderNetRange0B: + type: WR::Neutron::ProviderNetRange + properties: + # required properties + providernet_id: { get_resource: ProviderNet0 } + name: { list_join: [ "-", [{ get_param: PHYSNET0}, "b"]]} + minimum: 10 + maximum: 10 + shared: true + + ProviderNetRange1A: + type: WR::Neutron::ProviderNetRange + properties: + # required properties + providernet_id: { get_resource: ProviderNet1 } + name: { list_join: [ "-", [{ get_param: PHYSNET1}, "a"]]} + minimum: 500 + maximum: 599 + tenant_id: { get_param: TENANT_ID } + + OS_Nova_KeyPair: + type: OS::Nova::KeyPair + properties: + # required properties + name: {get_param: KEYPAIR_NAME } + # optional properties + save_private_key: false + + wrl5_Glance_Image: + type: OS::Glance::Image + properties: + name: {get_param: IMAGE_NAME } + is_public: true + container_format: bare + disk_format: qcow2 + location: file:///home/wrsroot/images/tis-centos-guest.img + + m1.tiny: + type: OS::Nova::Flavor + properties: + name: m1.tiny + ram: 256 + disk: 2 + vcpus: 1 + extra_specs: + 'hw:cpu_policy': dedicated + 'hw:mem_page_size': 2048 + + m1.small: + type: OS::Nova::Flavor + properties: + name: m1.small + ram: 512 + disk: 2 + vcpus: 1 + extra_specs: + 'hw:cpu_policy': shared + 'hw:mem_page_size': 2048 + + external_network: + type: OS::Neutron::Net + depends_on: ProviderNetRange0B + properties: + name: { get_param: EXTERNALNET } + value_specs: + 'provider:network_type': vlan + 'provider:physical_network': { get_attr: [ProviderNet0, name] } + 'provider:segmentation_id': 10 + 'router:external': 'True' + tenant_id: { get_param: TENANT_ID } + + public_network: + type: OS::Neutron::Net + depends_on: ProviderNetRange0A + properties: + name: { get_param: PUBLICNET } + shared: false + tenant_id: { get_param: TENANT_ID } + value_specs: + 'provider:network_type': vlan + 'provider:physical_network': { get_attr: [ProviderNet0, name] } + 'provider:segmentation_id': 400 + + private_network: + type: OS::Neutron::Net + depends_on: ProviderNetRange1A + properties: + name: { get_param: PRIVATENET } + shared: false + value_specs: + 'provider:network_type': vlan + 'provider:physical_network': { get_attr: [ProviderNet1, name] } + 'provider:segmentation_id': 500 + tenant_id: { get_param: TENANT_ID } + + internal_network: + type: OS::Neutron::Net + depends_on: external_network + properties: + name: { get_param: INTERNALNET } + shared: false + tenant_id: { get_param: TENANT_ID } + + public_subnet: + type: OS::Neutron::Subnet + properties: + cidr: { get_param: PUBLIC_CIDR } + network_id: { get_resource: public_network } + name: { get_param: 
PUBLICSUBNET } + tenant_id: { get_param: TENANT_ID } + + private_subnet: + type: OS::Neutron::Subnet + properties: + cidr: { get_param: PRIVATE_CIDR } + network_id: { get_resource: private_network } + name: { get_param: PRIVATESUBNET } + enable_dhcp: true + tenant_id: { get_param: TENANT_ID } + + internal_subnet: + type: OS::Neutron::Subnet + properties: + cidr: { get_param: INTERNAL_CIDR } + network_id: { get_resource: internal_network } + name: { get_param: INTERNALSUBNET } + enable_dhcp: true + tenant_id: { get_param: TENANT_ID } + # --no-gateway + + tagged_subnet: + type: OS::Neutron::Subnet + properties: + cidr: { get_param: TAGGED_CIDR } + network_id: { get_resource: internal_network } + name: { get_param: TAGGEDSUBNET } + tenant_id: { get_param: TENANT_ID } + enable_dhcp: true + # --no-gateway + + external_subnet: + type: OS::Neutron::Subnet + properties: + cidr: { get_param: EXTERNAL_CIDR } + network_id: { get_resource: external_network } + name: { get_param: EXTERNALSUBNET } + tenant_id: { get_param: TENANT_ID } + enable_dhcp: false + gateway_ip: { get_param: EXTERNAL_GATEWAY } + + public_router: + type: OS::Neutron::Router + properties: + name: { get_param: PUBLICROUTER} + external_gateway_info: + network: { get_resource: external_network } + enable_snat: false + external_fixed_ips: + - ip_address: { get_param: PUBLIC_EXTERNAL_GWY } + subnet: { get_resource: external_subnet } + tenant_id: {get_param: TENANT_ID} + + private_router: + type: OS::Neutron::Router + properties: + name: { get_param: PRIVATEROUTER} + external_gateway_info: + network: { get_resource: external_network } + enable_snat: false + external_fixed_ips: + - ip_address: { get_param: PRIVATE_EXTERNAL_GWY } + subnet: { get_resource: external_subnet } + tenant_id: {get_param: TENANT_ID} + + public_router_interface: + type: OS::Neutron::RouterInterface + properties: + router_id: { get_resource: public_router } + subnet_id: { get_resource: public_subnet } + + private_router_interface: + type: OS::Neutron::RouterInterface + properties: + router_id: { get_resource: private_router } + subnet_id: { get_resource: private_subnet } diff --git a/openstack/python-heat/python-heat/templates/hot/scenarios/NestedAutoScale.yaml b/openstack/python-heat/python-heat/templates/hot/scenarios/NestedAutoScale.yaml new file mode 100644 index 00000000..38471be4 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/scenarios/NestedAutoScale.yaml @@ -0,0 +1,212 @@ +################################################################################ +# Copyright (c) 2013 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrates creating: +# An autoscaling stack of nova instances that use cinder volumes +# The default meter used to trigger scaling is: cpu_util +# This template uses another template to show nested scaling +# +# Pre-Reqs: +# Normal lab setup. Capable of launching 3 VMs +# A keypair called: controller-0. (nova keypair-list) +# A flavor called: small (nova flavor-list) +# A glance image called: tis-centos-guest (glance image-list) +# A network called: internal0-net0 (neutron net-list) +# A nested template file NestedStack.yaml in the same folder as this yaml. 
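+#   (the ScalingGroup below references it via "type: NestedStack.yaml", which
+#   the heat client resolves relative to this template's location)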
+# +# Optional Template Parameters: +# KEYPAIR: A keypair setup for the current user (nova keypair-list) +# KEYPAIR_ADMIN_USER: The user to specify for injecting ssh keys from the +# keypair into the VM +# FLAVOR: A nova flavor name or UUID for the VMs (nova flavor-list) +# IMAGE: A glance image name or UUID for launching VMs (glance image-list) +# NETWORK: Name or UUID of the network to use for the VMs (neutron net-list) +# METER_NAME: Name of the ceilometer meter to use to trigger autoscaling +# (ceilometer meter-list) +# METER_PREFIX: user_metadata for a nova meter, metering for all other meters +# HIGH_VALUE: Value for the meter to trigger a scale up. +# LOW_VALUE: Value for the meter to trigger a scale down. +# +# Tenant Considerations: +# The tenant must have access to the meter type selected. +# +# Sample CLI syntax: +# heat stack-create -f NestedAutoScale.yaml STACK +# +# Expected Outcome: +# VMs running the guest image (nova list) +# New ceilometer alarm triggers (ceilometer alarm-list) +# New cinder volumes for each VM (cinder list) +# This template produces an output which shows the CLI syntax to help +# troubleshoot autoscaling +# +################################################################################ + +heat_template_version: 2015-04-30 + +description: > + Demonstrates autoscaling of a nested stack + The nested stack creates volumes and VMs + +parameters: + + KEYPAIR: + description: keypair to use. (nova keypair-list) + type: string + default: controller-0 + constraints: + - custom_constraint: nova.keypair + + KEYPAIR_ADMIN_USER: + description: Name of user account to inject ssh keys from keypair + type: string + default: 'ec2-user' + + FLAVOR: + description: Nova flavor to use. (nova flavor-list) + type: string + default: small + constraints: + - custom_constraint: nova.flavor + + IMAGE: + description: Glance image to create a cinder volume(glance image-list) + type: string + default: tis-centos-guest + constraints: + - custom_constraint: glance.image + + NETWORK: + description: Name of the network to use (neutron net-list) + type: string + default: internal0-net0 + constraints: + - custom_constraint: neutron.network + + METER_NAME: + description: Ceilometer meter to query when determining autoscaling + type: string + default: cpu_util + + METER_PREFIX: + description: > + Ceilometer alarm query prefix. 
+ user_metadata for nova meters, metering for all other meters + type: string + default: user_metadata + constraints: + - allowed_values: [ user_metadata, metering ] + + HIGH_VALUE: + description: Metric value that will trigger a scale up if exceeded + type: string + default: '80' + + LOW_VALUE: + description: Metric value that will trigger a scale down if below + type: string + default: '30' + +resources: + + ScalingGroup: + type: OS::Heat::AutoScalingGroup + properties: + cooldown: 60 + desired_capacity: 1 + max_size: 3 + min_size: 1 + resource: + type: NestedStack.yaml + properties: + NAMING: { get_param: "OS::stack_name"} + FLAVOR: { get_param: FLAVOR } + IMAGE: { get_param: IMAGE } + KEYPAIR: { get_param: KEYPAIR } + KEYPAIR_ADMIN_USER: { get_param: KEYPAIR_ADMIN_USER } + NETWORK: { get_param: NETWORK } + METADATA: {"metering.stack": {get_param: "OS::stack_id"}} + + ScaleUpPolicy: + type: OS::Heat::ScalingPolicy + properties: + adjustment_type: change_in_capacity + auto_scaling_group_id: { get_resource: ScalingGroup } + cooldown: 60 + scaling_adjustment: 1 + + ScaleDownPolicy: + type: OS::Heat::ScalingPolicy + properties: + adjustment_type: change_in_capacity + auto_scaling_group_id: { get_resource: ScalingGroup } + cooldown: 60 + scaling_adjustment: -1 + + AlarmHigh: + type: OS::Ceilometer::Alarm + properties: + meter_name: { get_param: METER_NAME } + statistic: avg + period: 60 + evaluation_periods: 1 + threshold: { get_param: HIGH_VALUE } + alarm_actions: + - {get_attr: [ScaleUpPolicy, alarm_url]} + # ceilometer alarm resource will automatically prepend + # to the matching_metadata based on the meter type + # metadata.metering + # or metadata.user_metadata + matching_metadata: {'stack': {get_param: "OS::stack_id"}} + comparison_operator: gt + + AlarmLow: + type: OS::Ceilometer::Alarm + properties: + meter_name: { get_param: METER_NAME } + statistic: avg + period: 60 + evaluation_periods: 1 + threshold: { get_param: LOW_VALUE } + alarm_actions: + - {get_attr: [ScaleDownPolicy, alarm_url]} + # ceilometer alarm resource will automatically prepend + # to the matching_metadata based on the meter type + # metadata.metering + # or metadata.user_metadata + matching_metadata: {'stack': {get_param: "OS::stack_id"}} + comparison_operator: lt + +outputs: + ceilometer_query: + value: + str_replace: + template: > + ceilometer statistics -m metername + -q metadata.prefix.stack=stackval + -p 60 -a avg + params: + metername: { get_param: METER_NAME } + prefix: { get_param: METER_PREFIX} + stackval: { get_param: "OS::stack_id" } + + manual_scale_up: + value: + str_replace: + template: > + curl -X POST "scale_up_url" + params: + scale_up_url: {get_attr: [ScaleUpPolicy, alarm_url]} + + manual_scale_down: + value: + str_replace: + template: > + curl -X POST "scale_down_url" + params: + scale_down_url: {get_attr: [ScaleDownPolicy, alarm_url]} diff --git a/openstack/python-heat/python-heat/templates/hot/scenarios/NestedStack.yaml b/openstack/python-heat/python-heat/templates/hot/scenarios/NestedStack.yaml new file mode 100644 index 00000000..05b78514 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/scenarios/NestedStack.yaml @@ -0,0 +1,103 @@ +################################################################################ +# Copyright (c) 2013 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# This stack is not meant to be launched directly. 
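+# (If launched standalone for testing, note that the json METADATA
+# parameter has no default and must be supplied explicitly; a hedged
+# sketch only:
+#     heat stack-create -f NestedStack.yaml \
+#       -P METADATA='{"metering.stack": "test"}' TESTSTACK
+# where TESTSTACK is a placeholder stack name.)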
+# It is used by NestedAutoScale.yaml +# +# Pre-Reqs: +# Refer to NestedAutoScale.yaml +# +# Mandatory Template Parameters: +# None +# +# Tenant Considerations: +# Refer to NestedAutoScale.yaml +# +# Sample CLI syntax: +# Refer to NestedAutoScale.yaml +# +# Expected Outcome: +# Refer to NestedAutoScale.yaml +# +################################################################################ + +heat_template_version: 2015-04-30 + +parameters: + + METADATA: + description: a way of passing metadata from the outer stack to the VMs + type: json + + NAMING: + description: Prefix for the name of the volumes and vms + type: string + default: "" + + KEYPAIR: + description: keypair to use. (nova keypair-list) + type: string + default: controller-0 + constraints: + - custom_constraint: nova.keypair + + KEYPAIR_ADMIN_USER: + description: Name of user account to inject ssh keys from keypair + type: string + default: 'ec2-user' + + FLAVOR: + description: Nova flavor to use. (nova flavor-list) + type: string + default: m1.small + constraints: + - custom_constraint: nova.flavor + + IMAGE: + description: Glance image to create cinder volume (glance image-list) + type: string + default: wrl6 + constraints: + - custom_constraint: glance.image + + NETWORK: + description: Name of the private network to use (neutron net-list) + type: string + default: private-net0 + constraints: + - custom_constraint: neutron.network + +resources: + + CinderVolume: + type: OS::Cinder::Volume + properties: + name: + list_join: + - "_" + - [{ get_param: NAMING }, "vol", { group_index } ] + image: { get_param: IMAGE } + size: 2 + + NovaServer: + type: OS::Nova::Server + properties: + name: + list_join: + - "_" + - [{ get_param: NAMING }, "vm", { group_index } ] + metadata: {get_param: METADATA } + key_name: { get_param: KEYPAIR } + admin_user: { get_param: KEYPAIR_ADMIN_USER } + block_device_mapping: + - device_name: vda + delete_on_termination: true + volume_id: { get_resource: CinderVolume } + flavor: { get_param: FLAVOR } + networks: + - network: { get_param: NETWORK } diff --git a/openstack/python-heat/python-heat/templates/hot/scenarios/Networking_and_Servers.yaml b/openstack/python-heat/python-heat/templates/hot/scenarios/Networking_and_Servers.yaml new file mode 100644 index 00000000..5c26a532 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/scenarios/Networking_and_Servers.yaml @@ -0,0 +1,217 @@ +################################################################################ +# Copyright (c) 2013 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrate constructing a server using a newly created subnet +# Shows two scenarios +# Scenario A: network, subnets and ports are created explicitly for the +# servers. This scenario supports selecting a subnet through the use of +# a port +# Scenario B: network and subnets are created and only network is +# specified for the server. This scenario does not support selecting +# a specific subnet, but booting is not allowed unless at least one +# subnet exists. 
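+# (In minimal form, the two attachment styles demonstrated below are:
+#     networks:
+#       - port: { get_resource: a_port }       # Scenario A: subnet pinned via port
+# versus
+#     networks:
+#       - network: { get_resource: a_network } # Scenario B: neutron picks a subnet
+# where a_port and a_network are placeholder resource names.)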
+# +# Pre-Reqs: +# A glance image called: wrl6 +# A nova flavor called: m1.small +# +# Mandatory Template Parameters: +# None +# +# Tenant Considerations: +# An admin-only activity +# +# Sample CLI syntax: +# heat stack-create -f Networking_and_Servers.yaml STACK +# +# Expected Outcome: +# new networks, subnets, ports and servers +# neutron net-list +# neutron subnet-list +# neutron port-list +# nova list +# servers A1 and A2 will be on separate subnets +# servers B1 and B2 will typically be on the same subnet +# +################################################################################ +heat_template_version: 2015-04-30 + +description: > + Demonstrate creating a network and subnetwork to launch a server + +parameters: + + NETWORK_PREFIX: + type: string + description: "The prefix for each newly created network" + default: "network" + + SERVER_PREFIX: + type: string + description: "The prefix for each newly created server" + default: "server" + + IMAGE: + type: string + description: "The name of the glance image" + default: wrl6 + constraints: + - custom_constraint: glance.image + + FLAVOR: + type: string + description: "The name of the flavor to use" + default: m1.small + constraints: + - custom_constraint: nova.flavor + + CIDR1: + type: string + description: "CIDR of subnet A1" + default: "192.168.110.0/24" + + CIDR2: + type: string + description: "CIDR of subnet A2" + default: "192.168.120.0/24" + + CIDR3: + type: string + description: "CIDR of subnet B1" + default: "192.168.130.0/24" + + CIDR4: + type: string + description: "CIDR of subnet B2" + default: "192.168.140.0/24" + +resources: + + ################ + # Scenario A # + ################ + A_Network: + type: "OS::Neutron::Net" + properties: + name: + list_join: [ "-", [{ get_param: NETWORK_PREFIX}, "A"]] + + A_Subnet_1: + type: "OS::Neutron::Subnet" + properties: + name: + list_join: + - "-" + - [{ get_param: NETWORK_PREFIX }, "subnet", "A", "1"] + + network_id: { get_resource: A_Network } + cidr: { get_param: CIDR1 } + enable_dhcp: "True" + + A_Subnet_2: + type: "OS::Neutron::Subnet" + properties: + name: + list_join: + - "-" + - [{ get_param: NETWORK_PREFIX }, "subnet", "A", "2"] + network_id: { get_resource: A_Network } + cidr: { get_param: CIDR2 } + enable_dhcp: "True" + + A_Port_1: + type: OS::Neutron::Port + properties: + network_id: { get_resource: A_Network } + fixed_ips: + - subnet_id: { get_resource: A_Subnet_1 } + + A_Port_2: + type: OS::Neutron::Port + properties: + network_id: { get_resource: A_Network } + fixed_ips: + - subnet_id: { get_resource: A_Subnet_2 } + + A_Server_1: + type: "OS::Nova::Server" + properties: + name: + list_join: [ "-", [{ get_param: SERVER_PREFIX}, "A", "1"]] + image: { get_param: IMAGE } + flavor: { get_param: FLAVOR } + networks: + - port: { get_resource: A_Port_1 } + + A_Server_2: + type: "OS::Nova::Server" + properties: + name: + list_join: [ "-", [{ get_param: SERVER_PREFIX}, "A", "2"]] + image: { get_param: IMAGE } + flavor: { get_param: FLAVOR } + networks: + - port: { get_resource: A_Port_2 } + + ################ + # Scenario B # + ################ + B_Network: + type: "OS::Neutron::Net" + properties: + name: { list_join: [ "-", [{ get_param: NETWORK_PREFIX}, "B"]]} + + B_Subnet_1: + type: "OS::Neutron::Subnet" + properties: + name: + list_join: + - "-" + - [{ get_param: NETWORK_PREFIX }, "subnet", "B", "1"] + network_id: { get_resource: B_Network } + cidr: { get_param: CIDR3 } + enable_dhcp: "True" + + B_Subnet_2: + type: "OS::Neutron::Subnet" + properties: + name: + list_join: 
+ - "-" + - [{ get_param: NETWORK_PREFIX }, "subnet", "B", "2"] + network_id: { get_resource: B_Network } + cidr: { get_param: CIDR4 } + enable_dhcp: "True" + + # This server will use whichever B_Network subnet exists. + # It delays creation until B_Subnet_1 exists + # If B_Subnet_2 also exists, it may choose it + B_Server_1: + type: "OS::Nova::Server" + depends_on: B_Subnet_1 + properties: + name: + list_join: [ "-", [{ get_param: SERVER_PREFIX}, "B", "1"]] + image: { get_param: IMAGE } + flavor: { get_param: FLAVOR } + networks: + - network: { get_resource: B_Network } + + # This server will use whichever B_Network subnet exists. + # It delays creation until B_Subnet_2 exists + # If B_Subnet_1 also exists, it may choose it + B_Server_2: + type: "OS::Nova::Server" + depends_on: B_Subnet_2 + properties: + name: + list_join: [ "-", [{ get_param: SERVER_PREFIX}, "B", "2"]] + image: { get_param: IMAGE } + flavor: { get_param: FLAVOR } + networks: + - network: { get_resource: B_Network } diff --git a/openstack/python-heat/python-heat/templates/hot/scenarios/PortForwarding.yaml b/openstack/python-heat/python-heat/templates/hot/scenarios/PortForwarding.yaml new file mode 100644 index 00000000..ec018e0f --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/scenarios/PortForwarding.yaml @@ -0,0 +1,210 @@ +################################################################################ +# Copyright (c) 2015 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrates a virtual router port forwarding setup +# Sets up ports, VM instances, and forwarding rules +# +# Pre-Reqs: +# An environment capable of launching 3 VMs +# Image imported to glance +# Keypair imported to nova +# Private network created +# Public (external) network created with attachment to private subnet +# Router create connecting private network to public network +# +# Mandatory Template Parameters: +# KEYPAIR +# IMAGE +# FLAVOR +# ROUTER_ID +# PRIVATE_NET +# PRIVATE_SUBNET +# +# Tenant Considerations: +# Should be run as tenant. +# Can be run as admin +# +# Sample CLI syntax: +# heat stack-create -f PortForwarding.yaml \ +# -P ROUTER_ID=812b639cd3714d389a4e2662b114b72b \ +# -P KEYPAIR=tenant1-keypair \ +# -P FLAVOR=small \ +# -P PRIVATE_NET=tenant1-mgmt \ +# -P PRIVATE_SUBNET=tenant1-mgmt-subnet0 \ +# DNAT +# +# Expected Outcome: +# 3 VM instances launched +# 3 Neutron port forwarding rules (1 to each VM) (neutron portforwarding-list) +# +################################################################################ + +heat_template_version: 2015-04-30 + +description: > + HOT template to deploy three servers into an existing neutron tenant + network and assign port forwarding rules to each server so they are + accessible from the public network via specific layer4 port numbers. 
+ +parameters: + + KEYPAIR: + type: string + description: Name of keypair to assign to servers + constraints: + - custom_constraint: nova.keypair + + KEYPAIR_ADMIN_USER: + type: string + description: Name of user account to inject ssh keys from keypair + default: 'ec2-user' + + IMAGE: + type: string + description: Name of image to use for servers + default: tis-centos-guest + constraints: + - custom_constraint: glance.image + + FLAVOR: + type: string + description: Flavor to use for servers + default: small + constraints: + - custom_constraint: nova.flavor + + ROUTER_ID: + type: string + description: ID of public facing router that handles NAT translations + constraints: + - custom_constraint: neutron.router + + PRIVATE_NET: + type: string + description: Name of private network into which servers get deployed + constraints: + - custom_constraint: neutron.network + + PRIVATE_SUBNET: + type: string + description: Name of private sub network into which servers get deployed + constraints: + - custom_constraint: neutron.subnet + + PRIVATE_PORT_NUMBER: + type: number + description: Layer4 protocol port number which will terminate on each VM + default: 80 + + PROTOCOL: + type: string + description: Layer4 protocol of all port mappings + default: tcp + + SERVER1_PUBLIC_PORT_NUMBER: + type: number + description: Public layer4 protocol portnum which terminates on server1 + default: 8080 + + SERVER2_PUBLIC_PORT_NUMBER: + type: number + description: Public layer4 protocol portnum which terminates on server2 + default: 8081 + + SERVER3_PUBLIC_PORT_NUMBER: + type: number + description: Public layer4 protocol portnum which terminates on server3 + default: 8082 + +resources: + + server1: + type: OS::Nova::Server + properties: + name: Server1 + image: { get_param: IMAGE } + flavor: { get_param: FLAVOR } + key_name: { get_param: KEYPAIR } + admin_user: { get_param: KEYPAIR_ADMIN_USER } + networks: + - port: { get_resource: server1_port } + + server1_port: + type: OS::Neutron::Port + properties: + network: { get_param: PRIVATE_NET } + fixed_ips: + - subnet: { get_param: PRIVATE_SUBNET } + + server1_rule: + type: WR::Neutron::PortForwarding + properties: + router_id: { get_param: ROUTER_ID } + inside_addr: { get_attr: [ server1, first_address ] } + inside_port: { get_param: PRIVATE_PORT_NUMBER } + outside_port: { get_param: SERVER1_PUBLIC_PORT_NUMBER } + protocol: { get_param: PROTOCOL } + description: "Server1 port forwarding rule" + + server2: + type: OS::Nova::Server + properties: + name: Server2 + image: { get_param: IMAGE } + flavor: { get_param: FLAVOR } + key_name: { get_param: KEYPAIR } + admin_user: { get_param: KEYPAIR_ADMIN_USER } + networks: + - port: { get_resource: server2_port } + + server2_port: + type: OS::Neutron::Port + properties: + network: { get_param: PRIVATE_NET } + fixed_ips: + - subnet: { get_param: PRIVATE_SUBNET } + + server2_rule: + type: WR::Neutron::PortForwarding + properties: + router_id: { get_param: ROUTER_ID } + inside_addr: { get_attr: [ server2, first_address ] } + inside_port: { get_param: PRIVATE_PORT_NUMBER } + outside_port: { get_param: SERVER2_PUBLIC_PORT_NUMBER } + protocol: { get_param: PROTOCOL } + description: "Server2 port forwarding rule" + + server3: + type: OS::Nova::Server + properties: + name: Server3 + image: { get_param: IMAGE } + flavor: { get_param: FLAVOR } + key_name: { get_param: KEYPAIR } + admin_user: { get_param: KEYPAIR_ADMIN_USER } + networks: + - port: { get_resource: server3_port } + + server3_port: + type: OS::Neutron::Port + properties: + 
network: { get_param: PRIVATE_NET } + fixed_ips: + - subnet: { get_param: PRIVATE_SUBNET } + + server3_rule: + type: WR::Neutron::PortForwarding + properties: + # required properties + router_id: { get_param: ROUTER_ID } + inside_addr: { get_attr: [ server3, first_address ] } + inside_port: { get_param: PRIVATE_PORT_NUMBER } + outside_port: { get_param: SERVER3_PUBLIC_PORT_NUMBER } + protocol: { get_param: PROTOCOL } + # optional properties + description: "Server3 port forwarding rule" diff --git a/openstack/python-heat/python-heat/templates/hot/scenarios/UserData.yaml b/openstack/python-heat/python-heat/templates/hot/scenarios/UserData.yaml new file mode 100644 index 00000000..82b5b31b --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/scenarios/UserData.yaml @@ -0,0 +1,175 @@ +################################################################################ +# Copyright (c) 2017 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objectives: +# Boot a nova server with userdata +# Use stack-update to rebuild the server with new userdata +# +# Pre-Reqs: +# A keypair called: controller-0 (nova keypair-list) +# A nova flavor called: m1.tiny (nova flavor-list) +# A glance image called: wrl6 (glance image-list) +# A network called: private-net0 (neutron net-list) +# +# Mandatory Template Parameters: +# None +# +# Tenant Considerations: +# None +# +# Sample CLI syntax: +# heat stack-create -f UserData.yaml STACK +# heat stack-update -f UserData.yaml -P USERDATA="new userdata" STACK +# +# Expected Outcome: +# 3 new nova instances called: userdata-vm, userdata-vm_vol, +# userdata-vm_vol_v2 +# 2 new volumes called: userdata-vol, userdata-vol_v2 +# nova list +# The stack-update should rebuild the nova VM with new userdata +# The userdata is the base64 encoded attribute: OS-EXT-SRV-ATTR:user_data +# +################################################################################ +heat_template_version: 2015-04-30 + +description: > + Demonstrate launching an instance with updatable userdata + +parameters: + + VM_NAME: + description: name for the new VM + type: string + default: userdata-vm + + VM_VOL_NAME: + description: name for the new VM + type: string + default: userdata-vm_vol + + VOL_V1_NAME: + description: name for the new volume + type: string + default: userdata-vol + + VM_VOL_V2_NAME: + description: name for the new VM + type: string + default: userdata-vm_vol_v2 + + VOL_V2_NAME: + description: name for the new volume + type: string + default: userdata-vol_v2 + + KEYPAIR: + description: keypair to use. (nova keypair-list) + type: string + default: controller-0 + constraints: + - custom_constraint: nova.keypair + + KEYPAIR_ADMIN_USER: + type: string + description: Name of user account to inject ssh keys from keypair + default: 'ec2-user' + + FLAVOR: + description: Nova flavor to use. 
(nova flavor-list) + type: string + default: m1.tiny + constraints: + - custom_constraint: nova.flavor + + IMAGE: + description: Glance image to use (glance image-list) + type: string + default: wrl6 + constraints: + - custom_constraint: glance.image + + PRIVATE_NET: + description: private network to use (neutron net-list) + type: string + default: private-net0 + constraints: + - custom_constraint: neutron.network + + USERDATA: + description: Userdata for the new VM + type: string + default: "Sample Userdata" + +resources: + + ##################### + ## Host with image as boot source + ###################### + RAW_Server: + type: OS::Nova::Server + properties: + admin_user: { get_param: KEYPAIR_ADMIN_USER } + flavor: { get_param: FLAVOR } + image: { get_param: IMAGE } + key_name: { get_param: KEYPAIR } + name: { get_param: VM_NAME } + networks: + - network: { get_param: PRIVATE_NET } + user_data: { get_param: USERDATA } + user_data_format: RAW + user_data_update_policy: REBUILD + + ##################### + ## Host with volume as boot source within the block_device_mapping field + ###################### + RAW_vol: + type: OS::Cinder::Volume + properties: + name: { get_param: VOL_V1_NAME } + image: { get_param: IMAGE } + size: 2 + + RAW_Server_vol: + type: OS::Nova::Server + properties: + admin_user: { get_param: KEYPAIR_ADMIN_USER } + flavor: { get_param: FLAVOR } + block_device_mapping: + - device_name: vda + volume_id: { get_resource: RAW_vol } + key_name: { get_param: KEYPAIR } + name: { get_param: VM_VOL_NAME } + networks: + - network: { get_param: PRIVATE_NET } + user_data: { get_param: USERDATA } + user_data_format: RAW + user_data_update_policy: REBUILD + + ##################### + ## Host with vol as boot source within the block_device_mapping_v2 field + ###################### + RAW_vol_V2: + type: OS::Cinder::Volume + properties: + name: { get_param: VOL_V2_NAME } + image: { get_param: IMAGE } + size: 2 + + RAW_Server_vol_V2: + type: OS::Nova::Server + properties: + admin_user: { get_param: KEYPAIR_ADMIN_USER } + flavor: { get_param: FLAVOR } + block_device_mapping_v2: + - {device_name: vda, volume_id: { get_resource: RAW_vol_V2 } } + key_name: { get_param: KEYPAIR } + name: { get_param: VM_VOL_V2_NAME } + networks: + - network: { get_param: PRIVATE_NET } + user_data: { get_param: USERDATA } + user_data_format: RAW + user_data_update_policy: REBUILD diff --git a/openstack/python-heat/python-heat/templates/hot/scenarios/VIF.yaml b/openstack/python-heat/python-heat/templates/hot/scenarios/VIF.yaml new file mode 100644 index 00000000..9f5bdcf1 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/scenarios/VIF.yaml @@ -0,0 +1,96 @@ +################################################################################ +# Copyright (c) 2015 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrates Launching a VM specifying VIF PCI Address +# vif pci address format is: 0000:<bus>:<slot>.0 +# where bus and slot are 2 digit hex values +# example syntax: 0000:05:0A.0 +# +# Pre-Reqs: +# Normal lab setup. 
+# Ability to launch a VM with a VIF PCI Address +# A keypair called: controller-0 (nova keypair-list) +# A flavor called: m1.small (nova flavor-list) +# A glance image called: wrl6 (glance image-list) +# Network called: private-net0 (neutron net-list) +# +# Mandatory Template Parameters: +# None +# +# Sample CLI syntax: +# heat stack-create -f VIF.yaml -P VIF_PCI_ADDR=0000:00:00.0 STACK +# +# Expected Outcome: +# A new stack (heat stack-list) +# A new nova VM (nova list) +# +################################################################################ + +heat_template_version: 2015-04-30 + +description: > + Creates a HEAT stack and a VM instance using the specified vif pci address + +parameters: + + KEYPAIR: + description: Name of existing KeyPair to enable SSH access to VMs + type: string + default: controller-0 + constraints: + - custom_constraint: nova.keypair + + FLAVOR: + description: Nova flavor to use. (nova flavor-list) + type: string + default: m1.small + constraints: + - custom_constraint: nova.flavor + + IMAGE: + description: the name or uuid of the server image in glance + type: string + default: wrl6 + constraints: + - custom_constraint: glance.image + + NETWORK: + description: name of private network to use (neutron net-list) + type: string + default: private-net0 + constraints: + - custom_constraint: neutron.network + + VM_NAME: + description: Name to label the VM instance + type: string + default: 'vif-1' + + VIF: + description: Vif Model to use + type: string + default: 'virtio' + + VIF_PCI_ADDR: + description: Vif pci address + type: string + default: '0000:01:01.0' + +resources: + + Srv: + type: OS::Nova::Server + properties: + name: { get_param: VM_NAME } + image: { get_param: IMAGE } + flavor: { get_param: FLAVOR } + key_name: { get_param: KEYPAIR } + networks: + - network: { get_param: NETWORK } + vif-model: { get_param: VIF } + vif-pci-address: { get_param: VIF_PCI_ADDR } diff --git a/openstack/python-heat/python-heat/templates/hot/scenarios/VMAutoScaling.yaml b/openstack/python-heat/python-heat/templates/hot/scenarios/VMAutoScaling.yaml new file mode 100644 index 00000000..a746570a --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/scenarios/VMAutoScaling.yaml @@ -0,0 +1,223 @@ +################################################################################ +# Copyright (c) 2014 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrates dynamically resizing guest vcpus of a VM without +# service impact when the policy criteria described in the template are met. +# +# Pre-Reqs: +# Normal lab setup. 
+# Ability to launch a VM +# A keypair called: controller-0 (nova keypair-list) +# A flavor with special attributes +# hw:wrs:min_vcpus = 1 +# hw:cpu_policy = dedicated +# a minimum of 2 vcpus +# A glance image called: wrl6 (glance image-list) +# Network called: private-net0 (neutron net-list) +# +# Mandatory Template Parameters: +# None +# +# Tenant Considerations: +# A tenant can use this template if using a keypair, subnet and network +# accessible to it +# +# Sample CLI syntax: +# heat stack-create -f VMAutoScaling.yaml STACK +# +# Expected Outcome: +# A new stack (heat stack-list) +# A new nova VM (nova list) +# Two ceilometer alarms corresponding to high and low watermarks +# (ceilometer alarm-list) +# +################################################################################ + +heat_template_version: 2015-04-30 + +description: > + Creates a HEAT autoscaling stack and a VM instance which is able to scale + up/down the number of guest vcpus using vcpu_util samples from the guest + instance. + +parameters: + KEYPAIR: + description: Name of existing KeyPair to enable SSH access to VMs + type: string + default: controller-0 + constraints: + - custom_constraint: nova.keypair + + KEYPAIR_ADMIN_USER: + description: Name of user account to inject ssh keys from keypair + type: string + default: 'ec2-user' + + FLAVOR: + description: Nova flavor to use. (nova flavor-list) + type: string + default: m1.small + constraints: + - custom_constraint: nova.flavor + + IMAGE: + description: the name or uuid of the server image in glance + type: string + default: wrl6 + constraints: + - custom_constraint: glance.image + + NETWORK: + description: name of private network to use (neutron net-list) + type: string + default: private-net0 + constraints: + - custom_constraint: neutron.network + + METER_NAME: + description: Ceilometer meter to query when determining autoscaling + type: string + default: vcpu_util + + METER_PREFIX: + description: > + Ceilometer alarm query prefix. 
user_metadata for nova meters, + metering for all other meters + type: string + default: user_metadata + constraints: + - allowed_values: [ user_metadata, metering ] + + SCALE_UP_VALUE: + description: Metric value that will trigger a scale up if exceeded + type: string + default: '60' + + SCALE_DOWN_VALUE: + description: Metric value that will trigger a scale down if below + type: string + default: '5' + + VM_NAME: + description: Name to label the VM instance + type: string + default: 'guest-1' + +resources: + + Srv: + type: OS::Nova::Server + properties: + name: { get_param: VM_NAME } + image: { get_param: IMAGE } + flavor: { get_param: FLAVOR } + key_name: { get_param: KEYPAIR } + admin_user: { get_param: KEYPAIR_ADMIN_USER } + metadata: + metering.stack: { get_param: "OS::stack_id" } + networks: + - { network: { get_param: NETWORK }, vif-model: virtio } + + CPUAlarmHigh: + type: OS::Ceilometer::Alarm + properties: + description: Scale up if 1 minute avg of meter above threshold + meter_name: { get_param: METER_NAME } + statistic: avg + period: '60' + evaluation_periods: '1' + threshold: { get_param: SCALE_UP_VALUE } + repeat_actions: 'True' + alarm_actions: + - { get_attr: [SrvScaleUpPolicy, AlarmUrl] } + comparison_operator: gt + # ceilometer alarm resource will automatically prepend + # metadata.metering + # or metadata.user_metadata + matching_metadata: { 'stack': { get_param: "OS::stack_id" }} + + CPUAlarmLow: + type: OS::Ceilometer::Alarm + properties: + description: Scale down if 1 minute avg of meter below threshold + meter_name: { get_param: METER_NAME } + statistic: avg + period: '60' + evaluation_periods: '1' + threshold: { get_param: SCALE_DOWN_VALUE } + repeat_actions: 'True' + alarm_actions: + - { get_attr: [SrvScaleDownPolicy, AlarmUrl]} + comparison_operator: lt + # ceilometer alarm resource will automatically prepend + # metadata.metering + # or metadata.user_metadata + matching_metadata: {'stack': { get_param: "OS::stack_id" }} + + SrvScaleUpPolicy: + type: OS::WR::ScalingPolicy + properties: + ServerName: { get_resource: Srv } + ScalingResource: 'cpu' + ScalingDirection: 'up' + Cooldown: '60' + + SrvScaleDownPolicy: + type: OS::WR::ScalingPolicy + properties: + ServerName: { get_resource: Srv } + ScalingResource: 'cpu' + ScalingDirection: 'down' + Cooldown: '60' + +outputs: + + ceilometer_query: + value: + str_replace: + template: > + ceilometer statistics -m metername + -q metadata.prefix.stack=stackval + -p 60 -a avg + params: + metername: { get_param: METER_NAME } + prefix: { get_param: METER_PREFIX } + stackval: { get_param: "OS::stack_id" } + + ceilometer_sample_create: + value: + str_replace: + template: > + ceilometer sample-create + -r server -m metername + --meter-type gauge + --meter-unit '%' + --sample-volume 2 + --resource-metadata '{"prefix.stack":"stackval"}' + params: + server: { get_resource: Srv} + metername: { get_param: METER_NAME } + prefix: { get_param: METER_PREFIX } + stackval: { get_param: "OS::stack_id" } + + manual_scale_up: + value: + str_replace: + template: > + curl -X POST "scale_up_url" + params: + scale_up_url: { get_attr: [SrvScaleUpPolicy, AlarmUrl] } + + + manual_scale_down: + value: + str_replace: + template: > + curl -X POST "scale_down_url" + params: + scale_down_url: { get_attr: [SrvScaleDownPolicy, AlarmUrl] } diff --git a/openstack/python-heat/python-heat/templates/hot/scenarios/WRSQoSPolicy.yaml b/openstack/python-heat/python-heat/templates/hot/scenarios/WRSQoSPolicy.yaml new file mode 100644 index 00000000..c00b1ecd --- /dev/null +++ 
b/openstack/python-heat/python-heat/templates/hot/scenarios/WRSQoSPolicy.yaml @@ -0,0 +1,59 @@ +################################################################################ +## Copyright (c) 2017 Wind River Systems, Inc. +## +## SPDX-License-Identifier: Apache-2.0 +## +################################################################################# +## +## Objectives: +## Create a wrs-qos policy and a new network with it attached +## +## +## Mandatory Template Parameters: +## None +## +## Tenant Considerations: +## None +## +## Sample CLI syntax: +## heat stack-create -f WRSQoSPolicy.yaml STACK +## +## Expected Outcome: +## A new WRS_qos policy and network are created +## The new wrs_qos policy is attached to the network +## +################################################################################# +heat_template_version: 2015-04-30 + +description: > + Demonstrate creating a wrs-qos policy and a new network with it attached + +parameters: + + QOS_NAME: + description: name for the new wrs_qos + type: string + default: example_qos + + NET_NAME: + description: name for the new network + type: string + default: example_net + +resources: + + ex-qos: + type: WR::Neutron::QoSPolicy + properties: + description: 'Example Network Policy' + name: { get_param: QOS_NAME } + policies: + scheduler: + weight: 16 + + ex-net: + type: OS::Neutron::Net + properties: + name: { get_param: NET_NAME } + shared: true + wrs_qos: {get_resource: ex-qos } diff --git a/openstack/python-heat/python-heat/templates/hot/simple/OS_Ceilometer_Alarm.yaml b/openstack/python-heat/python-heat/templates/hot/simple/OS_Ceilometer_Alarm.yaml new file mode 100644 index 00000000..2475eece --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/simple/OS_Ceilometer_Alarm.yaml @@ -0,0 +1,102 @@ +################################################################################ +# Copyright (c) 2013-2015 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrate constructing the heat resource OS::Ceilometer::Alarm +# +# Pre-Reqs: +# Normal Lab Setup +# +# Mandatory Template Parameters: +# None +# +# Tenant Considerations: +# None +# +# Sample CLI syntax: +# heat stack-create -f OS_Ceilometer_Alarm.yaml STACK +# +# Expected Outcome: +# A new alarm (trigger). 
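+# The alarm is created disabled and with no actions wired; as a hedged
+# sketch, alarm_actions could point at a webhook, e.g.
+#   alarm_actions:
+#     - { get_attr: [SomeScalingPolicy, alarm_url] }
+# where SomeScalingPolicy is a hypothetical OS::Heat::ScalingPolicy
+# resource. The new alarm is listed by: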
+# ceilometer alarm-list +# +################################################################################ + +heat_template_version: 2015-04-30 + +description: > + Demonstrate the OS::Ceilometer::Alarm heat resource + +resources: + + OS_Ceilometer_Alarm: + type: OS::Ceilometer::Alarm + properties: + ################################################# + # Required properties + # comparison_operator: + # constraints: + # - allowed_values: [ge, gt, eq, ne, lt, le] + # description: Operator comparing specified stats with threshold + # required: true + # type: string + comparison_operator: 'ge' + + # evaluation_periods: {description: num periods to evaluate over + # , required: true, type: string} + evaluation_periods: '3' + + # meter_name: {description: Meter name watched by the alarm + # , required: true, type: string} + meter_name: 'cpu_util' + + # period: {description: Period (seconds) to evaluate over + # , required: true, type: string} + period: '90' + + # statistic: + # constraints: + # - allowed_values: [count, avg, sum, min, max] + # description: Meter statistic to evaluate + # required: true + # type: string + statistic: 'avg' + + # threshold: {description: Threshold to evaluate against + # , required: true, type: string} + threshold: '60' + + ################################################# + # Optional properties + # alarm_actions: {description: A list of URLs (webhooks) to invoke + # when state transitions to alarm, required: false, type: list} + + # description: {description: Description for the alarm + # , required: false, type: string} + description: 'A Sample Alarm' + + # enabled: {default: 'true', description: True if alarm + # evaluation/actioning is enabled, + # required: false, type: boolean} + enabled: false + + # insufficient_data_actions: {description: A list of URLs + # (webhooks) to invoke when state transitions to insufficient-data + # , required: false, type: list} + + # matching_metadata: {description: Meter should match this resource + # metadata (key=value) additionally to the meter_name + # , required: false, type: map} + + # ok_actions: {description: A list of URLs (webhooks) to invoke + # when state transitions to ok, required: false, type: list} + + # repeat_actions: {default: 'false', description: 'True to trigger + # actions each time the threshold is reached. By default, actions + # are called when : the threshold is reached AND the alarm''s + # state have changed', required: false, type: boolean} + repeat_actions: true diff --git a/openstack/python-heat/python-heat/templates/hot/simple/OS_Cinder_Volume.yaml b/openstack/python-heat/python-heat/templates/hot/simple/OS_Cinder_Volume.yaml new file mode 100644 index 00000000..0d3788d3 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/simple/OS_Cinder_Volume.yaml @@ -0,0 +1,144 @@ +################################################################################ +# Copyright (c) 2013-2015 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrate constructing the heat resource OS::Cinder::Volume +# +# Pre-Reqs: +# Normal Lab Setup (networks, host interfaces, glance images) +# +# Mandatory Template Parameters: +# None +# +# Tenant Considerations: +# None +# +# Sample CLI syntax: +# heat stack-create -f OS_Cinder_Volume.yaml STACK +# +# Expected Outcome: +# A new 2GB cinder volume. 
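+# Individual stack outputs defined at the bottom of this template can be
+# read back after creation (a usage sketch, assuming the heat CLI's
+# output commands):
+#   heat output-show STACK vol_bootable
+# The volume itself appears in: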
+# cinder list +# +################################################################################ + +heat_template_version: 2015-04-30 + +description: > + Demonstrate the OS::Cinder::Volume heat resource + +parameters: + + VOLUME_NAME: + description: Name of the newly created volume + type: string + default: sample_cinder_vol + + VOLUME_SIZE: + description: size for the new cinder volume + type: number + default: 2 + +resources: + + OS_Cinder_Volume: + type: OS::Cinder::Volume + properties: + ################################################# + # Required properties + ################################################# + # None + + ################################################# + # Optional properties + ################################################# + # availability_zone: {description: The availability zone in which + # the volume will be created., required: false, type: string} + + # backup_id: {description: 'If specified, the backup to create + # the volume from.', required: false, type: string} + + # description: {description: A description of the volume., + # required: false, type: string} + description: 'A sample cinder volume' + + # image: {description: 'If specified, the name or ID of the + # glance image to create the volume from.', + # required: false, type: string} + + # imageRef: {description: 'DEPRECATED: use "image" instead.', + # required: false, type: string} + + # metadata: {description: Key/value pairs to associate with the + # volume., required: false, type: map} + + # name: {description: A name used to distinguish the volume., + # required: false, type: string} + name: { get_param: VOLUME_NAME } + + # scheduler_hints: {description: 'Arbitrary key-value pairs + # specified by the client to help the Cinder scheduler + # creating a volume.', required: false, # type: string} + + # size: {description: The size of the volume in GB., + # required: false, type: number} + size: { get_param: VOLUME_SIZE } + + # snapshot_id: {description: 'If specified, the cinder volume + # to use as source.', # required: false, type: string} + + # source_volid: {description: 'If specified, the volume to use as + # source.', required: false, type: string} + + # volume_type: {description: 'If specified, the type of volume + # to use, mapping to a specific backend cinder vtype.', + # required: false, type: string} + +# Cinder Volume Attributes +outputs: + vol_status: + description: The current status of the volume. + value: { get_attr: [ OS_Cinder_Volume, status] } + vol_metadata_values: + description: Key/value pairs associated with the volume in dict form. + value: { get_attr: [ OS_Cinder_Volume, metadata_values] } + vol_display_name: + description: Name of the volume. + value: { get_attr: [ OS_Cinder_Volume, display_name] } + vol_attachments: + description: The list of attachments of the volume + value: { get_attr: [ OS_Cinder_Volume, attachments] } + vol_availability_zone: + description: The availability zone in which the volume is located. + value: { get_attr: [ OS_Cinder_Volume, availability_zone] } + vol_bootable: + description: Boolean indicating if the volume can be booted or not. + value: { get_attr: [ OS_Cinder_Volume, bootable] } + vol_encrypted: + description: Boolean indicating if the volume is encrypted or not. + value: { get_attr: [ OS_Cinder_Volume, encrypted] } + vol_created_at: + description: The timestamp indicating volume creation. + value: { get_attr: [ OS_Cinder_Volume, created_at] } + vol_display_description: + description: Description of the volume. 
+ value: { get_attr: [ OS_Cinder_Volume, display_description] } + vol_source_volid: + description: The volume used as source, if any. + value: { get_attr: [ OS_Cinder_Volume, source_volid] } + vol_snapshot_id: + description: The snapshot the volume was created from, if any. + value: { get_attr: [ OS_Cinder_Volume, snapshot_id] } + vol_size: + description: The size of the volume in GB. + value: { get_attr: [ OS_Cinder_Volume, size] } + vol_volume_type: + description: The type of the volume mapping to a backend, if any. + value: { get_attr: [ OS_Cinder_Volume, volume_type] } + vol_metadata: + description: Key/value pairs associated with the volume. + value: { get_attr: [ OS_Cinder_Volume, metadata] } diff --git a/openstack/python-heat/python-heat/templates/hot/simple/OS_Cinder_VolumeAttachment.yaml b/openstack/python-heat/python-heat/templates/hot/simple/OS_Cinder_VolumeAttachment.yaml new file mode 100644 index 00000000..86c513fe --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/simple/OS_Cinder_VolumeAttachment.yaml @@ -0,0 +1,122 @@ +################################################################################ +# Copyright (c) 2013-2015 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrate constructing the heat resource OS::Cinder::VolumeAttachment +# +# Pre-Reqs: +# Normal Lab Setup +# A glance image named: wrl6 (glance image-list) +# A nova flavor named: small (nova flavor-list) +# A network named: private-net0 (neutron net-list) +# +# Mandatory Template Parameters: +# None +# +# Tenant Considerations: +# The tenant (or admin) needs access to the network selected +# +# Sample CLI syntax: +# heat stack-create -f OS_Cinder_VolumeAttachment.yaml STACK +# +# Expected Outcome: +# A nova server that has mounted a new 2GB cinder volume +# cinder list +# nova list +# nova show +# +################################################################################ + +heat_template_version: 2015-04-30 + +description: > + Demonstrate the OS::Cinder::VolumeAttachment heat resource + +parameters: + + IMAGE: + description: Glance image used by cinder volume (glance image-list) + type: string + default: wrl6 + constraints: + - custom_constraint: glance.image + + FLAVOR: + description: Nova flavor to use. (nova flavor-list) + type: string + default: small + constraints: + - custom_constraint: nova.flavor + + NETWORK: + description: name of network to use (neutron net-list) + type: string + default: private-net0 + constraints: + - custom_constraint: neutron.network + + VM_NAME: + description: Name of the newly created VM + type: string + default: cinder_vm + + VOLUME_NAME: + description: Name of the newly created volume + type: string + default: cinder_volume_demo + + VOLUME_SIZE: + description: size for the new cinder volume + type: number + default: 2 + + +resources: + + OS_Cinder_VolumeAttachment: + type: OS::Cinder::VolumeAttachment + properties: + ################################################# + # Required properties + ################################################# + # instance_uuid: {description: The ID of the server to which the + # volume attaches., + instance_uuid: { get_resource: some_instance } + + # mountpoint: {description: The location where the volume is + # exposed on the instance., Note: launchpad bug 1267744 suggests + # the mountpoint should be /dev/vd[b-z] except on baremetal + # This value is not respected. 
It will use the first available + # mountpoint (usually /dev/vdb) + mountpoint: /dev/vdb + + # volume_id: {description: The ID of the volume to be attached., + # required: true, type: string} + volume_id: { get_resource: some_volume } + + ################################################# + # Optional properties: NONE + ################################################# + + ################################################# + # The following resources are created because they + # are required fields for the volumeattachment + ################################################# + some_instance: + type: OS::Nova::Server + properties: + name: { get_param: VM_NAME } + image: { get_param: IMAGE } + flavor: { get_param: FLAVOR } + networks: + - network: { get_param: NETWORK } + + some_volume: + type: OS::Cinder::Volume + properties: + name: { get_param: VOLUME_NAME } + size: { get_param: VOLUME_SIZE } diff --git a/openstack/python-heat/python-heat/templates/hot/simple/OS_Glance_Image.yaml b/openstack/python-heat/python-heat/templates/hot/simple/OS_Glance_Image.yaml new file mode 100644 index 00000000..cb4275b4 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/simple/OS_Glance_Image.yaml @@ -0,0 +1,91 @@ +################################################################################ +# Copyright (c) 2013-2015 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrate constructing the heat resource OS::Glance::Image +# +# Pre-Reqs: +# Normal Lab Setup (networks, host interfaces, glance images) +# guest image file located and accessible at +# /home/wrsroot/images/tis-centos-guest.img +# +# Mandatory Template Parameters: +# None +# +# Tenant Considerations: +# Only admin can create a public glance image +# +# Sample CLI syntax: +# heat stack-create -f OS_Glance_Image.yaml STACK +# +# Expected Outcome: +# A new glance image called sample_image. +# glance image-list +# +################################################################################ + +heat_template_version: 2015-04-30 + +description: > + Demonstrate the OS::Glance::Image heat resource + +parameters: + + NAME: + description: Glance image name + type: string + default: sample_image + + CONTAINER_FORMAT: + description: Container format for glance image + type: string + default: bare + + DISK_FORMAT: + description: Disk format for glance image + type: string + default: qcow2 + + LOCATION: + description: Location to directly load the file from disk + type: string + default: file:///home/wrsroot/images/tis-centos-guest.img + + IS_PUBLIC: + description: Whether this is a public glance image. Admin only. 
+ type: boolean + default: true + + MIN_RAM: + description: Minimum amount of ram for image + type: number + default: 0 + + MIN_DISK: + description: Minimum amount of disk for image + type: number + default: 0 + +resources: + + OS_Glance_Image: + type: OS::Glance::Image + properties: + # Required + container_format: {get_param: CONTAINER_FORMAT} + disk_format: {get_param: DISK_FORMAT} + location: {get_param: LOCATION} + + # Optional + name: {get_param: NAME} + min_ram: {get_param: MIN_RAM} + min_disk: {get_param: MIN_DISK} + protected: false + is_public: {get_param: IS_PUBLIC} + # When ID is not specified, a UUID is generated + # id diff --git a/openstack/python-heat/python-heat/templates/hot/simple/OS_Heat_AccessPolicy.yaml b/openstack/python-heat/python-heat/templates/hot/simple/OS_Heat_AccessPolicy.yaml new file mode 100644 index 00000000..bfd1a818 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/simple/OS_Heat_AccessPolicy.yaml @@ -0,0 +1,77 @@ +################################################################################ +# Copyright (c) 2013-2015 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrate constructing the heat resource OS::Heat::AccessPolicy +# This creates an accesspolicy which can be set for a User +# +# Pre-Reqs: +# Normal Lab Setup (networks, host interfaces) +# A network named private-net0 with permission to create ports +# +# Mandatory Template Parameters: +# None +# +# Tenant Considerations: +# Cannot be run as tenant. +# +# Sample CLI syntax: +# heat stack-create -f OS_Heat_AccessPolicy.yaml STACK +# +# Expected Outcome: +# A newly created neutron port +# neutron port-list +# A new (v3) User with access to the port +# The user will not be visible using the CLI (v2) list commands +# heat resource-list STACK +# openstack user show +# +################################################################################ + +heat_template_version: 2015-04-30 + +description: > + Demonstrate the OS::Heat::AccessPolicy heat resource + +parameters: + + NETWORK: + description: Network used by the access policy (neutron net-list) + type: string + default: private-net0 + constraints: + - custom_constraint: neutron.network + + +resources: + + # AccessPolicy grants access to resources created as part of the stack + OS_Heat_AccessPolicy: + type: OS::Heat::AccessPolicy + properties: + ################################################# + # Required properties + ################################################# + # AllowedResources: { + # description: Resources that users are allowed to access by the + # DescribeStackResource API., required: true, type: list} + # Note: that this is a list of names and not references + AllowedResources: [ some_port, ] + + ################################################# + # Optional properties: None + ################################################# + + some_user: + type: AWS::IAM::User + properties: + Policies: [ { get_resource: OS_Heat_AccessPolicy } ] + + some_port: + type: OS::Neutron::Port + properties: + network: { get_param: NETWORK } diff --git a/openstack/python-heat/python-heat/templates/hot/simple/OS_Heat_AutoScalingGroup.yaml b/openstack/python-heat/python-heat/templates/hot/simple/OS_Heat_AutoScalingGroup.yaml new file mode 100644 index 00000000..2be63d67 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/simple/OS_Heat_AutoScalingGroup.yaml @@ -0,0 +1,77 @@ 
+################################################################################ +# Copyright (c) 2013-2015 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrate constructing the heat resource OS::Heat::AutoScalingGroup +# To see a better example, refer to scenarios/NestedAutoScale.yaml +# +# Pre-Reqs: +# Normal lab setup. Capable of launching 3 VMs +# A flavor called: small (nova flavor-list) +# A glance image called: wrl6 (glance image-list) +# A network called: private-net0 (neutron net-list) +# A template file OS_Nova_Server.yaml in the same folder as this yaml. +# +# Optional Template Parameters: +# FLAVOR: A nova flavor name or UUID for the VMs (nova flavor-list) +# IMAGE: A glance image name or UUID when launching VMs (glance image-list) +# NETWORK: Name or UUID of the network to use for the VMs (neutron net-list) +# +# Tenant Considerations: +# None +# +# Sample CLI syntax: +# heat stack-create -f OS_Heat_AutoScalingGroup.yaml STACK +# +# Expected Outcome: +# VMs running the guest image (nova list) +# +################################################################################ + +heat_template_version: 2015-04-30 + +description: > + Demonstrate the OS::Heat::AutoScalingGroup heat resource + +parameters: + + FLAVOR: + description: Nova flavor to use (nova flavor-list) + type: string + default: small + constraints: + - custom_constraint: nova.flavor + + IMAGE: + description: Name of the glance image to use (glance image-list) + type: string + default: wrl6 + constraints: + - custom_constraint: glance.image + + NETWORK: + description: Name of the network to use (neutron net-list) + type: string + default: private-net0 + constraints: + - custom_constraint: neutron.network + +resources: + + OS_Heat_AutoScalingGroup: + type: OS::Heat::AutoScalingGroup + properties: + cooldown: 60 + desired_capacity: 1 + max_size: 3 + min_size: 1 + resource: + type: OS_Nova_Server.yaml + properties: + FLAVOR: { get_param: FLAVOR } + IMAGE: { get_param: IMAGE } + NETWORK: { get_param: NETWORK } diff --git a/openstack/python-heat/python-heat/templates/hot/simple/OS_Heat_Stack.yaml b/openstack/python-heat/python-heat/templates/hot/simple/OS_Heat_Stack.yaml new file mode 100644 index 00000000..e351e35e --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/simple/OS_Heat_Stack.yaml @@ -0,0 +1,59 @@ +################################################################################ +# Copyright (c) 2013-2015 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrate constructing the heat resource OS::Heat::Stack +# This resource allows inlining a HEAT template +# +# Pre-Reqs: +# None +# +# Mandatory Template Parameters: +# None +# +# Tenant Considerations: +# None +# +# Sample CLI syntax: +# heat stack-create -f OS_Heat_Stack.yaml STACK +# +# Expected Outcome: +# 2 HEAT stacks (one stack owns the second stack) +# heat stack-list +# +# A new keypair called InlineKP (created by the second stack). 
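+# As a sketch, the owned (nested) stack can be located with:
+#   heat stack-list --show-nested
+# and the generated keypair is visible in: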
+# nova keypair-list +# +################################################################################ + +heat_template_version: 2015-04-30 + +description: > + Demonstrate the OS::Heat::Stack heat resource + +resources: + KeyPair_Stack: + type: OS::Heat::Stack + properties: + + template: | + heat_template_version: 2015-04-30 + parameters: + NEW_KEY_NAME: + description: Name for the new keypair + type: string + resources: + OS_Nova_KeyPair: + type: OS::Nova::KeyPair + properties: + name: { get_param: NEW_KEY_NAME } + + + parameters: { NEW_KEY_NAME: InlineKP } + # context: + # - region: + timeout: 1 diff --git a/openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_FloatingIP.yaml b/openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_FloatingIP.yaml new file mode 100644 index 00000000..c31408c9 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_FloatingIP.yaml @@ -0,0 +1,83 @@ +################################################################################ +# Copyright (c) 2013-2015 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrate constructing the heat resource OS::Neutron::FloatingIP +# +# Pre-Reqs: +# Normal Lab Setup (networks, host interfaces, glance images) +# external neutron network named: external-net0 (neutron net-list) +# +# Mandatory Template Parameters: +# None +# +# Tenant Considerations: +# The tenant (or admin) needs access to the network selected +# +# Sample CLI syntax: +# heat stack-create -f OS_Neutron_FloatingIP.yaml STACK +# +# Expected Outcome: +# A new floating IP address +# neutron floatingip-list +# +################################################################################ + +heat_template_version: 2015-04-30 + +description: > + Demonstrate the OS::Neutron::FloatingIP heat resource + +parameters: + + NETWORK: + description: External network to allocate floatingIP (neutron net-list) + type: string + default: external-net0 + constraints: + - custom_constraint: neutron.network + +resources: + + OS_Neutron_FloatingIP: + type: OS::Neutron::FloatingIP + properties: + ################################################# + # Required properties + ################################################# + # Mandatory. either: + # floating_network (name or uuid) + # or + # floating_network_id (uuid) + # + # floating_network_id: {description: ID of network to allocate + # floating IP from., required: true, type: string} + floating_network: { get_param: NETWORK } + + ################################################# + # Optional properties + ################################################# + # fixed_ip_address: {description: IP address to use if the port + # has multiple addresses., required: false, type: string} + + # port_id: {description: ID of an existing port with at least one + # IP address to associate with this floating IP. + # , required: false, type: string} + # This needs to be a port on the external SUBNET associated with + # the network + + # value_specs: + # default: {} + # description: Extra parameters to include in the "floatingip" + # object in the creation request. Parameters are often specific + # to installed hardware or extensions. + # required: false + # type: map + # + # Passing an invalid key will cause heat creation to fail. 
+ # Some keys that may be valid are: tenant_id, + # value_specs: {'tenant_id': 'some_uuid' } diff --git a/openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_Net.yaml b/openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_Net.yaml new file mode 100644 index 00000000..e7cefbfe --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_Net.yaml @@ -0,0 +1,113 @@ +################################################################################ +# Copyright (c) 2013-2015 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrate constructing the heat resource OS::Neutron::Net +# +# Pre-Reqs: +# None +# +# Mandatory Template Parameters: +# None +# +# Tenant Considerations: +# None +# +# Sample CLI syntax: +# heat stack-create -f OS_Neutron_Net.yaml STACK +# +# Expected Outcome: +# A new network called sample-net +# neutron net-list +# +################################################################################ + +heat_template_version: 2015-04-30 + +description: > + Demonstrate the OS::Neutron::Net heat resource + +parameters: + + NETWORK_NAME: + description: name of network to create + type: string + default: sample-net + +resources: + + OS_Neutron_Net: + type: OS::Neutron::Net + properties: + ################################################# + # Required properties + ################################################# + + ################################################# + # Optional properties + ################################################# + + # admin_state_up: {default: true, description: A boolean value + # specifying the administrative status of the network., + # required: false, type: boolean} + admin_state_up: true + + # name: A string specifying a symbolic name for the network, which + # is not required to be unique.', required: false, type: string + name: { get_param: NETWORK_NAME } + + # shared: {description: Whether this network should be shared + # across all tenants. Note that the default policy setting + # restricts usage of this attribute to administrative users only. + # ,required: false, type: boolean} + shared: false + + # tenant_id: {description: The ID of the tenant which will own the + # network. Only administrative users can set the tenant + # identifier; this cannot be changed using authorization policies. + # , required: false, type: string} + + # value_specs: + # default: {} + # description: Extra parameters to include in the "network" + # object in the creation request. Parameters are often specific + # to installed hardware or extensions. + # required: false type: map + # + # For example, + # + # value_specs: + # 'provider:network_type': vlan + # 'provider:physical_network': group0-data0b + # 'router:external': false + # vlan_transparent: false + +outputs: + + nwk_admin_state_up: + description: The administrative status of the network. + value: { get_attr: [ OS_Neutron_Net, admin_state_up] } + + nwk_name: + description: The name of the network. + value: { get_attr: [ OS_Neutron_Net, name] } + + nwk_show: + description: All attributes. + value: { get_attr: [ OS_Neutron_Net, show] } + + nwk_status: + description: The status of the network. + value: { get_attr: [ OS_Neutron_Net, status] } + + nwk_subnets: + description: Subnets of this network. + value: { get_attr: [ OS_Neutron_Net, subnets] } + + nwk_tenant_id: + description: The tenant owning this network. 
+ value: { get_attr: [ OS_Neutron_Net, tenant_id] } diff --git a/openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_Port.yaml b/openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_Port.yaml new file mode 100644 index 00000000..2b49c4ee --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_Port.yaml @@ -0,0 +1,137 @@ +################################################################################ +# Copyright (c) 2013-2015 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrate constructing the heat resource OS::Neutron::Port +# +# Pre-Reqs: +# Normal Lab Setup (networks, host interfaces, glance images) +# An external network named: external-net0 (neutron net-list) +# +# Mandatory Template Parameters: +# None +# +# Tenant Considerations: +# The tenant (or admin) needs access to the network selected +# +# Sample CLI syntax: +# heat stack-create -f OS_Neutron_Port.yaml STACK +# +# Expected Outcome: +# A new port on that network named: sample_port +# neutron port-list +# +################################################################################ + +heat_template_version: 2015-04-30 + +description: > + Demonstrate the OS::Neutron::Port heat resource + +parameters: + + NETWORK: + description: the external network to use when creating the port + type: string + default: external-net0 + constraints: + - custom_constraint: neutron.network + + + PORT_NAME: + description: Name for the port + type: string + default: sample_port + +resources: + + OS_Neutron_Port: + type: OS::Neutron::Port + properties: + ################################################# + # Required properties + ################################################# + # Requires network_id (uuid) or network (name or uuid) + # network_id: + network: {get_param: NETWORK } + + ################################################# + # Optional properties + ################################################# + # admin_state_up: {default: true, required: false, type: boolean} + admin_state_up: true + + # device_id: {required: false, type: string} + device_id: device_X + + # fixed_ips: + # required: false + # schema: + # '*': + # required: false + # schema: + # ip_address: {required: false, type: string} + # subnet_id: {required: false, type: string} + # type: map + # type: list + + # mac_address: {required: false, type: string} + + # name: {required: false, type: string} + name: { get_param: PORT_NAME } + + # security_groups: {required: false, type: list} + # value_specs: + # default: {} + # required: false + # type: map + + +outputs: + prt_admin_state_up: + description: The administrative state of this port. + value: { get_attr: [ OS_Neutron_Port, admin_state_up] } + + prt_device_id: + description: Unique identifier for the device. + value: { get_attr: [ OS_Neutron_Port, device_id] } + + prt_device_owner: + description: Name of the network owning the port. + value: { get_attr: [ OS_Neutron_Port, device_owner] } + + prt_fixed_ips: + description: Fixed ip addresses. + value: { get_attr: [ OS_Neutron_Port, fixed_ips] } + + prt_mac_address: + description: Mac address of the port. + value: { get_attr: [ OS_Neutron_Port, mac_address] } + + prt_name: + description: Friendly name of the port. + value: { get_attr: [ OS_Neutron_Port, name] } + + prt_network_id: + description: Unique identifier for the network owning the port. 
+ value: { get_attr: [ OS_Neutron_Port, network_id] } + + prt_security_groups: + description: A list of security groups for the port. + value: { get_attr: [ OS_Neutron_Port, security_groups] } + + prt_show: + description: All attributes. + value: { get_attr: [ OS_Neutron_Port, show] } + + prt_status: + description: The status of the port. + value: { get_attr: [ OS_Neutron_Port, status] } + + prt_tenant_id: + description: Tenant owning the port + value: { get_attr: [ OS_Neutron_Port, tenant_id] } diff --git a/openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_Router.yaml b/openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_Router.yaml new file mode 100644 index 00000000..07812306 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_Router.yaml @@ -0,0 +1,102 @@ +################################################################################ +# Copyright (c) 2013-2015 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrate constructing the heat resource OS::Neutron::Router +# +# Pre-Reqs: +# Normal Lab Setup (networks, host interfaces, glance images) +# network named external-net0 (neutron net-list) +# +# Mandatory Template Parameters: +# None +# +# Tenant Considerations: +# The tenant (or admin) needs access to the network selected +# +# Sample CLI syntax: +# heat stack-create -f OS_Neutron_Router.yaml STACK +# +# Expected Outcome: +# A new router for the network called: sample_router +# neutron router-list +# +################################################################################ + +heat_template_version: 2015-04-30 + +description: > + Demonstrate the OS::Neutron::Router heat resource + +parameters: + + NETWORK: + description: the external network to use when creating the Router + type: string + default: external-net0 + constraints: + - custom_constraint: neutron.network + + + ROUTER_NAME: + description: name for the new router + type: string + default: sample_router + +resources: + + OS_Neutron_Router: + type: OS::Neutron::Router + properties: + ################################################# + # Required properties: None + ################################################# + + ################################################# + # Optional properties + ################################################# + # admin_state_up: {default: true, required: false, type: boolean} + admin_state_up: true + + # name: {required: false, type: string} + name: { get_param: ROUTER_NAME } + + # value_specs: + # default: {} + # required: false + # type: map + # Possible valid keys are: + # external_gateway_info (value is a dict) + external_gateway_info: + network: { get_param: NETWORK } + enable_snat: true + +outputs: + + rt_admin_state_up: + description: Administrative state of the router. + value: { get_attr: [ OS_Neutron_Router, admin_state_up] } + + rt_external_gateway_info: + description: Gateway network for the router. + value: { get_attr: [ OS_Neutron_Router, external_gateway_info] } + + rt_name: + description: Friendly name of the router. + value: { get_attr: [ OS_Neutron_Router, name] } + + rt_show: + description: All attributes. + value: { get_attr: [ OS_Neutron_Router, show] } + + rt_status: + description: The status of the router. + value: { get_attr: [ OS_Neutron_Router, status] } + + rt_tenant_id: + description: Tenant owning the router. 
+    value: { get_attr: [ OS_Neutron_Router, tenant_id] }
diff --git a/openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_RouterGateway.yaml b/openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_RouterGateway.yaml
new file mode 100644
index 00000000..9a8f8bd2
--- /dev/null
+++ b/openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_RouterGateway.yaml
@@ -0,0 +1,75 @@
+################################################################################
+# Copyright (c) 2013-2015 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+################################################################################
+#
+# Objective:
+#   Demonstrate constructing the heat resource OS::Neutron::RouterGateway
+#
+# Pre-Reqs:
+#   Normal Lab Setup (networks, host interfaces, glance images)
+#   An external network named: external-net0 (neutron net-list)
+#
+# Mandatory Template Parameters:
+#   None
+#
+# Tenant Considerations:
+#   The tenant (or admin) needs access to the network selected
+#
+# Sample CLI syntax:
+#   heat stack-create -f OS_Neutron_RouterGateway.yaml STACK
+#
+# Expected Outcome:
+#   Creates a router called sample_gateway_router and then adds an
+#   external gateway to it
+#      neutron router-list
+#
+################################################################################
+
+heat_template_version: 2015-04-30
+
+description: >
+  Demonstrate the OS::Neutron::RouterGateway heat resource
+
+parameters:
+
+  NETWORK:
+    description: the external network to use when creating the router
+    type: string
+    default: external-net0
+    constraints:
+      - custom_constraint: neutron.network
+
+  ROUTER_NAME:
+    description: name of the new router
+    type: string
+    default: sample_gateway_router
+
+resources:
+
+  OS_Neutron_RouterGateway:
+    type: OS::Neutron::RouterGateway
+    properties:
+      #################################################
+      # Required properties
+      #################################################
+      # either network_id (uuid) or network (name or uuid)
+      # network_id:
+      network: {get_param: NETWORK}
+
+      # router_id: {required: true, type: string}
+      router_id: { get_resource: some_router}
+
+      #################################################
+      # Optional properties: None
+      #################################################
+
+  some_router:
+    type: OS::Neutron::Router
+    properties:
+      name: { get_param: ROUTER_NAME }
+      external_gateway_info:
+        network: { get_param: NETWORK }
+        enable_snat: true
diff --git a/openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_RouterInterface.yaml b/openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_RouterInterface.yaml
new file mode 100644
index 00000000..d8214fc3
--- /dev/null
+++ b/openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_RouterInterface.yaml
@@ -0,0 +1,107 @@
+################################################################################
+# Copyright (c) 2013-2015 Wind River Systems, Inc.
+# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrate constructing the heat resource OS::Neutron::RouterInterface +# +# Pre-Reqs: +# Normal Lab Setup (networks, host interfaces, glance images) +# external network named: external-net0 (neutron net-list) +# +# Mandatory Template Parameters: +# None +# +# Tenant Considerations: +# The tenant (or admin) needs access to the network selected +# +# Sample CLI syntax: +# heat stack-create -f OS_Neutron_RouterInterface.yaml STACK +# +# Expected Outcome: +# Creates a router called sample_if_router +# neutron router-list +# Creates a net called sample_if_net +# neutron net-list +# Creates a subnet called sample_if_subnet +# neutron subnet-list +# A new port for the interface corresponding to the subnet +# neutron router-port-list sample_if_router +# +################################################################################ + +heat_template_version: 2015-04-30 + +description: > + Demonstrate the OS::Neutron::RouterInterface heat resource + +parameters: + + NETWORK: + description: external network used when creating the router and subnet + type: string + default: external-net0 + constraints: + - custom_constraint: neutron.network + + NET_NAME: + description: the network to use when creating the subnet + type: string + default: sample_if_net + + ROUTER_NAME: + description: name of the new router + type: string + default: sample_if_router + + SUBNET_NAME: + description: name of the new subnet + type: string + default: sample_if_subnet + + CIDR: + description: cidr for the sample subnet + type: string + default: 192.168.12.0/24 + +resources: + + OS_Neutron_RouterInterface: + type: OS::Neutron::RouterInterface + properties: + ################################################# + # Required properties + ################################################# + + # router_id: {required: true, type: string} + router_id: {get_resource: some_router} + + # subnet_id: {required: true, type: string} + subnet_id: {get_resource: some_subnet} + + ################################################# + # Optional properties: None + ################################################# + + some_router: + type: OS::Neutron::Router + properties: + name: { get_param: ROUTER_NAME } + external_gateway_info: + network: {get_param: NETWORK } + enable_snat: true + + some_net: + type: OS::Neutron::Net + properties: + name: { get_param: NET_NAME } + + some_subnet: + type: OS::Neutron::Subnet + properties: + name: { get_param: SUBNET_NAME } + cidr: { get_param: CIDR } + network: { get_resource: some_net } diff --git a/openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_SecurityGroup.yaml b/openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_SecurityGroup.yaml new file mode 100644 index 00000000..3dd01a31 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_SecurityGroup.yaml @@ -0,0 +1,61 @@ +################################################################################ +# Copyright (c) 2013-2015 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrate constructing the heat resource OS::Neutron::SecurityGroup +# +# Pre-Reqs: +# Normal Lab Setup (networks, host interfaces, glance images) +# +# Mandatory Template Parameters: +# None +# +# Tenant Considerations: +# An admin action +# +# Sample CLI syntax: +# heat stack-create -f OS_Neutron_SecurityGroup.yaml STACK +# +# Expected Outcome: +# A new neutron security group called SecurityGroupDeluxe +# neutron security-group-list +# +################################################################################ + +heat_template_version: 2015-04-30 + +description: > + Demonstrate the OS::Neutron::SecurityGroup heat resource + +parameters: + + NAME: + description: Name for the new neutron security group + type: string + default: SecurityGroupDeluxe + + DESCRIPTION: + description: Description for the new neutron security group + type: string + default: A sample neutron security group + +resources: + + OS_Neutron_SecurityGroup: + type: OS::Neutron::SecurityGroup + properties: + name: { get_param: NAME } + description: { get_param: DESCRIPTION } + rules: + - direction: ingress + protocol: tcp + port_range_min: 80 + port_range_max: 80 + - direction: ingress + protocol: udp + port_range_min: 81 + port_range_max: 81 diff --git a/openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_Subnet.yaml b/openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_Subnet.yaml new file mode 100644 index 00000000..b2291158 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/simple/OS_Neutron_Subnet.yaml @@ -0,0 +1,149 @@ +################################################################################ +# Copyright (c) 2013-2015 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrate constructing the heat resource OS::Neutron::Subnet +# +# Pre-Reqs: +# Normal Lab Setup (networks, host interfaces, glance images) +# A network named: private-net0 (neutron net-list) +# +# Mandatory Template Parameters: +# None +# +# Tenant Considerations: +# The tenant (or admin) needs access to the network selected +# +# Sample CLI syntax: +# heat stack-create -f OS_Neutron_Subnet.yaml STACK +# +# Expected Outcome: +# A new subnet called sample_subnet +# neutron subnet-list +# +################################################################################ + +heat_template_version: 2015-04-30 + +description: > + Demonstrate the OS::Neutron::Subnet heat resource + +parameters: + + NETWORK: + description: UUID of the parent network to use when creating the subnet + type: string + default: private-net0 + constraints: + - custom_constraint: neutron.network + + CIDR: + description: Cidr of the subnet + type: string + default: 192.168.12.0/24 + +resources: + + OS_Neutron_Subnet: + type: OS::Neutron::Subnet + properties: + ################################################# + # Required properties + ################################################# + + # cidr: {required: true, type: string} + # An error will occur if this overlaps with an existing subnet + cidr: { get_param: CIDR } + + # either network_id (uuid) or network (name or uuid) is required + # network_id: { get_param: NETWORK } + network: { get_param: NETWORK } + + ################################################# + # Optional properties + ################################################# + # allocation_pools: + # required: false + # schema: + # '*': + # required: false + # schema: + # end: {required: true, type: string} + # start: {required: true, type: string} + # type: map + # type: list + + # dns_nameservers: {required: false, type: list} + dns_nameservers: + - 8.8.8.8 + - 9.9.9.9 + + # enable_dhcp: {required: false, type: boolean} + enable_dhcp: true + + # gateway_ip: {required: false, type: string} + + # ip_version: + # constraints: + # - allowed_values: [4, 6] + # default: 4 + # required: false + # type: integer + ip_version: 4 + + # name: {required: false, type: string} + name: sample_subnet + + # tenant_id: {required: false, type: string} + + # value_specs: + # default: {} + # required: false + # type: map + + +outputs: + + sn_allocation_pools: + description: Ip allocation pools and their ranges. + value: { get_attr: [ OS_Neutron_Subnet, allocation_pools] } + + sn_cidr: + description: CIDR block notation for this subnet. + value: { get_attr: [ OS_Neutron_Subnet, cidr] } + + sn_dns_nameservers: + description: List of dns nameservers. + value: { get_attr: [ OS_Neutron_Subnet, dns_nameservers] } + + sn_enable_dhcp: + description: '''true'' if DHCP enabled for subnet else ''false''.' + value: { get_attr: [ OS_Neutron_Subnet, enable_dhcp] } + + sn_gateway_ip: + description: Ip of the subnet's gateway. + value: { get_attr: [ OS_Neutron_Subnet, gateway_ip] } + + sn_ip_version: + description: Ip version for the subnet. + value: { get_attr: [ OS_Neutron_Subnet, ip_version] } + + sn_name: + description: Friendly name of the subnet. + value: { get_attr: [ OS_Neutron_Subnet, name] } + + sn_network_id: + description: Parent network of the subnet. + value: { get_attr: [ OS_Neutron_Subnet, network_id] } + + sn_show: + description: All attributes. 
+ value: { get_attr: [ OS_Neutron_Subnet, show] } + + sn_tenant_id: + description: Tenant owning the subnet. + value: { get_attr: [ OS_Neutron_Subnet, tenant_id] } diff --git a/openstack/python-heat/python-heat/templates/hot/simple/OS_Nova_Flavor.yaml b/openstack/python-heat/python-heat/templates/hot/simple/OS_Nova_Flavor.yaml new file mode 100644 index 00000000..620c18e0 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/simple/OS_Nova_Flavor.yaml @@ -0,0 +1,106 @@ +################################################################################ +# Copyright (c) 2013-2015 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrate constructing the heat resource OS::Nova::Flavor +# Shows setting cpu_model and cpu_policy +# +# Pre-Reqs: +# None +# +# Mandatory Template Parameters: +# None +# +# Tenant Considerations: +# Only admin can create a flavor +# +# Sample CLI syntax: +# heat stack-create -f OS_Nova_Flavor.yaml STACK +# +# Expected Outcome: +# A new flavor +# nova flavor-list +# +################################################################################ + +heat_template_version: 2015-04-30 + +description: > + Demonstrate the OS::Nova::Flavor heat resource + +parameters: + + FLAVOR_NAME: + description: Name for the flavor + type: string + default: sample-flavor + RAM: + description: How much RAM + type: number + default: 256 + VCPUS: + description: Num vcpus + type: number + default: 1 + DISK: + description: Disk. 0 means no limit + type: number + default: 0 + EPHEMERAL: + description: Ephemeral + type: number + default: 0 + SWAP: + description: Swap space in MB + type: number + default: 0 + RXTX_FACTOR: + description: RX TX Factor + type: string + constraints: + - allowed_pattern: "[0-9]+[.][0-9]+" + default: "1.0" + CPU_MODEL: + description: Cpu model of the new Flavor + type: string + constraints: + - allowed_values: + - Conroe + - Penryn + - Nehalem + - Westmere + - SandyBridge + - Haswell + default: SandyBridge + CPU_POLICY: + description: Cpu policy of the new Flavor + type: string + constraints: + - allowed_values: + - dedicated + - shared + default: shared + +resources: + + OS_Nova_Flavor: + type: OS::Nova::Flavor + properties: + # Required + ram: {get_param: RAM} + vcpus: {get_param: VCPUS} + disk: {get_param: DISK} + # Optional + # id: some_id + # is_public: False + name: {get_param: FLAVOR_NAME} + ephemeral: {get_param: EPHEMERAL} + swap: {get_param: SWAP} + rxtx_factor: {get_param: RXTX_FACTOR} + extra_specs: + 'hw:cpu_model': {get_param: CPU_MODEL} + 'hw:cpu_policy': {get_param: CPU_POLICY} diff --git a/openstack/python-heat/python-heat/templates/hot/simple/OS_Nova_KeyPair.yaml b/openstack/python-heat/python-heat/templates/hot/simple/OS_Nova_KeyPair.yaml new file mode 100644 index 00000000..d9268c62 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/simple/OS_Nova_KeyPair.yaml @@ -0,0 +1,63 @@ +################################################################################ +# Copyright (c) 2013-2015 Wind River Systems, Inc. 
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+################################################################################
+#
+# Objective:
+#   Demonstrate constructing the heat resource OS::Nova::KeyPair
+#
+# Pre-Reqs:
+#   None
+#
+# Mandatory Template Parameters:
+#   None
+#
+# Tenant Considerations:
+#   If a tenant can run: nova keypair-create, then they can run this template
+#
+# Sample CLI syntax:
+#   heat stack-create -f OS_Nova_KeyPair.yaml STACK
+#
+# Expected Outcome:
+#   A new keypair
+#      nova keypair-list
+#
+################################################################################
+
+heat_template_version: 2015-04-30
+
+description: >
+  Demonstrate the OS::Nova::KeyPair heat resource
+
+parameters:
+
+  NEW_KEY_NAME:
+    description: Name for the new keypair
+    type: string
+    default: KeyPairDeluxe
+
+resources:
+
+  OS_Nova_KeyPair:
+    type: OS::Nova::KeyPair
+    properties:
+      # required properties
+      name: { get_param: NEW_KEY_NAME }
+      # optional properties
+      # public_key: CONTENTS of the PUBLIC KEY FILE created by:
+      #   ssh-keygen -t rsa -f /root/.ssh/id_rsa -N "" -q
+      # If the public key file is accessible when the stack is created,
+      # it can be read with get_file:
+      #   public_key: { get_file: new_key.pub }
+      save_private_key: false
+
+outputs:
+
+  kp_public_key:
+    description: The public key for this new keypair
+    value: { get_attr: [ OS_Nova_KeyPair, public_key] }
+
+  kp_private_key:
+    description: The private key for this new keypair
+    value: { get_attr: [ OS_Nova_KeyPair, private_key] }
diff --git a/openstack/python-heat/python-heat/templates/hot/simple/OS_Nova_Server.yaml b/openstack/python-heat/python-heat/templates/hot/simple/OS_Nova_Server.yaml
new file mode 100644
index 00000000..8d49dfd3
--- /dev/null
+++ b/openstack/python-heat/python-heat/templates/hot/simple/OS_Nova_Server.yaml
@@ -0,0 +1,226 @@
+################################################################################
+# Copyright (c) 2013-2015 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+################################################################################
+#
+# Objective:
+#   Demonstrate constructing the heat resource OS::Nova::Server
+#
+# Pre-Reqs:
+#   Normal Lab Setup (networks, host interfaces, glance images)
+#   A glance image named: wrl6 (glance image-list)
+#   A nova flavor named: small (nova flavor-list)
+#   A neutron network named: private-net0 (neutron net-list)
+#
+# Mandatory Template Parameters:
+#   None
+#
+# Tenant Considerations:
+#   The tenant (or admin) needs access to the network selected
+#
+# Sample CLI syntax:
+#   heat stack-create -f OS_Nova_Server.yaml STACK
+#
+# Expected Outcome:
+#   A newly launched nova instance
+#      nova list
+#
+################################################################################
+
+heat_template_version: 2015-04-30
+
+description: >
+  Demonstrate the OS::Nova::Server heat resource
+
+parameters:
+
+  SERVER_NAME:
+    description: name for the new server
+    type: string
+    default: nova_server
+
+  NETWORK:
+    description: network to use (neutron net-list)
+    type: string
+    default: private-net0
+    constraints:
+      - custom_constraint: neutron.network
+
+  IMAGE:
+    description: Glance image to use (glance image-list)
+    type: string
+    default: wrl6
+    constraints:
+      - custom_constraint: glance.image
+
+  FLAVOR:
+    description: Nova flavor to use.
(nova flavor-list) + type: string + default: small + constraints: + - custom_constraint: nova.flavor + +resources: + + r_string: + type: OS::Heat::RandomString + properties: + length: 4 + sequence: digits + + OS_Nova_Server: + type: OS::Nova::Server + properties: + ################################################# + # Required properties + ################################################# + # flavor: {description: The ID or name of the flavor to boot + # onto, required: true, type: string} + flavor: { get_param: FLAVOR } + + # Either image or block_device_mapping with device_name='vda' + # MUST be specified + # image: {description: The ID or name of the image to boot with, + # required: false, type: string} + image: { get_param: IMAGE } + + ################################################# + # Optional properties + ################################################# + # availability_zone: {description: Name of the availability zone + # for server placement, required: false, type: string} + + # block_device_mapping: + # description: Block device mappings for this server + # required: false + # schema: + # '*': + # required: false + # schema: + # delete_on_termination: {description: Indicate whether + # the volume should be deleted when the server is + # terminated, required: false, type: boolean} + # device_name: {description: A device name where the + # volume will be attached in the system at + # /dev/device_name. This value is typically vda, + # required: true, type: string} + # snapshot_id: {description: The ID of the snapshot to + # create a volume from, required: false, type: string} + # volume_id: {description: The ID of the volume to boot + # from. Only one of volume_id or snapshot_id should + # be provided, required: false, type: string} + # volume_size: {description: 'The size of the volume, in + # GB. It is safe to leave this blank and have the + # Compute service infer the size', + # required: false,type: string} + # type: map + # type: list + + # config_drive: {description: 'value for config drive either + # boolean, or volume-id', required: false, type: string} + + # diskConfig: + # constraints: + # - allowed_values: [AUTO, MANUAL] + # description: Control how the disk is partitioned when the + # server is created + # required: false + # type: string + + # flavor_update_policy: + # constraints: + # - allowed_values: [RESIZE, REPLACE] + # default: RESIZE + # description: Policy on how to apply a flavor update; either + # by requesting a server resize or by replacing the entire + # server + # required: false + # type: string + flavor_update_policy: REPLACE + + + # key_name: {description: Name of keypair to inject into server, + # required: false, type: string} + + # metadata: {description: 'Arbitrary key/value metadata to store + # for this server. 
+      #   A maximum of five entries is allowed, and both
+      #   keys and values must be 255 characters or less',
+      #   required: false, type: map}
+
+      # name: {description: Optional server name,
+      #   required: false, type: string}
+      name:
+        list_join:
+          - "-"
+          - [{ get_param: SERVER_NAME}, { get_resource: r_string }]
+
+
+      # networks:
+      #   description: An ordered list of nics to be added to this
+      #     server, with information about connected networks,
+      #     fixed ips, ports, etc.
+      #   required: false
+      #   schema:
+      #     '*':
+      #       required: false
+      #       schema:
+      #         fixed_ip: {description: Fixed IP address to specify for
+      #           the port created on the requested network,
+      #           required: false, type: string}
+      #         port: {description: ID of an existing port to associate
+      #           with this server, required: false, type: string}
+      #         uuid: {description: ID of network to create a port on,
+      #           required: false, type: string}
+      #       type: map
+      #   type: list
+      networks:
+        - network: { get_param: NETWORK }
+
+      # reservation_id: {description: A UUID for the set of servers being
+      #   requested, required: false, type: string}
+
+      # scheduler_hints: {description: Arbitrary key-value pairs
+      #   specified by the client to help boot a server,
+      #   required: false, type: map}
+
+      # security_groups: {description: List of security group names,
+      #   required: false, type: list}
+      # Note: networks and security_groups cannot BOTH be specified
+
+      # user_data: {description: User data script to be executed by
+      #   cloud-init, required: false, type: string}
+
+      # user_data_format: {description: RAW will bypass MIME formatting
+      #   user data., required: false, type: string}
+      user_data_format: RAW
+
+outputs:
+  srv_accessIPv4:
+    description: Manually assigned alt public IPv4 address of server
+    value: { get_attr: [ OS_Nova_Server, accessIPv4] }
+
+  srv_accessIPv6:
+    description: Manually assigned alt public IPv6 address of the server
+    value: { get_attr: [ OS_Nova_Server, accessIPv6] }
+
+  srv_addresses:
+    description: A dict of all network addresses as returned by the API
+    value: { get_attr: [ OS_Nova_Server, addresses] }
+
+  srv_first_address:
+    description: the first assigned network address, or empty string
+    value: { get_attr: [ OS_Nova_Server, first_address] }
+
+  srv_instance_name:
+    description: AWS compatible instance name
+    value: { get_attr: [ OS_Nova_Server, instance_name] }
+
+  srv_networks:
+    description: A dict of assigned network addresses
+    value: { get_attr: [ OS_Nova_Server, networks] }
+
+  srv_show:
+    description: A dict of all server details as returned by the API
+    value: { get_attr: [ OS_Nova_Server, show] }
diff --git a/openstack/python-heat/python-heat/templates/hot/simple/OS_Nova_ServerGroup.yaml b/openstack/python-heat/python-heat/templates/hot/simple/OS_Nova_ServerGroup.yaml
new file mode 100644
index 00000000..06561f85
--- /dev/null
+++ b/openstack/python-heat/python-heat/templates/hot/simple/OS_Nova_ServerGroup.yaml
@@ -0,0 +1,99 @@
+################################################################################
+# Copyright (c) 2013-2015 Wind River Systems, Inc.
+# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrate constructing the heat resource OS::Nova::ServerGroup +# +# Pre-Reqs: +# Normal Lab Setup (networks, host interfaces, glance images) +# +# Mandatory Template Parameters: +# None +# +# Tenant Considerations: +# None +# +# Sample CLI syntax: +# heat stack-create -f OS_Nova_ServerGroup.yaml STACK +# +# Expected Outcome: +# A new nova servergroup of size 2 and type affinity +# nova server-group-list +# +################################################################################ + +heat_template_version: 2015-04-30 + +description: > + Demonstrate the OS::Nova::ServerGroup heat resource + +parameters: + + FLAVOR: + type: string + description: Nova flavor to use (nova flavor-list) + default: small + constraints: + - custom_constraint: nova.flavor + + NETWORK: + type: string + description: network to use (neutron net-list) + default: private-net0 + constraints: + - custom_constraint: neutron.network + + IMAGE: + description: Glance image to use (glance image-list) + type: string + default: wrl6 + constraints: + - custom_constraint: glance.image + + SIZE: + description: Number of servers to create for the group + type: number + default: 2 + +resources: + + OS_Nova_ServerGroup: + type: OS::Nova::ServerGroup + properties: + ################################################# + # Required properties + ################################################# + # policies: {description: The scheduler policy for the server + # group., required: true, type: string} + # Note: only valid values are 'affinity' or 'anti-affinity' + policies: ['affinity'] + + ################################################# + # Optional properties: + ################################################# + # best_effort: {description: Whether the scheduler should still + # allow the server to be created even if it cannot satisfy the + # group policy. (Optional.), required: false, type: boolean} + best_effort: true + + # group_size: {description: Maximum number of servers in the + # server group. (Optional.), required: false, type: integer} + group_size: { get_param: SIZE } + + OS_Nova_RG: + type: OS::Heat::ResourceGroup + properties: + count: { get_param: SIZE } + resource_def: + type: OS::Nova::Server + properties: + networks: + - network: { get_param: NETWORK } + image: { get_param: IMAGE } + flavor: { get_param: FLAVOR } + scheduler_hints: + group: { get_resource: OS_Nova_ServerGroup } diff --git a/openstack/python-heat/python-heat/templates/hot/simple/WR_Neutron_Port_Forwarding.yaml b/openstack/python-heat/python-heat/templates/hot/simple/WR_Neutron_Port_Forwarding.yaml new file mode 100644 index 00000000..0fe29983 --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/simple/WR_Neutron_Port_Forwarding.yaml @@ -0,0 +1,129 @@ +################################################################################ +# Copyright (c) 2015 Wind River Systems, Inc. 
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+################################################################################
+#
+# Objective:
+#   Demonstrate constructing the heat resource WR::Neutron::PortForwarding
+#
+# Pre-Reqs:
+#   A glance image called: wrl6
+#   A nova flavor called: m1.tiny
+#   A neutron network called: external-net0
+#   Permissions to create a neutron router and a nova server
+#
+# Tenant Considerations:
+#   This is an admin activity
+#
+# Sample CLI syntax:
+#   heat stack-create -f WR_Neutron_Port_Forwarding.yaml STACK
+#
+# Expected Outcome:
+#   A new router called: PF_Router (neutron router-list)
+#   A new server called: PF_Server (nova list)
+#   A new port forwarding rule created between a public port number on the
+#   router and a private port number on a VM instance.
+#   Verify using: neutron portforwarding-list
+#
+################################################################################
+
+heat_template_version: 2015-04-30
+
+description: >
+  Demonstrate the WR::Neutron::PortForwarding heat resource
+
+parameters:
+
+  ROUTER_NAME:
+    description: Name of the new router instance
+    type: string
+    default: PF_Router
+
+  SERVER_NAME:
+    description: Name of the new server
+    type: string
+    default: PF_Server
+
+  NETWORK:
+    description: the external network to use when creating the Router
+    type: string
+    default: external-net0
+    constraints:
+      - custom_constraint: neutron.network
+
+  IMAGE:
+    type: string
+    description: Name of image to use for server
+    default: wrl6
+    constraints:
+      - custom_constraint: glance.image
+
+  FLAVOR:
+    type: string
+    description: Flavor to use for server
+    default: m1.tiny
+    constraints:
+      - custom_constraint: nova.flavor
+
+  INSIDE_PORT:
+    description: Layer4 protocol port number in use on the VM instance
+    type: number
+    default: 80
+
+  OUTSIDE_PORT:
+    description: Layer4 protocol port number to publish on the router
+    type: number
+    default: 8080
+
+  PROTOCOL:
+    description: Layer4 protocol type
+    type: string
+    default: tcp
+
+  DESCRIPTION:
+    description: User defined description of the forwarding rule
+    type: string
+    default: A sample port forwarding rule
+
+resources:
+  A_Router:
+    type: OS::Neutron::Router
+    properties:
+      admin_state_up: true
+      name: { get_param: ROUTER_NAME }
+      external_gateway_info:
+        network: {get_param: NETWORK}
+        enable_snat: true
+
+  A_Server:
+    type: OS::Nova::Server
+    properties:
+      name: { get_param: SERVER_NAME }
+      image: {get_param: IMAGE}
+      flavor: {get_param: FLAVOR}
+      networks: [ network: { get_param: NETWORK } ]
+
+  WR_Neutron_Port_Forwarding:
+    type: WR::Neutron::PortForwarding
+    properties:
+      # required properties
+      router_id: { get_resource: A_Router }
+      inside_addr: { get_attr: [A_Server, first_address ] }
+      inside_port: { get_param: INSIDE_PORT }
+      outside_port: { get_param: OUTSIDE_PORT }
+      protocol: { get_param: PROTOCOL }
+
+      # optional properties
+      description: { get_param: DESCRIPTION }
+
+outputs:
+
+  pn_show:
+    description: All attributes of the port forwarding rule
+    value: { get_attr: [ WR_Neutron_Port_Forwarding, show] }
+
+  pn_port_id:
+    description: The neutron port uuid to which the IP address is associated
+    value: { get_attr: [ WR_Neutron_Port_Forwarding, port_id] }
diff --git a/openstack/python-heat/python-heat/templates/hot/simple/WR_Neutron_ProviderNet.yaml b/openstack/python-heat/python-heat/templates/hot/simple/WR_Neutron_ProviderNet.yaml
new file mode 100644
index 00000000..005181ed
--- /dev/null
+++
b/openstack/python-heat/python-heat/templates/hot/simple/WR_Neutron_ProviderNet.yaml @@ -0,0 +1,92 @@ +################################################################################ +# Copyright (c) 2013-2015 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrate constructing the heat resource WR::Neutron::ProviderNet +# +# Pre-Reqs: +# None +# +# Mandatory Template Parameters: +# None +# +# Tenant Considerations: +# This is an admin activity +# +# Sample CLI syntax: +# heat stack-create -f WR_Neutron_ProviderNet.yaml STACK +# +# Expected Outcome: +# A new provider network called physnetX +# neutron providernet-list +# +################################################################################ + +heat_template_version: 2015-04-30 + +description: > + Demonstrate the WR::Neutron::ProviderNet heat resource + +parameters: + + NAME: + description: Name of the provider network + type: string + default: physnetX + + TYPE: + description: Provider network type for the network + type: string + default: vlan + constraints: + - allowed_values: [ vlan, vxlan, flat ] + + MTU: + description: MTU for the provider network + type: number + default: 1500 + + VLANTRANSPARENT: + description: vlan_transparent tenant networks for provider network + type: boolean + default: false + + DESCRIPTION: + description: Description for the provider network + type: string + default: A sample provider network + +resources: + + WR_Neutron_ProviderNet: + type: WR::Neutron::ProviderNet + properties: + # required properties + name: { get_param: NAME } + type: { get_param: TYPE } + + # optional properties + mtu: { get_param: MTU } + vlan_transparent: { get_param: VLANTRANSPARENT } + description: { get_param: DESCRIPTION } + +outputs: + pn_status: + description: The status of the provider network + value: { get_attr: [ WR_Neutron_ProviderNet, status] } + + pn_mtu: + description: The mtu of the provider network + value: { get_attr: [ WR_Neutron_ProviderNet, mtu] } + + pn_show: + description: All attributes of the provider network + value: { get_attr: [ WR_Neutron_ProviderNet, show] } + + pn_name: + description: Name of the provider network + value: { get_attr: [ WR_Neutron_ProviderNet, name] } diff --git a/openstack/python-heat/python-heat/templates/hot/simple/WR_Neutron_ProviderNetRange.yaml b/openstack/python-heat/python-heat/templates/hot/simple/WR_Neutron_ProviderNetRange.yaml new file mode 100644 index 00000000..30067cdd --- /dev/null +++ b/openstack/python-heat/python-heat/templates/hot/simple/WR_Neutron_ProviderNetRange.yaml @@ -0,0 +1,121 @@ +################################################################################ +# Copyright (c) 2013-2015 Wind River Systems, Inc. 
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+################################################################################
+#
+# Objective:
+#   Demonstrate constructing the heat resource WR::Neutron::ProviderNetRange
+#
+# Pre-Reqs:
+#   None
+#
+# Mandatory Template Parameters:
+#   None
+#
+# Tenant Considerations:
+#   Admin activity
+#
+# Sample CLI syntax:
+#   heat stack-create -f WR_Neutron_ProviderNetRange.yaml STACK
+#
+# Expected Outcome:
+#   A new provider network range called: physnetX-R1
+#      neutron providernet-range-list
+#   A fake provider network called: sample_physnet_X
+#      neutron providernet-list
+#
+################################################################################
+
+heat_template_version: 2015-04-30
+
+description: >
+  Demonstrate the WR::Neutron::ProviderNetRange heat resource
+
+parameters:
+
+  NAME:
+    description: Name of the provider network range
+    type: string
+    default: physnetX-R1
+
+  TENANT:
+    description: Name or ID for tenant of unshared provider network range
+    type: string
+    default: admin
+    constraints:
+      - custom_constraint: keystone.project
+
+  SHARED:
+    description: Whether provider network range is shared for all tenants
+    type: boolean
+    default: false
+
+  DESCRIPTION:
+    description: Description of the provider network range
+    type: string
+    default: 'physnetX-R1 sample range'
+
+  MINIMUM:
+    description: Min range for this provider network range
+    type: number
+    default: '10'
+
+  MAXIMUM:
+    description: Max range for this provider network range
+    type: number
+    default: '10'
+
+  TTL:
+    description: TTL for this vxlan provider network range
+    type: number
+    default: '44'
+
+  PORT:
+    description: PORT for this vxlan provider network range
+    type: number
+    default: '8472'
+
+  GROUP:
+    description: Multicast IP for this vxlan provider network range
+    type: string
+    default: '224.0.0.255'
+
+resources:
+
+  WR_Neutron_ProviderNetRange:
+    type: WR::Neutron::ProviderNetRange
+    properties:
+      # required properties
+      providernet_id: { get_resource: ProviderNet1 }
+      name: { get_param: NAME }
+      minimum: { get_param: MINIMUM}
+      maximum: { get_param: MAXIMUM}
+
+      # optional properties
+      description: { get_param: DESCRIPTION }
+      shared: { get_param: SHARED}
+      # tenant_id is ignored if this is a shared range
+      tenant_id: { get_param: TENANT }
+      # group (i.e. multicast IP) is only set if provider net is vxlan
+      group: { get_param: GROUP }
+      # ttl is only set if provider net is vxlan
+      ttl: { get_param: TTL }
+      # port is only set if provider net is vxlan.
+      port: { get_param: PORT}
+
+  ProviderNet1:
+    type: WR::Neutron::ProviderNet
+    properties:
+      name: sample_physnet_X
+      type: vxlan
+      vlan_transparent: true
+      mtu: 1500
+      description: fake physnet
+
+outputs:
+
+  pnr_show:
+    description: All attributes of the provider net range
+    value: { get_attr: [ WR_Neutron_ProviderNetRange, show] }
diff --git a/openstack/python-heat/python-heat/templates/hot/simple/WR_Neutron_QoSPolicy.yaml b/openstack/python-heat/python-heat/templates/hot/simple/WR_Neutron_QoSPolicy.yaml
new file mode 100644
index 00000000..52b9ca2e
--- /dev/null
+++ b/openstack/python-heat/python-heat/templates/hot/simple/WR_Neutron_QoSPolicy.yaml
@@ -0,0 +1,67 @@
+################################################################################
+# Copyright (c) 2013-2015 Wind River Systems, Inc.
+# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Objective: +# Demonstrate constructing the heat resource WR::Neutron::QosPolicy +# +# Pre-Reqs: +# Normal Lab Setup (networks, host interfaces, glance images) +# +# Mandatory Template Parameters: +# None +# +# Tenant Considerations: +# An admin activity +# +# Sample CLI syntax: +# heat stack-create -f WR_Neutron_QoSPolicy.yaml STACK +# +# Expected Outcome: +# A new qos policy called SampleQoS +# neutron qos-list +# +################################################################################ + +heat_template_version: 2015-04-30 + +description: > + Demonstrate the WR::Neutron::QoSPolicy heat resource + +parameters: + + NAME: + description: Name of the QoS policy + type: string + default: SampleQoS + + DESCRIPTION: + description: Description of the QoS policy + type: string + default: 'A simple QoS policy' + +resources: + + WR_Neutron_QoSPolicy: + type: WR::Neutron::QoSPolicy + properties: + # required properties + name: { get_param: NAME } + + # optional properties + description: { get_param: DESCRIPTION } + + # tenant-id: { get_param: TENANT_ID} + + policies: { + scheduler: { weight: 16 }, + # dscp: { dscp: 10 } + } + +outputs: + qos_show: + description: All attributes of the QoS policy + value: { get_attr: [ WR_Neutron_QoSPolicy, show] } diff --git a/openstack/python-heat/wrs-heat-template/centos/build_srpm.data b/openstack/python-heat/wrs-heat-template/centos/build_srpm.data new file mode 100644 index 00000000..fa9aa5e7 --- /dev/null +++ b/openstack/python-heat/wrs-heat-template/centos/build_srpm.data @@ -0,0 +1,2 @@ +SRC_DIR="python-heat/templates" +TIS_PATCH_VER=1 diff --git a/openstack/python-heat/wrs-heat-template/centos/wrs-heat-templates.spec b/openstack/python-heat/wrs-heat-template/centos/wrs-heat-templates.spec new file mode 100644 index 00000000..fdeced08 --- /dev/null +++ b/openstack/python-heat/wrs-heat-template/centos/wrs-heat-templates.spec @@ -0,0 +1,29 @@ +Summary: Titanium Cloud Sample Heat Templates +Name: wrs-heat-templates +Version: 1.6.0 +Release: %{tis_patch_ver}%{?_tis_dist} +License: Apache-2.0 +Group: base +Packager: Wind River +URL: unknown + +Source0: %{name}-%{version}.tar.gz + +%define cgcs_sdk_deploy_dir /opt/deploy/cgcs_sdk + +Requires: openstack-heat-common + +%description +Titanium Cloud Heat Template examples + +%install +rm -rf ${RPM_BUILD_ROOT} +mkdir -p ${RPM_BUILD_ROOT}/%{cgcs_sdk_deploy_dir} +install -m 644 %{SOURCE0} $RPM_BUILD_ROOT/%{cgcs_sdk_deploy_dir}/%{name}-%{version}.tgz + +%clean +rm -rf ${RPM_BUILD_ROOT} + +%files +%defattr(-,root,root,-) +%{cgcs_sdk_deploy_dir} diff --git a/openstack/python-heat/wrs-heat-template/python-heat b/openstack/python-heat/wrs-heat-template/python-heat new file mode 120000 index 00000000..6512b15c --- /dev/null +++ b/openstack/python-heat/wrs-heat-template/python-heat @@ -0,0 +1 @@ +../python-heat \ No newline at end of file diff --git a/openstack/python-heatclient/centos/build_srpm.data b/openstack/python-heatclient/centos/build_srpm.data new file mode 100644 index 00000000..d3f64f33 --- /dev/null +++ b/openstack/python-heatclient/centos/build_srpm.data @@ -0,0 +1 @@ +TIS_PATCH_VER=3 diff --git a/openstack/python-heatclient/centos/meta_patches/0001-Update-package-versioning-format-for-TiS-format.patch b/openstack/python-heatclient/centos/meta_patches/0001-Update-package-versioning-format-for-TiS-format.patch new file mode 100644 index 00000000..ea37d1c6 --- /dev/null +++ 
b/openstack/python-heatclient/centos/meta_patches/0001-Update-package-versioning-format-for-TiS-format.patch @@ -0,0 +1,25 @@ +From 001aa3b51766fb3b2cbb0c9a0c715650ef394f8c Mon Sep 17 00:00:00 2001 +From: Al Bailey +Date: Mon, 30 Jan 2017 12:24:46 -0500 +Subject: [PATCH 1/2] Update package versioning format for Titanium Cloud format + +--- + SPECS/python-heatclient.spec | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/SPECS/python-heatclient.spec b/SPECS/python-heatclient.spec +index 73a1c6f..7d6d32f 100644 +--- a/SPECS/python-heatclient.spec ++++ b/SPECS/python-heatclient.spec +@@ -8,7 +8,7 @@ + + Name: python-heatclient + Version: 1.11.0 +-Release: 1%{?dist} ++Release: 1.el7%{?_tis_dist}.%{tis_patch_ver} + Summary: Python API and CLI for OpenStack Heat + + License: ASL 2.0 +-- +1.8.3.1 + diff --git a/openstack/python-heatclient/centos/meta_patches/0002-Packages-sdk-for-remote-clients.patch b/openstack/python-heatclient/centos/meta_patches/0002-Packages-sdk-for-remote-clients.patch new file mode 100644 index 00000000..40db99b2 --- /dev/null +++ b/openstack/python-heatclient/centos/meta_patches/0002-Packages-sdk-for-remote-clients.patch @@ -0,0 +1,51 @@ +From cdb41db222215b7d4576b53d042cf342d9ff6b29 Mon Sep 17 00:00:00 2001 +From: Scott Little +Date: Mon, 2 Oct 2017 14:32:49 -0400 +Subject: [PATCH 2/3] WRS: 0002-Packages-sdk-for-remote-clients.patch + +--- + SPECS/python-heatclient.spec | 14 ++++++++++++++ + 1 file changed, 14 insertions(+) + +diff --git a/SPECS/python-heatclient.spec b/SPECS/python-heatclient.spec +index 8b36981..3060a3f 100644 +--- a/SPECS/python-heatclient.spec ++++ b/SPECS/python-heatclient.spec +@@ -93,6 +93,13 @@ the OpenStack Heat API. + + This package contains auto-generated documentation. + ++%package sdk ++Summary: SDK files for %{name} ++ ++%description sdk ++Contains SDK files for %{name} package ++ ++ + %prep + %setup -q -n %{name}-%{upstream_version} + +@@ -129,6 +136,10 @@ install -pm 644 tools/heat.bash_completion \ + # Delete tests + rm -fr %{buildroot}%{python2_sitelib}/heatclient/tests + ++# prep SDK package ++mkdir -p %{buildroot}/usr/share/remote-clients/%{name} ++tar zcf %{buildroot}/usr/share/remote-clients/%{name}/%{name}-%{version}.tgz --exclude='.gitignore' --exclude='.gitreview' -C .. 
%{name}-%{version} ++ + + export PYTHONPATH="$( pwd ):$PYTHONPATH" + sphinx-build -b html doc/source html +@@ -167,6 +178,9 @@ rm -fr html/.doctrees html/.buildinfo + %doc html + %license LICENSE + ++%files sdk ++/usr/share/remote-clients/%{name}/%{name}-%{version}.tgz ++ + %changelog + * Fri Jul 21 2017 rdo-trunk 1.5.2-1 + - Update to 1.5.2 +-- +1.9.1 + diff --git a/openstack/python-heatclient/centos/meta_patches/0003-Apply-timezone-support-patch.patch b/openstack/python-heatclient/centos/meta_patches/0003-Apply-timezone-support-patch.patch new file mode 100644 index 00000000..1c73736e --- /dev/null +++ b/openstack/python-heatclient/centos/meta_patches/0003-Apply-timezone-support-patch.patch @@ -0,0 +1,37 @@ +From 2d0e1e8effaf3c83e137af5a79eb62f3e312bd94 Mon Sep 17 00:00:00 2001 +From: Al Bailey +Date: Thu, 26 Oct 2017 08:56:39 -0500 +Subject: [PATCH] WRS: 0003-Apply-timezone-support-patch.patch + +--- + SPECS/python-heatclient.spec | 7 +++++++ + 1 file changed, 7 insertions(+) + +diff --git a/SPECS/python-heatclient.spec b/SPECS/python-heatclient.spec +index 1f44763..12ce333 100644 +--- a/SPECS/python-heatclient.spec ++++ b/SPECS/python-heatclient.spec +@@ -15,6 +15,12 @@ License: ASL 2.0 + URL: https://launchpad.net/python-heatclientclient + Source0: https://tarballs.openstack.org/%{name}/%{name}-%{upstream_version}.tar.gz + ++# WRS. ++Patch0001: 0001-timezone-support-for-heatclient.patch ++ ++# BuildArch needs to be located below patches in the spec file ++ ++ + BuildArch: noarch + + %description +@@ -101,6 +107,7 @@ Contains SDK files for %{name} package + + %prep + %autosetup -n %{name}-%{upstream_version} -S git ++# autosetup applies all patches automatically + + rm -rf {test-,}requirements.txt tools/{pip,test}-requires + +-- +1.8.3.1 + diff --git a/openstack/python-heatclient/centos/meta_patches/PATCH_ORDER b/openstack/python-heatclient/centos/meta_patches/PATCH_ORDER new file mode 100644 index 00000000..87f461e1 --- /dev/null +++ b/openstack/python-heatclient/centos/meta_patches/PATCH_ORDER @@ -0,0 +1,3 @@ +0001-Update-package-versioning-format-for-TiS-format.patch +0002-Packages-sdk-for-remote-clients.patch +0003-Apply-timezone-support-patch.patch diff --git a/openstack/python-heatclient/centos/patches/0001-timezone-support-for-heatclient.patch b/openstack/python-heatclient/centos/patches/0001-timezone-support-for-heatclient.patch new file mode 100644 index 00000000..aa951422 --- /dev/null +++ b/openstack/python-heatclient/centos/patches/0001-timezone-support-for-heatclient.patch @@ -0,0 +1,224 @@ +From d144d6cfe42d8af42106f709114fd707afcfaede Mon Sep 17 00:00:00 2001 +From: Litao Gao +Date: Fri, 24 Mar 2017 04:34:54 -0400 +Subject: [PATCH 1/1] timezone support for heatclient + +--- + heatclient/common/utils.py | 48 ++++++++++++++++++++++++++++++++++++++++++++++ + heatclient/v1/shell.py | 18 +++++++++++++++++ + 2 files changed, 66 insertions(+) + +diff --git a/heatclient/common/utils.py b/heatclient/common/utils.py +index f2b20f1..9f2767f 100644 +--- a/heatclient/common/utils.py ++++ b/heatclient/common/utils.py +@@ -18,6 +18,14 @@ import logging + import os + import textwrap + import uuid ++import sys ++import re ++ ++from functools import wraps ++from cStringIO import StringIO ++from datetime import datetime ++import dateutil ++from dateutil import parser + + from oslo_serialization import jsonutils + from oslo_utils import encodeutils +@@ -371,3 +379,43 @@ def get_response_body(resp): + else: + body = None + return body ++ ++ ++def parse_date(string_data): ++ """Parses a 
date-like input string into a timezone aware Python ++ datetime. ++ """ ++ pattern = r'(\d{4}-\d{2}-\d{2}[T ])?\d{2}:\d{2}:\d{2}(\.\d{6})?Z?' ++ ++ def convert_date(matchobj): ++ formats = ["%Y-%m-%dT%H:%M:%S.%f", "%Y-%m-%d %H:%M:%S.%f", ++ "%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S", ++ "%Y-%m-%dT%H:%M:%SZ", "%H:%M:%S"] ++ datestring = matchobj.group(0) ++ if datestring: ++ for format in formats: ++ try: ++ datetime.strptime(datestring, format) ++ datestring += "+0000" ++ parsed = parser.parse(datestring) ++ converted = parsed.astimezone(dateutil.tz.tzlocal()) ++ return datetime.strftime(converted, format) ++ except Exception: ++ pass ++ return datestring ++ ++ return re.sub(pattern, convert_date, str(string_data)) ++ ++ ++def timestamp_converter(display): ++ """ ++ Decorator that parse the timestamp and convert according timezone ++ """ ++ @wraps(display) ++ def new_f(*args, **kwargs): ++ sys.stdout = mystdout = StringIO() ++ display(*args, **kwargs) ++ sys.stdout = sys.__stdout__ ++ content = mystdout.getvalue() ++ print parse_date(content) ++ return new_f +diff --git a/heatclient/v1/shell.py b/heatclient/v1/shell.py +index ac4ecb9..d8c8324 100644 +--- a/heatclient/v1/shell.py ++++ b/heatclient/v1/shell.py +@@ -45,6 +45,7 @@ def show_deprecated(deprecated, recommended): + ) + + ++@utils.timestamp_converter + @utils.arg('-f', '--template-file', metavar='', + help=_('Path to the template.')) + @utils.arg('-e', '--environment-file', metavar='', +@@ -414,6 +415,7 @@ def do_action_check(hc, args): + do_stack_list(hc) + + ++@utils.timestamp_converter + @utils.arg('id', metavar='', + help=_('Name or ID of stack to describe.')) + @utils.arg('--no-resolve-outputs', action="store_true", +@@ -592,6 +594,7 @@ def do_stack_cancel_update(hc, args): + do_stack_list(hc) + + ++@utils.timestamp_converter + @utils.arg('-s', '--show-deleted', default=False, action="store_true", + help=_('Include soft-deleted stacks in the stack listing.')) + @utils.arg('-n', '--show-nested', default=False, action="store_true", +@@ -695,6 +698,7 @@ def do_stack_list(hc, args=None): + utils.print_list(stacks, fields, sortby_index=sortby_index) + + ++@utils.timestamp_converter + @utils.arg('id', metavar='', + help=_('Name or ID of stack to query.')) + def do_output_list(hc, args): +@@ -856,6 +860,7 @@ def do_resource_type_template(hc, args): + print(utils.format_output(template)) + + ++@utils.timestamp_converter + @utils.arg('id', metavar='', + help=_('Name or ID of stack to get the template for.')) + def do_template_show(hc, args): +@@ -929,6 +934,7 @@ def do_template_validate(hc, args): + print(jsonutils.dumps(validation, indent=2, ensure_ascii=False)) + + ++@utils.timestamp_converter + @utils.arg('id', metavar='', + help=_('Name or ID of stack to show the resources for.')) + @utils.arg('-n', '--nested-depth', metavar='', +@@ -970,6 +976,7 @@ def do_resource_list(hc, args): + utils.print_list(resources, fields, sortby_index=4) + + ++@utils.timestamp_converter + @utils.arg('id', metavar='', + help=_('Name or ID of stack to show the resource for.')) + @utils.arg('resource', metavar='', +@@ -1135,6 +1142,7 @@ def do_hook_clear(hc, args): + hook_type, resource_pattern) + + ++@utils.timestamp_converter + @utils.arg('id', metavar='', + help=_('Name or ID of stack to show the events for.')) + @utils.arg('-r', '--resource', metavar='', +@@ -1258,6 +1266,7 @@ def do_event(hc, args): + do_event_show(hc, args) + + ++@utils.timestamp_converter + @utils.arg('id', metavar='', + help=_('Name or ID of stack to show the events for.')) + 
@utils.arg('resource', metavar='', +@@ -1285,6 +1294,7 @@ def do_event_show(hc, args): + utils.print_dict(event.to_dict(), formatters=formatters) + + ++@utils.timestamp_converter + @utils.arg('-f', '--definition-file', metavar='', + help=_('Path to JSON/YAML containing map defining ' + ', , and .')) +@@ -1355,6 +1365,7 @@ def do_config_list(hc, args): + utils.print_list(scs, fields, sortby_index=None) + + ++@utils.timestamp_converter + @utils.arg('id', metavar='', + help=_('ID of the config.')) + @utils.arg('-c', '--config-only', default=False, action="store_true", +@@ -1475,6 +1486,7 @@ def do_deployment_list(hc, args): + utils.print_list(deployments, fields, sortby_index=5) + + ++@utils.timestamp_converter + @utils.arg('id', metavar='', + help=_('ID of the deployment.')) + def do_deployment_show(hc, args): +@@ -1490,6 +1502,7 @@ def do_deployment_show(hc, args): + print(jsonutils.dumps(sd.to_dict(), indent=2)) + + ++@utils.timestamp_converter + @utils.arg('id', metavar='', + help=_('ID of the server to fetch deployments for.')) + def do_deployment_metadata_show(hc, args): +@@ -1535,6 +1548,7 @@ def do_deployment_delete(hc, args): + {'count': failure_count, 'total': len(args.id)}) + + ++@utils.timestamp_converter + @utils.arg('id', metavar='', + help=_('ID deployment to show the output for.')) + @utils.arg('output', metavar='', nargs='?', default=None, +@@ -1589,6 +1603,7 @@ def do_build_info(hc, args): + utils.print_dict(result, formatters=formatters) + + ++@utils.timestamp_converter + @utils.arg('id', metavar='', + help=_('Name or ID of stack to snapshot.')) + @utils.arg('-n', '--name', metavar='', +@@ -1608,6 +1623,7 @@ def do_stack_snapshot(hc, args): + print(jsonutils.dumps(snapshot, indent=2, ensure_ascii=False)) + + ++@utils.timestamp_converter + @utils.arg('id', metavar='', + help=_('Name or ID of the stack containing the snapshot.')) + @utils.arg('snapshot', metavar='', +@@ -1655,6 +1671,7 @@ def do_stack_restore(hc, args): + raise exc.CommandError(_('Stack or snapshot not found')) + + ++@utils.timestamp_converter + @utils.arg('id', metavar='', + help=_('Name or ID of the stack containing the snapshots.')) + def do_snapshot_list(hc, args): +@@ -1678,6 +1695,7 @@ def do_snapshot_list(hc, args): + utils.print_list(snapshots["snapshots"], fields, formatters=formatters) + + ++@utils.timestamp_converter + def do_service_list(hc, args=None): + '''List the Heat engines.''' + show_deprecated('heat service-list', +-- +1.8.3.1 + diff --git a/openstack/python-heatclient/centos/srpm_path b/openstack/python-heatclient/centos/srpm_path new file mode 100644 index 00000000..1a807d9c --- /dev/null +++ b/openstack/python-heatclient/centos/srpm_path @@ -0,0 +1,2 @@ +mirror:Source/python-heatclient-1.11.0-1.el7.src.rpm + diff --git a/openstack/python-horizon/centos/build_srpm.data b/openstack/python-horizon/centos/build_srpm.data new file mode 100755 index 00000000..ef2c5412 --- /dev/null +++ b/openstack/python-horizon/centos/build_srpm.data @@ -0,0 +1,5 @@ +TAR_NAME=horizon +SRC_DIR="$CGCS_BASE/git/horizon" +COPY_LIST="$FILES_BASE/*" +TIS_BASE_SRCREV=79a529593f84e4ea0762dc4a72ca167a1c5d0c3d +TIS_PATCH_VER=GITREVCOUNT diff --git a/openstack/python-horizon/centos/files/guni_config.py b/openstack/python-horizon/centos/files/guni_config.py new file mode 100644 index 00000000..dd3d1379 --- /dev/null +++ b/openstack/python-horizon/centos/files/guni_config.py @@ -0,0 +1,59 @@ +import datetime +import fnmatch +import os +import resource +import subprocess +from django.conf import settings + + +errorlog = 
"/var/log/horizon/gunicorn.log" +capture_output = True + +# maxrss ceiling in kbytes +MAXRSS_CEILING = 512000 + + +def worker_abort(worker): + path = ("/proc/%s/fd") % os.getpid() + contents = os.listdir(path) + upload_dir = getattr(settings, 'FILE_UPLOAD_TEMP_DIR', '/tmp') + pattern = os.path.join(upload_dir, '*.upload') + + for i in contents: + f = os.path.join(path, i) + if os.path.exists(f): + try: + l = os.readlink(f) + if fnmatch.fnmatch(l, pattern): + worker.log.info(l) + os.remove(l) + except OSError: + pass + + +def when_ready(server): + subprocess.check_call(["/usr/bin/horizon-assets-compress"]) + + +def post_worker_init(worker): + worker.nrq = 0 + worker.restart = False + + +def pre_request(worker, req): + worker.nrq += 1 + if worker.restart: + worker.nr = worker.max_requests - 1 + maxrss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss + msg = "%(date)s %(uri)s %(rss)u" % ({'date': datetime.datetime.now(), + 'uri': getattr(req, "uri"), + 'rss': maxrss}) + worker.log.info(msg) + + +def post_request(worker, req, environ, resp): + worker.nrq -= 1 + if not worker.restart: + maxrss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss + if maxrss > MAXRSS_CEILING and worker.nrq == 0: + worker.restart = True diff --git a/openstack/python-horizon/centos/files/horizon-assets-compress b/openstack/python-horizon/centos/files/horizon-assets-compress new file mode 100644 index 00000000..8b17d316 --- /dev/null +++ b/openstack/python-horizon/centos/files/horizon-assets-compress @@ -0,0 +1,43 @@ +#!/bin/bash +# +# Copyright (c) 2017 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +PYTHON=`which python` +MANAGE="/usr/share/openstack-dashboard/manage.py" +STATICDIR="/www/pages/static" +BRANDDIR="/opt/branding" +APPLIEDDIR="/opt/branding/applied" + +# Handle custom horizon branding +rm -rf ${APPLIEDDIR} +if ls ${BRANDDIR}/*.tgz 1> /dev/null 2>&1; then + LATESTBRANDING=$(ls $BRANDDIR |grep '\.tgz$' | tail -n 1) + mkdir -p ${APPLIEDDIR} + tar zxf ${BRANDDIR}/${LATESTBRANDING} -C ${APPLIEDDIR} 2>/dev/null 1>/dev/null + RETVAL=$? + if [ $RETVAL -ne 0 ]; then + echo "Failed to extract ${BRANDDIR}/${LATESTBRANDING}" + fi +fi + +echo "Dumping static assets" +if [ -d ${STATICDIR} ]; then + COLLECTARGS=--clear +fi +${PYTHON} -- ${MANAGE} collectstatic -v0 --noinput ${COLLECTARGS} + +RETVAL=$? +if [ $RETVAL -ne 0 ]; then + echo "Failed to dump static assets." + exit $RETVAL +fi + +nice -n 20 ionice -c Idle ${PYTHON} -- ${MANAGE} compress -v0 +RETVAL=$? +if [ $RETVAL -ne 0 ]; then + echo "Failed to compress assets." + exit $RETVAL +fi diff --git a/openstack/python-horizon/centos/files/horizon-clearsessions b/openstack/python-horizon/centos/files/horizon-clearsessions new file mode 100644 index 00000000..33e07363 --- /dev/null +++ b/openstack/python-horizon/centos/files/horizon-clearsessions @@ -0,0 +1,3 @@ +#!/bin/bash + +/usr/bin/nice -n 2 /usr/bin/python /usr/share/openstack-dashboard/manage.py clearsessions diff --git a/openstack/python-horizon/centos/files/horizon-patching-restart b/openstack/python-horizon/centos/files/horizon-patching-restart new file mode 100644 index 00000000..9fc15df8 --- /dev/null +++ b/openstack/python-horizon/centos/files/horizon-patching-restart @@ -0,0 +1,80 @@ +#!/bin/bash +# +# Copyright (c) 2017 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# The patching subsystem provides a patch-functions bash source file +# with useful function and variable definitions. +# +. 
/etc/patching/patch-functions + +# +# We can now check to see what type of node we're on, if it's locked, etc, +# and act accordingly +# + +# +# Declare an overall script return code +# +declare -i GLOBAL_RC=$PATCH_STATUS_OK + +# +# handle restarting horizon. +# +if is_controller +then + # Horizon only runs on the controller + + if [ ! -f $PATCH_FLAGDIR/horizon.restarted ] + then + # Check SM to see if Horizon is running + sm-query service horizon | grep -q 'enabled-active' + if [ $? -eq 0 ] + then + loginfo "$0: Logging out all horizon sessions" + + # Remove sessions + rm -f /var/tmp/sessionid* + + loginfo "$0: Restarting horizon" + + # Ask SM to restart Horizon + sm-restart service horizon + touch $PATCH_FLAGDIR/horizon.restarted + + # Wait up to 30 seconds for service to recover + let -i UNTIL=$SECONDS+30 + while [ $UNTIL -ge $SECONDS ] + do + # Check to see if it's running + sm-query service horizon | grep -q 'enabled-active' + if [ $? -eq 0 ] + then + break + fi + + # Still not running? Let's wait 5 seconds and check again + sleep 5 + done + + sm-query service horizon | grep -q 'enabled-active' + if [ $? -ne 0 ] + then + # Still not running! Clear the flag and mark the RC as failed + loginfo "$0: Failed to restart horizon" + rm -f $PATCH_FLAGDIR/horizon.restarted + GLOBAL_RC=$PATCH_STATUS_FAILED + sm-query service horizon + fi + fi + fi +fi + +# +# Exit the script with the overall return code +# +exit $GLOBAL_RC + diff --git a/openstack/python-horizon/centos/files/horizon-region-exclusions.csv b/openstack/python-horizon/centos/files/horizon-region-exclusions.csv new file mode 100755 index 00000000..e5d27ff6 --- /dev/null +++ b/openstack/python-horizon/centos/files/horizon-region-exclusions.csv @@ -0,0 +1,12 @@ +# +# This file is used by horizon to prevent specific regions from being +# populated in the region selector. The contents of this file are a single +# CSV list containing the region names meant to be ignored, +# as shown in the example below (currently commented out). +# +# After modifying this file on the active controller, run the ‘sudo service horizon restart’ +# command and then lock/unlock the inactive controller +# +# Note: ignoring this system's region is not supported + +#regionSeven,externalRegion,region_to_ignore diff --git a/openstack/python-horizon/centos/files/horizon.init b/openstack/python-horizon/centos/files/horizon.init new file mode 100755 index 00000000..3b261b94 --- /dev/null +++ b/openstack/python-horizon/centos/files/horizon.init @@ -0,0 +1,157 @@ +#!/bin/sh + +### BEGIN INIT INFO +# Provides: OpenStack Dashboard +# Required-Start: networking +# Required-Stop: networking +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: OpenStack Dashboard +# Description: Web based user interface to OpenStack services including +# Nova, Swift, Keystone, etc. 
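+#                    This implementation serves the dashboard through
+#                    gunicorn rather than Apache mod_wsgi.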
+### END INIT INFO + +RETVAL=0 +DESC="openstack-dashboard" +PIDFILE="/var/run/$DESC.pid" +PYTHON=`which python` +# Centos packages openstack_dashboard under /usr/share +#MANAGE="@PYTHON_SITEPACKAGES@/openstack_dashboard/manage.py" +MANAGE="/usr/share/openstack-dashboard/manage.py" +EXEC="/usr/bin/gunicorn" +BIND="localhost" +PORT="8080" +WORKER="eventlet" +WORKERS=`grep workers /etc/openstack-dashboard/horizon-config.ini | cut -f3 -d' '` +# Increased timeout to facilitate large image uploads +TIMEOUT="200" +STATICDIR="/www/pages/static" +BRANDDIR="/opt/branding" +APPLIEDDIR="/opt/branding/applied" +TMPUPLOADDIR="/scratch/horizon" + +source /usr/bin/tsconfig + +start() +{ + # Change workers if combined controller/compute + . /etc/platform/platform.conf + if [ "${WORKERS}" -lt "2" ]; then + WORKERS=2 + fi + + if [ -e $PIDFILE ]; then + PIDDIR=/proc/$(cat $PIDFILE) + if [ -d ${PIDDIR} ]; then + echo "$DESC already running." + return + else + echo "Removing stale PID file $PIDFILE" + rm -f $PIDFILE + fi + fi + + # Clean up any possible orphaned worker threads + if lsof -t -i:${PORT} 1> /dev/null 2>&1; then + kill $(lsof -t -i:${PORT}) > /dev/null 2>&1 + fi + + rm -rf ${TMPUPLOADDIR} + mkdir -p ${TMPUPLOADDIR} + + echo -n "Starting $DESC..." + + start-stop-daemon --start --quiet --background --pidfile ${PIDFILE} \ + --make-pidfile --exec ${PYTHON} -- ${EXEC} --bind ${BIND}:${PORT} \ + --worker-class ${WORKER} --workers ${WORKERS} --timeout ${TIMEOUT} \ + --log-syslog \ + --config '/usr/share/openstack-dashboard/guni_config.py' \ + --pythonpath '/usr/share/openstack-dashboard' \ + openstack_dashboard.wsgi + RETVAL=$? + if [ $RETVAL -eq 0 ]; then + echo "done." + else + echo "failed." + fi + + # now copy customer branding file to CONFIG_PATH/branding if anything updated + sm-query service drbd-platform | grep enabled-active > /dev/null 2>&1 + IS_ACTIVE=$? + + # Handle horizon region exclusions + if [ $IS_ACTIVE -eq 0 ]; then + # Only copy if the file has been modified + if ! cmp --silent ${BRANDDIR}/horizon-region-exclusions.csv ${CONFIG_PATH}/branding/horizon-region-exclusions.csv ; then + mkdir -p ${CONFIG_PATH}/branding + cp -r ${BRANDDIR}/horizon-region-exclusions.csv ${CONFIG_PATH}/branding 1>/dev/null 2>&1 + fi + fi + + if ls ${BRANDDIR}/*.tgz 1> /dev/null 2>&1; then + LATESTBRANDING=$(ls $BRANDDIR |grep '\.tgz$' | tail -n 1) + if [ $IS_ACTIVE -eq 0 ]; then + # Only do the copy if the tarball has changed + if ! cmp --silent ${BRANDDIR}/${LATESTBRANDING} ${CONFIG_PATH}/branding/${LATESTBRANDING} ; then + mkdir -p ${CONFIG_PATH}/branding + rm -rf ${CONFIG_PATH}/branding/*.tgz + cp -r ${BRANDDIR}/${LATESTBRANDING} ${CONFIG_PATH}/branding + fi + fi + fi +} + +stop() +{ + if [ ! -e $PIDFILE ]; then return; fi + + echo -n "Stopping $DESC..." + + start-stop-daemon --stop --quiet --pidfile $PIDFILE + RETVAL=$? + if [ $RETVAL -eq 0 ]; then + echo "done." + else + echo "failed." 
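+        # A failed stop usually means the process had already exited;
+        # the stale PID file is still removed below.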
+ fi + rm -rf ${TMPUPLOADDIR} + rm -f $PIDFILE +} + +status() +{ + pid=`cat $PIDFILE 2>/dev/null` + if [ -n "$pid" ]; then + if ps -p $pid &> /dev/null ; then + echo "$DESC is running" + RETVAL=0 + return + else + RETVAL=1 + fi + fi + echo "$DESC is not running" + RETVAL=3 +} + +case "$1" in + start) + start + ;; + stop) + stop + ;; + restart|force-reload|reload) + stop + start + ;; + status) + status + ;; + *) + echo "Usage: $0 {start|stop|force-reload|restart|reload|status}" + RETVAL=1 + ;; +esac + +exit $RETVAL diff --git a/openstack/python-horizon/centos/files/horizon.logrotate b/openstack/python-horizon/centos/files/horizon.logrotate new file mode 100644 index 00000000..c5bee47e --- /dev/null +++ b/openstack/python-horizon/centos/files/horizon.logrotate @@ -0,0 +1,13 @@ +/var/log/horizon.log { + nodateext + size 10M + start 1 + rotate 20 + missingok + notifempty + compress + sharedscripts + postrotate + /etc/init.d/syslog reload > /dev/null 2>&1 || true + endscript +} diff --git a/openstack/python-horizon/centos/files/local_settings.py b/openstack/python-horizon/centos/files/local_settings.py new file mode 100755 index 00000000..cbb23e49 --- /dev/null +++ b/openstack/python-horizon/centos/files/local_settings.py @@ -0,0 +1,1198 @@ +# -*- coding: utf-8 -*- + +import os + +from django.utils.translation import ugettext_lazy as _ + +from horizon.utils import secret_key + +from openstack_dashboard.settings import HORIZON_CONFIG + +from tsconfig.tsconfig import distributed_cloud_role + +DEBUG = False + +# This setting controls whether or not compression is enabled. Disabling +# compression makes Horizon considerably slower, but makes it much easier +# to debug JS and CSS changes +#COMPRESS_ENABLED = not DEBUG + +# This setting controls whether compression happens on the fly, or offline +# with `python manage.py compress` +# See https://django-compressor.readthedocs.io/en/latest/usage/#offline-compression +# for more information +#COMPRESS_OFFLINE = not DEBUG + +# WEBROOT is the location relative to Webserver root +# should end with a slash. +WEBROOT = '/' +#LOGIN_URL = WEBROOT + 'auth/login/' +#LOGOUT_URL = WEBROOT + 'auth/logout/' +# +# LOGIN_REDIRECT_URL can be used as an alternative for +# HORIZON_CONFIG.user_home, if user_home is not set. +# Do not set it to '/home/', as this will cause circular redirect loop +#LOGIN_REDIRECT_URL = WEBROOT + +# If horizon is running in production (DEBUG is False), set this +# with the list of host/domain names that the application can serve. +# For more information see: +# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts +#ALLOWED_HOSTS = ['horizon.example.com', ] + +# Set SSL proxy settings: +# Pass this header from the proxy after terminating the SSL, +# and don't forget to strip it from the client's request. +# For more information see: +# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header +#SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') + +# If Horizon is being served through SSL, then uncomment the following two +# settings to better secure the cookies from security exploits +#CSRF_COOKIE_SECURE = True +#SESSION_COOKIE_SECURE = True + +# The absolute path to the directory where message files are collected. +# The message file must have a .json file extension. When the user logins to +# horizon, the message files collected are processed and displayed to the user. +#MESSAGES_PATH=None + +# Overrides for OpenStack API versions. 
Use this setting to force the +# OpenStack dashboard to use a specific API version for a given service API. +# Versions specified here should be integers or floats, not strings. +# NOTE: The version should be formatted as it appears in the URL for the +# service API. For example, The identity service APIs have inconsistent +# use of the decimal point, so valid options would be 2.0 or 3. +# Minimum compute version to get the instance locked status is 2.9. +#OPENSTACK_API_VERSIONS = { +# "data-processing": 1.1, +# "identity": 3, +# "image": 2, +# "volume": 2, +# "compute": 2, +#} + +# Set this to True if running on a multi-domain model. When this is enabled, it +# will require the user to enter the Domain name in addition to the username +# for login. +#OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False + +# Set this to True if you want available domains displayed as a dropdown menu +# on the login screen. It is strongly advised NOT to enable this for public +# clouds, as advertising enabled domains to unauthenticated customers +# irresponsibly exposes private information. This should only be used for +# private clouds where the dashboard sits behind a corporate firewall. +#OPENSTACK_KEYSTONE_DOMAIN_DROPDOWN = False + +# If OPENSTACK_KEYSTONE_DOMAIN_DROPDOWN is enabled, this option can be used to +# set the available domains to choose from. This is a list of pairs whose first +# value is the domain name and the second is the display name. +#OPENSTACK_KEYSTONE_DOMAIN_CHOICES = ( +# ('Default', 'Default'), +#) + +# Overrides the default domain used when running on single-domain model +# with Keystone V3. All entities will be created in the default domain. +# NOTE: This value must be the name of the default domain, NOT the ID. +# Also, you will most likely have a value in the keystone policy file like this +# "cloud_admin": "rule:admin_required and domain_id:" +# This value must be the name of the domain whose ID is specified there. +#OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default' + +# Set this to True to enable panels that provide the ability for users to +# manage Identity Providers (IdPs) and establish a set of rules to map +# federation protocol attributes to Identity API attributes. +# This extension requires v3.0+ of the Identity API. +#OPENSTACK_KEYSTONE_FEDERATION_MANAGEMENT = False + +# Set Console type: +# valid options are "AUTO"(default), "VNC", "SPICE", "RDP", "SERIAL" or None +# Set to None explicitly if you want to deactivate the console. +#CONSOLE_TYPE = "AUTO" + +# If provided, a "Report Bug" link will be displayed in the site header +# which links to the value of this setting (ideally a URL containing +# information on how to report issues). +#HORIZON_CONFIG["bug_url"] = "http://bug-report.example.com" + +# Show backdrop element outside the modal, do not close the modal +# after clicking on backdrop. +#HORIZON_CONFIG["modal_backdrop"] = "static" + +# Specify a regular expression to validate user passwords. +#HORIZON_CONFIG["password_validator"] = { +# "regex": '.*', +# "help_text": _("Your password does not meet the requirements."), +#} + +# Disable simplified floating IP address management for deployments with +# multiple floating IP pools or complex network requirements. +#HORIZON_CONFIG["simple_ip_management"] = False + +# Turn off browser autocompletion for forms including the login form and +# the database creation workflow if so desired. 
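+# Note that many browsers now ignore autocomplete="off" on credential
+# fields, so treat this as a usability hint rather than a security control.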
+#HORIZON_CONFIG["password_autocomplete"] = "off" + +# Setting this to True will disable the reveal button for password fields, +# including on the login form. +#HORIZON_CONFIG["disable_password_reveal"] = False + +LOCAL_PATH = os.path.dirname(os.path.abspath(__file__)) + +# Set custom secret key: +# You can either set it to a specific value or you can let horizon generate a +# default secret key that is unique on this machine, e.i. regardless of the +# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However, +# there may be situations where you would want to set this explicitly, e.g. +# when multiple dashboard instances are distributed on different machines +# (usually behind a load-balancer). Either you have to make sure that a session +# gets all requests routed to the same dashboard instance or you set the same +# SECRET_KEY for all of them. +SECRET_KEY = secret_key.generate_or_read_from_file( + os.path.join(LOCAL_PATH, '.secret_key_store')) + +# We recommend you use memcached for development; otherwise after every reload +# of the django development server, you will have to login again. To use +# memcached set CACHES to something like +#CACHES = { +# 'default': { +# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', +# 'LOCATION': '127.0.0.1:11211', +# }, +#} + +CACHES = { + 'default': { + 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', + }, +} + +# Send email to the console by default +EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' +# Or send them to /dev/null +#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend' + +# Configure these for your outgoing email host +#EMAIL_HOST = 'smtp.my-company.com' +#EMAIL_PORT = 25 +#EMAIL_HOST_USER = 'djangomail' +#EMAIL_HOST_PASSWORD = 'top-secret!' + +# For multiple regions uncomment this configuration, and add (endpoint, title). +#AVAILABLE_REGIONS = [ +# ('http://cluster1.example.com:5000/v2.0', 'cluster1'), +# ('http://cluster2.example.com:5000/v2.0', 'cluster2'), +#] + +OPENSTACK_HOST = "127.0.0.1" +OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST +OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_" + +# For setting the default service region on a per-endpoint basis. Note that the +# default value for this setting is {}, and below is just an example of how it +# should be specified. +#DEFAULT_SERVICE_REGIONS = { +# OPENSTACK_KEYSTONE_URL: 'RegionOne' +#} + +# Enables keystone web single-sign-on if set to True. +#WEBSSO_ENABLED = False + +# Determines which authentication choice to show as default. +#WEBSSO_INITIAL_CHOICE = "credentials" + +# The list of authentication mechanisms which include keystone +# federation protocols and identity provider/federation protocol +# mapping keys (WEBSSO_IDP_MAPPING). Current supported protocol +# IDs are 'saml2' and 'oidc' which represent SAML 2.0, OpenID +# Connect respectively. +# Do not remove the mandatory credentials mechanism. +# Note: The last two tuples are sample mapping keys to a identity provider +# and federation protocol combination (WEBSSO_IDP_MAPPING). +#WEBSSO_CHOICES = ( +# ("credentials", _("Keystone Credentials")), +# ("oidc", _("OpenID Connect")), +# ("saml2", _("Security Assertion Markup Language")), +# ("acme_oidc", "ACME - OpenID Connect"), +# ("acme_saml2", "ACME - SAML2"), +#) + +# A dictionary of specific identity provider and federation protocol +# combinations. From the selected authentication mechanism, the value +# will be looked up as keys in the dictionary. 
If a match is found, +# it will redirect the user to a identity provider and federation protocol +# specific WebSSO endpoint in keystone, otherwise it will use the value +# as the protocol_id when redirecting to the WebSSO by protocol endpoint. +# NOTE: The value is expected to be a tuple formatted as: (, ). +#WEBSSO_IDP_MAPPING = { +# "acme_oidc": ("acme", "oidc"), +# "acme_saml2": ("acme", "saml2"), +#} + +# The Keystone Provider drop down uses Keystone to Keystone federation +# to switch between Keystone service providers. +# Set display name for Identity Provider (dropdown display name) +#KEYSTONE_PROVIDER_IDP_NAME = "Local Keystone" +# This id is used for only for comparison with the service provider IDs. This ID +# should not match any service provider IDs. +#KEYSTONE_PROVIDER_IDP_ID = "localkeystone" + +# Disable SSL certificate checks (useful for self-signed certificates): +#OPENSTACK_SSL_NO_VERIFY = True + +# The CA certificate to use to verify SSL connections +#OPENSTACK_SSL_CACERT = '/path/to/cacert.pem' + +# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the +# capabilities of the auth backend for Keystone. +# If Keystone has been configured to use LDAP as the auth backend then set +# can_edit_user to False and name to 'ldap'. +# +# TODO(tres): Remove these once Keystone has an API to identify auth backend. +OPENSTACK_KEYSTONE_BACKEND = { + 'name': 'native', + 'can_edit_user': True, + 'can_edit_group': True, + 'can_edit_project': True, + 'can_edit_domain': True, + 'can_edit_role': True, +} + +# Setting this to True, will add a new "Retrieve Password" action on instance, +# allowing Admin session password retrieval/decryption. +#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False + +# This setting allows deployers to control whether a token is deleted on log +# out. This can be helpful when there are often long running processes being +# run in the Horizon environment. +#TOKEN_DELETION_DISABLED = False + +# The Launch Instance user experience has been significantly enhanced. +# You can choose whether to enable the new launch instance experience, +# the legacy experience, or both. The legacy experience will be removed +# in a future release, but is available as a temporary backup setting to ensure +# compatibility with existing deployments. Further development will not be +# done on the legacy experience. Please report any problems with the new +# experience via the Launchpad tracking system. +# +# Toggle LAUNCH_INSTANCE_LEGACY_ENABLED and LAUNCH_INSTANCE_NG_ENABLED to +# determine the experience to enable. Set them both to true to enable +# both. +#LAUNCH_INSTANCE_LEGACY_ENABLED = True +#LAUNCH_INSTANCE_NG_ENABLED = False + +# A dictionary of settings which can be used to provide the default values for +# properties found in the Launch Instance modal. +#LAUNCH_INSTANCE_DEFAULTS = { +# 'config_drive': False, +# 'enable_scheduler_hints': True, +# 'disable_image': False, +# 'disable_instance_snapshot': False, +# 'disable_volume': False, +# 'disable_volume_snapshot': False, +# 'create_volume': True, +#} + +# The Xen Hypervisor has the ability to set the mount point for volumes +# attached to instances (other Hypervisors currently do not). Setting +# can_set_mount_point to True will add the option to set the mount point +# from the UI. 
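+# As a minimal sketch, a Xen-based deployment could expose that field by
+# flipping just this one key, leaving the rest at the defaults below:
+#OPENSTACK_HYPERVISOR_FEATURES = {
+#    'can_set_mount_point': True,
+#}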
+OPENSTACK_HYPERVISOR_FEATURES = { + 'can_set_mount_point': False, + 'can_set_password': False, + 'requires_keypair': False, + 'enable_quotas': True +} + +# The OPENSTACK_CINDER_FEATURES settings can be used to enable optional +# services provided by cinder that is not exposed by its extension API. +OPENSTACK_CINDER_FEATURES = { + 'enable_backup': False, +} + +# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional +# services provided by neutron. Options currently available are load +# balancer service, security groups, quotas, VPN service. +OPENSTACK_NEUTRON_NETWORK = { + 'enable_router': True, + 'enable_quotas': True, + 'enable_ipv6': True, + 'enable_distributed_router': False, + 'enable_ha_router': False, + 'enable_fip_topology_check': True, + + # Default dns servers you would like to use when a subnet is + # created. This is only a default, users can still choose a different + # list of dns servers when creating a new subnet. + # The entries below are examples only, and are not appropriate for + # real deployments + # 'default_dns_nameservers': ["8.8.8.8", "8.8.4.4", "208.67.222.222"], + + # Set which provider network types are supported. Only the network types + # in this list will be available to choose from when creating a network. + # Network types include local, flat, vlan, gre, vxlan and geneve. + # 'supported_provider_types': ['*'], + + # You can configure available segmentation ID range per network type + # in your deployment. + # 'segmentation_id_range': { + # 'vlan': [1024, 2048], + # 'vxlan': [4094, 65536], + # }, + + # You can define additional provider network types here. + # 'extra_provider_types': { + # 'awesome_type': { + # 'display_name': 'Awesome New Type', + # 'require_physical_network': False, + # 'require_segmentation_id': True, + # } + # }, + + # Set which VNIC types are supported for port binding. Only the VNIC + # types in this list will be available to choose from when creating a + # port. + # VNIC types include 'normal', 'direct', 'direct-physical', 'macvtap', + # 'baremetal' and 'virtio-forwarder' + # Set to empty list or None to disable VNIC type selection. + 'supported_vnic_types': ['*'], + + # Set list of available physical networks to be selected in the physical + # network field on the admin create network modal. If it's set to an empty + # list, the field will be a regular input field. + # e.g. ['default', 'test'] + 'physical_networks': [], + +} + +# The OPENSTACK_HEAT_STACK settings can be used to disable password +# field required while launching the stack. +OPENSTACK_HEAT_STACK = { + 'enable_user_pass': True, +} + +# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features +# in the OpenStack Dashboard related to the Image service, such as the list +# of supported image formats. +#OPENSTACK_IMAGE_BACKEND = { +# 'image_formats': [ +# ('', _('Select format')), +# ('aki', _('AKI - Amazon Kernel Image')), +# ('ami', _('AMI - Amazon Machine Image')), +# ('ari', _('ARI - Amazon Ramdisk Image')), +# ('docker', _('Docker')), +# ('iso', _('ISO - Optical Disk Image')), +# ('ova', _('OVA - Open Virtual Appliance')), +# ('qcow2', _('QCOW2 - QEMU Emulator')), +# ('raw', _('Raw')), +# ('vdi', _('VDI - Virtual Disk Image')), +# ('vhd', _('VHD - Virtual Hard Disk')), +# ('vhdx', _('VHDX - Large Virtual Hard Disk')), +# ('vmdk', _('VMDK - Virtual Machine Disk')), +# ], +#} + +# The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for +# image custom property attributes that appear on image detail pages. 
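+# For example, a deployment could retitle the common os_distro image
+# property alongside the defaults below (illustrative only):
+#IMAGE_CUSTOM_PROPERTY_TITLES = {
+#    "os_distro": _("Operating System"),
+#}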
+IMAGE_CUSTOM_PROPERTY_TITLES = { + "architecture": _("Architecture"), + "kernel_id": _("Kernel ID"), + "ramdisk_id": _("Ramdisk ID"), + "image_state": _("Euca2ools state"), + "project_id": _("Project ID"), + "image_type": _("Image Type"), +} + +# The IMAGE_RESERVED_CUSTOM_PROPERTIES setting is used to specify which image +# custom properties should not be displayed in the Image Custom Properties +# table. +IMAGE_RESERVED_CUSTOM_PROPERTIES = [] + +# Set to 'legacy' or 'direct' to allow users to upload images to glance via +# Horizon server. When enabled, a file form field will appear on the create +# image form. If set to 'off', there will be no file form field on the create +# image form. See documentation for deployment considerations. +#HORIZON_IMAGES_UPLOAD_MODE = 'legacy' + +# Allow a location to be set when creating or updating Glance images. +# If using Glance V2, this value should be False unless the Glance +# configuration and policies allow setting locations. +#IMAGES_ALLOW_LOCATION = False + +# A dictionary of default settings for create image modal. +#CREATE_IMAGE_DEFAULTS = { +# 'image_visibility': "public", +#} + +# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints +# in the Keystone service catalog. Use this setting when Horizon is running +# external to the OpenStack environment. The default is 'publicURL'. +#OPENSTACK_ENDPOINT_TYPE = "publicURL" + +# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the +# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints +# in the Keystone service catalog. Use this setting when Horizon is running +# external to the OpenStack environment. The default is None. This +# value should differ from OPENSTACK_ENDPOINT_TYPE if used. +#SECONDARY_ENDPOINT_TYPE = None + +# The number of objects (Swift containers/objects or images) to display +# on a single page before providing a paging element (a "more" link) +# to paginate results. +API_RESULT_LIMIT = 1000 +API_RESULT_PAGE_SIZE = 20 + +# The size of chunk in bytes for downloading objects from Swift +SWIFT_FILE_TRANSFER_CHUNK_SIZE = 512 * 1024 + +# The default number of lines displayed for instance console log. +INSTANCE_LOG_LENGTH = 35 + +# Specify a maximum number of items to display in a dropdown. +DROPDOWN_MAX_ITEMS = 30 + +# The timezone of the server. This should correspond with the timezone +# of your entire OpenStack installation, and hopefully be in UTC. +TIME_ZONE = "UTC" + +# When launching an instance, the menu of available flavors is +# sorted by RAM usage, ascending. If you would like a different sort order, +# you can provide another flavor attribute as sorting key. Alternatively, you +# can provide a custom callback method to use for sorting. You can also provide +# a flag for reverse sort. For more info, see +# http://docs.python.org/2/library/functions.html#sorted +#CREATE_INSTANCE_FLAVOR_SORT = { +# 'key': 'name', +# # or +# 'key': my_awesome_callback_method, +# 'reverse': False, +#} + +# Set this to True to display an 'Admin Password' field on the Change Password +# form to verify that it is indeed the admin logged-in who wants to change +# the password. +#ENFORCE_PASSWORD_CHECK = False + +# Modules that provide /auth routes that can be used to handle different types +# of user authentication. Add auth plugins that require extra route handling to +# this list. +#AUTHENTICATION_URLS = [ +# 'openstack_auth.urls', +#] + +# The Horizon Policy Enforcement engine uses these values to load per service +# policy rule files. 
The content of these files should match the files the +# OpenStack services are using to determine role based access control in the +# target installation. + +# Path to directory containing policy.json files +#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf") + +# Map of local copy of service policy files. +# Please insure that your identity policy file matches the one being used on +# your keystone servers. There is an alternate policy file that may be used +# in the Keystone v3 multi-domain case, policy.v3cloudsample.json. +# This file is not included in the Horizon repository by default but can be +# found at +# http://git.openstack.org/cgit/openstack/keystone/tree/etc/ \ +# policy.v3cloudsample.json +# Having matching policy files on the Horizon and Keystone servers is essential +# for normal operation. This holds true for all services and their policy files. +#POLICY_FILES = { +# 'identity': 'keystone_policy.json', +# 'compute': 'nova_policy.json', +# 'volume': 'cinder_policy.json', +# 'image': 'glance_policy.json', +# 'orchestration': 'heat_policy.json', +# 'network': 'neutron_policy.json', +#} + +# TODO: (david-lyle) remove when plugins support adding settings. +# Note: Only used when trove-dashboard plugin is configured to be used by +# Horizon. +# Trove user and database extension support. By default support for +# creating users and databases on database instances is turned on. +# To disable these extensions set the permission here to something +# unusable such as ["!"]. +#TROVE_ADD_USER_PERMS = [] +#TROVE_ADD_DATABASE_PERMS = [] + +# Change this patch to the appropriate list of tuples containing +# a key, label and static directory containing two files: +# _variables.scss and _styles.scss +#AVAILABLE_THEMES = [ +# ('default', 'Default', 'themes/default'), +# ('material', 'Material', 'themes/material'), +#] + +LOGGING = { + 'version': 1, + # When set to True this will disable all logging except + # for loggers specified in this configuration dictionary. Note that + # if nothing is specified here and disable_existing_loggers is True, + # django.db.backends will still log unless it is disabled explicitly. + 'disable_existing_loggers': False, + # If apache2 mod_wsgi is used to deploy OpenStack dashboard + # timestamp is output by mod_wsgi. If WSGI framework you use does not + # output timestamp for logging, add %(asctime)s in the following + # format definitions. + 'formatters': { + 'console': { + 'format': '%(levelname)s %(name)s %(message)s' + }, + 'operation': { + # The format of "%(message)s" is defined by + # OPERATION_LOG_OPTIONS['format'] + 'format': '%(message)s' + }, + }, + 'handlers': { + 'null': { + 'level': 'DEBUG', + 'class': 'logging.NullHandler', + }, + 'console': { + # Set the level to "DEBUG" for verbose output logging. + 'level': 'INFO', + 'class': 'logging.StreamHandler', + 'formatter': 'console', + }, + 'operation': { + 'level': 'INFO', + 'class': 'logging.StreamHandler', + 'formatter': 'operation', + }, + }, + 'loggers': { + # Logging from django.db.backends is VERY verbose, send to null + # by default. 
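+        # (The 'null' handler defined above simply discards these records
+        # via logging.NullHandler.)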
+ 'django.db.backends': { + 'handlers': ['null'], + 'propagate': False, + }, + 'requests': { + 'handlers': ['null'], + 'propagate': False, + }, + 'horizon': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'horizon.operation_log': { + 'handlers': ['operation'], + 'level': 'INFO', + 'propagate': False, + }, + 'openstack_dashboard': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'novaclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'cinderclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'keystoneclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'glanceclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'neutronclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'heatclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'swiftclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'openstack_auth': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'nose.plugins.manager': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'django': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'iso8601': { + 'handlers': ['null'], + 'propagate': False, + }, + 'scss': { + 'handlers': ['null'], + 'propagate': False, + }, + }, +} + +# 'direction' should not be specified for all_tcp/udp/icmp. +# It is specified in the form. +SECURITY_GROUP_RULES = { + 'all_tcp': { + 'name': _('All TCP'), + 'ip_protocol': 'tcp', + 'from_port': '1', + 'to_port': '65535', + }, + 'all_udp': { + 'name': _('All UDP'), + 'ip_protocol': 'udp', + 'from_port': '1', + 'to_port': '65535', + }, + 'all_icmp': { + 'name': _('All ICMP'), + 'ip_protocol': 'icmp', + 'from_port': '-1', + 'to_port': '-1', + }, + 'ssh': { + 'name': 'SSH', + 'ip_protocol': 'tcp', + 'from_port': '22', + 'to_port': '22', + }, + 'smtp': { + 'name': 'SMTP', + 'ip_protocol': 'tcp', + 'from_port': '25', + 'to_port': '25', + }, + 'dns': { + 'name': 'DNS', + 'ip_protocol': 'tcp', + 'from_port': '53', + 'to_port': '53', + }, + 'http': { + 'name': 'HTTP', + 'ip_protocol': 'tcp', + 'from_port': '80', + 'to_port': '80', + }, + 'pop3': { + 'name': 'POP3', + 'ip_protocol': 'tcp', + 'from_port': '110', + 'to_port': '110', + }, + 'imap': { + 'name': 'IMAP', + 'ip_protocol': 'tcp', + 'from_port': '143', + 'to_port': '143', + }, + 'ldap': { + 'name': 'LDAP', + 'ip_protocol': 'tcp', + 'from_port': '389', + 'to_port': '389', + }, + 'https': { + 'name': 'HTTPS', + 'ip_protocol': 'tcp', + 'from_port': '443', + 'to_port': '443', + }, + 'smtps': { + 'name': 'SMTPS', + 'ip_protocol': 'tcp', + 'from_port': '465', + 'to_port': '465', + }, + 'imaps': { + 'name': 'IMAPS', + 'ip_protocol': 'tcp', + 'from_port': '993', + 'to_port': '993', + }, + 'pop3s': { + 'name': 'POP3S', + 'ip_protocol': 'tcp', + 'from_port': '995', + 'to_port': '995', + }, + 'ms_sql': { + 'name': 'MS SQL', + 'ip_protocol': 'tcp', + 'from_port': '1433', + 'to_port': '1433', + }, + 'mysql': { + 'name': 'MYSQL', + 'ip_protocol': 'tcp', + 'from_port': '3306', + 'to_port': '3306', + }, + 'rdp': { + 'name': 'RDP', + 'ip_protocol': 'tcp', + 'from_port': '3389', + 'to_port': '3389', + }, +} + +# Deprecation Notice: +# +# The setting FLAVOR_EXTRA_KEYS has been deprecated. +# Please load extra spec metadata into the Glance Metadata Definition Catalog. 
+# +# The sample quota definitions can be found in: +# /etc/metadefs/compute-quota.json +# +# The metadata definition catalog supports CLI and API: +# $glance --os-image-api-version 2 help md-namespace-import +# $glance-manage db_load_metadefs +# +# See Metadata Definitions on: http://docs.openstack.org/developer/glance/ + +# TODO: (david-lyle) remove when plugins support settings natively +# Note: This is only used when the Sahara plugin is configured and enabled +# for use in Horizon. +# Indicate to the Sahara data processing service whether or not +# automatic floating IP allocation is in effect. If it is not +# in effect, the user will be prompted to choose a floating IP +# pool for use in their cluster. False by default. You would want +# to set this to True if you were running Nova Networking with +# auto_assign_floating_ip = True. +#SAHARA_AUTO_IP_ALLOCATION_ENABLED = False + +# The hash algorithm to use for authentication tokens. This must +# match the hash algorithm that the identity server and the +# auth_token middleware are using. Allowed values are the +# algorithms supported by Python's hashlib library. +#OPENSTACK_TOKEN_HASH_ALGORITHM = 'md5' + +# AngularJS requires some settings to be made available to +# the client side. Some settings are required by in-tree / built-in horizon +# features. These settings must be added to REST_API_REQUIRED_SETTINGS in the +# form of ['SETTING_1','SETTING_2'], etc. +# +# You may remove settings from this list for security purposes, but do so at +# the risk of breaking a built-in horizon feature. These settings are required +# for horizon to function properly. Only remove them if you know what you +# are doing. These settings may in the future be moved to be defined within +# the enabled panel configuration. +# You should not add settings to this list for out of tree extensions. +# See: https://wiki.openstack.org/wiki/Horizon/RESTAPI +REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES', + 'LAUNCH_INSTANCE_DEFAULTS', + 'OPENSTACK_IMAGE_FORMATS', + 'OPENSTACK_KEYSTONE_DEFAULT_DOMAIN', + 'CREATE_IMAGE_DEFAULTS'] + +# Additional settings can be made available to the client side for +# extensibility by specifying them in REST_API_ADDITIONAL_SETTINGS +# !! Please use extreme caution as the settings are transferred via HTTP/S +# and are not encrypted on the browser. This is an experimental API and +# may be deprecated in the future without notice. +#REST_API_ADDITIONAL_SETTINGS = [] + +# DISALLOW_IFRAME_EMBED can be used to prevent Horizon from being embedded +# within an iframe. Legacy browsers are still vulnerable to a Cross-Frame +# Scripting (XFS) vulnerability, so this option allows extra security hardening +# where iframes are not used in deployment. Default setting is True. +# For more information see: +# http://tinyurl.com/anticlickjack +#DISALLOW_IFRAME_EMBED = True + +# Help URL can be made available for the client. To provide a help URL, edit the +# following attribute to the URL of your choice. +#HORIZON_CONFIG["help_url"] = "http://openstack.mycompany.org" + +# Settings for OperationLogMiddleware +# OPERATION_LOG_ENABLED is flag to use the function to log an operation on +# Horizon. +# mask_targets is arrangement for appointing a target to mask. +# method_targets is arrangement of HTTP method to output log. +# format is the log contents. 
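+# Fields listed in 'mask_fields' have their submitted values masked in the
+# logged parameter list, so credentials are not written out in clear text.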
+#OPERATION_LOG_ENABLED = False +#OPERATION_LOG_OPTIONS = { +# 'mask_fields': ['password'], +# 'target_methods': ['POST'], +# 'ignored_urls': ['/js/', '/static/', '^/api/'], +# 'format': ("[%(client_ip)s] [%(domain_name)s]" +# " [%(domain_id)s] [%(project_name)s]" +# " [%(project_id)s] [%(user_name)s] [%(user_id)s] [%(request_scheme)s]" +# " [%(referer_url)s] [%(request_url)s] [%(message)s] [%(method)s]" +# " [%(http_status)s] [%(param)s]"), +#} + +# The default date range in the Overview panel meters - either minus N +# days (if the value is integer N), or from the beginning of the current month +# until today (if set to None). This setting should be used to limit the amount +# of data fetched by default when rendering the Overview panel. +#OVERVIEW_DAYS_RANGE = 1 + +# To allow operators to require users provide a search criteria first +# before loading any data into the views, set the following dict +# attributes to True in each one of the panels you want to enable this feature. +# Follow the convention . +#FILTER_DATA_FIRST = { +# 'admin.instances': False, +# 'admin.images': False, +# 'admin.networks': False, +# 'admin.routers': False, +# 'admin.volumes': False, +# 'identity.users': False, +# 'identity.projects': False, +# 'identity.groups': False, +# 'identity.roles': False +#} + +# Dict used to restrict user private subnet cidr range. +# An empty list means that user input will not be restricted +# for a corresponding IP version. By default, there is +# no restriction for IPv4 or IPv6. To restrict +# user private subnet cidr range set ALLOWED_PRIVATE_SUBNET_CIDR +# to something like +#ALLOWED_PRIVATE_SUBNET_CIDR = { +# 'ipv4': ['10.0.0.0/8', '192.168.0.0/16'], +# 'ipv6': ['fc00::/7'] +#} +ALLOWED_PRIVATE_SUBNET_CIDR = {'ipv4': [], 'ipv6': []} + +# Projects and users can have extra attributes as defined by keystone v3. +# Horizon has the ability to display these extra attributes via this setting. +# If you'd like to display extra data in the project or user tables, set the +# corresponding dict key to the attribute name, followed by the display name. +# For more information, see horizon's customization (http://docs.openstack.org/developer/horizon/topics/customizing.html#horizon-customization-module-overrides) +#PROJECT_TABLE_EXTRA_INFO = { +# 'phone_num': _('Phone Number'), +#} +#USER_TABLE_EXTRA_INFO = { +# 'phone_num': _('Phone Number'), +#} + +# Password will have an expiration date when using keystone v3 and enabling the +# feature. +# This setting allows you to set the number of days that the user will be alerted +# prior to the password expiration. +# Once the password expires keystone will deny the access and users must +# contact an admin to change their password. +#PASSWORD_EXPIRES_WARNING_THRESHOLD_DAYS = 0 + + + + + + + + + +# Custom WRS settings +import configss + +ALLOWED_HOSTS = ["*"] + + +HORIZON_CONFIG["password_autocomplete"] = "off" + +# The OPENSTACK_HEAT_STACK settings can be used to disable password +# field required while launching the stack. 
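+# With 'enable_user_pass' set to False below, the Launch Stack form stops
+# prompting for a stack user password: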
+OPENSTACK_HEAT_STACK = {
+    'enable_user_pass': False,
+}
+
+OPENSTACK_HOST = "controller"
+OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
+OPENSTACK_API_VERSIONS = {"identity": 3}
+
+OPENSTACK_NEUTRON_NETWORK['enable_distributed_router'] = True
+
+
+# TODO(tsmith) remove this, only for HP custom, this isn't being used
+# Load Region Config params, if present
+# Config OPENSTACK_HOST is still required in region mode since Titanium Cloud
+# does not use the local_settings populated via packstack
+try:
+    if os.path.exists('/etc/openstack-dashboard/region-config.ini'):
+        if not configss.CONFSS:
+            configss.load('/etc/openstack-dashboard/region-config.ini')
+
+            OPENSTACK_HOST = configss.CONFSS['shared_services']['openstack_host']
+            OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
+            AVAILABLE_REGIONS = [(OPENSTACK_KEYSTONE_URL,
+                                  configss.CONFSS['shared_services']['region_name'])]
+            REGION_NAME = configss.CONFSS['shared_services']['region_name']
+            SS_ENABLED = "True"
+        else:
+            SS_ENABLED = "Failed"
+    else:
+        SS_ENABLED = "False"
+except Exception:
+    SS_ENABLED = "Exception"
+
+# Load Horizon region exclusion list
+REGION_EXCLUSIONS = []
+try:
+    if os.path.exists('/opt/branding/horizon-region-exclusions.csv'):
+        with open('/opt/branding/horizon-region-exclusions.csv') as f:
+            for line in f:
+                if line.startswith('#') or line.startswith(' '):
+                    continue
+                REGION_EXCLUSIONS = line.rstrip('\n').rstrip('\r').split(',')
+except Exception:
+    pass
+
+# Check whether this node is part of a distributed cloud
+DC_MODE = False
+if distributed_cloud_role and distributed_cloud_role in ['systemcontroller', 'subcloud']:
+    DC_MODE = True
+
+OPENSTACK_ENDPOINT_TYPE = "internalURL"
+
+
+# Override Django temporary file upload directory
+# Directory in which upload streamed files will be temporarily saved. 
A value of +# `None` will make Django use the operating system's default temporary directory +FILE_UPLOAD_TEMP_DIR = "/scratch/horizon" + +# Override openstack-dashboard NG_CACHE_TEMPLATE_AGE +NG_TEMPLATE_CACHE_AGE = 300 + +# Conf file location on CentOS +POLICY_FILES_PATH = "/etc/openstack-dashboard" + + +# Settings for OperationLogMiddleware +OPERATION_LOG_ENABLED = True +OPERATION_LOG_OPTIONS = { + 'mask_fields': ['password', 'bm_password', 'bm_confirm_password', + 'current_password', 'confirm_password', 'new_password'], + 'target_methods': ['POST', 'PUT', 'DELETE'], + 'format': ("[%(project_name)s %(project_id)s] [%(user_name)s %(user_id)s]" + " [%(method)s %(request_url)s %(http_status)s]" + " parameters:[%(param)s] message:[%(message)s]"), +} + + +# Wind River CGCS Branding Settings +SITE_BRANDING = "Akraino Edge Stack" +# To be deprecated +HORIZON_CONFIG["help_url"] = "http://www.windriver.com/support/" + +# Note (Eddie Ramirez): The theme name will be updated after r0 +AVAILABLE_THEMES = [ + ('default', 'Default', 'themes/default'), + ('material', 'Material', 'themes/material'), + ('titanium', 'Titanium', 'themes/titanium'), +] +DEFAULT_THEME = 'titanium' + +for root, dirs, files in os.walk('/opt/branding/applied'): + if 'manifest.py' in files: + execfile(os.path.join(root, 'manifest.py')) + AVAILABLE_THEMES = [ + ('default', 'Default', 'themes/default'), + ('material', 'Material', 'themes/material'), + ('titanium', 'Titanium', 'themes/titanium'), + ('custom', 'Custom', '/opt/branding/applied'), + ] + DEFAULT_THEME = 'custom' + +STATIC_ROOT = "/www/pages/static" +COMPRESS_OFFLINE = True + +# Secure site configuration +SESSION_COOKIE_HTTPONLY = True + +# Size of thread batch +THREAD_BATCH_SIZE = 100 + +try: + if os.path.exists('/etc/openstack-dashboard/horizon-config.ini'): + if not configss.CONFSS or 'horizon_params' not in configss.CONFSS: + configss.load('/etc/openstack-dashboard/horizon-config.ini') + + if configss.CONFSS['horizon_params']['https_enabled'] == 'true': + CSRF_COOKIE_SECURE = True + SESSION_COOKIE_SECURE = True + + if configss.CONFSS['auth']['lockout_period']: + LOCKOUT_PERIOD_SEC = float(configss.CONFSS['auth']['lockout_period']) + if configss.CONFSS['auth']['lockout_retries']: + LOCKOUT_RETRIES_NUM = int(configss.CONFSS['auth']['lockout_retries']) + + ENABLE_MURANO_TAB = False + try: + if configss.CONFSS['optional_tabs']['murano_enabled'] == 'True': + ENABLE_MURANO_TAB = True + except Exception: + # disable murano tab if we cannot find the murano_enabled param + pass + + ENABLE_MAGNUM_TAB = False + try: + if configss.CONFSS['optional_tabs']['magnum_enabled'] == 'True': + ENABLE_MAGNUM_TAB = True + except Exception: + # disable magnum tab if we cannot find the magnum_enabled param + pass + +except Exception: + pass + + +LOGGING = { + 'version': 1, + # When set to True this will disable all logging except + # for loggers specified in this configuration dictionary. Note that + # if nothing is specified here and disable_existing_loggers is True, + # django.db.backends will still log unless it is disabled explicitly. 
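+    # (This second LOGGING definition intentionally replaces the earlier
+    # console-oriented one, directing most loggers to syslog instead.)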
+ 'disable_existing_loggers': False, + 'formatters': { + 'simple': { + 'format': '%(levelno)s %(levelname)s %(message)s', + }, + 'standard': { + 'format': '%(levelno)s %(asctime)s [%(levelname)s] %(name)s: %(message)s', + }, + 'verbose': { + 'format': '%(levelno)s %(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s', + }, + 'operation': { + # The format of "%(message)s" is defined by + # OPERATION_LOG_OPTIONS['format'] + 'format': '%(asctime)s %(message)s', + }, + }, + 'handlers': { + 'null': { + 'level': 'DEBUG', + 'class': 'logging.NullHandler', + }, + 'console': { + # Set the level to "DEBUG" for verbose output logging. + 'level': 'INFO', + 'class': 'logging.StreamHandler', + }, + 'syslog': { + # Set the level to "DEBUG" for verbose output logging. + 'level': 'INFO', + 'formatter': 'standard', + 'class': 'logging.handlers.SysLogHandler', + 'facility': 'local7', + 'address': '/dev/log', + }, + 'operation': { + 'level': 'INFO', + 'formatter': 'operation', + 'class': 'logging.handlers.SysLogHandler', + 'facility': 'local7', + 'address': '/dev/log', + }, + }, + 'loggers': { + # Logging from django.db.backends is VERY verbose, send to null + # by default. + 'django.db.backends': { + 'handlers': ['null'], + 'propagate': False, + }, + 'requests': { + 'handlers': ['null'], + 'propagate': False, + }, + 'horizon': { + 'handlers': ['syslog'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'horizon.operation_log': { + 'handlers': ['syslog'], + 'level': 'INFO', + 'propagate': False, + }, + 'openstack_dashboard': { + 'handlers': ['syslog'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'novaclient': { + 'handlers': ['syslog'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'cinderclient': { + 'handlers': ['syslog'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'keystoneclient': { + 'handlers': ['syslog'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'glanceclient': { + 'handlers': ['syslog'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'neutronclient': { + 'handlers': ['syslog'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'heatclient': { + 'handlers': ['syslog'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'swiftclient': { + 'handlers': ['null'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'openstack_auth': { + 'handlers': ['syslog'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'nose.plugins.manager': { + 'handlers': ['syslog'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'django': { + 'handlers': ['syslog'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'iso8601': { + 'handlers': ['null'], + 'propagate': False, + }, + 'scss': { + 'handlers': ['null'], + 'propagate': False, + }, + }, +} + + + diff --git a/openstack/python-horizon/centos/files/openstack-dashboard-httpd-2.4.conf b/openstack/python-horizon/centos/files/openstack-dashboard-httpd-2.4.conf new file mode 100644 index 00000000..820008c5 --- /dev/null +++ b/openstack/python-horizon/centos/files/openstack-dashboard-httpd-2.4.conf @@ -0,0 +1,19 @@ +WSGIDaemonProcess dashboard +WSGIProcessGroup dashboard +WSGISocketPrefix run/wsgi + +WSGIScriptAlias /dashboard /usr/share/openstack-dashboard/openstack_dashboard/wsgi/django.wsgi +Alias /dashboard/static /usr/share/openstack-dashboard/static + + + Options All + AllowOverride All + Require all granted + + + + Options All + AllowOverride All + Require all granted + + diff --git a/openstack/python-horizon/centos/files/openstack-dashboard-httpd-logging.conf b/openstack/python-horizon/centos/files/openstack-dashboard-httpd-logging.conf 
new file mode 100644 index 00000000..5292b9ef --- /dev/null +++ b/openstack/python-horizon/centos/files/openstack-dashboard-httpd-logging.conf @@ -0,0 +1,32 @@ +# if you want logging to a separate file, please update your config +# according to the last 4 lines in this snippet, and also take care +# to introduce a directive. +# + +WSGISocketPrefix run/wsgi + + + WSGIScriptAlias /dashboard /usr/share/openstack-dashboard/openstack_dashboard/wsgi/django.wsgi + Alias /static /usr/share/openstack-dashboard/static + + WSGIDaemonProcess dashboard + WSGIProcessGroup dashboard + + #DocumentRoot %HORIZON_DIR%/.blackhole/ + + + Options FollowSymLinks + AllowOverride None + + + + Options Indexes FollowSymLinks MultiViews + AllowOverride None + Order allow,deny + allow from all + + + ErrorLog logs/openstack_dashboard_error.log + LogLevel warn + CustomLog logs/openstack_dashboard_access.log combined + diff --git a/openstack/python-horizon/centos/files/python-django-horizon-logrotate.conf b/openstack/python-horizon/centos/files/python-django-horizon-logrotate.conf new file mode 100644 index 00000000..08725fa2 --- /dev/null +++ b/openstack/python-horizon/centos/files/python-django-horizon-logrotate.conf @@ -0,0 +1,8 @@ +/var/log/horizon/*.log { + weekly + rotate 4 + missingok + compress + minsize 100k +} + diff --git a/openstack/python-horizon/centos/files/python-django-horizon-systemd.conf b/openstack/python-horizon/centos/files/python-django-horizon-systemd.conf new file mode 100644 index 00000000..8aa1d4cc --- /dev/null +++ b/openstack/python-horizon/centos/files/python-django-horizon-systemd.conf @@ -0,0 +1,3 @@ +[Service] +ExecStartPre=/usr/bin/python /usr/share/openstack-dashboard/manage.py collectstatic --noinput --clear +ExecStartPre=/usr/bin/python /usr/share/openstack-dashboard/manage.py compress --force diff --git a/openstack/python-horizon/centos/python-django-horizon.spec b/openstack/python-horizon/centos/python-django-horizon.spec new file mode 100755 index 00000000..8dc96018 --- /dev/null +++ b/openstack/python-horizon/centos/python-django-horizon.spec @@ -0,0 +1,517 @@ +%{!?upstream_version: %global upstream_version %{version}%{?milestone}} + +Name: python-django-horizon +# Liberty semver reset +# https://review.openstack.org/#/q/I6a35fa0dda798fad93b804d00a46af80f08d475c,n,z +Epoch: 1 +Version: 12.0.0 +Release: 2%{?_tis_dist}.%{tis_patch_ver} +Summary: Django application for talking to Openstack + +Group: Development/Libraries +# Code in horizon/horizon/utils taken from django which is BSD +License: ASL 2.0 and BSD +URL: http://horizon.openstack.org/ +Source0: horizon-%{version}.tar.gz +Source2: openstack-dashboard-httpd-2.4.conf +Source3: python-django-horizon-systemd.conf + +# demo config for separate logging +Source4: openstack-dashboard-httpd-logging.conf + +# logrotate config +Source5: python-django-horizon-logrotate.conf + +# WRS +Source7: horizon.init +Source8: horizon-clearsessions +Source10: local_settings.py +Source11: horizon-patching-restart +Source12: horizon-region-exclusions.csv +Source13: guni_config.py +Source14: horizon-assets-compress + +# +# BuildArch needs to be located below patches in the spec file. Don't ask! 
+# + +BuildArch: noarch + +BuildRequires: python-django +Requires: python-django + +# WRS +BuildRequires: cgts-client +Requires: cgts-client + +Requires: pytz +Requires: python-six >= 1.9.0 +Requires: python-pbr + +BuildRequires: python2-devel +BuildRequires: python-setuptools +BuildRequires: python-pbr >= 2.0.0 +BuildRequires: git +BuildRequires: python-six >= 1.9.0 +BuildRequires: gettext + +# for checks: +%if 0%{?rhel} == 0 +BuildRequires: python-django-nose >= 1.2 +BuildRequires: python-coverage +BuildRequires: python-mox3 +BuildRequires: python-nose-exclude +BuildRequires: python-nose +BuildRequires: python-selenium +%endif +BuildRequires: python-netaddr +BuildRequires: python-anyjson +BuildRequires: python-iso8601 + +# additional provides to be consistent with other django packages +Provides: django-horizon = %{epoch}:%{version}-%{release} + +%description +Horizon is a Django application for providing Openstack UI components. +It allows performing site administrator (viewing account resource usage, +configuring users, accounts, quotas, flavors, etc.) and end user +operations (start/stop/delete instances, create/restore snapshots, view +instance VNC console, etc.) + + +%package -n openstack-dashboard +Summary: Openstack web user interface reference implementation +Group: Applications/System + +Requires: httpd +Requires: mod_wsgi +Requires: %{name} = %{epoch}:%{version}-%{release} +Requires: python-django-openstack-auth >= 3.5.0 +Requires: python-django-compressor >= 2.0 +Requires: python-django-appconf +Requires: python-django-babel +Requires: python-lesscpy + +Requires: python-iso8601 +Requires: python-glanceclient +Requires: python-keystoneclient >= 1:3.8.0 +Requires: python-novaclient +Requires: python-neutronclient +Requires: python-cinderclient +Requires: python-swiftclient >= 3.2.0 +Requires: python-heatclient >= 1.6.1 +Requires: python-netaddr +Requires: python-osprofiler >= 1.4.0 +Requires: python-pymongo >= 3.0.2 +Requires: python-django-pyscss >= 2.0.2 +Requires: python-semantic_version +Requires: python-XStatic +Requires: python-XStatic-jQuery +Requires: python-XStatic-Angular >= 1:1.3.7 +Requires: python-XStatic-Angular-Bootstrap +Requires: python-XStatic-Angular-Schema-Form +Requires: python-XStatic-D3 >= 3.5.17.0 +Requires: python-XStatic-Font-Awesome >= 4.7.0 +Requires: python-XStatic-Hogan +Requires: python-XStatic-JQuery-Migrate +Requires: python-XStatic-JQuery-TableSorter +Requires: python-XStatic-JQuery-quicksearch +Requires: python-XStatic-JSEncrypt >= 2.3.1.1 +Requires: python-XStatic-Jasmine >= 2.4.1.1 +Requires: python-XStatic-Rickshaw +Requires: python-XStatic-Spin +Requires: python-XStatic-jquery-ui +Requires: python-XStatic-Bootstrap-Datepicker +Requires: python-XStatic-Bootstrap-SCSS >= 3.3.7.1 +Requires: python-XStatic-termjs >= 0.0.7.0 +Requires: python-XStatic-smart-table +Requires: python-XStatic-Angular-lrdragndrop +Requires: python-XStatic-Angular-Gettext +Requires: python-XStatic-Angular-FileUpload +Requires: python-XStatic-Magic-Search +Requires: python-XStatic-bootswatch +Requires: python-XStatic-roboto-fontface >= 0.5.0.0 +Requires: python-XStatic-mdi +Requires: python-XStatic-objectpath +Requires: python-XStatic-tv4 + +Requires: python-scss >= 1.3.4 +Requires: fontawesome-fonts-web >= 4.1.0 + +Requires: python-oslo-concurrency >= 3.8.0 +Requires: python-oslo-config >= 2:4.0.0 +Requires: python-oslo-i18n >= 2.1.0 +Requires: python-oslo-serialization >= 1.10.0 +Requires: python-oslo-utils >= 3.20.0 +Requires: python-oslo-policy >= 1.23.0 +Requires: 
python-babel
+Requires: python-futurist
+Requires: python-pint
+
+Requires: openssl
+Requires: logrotate
+
+Requires: PyYAML >= 3.10
+
+BuildRequires: python-django-openstack-auth >= 3.5.0
+BuildRequires: python-django-compressor >= 2.0
+BuildRequires: python-django-appconf
+BuildRequires: python-lesscpy
+BuildRequires: python-semantic_version
+BuildRequires: python-django-pyscss >= 2.0.2
+BuildRequires: python-XStatic
+BuildRequires: python-XStatic-jQuery
+BuildRequires: python-XStatic-Angular >= 1:1.3.7
+BuildRequires: python-XStatic-Angular-Bootstrap
+BuildRequires: python-XStatic-Angular-Schema-Form
+BuildRequires: python-XStatic-D3
+BuildRequires: python-XStatic-Font-Awesome
+BuildRequires: python-XStatic-Hogan
+BuildRequires: python-XStatic-JQuery-Migrate
+BuildRequires: python-XStatic-JQuery-TableSorter
+BuildRequires: python-XStatic-JQuery-quicksearch
+BuildRequires: python-XStatic-JSEncrypt >= 2.3.1.1
+BuildRequires: python-XStatic-Jasmine
+BuildRequires: python-XStatic-Rickshaw
+BuildRequires: python-XStatic-Spin
+BuildRequires: python-XStatic-jquery-ui
+BuildRequires: python-XStatic-Bootstrap-Datepicker
+BuildRequires: python-XStatic-Bootstrap-SCSS >= 3.3.7.1
+BuildRequires: python-XStatic-termjs
+BuildRequires: python-XStatic-smart-table
+BuildRequires: python-XStatic-Angular-lrdragndrop
+BuildRequires: python-XStatic-Angular-FileUpload
+BuildRequires: python-XStatic-Magic-Search
+BuildRequires: python-XStatic-Angular-Gettext
+BuildRequires: python-XStatic-bootswatch
+BuildRequires: python-XStatic-roboto-fontface
+BuildRequires: python-XStatic-mdi
+BuildRequires: python-XStatic-objectpath
+BuildRequires: python-XStatic-tv4
+# bootstrap-scss requires at least python-scss >= 1.2.1
+BuildRequires: python-scss >= 1.3.4
+BuildRequires: fontawesome-fonts-web >= 4.1.0
+BuildRequires: python-oslo-concurrency
+BuildRequires: python-oslo-config
+BuildRequires: python-oslo-i18n
+BuildRequires: python-oslo-serialization
+BuildRequires: python-oslo-utils
+BuildRequires: python-oslo-policy
+BuildRequires: python-babel
+BuildRequires: python-pint
+
+BuildRequires: pytz
+BuildRequires: systemd
+# WRS
+BuildRequires: systemd-devel
+
+%description -n openstack-dashboard
+Openstack Dashboard is a web user interface for Openstack. The package
+provides a reference implementation using the Django Horizon project,
+mostly consisting of JavaScript and CSS to tie it all together as a
+standalone site.
+
+
+# Turn OFF sphinx documentation in WRS environment
+# Mock does not have /dev/log so sphinx-build will always fail
+%if 0%{?with_doc}
+%package doc
+Summary: Documentation for Django Horizon
+Group: Documentation
+
+Requires: %{name} = %{epoch}:%{version}-%{release}
+BuildRequires: python-sphinx >= 1.1.3
+
+# Doc building basically means we have to mirror Requires:
+BuildRequires: python-openstackdocstheme
+BuildRequires: python-glanceclient
+BuildRequires: python-keystoneclient
+BuildRequires: python-novaclient >= 1:6.0.0
+BuildRequires: python-neutronclient
+BuildRequires: python-cinderclient
+BuildRequires: python-swiftclient
+BuildRequires: python-heatclient
+
+%description doc
+Documentation for the Django Horizon application for talking with Openstack
+
+%endif
+
+%package -n openstack-dashboard-theme
+Summary: OpenStack web user interface reference implementation theme module
+Requires: openstack-dashboard = %{epoch}:%{version}-%{release}
+
+%description -n openstack-dashboard-theme
+Customization module for OpenStack Dashboard to provide a branded logo.
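The %prep section below customizes the shipped configuration with a series of sed edits before anything is built. As a minimal sketch, the effective settings those edits produce look like this (values copied verbatim from the sed expressions below; the first five land in local_settings.py.example, the last two in openstack_dashboard/settings.py; this is an illustrative excerpt, not the complete file):

    # Illustrative excerpt only -- effective values after the %prep sed edits
    DEBUG = False
    WEBROOT = '/dashboard/'
    ALLOWED_HOSTS = ['horizon.example.com', 'localhost']
    LOCAL_PATH = '/tmp'
    POLICY_FILES_PATH = '/etc/openstack-dashboard'
    BIN_DIR = '/usr/bin'
    COMPRESS_OFFLINE = True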
+
+%prep
+%autosetup -n horizon-%{upstream_version} -S git
+
+# WRS: remove troublesome files introduced by tox
+rm -f openstack_dashboard/test/.secret_key_store
+rm -f openstack_dashboard/test/*.secret_key_store.lock
+rm -f openstack_dashboard/local/.secret_key_store
+rm -f openstack_dashboard/local/*.secret_key_store.lock
+rm -rf horizon.egg-info
+
+cp %{SOURCE10} openstack_dashboard/local/local_settings.py
+
+# drop config snippet
+cp -p %{SOURCE4} .
+cp -p %{SOURCE13} .
+
+# customize default settings
+# WAS [PATCH] disable debug, move web root
+sed -i "/^DEBUG =.*/c\DEBUG = False" openstack_dashboard/local/local_settings.py.example
+sed -i "/^WEBROOT =.*/c\WEBROOT = '/dashboard/'" openstack_dashboard/local/local_settings.py.example
+sed -i "/^.*ALLOWED_HOSTS =.*/c\ALLOWED_HOSTS = ['horizon.example.com', 'localhost']" openstack_dashboard/local/local_settings.py.example
+sed -i "/^.*LOCAL_PATH =.*/c\LOCAL_PATH = '/tmp'" openstack_dashboard/local/local_settings.py.example
+sed -i "/^.*POLICY_FILES_PATH =.*/c\POLICY_FILES_PATH = '/etc/openstack-dashboard'" openstack_dashboard/local/local_settings.py.example
+
+sed -i "/^BIN_DIR = .*/c\BIN_DIR = '/usr/bin'" openstack_dashboard/settings.py
+sed -i "/^COMPRESS_PARSER = .*/a COMPRESS_OFFLINE = True" openstack_dashboard/settings.py
+
+# set COMPRESS_OFFLINE=True
+sed -i 's:COMPRESS_OFFLINE.=.False:COMPRESS_OFFLINE = True:' openstack_dashboard/settings.py
+
+# WRS: MANIFEST needs .eslintrc files for angular
+echo "include .eslintrc" >> MANIFEST.in
+# MANIFEST needs to include json and pot files under openstack_dashboard
+echo "recursive-include openstack_dashboard *.json *.pot .eslintrc" >> MANIFEST.in
+# MANIFEST needs to include pot files under horizon
+echo "recursive-include horizon *.pot .eslintrc" >> MANIFEST.in
+
+
+%build
+# compile message strings
+cd horizon && django-admin compilemessages && cd ..
+cd openstack_dashboard && django-admin compilemessages && cd ..
+# Dist tarball is missing .mo files so they're not listed in distributed egg metadata.
+# Removing egg-info and letting PBR regenerate it was working around that issue,
+# but PBR cannot regenerate a complete SOURCES.txt, so some other files won't get installed.
+# Further reading on why not to remove upstream egg metadata:
+# https://github.com/emonty/python-oslo-messaging/commit/f632684eb2d582253601e8da7ffdb8e55396e924
+# https://fedorahosted.org/fpc/ticket/488
+# WRS: two problems: (1) we don't have an egg yet; (2) there are no .mo files.
+#echo >> horizon.egg-info/SOURCES.txt
+#ls */locale/*/LC_MESSAGES/django*mo >> horizon.egg-info/SOURCES.txt
+export PBR_VERSION=%{version}
+%{__python} setup.py build
+
+# WRS: package our own local_settings.py and run compression on the controller
+# compress css, js etc.
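The collectstatic/compress invocations that follow are commented out deliberately: the LOGGING configuration shown earlier routes all output to the syslog socket /dev/log, which does not exist inside a mock chroot, so running manage.py at build time would fail. The same two commands are instead run on the target system by the httpd systemd drop-in shown above; the horizon-assets-compress helper packaged from Source14 appears to serve the same purpose.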
+#cp openstack_dashboard/local/local_settings.py.example openstack_dashboard/local/local_settings.py
+# get it ready for compressing later in puppet-horizon
+# WRS: turn off compression because /dev/log does not exist in mock
+#%{__python} manage.py collectstatic --noinput --clear
+#%{__python} manage.py compress --force
+
+
+%if 0%{?with_doc}
+# build docs
+export PYTHONPATH="$( pwd ):$PYTHONPATH"
+sphinx-build -b html doc/source html
+
+# undo hack
+#cp openstack_dashboard/local/local_settings.py.example openstack_dashboard/local/local_settings.py
+
+# Fix hidden-file-or-dir warnings
+rm -fr html/.doctrees html/.buildinfo
+%endif
+
+%install
+export PBR_VERSION=%{version}
+%{__python} setup.py install -O1 --skip-build --root %{buildroot}
+
+# WRS
+install -d -m 755 %{buildroot}/opt/branding
+mkdir -p %{buildroot}%{_sysconfdir}/rc.d/init.d
+install -m 755 -D -p %{SOURCE7} %{buildroot}%{_sysconfdir}/rc.d/init.d/horizon
+install -m 755 -D -p %{SOURCE8} %{buildroot}/%{_bindir}/horizon-clearsessions
+install -m 755 -D -p %{SOURCE11} %{buildroot}/%{_bindir}/horizon-patching-restart
+install -m 755 -D -p %{SOURCE12} %{buildroot}/opt/branding/horizon-region-exclusions.csv
+install -m 755 -D -p %{SOURCE14} %{buildroot}/%{_bindir}/horizon-assets-compress
+
+# drop httpd-conf snippet
+install -m 0644 -D -p %{SOURCE2} %{buildroot}%{_sysconfdir}/httpd/conf.d/openstack-dashboard.conf
+install -d -m 755 %{buildroot}%{_datadir}/openstack-dashboard
+install -d -m 755 %{buildroot}%{_sharedstatedir}/openstack-dashboard
+install -d -m 755 %{buildroot}%{_sysconfdir}/openstack-dashboard
+
+# create directory for systemd snippet
+mkdir -p %{buildroot}%{_unitdir}/httpd.service.d/
+cp %{SOURCE3} %{buildroot}%{_unitdir}/httpd.service.d/openstack-dashboard.conf
+
+
+# Copy everything to /usr/share
+mv %{buildroot}%{python_sitelib}/openstack_dashboard \
+   %{buildroot}%{_datadir}/openstack-dashboard
+cp manage.py %{buildroot}%{_datadir}/openstack-dashboard
+# WRS
+cp guni_config.py %{buildroot}%{_datadir}/openstack-dashboard
+rm -rf %{buildroot}%{python_sitelib}/openstack_dashboard
+
+# remove unnecessary .po files
+find %{buildroot} -name django.po -exec rm '{}' \;
+find %{buildroot} -name djangojs.po -exec rm '{}' \;
+
+# Move config to /etc, symlink it back to /usr/share
+mv %{buildroot}%{_datadir}/openstack-dashboard/openstack_dashboard/local/local_settings.py.example %{buildroot}%{_sysconfdir}/openstack-dashboard/local_settings
+#ln -s ../../../../../%{_sysconfdir}/openstack-dashboard/local_settings %{buildroot}%{_datadir}/openstack-dashboard/openstack_dashboard/local/local_settings.py
+
+mv %{buildroot}%{_datadir}/openstack-dashboard/openstack_dashboard/conf/*.json %{buildroot}%{_sysconfdir}/openstack-dashboard
+
+%find_lang django --all-name
+
+grep "\/usr\/share\/openstack-dashboard" django.lang > dashboard.lang
+grep "\/site-packages\/horizon" django.lang > horizon.lang
+
+# copy static files to %{_datadir}/openstack-dashboard/static
+mkdir -p %{buildroot}%{_datadir}/openstack-dashboard/static
+cp -a openstack_dashboard/static/* %{buildroot}%{_datadir}/openstack-dashboard/static
+cp -a horizon/static/* %{buildroot}%{_datadir}/openstack-dashboard/static
+# WRS: there is no static folder, since the compress step was skipped
+#cp -a static/* %{buildroot}%{_datadir}/openstack-dashboard/static
+
+# create /var/lib/openstack-dashboard/ and own it
+mkdir -p %{buildroot}%{_sharedstatedir}/openstack-dashboard
+
+# create /var/log/horizon and own it
+mkdir -p %{buildroot}%{_var}/log/horizon
+
+# place logrotate config
+mkdir -p %{buildroot}%{_sysconfdir}/logrotate.d +cp -a %{SOURCE5} %{buildroot}%{_sysconfdir}/logrotate.d/openstack-dashboard + +%check +# don't run tests on rhel +%if 0%{?rhel} == 0 +# since rawhide has django-1.7 now, tests fail +#./run_tests.sh -N -P +%endif + +%post -n openstack-dashboard +# ugly hack to set a unique SECRET_KEY +sed -i "/^from horizon.utils import secret_key$/d" /etc/openstack-dashboard/local_settings +sed -i "/^SECRET_KEY.*$/{N;s/^.*$/SECRET_KEY='`openssl rand -hex 10`'/}" /etc/openstack-dashboard/local_settings +# reload systemd unit files +systemctl daemon-reload >/dev/null 2>&1 || : + +%postun +# update systemd unit files +%{systemd_postun} + +%files -f horizon.lang +%doc README.rst openstack-dashboard-httpd-logging.conf +%license LICENSE +%dir %{python_sitelib}/horizon +%{python_sitelib}/horizon/*.py* +%{python_sitelib}/horizon/browsers +%{python_sitelib}/horizon/conf +%{python_sitelib}/horizon/contrib +%{python_sitelib}/horizon/forms +%{python_sitelib}/horizon/hacking +%{python_sitelib}/horizon/management +%{python_sitelib}/horizon/static +%{python_sitelib}/horizon/tables +%{python_sitelib}/horizon/tabs +%{python_sitelib}/horizon/templates +%{python_sitelib}/horizon/templatetags +%{python_sitelib}/horizon/test +%{python_sitelib}/horizon/utils +%{python_sitelib}/horizon/workflows +%{python_sitelib}/horizon/karma.conf.js +%{python_sitelib}/horizon/middleware +%{python_sitelib}/*.egg-info + +%files -n openstack-dashboard -f dashboard.lang +%license LICENSE +%dir %{_datadir}/openstack-dashboard/ +%{_datadir}/openstack-dashboard/*.py* +%{_datadir}/openstack-dashboard/static +%{_datadir}/openstack-dashboard/openstack_dashboard/*.py* +%{_datadir}/openstack-dashboard/openstack_dashboard/api +%{_datadir}/openstack-dashboard/openstack_dashboard/contrib +%dir %{_datadir}/openstack-dashboard/openstack_dashboard/dashboards/ +%{_datadir}/openstack-dashboard/openstack_dashboard/dashboards/admin +%{_datadir}/openstack-dashboard/openstack_dashboard/dashboards/identity +%{_datadir}/openstack-dashboard/openstack_dashboard/dashboards/project +%{_datadir}/openstack-dashboard/openstack_dashboard/dashboards/settings +# WRS +%{_datadir}/openstack-dashboard/openstack_dashboard/dashboards/dc_admin +%{_datadir}/openstack-dashboard/openstack_dashboard/dashboards/__init__.py* +%{_datadir}/openstack-dashboard/openstack_dashboard/django_pyscss_fix +%{_datadir}/openstack-dashboard/openstack_dashboard/enabled +%{_datadir}/openstack-dashboard/openstack_dashboard/karma.conf.js +%{_datadir}/openstack-dashboard/openstack_dashboard/local +%{_datadir}/openstack-dashboard/openstack_dashboard/management +%{_datadir}/openstack-dashboard/openstack_dashboard/static +%{_datadir}/openstack-dashboard/openstack_dashboard/templates +%{_datadir}/openstack-dashboard/openstack_dashboard/templatetags +%{_datadir}/openstack-dashboard/openstack_dashboard/themes +%{_datadir}/openstack-dashboard/openstack_dashboard/test +%{_datadir}/openstack-dashboard/openstack_dashboard/usage +%{_datadir}/openstack-dashboard/openstack_dashboard/utils +%{_datadir}/openstack-dashboard/openstack_dashboard/wsgi +%dir %{_datadir}/openstack-dashboard/openstack_dashboard +%dir %{_datadir}/openstack-dashboard/openstack_dashboard/locale +%dir %{_datadir}/openstack-dashboard/openstack_dashboard/locale/?? +%dir %{_datadir}/openstack-dashboard/openstack_dashboard/locale/??_?? 
+%dir %{_datadir}/openstack-dashboard/openstack_dashboard/locale/??/LC_MESSAGES +%dir %{_datadir}/openstack-dashboard/openstack_dashboard/locale/??_??/LC_MESSAGES +%{_datadir}/openstack-dashboard/openstack_dashboard/.eslintrc + +%dir %attr(0750, root, apache) %{_sysconfdir}/openstack-dashboard +%dir %attr(0750, apache, apache) %{_sharedstatedir}/openstack-dashboard +%dir %attr(0750, apache, apache) %{_var}/log/horizon +%config(noreplace) %{_sysconfdir}/httpd/conf.d/openstack-dashboard.conf +%config(noreplace) %attr(0640, root, apache) %{_sysconfdir}/openstack-dashboard/local_settings +%config(noreplace) %attr(0640, root, apache) %{_sysconfdir}/openstack-dashboard/cinder_policy.json +%config(noreplace) %attr(0640, root, apache) %{_sysconfdir}/openstack-dashboard/keystone_policy.json +%config(noreplace) %attr(0640, root, apache) %{_sysconfdir}/openstack-dashboard/nova_policy.json +%config(noreplace) %attr(0640, root, apache) %{_sysconfdir}/openstack-dashboard/glance_policy.json +%config(noreplace) %attr(0640, root, apache) %{_sysconfdir}/openstack-dashboard/neutron_policy.json +%config(noreplace) %attr(0640, root, apache) %{_sysconfdir}/openstack-dashboard/heat_policy.json +%config(noreplace) %{_sysconfdir}/logrotate.d/openstack-dashboard +%attr(755,root,root) %dir %{_unitdir}/httpd.service.d +%config(noreplace) %{_unitdir}/httpd.service.d/openstack-dashboard.conf + +# WRS +%dir /opt/branding +%config(noreplace) /opt/branding/horizon-region-exclusions.csv +%{_sysconfdir}/rc.d/init.d/horizon +%{_bindir}/horizon-clearsessions +%{_bindir}/horizon-patching-restart +%{_bindir}/horizon-assets-compress + + +%if 0%{?with_doc} + +%files doc +%doc html +%license LICENSE + +%endif + + +%files -n openstack-dashboard-theme +#%{_datadir}/openstack-dashboard/openstack_dashboard/dashboards/theme +#%{_datadir}/openstack-dashboard/openstack_dashboard/enabled/_99_customization.* + +%changelog +* Mon Oct 04 2017 Radomir Dopieralski 1:12.0.0-2 +- Require at least 3.3.7.1 version of XStatic-bootstrap-SCSS package + +* Wed Aug 30 2017 rdo-trunk 1:12.0.0-1 +- Update to 12.0.0 + +* Mon Aug 28 2017 rdo-trunk 1:12.0.0-0.3.0rc3 +- Update to 12.0.0.0rc3 + +* Fri Aug 25 2017 Alfredo Moralejo 1:12.0.0-0.2.0rc2 +- Update to 12.0.0.0rc2 + +* Mon Aug 21 2017 Alfredo Moralejo 1:12.0.0-0.1.0rc1 +- Update to 12.0.0.0rc1 + + diff --git a/openstack/python-ironicclient/centos/build_srpm.data b/openstack/python-ironicclient/centos/build_srpm.data new file mode 100644 index 00000000..f46b146e --- /dev/null +++ b/openstack/python-ironicclient/centos/build_srpm.data @@ -0,0 +1,5 @@ +TAR_NAME="python-ironicclient" +SRC_DIR="$CGCS_BASE/git/python-ironicclient" + +TIS_BASE_SRCREV=096834f09ae5d8cabc8c0b1ccbac271ddb13e2ee +TIS_PATCH_VER=1 diff --git a/openstack/python-ironicclient/centos/python-ironicclient.spec b/openstack/python-ironicclient/centos/python-ironicclient.spec new file mode 100644 index 00000000..71e26bef --- /dev/null +++ b/openstack/python-ironicclient/centos/python-ironicclient.spec @@ -0,0 +1,141 @@ +%{!?upstream_version: %global upstream_version %{version}%{?milestone}} + +%if 0%{?fedora} +%global with_python3 1 +%endif + +%global sname ironicclient + +Name: python-ironicclient +Version: 1.17.0 +Release: 0%{?_tis_dist}.%{tis_patch_ver} +Summary: Python client for Ironic + +License: ASL 2.0 +URL: https://pypi.python.org/pypi/python-ironicclient +Source0: https://tarballs.openstack.org/python-ironicclient/python-ironicclient-%{version}%{?milestone}.tar.gz +BuildArch: noarch + + +%description +A python and command line 
client library for Ironic. + + +%package -n python2-%{sname} +Summary: Python client for Ironic + +BuildRequires: python2-devel +BuildRequires: python-pbr >= 2.0.0 +BuildRequires: python-setuptools + +Requires: python-appdirs >= 1.3.0 +Requires: python-dogpile-cache >= 0.6.2 +Requires: python-httplib2 +Requires: python-jsonschema +Requires: python-openstackclient >= 3.3.0 +Requires: python-keystoneauth1 >= 3.1.0 +Requires: python-pbr >= 2.0.0 +Requires: python-prettytable +Requires: python-six >= 1.9.0 +Requires: python-osc-lib >= 1.7.0 +Requires: python-oslo-i18n >= 2.1.0 +Requires: python-oslo-serialization >= 1.10.0 +Requires: python-oslo-utils >= 3.20.0 +Requires: python-requests +Requires: PyYAML + +%{?python_provide:%python_provide python2-%{sname}} + +%description -n python2-%{sname} +A python and command line client library for Ironic + + +%if 0%{?with_python3} +%package -n python3-%{sname} +Summary: Python client for Ironic + +BuildRequires: python3-devel +BuildRequires: python3-pbr >= 2.0.0 +BuildRequires: python3-setuptools + +Requires: python3-appdirs >= 1.3.0 +Requires: python3-dogpile-cache >= 0.6.2 +Requires: python3-httplib2 +Requires: python3-jsonschema +Requires: python3-openstackclient >= 3.3.0 +Requires: python3-keystoneauth1 >= 3.1.0 +Requires: python3-pbr >= 2.0.0 +Requires: python3-prettytable +Requires: python3-six >= 1.9.0 +Requires: python3-osc-lib >= 1.7.0 +Requires: python3-oslo-i18n >= 2.1.0 +Requires: python3-oslo-serialization >= 1.10.0 +Requires: python3-oslo-utils >= 3.20.0 +Requires: python3-requests +Requires: python3-PyYAML + +%{?python_provide:%python_provide python3-%{sname}} + +%description -n python3-%{sname} +A python and command line client library for Ironic +%endif + +%prep +%setup -q -n %{name}-%{upstream_version} + +# Remove the requirements file so that pbr hooks don't add it +# to distutils requires_dist config +rm -rf {test-,}requirements.txt tools/{pip,test}-requires + +%build +export PBR_VERSION=%{version} +%py2_build +%if 0%{?with_python3} +%py3_build +%endif + + +%install +export PBR_VERSION=%{version} +%if 0%{?with_python3} +%py3_install +mv %{buildroot}%{_bindir}/ironic %{buildroot}%{_bindir}/ironic-%{python3_version} +ln -s ./ironic-%{python3_version} %{buildroot}%{_bindir}/ironic-3 +%endif + +%py2_install +mv %{buildroot}%{_bindir}/ironic %{buildroot}%{_bindir}/ironic-%{python2_version} +ln -s ./ironic-%{python2_version} %{buildroot}%{_bindir}/ironic-2 + +ln -s ./ironic-2 %{buildroot}%{_bindir}/ironic + +install -p -D -m 644 tools/ironic.bash_completion %{buildroot}%{_sysconfdir}/bash_completion.d/ironic.bash_completion + +%files -n python2-%{sname} +%doc README.rst +%license LICENSE +%{_bindir}/ironic +%{_bindir}/ironic-2 +%{_bindir}/ironic-%{python2_version} +%{python2_sitelib}/ironicclient* +%{python2_sitelib}/python_ironicclient* +%{_sysconfdir}/bash_completion.d/ironic.bash_completion + +%if 0%{?with_python3} +%files -n python3-%{sname} +%doc README.rst +%license LICENSE +%{_bindir}/ironic-3 +%{_bindir}/ironic-%{python3_version} +%{python3_sitelib}/ironicclient* +%{python3_sitelib}/python_ironicclient* +%endif + + +%changelog +* Wed Sep 06 2017 rdo-trunk 1.17.0-1 +- Update to 1.17.0 + +* Mon Aug 14 2017 Alfredo Moralejo 1.16.0-1 +- Update to 1.16.0 + diff --git a/openstack/python-keystone/centos/build_srpm.data b/openstack/python-keystone/centos/build_srpm.data new file mode 100644 index 00000000..31e3c077 --- /dev/null +++ b/openstack/python-keystone/centos/build_srpm.data @@ -0,0 +1,5 @@ +TAR_NAME="keystone" 
+SRC_DIR="$CGCS_BASE/git/keystone"
+COPY_LIST="$FILES_BASE/*"
+TIS_BASE_SRCREV=6a67918f9d5f39564af8eacc57b80cba98242683
+TIS_PATCH_VER=GITREVCOUNT+2
diff --git a/openstack/python-keystone/centos/files/keystone-all b/openstack/python-keystone/centos/files/keystone-all
new file mode 100644
index 00000000..d6a7c418
--- /dev/null
+++ b/openstack/python-keystone/centos/files/keystone-all
@@ -0,0 +1,151 @@
+#!/bin/sh
+# Copyright (c) 2013-2018 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+### BEGIN INIT INFO
+# Provides:          OpenStack Keystone-wsgi
+# Required-Start:    networking
+# Required-Stop:     networking
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: OpenStack Keystone
+# Description:       OpenStack Identity service running on the WSGI-compatible gunicorn web server
+#
+### END INIT INFO
+
+RETVAL=0
+#public 5000
+
+DESC_PUBLIC="openstack-keystone"
+
+PIDFILE_PUBLIC="/var/run/$DESC_PUBLIC.pid"
+
+PYTHON=`which python`
+
+source /etc/keystone/keystone-extra.conf
+
+if [ -n "${@:2:1}" ] ; then
+    if [ "${@:2:1}" = "--public-bind-addr" ] ; then
+        PUBLIC_BIND_ADDR_CMD=${@:3:1}
+    fi
+fi
+
+
+###
+EXEC="/usr/bin/gunicorn"
+
+WORKER="eventlet"
+# Increased timeout to facilitate large image uploads
+TIMEOUT="200"
+
+# Calculate the number of workers based on the worker count provided by
+# Platform Eng, which is retrieved from keystone-extra.conf
+
+TIS_WORKERS_FACTOR=1.5
+TIS_WORKERS=$(echo "${TIS_WORKERS_FACTOR}*${TIS_PUBLIC_WORKERS}"|bc )
+TIS_WORKERS=${TIS_WORKERS%.*}
+
+# --max-requests, --max-requests-jitter configuration
+# --max-requests = the max number of requests a worker will process before restarting
+# --max-requests-jitter = the maximum jitter to add to the max_requests setting
+MAX_REQUESTS=100000
+MAX_REQ_JITTER_CAP_FACTOR=0.5
+MAX_REQ_JITTER_PUBLIC=$(echo "${TIS_WORKERS}*${MAX_REQ_JITTER_CAP_FACTOR}+1"|bc)
+MAX_REQ_JITTER_PUBLIC=${MAX_REQ_JITTER_PUBLIC%.*}
+
+
+start()
+{
+    # Got the proper number of workers. Starting gunicorn now.
+    echo -e "Initialising keystone service using gunicorn .. \n"
+
+    if [ -z "$PUBLIC_BIND_ADDR" ]; then
+        echo "Keystone floating IP not found. Cannot start services. Exiting .."
+        exit 1
+    fi
+    BIND_PUBLIC=$PUBLIC_BIND_ADDR:5000
+
+    if [ -e $PIDFILE_PUBLIC ]; then
+        PIDDIR=/proc/$(cat $PIDFILE_PUBLIC)
+        if [ -d ${PIDDIR} ]; then
+            echo "$DESC_PUBLIC already running."
+            exit 1
+        else
+            echo "Removing stale PID file $PIDFILE_PUBLIC"
+            rm -f $PIDFILE_PUBLIC
+        fi
+    fi
+
+    echo -e "Starting $DESC_PUBLIC...\n";
+    echo -e "Worker is ${WORKER} --workers ${TIS_WORKERS} --timeout ${TIMEOUT} --max-requests ${MAX_REQUESTS} --max-requests-jitter ${MAX_REQ_JITTER_PUBLIC}\n" ;
+
+    echo -e "Starting keystone process at port 5000 \n" ;
+
+    start-stop-daemon --start --quiet --background --pidfile ${PIDFILE_PUBLIC} \
+        --make-pidfile --exec ${PYTHON} -- ${EXEC} --bind ${BIND_PUBLIC} \
+        --worker-class ${WORKER} --workers ${TIS_WORKERS} --timeout ${TIMEOUT} \
+        --max-requests ${MAX_REQUESTS} --max-requests-jitter ${MAX_REQ_JITTER_PUBLIC} \
+        --log-syslog \
+        --pythonpath '/usr/share/keystone' public:application --name keystone-public
+
+    RETVAL=$?
+    if [ $RETVAL -eq 0 ]; then
+        echo -e "Keystone started at port 5000... \n"
+    else
+        echo -e "Failed to start Keystone .. \n"
+    fi
+}
+
+stop()
+{
+    if [ -e $PIDFILE_PUBLIC ]; then
+        start-stop-daemon --stop --quiet --pidfile $PIDFILE_PUBLIC
+        RETVAL_PUBLIC=$?
+        if [ $RETVAL_PUBLIC -eq 0 ]; then
+            echo "Stopped $DESC_PUBLIC."
+ else + echo "Stopping failed - $PIDFILE_PUBLIC" + fi + rm -f $PIDFILE_PUBLIC + else + echo "Already stopped - $PIDFILE_PUBLIC" + fi +} + +status() +{ + pid_public=`cat $PIDFILE_PUBLIC 2>/dev/null` + + if [ -n "$pid_public" ]; then + echo -e "\033[32m $DESC_PUBLIC is running..\033[0m" + else + echo -e "\033[31m $DESC_PUBLIC is not running..\033[0m" + fi +} + + + +case "$1" in + start) + start + ;; + stop) + stop + ;; + restart|force-reload|reload) + stop + start + ;; + status) + status + ;; + *) + #echo "Usage: $0 {start|stop|force-reload|restart|reload|status} OR {/usr/bin/keystone-all start --public-bind-addr xxx.xxx.xxx}" + start + #RETVAL=1 + ;; +esac + +exit $RETVAL diff --git a/openstack/python-keystone/centos/files/keystone-fernet-keys-rotate-active b/openstack/python-keystone/centos/files/keystone-fernet-keys-rotate-active new file mode 100644 index 00000000..8080ea00 --- /dev/null +++ b/openstack/python-keystone/centos/files/keystone-fernet-keys-rotate-active @@ -0,0 +1,64 @@ +#!/bin/bash + +# +# Wrapper script to rotate keystone fernet keys on active controller only +# +KEYSTONE_KEYS_ROTATE_INFO="/var/run/keystone-keys-rotate.info" +KEYSTONE_KEYS_ROTATE_CMD="/usr/bin/nice -n 2 /usr/bin/keystone-manage fernet_rotate --keystone-user keystone --keystone-group keystone" + +function is_active_pgserver() +{ + # Determine whether we're running on the same controller as the service. + local service=postgres + local enabledactive=$(/usr/bin/sm-query service $service| grep enabled-active) + if [ "x$enabledactive" == "x" ] + then + # enabled-active not found for that service on this controller + return 1 + else + # enabled-active found for that resource + return 0 + fi +} + +if is_active_pgserver +then + if [ ! -f ${KEYSTONE_KEYS_ROTATE_INFO} ] + then + echo delay_count=0 > ${KEYSTONE_KEYS_ROTATE_INFO} + fi + + source ${KEYSTONE_KEYS_ROTATE_INFO} + sudo -u postgres psql -d sysinv -c "SELECT alarm_id, entity_instance_id from i_alarm;" | grep -P "^(?=.*100.101)(?=.*${HOSTNAME})" &>/dev/null + if [ $? 
-eq 0 ]
+    then
+        source /etc/platform/platform.conf
+        if [ "${system_type}" = "All-in-one" ]
+        then
+            source /etc/init.d/task_affinity_functions.sh
+            idle_core=$(get_most_idle_core)
+            if [ "$idle_core" -ne "0" ]
+            then
+                sh -c "exec taskset -c $idle_core ${KEYSTONE_KEYS_ROTATE_CMD}"
+                sed -i "/delay_count/s/=.*/=0/" ${KEYSTONE_KEYS_ROTATE_INFO}
+                exit 0
+            fi
+        fi
+
+        if [ "$delay_count" -lt "3" ]
+        then
+            newval=$(($delay_count+1))
+            sed -i "/delay_count/s/=.*/=$newval/" ${KEYSTONE_KEYS_ROTATE_INFO}
+            (sleep 3600; /usr/bin/keystone-fernet-keys-rotate-active) &
+            exit 0
+        fi
+
+    fi
+
+    eval ${KEYSTONE_KEYS_ROTATE_CMD}
+    sed -i "/delay_count/s/=.*/=0/" ${KEYSTONE_KEYS_ROTATE_INFO}
+
+fi
+
+exit 0
+
diff --git a/openstack/python-keystone/centos/files/openstack-keystone.defaultconf b/openstack/python-keystone/centos/files/openstack-keystone.defaultconf
new file mode 100644
index 00000000..ffc936c2
--- /dev/null
+++ b/openstack/python-keystone/centos/files/openstack-keystone.defaultconf
@@ -0,0 +1,2 @@
+[DEFAULT]
+log_dir= /var/log/keystone
\ No newline at end of file
diff --git a/openstack/python-keystone/centos/files/openstack-keystone.logrotate b/openstack/python-keystone/centos/files/openstack-keystone.logrotate
new file mode 100644
index 00000000..b5224ef0
--- /dev/null
+++ b/openstack/python-keystone/centos/files/openstack-keystone.logrotate
@@ -0,0 +1,11 @@
+/var/log/keystone/*.log {
+    weekly
+    dateext
+    rotate 10
+    size 1M
+    missingok
+    compress
+    notifempty
+    su keystone keystone
+    minsize 100k
+}
diff --git a/openstack/python-keystone/centos/files/openstack-keystone.service b/openstack/python-keystone/centos/files/openstack-keystone.service
new file mode 100644
index 00000000..a72aa84b
--- /dev/null
+++ b/openstack/python-keystone/centos/files/openstack-keystone.service
@@ -0,0 +1,14 @@
+[Unit]
+Description=OpenStack Identity Service (code-named Keystone)
+After=syslog.target network.target
+
+[Service]
+Type=forking
+#RemainAfterExit is set to yes as we have 2 PIDs to monitor
+RemainAfterExit=yes
+ExecStart=/usr/bin/keystone-all start
+ExecStop=/usr/bin/keystone-all stop
+ExecReload=/usr/bin/keystone-all reload
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/python-keystone/centos/files/openstack-keystone.sysctl b/openstack/python-keystone/centos/files/openstack-keystone.sysctl
new file mode 100644
index 00000000..ca985afa
--- /dev/null
+++ b/openstack/python-keystone/centos/files/openstack-keystone.sysctl
@@ -0,0 +1,3 @@
+# By default, keystone starts a service on port 5000
+# http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt
+net.ipv4.ip_local_reserved_ports = 5000
diff --git a/openstack/python-keystone/centos/files/openstack-keystone.tmpfiles b/openstack/python-keystone/centos/files/openstack-keystone.tmpfiles
new file mode 100644
index 00000000..241df8ab
--- /dev/null
+++ b/openstack/python-keystone/centos/files/openstack-keystone.tmpfiles
@@ -0,0 +1 @@
+d /run/keystone 0700 keystone keystone -
diff --git a/openstack/python-keystone/centos/files/password-rules.conf b/openstack/python-keystone/centos/files/password-rules.conf
new file mode 100644
index 00000000..a110cd4b
--- /dev/null
+++ b/openstack/python-keystone/centos/files/password-rules.conf
@@ -0,0 +1,34 @@
+# The password rules capture the [security_compliance]
+# section of the generic Keystone configuration (keystone.conf).
+# This configuration is used to statically define the password
+# rules for password validation in pre-Keystone environments
+#
+# N.B.: Only
set non-default keys here (default commented configuration +# items not needed) + +[security_compliance] + +# +# From keystone +# + +# This controls the number of previous user password iterations to keep in +# history, in order to enforce that newly created passwords are unique. Setting +# the value to one (the default) disables this feature. Thus, to enable this +# feature, values must be greater than 1. This feature depends on the `sql` +# backend for the `[identity] driver`. (integer value) +# Minimum value: 1 +unique_last_password_count = 2 + +# The regular expression used to validate password strength requirements. By +# default, the regular expression will match any password. The following is an +# example of a pattern which requires at least 1 letter, 1 digit, and have a +# minimum length of 7 characters: ^(?=.*\d)(?=.*[a-zA-Z]).{7,}$ This feature +# depends on the `sql` backend for the `[identity] driver`. (string value) +password_regex = ^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[!@#$%^&*()<>{}+=_\\\[\]\-?|~`,.;:]).{7,}$ + +# Describe your password regular expression here in language for humans. If a +# password fails to match the regular expression, the contents of this +# configuration variable will be returned to users to explain why their +# requested password was insufficient. (string value) +password_regex_description = Password must have a minimum length of 7 characters, and must contain at least 1 upper case, 1 lower case, 1 digit, and 1 special character diff --git a/openstack/python-keystone/centos/openstack-keystone.spec b/openstack/python-keystone/centos/openstack-keystone.spec new file mode 100644 index 00000000..3840f3cb --- /dev/null +++ b/openstack/python-keystone/centos/openstack-keystone.spec @@ -0,0 +1,310 @@ +%global with_doc %{!?_without_doc:1}%{?_without_doc:0} +%global service keystone + +%{!?upstream_version: %global upstream_version %{version}%{?milestone}} + +Name: openstack-keystone +Epoch: 0 +Version: 12.0.0 +Release: 1%{?_tis_dist}.%{tis_patch_ver} +Summary: OpenStack Identity Service +License: Apache-2.0 +URL: https://launchpad.net/keystone/ +Source0: %{service}-%{version}.tar.gz + +Source1: openstack-keystone.logrotate +Source2: openstack-keystone.sysctl +Source3: openstack-keystone.tmpfiles +Source4: openstack-keystone.defaultconf + +#WRS +Source99: openstack-keystone.service +Source100: keystone-all +Source101: keystone-fernet-keys-rotate-active +Source102: password-rules.conf + +BuildArch: noarch +BuildRequires: openstack-macros +BuildRequires: openstack-tempest +BuildRequires: python-webtest +BuildRequires: python-bcrypt +BuildRequires: python2-devel +BuildRequires: python-fixtures +BuildRequires: python-freezegun +BuildRequires: python-lxml +BuildRequires: python-mock +# WRS: Required for debian based builds only +# use openstackdocstheme on RHEL instead +#BuildRequires: python-os-api-ref +BuildRequires: python2-openstackdocstheme +BuildRequires: python-os-testr +# Required to build keystone.conf +BuildRequires: python-oslo-cache >= 1.5.0 +BuildRequires: python-oslo-config >= 2:3.9.0 +BuildRequires: python-oslotest +BuildRequires: python-osprofiler >= 1.1.0 +BuildRequires: python-pbr >= 1.8 +BuildRequires: python-subunit +BuildRequires: python-reno +BuildRequires: python-requests +BuildRequires: python2-scrypt +BuildRequires: python-testrepository +BuildRequires: python-testresources +# Required to compile translation files +BuildRequires: python-babel + +#WRS: Need these for build_sphinx +BuildRequires: tsconfig +BuildRequires: python2-pycodestyle + 
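The password_regex defined in password-rules.conf above uses ordinary Python re lookahead syntax, so the rule can be sanity-checked in isolation. A minimal sketch, with the pattern copied verbatim from the file and made-up sample passwords:

    import re

    # At least one digit, one lower-case, one upper-case and one special
    # character, minimum length 7 (pattern as in password-rules.conf above).
    PASSWORD_REGEX = (r'^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])'
                      r'(?=.*[!@#$%^&*()<>{}+=_\\\[\]\-?|~`,.;:]).{7,}$')

    for candidate in ('short1A', 'NoSpecial1x', 'G00d-pass'):
        verdict = 'accepted' if re.match(PASSWORD_REGEX, candidate) else 'rejected'
        print(candidate, verdict)  # only 'G00d-pass' is accepted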
+Requires: python-keystone = %{epoch}:%{version}-%{release}
+Requires: python-keystoneclient >= 1:2.3.1
+
+Requires(post): systemd
+Requires(preun): systemd
+Requires(postun): systemd
+BuildRequires: systemd
+BuildRequires: systemd-devel
+BuildRequires: xmlsec1-openssl
+Requires(pre): shadow-utils
+
+%description
+Keystone is a Python implementation of the OpenStack
+(http://www.openstack.org) identity service API.
+.
+This package contains the keystone python libraries.
+
+%package -n python-keystone
+Summary: Keystone Python libraries
+Group: Application/System
+Requires: python-babel
+Requires: python-paste
+Requires: python-paste-deploy
+Requires: python-PyMySQL
+Requires: python-routes
+Requires: python-sqlalchemy
+Requires: python-webob
+Requires: python-bcrypt
+Requires: python-cryptography
+Requires: python-dogpile-cache
+Requires: python-jsonschema
+Requires: python-keystoneclient
+Requires: python-keystonemiddleware
+Requires: python-ldappool
+Requires: python-msgpack
+Requires: python-oauthlib
+Requires: python-oslo-cache
+Requires: python-oslo-concurrency
+Requires: python-oslo-config
+Requires: python-oslo-context
+Requires: python-oslo-db
+Requires: python-oslo-i18n
+Requires: python-oslo-log
+Requires: python-oslo-messaging
+Requires: python-oslo-middleware
+Requires: python-oslo-policy
+Requires: python-oslo-serialization
+Requires: python-oslo-utils
+Requires: python-osprofiler
+Requires: python-passlib
+Requires: python-pbr
+Requires: python-pycadf
+Requires: python-pysaml2
+Requires: python-memcached
+Requires: python-six
+Requires: python-migrate
+Requires: python-stevedore
+Requires: python-ldap
+
+%description -n python-keystone
+Keystone is a Python implementation of the OpenStack
+(http://www.openstack.org) identity service API.
+This package contains the Keystone Python library.
+
+%package doc
+Summary: Documentation for OpenStack Identity Service
+Group: Documentation
+BuildRequires: python-paste-deploy
+BuildRequires: python-routes
+BuildRequires: python-sphinx
+BuildRequires: python-cryptography
+BuildRequires: python-dogpile-cache
+BuildRequires: python-jsonschema
+BuildRequires: python-keystonemiddleware
+BuildRequires: python-ldappool
+BuildRequires: python-msgpack
+BuildRequires: python-oauthlib
+BuildRequires: python-oslo-concurrency
+BuildRequires: python-oslo-db
+BuildRequires: python-oslo-i18n
+BuildRequires: python-oslo-log
+BuildRequires: python-oslo-messaging
+BuildRequires: python-oslo-middleware
+BuildRequires: python-oslo-policy
+BuildRequires: python-oslo-sphinx
+BuildRequires: python-passlib
+BuildRequires: python-pysaml2
+BuildRequires: python-memcached
+
+%description doc
+OpenStack Keystone documentation.
+.
+This package contains the documentation.
+
+%prep
+%setup -q -n keystone-%{upstream_version}
+
+find . \( -name .gitignore -o -name .placeholder \) -delete
+find keystone -name \*.py -exec sed -i '/\/usr\/bin\/env python/d' {} \;
+# Let RPM handle the dependencies
+rm -f test-requirements.txt requirements.txt
+
+# adjust paths to WSGI scripts
+sed -i 's#/local/bin#/bin#' httpd/wsgi-keystone.conf
+sed -i 's#apache2#httpd#' httpd/wsgi-keystone.conf
+sed -i 's/^warning-is-error.*/warning-is-error = 0/g' setup.cfg
+
+%build
+#PYTHONPATH=.
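pbr normally derives a package version from git metadata, but the mock build runs from a tarball with no .git directory, so the specs pin the version instead: with PBR_VERSION exported (as done just below, and likewise in the horizon spec earlier), pbr uses that value directly and "python setup.py --version" reports it without probing git.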
+# WRS: export PBR version
+export PBR_VERSION=%{version}
+%{__python2} setup.py build
+
+%{__python2} setup.py build_sphinx --builder=html,man
+# remove the Sphinx-build leftovers
+rm -rf doc/build/html/.{doctrees,buildinfo}
+# config file generation
+oslo-config-generator --config-file config-generator/keystone.conf \
+--output-file etc/keystone.conf.sample
+# policy file generation
+oslopolicy-sample-generator --config-file config-generator/keystone-policy-generator.conf --output-file etc/keystone.policy.yaml
+
+%install
+# WRS: export PBR version
+export PBR_VERSION=%{version}
+%{__python2} setup.py install --skip-build --root %{buildroot}
+
+mkdir -p %{buildroot}%{_mandir}/man1
+install -d -m 755 %{buildroot}%{_sysconfdir}/keystone
+install -d -m 755 %{buildroot}%{_sysconfdir}/sysctl.d
+install -d -m 755 %{buildroot}%{_localstatedir}/{lib,log}/keystone
+install -d -m 750 %{buildroot}%{_localstatedir}/cache/keystone
+install -d -m 755 %{buildroot}%{_sysconfdir}/keystone/keystone.conf.d/
+
+# default dir for fernet tokens
+install -d -m 750 %{buildroot}%{_sysconfdir}/keystone/credential-keys/
+install -D -m 644 %{SOURCE3} %{buildroot}/%{_tmpfilesdir}/keystone.conf
+install -p -D -m 640 etc/keystone.conf.sample %{buildroot}%{_sysconfdir}/keystone/keystone.conf
+install -D -m 640 %{SOURCE4} %{buildroot}/%{_sysconfdir}/keystone/keystone.conf.d/010-keystone.conf
+#install -D -m 440 %{SOURCE5} %{buildroot}/%{_sysconfdir}/keystone/README.config
+install -p -D -m 640 etc/logging.conf.sample %{buildroot}%{_sysconfdir}/keystone/logging.conf
+install -p -D -m 640 etc/keystone-paste.ini %{buildroot}%{_sysconfdir}/keystone/keystone-paste.ini
+install -p -D -m 640 etc/keystone.policy.yaml %{buildroot}%{_sysconfdir}/keystone/keystone.policy.yaml
+install -p -D -m 640 etc/default_catalog.templates %{buildroot}%{_sysconfdir}/keystone/default_catalog.templates
+install -p -D -m 640 etc/sso_callback_template.html %{buildroot}%{_sysconfdir}/keystone/sso_callback_template.html
+# WRS: don't install a separate keystone logrotate file as this is managed by syslog-ng
+#install -p -D -m 644 %{SOURCE1} %{buildroot}%{_sysconfdir}/logrotate.d/openstack-keystone
+install -p -D -m 644 etc/policy.v3cloudsample.json %{buildroot}%{_datadir}/keystone/policy.v3cloudsample.json
+install -p -D -m 644 %{SOURCE2} %{buildroot}%{_sysconfdir}/sysctl.d/openstack-keystone.conf
+install -p -D -m 644 doc/build/man/*.1 %{buildroot}%{_mandir}/man1/
+# Install sample data script.
+install -p -D -m 755 tools/sample_data.sh %{buildroot}%{_datadir}/keystone/sample_data.sh
+# Install apache configuration files
+install -p -D -m 644 httpd/wsgi-keystone.conf %{buildroot}%{_datadir}/keystone/
+
+# WRS: install policy rules
+install -p -D -m 640 etc/policy.wrs.json %{buildroot}%{_sysconfdir}/keystone/policy.json
+
+# WRS: install keystone cron script
+install -p -D -m 755 %{SOURCE101} %{buildroot}%{_bindir}/keystone-fernet-keys-rotate-active
+
+# WRS: install password rules (readable only)
+install -p -D -m 440 %{SOURCE102} %{buildroot}%{_sysconfdir}/keystone/password-rules.conf
+
+# WRS: install keystone public and admin gunicorn apps
+install -p -D -m 755 etc/admin.py %{buildroot}/%{_datarootdir}/keystone/admin.py
+install -p -D -m 755 etc/public.py %{buildroot}/%{_datarootdir}/keystone/public.py
+
+# WRS: install openstack-keystone systemd service file
+install -p -D -m 644 %{SOURCE99} %{buildroot}%{_unitdir}/openstack-keystone.service
+
+# WRS: Install keystone-all bash script
+install -p -D -m 755 %{SOURCE100} %{buildroot}%{_bindir}/keystone-all
+
+%pre
+# 163:163 for keystone (openstack-keystone) - rhbz#752842
+getent group keystone >/dev/null || groupadd -r --gid 163 keystone
+getent passwd keystone >/dev/null || \
+useradd --uid 163 -r -g keystone -d %{_sharedstatedir}/keystone -s /sbin/nologin \
+-c "OpenStack Keystone Daemons" keystone
+exit 0
+
+# WRS: disable testr
+#%check
+# don't want to depend on hacking for package building
+#rm keystone/tests/unit/test_hacking_checks.py
+#%{__python2} setup.py testr
+
+%post
+%tmpfiles_create %{_tmpfilesdir}/keystone.conf
+%systemd_post openstack-keystone.service
+%sysctl_apply openstack-keystone.conf
+
+%preun
+%systemd_preun openstack-keystone.service
+
+%postun
+%systemd_postun_with_restart openstack-keystone.service
+
+%files
+%license LICENSE
+%doc README.rst
+%{_mandir}/man1/keystone*.1.gz
+%{_bindir}/keystone-wsgi-admin
+%{_bindir}/keystone-wsgi-public
+%{_bindir}/keystone-manage
+# WRS: add keystone-all as part of newton rebase
+%{_bindir}/keystone-all
+# WRS: add Keystone fernet keys cron job
+%{_bindir}/keystone-fernet-keys-rotate-active
+%_tmpfilesdir/keystone.conf
+%dir %{_datadir}/keystone
+%attr(0644, root, keystone) %{_datadir}/keystone/policy.v3cloudsample.json
+%attr(0755, root, root) %{_datadir}/keystone/sample_data.sh
+%attr(0644, root, keystone) %{_datadir}/keystone/wsgi-keystone.conf
+# WRS: add openstack-keystone systemd service file
+%{_unitdir}/openstack-keystone.service
+%dir %attr(0750, root, keystone) %{_sysconfdir}/keystone
+%dir %attr(0750, root, keystone) %{_sysconfdir}/keystone/keystone.conf.d/
+%config(noreplace) %attr(0640, root, keystone) %{_sysconfdir}/keystone/keystone.conf
+%config(noreplace) %attr(0640, root, keystone) %{_sysconfdir}/keystone/keystone.conf.d/010-keystone.conf
+%config(noreplace) %attr(0640, root, keystone) %{_sysconfdir}/keystone/keystone-paste.ini
+%config(noreplace) %attr(0640, root, keystone) %{_sysconfdir}/keystone/logging.conf
+%config(noreplace) %attr(0640, root, keystone) %{_sysconfdir}/keystone/default_catalog.templates
+%config(noreplace) %attr(0640, keystone, keystone) %{_sysconfdir}/keystone/keystone.policy.yaml
+%config(noreplace) %attr(0640, keystone, keystone) %{_sysconfdir}/keystone/policy.json
+%config(noreplace) %attr(0640, keystone, keystone) %{_sysconfdir}/keystone/sso_callback_template.html
+# WRS: add password rules configuration
+%attr(0440, root, keystone) %{_sysconfdir}/keystone/password-rules.conf
+
+# WRS: log rotate not needed
+#%config(noreplace)
%{_sysconfdir}/logrotate.d/openstack-keystone +%dir %attr(0755, %{keystone}, %{keystone}) %{_localstatedir}/lib/keystone +%dir %attr(0750, %{keystone}, %{keystone}) %{_localstatedir}/log/keystone +%dir %attr(0750, %{keystone}, %{keystone}) %{_localstatedir}/cache/keystone +%{_sysconfdir}/sysctl.d/openstack-keystone.conf + +%files -n python-keystone +%{_datarootdir}/keystone/public*.py* +%{_datarootdir}/keystone/admin*.py* +%defattr(-,root,root,-) +%doc README.rst +%license LICENSE +%{python2_sitelib}/keystone +%{python2_sitelib}/keystone-*.egg-info + +%files doc +%license LICENSE +%doc doc/build/html + +%changelog diff --git a/openstack/python-keystoneauth1/centos/build_srpm.data b/openstack/python-keystoneauth1/centos/build_srpm.data new file mode 100644 index 00000000..024e3e13 --- /dev/null +++ b/openstack/python-keystoneauth1/centos/build_srpm.data @@ -0,0 +1 @@ +TIS_PATCH_VER=5 diff --git a/openstack/python-keystoneauth1/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch b/openstack/python-keystoneauth1/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch new file mode 100644 index 00000000..0883d1b8 --- /dev/null +++ b/openstack/python-keystoneauth1/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch @@ -0,0 +1,27 @@ +From 6326d77cf992363766099fc4a8405ec1deac082e Mon Sep 17 00:00:00 2001 +From: Scott Little +Date: Mon, 20 Mar 2017 09:21:36 -0400 +Subject: [PATCH 4/4] WRS: 0001-Update-package-versioning-for-TIS-format.patch + +Conflicts: + SPECS/python-keystoneauth1.spec +--- + SPECS/python-keystoneauth1.spec | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/SPECS/python-keystoneauth1.spec b/SPECS/python-keystoneauth1.spec +index b97182b..d0c26a0 100644 +--- a/SPECS/python-keystoneauth1.spec ++++ b/SPECS/python-keystoneauth1.spec +@@ -8,7 +8,7 @@ + + Name: python-%{pypi_name} + Version: 3.1.0 +-Release: 1%{?dist} ++Release: 1.el7%{?_tis_dist}.%{tis_patch_ver} + Summary: Authentication Library for OpenStack Clients + License: ASL 2.0 + URL: https://pypi.io/pypi/%{pypi_name} +-- +1.8.3.1 + diff --git a/openstack/python-keystoneauth1/centos/meta_patches/0002-meta-fix-neutron-error-not-shown-to-user.patch b/openstack/python-keystoneauth1/centos/meta_patches/0002-meta-fix-neutron-error-not-shown-to-user.patch new file mode 100644 index 00000000..63a17e17 --- /dev/null +++ b/openstack/python-keystoneauth1/centos/meta_patches/0002-meta-fix-neutron-error-not-shown-to-user.patch @@ -0,0 +1,26 @@ +From ef9e70e4a52b958bbc88c148cb3c4c9d18030b7d Mon Sep 17 00:00:00 2001 +From: Scott Little +Date: Mon, 20 Mar 2017 09:21:36 -0400 +Subject: [PATCH 1/4] WRS: meta-fix-neutron-error-not-shown-to-user.patch + +--- + SPECS/python-keystoneauth1.spec | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/SPECS/python-keystoneauth1.spec b/SPECS/python-keystoneauth1.spec +index ff1673b..fb9e8e1 100644 +--- a/SPECS/python-keystoneauth1.spec ++++ b/SPECS/python-keystoneauth1.spec +@@ -14,6 +14,9 @@ License: ASL 2.0 + URL: https://pypi.io/pypi/%{pypi_name} + Source0: https://tarballs.openstack.org/keystoneauth/keystoneauth1-%{upstream_version}.tar.gz + ++# WRS ++Patch0001: fix-neutron-error-not-shown-to-user.patch ++ + BuildArch: noarch + + %description +-- +1.8.3.1 + diff --git a/openstack/python-keystoneauth1/centos/meta_patches/0003-meta-spec-remote-client.patch b/openstack/python-keystoneauth1/centos/meta_patches/0003-meta-spec-remote-client.patch new file mode 100644 index 00000000..43049dfd --- /dev/null +++ 
b/openstack/python-keystoneauth1/centos/meta_patches/0003-meta-spec-remote-client.patch @@ -0,0 +1,55 @@ +From 8777fc48e9b3e2086e6a68eb3601bde8ec927d6b Mon Sep 17 00:00:00 2001 +From: Scott Little +Date: Mon, 20 Mar 2017 09:21:36 -0400 +Subject: [PATCH 2/4] WRS: 0002-meta-spec-remote-client.patch + +--- + SPECS/python-keystoneauth1.spec | 14 ++++++++++++++ + 1 file changed, 14 insertions(+) + +diff --git a/SPECS/python-keystoneauth1.spec b/SPECS/python-keystoneauth1.spec +index fb9e8e1..dad6758 100644 +--- a/SPECS/python-keystoneauth1.spec ++++ b/SPECS/python-keystoneauth1.spec +diff --git a/SPECS/python-keystoneauth1.spec b/SPECS/python-keystoneauth1.spec +index 91968cc..578cc19 100644 +--- a/SPECS/python-keystoneauth1.spec ++++ b/SPECS/python-keystoneauth1.spec +@@ -116,6 +116,12 @@ BuildRequires: python-fixtures + %description doc + Documentation for OpenStack Identity Authentication Library + ++%package sdk ++Summary: SDK files for %{name} ++ ++%description sdk ++Contains SDK files for %{name} package ++ + %prep + %autosetup -n %{pypi_name}-%{upstream_version} -S git + +@@ -142,6 +148,11 @@ rm -rf %{pypi_name}.egg-info + %{__python} setup.py build_sphinx -b html + rm -rf doc/build/html/.buildinfo + ++ ++# install SDK package ++mkdir -p %{buildroot}/usr/share/remote-clients ++tar zcf %{buildroot}/usr/share/remote-clients/%{name}-%{version}.tgz --exclude='.gitignore' --exclude='.gitreview' --transform="s/keystoneauth1-%{version}/%{name}-%{version}/" -C .. keystoneauth1-%{version} ++ + %check + %{__python2} setup.py testr + %if 0%{?with_python3} +@@ -168,6 +179,9 @@ rm -rf .testrepository + %license LICENSE + %doc doc/build/html + ++%files sdk ++/usr/share/remote-clients/%{name}-%{version}.tgz ++ + %changelog + * Fri Aug 11 2017 Alfredo Moralejo 3.1.0-1 + - Update to 3.1.0 +-- +1.8.3.1 + diff --git a/openstack/python-keystoneauth1/centos/meta_patches/0004-meta-dont-remove-requirements-txt.patch b/openstack/python-keystoneauth1/centos/meta_patches/0004-meta-dont-remove-requirements-txt.patch new file mode 100644 index 00000000..8ea1bd0e --- /dev/null +++ b/openstack/python-keystoneauth1/centos/meta_patches/0004-meta-dont-remove-requirements-txt.patch @@ -0,0 +1,25 @@ +From b7378bf717a0930e2e57be89572b05a5da7cd2f2 Mon Sep 17 00:00:00 2001 +From: Scott Little +Date: Mon, 20 Mar 2017 09:21:36 -0400 +Subject: [PATCH 3/4] WRS: meta-dont-remove-requirements-txt.patch + +--- + SPECS/python-keystoneauth1.spec | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/SPECS/python-keystoneauth1.spec b/SPECS/python-keystoneauth1.spec +index dad6758..b97182b 100644 +--- a/SPECS/python-keystoneauth1.spec ++++ b/SPECS/python-keystoneauth1.spec +@@ -128,7 +128,7 @@ Contains SDK files for %{name} package + sed -i '/sphinx.ext.intersphinx.*$/d' doc/source/conf.py + + # Let RPM handle the dependencies +-rm -rf {test-,}requirements.txt ++rm -rf test-requirements.txt + # Remove bundled egg-info + rm -rf %{pypi_name}.egg-info + +-- +1.8.3.1 + diff --git a/openstack/python-keystoneauth1/centos/meta_patches/0005-meta-ignore-unexpected-arguments-to-token-plugin.patch b/openstack/python-keystoneauth1/centos/meta_patches/0005-meta-ignore-unexpected-arguments-to-token-plugin.patch new file mode 100644 index 00000000..e888f563 --- /dev/null +++ b/openstack/python-keystoneauth1/centos/meta_patches/0005-meta-ignore-unexpected-arguments-to-token-plugin.patch @@ -0,0 +1,25 @@ +From 927307745579900e1f207a0df8b715a19fb36407 Mon Sep 17 00:00:00 2001 +From: Kam Nasim +Date: Wed, 13 Dec 2017 18:59:08 -0500 
+Subject: [PATCH] meta patch for ignoring unexpected arguments to token auth + plugin + +--- + SPECS/python-keystoneauth1.spec | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/SPECS/python-keystoneauth1.spec b/SPECS/python-keystoneauth1.spec +index 2375ec7..818488b 100644 +--- a/SPECS/python-keystoneauth1.spec ++++ b/SPECS/python-keystoneauth1.spec +@@ -16,6 +16,7 @@ Source0: https://tarballs.openstack.org/keystoneauth/keystoneauth1-%{upstream + + # WRS + Patch0001: fix-neutron-error-not-shown-to-user.patch ++Patch0002: ignore-unexpected-arguments-to-token-auth-plugin.patch + + BuildArch: noarch + +-- +1.8.3.1 + diff --git a/openstack/python-keystoneauth1/centos/meta_patches/PATCH_ORDER b/openstack/python-keystoneauth1/centos/meta_patches/PATCH_ORDER new file mode 100644 index 00000000..adfd0f35 --- /dev/null +++ b/openstack/python-keystoneauth1/centos/meta_patches/PATCH_ORDER @@ -0,0 +1,5 @@ +0001-Update-package-versioning-for-TIS-format.patch +0002-meta-fix-neutron-error-not-shown-to-user.patch +0003-meta-spec-remote-client.patch +0004-meta-dont-remove-requirements-txt.patch +0005-meta-ignore-unexpected-arguments-to-token-plugin.patch diff --git a/openstack/python-keystoneauth1/centos/patches/fix-neutron-error-not-shown-to-user.patch b/openstack/python-keystoneauth1/centos/patches/fix-neutron-error-not-shown-to-user.patch new file mode 100644 index 00000000..7aa012e4 --- /dev/null +++ b/openstack/python-keystoneauth1/centos/patches/fix-neutron-error-not-shown-to-user.patch @@ -0,0 +1,31 @@ +From 4fe0798dc5f1457b19fe43361d5a1b235bd7a6a6 Mon Sep 17 00:00:00 2001 +From: rpm-build +Date: Thu, 25 Aug 2016 10:54:38 -0400 +Subject: [PATCH 1/1] US80213:Support for the openstack CLI command Fixed + an issue where neutron error response message was not fully propagating + through to the user + +--- + keystoneauth1/exceptions/http.py | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/keystoneauth1/exceptions/http.py b/keystoneauth1/exceptions/http.py +index 6f725ed..0265575 100644 +--- a/keystoneauth1/exceptions/http.py ++++ b/keystoneauth1/exceptions/http.py +@@ -415,6 +415,12 @@ def from_response(response, method, url): + error = body["error"] + kwargs["message"] = error.get("message") + kwargs["details"] = error.get("details") ++ # Neutron error response has "NeutronError" instead of "error" in the body ++ # This block ensures the error response is correctly constructed into an exception object ++ elif isinstance(body, dict) and isinstance(body.get("NeutronError"), dict): ++ error = body["NeutronError"] ++ kwargs["message"] = error.get("message") ++ kwargs["details"] = error.get("details") + elif content_type.startswith("text/"): + kwargs["details"] = response.text + +-- +1.8.3.1 + diff --git a/openstack/python-keystoneauth1/centos/patches/ignore-unexpected-arguments-to-token-auth-plugin.patch b/openstack/python-keystoneauth1/centos/patches/ignore-unexpected-arguments-to-token-auth-plugin.patch new file mode 100644 index 00000000..d71ac7c6 --- /dev/null +++ b/openstack/python-keystoneauth1/centos/patches/ignore-unexpected-arguments-to-token-auth-plugin.patch @@ -0,0 +1,29 @@ +From 559c134057501228adb585cc85ae0cac84cc450b Mon Sep 17 00:00:00 2001 +From: rpm-build +Date: Wed, 13 Dec 2017 18:53:18 -0500 +Subject: [PATCH] ignore unexpected arguments to token auth plugin + +When creating the token auth plugin, for authentication via token, extra +arguments might get passed in by the application that the auth plugin +doesn't care about. 
This will cause a TypeError due to unexpected +keyword arguments. These extra arguments should just be ignored instead +--- + keystoneauth1/token_endpoint.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/keystoneauth1/token_endpoint.py b/keystoneauth1/token_endpoint.py +index 675d4c1..0a2f160 100644 +--- a/keystoneauth1/token_endpoint.py ++++ b/keystoneauth1/token_endpoint.py +@@ -20,7 +20,7 @@ class Token(plugin.BaseAuthPlugin): + have a known endpoint and admin token that you want to use. + """ + +- def __init__(self, endpoint, token): ++ def __init__(self, endpoint, token, **kwargs): + # NOTE(jamielennox): endpoint is reserved for when plugins + # can be used to provide that information + self.endpoint = endpoint +-- +1.8.3.1 + diff --git a/openstack/python-keystoneauth1/centos/srpm_path b/openstack/python-keystoneauth1/centos/srpm_path new file mode 100644 index 00000000..49e35a09 --- /dev/null +++ b/openstack/python-keystoneauth1/centos/srpm_path @@ -0,0 +1 @@ +mirror:Source/python-keystoneauth1-3.1.0-1.el7.src.rpm diff --git a/openstack/python-keystoneclient/centos/build_srpm.data b/openstack/python-keystoneclient/centos/build_srpm.data new file mode 100644 index 00000000..c66bf348 --- /dev/null +++ b/openstack/python-keystoneclient/centos/build_srpm.data @@ -0,0 +1 @@ +TIS_PATCH_VER=7 diff --git a/openstack/python-keystoneclient/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch b/openstack/python-keystoneclient/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch new file mode 100644 index 00000000..ca1e1ae1 --- /dev/null +++ b/openstack/python-keystoneclient/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch @@ -0,0 +1,13 @@ +diff --git a/SPECS/python-keystoneclient.spec b/SPECS/python-keystoneclient.spec +index 79ab30f..e68bc6f 100644 +--- a/SPECS/python-keystoneclient.spec ++++ b/SPECS/python-keystoneclient.spec +@@ -8,7 +8,7 @@ + Name: python-keystoneclient + Epoch: 1 + Version: 3.13.0 +-Release: 1%{?dist} ++Release: 1.el7%{?_tis_dist}.%{tis_patch_ver} + Summary: Client library for OpenStack Identity API + License: ASL 2.0 + URL: https://launchpad.net/python-keystoneclient diff --git a/openstack/python-keystoneclient/centos/meta_patches/0002-meta-public-adminURL-detection.patch b/openstack/python-keystoneclient/centos/meta_patches/0002-meta-public-adminURL-detection.patch new file mode 100644 index 00000000..b3cafac8 --- /dev/null +++ b/openstack/python-keystoneclient/centos/meta_patches/0002-meta-public-adminURL-detection.patch @@ -0,0 +1,16 @@ +diff --git a/SPECS/python-keystoneclient.spec b/SPECS/python-keystoneclient.spec +index e68bc6f..c71629d 100644 +--- a/SPECS/python-keystoneclient.spec ++++ b/SPECS/python-keystoneclient.spec +@@ -14,6 +14,11 @@ License: ASL 2.0 + URL: https://launchpad.net/python-keystoneclient + Source0: https://tarballs.openstack.org/%{name}/%{name}-%{version}.tar.gz + ++# WRS ++Patch0001: internal-keystone-client-public-adminURL-detection.patch ++ ++# BuildArch needs to be located below patches in the spec file ++ + BuildArch: noarch + + BuildRequires: /usr/bin/openssl diff --git a/openstack/python-keystoneclient/centos/meta_patches/0003-meta-TiS-remote-client-sdk-patch.patch b/openstack/python-keystoneclient/centos/meta_patches/0003-meta-TiS-remote-client-sdk-patch.patch new file mode 100644 index 00000000..2417f565 --- /dev/null +++ b/openstack/python-keystoneclient/centos/meta_patches/0003-meta-TiS-remote-client-sdk-patch.patch @@ -0,0 +1,47 @@ +diff --git 
a/SPECS/python-keystoneclient.spec b/SPECS/python-keystoneclient.spec +index 2844d30..edd6720 100644 +--- a/SPECS/python-keystoneclient.spec ++++ b/SPECS/python-keystoneclient.spec +@@ -167,6 +167,12 @@ BuildRequires: python-openstackdocstheme + %description doc + Documentation for the keystoneclient module + ++%package sdk ++Summary: SDK files for %{name} ++ ++%description sdk ++Contains SDK files for %{name} package ++ + %prep + %autosetup -n %{name}-%{upstream_version} -S git + +@@ -194,6 +200,10 @@ rm -rf {test-,}requirements.txt + # Fix hidden-file-or-dir warnings + rm -fr doc/build/html/.{doctrees,buildinfo} + ++# prep SDK package ++mkdir -p %{buildroot}/usr/share/remote-clients/%{name} ++tar zcf %{buildroot}/usr/share/remote-clients/%{name}/%{name}-%{version}.tgz --exclude='.gitignore' --exclude='.gitreview' -C .. %{name}-%{version} ++ + + %check + %{__python2} setup.py test +@@ -202,6 +212,7 @@ rm -fr .testrepository + %{__python3} setup.py test + %endif + ++ + %files -n python2-keystoneclient + %license LICENSE + %doc README.rst +@@ -232,6 +243,10 @@ rm -fr .testrepository + %{python3_sitelib}/keystoneclient/tests + %endif + ++%files sdk ++/usr/share/remote-clients/%{name}/%{name}-%{version}.tgz ++ ++ + %changelog + * Fri Aug 11 2017 Alfredo Moralejo 1:3.13.0-1 + - Update to 3.13.0 diff --git a/openstack/python-keystoneclient/centos/meta_patches/0004-meta-dont-remove-requirements-txt.patch b/openstack/python-keystoneclient/centos/meta_patches/0004-meta-dont-remove-requirements-txt.patch new file mode 100644 index 00000000..457afaa1 --- /dev/null +++ b/openstack/python-keystoneclient/centos/meta_patches/0004-meta-dont-remove-requirements-txt.patch @@ -0,0 +1,13 @@ +diff --git a/SPECS/python-keystoneclient.spec b/SPECS/python-keystoneclient.spec +index edd6720..a41311e 100644 +--- a/SPECS/python-keystoneclient.spec ++++ b/SPECS/python-keystoneclient.spec +@@ -181,7 +181,7 @@ Contains SDK files for %{name} package + sed -i 's/^warning-is-error.*/warning-is-error = 0/g' setup.cfg + + # Let RPM handle the dependencies +-rm -rf {test-,}requirements.txt ++rm -f test-requirements.txt + + %build + %py2_build diff --git a/openstack/python-keystoneclient/centos/meta_patches/0006-meta-buildrequires-python-setuptools_scm.patch b/openstack/python-keystoneclient/centos/meta_patches/0006-meta-buildrequires-python-setuptools_scm.patch new file mode 100644 index 00000000..b5e83be4 --- /dev/null +++ b/openstack/python-keystoneclient/centos/meta_patches/0006-meta-buildrequires-python-setuptools_scm.patch @@ -0,0 +1,20 @@ +diff --git a/SPECS/python-keystoneclient.spec b/SPECS/python-keystoneclient.spec +index 965c437..efc0c4b 100644 +--- a/SPECS/python-keystoneclient.spec ++++ b/SPECS/python-keystoneclient.spec +@@ -34,6 +34,7 @@ Summary: Client library for OpenStack Identity API + + BuildRequires: python2-devel + BuildRequires: python-setuptools ++BuildRequires: python2-setuptools_scm + BuildRequires: python-pbr >= 2.0.0 + BuildRequires: git + +@@ -61,6 +62,7 @@ Summary: Client library for OpenStack Identity API + + BuildRequires: python3-devel + BuildRequires: python3-setuptools ++BuildRequires: python3-setuptools_scm + BuildRequires: python3-pbr >= 2.0.0 + + Requires: python3-oslo-config >= 2:4.0.0 diff --git a/openstack/python-keystoneclient/centos/meta_patches/PATCH_ORDER b/openstack/python-keystoneclient/centos/meta_patches/PATCH_ORDER new file mode 100644 index 00000000..3a2b04aa --- /dev/null +++ b/openstack/python-keystoneclient/centos/meta_patches/PATCH_ORDER @@ -0,0 +1,5 @@ 
+0001-Update-package-versioning-for-TIS-format.patch +0002-meta-public-adminURL-detection.patch +0003-meta-TiS-remote-client-sdk-patch.patch +0004-meta-dont-remove-requirements-txt.patch +0006-meta-buildrequires-python-setuptools_scm.patch diff --git a/openstack/python-keystoneclient/centos/patches/internal-keystone-client-public-adminURL-detection.patch b/openstack/python-keystoneclient/centos/patches/internal-keystone-client-public-adminURL-detection.patch new file mode 100644 index 00000000..af5c7cc9 --- /dev/null +++ b/openstack/python-keystoneclient/centos/patches/internal-keystone-client-public-adminURL-detection.patch @@ -0,0 +1,55 @@ +From e9d61bd41cfbe8cf424d13052c35f298f5beb1e2 Mon Sep 17 00:00:00 2001 +From: Scott Little +Date: Tue, 24 Jan 2017 15:19:33 -0500 +Subject: [PATCH] TiS-remote-client-sdk-patch + +--- + keystoneclient/v2_0/client.py | 18 ++++++++++++++++++ + 1 file changed, 18 insertions(+) + +diff --git a/keystoneclient/v2_0/client.py b/keystoneclient/v2_0/client.py +index 904f769..003c5b1 100644 +--- a/keystoneclient/v2_0/client.py ++++ b/keystoneclient/v2_0/client.py +@@ -13,6 +13,7 @@ + # License for the specific language governing permissions and limitations + # under the License. + ++import os + import logging + import warnings + +@@ -30,6 +31,9 @@ from keystoneclient.v2_0 import tenants + from keystoneclient.v2_0 import tokens + from keystoneclient.v2_0 import users + ++import requests ++from requests.packages.urllib3.exceptions import InsecureRequestWarning ++from requests.packages.urllib3 import disable_warnings as urllib3_disable_warnings + + _logger = logging.getLogger(__name__) + +@@ -154,6 +158,20 @@ class Client(httpclient.HTTPClient): + 'deprecated as of the 1.7.0 release and may be removed in ' + 'the 2.0.0 release.', DeprecationWarning) + ++ # NOTE(knasim-wrs): As per US76645, the Keystone adminURL ++ # is no longer an internal address since it needs to be ++ # accessible via remote Openstack client. Things get ++ # complicated with HTTPS where the internal keystone client ++ # gets this adminURL and cannot connect to Keystone server ++ # as it cannot verify the SSL certificate. 
++ # We will check for this condition here, if OS_ENDPOINT_TYPE ++ # is not publicURL then this is an internal access scenario and ++ # Keystone client will be set to SSL insecure mode ++ if os.environ.get('OS_ENDPOINT_TYPE') == 'internalURL': ++ kwargs['insecure'] = True ++ # disable verbose insecurity warnings ++ urllib3_disable_warnings(InsecureRequestWarning) ++ + super(Client, self).__init__(**kwargs) + + self.certificates = certificates.CertificatesManager(self._adapter) +-- +1.8.3.1 + diff --git a/openstack/python-keystoneclient/centos/srpm_path b/openstack/python-keystoneclient/centos/srpm_path new file mode 100644 index 00000000..d2bd6c67 --- /dev/null +++ b/openstack/python-keystoneclient/centos/srpm_path @@ -0,0 +1 @@ +mirror:Source/python-keystoneclient-3.13.0-1.el7.src.rpm diff --git a/openstack/python-keystoneclient/python-keystoneclient/CGCSkeyringsupport.patch b/openstack/python-keystoneclient/python-keystoneclient/CGCSkeyringsupport.patch new file mode 100644 index 00000000..9777de3b --- /dev/null +++ b/openstack/python-keystoneclient/python-keystoneclient/CGCSkeyringsupport.patch @@ -0,0 +1,144 @@ +Index: git/keystoneclient/shell.py +=================================================================== +--- git.orig/keystoneclient/shell.py 2014-09-17 13:06:07.761186569 -0400 ++++ git/keystoneclient/shell.py 2014-09-22 15:10:36.326737219 -0400 +@@ -24,6 +24,7 @@ + + from __future__ import print_function + ++import os + import argparse + import getpass + import logging +@@ -32,6 +33,8 @@ + + import six + ++import keyring ++ + import keystoneclient + from keystoneclient import access + from keystoneclient.contrib.bootstrap import shell as shell_bootstrap +@@ -333,6 +336,11 @@ + '--os-username or env[OS_USERNAME]') + + if not args.os_password: ++ # privilege check (only allow Keyring retrieval if we are root) ++ if os.geteuid() == 0: ++ args.os_password = keyring.get_password('CGCS', args.os_username) ++ ++ if not args.os_password: + # No password, If we've got a tty, try prompting for it + if hasattr(sys.stdin, 'isatty') and sys.stdin.isatty(): + # Check for Ctl-D +Index: git/keystoneclient/probe.py +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ git/keystoneclient/probe.py 2014-09-23 10:41:57.758412311 -0400 +@@ -0,0 +1,106 @@ ++# ++# Copyright (c) 2014 Wind River Systems, Inc. 
++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# ++ ++""" ++OCF sanity probe to prevent cleartext password ++""" ++ ++import os ++import sys ++import json ++import urllib2 ++import datetime ++import keyring ++import logging ++import logging.handlers ++ ++_loggers = {} ++ ++def get_logger(name): ++ """ Get a logger or create one """ ++ if name not in _loggers: ++ _loggers[name] = logging.getLogger(name) ++ ++ return _loggers[name] ++ ++ ++def setup_logger(logger): ++ """ Setup a logger """ ++ syslog_facility = logging.handlers.SysLogHandler.LOG_SYSLOG ++ ++ formatter = logging.Formatter("probe_keyring[%(process)d] " + ++ "%(pathname)s:%(lineno)s " + ++ "%(levelname)8s [%(name)s] %(message)s") ++ ++ handler = logging.handlers.SysLogHandler(address='/dev/log', ++ facility=syslog_facility) ++ handler.setLevel(logging.INFO) ++ handler.setFormatter(formatter) ++ ++ logger.addHandler(handler) ++ logger.setLevel(logging.INFO) ++ ++def configure(): ++ """ Setup logging """ ++ for logger in _loggers: ++ setup_logger(_loggers[logger]) ++ ++LOG = get_logger(__name__) ++ ++def probe(auth_url, tenant, login): ++ """ Asks OpenStack Keystone for a token """ ++ ++ try: ++ url = auth_url + "tokens" ++ request_info = urllib2.Request(url) ++ request_info.add_header("Content-type", "application/json") ++ request_info.add_header("Accept", "application/json") ++ payload = json.dumps( ++ {"auth": {"tenantName": tenant, ++ "passwordCredentials": {"username": login, ++ "password": keyring.get_password('CGCS',login)}}}) ++ request_info.add_data(payload) ++ ++ request = urllib2.urlopen(request_info) ++ response = json.loads(request.read()) ++ request.close() ++ return response['access']['token']['id'] ++ ++ except Exception as e: ++ LOG.error("%s" % e) ++ return None ++ ++def main(): ++ ++ global cmd_auth_url ++ global cmd_tenant ++ global cmd_os_username ++ ++ cmd_auth_url = "http://127.0.0.1:5000/v2.0/" ++ cmd_tenant = "tenant" ++ cmd_os_username = "username" ++ ++ configure() ++ ++# privilege check (only allow Keyring retrieval if we are root) ++ if os.geteuid() == 0: ++ arg = 1 ++ cmd_auth_url = sys.argv[arg] ++ arg += 1 ++ cmd_tenant = sys.argv[arg] ++ arg += 1 ++ cmd_os_username = sys.argv[arg] ++ ++ try: ++ token_id = probe(cmd_auth_url, cmd_tenant, cmd_os_username) ++ if token_id is None: ++ sys.exit(-1) ++ sys.exit(0) ++ except Exception as e: ++ sys.exit(-1) ++ diff --git a/openstack/python-keystoneclient/python-keystoneclient/extend_token_expiry_window.patch b/openstack/python-keystoneclient/python-keystoneclient/extend_token_expiry_window.patch new file mode 100644 index 00000000..715aa7de --- /dev/null +++ b/openstack/python-keystoneclient/python-keystoneclient/extend_token_expiry_window.patch @@ -0,0 +1,16 @@ +Index: python-keystoneclient-1.3.1/keystoneclient/auth/identity/base.py +=================================================================== +--- python-keystoneclient-1.3.1.orig/keystoneclient/auth/identity/base.py ++++ python-keystoneclient-1.3.1/keystoneclient/auth/identity/base.py +@@ -34,8 +34,9 @@ def get_options(): + @six.add_metaclass(abc.ABCMeta) + class BaseIdentityPlugin(base.BaseAuthPlugin): + +- # we count a token as valid if it is valid for at least this many seconds +- MIN_TOKEN_LIFE_SECONDS = 1 ++ # we count a token as valid (not needing refreshing) if it is valid for at ++ # least this many seconds before the token expiry time ++ MIN_TOKEN_LIFE_SECONDS = 120 + + def __init__(self, + auth_url=None, diff --git 
a/openstack/python-keystoneclient/python-keystoneclient/internal-keystone-client-public-adminURL-detection.patch b/openstack/python-keystoneclient/python-keystoneclient/internal-keystone-client-public-adminURL-detection.patch new file mode 100644 index 00000000..fb64fed0 --- /dev/null +++ b/openstack/python-keystoneclient/python-keystoneclient/internal-keystone-client-public-adminURL-detection.patch @@ -0,0 +1,45 @@ +--- + keystoneclient/v2_0/client.py | 18 ++++++++++++++++++ + 1 file changed, 18 insertions(+) + +--- a/keystoneclient/v2_0/client.py ++++ b/keystoneclient/v2_0/client.py +@@ -13,6 +13,7 @@ + # License for the specific language governing permissions and limitations + # under the License. + ++import os + import logging + + from keystoneclient.auth.identity import v2 as v2_auth +@@ -29,6 +30,8 @@ from keystoneclient.v2_0 import tenants + from keystoneclient.v2_0 import tokens + from keystoneclient.v2_0 import users + ++import requests ++from requests.packages.urllib3.exceptions import InsecureRequestWarning + + _logger = logging.getLogger(__name__) + +@@ -130,6 +133,21 @@ class Client(httpclient.HTTPClient): + + def __init__(self, **kwargs): + """Initialize a new client for the Keystone v2.0 API.""" ++ ++ # NOTE(knasim-wrs): As per US76645, the Keystone adminURL ++ # is no longer an internal address since it needs to be ++ # accessible via remote Openstack client. Things get ++ # complicated with HTTPS where the internal keystone client ++ # gets this adminURL and cannot connect to Keystone server ++ # as it cannot verify the SSL certificate. ++ # We will check for this condition here, if OS_ENDPOINT_TYPE ++ # is not publicURL then this is an internal access scenario and ++ # Keystone client will be set to SSL insecure mode ++ if os.environ.get('OS_ENDPOINT_TYPE') == 'internalURL': ++ kwargs['insecure'] = True ++ # disable verbose insecurity warnings ++ requests.packages.urllib3.disable_warnings(InsecureRequestWarning) ++ + super(Client, self).__init__(**kwargs) + + self.certificates = certificates.CertificatesManager(self._adapter) diff --git a/openstack/python-keystoneclient/python-keystoneclient/v2-client-empty-password-check.patch b/openstack/python-keystoneclient/python-keystoneclient/v2-client-empty-password-check.patch new file mode 100644 index 00000000..a7988f05 --- /dev/null +++ b/openstack/python-keystoneclient/python-keystoneclient/v2-client-empty-password-check.patch @@ -0,0 +1,26 @@ +--- + keystoneclient/v2_0/shell.py | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +--- a/keystoneclient/v2_0/shell.py ++++ b/keystoneclient/v2_0/shell.py +@@ -141,7 +141,8 @@ def do_user_password_update(kc, args): + """Update user password.""" + user = utils.find_resource(kc.users, args.user) + new_passwd = args.passwd or utils.prompt_for_password() +- if new_passwd is None: ++ # if password is empty or blank then reject it ++ if new_passwd is None or new_passwd.strip() == "": + msg = (_("\nPlease specify password using the --pass option " + "or using the prompt")) + sys.exit(msg) +@@ -167,7 +168,8 @@ def do_password_update(kc, args): + currentpasswd = getpass.getpass(_('Current Password: ')) + + newpasswd = args.newpasswd +- while newpasswd is None: ++ # don't allow empty or blank passwords ++ while newpasswd is None or newpasswd.strip() == "": + passwd1 = getpass.getpass(_('New Password: ')) + passwd2 = getpass.getpass(_('Repeat New Password: ')) + if passwd1 == passwd2: diff --git a/openstack/python-keystonemiddleware/centos/build_srpm.data 
b/openstack/python-keystonemiddleware/centos/build_srpm.data new file mode 100644 index 00000000..8aeb5536 --- /dev/null +++ b/openstack/python-keystonemiddleware/centos/build_srpm.data @@ -0,0 +1 @@ +TIS_PATCH_VER=1 diff --git a/openstack/python-keystonemiddleware/centos/meta_patches/0001-update-package-versioning-for-TIS-format.patch b/openstack/python-keystonemiddleware/centos/meta_patches/0001-update-package-versioning-for-TIS-format.patch new file mode 100644 index 00000000..687651e7 --- /dev/null +++ b/openstack/python-keystonemiddleware/centos/meta_patches/0001-update-package-versioning-for-TIS-format.patch @@ -0,0 +1,35 @@ +commit fd40ac6be0cb4e0dcc8295e9f9673fa5970e0035 +Author: Shoaib Nasir +Date: Wed Feb 14 17:00:55 2018 -0500 + + 0001-update-package-versioning-for-TIS-format + +diff --git a/SPECS/python-keystonemiddleware.spec b/SPECS/python-keystonemiddleware.spec +index 8ccc7b4..63e83d2 100644 +--- a/SPECS/python-keystonemiddleware.spec ++++ b/SPECS/python-keystonemiddleware.spec +@@ -9,7 +9,7 @@ + + Name: python-%{sname} + Version: 4.17.0 +-Release: 1%{?dist} ++Release: 1.el7%{?_tis_dist}.%{tis_patch_ver} + Summary: Middleware for OpenStack Identity + + License: ASL 2.0 +@@ -133,6 +133,7 @@ rm -rf %{sname}.egg-info + sed -i 's/^warning-is-error.*/warning-is-error = 0/g' setup.cfg + + %build ++export PBR_VERSION=%{version} + %py2_build + %if 0%{?with_python3} + %py3_build +@@ -147,6 +148,7 @@ rm -rf doc/build/html/.{doctrees,buildinfo} + + + %install ++export PBR_VERSION=%{version} + %if 0%{?with_python3} + %py3_install + # Delete tests diff --git a/openstack/python-keystonemiddleware/centos/meta_patches/0002-Upstream-gnocchi-panko-fix.patch b/openstack/python-keystonemiddleware/centos/meta_patches/0002-Upstream-gnocchi-panko-fix.patch new file mode 100644 index 00000000..2fea611e --- /dev/null +++ b/openstack/python-keystonemiddleware/centos/meta_patches/0002-Upstream-gnocchi-panko-fix.patch @@ -0,0 +1,21 @@ +commit 5ba75388d3394c3016570a4e68fb79aebd18bf31 +Author: Shoaib Nasir +Date: Wed Feb 14 19:01:00 2018 -0500 + + WRS: 0002-Upstream-gnocchi-panko-fix + +diff --git a/SPECS/python-keystonemiddleware.spec b/SPECS/python-keystonemiddleware.spec +index 63e83d2..cb3c9c9 100644 +--- a/SPECS/python-keystonemiddleware.spec ++++ b/SPECS/python-keystonemiddleware.spec +@@ -15,6 +15,10 @@ Summary: Middleware for OpenStack Identity + License: ASL 2.0 + URL: http://launchpad.net/keystonemiddleware + Source0: https://tarballs.openstack.org/%{sname}/%{sname}-%{version}.tar.gz ++ ++# WRS ++Patch0001: 0001-Upstream-gnocchi-panko-fix.patch ++ + BuildArch: noarch + + diff --git a/openstack/python-keystonemiddleware/centos/meta_patches/PATCH_ORDER b/openstack/python-keystonemiddleware/centos/meta_patches/PATCH_ORDER new file mode 100644 index 00000000..6d34b0da --- /dev/null +++ b/openstack/python-keystonemiddleware/centos/meta_patches/PATCH_ORDER @@ -0,0 +1,2 @@ +0001-update-package-versioning-for-TIS-format.patch +0002-Upstream-gnocchi-panko-fix.patch diff --git a/openstack/python-keystonemiddleware/centos/patches/0001-Upstream-gnocchi-panko-fix.patch b/openstack/python-keystonemiddleware/centos/patches/0001-Upstream-gnocchi-panko-fix.patch new file mode 100644 index 00000000..ffc4c938 --- /dev/null +++ b/openstack/python-keystonemiddleware/centos/patches/0001-Upstream-gnocchi-panko-fix.patch @@ -0,0 +1,68 @@ +commit c475ceb3658309e5c24bae2423e2ec1b125531d8 +Author: rpm-build +Date: Wed Feb 14 18:41:21 2018 -0500 + + 0002-Upstream-gnocchi-panko-bug + + + Expect paste.deploy and 
gnocchi/panko options + + The authtoken middleware has been printing warning log messages to + the API logs for all services, reporting unexpected conf keys. This + was traced back to paste.deploy adding 'here' and '__file__' and + both gnocchi and panko adding 'configkey' keys in wsgi apps though + these do not actually exist in the conf file. This change allows + for those keys without printing a warning that unnecessarily + confuses operators. + + But it's kind of a hack, especially the configkey bit. We shouldn't + have to know about gnocchi/panko specifics like this. And it doesn't + address the comment in the bug about what is seen for ironic. So I + think there will still be more to do here. + + Change-Id: I678482309c7dd35ce147bebf13ebefc84251fe91 + Partial-Bug: 1722444 + + Signed-off-by: Shoaib Nasir + +diff --git a/keystonemiddleware/_common/config.py b/keystonemiddleware/_common/config.py +index 3e38eba..de701b0 100644 +--- a/keystonemiddleware/_common/config.py ++++ b/keystonemiddleware/_common/config.py +@@ -49,17 +49,18 @@ def _conf_values_type_convert(group_name, all_options, conf): + for k, v in conf.items(): + dest = k + try: +- if v is not None: ++ # 'here' and '__file__' come from paste.deploy ++ # 'configkey' is added by panko and gnocchi ++ if v is not None and k not in ['here', '__file__', 'configkey']: + type_, dest = opt_types[k] + v = type_(v) + except KeyError: # nosec +- # This option is not known to auth_token. v is not converted. + _LOG.warning( +- 'The option "%s" in conf is not known to auth_token', k) ++ 'The option "%s" is not known to keystonemiddleware', k) + except ValueError as e: + raise exceptions.ConfigurationError( +- _('Unable to convert the value of %(key)s option into correct ' +- 'type: %(ex)s') % {'key': k, 'ex': e}) ++ _('Unable to convert the value of option "%(key)s" into ' ++ 'correct type: %(ex)s') % {'key': k, 'ex': e}) + opts[dest] = v + + return opts +diff --git a/keystonemiddleware/tests/unit/auth_token/test_auth_token_middleware.py b/keystonemiddleware/tests/unit/auth_token/test_auth_token_middleware.py +index 6c66aee..b3aa8ff 100644 +--- a/keystonemiddleware/tests/unit/auth_token/test_auth_token_middleware.py ++++ b/keystonemiddleware/tests/unit/auth_token/test_auth_token_middleware.py +@@ -495,7 +495,7 @@ class GeneralAuthTokenMiddlewareTest(BaseAuthTokenMiddlewareTest, + conf = { + 'wrong_key': '123' + } +- log = 'The option "wrong_key" in conf is not known to auth_token' ++ log = 'The option "wrong_key" is not known to keystonemiddleware' + auth_token.AuthProtocol(self.fake_app, conf) + self.assertThat(self.logger.output, matchers.Contains(log)) + diff --git a/openstack/python-keystonemiddleware/centos/srpm_path b/openstack/python-keystonemiddleware/centos/srpm_path new file mode 100644 index 00000000..ddead07c --- /dev/null +++ b/openstack/python-keystonemiddleware/centos/srpm_path @@ -0,0 +1 @@ +mirror:Source/python-keystonemiddleware-4.17.0-1.el7.src.rpm diff --git a/openstack/python-magnumclient/centos/build_srpm.data b/openstack/python-magnumclient/centos/build_srpm.data new file mode 100644 index 00000000..c5803ec7 --- /dev/null +++ b/openstack/python-magnumclient/centos/build_srpm.data @@ -0,0 +1,6 @@ +TAR_NAME="python-magnumclient" +SRC_DIR="$CGCS_BASE/git/python-magnumclient" + +TIS_BASE_SRCREV=6bef59aec50e41ec2133fad1bd1fc4c954a08312 +TIS_PATCH_VER=1 + diff --git a/openstack/python-magnumclient/centos/python-magnumclient.spec 
b/openstack/python-magnumclient/centos/python-magnumclient.spec new file mode 100644 index 00000000..78633795 --- /dev/null +++ b/openstack/python-magnumclient/centos/python-magnumclient.spec @@ -0,0 +1,232 @@ +%{!?upstream_version: %global upstream_version %{version}%{?milestone}} +%global sname python-magnumclient +%global pname magnumclient + +%if 0%{?fedora} >= 24 +%global with_python3 1 +%global default_python 3 +%else +%global default_python 2 +%endif + +Name: python-%{pname} +Version: 2.7.0 +Release: 1%{?_tis_dist}.%{tis_patch_ver} +Summary: Client library for Magnum API + +License: ASL 2.0 +URL: https://launchpad.net/python-magnumclient +Source0: https://tarballs.openstack.org/%{sname}/%{sname}-%{upstream_version}.tar.gz +BuildArch: noarch + +%description +This is a client library for Magnum built on the Magnum API. +It provides a Python API (the magnumclient module) and a +command-line tool (magnum). + +%package -n python2-%{pname} +Summary: Client library for Magnum API +%{?python_provide:%python_provide python2-%{pname}} + +BuildRequires: python2-devel +BuildRequires: python-setuptools +BuildRequires: python-pbr +BuildRequires: git + +# test dependencies +BuildRequires: python-oslo-utils +BuildRequires: python-stevedore +BuildRequires: python-requests +BuildRequires: python-oslo-i18n +BuildRequires: python-fixtures +BuildRequires: python-mock +BuildRequires: python-testtools +BuildRequires: python-keystoneauth1 +BuildRequires: python-prettytable + +Requires: python-babel +Requires: python-cryptography +Requires: python-decorator +Requires: python-keystoneauth1 >= 3.1.0 +Requires: python-oslo-i18n >= 2.1.0 +Requires: python-oslo-serialization >= 1.10.0 +Requires: python-oslo-utils >= 3.20.0 +Requires: python-osc-lib >= 1.7.0 +Requires: python-os-client-config >= 1.28.0 +Requires: python-pbr +Requires: python-prettytable +Requires: python-six + +%description -n python2-%{pname} +This is a client library for Magnum built on the Magnum API. +It provides a Python API (the magnumclient module) and a +command-line tool (magnum). + +%if 0%{?with_python3} +%package -n python3-%{pname} +Summary: Client library for Magnum API +%{?python_provide:%python_provide python3-%{pname}} + +BuildRequires: python3-devel +BuildRequires: python3-setuptools +BuildRequires: python3-pbr + +# test dependencies +BuildRequires: python3-oslo-utils +BuildRequires: python3-stevedore +BuildRequires: python3-requests +BuildRequires: python3-oslo-i18n +BuildRequires: python3-fixtures +BuildRequires: python3-mock +BuildRequires: python3-testtools +BuildRequires: python3-keystoneauth1 +BuildRequires: python3-prettytable + +Requires: python3-babel +Requires: python3-cryptography +Requires: python3-decorator +Requires: python3-keystoneauth1 >= 3.1.0 +Requires: python3-oslo-i18n >= 2.1.0 +Requires: python3-oslo-serialization >= 1.10.0 +Requires: python3-oslo-utils >= 3.20.0 +Requires: python3-osc-lib >= 1.7.0 +Requires: python3-os-client-config >= 1.28.0 +Requires: python3-pbr +Requires: python3-prettytable +Requires: python3-six + +%description -n python3-%{pname} +This is a client library for Magnum built on the Magnum API. +It provides a Python API (the magnumclient module) and a +command-line tool (magnum). 
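(A usage sketch for the magnumclient module named in the descriptions above, for orientation only; it is not shipped by this spec. The Keystone URL, credentials, and the Client("1", session=...) entry point are assumptions based on the upstream python-magnumclient documentation.)

    # Hypothetical example; endpoint and credentials are placeholders.
    from keystoneauth1 import session
    from keystoneauth1.identity import v3
    from magnumclient.client import Client

    auth = v3.Password(auth_url="http://controller:5000/v3",
                       username="admin", password="secret",
                       project_name="admin",
                       user_domain_id="default",
                       project_domain_id="default")
    magnum = Client("1", session=session.Session(auth=auth))
    for cluster in magnum.clusters.list():
        print("%s %s" % (cluster.name, cluster.status))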
+%endif + +%package -n python-%{pname}-doc +Summary: python-magnumclient documentation +BuildRequires: python-sphinx +BuildRequires: python-openstackdocstheme +BuildRequires: python-os-client-config +#BuildRequires: python-decorator + +%description -n python-%{pname}-doc +Documentation for python-magnumclient + +%package -n python-%{pname}-tests +Summary: Python-magnumclient test subpackage + +Requires: python-%{pname} = %{version}-%{release} +Requires: python-oslo-utils +Requires: python-stevedore +Requires: python-requests +Requires: python-oslo-i18n +Requires: python-fixtures +Requires: python-mock +Requires: python-testtools +Requires: python-keystoneauth1 +Requires: python-prettytable + +%description -n python-%{pname}-tests +Python-magnumclient test subpackage + +%if 0%{?with_python3} +%package -n python3-%{pname}-tests +Summary: Python-magnumclient test subpackage + +Requires: python3-%{pname} = %{version}-%{release} +Requires: python3-oslo-utils +Requires: python3-stevedore +Requires: python3-requests +Requires: python3-oslo-i18n +Requires: python3-fixtures +Requires: python3-mock +Requires: python3-testtools +Requires: python3-keystoneauth1 +Requires: python3-prettytable + +%description -n python3-%{pname}-tests +Python-magnumclient test subpackage +%endif + +%prep +%autosetup -n %{name}-%{upstream_version} -S git + +# let RPM handle deps +rm -rf {test-,}requirements.txt + +%build +export PBR_VERSION=%{version} +%py2_build + +%if 0%{?with_python3} +%py3_build +%endif +# generate html docs +%{__python2} setup.py build_sphinx -b html +# Fix hidden-file-or-dir warnings +rm -rf doc/build/html/.{doctrees,buildinfo} + +%install +export PBR_VERSION=%{version} + +install -p -D -m 644 tools/magnum.bash_completion %{buildroot}%{_sysconfdir}/bash_completion.d/magnum.bash_completion + +%if 0%{?with_python3} +%py3_install +%if %{default_python} >= 3 +mv %{buildroot}%{_bindir}/magnum ./magnum.py3 +%endif +%endif + +%py2_install + +%if 0%{?default_python} >= 3 +mv magnum.py3 %{buildroot}%{_bindir}/magnum +%endif + +#%check +# tests are failing due to unicode not defined +# we are skipping the test +#%{__python2} setup.py test || +#%if 0%{?with_python3} +#%{__python3} setup.py test || +#%endif + +%files -n python2-%{pname} +%doc README.rst +%license LICENSE +%{python2_sitelib}/%{pname} +%if 0%{?default_python} <= 2 +%{_bindir}/magnum +%endif +%{python2_sitelib}/*.egg-info +%exclude %{python2_sitelib}/%{pname}/tests +%{_sysconfdir}/bash_completion.d/magnum.bash_completion + +%if 0%{?with_python3} +%files -n python3-%{pname} +%doc README.rst +%license LICENSE +%if 0%{?default_python} >= 3 +%{_bindir}/magnum +%endif +%{python3_sitelib}/magnumclient +%{python3_sitelib}/*.egg-info +%exclude %{python3_sitelib}/%{pname}/tests +%endif + +%files -n python-%{pname}-doc +%license LICENSE +%doc doc/build/html + +%files -n python-%{pname}-tests +%{python2_sitelib}/%{pname}/tests + +%if 0%{?with_python3} +%files -n python3-%{pname}-tests +%{python3_sitelib}/%{pname}/tests +%endif + +%changelog +* Fri Aug 11 2017 Alfredo Moralejo 2.7.0-1 +- Update to 2.7.0 + diff --git a/openstack/python-muranoclient/centos/build_srpm.data b/openstack/python-muranoclient/centos/build_srpm.data new file mode 100644 index 00000000..fb1e0a9f --- /dev/null +++ b/openstack/python-muranoclient/centos/build_srpm.data @@ -0,0 +1,5 @@ +TAR_NAME="python-muranoclient" +SRC_DIR="$CGCS_BASE/git/python-muranoclient" + +TIS_BASE_SRCREV=9a31e6bd406b18b32a97bd11d0020426dd6ff318 +TIS_PATCH_VER=2 diff --git 
a/openstack/python-muranoclient/centos/python-muranoclient.spec b/openstack/python-muranoclient/centos/python-muranoclient.spec new file mode 100644 index 00000000..ab8b5589 --- /dev/null +++ b/openstack/python-muranoclient/centos/python-muranoclient.spec @@ -0,0 +1,194 @@ +%global pypi_name muranoclient + +%if 0%{?fedora} +%global with_python3 0 +%{!?python3_shortver: %global python3_shortver %(%{__python3} -c 'import sys; print(str(sys.version_info.major) + "." + str(sys.version_info.minor))')} +%endif + +%{!?upstream_version: %global upstream_version %{version}%{?milestone}} + +Name: python-%{pypi_name} +Version: 0.14.0 +Release: 1%{?_tis_dist}.%{tis_patch_ver} +Summary: Client library for OpenStack Murano API + +License: ASL 2.0 +URL: http://pypi.python.org/pypi/%{name} +Source0: https://tarballs.openstack.org/%{name}/%{name}-%{version}.tar.gz + +BuildArch: noarch + +%description +Client library for Murano built on the Murano API. It provides a Python +API (the muranoclient module) and a command-line tool (murano). + + +%package -n python2-%{pypi_name} + +BuildRequires: git +BuildRequires: python2-devel +BuildRequires: python-setuptools +BuildRequires: python-pbr >= 2.0.0 + +Requires: python-babel >= 2.3.4 +Requires: python-glanceclient >= 1:2.8.0 +Requires: python-httplib2 >= 0.7.5 +Requires: python-iso8601 >= 0.1.11 +Requires: python-keystoneclient >= 1:3.8.0 +Requires: python-murano-pkg-check >= 0.3.0 +Requires: python-pbr >= 2.0.0 +Requires: python-prettytable >= 0.7 +Requires: python-requests >= 2.10.0 +Requires: python-six >= 1.9.0 +Requires: python-yaql >= 1.1.0 +Requires: python-osc-lib >= 1.7.0 +Requires: python-oslo-log >= 3.22.0 +Requires: python-oslo-i18n >= 2.1.0 +Requires: python-oslo-serialization >= 1.10.0 +Requires: python-oslo-utils >= 3.20.0 +Requires: pyOpenSSL >= 0.14 +Requires: PyYAML >= 3.10 + +Summary: Client library for OpenStack Murano API. +%{?python_provide:%python_provide python2-%{pypi_name}} + +%description -n python2-%{pypi_name} +Client library for Murano built on the Murano API. It provides a Python +API (the muranoclient module) and a command-line tool (murano). + +# Python3 package +%if 0%{?with_python3} +%package -n python3-%{pypi_name} +Summary: Client library for OpenStack Murano API +%{?python_provide:%python_provide python3-%{pypi_name}} + +BuildRequires: python3-devel +BuildRequires: python3-setuptools +BuildRequires: python3-pbr >= 2.0.0 +BuildRequires: python-tools + +Requires: python3-babel >= 2.3.4 +Requires: python3-glanceclient >= 1:2.8.0 +Requires: python3-httplib2 >= 0.7.5 +Requires: python3-iso8601 >= 0.1.11 +Requires: python3-keystoneclient >= 1:3.8.0 +Requires: python3-murano-pkg-check >= 0.3.0 +Requires: python3-pbr >= 2.0.0 +Requires: python3-prettytable >= 0.7 +Requires: python3-requests >= 2.10.0 +Requires: python3-six >= 1.9.0 +Requires: python3-yaql >= 1.1.0 +Requires: python3-osc-lib >= 1.7.0 +Requires: python3-oslo-log >= 3.22.0 +Requires: python3-oslo-i18n >= 2.1.0 +Requires: python3-oslo-serialization >= 1.10.0 +Requires: python3-oslo-utils >= 3.20.0 +Requires: python3-pyOpenSSL >= 0.14 +Requires: python3-PyYAML >= 3.10 + +%description -n python3-%{pypi_name} +Client library for Murano built on the Murano API. It provides a Python +API (the muranoclient module) and a command-line tool (murano). 
+%endif + +# Documentation package +%package -n python-%{pypi_name}-doc +Summary: Documentation for OpenStack Murano API Client + +BuildRequires: python-sphinx +BuildRequires: python-openstackdocstheme + +%description -n python-%{pypi_name}-doc +Documentation for the client library for interacting with Openstack +Murano API. + +%package sdk +Summary: SDK files for %{name} + +%description sdk +Contains SDK files for %{name} package + +%prep +%autosetup -n %{name}-%{upstream_version} -S git +# Remove bundled egg-info +rm -rf %{pypi_name}.egg-info +# Let RPM handle the dependencies +rm -f test-requirements.txt requirements.txt + +%if 0%{?with_python3} +rm -rf %{py3dir} +cp -a . %{py3dir} +2to3 --write --nobackups %{py3dir} +%endif + +%build +export PBR_VERSION=%{version} +%{__python2} setup.py build + +%if 0%{?with_python3} +pushd %{py3dir} +LANG=en_US.UTF-8 %{__python3} setup.py build +popd +%endif + +# generate html docs +%{__python2} setup.py build_sphinx -b html +# remove the sphinx-build leftovers +rm -rf doc/build/html/.{doctrees,buildinfo} + +%install +export PBR_VERSION=%{version} +%if 0%{?with_python3} +pushd %{py3dir} +LANG=en_US.UTF-8 %{__python3} setup.py install --skip-build --root %{buildroot} +mv %{buildroot}%{_bindir}/murano %{buildroot}%{_bindir}/python3-murano +popd +%endif + +%{__python2} setup.py install --skip-build --root %{buildroot} + +install -p -D -m 644 tools/murano.bash_completion %{buildroot}%{_sysconfdir}/bash_completion.d/murano.bash_completion + +# rename binaries, make compat symlinks +pushd %{buildroot}%{_bindir} +%if 0%{?with_python3} +for i in %{pypi_name}-{3,%{?python3_shortver}}; do + ln -s python3-%{pypi_name} $i +done +%endif +popd + +# prep SDK package +mkdir -p %{buildroot}/usr/share/remote-clients +tar zcf %{buildroot}/usr/share/remote-clients/%{name}-%{version}.tgz --exclude='.gitignore' --exclude='.gitreview' -C .. 
%{name}-%{version} + +%files -n python2-%{pypi_name} +%license LICENSE +%doc README.rst +%{python2_sitelib}/%{pypi_name} +%{python2_sitelib}/python_%{pypi_name}-*-py?.?.egg-info +%{_bindir}/murano* +%{_sysconfdir}/bash_completion.d/murano.bash_completion + +# Files for python3 +%if 0%{?with_python3} +%files -n python3-%{pypi_name} +%license LICENSE +%doc README.rst +%{_bindir}/python3-murano +%{_bindir}/murano* +%{python3_sitelib}/%{pypi_name} +%{python3_sitelib}/python_%{pypi_name}-%{version}-py?.?.egg-info +%endif + +%files -n python-%{pypi_name}-doc +%doc doc/build/html +%license LICENSE + +%files sdk +/usr/share/remote-clients/%{name}-%{version}.tgz + +%changelog +* Mon Aug 14 2017 Alfredo Moralejo 0.14.0-1 +- Update to 0.14.0 + diff --git a/openstack/python-networking-bgpvpn/centos/build_srpm.data b/openstack/python-networking-bgpvpn/centos/build_srpm.data new file mode 100644 index 00000000..9f4edb31 --- /dev/null +++ b/openstack/python-networking-bgpvpn/centos/build_srpm.data @@ -0,0 +1,4 @@ +TAR_NAME=python-networking-bgpvpn +SRC_DIR="$CGCS_BASE/git/networking-bgpvpn" +TIS_BASE_SRCREV=bf87af3bab560f737bd3de273733a5702a23d939 +TIS_PATCH_VER=GITREVCOUNT diff --git a/openstack/python-networking-bgpvpn/centos/python-networking-bgpvpn.spec b/openstack/python-networking-bgpvpn/centos/python-networking-bgpvpn.spec new file mode 100644 index 00000000..dfe26a9c --- /dev/null +++ b/openstack/python-networking-bgpvpn/centos/python-networking-bgpvpn.spec @@ -0,0 +1,156 @@ +%{!?upstream_version: %global upstream_version %{version}%{?milestone}} + +%global pypi_name networking-bgpvpn +%global sname networking_bgpvpn +%global service neutron + +Name: python-%{pypi_name} +Version: 7.0.0 +Release: 0%{?_tis_dist}.%{tis_patch_ver} +Summary: API and Framework to interconnect bgpvpn to neutron networks + +License: ASL 2.0 +URL: https://github.com/openstack/networking-bgpvpn +Source0: %{name}-%{version}.tar.gz + +BuildArch: noarch + +BuildRequires: python-webob +BuildRequires: python-webtest +BuildRequires: python-coverage +BuildRequires: python-hacking +BuildRequires: python-neutron-tests +BuildRequires: python-neutron +BuildRequires: python-oslo-sphinx +BuildRequires: python-oslotest +BuildRequires: python-openstackclient +BuildRequires: python-openvswitch +BuildRequires: python-pbr +BuildRequires: python-reno +BuildRequires: python-setuptools +BuildRequires: python-sphinx +BuildRequires: python-subunit +BuildRequires: python-testrepository +BuildRequires: python-testresources +BuildRequires: python-testscenarios +BuildRequires: python-testtools +BuildRequires: python2-devel + +%description +BGP/MPLS VPN Extension for OpenStack Networking. This project provides an API and +Framework to interconnect BGP/MPLS VPNs to OpenStack Neutron networks, routers +and ports. The Border Gateway Protocol and MultiProtocol Label Switching are +widely used Wide Area Networking technologies. The primary purpose of this +project is to allow attachment of Neutron networks and/or routers to carrier +provided VPNs. 
+ +%package -n python2-%{pypi_name} +Summary: API and Framework to interconnect bgpvpn to neutron networks +%{?python_provide:%python_provide python2-%{pypi_name}} + +Requires: python-pbr >= 1.6 +Requires: python-babel >= 2.3.4 +Requires: python-neutron-lib >= 0.4.0 +Requires: python-oslo-config >= 2.3.0 +Requires: python-oslo-i18n >= 2.1.0 +Requires: python-oslo-db >= 2.4.1 +Requires: python-oslo-log >= 1.8.0 +Requires: python-oslo-utils >= 2.0.0 +Requires: python-setuptools + +%description -n python2-%{pypi_name} +BGP/MPLS VPN Extension for OpenStack Networking. This project provides an API and +Framework to interconnect BGP/MPLS VPNs to OpenStack Neutron networks, routers +and ports. The Border Gateway Protocol and MultiProtocol Label Switching are +widely used Wide Area Networking technologies. The primary purpose of this +project is to allow attachment of Neutron networks and/or routers to carrier +provided VPNs. + +%package -n python-%{pypi_name}-doc +Summary: networking-bgpvpn documentation +%description -n python-%{pypi_name}-doc +Documentation for networking-bgpvpn + +%package -n python-%{pypi_name}-tests +Summary: networking-bgpvpn tests +Requires: python-%{pypi_name} = %{version}-%{release} + +%description -n python-%{pypi_name}-tests +Networking-bgpvpn set of tests + +%package -n python-%{pypi_name}-dashboard +Summary: networking-bgpvpn dashboard +Requires: python-%{pypi_name} = %{version}-%{release} + +%description -n python-%{pypi_name}-dashboard +Dashboard to be able to handle BGPVPN functionality via Horizon + +%package -n python-%{pypi_name}-heat +Summary: networking-bgpvpn heat +Requires: python-%{pypi_name} = %{version}-%{release} + +%description -n python-%{pypi_name}-heat +Networking-bgpvpn heat resources + +%prep +%autosetup -n %{name}-%{upstream_version} -S git +# Remove bundled egg-info +rm -rf %{pypi_name}.egg-info + +%build +export PBR_VERSION=%{version} +%py2_build +# generate html docs +# TODO: the doc generation is commented until python-sphinxcontrib-* packages +# are included in CBS. This needs to be fixed. 
+#%{__python2} setup.py build_sphinx +# remove the sphinx-build leftovers +rm -rf html/.{doctrees,buildinfo} + +%install +export PBR_VERSION=%{version} +%py2_install + +mkdir -p %{buildroot}%{_sysconfdir}/%{service}/policy.d +mv %{buildroot}/usr/etc/neutron/networking_bgpvpn.conf %{buildroot}%{_sysconfdir}/%{service}/ +mv %{buildroot}/usr/etc/neutron/policy.d/bgpvpn.conf %{buildroot}%{_sysconfdir}/%{service}/policy.d/ + +# Make sure neutron-server loads new configuration file +mkdir -p %{buildroot}/%{_datadir}/neutron/server +ln -s %{_sysconfdir}/%{service}/networking_bgpvpn.conf %{buildroot}%{_datadir}/%{service}/server/networking_bgpvpn.conf + + +%files -n python2-%{pypi_name} +%license LICENSE +%doc README.rst +%{python2_sitelib}/%{sname} +%{python2_sitelib}/networking_bgpvpn-*.egg-info +%config(noreplace) %attr(0640, root, %{service}) %{_sysconfdir}/%{service}/networking_bgpvpn.conf +%config(noreplace) %attr(0640, root, %{service}) %{_sysconfdir}/%{service}/policy.d/bgpvpn.conf +%{_datadir}/%{service}/server/networking_bgpvpn.conf +%exclude %{python2_sitelib}/%{sname}/tests +%exclude %{python2_sitelib}/bgpvpn_dashboard +%exclude %{python2_sitelib}/networking_bgpvpn_heat +%exclude %{python2_sitelib}/networking_bgpvpn_tempest + +%files -n python-%{pypi_name}-doc +#%doc html +%license LICENSE + +%files -n python-%{pypi_name}-tests +%license LICENSE +%doc networking_bgpvpn_tempest/README.rst +%{python2_sitelib}/networking_bgpvpn_tempest +%{python2_sitelib}/%{sname}/tests + +%files -n python-%{pypi_name}-dashboard +%license LICENSE +%{python2_sitelib}/bgpvpn_dashboard/ + +%files -n python-%{pypi_name}-heat +%license LICENSE +%{python2_sitelib}/networking_bgpvpn_heat + +%changelog +* Mon Mar 13 2017 Matt Peters 5.0.0-0 +- Initial Version based on CentOS distribution. 
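For orientation, the BGPVPN API packaged above is served by neutron-server once the service plugin is enabled; a minimal sketch of exercising it over REST follows. It is illustrative only: the Neutron URL, token, route target, and network UUID are placeholder assumptions, and the /v2.0/bgpvpn/ paths follow the upstream networking-bgpvpn API reference.

    # Hypothetical example; endpoint, token and IDs are placeholders.
    import requests

    NEUTRON_URL = "http://controller:9696"
    HEADERS = {"X-Auth-Token": "ASSUMED-KEYSTONE-TOKEN"}

    # Create a BGP VPN carrying one import/export route target.
    resp = requests.post(NEUTRON_URL + "/v2.0/bgpvpn/bgpvpns",
                         headers=HEADERS,
                         json={"bgpvpn": {"name": "demo-vpn",
                                          "route_targets": ["64512:1"]}})
    resp.raise_for_status()
    bgpvpn_id = resp.json()["bgpvpn"]["id"]

    # Attach an existing Neutron network to the VPN.
    requests.post(NEUTRON_URL + "/v2.0/bgpvpn/bgpvpns/%s/network_associations"
                  % bgpvpn_id,
                  headers=HEADERS,
                  json={"network_association": {"network_id":
                                                "ASSUMED-NETWORK-UUID"}})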
diff --git a/openstack/python-networking-odl/centos/build_srpm.data b/openstack/python-networking-odl/centos/build_srpm.data new file mode 100644 index 00000000..9f51653e --- /dev/null +++ b/openstack/python-networking-odl/centos/build_srpm.data @@ -0,0 +1,4 @@ +TAR_NAME=python-networking-odl +SRC_DIR="$CGCS_BASE/git/networking-odl" +TIS_BASE_SRCREV=77cbc291fb689ce01e88963ce8e34e4df3dfe2bf +TIS_PATCH_VER=GITREVCOUNT diff --git a/openstack/python-networking-odl/centos/python-networking-odl.spec b/openstack/python-networking-odl/centos/python-networking-odl.spec new file mode 100644 index 00000000..31d4a205 --- /dev/null +++ b/openstack/python-networking-odl/centos/python-networking-odl.spec @@ -0,0 +1,80 @@ +%global drv_vendor OpenDaylight +%global pkgname networking-odl +%global srcname networking_odl +%global docpath doc/build/html + +%{!?upstream_version: %global upstream_version %{version}%{?milestone}} + +Name: python-%{pkgname} +Epoch: 1 +Version: 11.0.0 +Release: 0%{?_tis_dist}.%{tis_patch_ver} +Summary: %{drv_vendor} OpenStack Neutron driver + +License: ASL 2.0 +URL: https://pypi.python.org/pypi/%{pkgname} +Source0: %{name}-%{version}.tar.gz + +BuildArch: noarch + +BuildRequires: git +BuildRequires: python2-devel +BuildRequires: python-mock +#BuildRequires: python-neutron-tests +BuildRequires: python-openstackdocstheme +#BuildRequires: python-oslotest +BuildRequires: python-oslo-config +BuildRequires: python-oslo-sphinx +BuildRequires: python-pbr +BuildRequires: python-sphinx +BuildRequires: python-testrepository +BuildRequires: python-testtools + +Requires: openstack-neutron-ml2 +Requires: python-babel +Requires: python-pbr +Requires: python-websocket-client +Requires: python-stevedore +Requires: python-neutron-lib +Requires: python-debtcollector + +%description +This package contains %{drv_vendor} networking driver for OpenStack Neutron. + + +%prep +%autosetup -n %{name}-%{upstream_version} -S git +# Remove gate hooks +rm -rf %{srcname}/tests/contrib + +%build +export PBR_VERSION=%{version} +rm requirements.txt test-requirements.txt +%{__python2} setup.py build +%{__python2} setup.py build_sphinx -b html +rm %{docpath}/.buildinfo + + +#%check +#%{__python2} setup.py testr + + +%install +export PBR_VERSION=%{version} +export SKIP_PIP_INSTALL=1 +%{__python2} setup.py install --skip-build --root %{buildroot} + +%files +%license LICENSE +%doc %{docpath} +%{_bindir}/neutron-odl-ovs-hostconfig +%{python2_sitelib}/%{srcname} +%{python2_sitelib}/%{srcname}-%{version}-py%{python2_version}.egg-info + +%changelog +* Wed Aug 30 2017 rdo-trunk 1:11.0.0-1 +- Update to 11.0.0 + +* Fri Aug 25 2017 Alfredo Moralejo 1:11.0.0-0.1.0rc2 +- Update to 11.0.0.0rc2 + diff --git a/openstack/python-networking-odl/python-networking-odl/LICENSE b/openstack/python-networking-odl/python-networking-odl/LICENSE new file mode 100644 index 00000000..68c771a0 --- /dev/null +++ b/openstack/python-networking-odl/python-networking-odl/LICENSE @@ -0,0 +1,176 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ diff --git a/openstack/python-networking-sfc/centos/build_srpm.data b/openstack/python-networking-sfc/centos/build_srpm.data new file mode 100644 index 00000000..cc6c8813 --- /dev/null +++ b/openstack/python-networking-sfc/centos/build_srpm.data @@ -0,0 +1,4 @@ +TAR_NAME=python-networking-sfc +SRC_DIR="$CGCS_BASE/git/networking-sfc" +TIS_BASE_SRCREV=af2ee0cf50e2295d2b0eee48640d81fcd6c472c8 +TIS_PATCH_VER=GITREVCOUNT diff --git a/openstack/python-networking-sfc/centos/python-networking-sfc.spec b/openstack/python-networking-sfc/centos/python-networking-sfc.spec new file mode 100644 index 00000000..14ef9423 --- /dev/null +++ b/openstack/python-networking-sfc/centos/python-networking-sfc.spec @@ -0,0 +1,185 @@ +%global pypi_name networking-sfc +%global module networking_sfc +%{!?upstream_version: %global upstream_version %{version}%{?milestone}} + +Name: python-%{pypi_name} +Version: 5.0.0 +Release: 1%{?_tis_dist}.%{tis_patch_ver} +Summary: API and implementations to support Service Function Chaining in Neutron + +License: ASL 2.0 +URL: https://github.com/openstack/networking-sfc +Source0: %{name}-%{version}.tar.gz + +# + +BuildArch: noarch + +BuildRequires: openstack-macros +BuildRequires: git +BuildRequires: python2-devel +BuildRequires: python-pbr +BuildRequires: python-openstackdocstheme +BuildRequires: python-sphinx +#BuildRequires: openstack-neutron +# Test requirements +BuildRequires: python-mock +BuildRequires: python-requests-mock +BuildRequires: python-oslotest +BuildRequires: python-testrepository +BuildRequires: python-testresources +BuildRequires: python-testscenarios +BuildRequires: python-neutron-lib-tests +BuildRequires: python-neutron-tests + +%description +This project provides APIs and implementations to support Service Function +Chaining in Neutron. + +Service Function Chaining is a mechanism for overriding the basic destination +based forwarding that is typical of IP networks. It is conceptually related to +Policy Based Routing in physical networks but it is typically thought of as a +Software Defined Networking technology. It is often used in conjunction with +security functions although it may be used for a broader range of features. +Fundamentally SFC is the ability to cause network packet flows to route through +a network via a path other than the one that would be chosen by routing table +lookup on the packet's destination IP address. It is most commonly used in +conjunction with Network Function Virtualization when recreating in a virtual +environment a series of network functions that would have traditionally been +implemented as a collection of physical network devices connected in series by +cables. 
+ +%package -n python2-%{pypi_name} +Summary: API and implementations to support Service Function Chaining in Neutron +%{?python_provide:%python_provide python2-%{pypi_name}} + +#Requires: openstack-neutron-common +#Requires: openstack-neutron +Requires: python-alembic +Requires: python-eventlet +Requires: python-netaddr +Requires: python-neutron +Requires: python-neutron-lib >= 1.9.0 +Requires: python-neutronclient >= 6.3.0 +Requires: python-oslo-config >= 2:4.0.0 +Requires: python-oslo-i18n >= 2.1.0 +Requires: python-oslo-log >= 3.22.0 +Requires: python-oslo-messaging >= 5.24.2 +Requires: python-oslo-serialization >= 1.10.0 +Requires: python-oslo-utils >= 3.20.0 +Requires: python-six +Requires: python-sqlalchemy +Requires: python-stevedore >= 1.20.0 + +%description -n python2-%{pypi_name} +This project provides APIs and implementations to support Service Function +Chaining in Neutron. + +Service Function Chaining is a mechanism for overriding the basic destination +based forwarding that is typical of IP networks. It is conceptually related to +Policy Based Routing in physical networks but it is typically thought of as a +Software Defined Networking technology. It is often used in conjunction with +security functions although it may be used for a broader range of features. +Fundamentally SFC is the ability to cause network packet flows to route through +a network via a path other than the one that would be chosen by routing table +lookup on the packet's destination IP address. It is most commonly used in +conjunction with Network Function Virtualization when recreating in a virtual +environment a series of network functions that would have traditionally been +implemented as a collection of physical network devices connected in series by +cables. + + +%package -n python-%{pypi_name}-doc +Summary: Documentation for networking-sfc +%description -n python-%{pypi_name}-doc +Documentation for networking-sfc + +%package -n python2-%{pypi_name}-tests +Summary: Tests for networking-sfc +Requires: python2-%{pypi_name} = %{version}-%{release} +Requires: python-mock +Requires: python-requests-mock +Requires: python-oslotest +Requires: python-testrepository +Requires: python-testresources +Requires: python-testscenarios +Requires: python-neutron-lib-tests +Requires: python-neutron-tests + +%description -n python2-%{pypi_name}-tests +Networking-sfc set of tests + +%package -n python2-%{pypi_name}-tests-tempest +Summary: Tempest plugin for %{name} + +Requires: python2-%{pypi_name} = %{version}-%{release} +Requires: python-tempest-tests + +%description -n python2-%{pypi_name}-tests-tempest +It contains the tempest plugin for %{name}. + +%prep +%autosetup -n %{name}-%{upstream_version} +# Let RPM handle the dependencies +%py_req_cleanup + +# Remove bundled egg-info +rm -rf %{pypi_name}.egg-info +# FIXME(bcafarel): require neutronclient.tests.unit (python-neutronclient-tests package was dropped) +rm -rf %{module}/tests/unit/cli + +%build +export PBR_VERSION=%{version} +%py2_build +%{__python2} setup.py build_sphinx -b html +# remove the sphinx-build leftovers +rm -rf doc/build/html/.{doctrees,buildinfo} +# generate the configuration file +PYTHONPATH=. 
oslo-config-generator --config-file etc/oslo-config-generator/networking-sfc.conf + + +%install +export PBR_VERSION=%{version} +%py2_install + +# Create a fake tempest plugin entrypoint +%py2_entrypoint %{module} %{pypi_name} + +# The generated config files are not moved automatically by setup.py +mkdir -p %{buildroot}%{_sysconfdir}/neutron/conf.d/neutron-server +mv etc/networking-sfc.conf.sample %{buildroot}%{_sysconfdir}/neutron/conf.d/neutron-server/networking-sfc.conf + +#%check +#export OS_TEST_PATH='./networking_sfc/tests/functional' +#export PATH=$PATH:$RPM_BUILD_ROOT/usr/bin +#%{__python2} setup.py testr + +%files -n python2-%{pypi_name} +%license LICENSE +%doc README.rst +%{python2_sitelib}/%{module} +%{python2_sitelib}/%{module}-*.egg-info +%config(noreplace) %attr(0640, root, neutron) %{_sysconfdir}/neutron/conf.d/neutron-server/networking-sfc.conf +%exclude %{python2_sitelib}/%{module}/tests + +%files -n python-%{pypi_name}-doc +%doc doc/build/html/* +%license LICENSE + +%files -n python2-%{pypi_name}-tests +%{python2_sitelib}/%{module}/tests +%exclude %{python2_sitelib}/%{module}/tests/contrib +%exclude %{python2_sitelib}/%{module}/tests/tempest_plugin + +%files -n python2-%{pypi_name}-tests-tempest +%{python2_sitelib}/%{module}_tests.egg-info +%{python2_sitelib}/%{module}/tests/tempest_plugin +%{python2_sitelib}/%{module}/tests/__init__.py* + +%changelog +* Wed Aug 30 2017 rdo-trunk 5.0.0-1 +- Update to 5.0.0 + +* Fri Aug 25 2017 Alfredo Moralejo 5.0.0-0.1.0rc2 +- Update to 5.0.0.0rc2 + diff --git a/openstack/python-neutron-dynamic-routing/centos/build_srpm.data b/openstack/python-neutron-dynamic-routing/centos/build_srpm.data new file mode 100644 index 00000000..ad2138c8 --- /dev/null +++ b/openstack/python-neutron-dynamic-routing/centos/build_srpm.data @@ -0,0 +1,5 @@ +TAR_NAME=python-neutron-dynamic-routing +SRC_DIR="$CGCS_BASE/git/neutron-dynamic-routing" +COPY_LIST="$FILES_BASE/*" +TIS_BASE_SRCREV=9098d4447581117e857d2f86fb4a0508b5ffbb6a +TIS_PATCH_VER=GITREVCOUNT diff --git a/openstack/python-neutron-dynamic-routing/centos/files/neutron-bgp-dragent.init b/openstack/python-neutron-dynamic-routing/centos/files/neutron-bgp-dragent.init new file mode 100755 index 00000000..cf7c9eeb --- /dev/null +++ b/openstack/python-neutron-dynamic-routing/centos/files/neutron-bgp-dragent.init @@ -0,0 +1,97 @@ +#!/bin/sh + +### BEGIN INIT INFO +# Provides: neutron-bgp-dragent +# Required-Start: $remote_fs $syslog +# Required-Stop: $remote_fs $syslog +# Default-Start: 2 3 5 +# Default-Stop: 0 1 6 +# Short-Description: neutron-bgp-dragent +# Description: Provides the Neutron bgp dynamic-routing agent +### END INIT INFO + +DESC="neutron-bgp-dragent" +DAEMON="/usr/bin/neutron-bgp-dragent" +PIDFILE="/var/run/neutron-bgp-dragent.pid" +DAEMON_ARGS="--config-file=/etc/neutron/neutron.conf --config-file=/etc/neutron/bgp_dragent.ini" + +start() +{ + if [ -e $PIDFILE ]; then + PIDDIR=/proc/$(cat $PIDFILE) + if [ -d ${PIDDIR} ]; then + echo "$DESC already running." + exit 1 + else + echo "Removing stale PID file $PIDFILE" + rm -f $PIDFILE + fi + fi + + echo -n "Starting $DESC..." + + start-stop-daemon --start --quiet --background \ + --pidfile ${PIDFILE} --make-pidfile --exec ${DAEMON} \ + -- $DAEMON_ARGS + + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." + fi +} + +stop() +{ + echo -n "Stopping $DESC..." + start-stop-daemon --stop --quiet --pidfile $PIDFILE + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." 
+ fi + rm -f $PIDFILE +} + +status() +{ + pid=`cat $PIDFILE 2>/dev/null` + if [ -n "$pid" ]; then + if ps -p $pid &>/dev/null ; then + echo "$DESC is running" + return + fi + fi + echo "$DESC is not running" +} + +reset() +{ + stop + start +} + +case "$1" in + start) + start + ;; + stop) + stop + ;; + restart|force-reload|reload) + stop + start + ;; + status) + status + ;; + reset) + reset + ;; + *) + echo "Usage: $0 {start|stop|force-reload|restart|reload|status|reset}" + exit 1 + ;; +esac + +exit 0 diff --git a/openstack/python-neutron-dynamic-routing/centos/files/neutron-bgp-dragent.pmon b/openstack/python-neutron-dynamic-routing/centos/files/neutron-bgp-dragent.pmon new file mode 100644 index 00000000..8b956d9c --- /dev/null +++ b/openstack/python-neutron-dynamic-routing/centos/files/neutron-bgp-dragent.pmon @@ -0,0 +1,16 @@ +[process] +process = neutron-bgp-dragent +pidfile = /var/run/neutron-bgp-dragent.pid +script = /etc/init.d/neutron-bgp-dragent +style = lsb ; ocf or lsb +severity = major ; minor, major, critical +restarts = 3 ; restarts before error assertion +interval = 5 ; number of seconds to wait between restarts +debounce = 20 ; number of seconds that a process needs to remain + ; running before degrade is removed and retry count + ; is cleared. +startuptime = 5 ; Seconds to wait after process start before starting the debounce monitor +mode = passive ; Monitoring mode: passive (default) or active + ; passive: process death monitoring (default: always) + ; active : heartbeat monitoring, i.e. request / response messaging + ; ignore : do not monitor or stop monitoring diff --git a/openstack/python-neutron-dynamic-routing/centos/files/neutron-bgp-dragent.service b/openstack/python-neutron-dynamic-routing/centos/files/neutron-bgp-dragent.service new file mode 100644 index 00000000..f6602021 --- /dev/null +++ b/openstack/python-neutron-dynamic-routing/centos/files/neutron-bgp-dragent.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack Neutron BGP Dynamic Routing agent +After=syslog.target neutron-server.service + +[Service] +Type=simple +ExecStart=/etc/rc.d/init.d/neutron-bgp-dragent start +ExecStop=/etc/rc.d/init.d/neutron-bgp-dragent stop +ExecReload=/etc/rc.d/init.d/neutron-bgp-dragent reload +PrivateTmp=true +PIDFile=/var/run/neutron-bgp-dragent.pid +KillMode=process + +[Install] +WantedBy=multi-user.target diff --git a/openstack/python-neutron-dynamic-routing/centos/python-neutron-dynamic-routing.spec b/openstack/python-neutron-dynamic-routing/centos/python-neutron-dynamic-routing.spec new file mode 100644 index 00000000..6aab257b --- /dev/null +++ b/openstack/python-neutron-dynamic-routing/centos/python-neutron-dynamic-routing.spec @@ -0,0 +1,130 @@ +%{!?upstream_version: %global upstream_version %{version}%{?milestone}} + +%global pypi_name neutron-dynamic-routing +%global sname neutron_dynamic_routing +%global service neutron + +Name: python-%{pypi_name} +Version: 11.0.0 +Release: 0%{?_tis_dist}.%{tis_patch_ver} +Summary: Dynamic routing services for OpenStack Neutron. 
+
+License: ASL 2.0
+URL: https://github.com/openstack/neutron-dynamic-routing
+Source0: %{name}-%{version}.tar.gz
+
+# WRS
+Source1: neutron-bgp-dragent.init
+Source2: neutron-bgp-dragent.service
+Source3: neutron-bgp-dragent.pmon
+
+BuildArch: noarch
+
+BuildRequires: python-coverage
+BuildRequires: python-hacking
+BuildRequires: python-oslo-config
+BuildRequires: python-oslo-sphinx
+BuildRequires: python-oslotest
+BuildRequires: python-pbr
+BuildRequires: python-setuptools
+BuildRequires: python-sphinx
+BuildRequires: python-subunit
+BuildRequires: python-testrepository
+BuildRequires: python-testscenarios
+BuildRequires: python-testtools
+BuildRequires: python2-devel
+
+%description
+Neutron dynamic routing enables advertisement of self-service (private) network prefixes
+to physical network devices that support dynamic routing protocols such as routers, thus
+removing the conventional dependency on static routes.
+
+%package -n python2-%{pypi_name}
+Summary: Dynamic routing services for OpenStack Neutron.
+
+Requires: python-pbr >= 1.6
+Requires: python-eventlet >= 0.18.4
+Requires: python-httplib2 >= 0.7.5
+Requires: python-netaddr >= 0.7.18
+Requires: python-six >= 1.9.0
+Requires: python-neutron-lib >= 0.4.0
+Requires: python-oslo-config >= 3.14.0
+Requires: python-oslo-db >= 4.13.3
+Requires: python-oslo-log >= 1.14.0
+Requires: python-oslo-messaging >= 5.2.0
+Requires: python-oslo-serialization >= 1.10.0
+Requires: python-oslo-service >= 1.10.0
+Requires: python-oslo-utils >= 3.16.0
+
+%description -n python2-%{pypi_name}
+Neutron dynamic routing enables advertisement of self-service (private) network prefixes
+to physical network devices that support dynamic routing protocols such as routers, thus
+removing the conventional dependency on static routes.
+
+%package -n python-%{pypi_name}-doc
+Summary: neutron-dynamic-routing documentation
+%description -n python-%{pypi_name}-doc
+Documentation for neutron-dynamic-routing
+
+%package -n python-%{pypi_name}-tests
+Summary: neutron-dynamic-routing tests
+Requires: python-%{pypi_name} = %{version}-%{release}
+%description -n python-%{pypi_name}-tests
+neutron-dynamic-routing set of tests
+
+%prep
+%autosetup -n %{name}-%{upstream_version} -S git
+# Remove bundled egg-info
+rm -rf %{pypi_name}.egg-info
+
+%build
+export PBR_VERSION=%{version}
+%py2_build
+# Generate sample config and add the current directory to PYTHONPATH so
+# oslo-config-generator doesn't skip entry points.
+PYTHONPATH=.
oslo-config-generator --config-file=./etc/oslo-config-generator/bgp_dragent.ini +# generate html docs +#%{__python2} setup.py build_sphinx +# remove the sphinx-build leftovers +rm -rf html/.{doctrees,buildinfo} + +%install +export PBR_VERSION=%{version} + +install -p -D -m 644 %{SOURCE2} %{buildroot}%{_unitdir}/neutron-bgp-dragent.service +install -d %{buildroot}%{_sysconfdir}/init.d +install -m 755 %{SOURCE1} %{buildroot}%{_sysconfdir}/init.d/neutron-bgp-dragent +install -d %{buildroot}%{_sysconfdir}/%{service}/pmon +install -m 755 %{SOURCE3} %{buildroot}%{_sysconfdir}/%{service}/pmon/neutron-bgp-dragent.conf +%py2_install + +mkdir -p %{buildroot}%{_sysconfdir}/%{service}/policy.d +mv %{buildroot}/usr/etc/%{service}/policy.d/dynamic_routing.conf %{buildroot}%{_sysconfdir}/%{service}/policy.d/ +mv etc/bgp_dragent.ini.sample %{buildroot}%{_sysconfdir}/%{service}/bgp_dragent.ini + + +%files -n python2-%{pypi_name} +%license LICENSE +%doc README.rst +%{python2_sitelib}/%{sname} +%{python2_sitelib}/%{sname}-*.egg-info +%config(noreplace) %attr(0640, root, %{service}) %{_sysconfdir}/%{service}/policy.d/dynamic_routing.conf +%{_bindir}/neutron-bgp-dragent +%exclude %{python2_sitelib}/%{sname}/tests +%{_unitdir}/neutron-bgp-dragent.service +%config(noreplace) %attr(0640, root, %{service}) %{_sysconfdir}/%{service}/pmon/neutron-bgp-dragent.conf +%{_sysconfdir}/init.d/%{service}-bgp-dragent +%config(noreplace) %attr(0640, root, %{service}) %{_sysconfdir}/%{service}/bgp_dragent.ini + + +%files -n python-%{pypi_name}-doc +#%doc html +%license LICENSE + +%files -n python-%{pypi_name}-tests +%license LICENSE +%{python2_sitelib}/%{sname}/tests + + +%changelog +* Mon Mar 13 2017 Matt Peters 9.2.0-0 +- Initial Version diff --git a/openstack/python-neutron-lib/centos/build_srpm.data b/openstack/python-neutron-lib/centos/build_srpm.data new file mode 100644 index 00000000..6214e19a --- /dev/null +++ b/openstack/python-neutron-lib/centos/build_srpm.data @@ -0,0 +1,4 @@ +TAR_NAME=python-neutron-lib +SRC_DIR="$CGCS_BASE/git/neutron-lib" +TIS_BASE_SRCREV=f0d7e470c2ef1702b2715ceb2fd8a00fce2a23be +TIS_PATCH_VER=GITREVCOUNT diff --git a/openstack/python-neutron-lib/centos/python-neutron-lib.spec b/openstack/python-neutron-lib/centos/python-neutron-lib.spec new file mode 100644 index 00000000..a265caf3 --- /dev/null +++ b/openstack/python-neutron-lib/centos/python-neutron-lib.spec @@ -0,0 +1,105 @@ +%{!?upstream_version: %global upstream_version %{version}%{?milestone}} + +%global library neutron-lib +%global module neutron_lib + +Name: python-%{library} +Version: 1.9.1 +Release: 1%{?_tis_dist}.%{tis_patch_ver} +Summary: OpenStack Neutron library +License: ASL 2.0 +URL: http://launchpad.net/neutron/ + +Source0: %{name}-%{version}.tar.gz + +BuildArch: noarch + +BuildRequires: python2-devel +BuildRequires: python-pbr +BuildRequires: python-setuptools +BuildRequires: git + +Requires: python-debtcollector >= 1.2.0 +Requires: python-oslo-concurrency >= 3.8.0 +Requires: python-oslo-config >= 2:4.0.0 +Requires: python-oslo-context >= 2.14.0 +Requires: python-oslo-db >= 4.24.0 +Requires: python-oslo-i18n >= 2.1.0 +Requires: python-oslo-log >= 3.22.0 +Requires: python-oslo-messaging >= 5.24.2 +Requires: python-oslo-policy >= 1.23.0 +Requires: python-oslo-service >= 1.10.0 +Requires: python-oslo-utils >= 3.20.0 +Requires: python-sqlalchemy >= 1.0.10 +Requires: python-stevedore + +%description +OpenStack Neutron library shared by all Neutron sub-projects. 
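For context on the dynamic-routing packaging earlier in this patch: the bgp_dragent.ini generated there is what neutron-bgp-dragent reads at startup (the init script passes it with --config-file). A minimal sketch of such a file follows, assuming the Ryu-based driver named in the upstream documentation; the router ID is a placeholder value:

    # Hypothetical minimal /etc/neutron/bgp_dragent.ini
    cat > /etc/neutron/bgp_dragent.ini <<'EOF'
    [BGP]
    # Driver that speaks BGP on behalf of the agent (per upstream docs)
    bgp_speaker_driver = neutron_dynamic_routing.services.bgp.agent.driver.ryu.driver.RyuBgpDriver
    # Conventionally an IPv4 address unique to this agent (placeholder)
    bgp_router_id = 192.0.2.1
    EOF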
+
+
+%package tests
+Summary: OpenStack Neutron library tests
+Requires: python-%{library} = %{version}-%{release}
+
+%description tests
+OpenStack Neutron library shared by all Neutron sub-projects.
+
+This package contains the Neutron library test files.
+
+
+%package doc
+Summary: OpenStack Neutron library documentation
+
+BuildRequires: python-sphinx
+BuildRequires: python-openstackdocstheme
+BuildRequires: python-oslo-context
+BuildRequires: python-oslo-concurrency
+BuildRequires: python-oslo-db
+BuildRequires: python-oslo-i18n
+BuildRequires: python-oslo-log
+BuildRequires: python-oslo-utils
+BuildRequires: python-oslo-policy
+BuildRequires: python-oslo-service
+BuildRequires: python-netaddr
+BuildRequires: python-debtcollector
+BuildRequires: python-fixtures
+
+%description doc
+OpenStack Neutron library shared by all Neutron sub-projects.
+
+This package contains the documentation.
+
+%prep
+%autosetup -n %{name}-%{upstream_version} -S git
+
+# Let's handle dependencies ourselves
+rm -f *requirements.txt
+
+%build
+%py2_build
+# generate html docs
+%{__python2} setup.py build_sphinx -b html
+# remove the sphinx-build leftovers
+rm -rf doc/build/html/.{doctrees,buildinfo}
+
+%install
+%py2_install
+
+%files
+%license LICENSE
+%{python2_sitelib}/%{module}
+%{python2_sitelib}/%{module}-*.egg-info
+%exclude %{python2_sitelib}/%{module}/tests
+
+%files tests
+%license LICENSE
+%{python2_sitelib}/%{module}/tests
+
+%files doc
+%license LICENSE
+%doc doc/build/html README.rst
+
+%changelog
+* Mon Aug 21 2017 Alfredo Moralejo 1.9.1-1
+- Update to 1.9.1
+
diff --git a/openstack/python-neutron/centos/build_srpm.data b/openstack/python-neutron/centos/build_srpm.data
new file mode 100644
index 00000000..a3bd51e6
--- /dev/null
+++ b/openstack/python-neutron/centos/build_srpm.data
@@ -0,0 +1,5 @@
+TAR_NAME="neutron"
+SRC_DIR="$CGCS_BASE/git/neutron"
+COPY_LIST="$FILES_BASE/*"
+TIS_BASE_SRCREV=eb2be51d847f8e8b79488b0c956d0d2aa6122ea7
+TIS_PATCH_VER=GITREVCOUNT
diff --git a/openstack/python-neutron/centos/files/NetnsCleanup.ocf_ra b/openstack/python-neutron/centos/files/NetnsCleanup.ocf_ra
new file mode 100644
index 00000000..747f9312
--- /dev/null
+++ b/openstack/python-neutron/centos/files/NetnsCleanup.ocf_ra
@@ -0,0 +1,154 @@
+#!/bin/sh
+#
+# Neutron Netns Cleanup OCF RA.
+# Handles the netns cleanup at start / stop of the agent service
+# group
+#
+# Copyright (c) 2014 Red Hat
+#
+# This is a one-shot OCF resource agent with the following properties:
+#
+# * It wraps the init.d script to make an OCF-RA
+# * It maps the start, stop, monitor to start, stop, status, and provides
+#   the specific OCF ones.
+# * It cleans unused resources during start (system or agents startup)
+# * It cleans everything on stop (agents migration to other hosts)
+# * Once started, it will respond with status = OK
+# * Once stopped, it will respond with status = DEAD
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it would be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#
+# Further, this software is distributed without any warranty that it is
+# free of the rightful claim of any third person regarding infringement
+# or the like. Any license provided herein, whether implied or
+# otherwise, applies only to this software file.  Patent licenses, if
+# any, provided herein do not apply to combinations of this program with
+# other software, or any other product whatsoever.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+#
+
+#######################################################################
+# Initialization:
+
+
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+: ${OCF_NEUTRON_DIR=${OCF_ROOT}/lib/neutron}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+
+WRAPPED_INITD_SCRIPT=${OCF_NEUTRON_DIR}/neutron-netns-cleanup
+
+#######################################################################
+
+meta_data() {
+    cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="NetnsCleanup">
+<version>1.0</version>
+
+<longdesc lang="en">
+This resource agent does nothing during execution, only executes
+a cleanup during start, and a force cleanup during stop of
+the netns resources generated by neutron agents.
+</longdesc>
+<shortdesc lang="en">neutron netns cleanup resource agent</shortdesc>
+
+<parameters>
+</parameters>
+
+<actions>
+<action name="start"        timeout="20" />
+<action name="stop"         timeout="20" />
+<action name="status"       timeout="20" />
+<action name="monitor"      timeout="10" interval="10" depth="0" />
+<action name="meta-data"    timeout="5" />
+<action name="validate-all" timeout="20" />
+</actions>
+</resource-agent>
+END
+}
+
+#######################################################################
+
+
+netns_cleanup_usage() {
+    cat <<END
+
+
+1.0
+
+
+This resource agent sets host parameter in neutron config files to allow
+neutron agents to scale
+
+
+neutron host base name resource agent
+
+
+
+
+    neutron host base name
+
+    neutron host base name
+
+
+
+
+END
+}
+
+#######################################################################
+
+neutronconfigfiles_to_clean="dhcp_agent.ini fwaas_driver.ini lbaas_agent.ini l3_agent.ini metadata_agent.ini plugins/openvswitch/ovs_neutron_plugin.ini"
+neutronconfigfile="neutron.conf"
+
+neutron_scale_usage() {
+    cat <<END
+
+
+1.0
+
+
+This resource agent does nothing during execution, only executes
+a cleanup during start, and a force cleanup during stop of
+the openvswitch resources generated by neutron agents.
+
+
+neutron OVS cleanup resource agent
+
+
+
+
+END
+}
+
+#######################################################################
+
+
+ovs_cleanup_usage() {
+    cat </dev/null`
+    if [ -n "$pid" ]; then
+        if ps -p $pid &>/dev/null ; then
+            echo "$DESC is running"
+            return
+        fi
+    fi
+    echo "$DESC is not running"
+}
+
+case "$1" in
+    start)
+        start
+        ;;
+    stop)
+        stop
+        ;;
+    restart|force-reload|reload|reset)
+        stop
+        start
+        ;;
+    status)
+        status
+        ;;
+    *)
+        echo "Usage: $0 {start|stop|force-reload|restart|reload|status|reset}"
+        exit 1
+        ;;
+esac
+
+exit 0
diff --git a/openstack/python-neutron/centos/files/neutron-dhcp-agent.pmon b/openstack/python-neutron/centos/files/neutron-dhcp-agent.pmon
new file mode 100644
index 00000000..1a37143c
--- /dev/null
+++ b/openstack/python-neutron/centos/files/neutron-dhcp-agent.pmon
@@ -0,0 +1,24 @@
+[process]
+process = neutron-dhcp-agent
+pidfile = /var/run/neutron-dhcp-agent.pid
+script = /etc/init.d/neutron-dhcp-agent
+style = lsb ; ocf or lsb
+severity = major ; minor, major, critical
+restarts = 3 ; restarts before error assertion
+interval = 5 ; number of seconds to wait between restarts
+debounce = 20 ; number of seconds that a process needs to remain
+ ; running before degrade is removed and retry count
+ ; is cleared.
+startuptime = 5 ; Seconds to wait after process start before starting the debounce monitor
+mode = passive ; Monitoring mode: passive (default) or active
+ ; passive: process death monitoring (default: always)
+ ; active : heartbeat monitoring, i.e.
request / response messaging + ; ignore : do not monitor or stop monitoring +subfunction = compute ; Optional label. + ; Manage this process in the context of a combo host subfunction + ; Choices: compute or storage. + ; when specified pmond will wait for + ; /var/run/.compute_config_complete or + ; /var/run/.storage_config_complete + ; ... before managing this process with the specified subfunction + ; Excluding this label will cause this process to be managed by default on startup diff --git a/openstack/python-neutron/centos/files/neutron-dhcp-agent.service b/openstack/python-neutron/centos/files/neutron-dhcp-agent.service new file mode 100644 index 00000000..f4ce9051 --- /dev/null +++ b/openstack/python-neutron/centos/files/neutron-dhcp-agent.service @@ -0,0 +1,16 @@ +[Unit] +Description=Neutron networking agent +After=network.target syslog.target openvswitch.service +Before=pmon.service + +[Service] +Type=forking +RemainAfterExit=yes +ExecStart=/etc/rc.d/init.d/neutron-dhcp-agent start +ExecStop=/etc/rc.d/init.d/neutron-dhcp-agent stop +ExecReload=/etc/rc.d/init.d/neutron-dhcp-agent reload +PIDFile=/var/run/neutron-dhcp-agent.pid +KillMode=process + +[Install] +WantedBy=multi-user.target diff --git a/openstack/python-neutron/centos/files/neutron-dist.conf b/openstack/python-neutron/centos/files/neutron-dist.conf new file mode 100644 index 00000000..69f03cf4 --- /dev/null +++ b/openstack/python-neutron/centos/files/neutron-dist.conf @@ -0,0 +1,11 @@ +[DEFAULT] +verbose = True +lock_path = $state_path/lock +notification_driver = neutron.openstack.common.notifier.rpc_notifier +allow_overlapping_ips = True +use_stderr = False +api_paste_config = /usr/share/neutron/api-paste.ini + +[agent] +root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf +root_helper_daemon = sudo neutron-rootwrap-daemon /etc/neutron/rootwrap.conf diff --git a/openstack/python-neutron/centos/files/neutron-l3-agent.service b/openstack/python-neutron/centos/files/neutron-l3-agent.service new file mode 100644 index 00000000..2c9f291b --- /dev/null +++ b/openstack/python-neutron/centos/files/neutron-l3-agent.service @@ -0,0 +1,13 @@ +[Unit] +Description=OpenStack Neutron Layer 3 Agent +After=syslog.target network.target + +[Service] +Type=simple +User=neutron +ExecStart=/usr/bin/neutron-l3-agent --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/l3_agent --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-l3-agent --log-file /var/log/neutron/l3-agent.log +PrivateTmp=false +KillMode=process + +[Install] +WantedBy=multi-user.target diff --git a/openstack/python-neutron/centos/files/neutron-linuxbridge-agent.service b/openstack/python-neutron/centos/files/neutron-linuxbridge-agent.service new file mode 100644 index 00000000..e647962b --- /dev/null +++ b/openstack/python-neutron/centos/files/neutron-linuxbridge-agent.service @@ -0,0 +1,13 @@ +[Unit] +Description=OpenStack Neutron Linux Bridge Agent +After=syslog.target network.target + +[Service] +Type=simple +User=neutron +ExecStart=/usr/bin/neutron-linuxbridge-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/linuxbridge_agent.ini --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-linuxbridge-agent --log-file /var/log/neutron/linuxbridge-agent.log +PrivateTmp=true +KillMode=process + +[Install] +WantedBy=multi-user.target diff --git 
a/openstack/python-neutron/centos/files/neutron-linuxbridge-cleanup.service b/openstack/python-neutron/centos/files/neutron-linuxbridge-cleanup.service new file mode 100644 index 00000000..579d901e --- /dev/null +++ b/openstack/python-neutron/centos/files/neutron-linuxbridge-cleanup.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack Neutron Linux Bridge Cleanup Utility +After=syslog.target network.target +Before=neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-l3-agent.service openstack-nova-compute.service + +[Service] +Type=oneshot +User=neutron +ExecStart=/usr/bin/neutron-linuxbridge-cleanup --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/linuxbridge_agent.ini --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-linuxbridge-cleanup --log-file /var/log/neutron/linuxbridge-cleanup.log +ExecStop=/usr/bin/neutron-linuxbridge-cleanup --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/linuxbridge_agent.ini --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-linuxbridge-cleanup --log-file /var/log/neutron/linuxbridge-cleanup.log +PrivateTmp=true +RemainAfterExit=yes + +[Install] +WantedBy=multi-user.target diff --git a/openstack/python-neutron/centos/files/neutron-macvtap-agent.service b/openstack/python-neutron/centos/files/neutron-macvtap-agent.service new file mode 100644 index 00000000..c62f1d65 --- /dev/null +++ b/openstack/python-neutron/centos/files/neutron-macvtap-agent.service @@ -0,0 +1,13 @@ +[Unit] +Description=OpenStack Neutron macvtap L2 agent +After=syslog.target + +[Service] +Type=simple +User=neutron +ExecStart=/usr/bin/neutron-macvtap-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-macvtap-agent --log-file /var/log/neutron/macvtap-agent.log +PrivateTmp=true +KillMode=process + +[Install] +WantedBy=multi-user.target diff --git a/openstack/python-neutron/centos/files/neutron-metadata-agent.init b/openstack/python-neutron/centos/files/neutron-metadata-agent.init new file mode 100755 index 00000000..5d615012 --- /dev/null +++ b/openstack/python-neutron/centos/files/neutron-metadata-agent.init @@ -0,0 +1,87 @@ +#! /bin/sh + +### BEGIN INIT INFO +# Required-Start: $remote_fs $syslog +# Required-Stop: $remote_fs $syslog +# Default-Start: 2 3 5 +# Default-Stop: 0 1 6 +# Description: Neutron networking agent +### END INIT INFO + +SUFFIX=metadata +DESC="neutron-$SUFFIX-agent" +DAEMON="/usr/bin/neutron-$SUFFIX-agent" +PIDFILE="/var/run/neutron-$SUFFIX-agent.pid" +DAEMON_ARGS="--config-file=/etc/neutron/neutron.conf --config-file=/etc/neutron/metadata_agent.ini" + +start() +{ + if [ -e $PIDFILE ]; then + PIDDIR=/proc/$(cat $PIDFILE) + if [ -d ${PIDDIR} ]; then + echo "$DESC already running." + exit 1 + else + echo "Removing stale PID file $PIDFILE" + rm -f $PIDFILE + fi + fi + + echo -n "Starting $DESC..." + + start-stop-daemon --start --quiet --background \ + --pidfile ${PIDFILE} --make-pidfile --exec ${DAEMON} \ + -- ${DAEMON_ARGS} + + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." + fi +} + +stop() +{ + echo -n "Stopping $DESC..." + start-stop-daemon --stop --quiet --pidfile $PIDFILE + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." 
+ fi + rm -f $PIDFILE +} + +status() +{ + pid=`cat $PIDFILE 2>/dev/null` + if [ -n "$pid" ]; then + if ps -p $pid &>/dev/null ; then + echo "$DESC is running" + return + fi + fi + echo "$DESC is not running" +} + +case "$1" in + start) + start + ;; + stop) + stop + ;; + restart|force-reload|reload|reset) + stop + start + ;; + status) + status + ;; + *) + echo "Usage: $0 {start|stop|force-reload|restart|reload|status|reset}" + exit 1 + ;; +esac + +exit 0 diff --git a/openstack/python-neutron/centos/files/neutron-metadata-agent.pmon b/openstack/python-neutron/centos/files/neutron-metadata-agent.pmon new file mode 100644 index 00000000..0fe64dd8 --- /dev/null +++ b/openstack/python-neutron/centos/files/neutron-metadata-agent.pmon @@ -0,0 +1,24 @@ +[process] +process = neutron-metadata-agent +pidfile = /var/run/neutron-metadata-agent.pid +script = /etc/init.d/neutron-metadata-agent +style = lsb ; ocf or lsb +severity = major ; minor, major, critical +restarts = 3 ; restarts before error assertion +interval = 5 ; number of seconds to wait between restarts +debounce = 20 ; number of seconds that a process needs to remain + ; running before degrade is removed and retry count + ; is cleared. +startuptime = 5 ; Seconds to wait after process start before starting the debounce monitor +mode = passive ; Monitoring mode: passive (default) or active + ; passive: process death monitoring (default: always) + ; active : heartbeat monitoring, i.e. request / response messaging + ; ignore : do not monitor or stop monitoring +subfunction = compute ; Optional label. + ; Manage this process in the context of a combo host subfunction + ; Choices: compute or storage. + ; when specified pmond will wait for + ; /var/run/.compute_config_complete or + ; /var/run/.storage_config_complete + ; ... 
before managing this process with the specified subfunction + ; Excluding this label will cause this process to be managed by default on startup diff --git a/openstack/python-neutron/centos/files/neutron-metadata-agent.service b/openstack/python-neutron/centos/files/neutron-metadata-agent.service new file mode 100644 index 00000000..b7061be7 --- /dev/null +++ b/openstack/python-neutron/centos/files/neutron-metadata-agent.service @@ -0,0 +1,17 @@ +[Unit] +Description=Neutron networking agent +After=syslog.target network.target openvswitch.service +Before=pmon.service + +[Service] +Type=forking +RemainAfterExit=yes +ExecStart=/etc/rc.d/init.d/neutron-metadata-agent start +ExecStop=/etc/rc.d/init.d/neutron-metadata-agent stop +ExecReload=/etc/rc.d/init.d/neutron-metadata-agent reload +PIDFile=/var/run/neutron-metadata-agent.pid +KillMode=process + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/python-neutron/centos/files/neutron-metering-agent.service b/openstack/python-neutron/centos/files/neutron-metering-agent.service new file mode 100644 index 00000000..703864e4 --- /dev/null +++ b/openstack/python-neutron/centos/files/neutron-metering-agent.service @@ -0,0 +1,13 @@ +[Unit] +Description=OpenStack Neutron Metering Agent +After=syslog.target network.target + +[Service] +Type=simple +User=neutron +ExecStart=/usr/bin/neutron-metering-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/metering_agent.ini --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-metering-agent --log-file /var/log/neutron/metering-agent.log +PrivateTmp=false +KillMode=process + +[Install] +WantedBy=multi-user.target diff --git a/openstack/python-neutron/centos/files/neutron-netns-cleanup.init b/openstack/python-neutron/centos/files/neutron-netns-cleanup.init new file mode 100644 index 00000000..aef6a070 --- /dev/null +++ b/openstack/python-neutron/centos/files/neutron-netns-cleanup.init @@ -0,0 +1,74 @@ +#!/bin/bash +# +# neutron-netns-cleanup OpenStack Neutron netns cleanup utility +# +# chkconfig: - 97 02 +# description: OpenStack Neutron netns cleanup utility +# +# This is a one-shot init.d script with the next properties: +# +# * It accepts 3 verbs: start, stop, status +# * It cleans unused resources during start (system or agents startup) +# * It cleans everything on stop (agents migration to other hosts) +# * Once started, it will respond with status = OK +# * Once stopped, it will respond with status = DEAD +# +### END INIT INFO + +. /etc/rc.d/init.d/functions + +proj=neutron +prog=$proj-netns-cleanup +exec="/usr/bin/$prog" +configs=( + "/usr/share/$proj/$proj-dist.conf" \ + "/etc/$proj/$proj.conf" \ + "/etc/$proj/dhcp_agent.ini" +) +configs_str=${configs[@]/#/--config-file } + +[ -e /etc/sysconfig/$prog ] && . 
/etc/sysconfig/$prog + +lockfile=/var/lock/subsys/$prog + +clean() { + cleanopts="$@" + [ -x $exec ] || exit 5 + for config in ${configs[@]}; do + [ -f $config ] || exit 6 + done + runuser -s /bin/bash neutron -c "$exec $cleanopts --log-file /var/log/$proj/netns-cleanup.log $configs_str &>/dev/null" + if [ "x$1" == "x--force" ]; then + killall neutron-ns-metadata-proxy 2>/dev/null || : + killall neutron-keepalived-state-change 2>/dev/null || : + kill $(ps ax | grep -e "keepalived.*\.pid-vrrp" | awk '{print $1}') 2>/dev/null || : + kill $(ps ax | grep -e "radvd.*\.pid\.radvd" | awk '{print $1}') 2>/dev/null || : + kill $(ps ax | grep -e "haproxy .*/conf .*/pid" | awk '{print $1}') 2>/dev/null || : + fi + return $? +} + +retval=0 + +case "$1" in + start) + clean + retval=$? + [ $retval -eq 0 ] && touch $lockfile + ;; + stop) + clean --force + retval=$? + [ $retval -eq 0 ] && rm -f $lockfile + ;; + status) + [ ! -f $lockfile ] && retval=3 + ;; + restart|reload|force-reload|status|condrestart|try-restart) + # Do nothing + ;; + *) + echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" + exit 2 +esac +exit $retval diff --git a/openstack/python-neutron/centos/files/neutron-netns-cleanup.service b/openstack/python-neutron/centos/files/neutron-netns-cleanup.service new file mode 100644 index 00000000..7a730e4b --- /dev/null +++ b/openstack/python-neutron/centos/files/neutron-netns-cleanup.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack Neutron Netns Cleanup Utility +After=syslog.target network.target openvswitch.service +Before=neutron-openvswitch-agent.service neutron-dhcp-agent.service neutron-l3-agent.service openstack-nova-compute.service + +[Service] +Type=oneshot +User=neutron +ExecStart=/usr/bin/neutron-netns-cleanup --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/dhcp_agent.ini --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-netns-cleanup --log-file /var/log/neutron/netns-cleanup.log +ExecStop=/usr/bin/neutron-netns-cleanup --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/dhcp_agent.ini --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-netns-cleanup --log-file /var/log/neutron/netns-cleanup.log --force +PrivateTmp=false +RemainAfterExit=yes + +[Install] +WantedBy=multi-user.target diff --git a/openstack/python-neutron/centos/files/neutron-openvswitch-agent.service b/openstack/python-neutron/centos/files/neutron-openvswitch-agent.service new file mode 100644 index 00000000..95eb5bbd --- /dev/null +++ b/openstack/python-neutron/centos/files/neutron-openvswitch-agent.service @@ -0,0 +1,14 @@ +[Unit] +Description=OpenStack Neutron Open vSwitch Agent +After=syslog.target network.target network.service +PartOf=network.service + +[Service] +Type=simple +User=neutron +ExecStart=/usr/bin/neutron-openvswitch-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-openvswitch-agent --log-file /var/log/neutron/openvswitch-agent.log +PrivateTmp=true +KillMode=process + +[Install] +WantedBy=multi-user.target diff --git a/openstack/python-neutron/centos/files/neutron-ovs-cleanup.init b/openstack/python-neutron/centos/files/neutron-ovs-cleanup.init new file mode 100644 index 
00000000..271ab22c --- /dev/null +++ b/openstack/python-neutron/centos/files/neutron-ovs-cleanup.init @@ -0,0 +1,67 @@ +#!/bin/bash +# +# neutron-ovs-cleanup OpenStack Open vSwitch cleanup utility +# +# chkconfig: - 97 02 +# description: Purge Open vSwitch of the Neutron devices +# +# This is a one-shot init.d script with the next properties: +# +# * It accepts 3 verbs: start, stop, status +# * It cleans unused resources during start (system or agents startup) +# * It cleans everything on stop (agents migration to other hosts) +# * Once started, it will respond with status = OK +# * Once stopped, it will respond with status = DEAD +# +### END INIT INFO + +. /etc/rc.d/init.d/functions + +proj=neutron +prog=$proj-ovs-cleanup +exec="/usr/bin/$prog" +pidfile="/var/run/$proj/$prog.pid" +configs=( + "/usr/share/$proj/$proj-dist.conf" \ + "/etc/$proj/$proj.conf" \ + "/etc/$proj/plugins/ml2/openvswitch_agent.ini" \ +) +configs_str=${configs[@]/#/--config-file } + +[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog + +lockfile=/var/lock/subsys/$prog + +clean() { + [ -x $exec ] || exit 5 + for config in ${configs[@]}; do + [ -f $config ] || exit 6 + done + runuser -s /bin/bash neutron -c "$exec --log-file /var/log/$proj/ovs-cleanup.log $configs_str &>/dev/null" + return $? +} + +retval=0 + +case "$1" in + start) + clean + retval=$? + [ $retval -eq 0 ] && touch $lockfile + ;; + stop) + clean + retval=$? + [ $retval -eq 0 ] && rm -f $lockfile + ;; + status) + [ ! -f $lockfile ] && retval=3 + ;; + restart|reload|force-reload|condrestart|try-restart) + # Do nothing + ;; + *) + echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" + exit 2 +esac +exit $retval diff --git a/openstack/python-neutron/centos/files/neutron-ovs-cleanup.service b/openstack/python-neutron/centos/files/neutron-ovs-cleanup.service new file mode 100644 index 00000000..dd8f635d --- /dev/null +++ b/openstack/python-neutron/centos/files/neutron-ovs-cleanup.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack Neutron Open vSwitch Cleanup Utility +After=syslog.target network.target openvswitch.service +Before=neutron-openvswitch-agent.service neutron-dhcp-agent.service neutron-l3-agent.service openstack-nova-compute.service + +[Service] +Type=oneshot +User=neutron +ExecStart=/usr/bin/neutron-ovs-cleanup --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-ovs-cleanup --log-file /var/log/neutron/ovs-cleanup.log +ExecStop=/usr/bin/neutron-ovs-cleanup --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-ovs-cleanup --log-file /var/log/neutron/ovs-cleanup.log +PrivateTmp=true +RemainAfterExit=yes + +[Install] +WantedBy=multi-user.target diff --git a/openstack/python-neutron/centos/files/neutron-rpc-server.service b/openstack/python-neutron/centos/files/neutron-rpc-server.service new file mode 100644 index 00000000..b3f4d181 --- /dev/null +++ b/openstack/python-neutron/centos/files/neutron-rpc-server.service @@ -0,0 +1,14 @@ +[Unit] +Description=OpenStack Neutron (RPC only) Server +After=syslog.target network.target + +[Service] +Type=notify +User=neutron +ExecStart=/usr/bin/neutron-rpc-server --config-file /usr/share/neutron/neutron-dist.conf 
--config-dir /usr/share/neutron/server --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-rpc-server --log-file /var/log/neutron/rpc-server.log +PrivateTmp=true +NotifyAccess=all +KillMode=process + +[Install] +WantedBy=multi-user.target diff --git a/openstack/python-neutron/centos/files/neutron-server.init b/openstack/python-neutron/centos/files/neutron-server.init new file mode 100755 index 00000000..5f5c0e55 --- /dev/null +++ b/openstack/python-neutron/centos/files/neutron-server.init @@ -0,0 +1,137 @@ +#!/bin/sh + +### BEGIN INIT INFO +# Provides: neutron-server +# Required-Start: $remote_fs $syslog +# Required-Stop: $remote_fs $syslog +# Default-Start: 2 3 5 +# Default-Stop: 0 1 6 +# Short-Description: neutron-server +# Description: Provides the Neutron networking service +### END INIT INFO + +DESC="neutron-server" +DAEMON="/usr/bin/neutron-server" +PIDFILE="/var/run/neutron-server.pid" +DAEMON_ARGS="--config-file=/etc/neutron/neutron.conf \ + --config-file=/etc/neutron/plugins/ml2/ml2_conf.ini" + +start() +{ + if [ -e $PIDFILE ]; then + PIDDIR=/proc/$(cat $PIDFILE) + if [ -d ${PIDDIR} ]; then + echo "$DESC already running." + exit 1 + else + echo "Removing stale PID file $PIDFILE" + rm -f $PIDFILE + fi + fi + + echo -n "Starting $DESC..." + + start-stop-daemon --start --quiet --background \ + --pidfile ${PIDFILE} --make-pidfile --exec ${DAEMON} \ + -- $DAEMON_ARGS + + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." + fi +} + +stop() +{ + echo -n "Stopping $DESC..." + start-stop-daemon --stop --quiet --pidfile $PIDFILE + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." + fi + rm -f $PIDFILE +} + +status() +{ + pid=`cat $PIDFILE 2>/dev/null` + if [ -n "$pid" ]; then + if ps -p $pid &>/dev/null ; then + echo "$DESC is running" + return + fi + fi + echo "$DESC is not running" +} + +reset() +{ + . /etc/nova/openrc + + # Cleanup all neutron floating ip + simple_delete "neutron floatingip-list --all-tenant" "neutron floatingip-delete" 1 "neutron floatingip" + + # Cleanup all neutron router + neutron router-list | while read line; do + router_id=`echo $line | get_field 1` + neutron router-port-list $router_id | while read line_port; do + port_id=`echo $line_port | get_field 1` + subnet_id=`echo $line_port | get_field 4 | cut -d ' ' -f 2 | cut -d '"' -f 2` + if [ ! -z "$router_id" ] && [ ! -z "$subnet_id" ] ; then + echo ">>> Delete router-port: router_id=$router_id, port_id=$port_id, subnet_id=$subnet_id" + neutron router-interface-delete $router_id $subnet_id > /dev/null 2>&1 + fi + done + if [ ! -z "$router_id" ] ; then + echo ">>> Delete router: router_id=$router_id" + neutron router-delete $router_id > /dev/null 2>&1 + fi + done + + # Cleanup all neutron ports + simple_delete "neutron port-list --all-tenant" "neutron port-delete" 1 "neutron port" + + # Cleanup all neutron net + simple_delete "neutron net-list --all-tenant" "neutron net-delete" 1 "neutron net" + + stop + + # This is to make sure postgres is configured and running + if ! 
pidof postmaster > /dev/null; then + /etc/init.d/postgresql-init + /etc/init.d/postgresql start + sleep 2 + fi + + sudo -u postgres dropdb ovs_neutron + sudo -u postgres createdb ovs_neutron + + start +} + +case "$1" in + start) + start + ;; + stop) + stop + ;; + restart|force-reload|reload) + stop + start + ;; + status) + status + ;; + reset) + reset + ;; + *) + echo "Usage: $0 {start|stop|force-reload|restart|reload|status|reset}" + exit 1 + ;; +esac + +exit 0 diff --git a/openstack/python-neutron/centos/files/neutron-server.service b/openstack/python-neutron/centos/files/neutron-server.service new file mode 100644 index 00000000..f1afd6d1 --- /dev/null +++ b/openstack/python-neutron/centos/files/neutron-server.service @@ -0,0 +1,13 @@ +[Unit] +Description=OpenStack Neutron Server +After=syslog.target network.target + +[Service] +Type=forking +RemainAfterExit=yes +ExecStart=/etc/rc.d/init.d/neutron-server start +ExecStop=/etc/rc.d/init.d/neutron-server stop +ExecReload=/etc/rc.d/init.d/neutron-server reload + +[Install] +WantedBy=multi-user.target diff --git a/openstack/python-neutron/centos/files/neutron-sriov-nic-agent.init b/openstack/python-neutron/centos/files/neutron-sriov-nic-agent.init new file mode 100755 index 00000000..926fbb17 --- /dev/null +++ b/openstack/python-neutron/centos/files/neutron-sriov-nic-agent.init @@ -0,0 +1,87 @@ +#! /bin/sh + +### BEGIN INIT INFO +# Required-Start: $remote_fs $syslog +# Required-Stop: $remote_fs $syslog +# Default-Start: 2 3 5 +# Default-Stop: 0 1 6 +# Description: Neutron networking agent +### END INIT INFO + +SUFFIX=sriov-nic +DESC="neutron-$SUFFIX-agent" +DAEMON="/usr/bin/neutron-$SUFFIX-agent" +PIDFILE="/var/run/neutron-$SUFFIX-agent.pid" +DAEMON_ARGS="--config-file=/etc/neutron/neutron.conf --config-file=/etc/neutron/plugins/ml2/sriov_agent.ini" + +start() +{ + if [ -e $PIDFILE ]; then + PIDDIR=/proc/$(cat $PIDFILE) + if [ -d ${PIDDIR} ]; then + echo "$DESC already running." + exit 1 + else + echo "Removing stale PID file $PIDFILE" + rm -f $PIDFILE + fi + fi + + echo -n "Starting $DESC..." + + start-stop-daemon --start --quiet --background \ + --pidfile ${PIDFILE} --make-pidfile --exec ${DAEMON} \ + -- ${DAEMON_ARGS} + + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." + fi +} + +stop() +{ + echo -n "Stopping $DESC..." + start-stop-daemon --stop --quiet --pidfile $PIDFILE + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." 
+ fi + rm -f $PIDFILE +} + +status() +{ + pid=`cat $PIDFILE 2>/dev/null` + if [ -n "$pid" ]; then + if ps -p $pid &>/dev/null ; then + echo "$DESC is running" + return + fi + fi + echo "$DESC is not running" +} + +case "$1" in + start) + start + ;; + stop) + stop + ;; + restart|force-reload|reload|reset) + stop + start + ;; + status) + status + ;; + *) + echo "Usage: $0 {start|stop|force-reload|restart|reload|status|reset}" + exit 1 + ;; +esac + +exit 0 diff --git a/openstack/python-neutron/centos/files/neutron-sriov-nic-agent.pmon b/openstack/python-neutron/centos/files/neutron-sriov-nic-agent.pmon new file mode 100644 index 00000000..3dd6d724 --- /dev/null +++ b/openstack/python-neutron/centos/files/neutron-sriov-nic-agent.pmon @@ -0,0 +1,24 @@ +[process] +process = neutron-sriov-nic-agent +pidfile = /var/run/neutron-sriov-nic-agent.pid +script = /etc/init.d/neutron-sriov-nic-agent +style = lsb ; ocf or lsb +severity = major ; minor, major, critical +restarts = 3 ; restarts before error assertion +interval = 5 ; number of seconds to wait between restarts +debounce = 20 ; number of seconds that a process needs to remain + ; running before degrade is removed and retry count + ; is cleared. +startuptime = 5 ; Seconds to wait after process start before starting the debounce monitor +mode = passive ; Monitoring mode: passive (default) or active + ; passive: process death monitoring (default: always) + ; active : heartbeat monitoring, i.e. request / response messaging + ; ignore : do not monitor or stop monitoring +subfunction = compute ; Optional label. + ; Manage this process in the context of a combo host subfunction + ; Choices: compute or storage. + ; when specified pmond will wait for + ; /var/run/.compute_config_complete or + ; /var/run/.storage_config_complete + ; ... 
before managing this process with the specified subfunction
+ ; Excluding this label will cause this process to be managed by default on startup
diff --git a/openstack/python-neutron/centos/files/neutron-sriov-nic-agent.service b/openstack/python-neutron/centos/files/neutron-sriov-nic-agent.service
new file mode 100644
index 00000000..73b2d7fa
--- /dev/null
+++ b/openstack/python-neutron/centos/files/neutron-sriov-nic-agent.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=OpenStack Neutron SR-IOV NIC Agent
+After=syslog.target network.target openvswitch.service
+Before=pmon.service
+
+[Service]
+Type=forking
+RemainAfterExit=yes
+ExecStart=/etc/rc.d/init.d/neutron-sriov-nic-agent start
+ExecStop=/etc/rc.d/init.d/neutron-sriov-nic-agent stop
+ExecReload=/etc/rc.d/init.d/neutron-sriov-nic-agent reload
+PIDFile=/var/run/neutron-sriov-nic-agent.pid
+KillMode=process
+
+[Install]
+WantedBy=multi-user.target
diff --git a/openstack/python-neutron/centos/files/neutron-sudoers b/openstack/python-neutron/centos/files/neutron-sudoers
new file mode 100644
index 00000000..9273f587
--- /dev/null
+++ b/openstack/python-neutron/centos/files/neutron-sudoers
@@ -0,0 +1,4 @@
+Defaults:neutron !requiretty
+
+neutron ALL = (root) NOPASSWD: /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *
+neutron ALL = (root) NOPASSWD: /usr/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
diff --git a/openstack/python-neutron/centos/openstack-neutron.spec b/openstack/python-neutron/centos/openstack-neutron.spec
new file mode 100644
index 00000000..8ae028b7
--- /dev/null
+++ b/openstack/python-neutron/centos/openstack-neutron.spec
@@ -0,0 +1,812 @@
+%{!?upstream_version: %global upstream_version %{version}%{?milestone}}
+%global service neutron
+
+%define cleanup_orphan_rootwrap_daemons() \
+for pid in $(ps -f --ppid 1 | awk '/.*neutron-rootwrap-daemon/ { print $2 }'); do \
+    kill $(ps --ppid $pid -o pid=) \
+done \
+%nil
+
+%global common_desc \
+Neutron is a virtual network service for OpenStack. Just like \
+OpenStack Nova provides an API to dynamically request and configure \
+virtual servers, Neutron provides an API to dynamically request and \
+configure virtual networks. These networks connect "interfaces" from \
+other OpenStack services (e.g., virtual NICs from Nova VMs). The \
+Neutron API supports extensions to provide advanced network \
+capabilities (e.g., QoS, ACLs, network monitoring, etc.)
+
+Name: openstack-%{service}
+Version: 11.0.0
+Release: 1%{?_tis_dist}.%{tis_patch_ver}
+Epoch: 1
+Summary: OpenStack Networking Service
+
+License: ASL 2.0
+URL: http://launchpad.net/%{service}/
+
+Source0: %{service}-%{version}.tar.gz
+#Source1: %{service}.logrotate
+Source2: %{service}-sudoers
+Source10: neutron-server.service
+Source11: neutron-linuxbridge-agent.service
+Source12: neutron-openvswitch-agent.service
+Source15: neutron-dhcp-agent.service
+Source16: neutron-l3-agent.service
+Source17: neutron-metadata-agent.service
+Source18: neutron-ovs-cleanup.service
+Source19: neutron-macvtap-agent.service
+Source20: neutron-metering-agent.service
+Source21: neutron-sriov-nic-agent.service
+Source22: neutron-netns-cleanup.service
+Source29: neutron-rpc-server.service
+
+Source30: %{service}-dist.conf
+Source31: conf.README
+Source32: neutron-linuxbridge-cleanup.service
+#Source33: neutron-enable-bridge-firewall.sh
+#Source34: neutron-l2-agent-sysctl.conf
+# We use the legacy service to load modules because it allows us to gracefully
+# ignore a missing kernel module (f.e. br_netfilter on earlier kernels). It's
+# essentially because .modules files are shell scripts.
+#Source35: neutron-l2-agent.modules
+
+# WRS
+Source44: neutron-dhcp-agent.pmon
+Source45: neutron-metadata-agent.pmon
+Source46: neutron-sriov-nic-agent.pmon
+Source49: neutron-dhcp-agent.init
+Source50: neutron-metadata-agent.init
+Source51: neutron-sriov-nic-agent.init
+Source52: neutron-server.init
+
+BuildArch: noarch
+
+BuildRequires: git
+BuildRequires: openstack-macros
+BuildRequires: python2-devel
+BuildRequires: python-babel
+BuildRequires: python-d2to1
+BuildRequires: python-keystoneauth1 >= 3.1.0
+BuildRequires: python-keystonemiddleware
+BuildRequires: python-neutron-lib >= 1.9.0
+BuildRequires: python-novaclient
+BuildRequires: python-os-xenapi
+BuildRequires: python-oslo-cache
+BuildRequires: python-oslo-concurrency
+BuildRequires: python-oslo-config
+BuildRequires: python-oslo-db
+BuildRequires: python-oslo-log
+BuildRequires: python-oslo-messaging
+BuildRequires: python-oslo-policy
+BuildRequires: python-oslo-privsep
+BuildRequires: python-oslo-rootwrap
+BuildRequires: python-oslo-service
+BuildRequires: python-oslo-versionedobjects
+BuildRequires: python-osprofiler >= 1.3.0
+BuildRequires: python-ovsdbapp
+BuildRequires: python-pbr >= 2.0.0
+BuildRequires: python-psutil >= 3.2.2
+BuildRequires: python-pyroute2 >= 0.4.19
+BuildRequires: python-pecan
+BuildRequires: python-tenacity >= 3.2.1
+BuildRequires: python-weakrefmethod >= 1.0.2
+BuildRequires: systemd-units
+# WRS
+BuildRequires: systemd
+BuildRequires: tsconfig
+BuildRequires: systemd-devel
+BuildRequires: python-retrying
+BuildRequires: python-networking-sfc
+
+Requires: openstack-%{service}-common = %{epoch}:%{version}-%{release}
+
+# dnsmasq is not a hard requirement, but is currently the only option
+# when neutron-dhcp-agent is deployed.
+Requires: dnsmasq
+Requires: dnsmasq-utils
+
+# radvd is not a hard requirement, but is currently the only option
+# for IPv6 deployments.
+Requires: radvd
+
+# dibbler is not a hard requirement, but is currently the default option
+# for IPv6 prefix delegation.
+Requires: dibbler-client
+
+# conntrack is not a hard requirement, but is currently used by L3 agent
+# to immediately drop connections after a floating IP is disassociated
+Requires: conntrack-tools
+
+# keepalived is not a hard requirement, but is currently used by DVR L3
+# agent
+Requires: keepalived
+
+# haproxy implements metadata proxy process
+Requires: haproxy >= 1.5.0
+
+# Those are not hard requirements, ipset is used by ipset-cleanup in the subpackage,
+# and iptables is used by the l3-agent which currently is not in a separate package.
+Requires: ipset
+Requires: iptables
+
+Requires(post): systemd
+Requires(preun): systemd
+Requires(postun): systemd
+
+Obsoletes: openstack-%{service}-dev-server
+
+%description
+%{common_desc}
+
+
+%package -n python-%{service}
+Summary: Neutron Python libraries
+Requires: python-alembic >= 0.8.7
+Requires: python-debtcollector >= 1.2.0
+Requires: python-designateclient >= 1.5.0
+Requires: python-eventlet >= 0.18.2
+Requires: python-greenlet >= 0.3.2
+Requires: python-httplib2 >= 0.7.5
+# Upstream jinja2 set to 2.8 due to Python 3 support.
+# CentOS repos currently don't have the package rebased to 2.8.
+Requires: python-jinja2 >= 2.7 +Requires: python-keystoneauth1 >= 3.1.0 +Requires: python-keystonemiddleware >= 4.12.0 +Requires: python-netaddr >= 0.7.13 +Requires: python-netifaces >= 0.10.4 +Requires: python-neutronclient >= 6.3.0 +Requires: python-neutron-lib >= 1.9.0 +Requires: python-novaclient >= 9.0.0 +Requires: python-os-xenapi >= 0.2.0 +Requires: python-oslo-cache >= 1.5.0 +Requires: python-oslo-concurrency >= 3.8.0 +Requires: python-oslo-config >= 2:4.0.0 +Requires: python-oslo-context >= 2.14.0 +Requires: python-oslo-db >= 4.24.0 +Requires: python-oslo-i18n >= 2.1.0 +Requires: python-oslo-log >= 3.22.0 +Requires: python-oslo-messaging >= 5.24.2 +Requires: python-oslo-middleware >= 3.27.0 +Requires: python-oslo-policy >= 1.23.0 +Requires: python-oslo-privsep >= 1.9.0 +Requires: python-oslo-reports >= 0.6.0 +Requires: python-oslo-rootwrap >= 5.0.0 +Requires: python-oslo-serialization >= 1.10.0 +Requires: python-oslo-service >= 1.10.0 +Requires: python-oslo-utils >= 3.20.0 +Requires: python-oslo-versionedobjects >= 1.17.0 +Requires: python-osprofiler >= 1.4.0 +Requires: python-ovsdbapp +Requires: python-paste +Requires: python-paste-deploy >= 1.5.0 +Requires: python-pecan >= 1.0.0 +Requires: python-pbr >= 2.0.0 +Requires: python-psutil >= 3.2.2 +Requires: python-pyroute2 >= 0.4.19 +Requires: python-requests >= 2.10.0 +Requires: python-tenacity >= 3.2.1 +Requires: python-routes >= 2.3.1 +Requires: python-ryu >= 4.14 +Requires: python-six >= 1.9.0 +Requires: python-sqlalchemy >= 1.0.10 +Requires: python-stevedore >= 1.20.0 +Requires: python-weakrefmethod >= 1.0.2 +Requires: python-webob >= 1.7.1 + + + +%description -n python-%{service} +%{common_desc} + +This package contains the Neutron Python library. + + +%package -n python-%{service}-tests +Summary: Neutron tests +Requires: python-%{service} = %{epoch}:%{version}-%{release} +Requires: python-ddt >= 1.0.1 +Requires: python-fixtures >= 3.0.0 +Requires: python-mock >= 2.0 +Requires: python-subunit >= 0.0.18 +Requires: python-testrepository >= 0.0.18 +Requires: python-testtools >= 1.4.0 +Requires: python-testresources >= 0.2.4 +Requires: python-testscenarios >= 0.4 +Requires: python-oslotest >= 1.10.0 +Requires: python-oslo-db-tests >= 4.10.0 +Requires: python-os-testr >= 0.7.0 +Requires: python-PyMySQL >= 0.6.2 +Requires: python-tempest >= 12.1.0 +Requires: python-webtest >= 2.0 + +# pstree is used during functional testing to ensure our internal +# libraries managing processes work correctly. +Requires: psmisc +# nfs-utils is needed because it creates user with uid 65534 which +# is required by neutron functional tests. +Requires: nfs-utils + + +%description -n python-%{service}-tests +%{common_desc} + +This package contains Neutron test files. + + +%package common +Summary: Neutron common files +Requires(pre): shadow-utils +Requires: python-%{service} = %{epoch}:%{version}-%{release} +Requires: sudo + + +%description common +%{common_desc} + +This package contains Neutron common files. + + +%package linuxbridge +Summary: Neutron Linuxbridge agent +Requires: bridge-utils +Requires: ebtables +Requires: ipset +Requires: iptables +# kmod is needed to get access to /usr/sbin/modprobe needed by +# neutron-enable-bridge-firewall.sh triggered by the service unit file +Requires: kmod +Requires: openstack-%{service}-common = %{epoch}:%{version}-%{release} + + +%description linuxbridge +%{common_desc} + +This package contains the Neutron agent that implements virtual +networks using VLAN or VXLAN using Linuxbridge technology. 
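As a rough illustration of what this subpackage's agent consumes: the neutron-linuxbridge-agent unit file earlier in this patch passes /etc/neutron/plugins/ml2/linuxbridge_agent.ini via --config-file. A minimal sketch under assumed values follows; the provider interface name and the VXLAN endpoint address are placeholders, and the option names follow the ML2 Linux bridge agent sample configuration rather than this patch itself.

    # Hypothetical minimal linuxbridge_agent.ini (placeholder values)
    cat > /etc/neutron/plugins/ml2/linuxbridge_agent.ini <<'EOF'
    [linux_bridge]
    # Map the provider physical network to a host NIC (eth1 is a placeholder)
    physical_interface_mappings = provider:eth1
    [vxlan]
    enable_vxlan = true
    # Local tunnel endpoint address (placeholder)
    local_ip = 192.0.2.10
    EOF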
+
+
+%package macvtap-agent
+Summary: Neutron macvtap agent
+Requires: openstack-%{service}-common = %{epoch}:%{version}-%{release}
+
+
+%description macvtap-agent
+%{common_desc}
+
+This package contains the Neutron agent that implements
+macvtap attachments for libvirt qemu/kvm instances.
+
+
+%package ml2
+Summary: Neutron ML2 plugin
+Requires: openstack-%{service}-common = %{epoch}:%{version}-%{release}
+# needed for brocade and cisco drivers
+Requires: python-ncclient
+
+
+%description ml2
+%{common_desc}
+
+This package contains a Neutron plugin that allows the use of drivers
+to support separately extensible sets of network types and the mechanisms
+for accessing those types.
+
+
+%package openvswitch
+Summary: Neutron openvswitch plugin
+Requires: openstack-%{service}-common = %{epoch}:%{version}-%{release}
+# We require openvswitch when using vsctl to access ovsdb;
+# but if we use native access, then we just need python bindings.
+# since we don't know what users actually use, we depend on both.
+Requires: ipset
+Requires: iptables
+Requires: openvswitch
+Requires: python-openvswitch >= 2.6.1
+# kmod is needed to get access to /usr/sbin/modprobe needed by
+# neutron-enable-bridge-firewall.sh triggered by the service unit file
+Requires: kmod
+
+
+%description openvswitch
+%{common_desc}
+
+This package contains the Neutron plugin that implements virtual
+networks using Open vSwitch.
+
+
+%package metering-agent
+Summary: Neutron bandwidth metering agent
+Requires: iptables
+Requires: openstack-%{service}-common = %{epoch}:%{version}-%{release}
+
+
+%description metering-agent
+%{common_desc}
+
+This package contains the Neutron agent responsible for generating bandwidth
+utilization notifications.
+
+
+%package rpc-server
+Summary: Neutron (RPC only) Server
+Requires: openstack-%{service}-common = %{epoch}:%{version}-%{release}
+
+
+%description rpc-server
+%{common_desc}
+
+This package contains an alternative Neutron server that handles AMQP RPC
+workload only.
+
+
+%package sriov-nic-agent
+Summary: Neutron SR-IOV NIC agent
+Requires: openstack-%{service}-common = %{epoch}:%{version}-%{release}
+
+
+%description sriov-nic-agent
+%{common_desc}
+
+This package contains the Neutron agent to support advanced features of
+SR-IOV network cards.
+
+
+%prep
+%autosetup -n %{service}-%{upstream_version} -S git
+
+find %{service} -name \*.py -exec sed -i '/\/usr\/bin\/env python/{d;q}' {} +
+
+# Let's handle dependencies ourselves
+%py_req_cleanup
+
+# Kill egg-info in order to generate new SOURCES.txt
+rm -rf neutron.egg-info
+
+
+%build
+export PBR_VERSION=%{version}
+export SKIP_PIP_INSTALL=1
+%{__python2} setup.py build
+# Generate i18n files
+# (amoralej) we can remove '-D neutron' once https://review.openstack.org/#/c/485070/ is merged
+#%{__python2} setup.py compile_catalog
+pwd
+%{__python2} setup.py compile_catalog -d %{service}/locale -D neutron
+
+# Generate configuration files
+PYTHONPATH=. tools/generate_config_file_samples.sh
+find etc -name *.sample | while read filename
+do
+    filedir=$(dirname $filename)
+    file=$(basename $filename .sample)
+    mv ${filename} ${filedir}/${file}
+done
+
+# Loop through values in neutron-dist.conf and make sure that the values
+# are substituted into the neutron.conf as comments. Some of these values
+# will have been uncommented as a way of upstream setting defaults outside
+# of the code.
For notification_driver, there are commented examples +# above uncommented settings, so this specifically skips those comments +# and instead comments out the actual settings and substitutes the +# correct default values. +while read name eq value; do + test "$name" && test "$value" || continue + if [ "$name" = "notification_driver" ]; then + sed -ri "0,/^$name *=/{s!^$name *=.*!# $name = $value!}" etc/%{service}.conf + else + sed -ri "0,/^(#)? *$name *=/{s!^(#)? *$name *=.*!# $name = $value!}" etc/%{service}.conf + fi +done < %{SOURCE30} + +%install +export PBR_VERSION=%{version} +%{__python2} setup.py install -O1 --skip-build --root %{buildroot} + +# Remove unused files +rm -rf %{buildroot}%{python2_sitelib}/bin +rm -rf %{buildroot}%{python2_sitelib}/doc +rm -rf %{buildroot}%{python2_sitelib}/tools + +# Move rootwrap files to proper location +install -d -m 755 %{buildroot}%{_datarootdir}/%{service}/rootwrap +mv %{buildroot}/usr/etc/%{service}/rootwrap.d/*.filters %{buildroot}%{_datarootdir}/%{service}/rootwrap + +# Move config files to proper location +install -d -m 755 %{buildroot}%{_sysconfdir}/%{service} +mv %{buildroot}/usr/etc/%{service}/* %{buildroot}%{_sysconfdir}/%{service} +# WRS: to do: revisit service files to handle /usr/share rather than /etc/neutron for api-paste.ini +#mv %{buildroot}%{_sysconfdir}/%{service}/api-paste.ini %{buildroot}%{_datadir}/%{service}/api-paste.ini + +# The generated config files are not moved automatically by setup.py +install -d -m 755 %{buildroot}%{_sysconfdir}/%{service}/plugins/ml2 + +mv etc/%{service}.conf %{buildroot}%{_sysconfdir}/%{service}/%{service}.conf +for agent in dhcp l3 metadata metering +do + mv etc/${agent}_agent.ini %{buildroot}%{_sysconfdir}/%{service}/${agent}_agent.ini +done +for file in linuxbridge_agent ml2_conf openvswitch_agent sriov_agent +do + mv etc/%{service}/plugins/ml2/${file}.ini %{buildroot}%{_sysconfdir}/%{service}/plugins/ml2/${file}.ini +done + +# Install logrotate +#install -p -D -m 644 %{SOURCE1} %{buildroot}%{_sysconfdir}/logrotate.d/openstack-%{service} + +# Install sudoers +install -p -D -m 440 %{SOURCE2} %{buildroot}%{_sysconfdir}/sudoers.d/%{service} + +# Install systemd units +install -p -D -m 644 %{SOURCE10} %{buildroot}%{_unitdir}/neutron-server.service +install -p -D -m 644 %{SOURCE11} %{buildroot}%{_unitdir}/neutron-linuxbridge-agent.service +install -p -D -m 644 %{SOURCE12} %{buildroot}%{_unitdir}/neutron-openvswitch-agent.service +install -p -D -m 644 %{SOURCE15} %{buildroot}%{_unitdir}/neutron-dhcp-agent.service +install -p -D -m 644 %{SOURCE16} %{buildroot}%{_unitdir}/neutron-l3-agent.service +install -p -D -m 644 %{SOURCE17} %{buildroot}%{_unitdir}/neutron-metadata-agent.service +install -p -D -m 644 %{SOURCE18} %{buildroot}%{_unitdir}/neutron-ovs-cleanup.service +install -p -D -m 644 %{SOURCE19} %{buildroot}%{_unitdir}/neutron-macvtap-agent.service +install -p -D -m 644 %{SOURCE20} %{buildroot}%{_unitdir}/neutron-metering-agent.service +install -p -D -m 644 %{SOURCE21} %{buildroot}%{_unitdir}/neutron-sriov-nic-agent.service +install -p -D -m 644 %{SOURCE22} %{buildroot}%{_unitdir}/neutron-netns-cleanup.service +install -p -D -m 644 %{SOURCE29} %{buildroot}%{_unitdir}/neutron-rpc-server.service +install -p -D -m 644 %{SOURCE32} %{buildroot}%{_unitdir}/neutron-linuxbridge-cleanup.service + +# Install helper scripts +#install -p -D -m 755 %{SOURCE33} %{buildroot}%{_bindir}/neutron-enable-bridge-firewall.sh + +# Install sysctl and modprobe config files to enable bridge firewalling +# NOTE(ihrachys) 
we effectively duplicate same settings for each affected l2 +# agent. This can be revisited later. +#install -p -D -m 644 %{SOURCE34} %{buildroot}%{_sysctldir}/99-neutron-openvswitch-agent.conf +#install -p -D -m 644 %{SOURCE34} %{buildroot}%{_sysctldir}/99-neutron-linuxbridge-agent.conf +#install -p -D -m 755 %{SOURCE35} %{buildroot}%{_sysconfdir}/sysconfig/modules/neutron-openvswitch-agent.modules +#install -p -D -m 755 %{SOURCE35} %{buildroot}%{_sysconfdir}/sysconfig/modules/neutron-linuxbridge-agent.modules + +# Install README file that describes how to configure services with custom configuration files +install -p -D -m 755 %{SOURCE31} %{buildroot}%{_sysconfdir}/%{service}/conf.d/README + +# Setup directories +install -d -m 755 %{buildroot}%{_datadir}/%{service} +install -d -m 755 %{buildroot}%{_sharedstatedir}/%{service} +install -d -m 755 %{buildroot}%{_localstatedir}/log/%{service} +install -d -m 755 %{buildroot}%{_localstatedir}/run/%{service} + +# Install dist conf +install -p -D -m 640 %{SOURCE30} %{buildroot}%{_datadir}/%{service}/%{service}-dist.conf + +# Create and populate configuration directory for L3 agent that is not accessible for user modification +mkdir -p %{buildroot}%{_datadir}/%{service}/l3_agent +ln -s %{_sysconfdir}/%{service}/l3_agent.ini %{buildroot}%{_datadir}/%{service}/l3_agent/l3_agent.conf + +# Create dist configuration directory for neutron-server (may be filled by advanced services) +mkdir -p %{buildroot}%{_datadir}/%{service}/server + +# Create configuration directories for all services that can be populated by users with custom *.conf files +mkdir -p %{buildroot}/%{_sysconfdir}/%{service}/conf.d/common +for service in server rpc-server ovs-cleanup netns-cleanup linuxbridge-cleanup macvtap-agent; do + mkdir -p %{buildroot}/%{_sysconfdir}/%{service}/conf.d/%{service}-$service +done +for service in linuxbridge openvswitch dhcp l3 metadata metering sriov-nic; do + mkdir -p %{buildroot}/%{_sysconfdir}/%{service}/conf.d/%{service}-$service-agent +done + + +# WRS process init scripts +install -d %{buildroot}%{_sysconfdir}/init.d +install -m 755 %{SOURCE49} %{buildroot}%{_sysconfdir}/init.d/neutron-dhcp-agent +install -m 755 %{SOURCE50} %{buildroot}%{_sysconfdir}/init.d/neutron-metadata-agent +install -m 755 %{SOURCE51} %{buildroot}%{_sysconfdir}/init.d/neutron-sriov-nic-agent +install -m 755 %{SOURCE52} %{buildroot}%{_sysconfdir}/init.d/neutron-server + +# WRS process monitor configuration files +install -d %{buildroot}%{_sysconfdir}/%{service}/pmon +install -m 755 %{SOURCE44} %{buildroot}%{_sysconfdir}/%{service}/pmon/neutron-dhcp-agent.conf +install -m 755 %{SOURCE45} %{buildroot}%{_sysconfdir}/%{service}/pmon/neutron-metadata-agent.conf +install -m 755 %{SOURCE46} %{buildroot}%{_sysconfdir}/%{service}/pmon/neutron-sriov-nic-agent.conf + +# Install i18n .mo files (.po and .pot are not required) +install -d -m 755 %{buildroot}%{_datadir} +rm -f %{service}/locale/*/LC_*/%{service}*po +rm -f %{service}/locale/*pot +mv %{service}/locale %{buildroot}%{_datadir}/locale + +# Find language files +%find_lang %{service} --all-name + +# Create fake tempest entrypoint +%py2_entrypoint %{service} %{service} + +%pre common +getent group %{service} >/dev/null || groupadd -r %{service} +getent passwd %{service} >/dev/null || \ + useradd -r -g %{service} -d %{_sharedstatedir}/%{service} -s /sbin/nologin \ + -c "OpenStack Neutron Daemons" %{service} +exit 0 + + +%post +%systemd_post neutron-dhcp-agent.service +%systemd_post neutron-l3-agent.service +%systemd_post 
neutron-metadata-agent.service +%systemd_post neutron-server.service +%systemd_post neutron-netns-cleanup.service +%systemd_post neutron-ovs-cleanup.service +%systemd_post neutron-linuxbridge-cleanup.service + + +%preun +%systemd_preun neutron-dhcp-agent.service +%systemd_preun neutron-l3-agent.service +%systemd_preun neutron-metadata-agent.service +%systemd_preun neutron-server.service +%systemd_preun neutron-netns-cleanup.service +%systemd_preun neutron-ovs-cleanup.service +%systemd_preun neutron-linuxbridge-cleanup.service + + +%postun +%systemd_postun_with_restart neutron-dhcp-agent.service +%systemd_postun_with_restart neutron-l3-agent.service +%systemd_postun_with_restart neutron-metadata-agent.service +%systemd_postun_with_restart neutron-server.service +%cleanup_orphan_rootwrap_daemons + + +%post macvtap-agent +%systemd_post neutron-macvtap-agent.service + + +%preun macvtap-agent +%systemd_preun neutron-macvtap-agent.service + + +%postun macvtap-agent +%systemd_postun_with_restart neutron-macvtap-agent.service +%cleanup_orphan_rootwrap_daemons + + +%post linuxbridge +%systemd_post neutron-linuxbridge-agent.service + + +%preun linuxbridge +%systemd_preun neutron-linuxbridge-agent.service + + +%postun linuxbridge +%systemd_postun_with_restart neutron-linuxbridge-agent.service +%cleanup_orphan_rootwrap_daemons + +%post openvswitch +%systemd_post neutron-openvswitch-agent.service + +if [ $1 -ge 2 ]; then + # We're upgrading + + # Detect if the neutron-openvswitch-agent is running + ovs_agent_running=0 + systemctl status neutron-openvswitch-agent > /dev/null 2>&1 && ovs_agent_running=1 || : + + # If agent is running, stop it + [ $ovs_agent_running -eq 1 ] && systemctl stop neutron-openvswitch-agent > /dev/null 2>&1 || : + + # Search all orphaned neutron-rootwrap-daemon processes and since all are triggered by sudo, + # get the actual rootwrap-daemon process. 
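+    # (Illustrative sketch only, not the actual macro body, assuming the
+    # orphans re-parent to init:
+    #   pgrep -f neutron-rootwrap-daemon | while read pid; do
+    #       [ "$(ps -o ppid= -p $pid | tr -d ' ')" = "1" ] && kill $pid
+    #   done
+    # so daemons left behind by a stopped agent do not keep running old code.)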
+ %cleanup_orphan_rootwrap_daemons + + # If agent was running, start it back with new code + [ $ovs_agent_running -eq 1 ] && systemctl start neutron-openvswitch-agent > /dev/null 2>&1 || : +fi + + +%preun openvswitch +%systemd_preun neutron-openvswitch-agent.service + + +%post metering-agent +%systemd_post neutron-metering-agent.service + + +%preun metering-agent +%systemd_preun neutron-metering-agent.service + + +%postun metering-agent +%systemd_postun_with_restart neutron-metering-agent.service +%cleanup_orphan_rootwrap_daemons + + +%post sriov-nic-agent +%systemd_post neutron-sriov-nic-agent.service + + +%preun sriov-nic-agent +%systemd_preun neutron-sriov-nic-agent.service + + +%postun sriov-nic-agent +%systemd_postun_with_restart neutron-sriov-nic-agent.service +%cleanup_orphan_rootwrap_daemons + + +%files +%license LICENSE +%{_bindir}/neutron-api +%{_bindir}/neutron-db-manage +%{_bindir}/neutron-debug +%{_bindir}/neutron-dhcp-agent +%{_bindir}/neutron-ipset-cleanup +%{_bindir}/neutron-keepalived-state-change +%{_bindir}/neutron-l3-agent +%{_bindir}/neutron-linuxbridge-cleanup +%{_bindir}/neutron-metadata-agent +%{_bindir}/neutron-netns-cleanup +%{_bindir}/neutron-ovs-cleanup +%{_bindir}/neutron-pd-notify +%{_bindir}/neutron-sanity-check +%{_bindir}/neutron-server +%{_bindir}/neutron-usage-audit +%{_unitdir}/neutron-dhcp-agent.service +%{_unitdir}/neutron-l3-agent.service +%{_unitdir}/neutron-metadata-agent.service +%{_unitdir}/neutron-server.service +%{_unitdir}/neutron-netns-cleanup.service +%{_unitdir}/neutron-ovs-cleanup.service +%{_unitdir}/neutron-linuxbridge-cleanup.service +%attr(-, root, %{service}) %{_sysconfdir}/%{service}/api-paste.ini +%dir %{_datadir}/%{service}/l3_agent +%dir %{_datadir}/%{service}/server +%{_datadir}/%{service}/l3_agent/*.conf +%config(noreplace) %attr(0640, root, %{service}) %{_sysconfdir}/%{service}/dhcp_agent.ini +%config(noreplace) %attr(0640, root, %{service}) %{_sysconfdir}/%{service}/l3_agent.ini +%config(noreplace) %attr(0640, root, %{service}) %{_sysconfdir}/%{service}/metadata_agent.ini +%config(noreplace) %attr(0640, root, %{service}) %{_sysconfdir}/%{service}/policy.json +%config(noreplace) %attr(0640, root, %{service}) %{_sysconfdir}/%{service}/pmon/neutron-dhcp-agent.conf +%config(noreplace) %attr(0640, root, %{service}) %{_sysconfdir}/%{service}/pmon/neutron-metadata-agent.conf +%dir %{_sysconfdir}/%{service}/conf.d/%{service}-dhcp-agent +%dir %{_sysconfdir}/%{service}/conf.d/%{service}-l3-agent +%dir %{_sysconfdir}/%{service}/conf.d/%{service}-metadata-agent +%dir %{_sysconfdir}/%{service}/conf.d/%{service}-server +%dir %{_sysconfdir}/%{service}/conf.d/%{service}-netns-cleanup +%dir %{_sysconfdir}/%{service}/conf.d/%{service}-ovs-cleanup +%dir %{_sysconfdir}/%{service}/conf.d/%{service}-linuxbridge-cleanup +%{_sysconfdir}/init.d/%{service}-server +%{_sysconfdir}/init.d/%{service}-dhcp-agent +%{_sysconfdir}/init.d/%{service}-metadata-agent + + +%files -n python-%{service}-tests +%license LICENSE +%{python2_sitelib}/%{service}/tests +%{python2_sitelib}/%{service}_tests.egg-info + +%files -n python-%{service} +%license LICENSE +%{python2_sitelib}/%{service} +%{python2_sitelib}/%{service}-*.egg-info +%exclude %{python2_sitelib}/%{service}/tests + + +%files common -f %{service}.lang +%license LICENSE +%doc README.rst +# though this script is not exactly needed on all nodes but for ovs and +# linuxbridge agents only, it's probably good enough to put it here +#%{_bindir}/neutron-enable-bridge-firewall.sh +%{_bindir}/neutron-restart 
+%{_bindir}/neutron-rootwrap +%{_bindir}/neutron-rootwrap-daemon +%{_bindir}/neutron-rootwrap-xen-dom0 +%dir %{_sysconfdir}/%{service} +%{_sysconfdir}/%{service}/conf.d/README +%dir %{_sysconfdir}/%{service}/conf.d +%dir %{_sysconfdir}/%{service}/conf.d/common +%dir %{_sysconfdir}/%{service}/plugins +%attr(-, root, %{service}) %{_datadir}/%{service}/%{service}-dist.conf +%config(noreplace) %attr(0640, root, %{service}) %{_sysconfdir}/%{service}/%{service}.conf +%config(noreplace) %{_sysconfdir}/%{service}/rootwrap.conf +#%config(noreplace) %{_sysconfdir}/logrotate.d/* +%{_sysconfdir}/sudoers.d/%{service} +%dir %attr(0755, %{service}, %{service}) %{_sharedstatedir}/%{service} +%dir %attr(0750, %{service}, %{service}) %{_localstatedir}/log/%{service} +%dir %{_datarootdir}/%{service} +%dir %{_datarootdir}/%{service}/rootwrap +%{_datarootdir}/%{service}/rootwrap/debug.filters +%{_datarootdir}/%{service}/rootwrap/dhcp.filters +%{_datarootdir}/%{service}/rootwrap/dibbler.filters +%{_datarootdir}/%{service}/rootwrap/ebtables.filters +%{_datarootdir}/%{service}/rootwrap/ipset-firewall.filters +%{_datarootdir}/%{service}/rootwrap/iptables-firewall.filters +%{_datarootdir}/%{service}/rootwrap/l3.filters +%{_datarootdir}/%{service}/rootwrap/netns-cleanup.filters + + +#%files linuxbridge +#%license LICENSE +%{_bindir}/neutron-linuxbridge-agent +%{_unitdir}/neutron-linuxbridge-agent.service +%{_datarootdir}/%{service}/rootwrap/linuxbridge-plugin.filters +#%dir %{_sysconfdir}/%{service}/plugins/ml2 +#%config(noreplace) %attr(0640, root, %{service}) %{_sysconfdir}/%{service}/plugins/ml2/linuxbridge_agent.ini +#%dir %{_sysconfdir}/%{service}/conf.d/%{service}-linuxbridge-agent +#%{_sysctldir}/99-neutron-linuxbridge-agent.conf +#%{_sysconfdir}/sysconfig/modules/neutron-linuxbridge-agent.modules + + +%files macvtap-agent +%license LICENSE +%{_bindir}/neutron-macvtap-agent +%{_unitdir}/neutron-macvtap-agent.service +%dir %{_sysconfdir}/%{service}/conf.d/%{service}-macvtap-agent + + +%files ml2 +%license LICENSE +%doc %{service}/plugins/ml2/README +%dir %{_sysconfdir}/%{service}/plugins/ml2 +%config(noreplace) %attr(0640, root, %{service}) %{_sysconfdir}/%{service}/plugins/ml2/*.ini +%exclude %{_sysconfdir}/%{service}/plugins/ml2/linuxbridge_agent.ini +%exclude %{_sysconfdir}/%{service}/plugins/ml2/openvswitch_agent.ini +%exclude %{_sysconfdir}/%{service}/plugins/ml2/sriov_agent.ini + + +%files openvswitch +%license LICENSE +%{_bindir}/neutron-openvswitch-agent +%{_unitdir}/neutron-openvswitch-agent.service +%{_datarootdir}/%{service}/rootwrap/openvswitch-plugin.filters +%dir %{_sysconfdir}/%{service}/plugins/ml2 +%config(noreplace) %attr(0640, root, %{service}) %{_sysconfdir}/%{service}/plugins/ml2/openvswitch_agent.ini +%dir %{_sysconfdir}/%{service}/conf.d/%{service}-openvswitch-agent +#%{_sysctldir}/99-neutron-openvswitch-agent.conf +#%{_sysconfdir}/sysconfig/modules/neutron-openvswitch-agent.modules + + +%files metering-agent +%license LICENSE +%config(noreplace) %attr(0640, root, %{service}) %{_sysconfdir}/%{service}/metering_agent.ini +%{_unitdir}/neutron-metering-agent.service +%{_bindir}/neutron-metering-agent +%dir %{_sysconfdir}/%{service}/conf.d/%{service}-metering-agent + + +%files rpc-server +%license LICENSE +%{_bindir}/neutron-rpc-server +%{_unitdir}/neutron-rpc-server.service +%dir %{_sysconfdir}/%{service}/conf.d/%{service}-rpc-server + + +%files sriov-nic-agent +%license LICENSE +%{_unitdir}/neutron-sriov-nic-agent.service +%{_bindir}/neutron-sriov-nic-agent +%config(noreplace) %attr(0640, 
root, %{service}) %{_sysconfdir}/%{service}/plugins/ml2/sriov_agent.ini +%config(noreplace) %attr(0640, root, %{service}) %{_sysconfdir}/%{service}/pmon/neutron-sriov-nic-agent.conf +%dir %{_sysconfdir}/%{service}/conf.d/%{service}-sriov-nic-agent +%{_sysconfdir}/init.d/%{service}-sriov-nic-agent + + +%changelog +* Mon Sep 25 2017 rdo-trunk 1:11.0.1-1 +- Update to 11.0.1 + +* Mon Sep 4 2017 Haïkel Guémar - 1:11.0.0-3 +- Bump python-pyroute2 (rhbz#1487766) + +* Sat Sep 2 2017 Assaf Muller 1:11.0.0-2 +- Bump python-pyroute2, rhbz 1487766 + +* Wed Aug 30 2017 rdo-trunk 1:11.0.0-1 +- Update to 11.0.0 + +* Fri Aug 25 2017 rdo-trunk 1:11.0.0-0.3.0rc3 +- Update to 11.0.0.0rc3 + +* Fri Aug 25 2017 Alfredo Moralejo 1:11.0.0-0.2.0rc2 +- Update to 11.0.0.0rc2 + +* Tue Aug 22 2017 Alfredo Moralejo 1:11.0.0-0.1.0rc1 +- Update to 11.0.0.0rc1 + diff --git a/openstack/python-neutron/python-neutron/neutron-agent.init b/openstack/python-neutron/python-neutron/neutron-agent.init new file mode 100644 index 00000000..2e8cfa2c --- /dev/null +++ b/openstack/python-neutron/python-neutron/neutron-agent.init @@ -0,0 +1,87 @@ +#! /bin/sh + +### BEGIN INIT INFO +# Required-Start: $remote_fs $syslog +# Required-Stop: $remote_fs $syslog +# Default-Start: 2 3 5 +# Default-Stop: 0 1 6 +# Description: Neutron networking agent +### END INIT INFO + +SUFFIX=@suffix@ +DESC="neutron-$SUFFIX-agent" +DAEMON="/usr/bin/neutron-$SUFFIX-agent" +PIDFILE="/var/run/neutron-$SUFFIX-agent.pid" +DAEMON_ARGS="@args@" + +start() +{ + if [ -e $PIDFILE ]; then + PIDDIR=/proc/$(cat $PIDFILE) + if [ -d ${PIDDIR} ]; then + echo "$DESC already running." + exit 1 + else + echo "Removing stale PID file $PIDFILE" + rm -f $PIDFILE + fi + fi + + echo -n "Starting $DESC..." + + start-stop-daemon --start --quiet --background \ + --pidfile ${PIDFILE} --make-pidfile --exec ${DAEMON} \ + -- ${DAEMON_ARGS} + + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." + fi +} + +stop() +{ + echo -n "Stopping $DESC..." + start-stop-daemon --stop --quiet --pidfile $PIDFILE + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." + fi + rm -f $PIDFILE +} + +status() +{ + pid=`cat $PIDFILE 2>/dev/null` + if [ -n "$pid" ]; then + if ps -p $pid &>/dev/null ; then + echo "$DESC is running" + return + fi + fi + echo "$DESC is not running" +} + +case "$1" in + start) + start + ;; + stop) + stop + ;; + restart|force-reload|reload|reset) + stop + start + ;; + status) + status + ;; + *) + echo "Usage: $0 {start|stop|force-reload|restart|reload|status|reset}" + exit 1 + ;; +esac + +exit 0 diff --git a/openstack/python-neutron/python-neutron/neutron-server.init b/openstack/python-neutron/python-neutron/neutron-server.init new file mode 100644 index 00000000..dd9631a0 --- /dev/null +++ b/openstack/python-neutron/python-neutron/neutron-server.init @@ -0,0 +1,137 @@ +#!/bin/sh + +### BEGIN INIT INFO +# Provides: neutron-server +# Required-Start: $remote_fs $syslog +# Required-Stop: $remote_fs $syslog +# Default-Start: 2 3 5 +# Default-Stop: 0 1 6 +# Short-Description: neutron-server +# Description: Provides the Neutron networking service +### END INIT INFO + +DESC="neutron-server" +DAEMON="/usr/bin/neutron-server" +PIDFILE="/var/run/neutron-server.pid" +DAEMON_ARGS="--config-file=/etc/neutron/neutron.conf \ + --config-file=@plugin@ --config-file=@sriovplugin@" + +start() +{ + if [ -e $PIDFILE ]; then + PIDDIR=/proc/$(cat $PIDFILE) + if [ -d ${PIDDIR} ]; then + echo "$DESC already running." 
+ exit 1 + else + echo "Removing stale PID file $PIDFILE" + rm -f $PIDFILE + fi + fi + + echo -n "Starting $DESC..." + + start-stop-daemon --start --quiet --background \ + --pidfile ${PIDFILE} --make-pidfile --exec ${DAEMON} \ + -- $DAEMON_ARGS + + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." + fi +} + +stop() +{ + echo -n "Stopping $DESC..." + start-stop-daemon --stop --quiet --pidfile $PIDFILE + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." + fi + rm -f $PIDFILE +} + +status() +{ + pid=`cat $PIDFILE 2>/dev/null` + if [ -n "$pid" ]; then + if ps -p $pid &>/dev/null ; then + echo "$DESC is running" + return + fi + fi + echo "$DESC is not running" +} + +reset() +{ + . /etc/nova/openrc + + # Cleanup all neutron floating ip + simple_delete "neutron floatingip-list --all-tenant" "neutron floatingip-delete" 1 "neutron floatingip" + + # Cleanup all neutron router + neutron router-list | while read line; do + router_id=`echo $line | get_field 1` + neutron router-port-list $router_id | while read line_port; do + port_id=`echo $line_port | get_field 1` + subnet_id=`echo $line_port | get_field 4 | cut -d ' ' -f 2 | cut -d '"' -f 2` + if [ ! -z "$router_id" ] && [ ! -z "$subnet_id" ] ; then + echo ">>> Delete router-port: router_id=$router_id, port_id=$port_id, subnet_id=$subnet_id" + neutron router-interface-delete $router_id $subnet_id > /dev/null 2>&1 + fi + done + if [ ! -z "$router_id" ] ; then + echo ">>> Delete router: router_id=$router_id" + neutron router-delete $router_id > /dev/null 2>&1 + fi + done + + # Cleanup all neutron ports + simple_delete "neutron port-list --all-tenant" "neutron port-delete" 1 "neutron port" + + # Cleanup all neutron net + simple_delete "neutron net-list --all-tenant" "neutron net-delete" 1 "neutron net" + + stop + + # This is to make sure postgres is configured and running + if ! pidof postmaster > /dev/null; then + /etc/init.d/postgresql-init + /etc/init.d/postgresql start + sleep 2 + fi + + sudo -u postgres dropdb ovs_neutron + sudo -u postgres createdb ovs_neutron + + start +} + +case "$1" in + start) + start + ;; + stop) + stop + ;; + restart|force-reload|reload) + stop + start + ;; + status) + status + ;; + reset) + reset + ;; + *) + echo "Usage: $0 {start|stop|force-reload|restart|reload|status|reset}" + exit 1 + ;; +esac + +exit 0 diff --git a/openstack/python-neutron/python-neutron/neutron-src-dist-files.exclude b/openstack/python-neutron/python-neutron/neutron-src-dist-files.exclude new file mode 100644 index 00000000..c98406e9 --- /dev/null +++ b/openstack/python-neutron/python-neutron/neutron-src-dist-files.exclude @@ -0,0 +1,5 @@ +*.pyc +neutron/plugins/wrs/drivers/qos.py +neutron/plugins/wrs/drivers/type_managed_flat.py +neutron/plugins/wrs/drivers/type_managed_vlan.py +neutron/plugins/wrs/drivers/type_managed_vxlan.py diff --git a/openstack/python-neutron/python-neutron_meta-cloud-services/neutron-dhcp-agent-netns-cleanup.cron b/openstack/python-neutron/python-neutron_meta-cloud-services/neutron-dhcp-agent-netns-cleanup.cron new file mode 100644 index 00000000..0ebd6c52 --- /dev/null +++ b/openstack/python-neutron/python-neutron_meta-cloud-services/neutron-dhcp-agent-netns-cleanup.cron @@ -0,0 +1,2 @@ +# Periodically cleans Neutron's network namespaces on behalf of the Neutron DHCP agent. 
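+# (Field layout for reference: minute hour day-of-month month day-of-week
+# user command; i.e. this entry runs hourly at minute 30 as the "neutron"
+# user. The @bindir@ and @confdir@ tokens are placeholders, presumably
+# substituted when the package is built.)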
+30 * * * * neutron if [ -x @bindir@/neutron-netns-cleanup ] ; then @bindir@/neutron-netns-cleanup --config-file=@confdir@/neutron/neutron.conf --config-file=@confdir@/neutron/dhcp_agent.ini >/dev/null 2>&1; fi diff --git a/openstack/python-neutronclient/centos/build_srpm.data b/openstack/python-neutronclient/centos/build_srpm.data new file mode 100644 index 00000000..f5384f29 --- /dev/null +++ b/openstack/python-neutronclient/centos/build_srpm.data @@ -0,0 +1,4 @@ +TAR_NAME=python-neutronclient +SRC_DIR="$CGCS_BASE/git/python-neutronclient" +TIS_BASE_SRCREV=e145c4ef8a0e8390f0468df422a757760e77f823 +TIS_PATCH_VER=GITREVCOUNT diff --git a/openstack/python-neutronclient/centos/python-neutronclient.spec b/openstack/python-neutronclient/centos/python-neutronclient.spec new file mode 100644 index 00000000..dd7f8328 --- /dev/null +++ b/openstack/python-neutronclient/centos/python-neutronclient.spec @@ -0,0 +1,190 @@ +%{!?upstream_version: %global upstream_version %{version}%{?milestone}} + +%global sname neutronclient + +%if 0%{?fedora} +%global with_python3 1 +%endif + +Name: python-neutronclient +Version: 6.5.0 +Release: 1%{?_tis_dist}.%{tis_patch_ver} +Summary: Python API and CLI for OpenStack Neutron + +License: ASL 2.0 +URL: http://launchpad.net/python-neutronclient/ +Source0: %{name}-%{version}.tar.gz + +BuildArch: noarch + +Obsoletes: python-%{sname}-tests <= 4.1.1-3 + +%description +Client library and command line utility for interacting with OpenStack +Neutron's API. + +%package -n python2-%{sname} +Summary: Python API and CLI for OpenStack Neutron +%{?python_provide:%python_provide python2-neutronclient} + +BuildRequires: git +BuildRequires: python2-devel +BuildRequires: python-setuptools +BuildRequires: python-pbr + +Requires: python-babel >= 2.3.4 +Requires: python-cliff >= 2.8.0 +Requires: python-dateutil +Requires: python-iso8601 >= 0.1.11 +Requires: python-netaddr >= 0.7.13 +Requires: python-os-client-config >= 1.28.0 +Requires: python-oslo-i18n >= 2.1.0 +Requires: python-oslo-serialization >= 1.10.0 +Requires: python-oslo-utils >= 3.20.0 +Requires: python-pbr +Requires: python-requests >= 2.10.0 +Requires: python-simplejson >= 2.2.0 +Requires: python-six >= 1.9.0 +Requires: python-debtcollector >= 1.2.0 +Requires: python-osc-lib >= 1.7.0 +Requires: python-keystoneauth1 >= 3.1.0 +Requires: python-keystoneclient >= 1:3.8.0 + +%description -n python2-%{sname} +Client library and command line utility for interacting with OpenStack +Neutron's API. + +%if 0%{?with_python3} +%package -n python3-%{sname} +Summary: Python API and CLI for OpenStack Neutron +%{?python_provide:%python_provide python3-neutronclient} + +BuildRequires: python3-devel +BuildRequires: python3-setuptools +BuildRequires: python3-pbr + +Requires: python3-babel >= 2.3.4 +Requires: python3-cliff >= 2.8.0 +Requires: python3-iso8601 >= 0.1.11 +Requires: python3-netaddr >= 0.7.13 +Requires: python3-os-client-config >= 1.28.0 +Requires: python3-oslo-i18n >= 2.1.0 +Requires: python3-oslo-serialization >= 1.10.0 +Requires: python3-oslo-utils >= 3.20.0 +Requires: python3-pbr +Requires: python3-requests >= 2.10.0 +Requires: python3-simplejson >= 2.2.0 +Requires: python3-six >= 1.9.0 +Requires: python3-debtcollector >= 1.2.0 +Requires: python3-osc-lib >= 1.7.0 +Requires: python3-keystoneauth1 >= 3.1.0 +Requires: python3-keystoneclient >= 1:3.8.0 + +%description -n python3-%{sname} +Client library and command line utility for interacting with OpenStack +Neutron's API. 
+%endif + +%package doc +Summary: Documentation for OpenStack Neutron API Client + +BuildRequires: python-dateutil +BuildRequires: python-sphinx +BuildRequires: python-oslo-sphinx +BuildRequires: python-openstackdocstheme +BuildRequires: python-reno +BuildRequires: python-cliff +BuildRequires: python-keystoneauth1 +BuildRequires: python-keystoneclient +BuildRequires: python-os-client-config +BuildRequires: python-osc-lib +BuildRequires: python-oslo-serialization +BuildRequires: python-oslo-utils + +%description doc +Client library and command line utility for interacting with OpenStack +Neutron's API. + + +%package sdk +Summary: SDK files for %{name} + +%description sdk +Contains SDK files for %{name} package + + +%prep +%autosetup -n %{name}-%{upstream_version} -S git + +# Let RPM handle the dependencies +rm -f test-requirements.txt requirements.txt + +%build +export PBR_VERSION=%{version} +%py2_build +%if 0%{?with_python3} +%py3_build +%endif + +%install +export PBR_VERSION=%{version} +%if 0%{?with_python3} +%py3_install +mv %{buildroot}%{_bindir}/neutron %{buildroot}%{_bindir}/neutron-%{python3_version} +ln -s ./neutron-%{python3_version} %{buildroot}%{_bindir}/neutron-3 +# Delete tests +rm -fr %{buildroot}%{python3_sitelib}/neutronclient/tests +%endif + +%py2_install +mv %{buildroot}%{_bindir}/neutron %{buildroot}%{_bindir}/neutron-%{python2_version} +ln -s ./neutron-%{python2_version} %{buildroot}%{_bindir}/neutron-2 + +ln -s ./neutron-2 %{buildroot}%{_bindir}/neutron + +mkdir -p %{buildroot}%{_sysconfdir}/bash_completion.d +install -pm 644 tools/neutron.bash_completion \ + %{buildroot}%{_sysconfdir}/bash_completion.d/neutron + +# Delete tests +rm -fr %{buildroot}%{python2_sitelib}/neutronclient/tests + +%{__python2} setup.py build_sphinx -b html + +# prep SDK package +mkdir -p %{buildroot}/usr/share/remote-clients/%{name} +tar zcf %{buildroot}/usr/share/remote-clients/%{name}/%{name}-%{version}.tgz --exclude='.gitignore' --exclude='.gitreview' -C .. 
%{name}-%{version} + + +%files -n python2-%{sname} +%doc README.rst +%license LICENSE +%{python2_sitelib}/neutronclient +%{python2_sitelib}/*.egg-info +%{_sysconfdir}/bash_completion.d +%{_bindir}/neutron +%{_bindir}/neutron-2 +%{_bindir}/neutron-%{python2_version} + +%if 0%{?with_python3} +%files -n python3-%{sname} +%license LICENSE +%doc README.rst +%{python3_sitelib}/%{sname} +%{python3_sitelib}/*.egg-info +%{_sysconfdir}/bash_completion.d +%{_bindir}/neutron-3 +%{_bindir}/neutron-%{python3_version} +%endif + +%files doc +%doc doc/build/html +%license LICENSE + +%files sdk +/usr/share/remote-clients/%{name}/%{name}-%{version}.tgz + +%changelog +* Mon Aug 14 2017 Alfredo Moralejo 6.5.0-1 +- Update to 6.5.0 + diff --git a/openstack/python-nova/centos/build_srpm.data b/openstack/python-nova/centos/build_srpm.data new file mode 100644 index 00000000..c143dc82 --- /dev/null +++ b/openstack/python-nova/centos/build_srpm.data @@ -0,0 +1,6 @@ +TAR_NAME="nova" +SRC_DIR="$CGCS_BASE/git/nova" +COPY_LIST="$FILES_BASE/* python-nova/*" + +TIS_BASE_SRCREV=b535f0808526c8eba37f15e83cede536e4e06029 +TIS_PATCH_VER=GITREVCOUNT diff --git a/openstack/python-nova/centos/files/kvm_timer_advance_setup.service b/openstack/python-nova/centos/files/kvm_timer_advance_setup.service new file mode 100644 index 00000000..c81dabcf --- /dev/null +++ b/openstack/python-nova/centos/files/kvm_timer_advance_setup.service @@ -0,0 +1,14 @@ +[Unit] +Description=KVM Timer Advance Setup +After=openstack-nova-compute-setup.service +Before=nova-compute.service goenabled-compute.service + +[Service] +Type=simple +RemainAfterExit=yes +User=root +ExecStart=/usr/bin/nova_setup_timer_advance + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/python-nova/centos/files/nova-clean-thinpool.service b/openstack/python-nova/centos/files/nova-clean-thinpool.service new file mode 100644 index 00000000..5cd5c47b --- /dev/null +++ b/openstack/python-nova/centos/files/nova-clean-thinpool.service @@ -0,0 +1,13 @@ +[Unit] +Description=Clean nova-local thinpool +Before=computeconfig.service + +[Service] +Type=oneshot +RemainAfterExit=yes +User=root +ExecStart=/usr/bin/nova_clean_thinpool start + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/python-nova/centos/files/nova-compute.init b/openstack/python-nova/centos/files/nova-compute.init new file mode 100644 index 00000000..7d434b75 --- /dev/null +++ b/openstack/python-nova/centos/files/nova-compute.init @@ -0,0 +1,95 @@ +#!/bin/sh + +# This is a backwards compatibility hack to allow an /etc/init.d/nova-compute +# script that will still start up nova-compute but also allow the "service" +# mechanism to properly track the run state of the service. + +. /etc/init.d/task_affinity_functions.sh + +function log () +{ + logger -p local1.info -t nova-compute $@ + echo nova-compute: "$@" +} + +# WRS: nova-compute may be started only by nova-cleanup or nova-startup. +# Initial start via runlevel nova-init -> /etc/init.d/nova-cleanup -> /etc/init.d/nova-compute +# Restart via pmond monitor -> /etc/init.d/nova-startup -> /etc/init.d/nova-compute +function check_caller () +{ + caller=$(pstree -s -p $$) + if echo "${caller}" | grep -q -v -e "nova-cleanup" -e "nova-startup"; then + log "Call $0 ignored, expecting caller: nova-cleanup or nova-startup. Caller=${caller}" + exit 0 + fi +} + +start () +{ + if [ ! -d /var/log/nova ]; then + mkdir /var/log/nova + fi + + log "Reaffining tasks back to platform cores..." + affine_tasks_to_platform_cores + [[ $? 
-eq 0 ]] && log "Tasks re-affining done." || log "Tasks re-affining failed." + + echo -n "Starting nova-compute..." + systemctl start nova-compute.service + + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." + fi +} + +stop () +{ + echo -n "Stopping nova-compute..." + systemctl stop nova-compute.service + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." + fi + rm -f $PIDFILE +} + +status() +{ + systemctl status nova-compute.service +} + +reset() +{ + : +} + +case "$1" in + start) + check_caller + start + ;; + stop) + check_caller + stop + ;; + restart|force-reload|reload) + stop + start + ;; + status) + status + ;; + reset) + reset + ;; + *) + echo "Usage: $0 {start|stop|force-reload|restart|reload|status|reset}" + exit 1 + ;; +esac + +exit 0 + diff --git a/openstack/python-nova/centos/files/nova-dist.conf b/openstack/python-nova/centos/files/nova-dist.conf new file mode 100644 index 00000000..8ada131b --- /dev/null +++ b/openstack/python-nova/centos/files/nova-dist.conf @@ -0,0 +1,25 @@ +[DEFAULT] +#log_dir = /var/log/nova +state_path = /var/lib/nova +lock_path = /var/lib/nova/tmp +dhcpbridge = /usr/bin/nova-dhcpbridge +dhcpbridge_flagfile = /usr/share/nova/nova-dist.conf +dhcpbridge_flagfile = /etc/nova/nova.conf +force_dhcp_release = True +injected_network_template = /usr/share/nova/interfaces.template +libvirt_nonblocking = True +libvirt_inject_partition = -1 +network_manager = nova.network.manager.FlatDHCPManager +compute_driver = libvirt.LibvirtDriver +firewall_driver = nova.virt.libvirt.firewall.IptablesFirewallDriver +rootwrap_config = /etc/nova/rootwrap.conf +use_stderr = False + +[database] +connection = mysql://nova:nova@localhost/nova +max_retries = -1 + +[keystone_authtoken] +auth_host = 127.0.0.1 +auth_port = 35357 +auth_protocol = http diff --git a/openstack/python-nova/centos/files/nova-ifc-template b/openstack/python-nova/centos/files/nova-ifc-template new file mode 100644 index 00000000..7d1d28b1 --- /dev/null +++ b/openstack/python-nova/centos/files/nova-ifc-template @@ -0,0 +1,15 @@ +DEVICE="{{ name }}" +NM_CONTROLLED="no" +ONBOOT=yes +TYPE=Ethernet +BOOTPROTO=static +IPADDR={{ address }} +NETMASK={{ netmask }} +BROADCAST={{ broadcast }} +GATEWAY={{ gateway }} +DNS1={{ dns }} + +#if $use_ipv6 +IPV6INIT=yes +IPV6ADDR={{ address_v6 }} +#end if diff --git a/openstack/python-nova/centos/files/nova-migration-wrapper b/openstack/python-nova/centos/files/nova-migration-wrapper new file mode 100644 index 00000000..fab54a7f --- /dev/null +++ b/openstack/python-nova/centos/files/nova-migration-wrapper @@ -0,0 +1,65 @@ +#!/usr/bin/python2 +import os +import sys +import syslog + +command = os.environ.get('SSH_ORIGINAL_COMMAND') +ssh_connection = os.environ.get('SSH_CONNECTION') +if command is None: + sys.stderr.write('This command must be run via SSH ForceCommand (see man 5 sshd_config).\n') + sys.exit(1) + +syslog.openlog('nova_migration_wrapper') + +def allow_command(user, args): + syslog.syslog(syslog.LOG_INFO, "Allowing connection='{}' command={} ".format( + ssh_connection, + repr(args) + )) + os.execlp('sudo', 'sudo', '-u', user, *args) + +def deny_command(args): + syslog.syslog(syslog.LOG_ERR, "Denying connection='{}' command={}".format( + ssh_connection, + repr(args) + )) + sys.stderr.write('Forbidden\n') + sys.exit(1) + +# Handle libvirt ssh tunnel script snippet +# https://github.com/libvirt/libvirt/blob/f0803dae93d62a4b8a2f67f4873c290a76d978b3/src/rpc/virnetsocket.c#L890 +libvirt_sock = '/var/run/libvirt/libvirt-sock' +live_migration_tunnel_cmd = 
"sh -c 'if 'nc' -q 2>&1 | grep \"requires an argument\" >/dev/null 2>&1; then " \ + "ARG=-q0;" \ + "else " \ + "ARG=;" \ + "fi;" \ + "'nc' $ARG -U {}'".format(libvirt_sock) + +cold_migration_root = '/var/lib/nova/instances/' +cold_migration_cmds = [ + ['mkdir', '-p'], + ['rm', '-rf'], + ['touch'], + ['rm'], + ['scp', '-r', '-t'], + ['scp', '-r', '-f'], + ['scp', '-t'], + ['scp', '-f'], +] +rootwrap_args = ['/usr/bin/nova-rootwrap', '/etc/nova/migration/rootwrap.conf'] + +def validate_cold_migration_cmd(args): + target_path = os.path.normpath(args[-1]) + cmd = args[:-1] + return cmd in cold_migration_cmds and target_path.startswith(cold_migration_root) + +# Rules +args = command.split(' ') +if command == live_migration_tunnel_cmd: + args = ['nc', '-U', libvirt_sock] + allow_command('nova', args) +if validate_cold_migration_cmd(args): + args = rootwrap_args + args + allow_command('root', args) +deny_command(args) diff --git a/openstack/python-nova/centos/files/nova-pci-interrupts b/openstack/python-nova/centos/files/nova-pci-interrupts new file mode 100644 index 00000000..9fd90840 --- /dev/null +++ b/openstack/python-nova/centos/files/nova-pci-interrupts @@ -0,0 +1,108 @@ +#!/bin/bash +################################################################################ +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +################################################################################ +# +# Purpose: +# Query nova configured PCI devices and display related IRQ cpulist info. +# +# Usage: +# /usr/bin/nova-pci-interrupts.sh +# +# Define minimal path +PATH=/bin:/usr/bin:/usr/local/bin:/usr/sbin + +# logger setup +WHOAMI=`basename $0` + +function LOG () +{ + local tstamp_H=$( date +"%Y-%0m-%0e %H:%M:%S.%N" ) + echo -e "${tstamp_H} ${HOSTNAME} $0($$): $@"; +} + +function INFO() +{ + MSG="INFO" + LOG "${MSG} $@" +} +function ERROR() +{ + MSG="ERROR" + LOG "${MSG} $@" +} + + +# Require root to access all /proc and /sys details (e.g., smp_affinity_cpulist, etc) +if [ $UID -ne 0 ]; then + ERROR "require root or sudo" + exit 1 +fi + +# Define array of PCI addresses to display IRQ information +declare -a ADDRS +if [ "$#" -eq 0 ]; then + INFO "No PCI addrs specified. 
usage: $0 ...\n(querying nova configured pci devices on ${HOSTNAME})" + + source /etc/nova/openrc + TMPNAME=$(printf "/tmp/%s-tmp.%d" $(basename $0) $$) + CMD="nova list --all --host ${HOSTNAME} --fields=wrs-res:topology,wrs-res:pci_devices" + + INFO "nova configured PCI devices and associated IRQ cpulists:" + echo ${CMD} + ${CMD} 2>/dev/null > ${TMPNAME} + cat ${TMPNAME} + + # extract pci_addrs from field 'wrs-res:pci_devices' + ADDRS+=( $(cat ${TMPNAME} | awk '/addr:/ {match($0, "addr:([^,]*),", a); print a[1]}') ) + rm -f ${TMPNAME} + + INFO "Found: pci_addrs:" ${ADDRS[@]} +else + ADDRS+=( $@ ) +fi + +for pci_addr in ${ADDRS[@]}; do + # Find PCI device matching address, keep last matching device name + dev=$(find /sys/devices -name "${pci_addr}" | \ + perl -ne 'print $1 if /\/sys\/devices\/pci.*([[:xdigit:]]{4}:[[:xdigit:]]{2}:[[:xdigit:]]{2}\.[[:xdigit:]])$/;') + + if [ -z "${dev}" ] ; then + ERROR "cannot find pci_addr: ${pci_addr}" + continue + fi + + # Obtain all IRQs for this device + irq=$(cat /sys/bus/pci/devices/${dev}/irq 2>/dev/null) + if [ "${irq}" -eq 0 ]; then + irq="" + fi + msi_irqs=$(ls /sys/bus/pci/devices/${dev}/msi_irqs 2>/dev/null | xargs) + numa_node=$(cat /sys/bus/pci/devices/${dev}/numa_node 2>/dev/null | xargs) + uevent=$(cat /sys/bus/pci/devices/${dev}/uevent 2>/dev/null | xargs) + if [[ ${uevent} =~ PCI_ID=([^[:space:]]+):([^[:space:]]+) ]]; then + vendor_id=${BASH_REMATCH[1]} + product_id=${BASH_REMATCH[2],,} + else + vendor_id="" + product_id="" + fi + pci_info=$(lspci -s ${dev} 2>/dev/null) + INFO "addr:${dev} vendor:${vendor_id} product:${product_id} numa_node:${numa_node} irq:${irq} msi_irqs:${msi_irqs} ; ${pci_info}" + + # flatten list of irqs, removing duplicates + declare -a irqs=( $(echo "${irq} ${msi_irqs}" | xargs | tr ' ' '\n' | sort -nu) ) + for i in ${irqs[@]} + do + if [[ -e /proc/irq/${i} ]]; then + cpulist=$(cat /proc/irq/${i}/smp_affinity_list 2>/dev/null) + LOG "irq:${i} cpulist:${cpulist}" + fi + done +done + +exit 0 + diff --git a/openstack/python-nova/centos/files/nova-placement-api b/openstack/python-nova/centos/files/nova-placement-api new file mode 100755 index 00000000..7d761027 --- /dev/null +++ b/openstack/python-nova/centos/files/nova-placement-api @@ -0,0 +1,64 @@ +#!/usr/bin/python2 +#PBR Generated from u'wsgi_scripts' + +import threading + +from nova.api.openstack.placement.wsgi import init_application + +if __name__ == "__main__": + import argparse + import netaddr + import socket + import sys + import wsgiref.simple_server as wss + + parser = argparse.ArgumentParser( + description=init_application.__doc__, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + usage='%(prog)s [-h] [--port PORT] [--host IP] -- [passed options]') + parser.add_argument('--port', '-p', type=int, default=8000, + help='TCP port to listen on') + parser.add_argument('--host', '-b', default='', + help='IP to bind the server to') + parser.add_argument('args', + nargs=argparse.REMAINDER, + metavar='-- [passed options]', + help="'--' is the separator of the arguments used " + "to start the WSGI server and the arguments passed " + "to the WSGI application.") + args = parser.parse_args() + if args.args: + if args.args[0] == '--': + args.args.pop(0) + else: + parser.error("unrecognized arguments: %s" % ' '.join(args.args)) + sys.argv[1:] = args.args + + # WRS: In order to support IPv6, server_class.address_family must be set + # to the correct address family. Determine this from specified address. 
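+    # (For instance, netaddr.valid_ipv4("10.0.0.1") is True, selecting
+    # AF_INET, while "fd00::1" fails the IPv4 check and selects AF_INET6;
+    # note that a bare hostname also falls through to AF_INET6 here.)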
+    class server_class(wss.WSGIServer):
+        pass
+    if netaddr.valid_ipv4(args.host):
+        server_class.address_family = socket.AF_INET
+    else:
+        server_class.address_family = socket.AF_INET6
+
+    server = wss.make_server(args.host, args.port, init_application(),
+                             server_class=server_class)
+
+    print("*" * 80)
+    print("STARTING test server nova.api.openstack.placement.wsgi.init_application")
+    url = "http://%s:%d/" % (server.server_name, server.server_port)
+    print("Available at %s" % url)
+    print("*" * 80)
+    sys.stdout.flush()
+
+    server.serve_forever()
+else:
+    application = None
+    app_lock = threading.Lock()
+
+    with app_lock:
+        if application is None:
+            application = init_application()
+
diff --git a/openstack/python-nova/centos/files/nova-placement-api.conf b/openstack/python-nova/centos/files/nova-placement-api.conf
new file mode 100644
index 00000000..fe8c3ad2
--- /dev/null
+++ b/openstack/python-nova/centos/files/nova-placement-api.conf
@@ -0,0 +1,25 @@
+Listen 8778
+
+<VirtualHost *:8778>
+  WSGIProcessGroup nova-placement-api
+  WSGIApplicationGroup %{GLOBAL}
+  WSGIPassAuthorization On
+  WSGIDaemonProcess nova-placement-api processes=3 threads=1 user=nova group=nova
+  WSGIScriptAlias / /usr/bin/nova-placement-api
+  <IfVersion >= 2.4>
+    ErrorLogFormat "%M"
+  </IfVersion>
+  ErrorLog /var/log/nova/nova-placement-api.log
+  #SSLEngine On
+  #SSLCertificateFile ...
+  #SSLCertificateKeyFile ...
+</VirtualHost>
+
+Alias /nova-placement-api /usr/bin/nova-placement-api
+<Location /nova-placement-api>
+  SetHandler wsgi-script
+  Options +ExecCGI
+  WSGIProcessGroup nova-placement-api
+  WSGIApplicationGroup %{GLOBAL}
+  WSGIPassAuthorization On
+</Location>
\ No newline at end of file
diff --git a/openstack/python-nova/centos/files/nova-polkit.pkla b/openstack/python-nova/centos/files/nova-polkit.pkla
new file mode 100644
index 00000000..ae1467d6
--- /dev/null
+++ b/openstack/python-nova/centos/files/nova-polkit.pkla
@@ -0,0 +1,6 @@
+[Allow nova libvirt management permissions]
+Identity=unix-user:nova
+Action=org.libvirt.unix.manage
+ResultAny=yes
+ResultInactive=yes
+ResultActive=yes
diff --git a/openstack/python-nova/centos/files/nova-polkit.rules b/openstack/python-nova/centos/files/nova-polkit.rules
new file mode 100644
index 00000000..0146b81a
--- /dev/null
+++ b/openstack/python-nova/centos/files/nova-polkit.rules
@@ -0,0 +1,8 @@
+// openstack-nova libvirt management permissions
+
+polkit.addRule(function(action, subject) {
+    if (action.id == "org.libvirt.unix.manage" &&
+        subject.user == "nova") {
+        return polkit.Result.YES;
+    }
+});
diff --git a/openstack/python-nova/centos/files/nova-purge-deleted-active b/openstack/python-nova/centos/files/nova-purge-deleted-active
new file mode 100644
index 00000000..15898a82
--- /dev/null
+++ b/openstack/python-nova/centos/files/nova-purge-deleted-active
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#
+# Wrapper script to run "nova-manage db purge_deleted_instances" on the
+# active controller only. It also purges nova action events.
+#
+NOVA_PURGE_INFO="/var/run/nova-purge.info"
+NOVA_INSTANCE_PURGE_CMD="/usr/bin/nice -n 2 /usr/bin/nova-manage db purge_deleted_instances --older-than 1 >> /var/log/nova/nova-rowspurge.log 2>&1"
+NOVA_EVENT_PURGE_CMD="/usr/bin/nice -n 2 /usr/bin/nova-manage db action_events_purge --keep-time-range 5 --max-events 1000 >> /var/log/nova/nova-rowspurge.log 2>&1"
+
+function is_active_pgserver()
+{
+    # Determine whether we're running on the same controller as the service.
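+    # (Assumed sm-query output format: a line containing "enabled-active"
+    # for the service on the active controller, and some other state such
+    # as "disabled" on the standby; only the active side runs the purge.)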
+ local service=postgres + local enabledactive=$(/usr/bin/sm-query service $service| grep enabled-active) + if [ "x$enabledactive" == "x" ] + then + # enabled-active not found for that service on this controller + return 1 + else + # enabled-active found for that resource + return 0 + fi +} + +if is_active_pgserver +then + if [ ! -f ${NOVA_PURGE_INFO} ] + then + echo delay_count=0 > ${NOVA_PURGE_INFO} + fi + + source ${NOVA_PURGE_INFO} + sudo -u postgres psql -d sysinv -c "SELECT alarm_id, entity_instance_id from i_alarm;" | grep -P "^(?=.*100.101)(?=.*${HOSTNAME})" &>/dev/null + if [ $? -eq 0 ] + then + source /etc/platform/platform.conf + if [ "${system_type}" = "All-in-one" ] + then + source /etc/init.d/task_affinity_functions.sh + idle_core=$(get_most_idle_core) + if [ "$idle_core" -ne "0" ] + then + # Purge soft deleted records that are older than 1 day and excess action events from nova database. + sh -c "exec taskset -c $idle_core ${NOVA_INSTANCE_PURGE_CMD}" + sh -c "exec taskset -c $idle_core ${NOVA_EVENT_PURGE_CMD}" + sed -i "/delay_count/s/=.*/=0/" ${NOVA_PURGE_INFO} + exit 0 + fi + fi + + if [ "$delay_count" -lt "3" ] + then + newval=$(($delay_count+1)) + sed -i "/delay_count/s/=.*/=$newval/" ${NOVA_PURGE_INFO} + (sleep 3600; /usr/bin/nova-purge-deleted-active) & + exit 0 + fi + fi + + # Purge soft deleted records that are older than 1 day and excess action events from nova database. + eval ${NOVA_INSTANCE_PURGE_CMD} + eval ${NOVA_EVENT_PURGE_CMD} + sed -i "/delay_count/s/=.*/=0/" ${NOVA_PURGE_INFO} +fi + +exit 0 + diff --git a/openstack/python-nova/centos/files/nova-restart b/openstack/python-nova/centos/files/nova-restart new file mode 100644 index 00000000..0ea5f035 --- /dev/null +++ b/openstack/python-nova/centos/files/nova-restart @@ -0,0 +1,60 @@ +#!/bin/bash +# +# Copyright (c) 2016 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# +# This script provides an example in-service patching restart, +# triggering a restart of the patching daemons themselves +# + +# +# The patching subsystem provides a patch-functions bash source file +# with useful function and variable definitions. +# +. /etc/patching/patch-functions + +# +# We can now check to see what type of node we're on, if it's locked, etc, +# and act accordingly +# + +# +# Declare an overall script return code +# +declare -i GLOBAL_RC=$PATCH_STATUS_OK + +# +# Processes that run with compute subfunction +# +if is_compute || is_cpe +then + processes_to_restart="nova-compute" + /usr/local/sbin/patch-restart-processes nova-compute + if [ $? != 0 ] ; then + loginfo "patching restart failed" + loginfo "... process-restart ${processes_to_restart}" + exit ${PATCH_STATUS_FAILED} + fi +fi + +# +# Processes that run on controller +# +if is_controller +then + processes_to_restart="nova-conductor nova-api nova-scheduler nova-console-auth nova-novnc nova-placement-api nova-compute nova-serialproxy" + /usr/local/sbin/patch-restart-processes --parallel ${processes_to_restart} + if [ $? != 0 ] ; then + loginfo "patching restart failed" + loginfo "... 
process-restart ${processes_to_restart}" + exit ${PATCH_STATUS_FAILED} + fi +fi + +# +# Exit the script with the overall return code +# +exit $GLOBAL_RC diff --git a/openstack/python-nova/centos/files/nova-ssh-config b/openstack/python-nova/centos/files/nova-ssh-config new file mode 100644 index 00000000..a5618f71 --- /dev/null +++ b/openstack/python-nova/centos/files/nova-ssh-config @@ -0,0 +1,4 @@ +Host * + User nova_migration + UserKnownHostsFile /dev/null + IdentityFile /etc/nova/migration/identity \ No newline at end of file diff --git a/openstack/python-nova/centos/files/nova-sudoers b/openstack/python-nova/centos/files/nova-sudoers new file mode 100644 index 00000000..db618036 --- /dev/null +++ b/openstack/python-nova/centos/files/nova-sudoers @@ -0,0 +1,4 @@ +Defaults:nova !requiretty + +nova ALL = (root) NOPASSWD: /usr/bin/nova-rootwrap /etc/nova/rootwrap.conf * +nova ALL = (root) NOPASSWD: /usr/bin/privsep-helper * diff --git a/openstack/python-nova/centos/files/nova.conf.sample b/openstack/python-nova/centos/files/nova.conf.sample new file mode 100644 index 00000000..84847b3f --- /dev/null +++ b/openstack/python-nova/centos/files/nova.conf.sample @@ -0,0 +1,4007 @@ +[DEFAULT] + +# +# From oslo.messaging +# + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. +# The "host" option should point or resolve to this address. (string value) +#rpc_zmq_bind_address = * + +# MatchMaker driver. (string value) +#rpc_zmq_matchmaker = oslo_messaging._drivers.matchmaker.MatchMakerLocalhost + +# ZeroMQ receiver listening port. (integer value) +#rpc_zmq_port = 9501 + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts = 1 + +# Maximum number of ingress messages to locally buffer per topic. Default is +# unlimited. (integer value) +#rpc_zmq_topic_backlog = + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir = /var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match +# "host" option, if running Nova. (string value) +#rpc_zmq_host = localhost + +# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. +# (integer value) +#rpc_cast_timeout = 30 + +# Heartbeat frequency. (integer value) +#matchmaker_heartbeat_freq = 300 + +# Heartbeat time-to-live. (integer value) +#matchmaker_heartbeat_ttl = 600 + +# Size of RPC thread pool. (integer value) +#rpc_thread_pool_size = 64 + +# Driver or drivers to handle sending notifications. (multi valued) +#notification_driver = + +# AMQP topic used for OpenStack notifications. (list value) +# Deprecated group/name - [rpc_notifier2]/topics +#notification_topics = notifications + +# Seconds to wait for a response from a call. (integer value) +#rpc_response_timeout = 60 + +# A URL representing the messaging driver to use and its full configuration. If +# not set, we fall back to the rpc_backend option and driver specific +# configuration. (string value) +#transport_url = + +# The messaging driver to use, defaults to rabbit. Other drivers include qpid +# and zmq. (string value) +#rpc_backend = rabbit + +# The default exchange under which topics are scoped. May be overridden by an +# exchange name specified in the transport_url option. 
(string value) +#control_exchange = openstack + + +# +# Options defined in nova.availability_zones +# + +# The availability_zone to show internal services under +# (string value) +#internal_service_availability_zone=internal + +# Default compute node availability_zone (string value) +#default_availability_zone=nova + + +# +# Options defined in nova.crypto +# + +# Filename of root CA (string value) +#ca_file=cacert.pem + +# Filename of private key (string value) +#key_file=private/cakey.pem + +# Filename of root Certificate Revocation List (string value) +#crl_file=crl.pem + +# Where we keep our keys (string value) +#keys_path=$state_path/keys + +# Where we keep our root CA (string value) +#ca_path=$state_path/CA + +# Should we use a CA for each project? (boolean value) +#use_project_ca=false + +# Subject for certificate for users, %s for project, user, +# timestamp (string value) +#user_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s + +# Subject for certificate for projects, %s for project, +# timestamp (string value) +#project_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s + + +# +# Options defined in nova.exception +# + +# Make exception message format errors fatal (boolean value) +#fatal_exception_format_errors=false + + +# +# Options defined in nova.netconf +# + +# IP address of this host (string value) +#my_ip=10.0.0.1 + +# Block storage IP address of this host (string value) +#my_block_storage_ip=$my_ip + +# Name of this node. This can be an opaque identifier. It is +# not necessarily a hostname, FQDN, or IP address. However, +# the node name must be valid within an AMQP key, and if using +# ZeroMQ, a valid hostname, FQDN, or IP address (string value) +#host=nova + +# Use IPv6 (boolean value) +#use_ipv6=false + + +# +# Options defined in nova.notifications +# + +# If set, send compute.instance.update notifications on +# instance state changes. Valid values are None for no +# notifications, "vm_state" for notifications on VM state +# changes, or "vm_and_task_state" for notifications on VM and +# task state changes. (string value) +#notify_on_state_change= + +# If set, send api.fault notifications on caught exceptions in +# the API service. 
(boolean value) +#notify_api_faults=false + +# Default notification level for outgoing notifications +# (string value) +#default_notification_level=INFO + +# Default publisher_id for outgoing notifications (string +# value) +#default_publisher_id= + + +# +# Options defined in nova.paths +# + +# Directory where the nova python module is installed (string +# value) +#pybasedir=/usr/lib/python/site-packages + +# Directory where nova binaries are installed (string value) +#bindir=/usr/local/bin + +# Top-level directory for maintaining nova's state (string +# value) +#state_path=$pybasedir + + +# +# Options defined in nova.quota +# + +# Number of instances allowed per project (integer value) +#quota_instances=10 + +# Number of instance cores allowed per project (integer value) +#quota_cores=20 + +# Megabytes of instance RAM allowed per project (integer +# value) +#quota_ram=51200 + +# Number of floating IPs allowed per project (integer value) +#quota_floating_ips=10 + +# Number of fixed IPs allowed per project (this should be at +# least the number of instances allowed) (integer value) +#quota_fixed_ips=-1 + +# Number of metadata items allowed per instance (integer +# value) +#quota_metadata_items=128 + +# Number of injected files allowed (integer value) +#quota_injected_files=5 + +# Number of bytes allowed per injected file (integer value) +#quota_injected_file_content_bytes=10240 + +# Length of injected file path (integer value) +#quota_injected_file_path_length=255 + +# Number of security groups per project (integer value) +#quota_security_groups=10 + +# Number of security rules per security group (integer value) +#quota_security_group_rules=20 + +# Number of key pairs per user (integer value) +#quota_key_pairs=100 + +# Number of server groups per project (integer value) +#quota_server_groups=10 + +# Number of servers per server group (integer value) +#quota_server_group_members=10 + +# Number of seconds until a reservation expires (integer +# value) +#reservation_expire=86400 + +# Count of reservations until usage is refreshed. This +# defaults to 0(off) to avoid additional load but it is useful +# to turn on to help keep quota usage up to date and reduce +# the impact of out of sync usage issues. (integer value) +#until_refresh=0 + +# Number of seconds between subsequent usage refreshes. This +# defaults to 0(off) to avoid additional load but it is useful +# to turn on to help keep quota usage up to date and reduce +# the impact of out of sync usage issues. Note that quotas are +# not updated on a periodic task, they will update on a new +# reservation if max_age has passed since the last reservation +# (integer value) +#max_age=0 + +# Default driver to use for quota checks (string value) +#quota_driver=nova.quota.DbQuotaDriver + + +# +# Options defined in nova.service +# + +# Seconds between nodes reporting state to datastore (integer +# value) +#report_interval=10 + +# Enable periodic tasks (boolean value) +#periodic_enable=true + +# Range of seconds to randomly delay when starting the +# periodic task scheduler to reduce stampeding. (Disable by +# setting to 0) (integer value) +#periodic_fuzzy_delay=60 + +# A list of APIs to enable by default (list value) +#enabled_apis=ec2,osapi_compute,metadata + +# A list of APIs with enabled SSL (list value) +#enabled_ssl_apis= + +# The IP address on which the EC2 API will listen. (string +# value) +#ec2_listen=0.0.0.0 + +# The port on which the EC2 API will listen. (integer value) +#ec2_listen_port=8773 + +# Number of workers for EC2 API service. 
The default will be +# equal to the number of CPUs available. (integer value) +#ec2_workers= + +# The IP address on which the OpenStack API will listen. +# (string value) +#osapi_compute_listen=0.0.0.0 + +# The port on which the OpenStack API will listen. (integer +# value) +#osapi_compute_listen_port=8774 + +# Number of workers for OpenStack API service. The default +# will be the number of CPUs available. (integer value) +#osapi_compute_workers= + +# OpenStack metadata service manager (string value) +#metadata_manager=nova.api.manager.MetadataManager + +# The IP address on which the metadata API will listen. +# (string value) +#metadata_listen=0.0.0.0 + +# The port on which the metadata API will listen. (integer +# value) +#metadata_listen_port=8775 + +# Number of workers for metadata service. The default will be +# the number of CPUs available. (integer value) +#metadata_workers= + +# Full class name for the Manager for compute (string value) +#compute_manager=nova.compute.manager.ComputeManager + +# Full class name for the Manager for console proxy (string +# value) +#console_manager=nova.console.manager.ConsoleProxyManager + +# Manager for console auth (string value) +#consoleauth_manager=nova.consoleauth.manager.ConsoleAuthManager + +# Full class name for the Manager for cert (string value) +#cert_manager=nova.cert.manager.CertManager + +# Full class name for the Manager for network (string value) +#network_manager=nova.network.manager.VlanManager + +# Full class name for the Manager for scheduler (string value) +#scheduler_manager=nova.scheduler.manager.SchedulerManager + +# Maximum time since last check-in for up service (integer +# value) +#service_down_time=60 + + +# +# Options defined in nova.utils +# + +# Whether to log monkey patching (boolean value) +#monkey_patch=false + +# List of modules/decorators to monkey patch (list value) +#monkey_patch_modules=nova.api.ec2.cloud:nova.notifications.notify_decorator,nova.compute.api:nova.notifications.notify_decorator + +# Length of generated instance admin passwords (integer value) +#password_length=12 + +# Time period to generate instance usages for. Time period +# must be hour, day, month or year (string value) +#instance_usage_audit_period=month + +# Path to the rootwrap configuration file to use for running +# commands as root (string value) +#rootwrap_config=/etc/nova/rootwrap.conf + +# Explicitly specify the temporary working directory (string +# value) +#tempdir= + + +# +# Options defined in nova.wsgi +# + +# File name for the paste.deploy config for nova-api (string +# value) +#api_paste_config=api-paste.ini + +# A python format string that is used as the template to +# generate log lines. The following values can be formatted +# into it: client_ip, date_time, request_line, status_code, +# body_length, wall_seconds. (string value) +#wsgi_log_format=%(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f + +# CA certificate file to use to verify connecting clients +# (string value) +#ssl_ca_file= + +# SSL certificate of API server (string value) +#ssl_cert_file= + +# SSL private key of API server (string value) +#ssl_key_file= + +# Sets the value of TCP_KEEPIDLE in seconds for each server +# socket. Not supported on OS X. (integer value) +#tcp_keepidle=600 + +# Size of the pool of greenthreads used by wsgi (integer +# value) +#wsgi_default_pool_size=1000 + +# Maximum line size of message headers to be accepted. 
+
+# If False, closes the client socket connection explicitly.
+# (boolean value)
+#wsgi_keep_alive=true
+
+# Timeout for client connections' socket operations. If an
+# incoming connection is idle for this number of seconds it
+# will be closed. A value of '0' means wait forever. (integer
+# value)
+#client_socket_timeout=900
+
+
+#
+# Options defined in nova.api.auth
+#
+
+# Whether to use per-user rate limiting for the api. This
+# option is only used by v2 api. Rate limiting is removed from
+# v3 api. (boolean value)
+#api_rate_limit=false
+
+# The strategy to use for auth: keystone, noauth
+# (deprecated), or noauth2. Both noauth and noauth2 are
+# designed for testing only, as they do no actual credential
+# checking. noauth provides administrative credentials
+# regardless of the passed in user; noauth2 only does if
+# 'admin' is specified as the username. (string value)
+#auth_strategy=keystone
+
+# Treat X-Forwarded-For as the canonical remote address. Only
+# enable this if you have a sanitizing proxy. (boolean value)
+#use_forwarded_for=false
+
+
+#
+# Options defined in nova.api.ec2
+#
+
+# Number of failed auths before lockout. (integer value)
+#lockout_attempts=5
+
+# Number of minutes to lockout if triggered. (integer value)
+#lockout_minutes=15
+
+# Number of minutes for lockout window. (integer value)
+#lockout_window=15
+
+# URL to get token from ec2 request. (string value)
+#keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens
+
+# Return the IP address as private dns hostname in describe
+# instances (boolean value)
+#ec2_private_dns_show_ip=false
+
+# Validate security group names according to EC2 specification
+# (boolean value)
+#ec2_strict_validation=true
+
+# Time in seconds before ec2 timestamp expires (integer value)
+#ec2_timestamp_expiry=300
+
+# Disable SSL certificate verification. (boolean value)
+#keystone_ec2_insecure=false
+
+
+#
+# Options defined in nova.api.ec2.cloud
+#
+
+# The IP address of the EC2 API server (string value)
+#ec2_host=$my_ip
+
+# The internal IP address of the EC2 API server (string value)
+#ec2_dmz_host=$my_ip
+
+# The port of the EC2 API server (integer value)
+#ec2_port=8773
+
+# The protocol to use when connecting to the EC2 API server
+# (http, https) (string value)
+#ec2_scheme=http
+
+# The path prefix used to call the ec2 API server (string
+# value)
+#ec2_path=/
+
+# List of region=fqdn pairs separated by commas (list value)
+#region_list=
+
+
+#
+# Options defined in nova.api.metadata.base
+#
+
+# List of metadata versions to skip placing into the config
+# drive (string value)
+#config_drive_skip_versions=1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01
+
+# Driver to use for vendor data (string value)
+#vendordata_driver=nova.api.metadata.vendordata_json.JsonFileVendorData
+
+
+#
+# Options defined in nova.api.metadata.handler
+#
+
+# Time in seconds to cache metadata; 0 to disable metadata
+# caching entirely (not recommended). Increasing this should
+# improve response times of the metadata API when under heavy
+# load. Higher values may increase memory usage and result in
+# longer times for host metadata changes to take effect.
+# (integer value)
+#metadata_cache_expiration=15
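+
+# Illustrative only, not part of the generated sample: a site
+# with slow-changing host metadata and a busy metadata API
+# might trade freshness for throughput, e.g.:
+#
+#   metadata_cache_expiration=60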
+
+
+#
+# Options defined in nova.api.metadata.vendordata_json
+#
+
+# File to load JSON formatted vendor data from (string value)
+#vendordata_jsonfile_path=
+
+
+#
+# Options defined in nova.api.openstack.common
+#
+
+# The maximum number of items returned in a single response
+# from a collection resource (integer value)
+#osapi_max_limit=1000
+
+# Base URL that will be presented to users in links to the
+# OpenStack Compute API (string value)
+#osapi_compute_link_prefix=
+
+# Base URL that will be presented to users in links to glance
+# resources (string value)
+#osapi_glance_link_prefix=
+
+
+#
+# Options defined in nova.api.openstack.compute
+#
+
+# Permit instance snapshot operations. (boolean value)
+#allow_instance_snapshots=true
+
+
+#
+# Options defined in nova.api.openstack.compute.contrib
+#
+
+# Specify list of extensions to load when using
+# osapi_compute_extension option with
+# nova.api.openstack.compute.contrib.select_extensions (list
+# value)
+#osapi_compute_ext_list=
+
+
+#
+# Options defined in nova.api.openstack.compute.contrib.fping
+#
+
+# Full path to fping. (string value)
+#fping_path=/usr/sbin/fping
+
+
+#
+# Options defined in nova.api.openstack.compute.contrib.os_tenant_networks
+#
+
+# Enables or disables quota checking for tenant networks
+# (boolean value)
+#enable_network_quota=false
+
+# Control for checking for default networks (string value)
+#use_neutron_default_nets=False
+
+# Default tenant id when creating neutron networks (string
+# value)
+#neutron_default_tenant_id=default
+
+# Number of private networks allowed per project (integer
+# value)
+#quota_networks=3
+
+
+#
+# Options defined in nova.api.openstack.compute.extensions
+#
+
+# osapi compute extension to load (multi valued)
+#osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions
+
+
+#
+# Options defined in nova.api.openstack.compute.plugins.v3.hide_server_addresses
+#
+
+# List of instance states that should hide network info (list
+# value)
+#osapi_hide_server_address_states=building
+
+
+#
+# Options defined in nova.api.openstack.compute.servers
+#
+
+# Enables returning of the instance password by the relevant
+# server API calls such as create, rebuild or rescue. If the
+# hypervisor does not support password injection then the
+# password returned will not be correct (boolean value)
+#enable_instance_password=true
+
+
+#
+# Options defined in nova.cert.rpcapi
+#
+
+# The topic cert nodes listen on (string value)
+#cert_topic=cert
+
+
+#
+# Options defined in nova.cloudpipe.pipelib
+#
+
+# Image ID used when starting up a cloudpipe vpn server
+# (string value)
+#vpn_image_id=0
+
+# Flavor for vpn instances (string value)
+#vpn_flavor=m1.tiny
+
+# Template for cloudpipe instance boot script (string value)
+#boot_script_template=$pybasedir/nova/cloudpipe/bootscript.template
+
+# Network to push into openvpn config (string value)
+#dmz_net=10.0.0.0
+
+# Netmask to push into openvpn config (string value)
+#dmz_mask=255.255.255.0
+
+# Suffix to add to project name for vpn key and secgroups
+# (string value)
+#vpn_key_suffix=-vpn
+
+
+#
+# Options defined in nova.cmd.novnc
+#
+
+# Record sessions to FILE.[session_number] (boolean value)
+#record=false
+
+# Become a daemon (background process) (boolean value)
+#daemon=false
+
+# Disallow non-encrypted connections (boolean value)
+#ssl_only=false
+
+# Source is ipv6 (boolean value)
+#source_is_ipv6=false
+
+# SSL certificate file (string value)
+#cert=self.pem
+
+#
SSL key file (if separate from cert) (string value) +#key= + +# Run webserver on same port. Serve files from DIR. (string +# value) +#web=/usr/share/spice-html5 + + +# +# Options defined in nova.cmd.novncproxy +# + +# Host on which to listen for incoming requests (string value) +#novncproxy_host=0.0.0.0 + +# Port on which to listen for incoming requests (integer +# value) +#novncproxy_port=6080 + + +# +# Options defined in nova.compute.api +# + +# Allow destination machine to match source for resize. Useful +# when testing in single-host environments. (boolean value) +#allow_resize_to_same_host=false + +# Allow migrate machine to the same host. Useful when testing +# in single-host environments. (boolean value) +#allow_migrate_to_same_host=false + +# Availability zone to use when user doesn't specify one +# (string value) +#default_schedule_zone= + +# These are image properties which a snapshot should not +# inherit from an instance (list value) +#non_inheritable_image_properties=cache_in_nova,bittorrent + +# Kernel image that indicates not to use a kernel, but to use +# a raw disk image instead (string value) +#null_kernel=nokernel + +# When creating multiple instances with a single request using +# the os-multiple-create API extension, this template will be +# used to build the display name for each instance. The +# benefit is that the instances end up with different +# hostnames. To restore legacy behavior of every instance +# having the same name, set this option to "%(name)s". Valid +# keys for the template are: name, uuid, count. (string value) +#multi_instance_display_name_template=%(name)s-%(count)d + +# Maximum number of devices that will result in a local image +# being created on the hypervisor node. Setting this to 0 +# means nova will allow only boot from volume. A negative +# number means unlimited. (integer value) +#max_local_block_devices=3 + + +# +# Options defined in nova.compute.flavors +# + +# Default flavor to use for the EC2 API only. The Nova API +# does not support a default flavor. (string value) +#default_flavor=m1.small + + +# +# Options defined in nova.compute.manager +# + +# Console proxy host to use to connect to instances on this +# host. (string value) +#console_host=nova + +# Name of network to use to set access IPs for instances +# (string value) +#default_access_ip_network_name= + +# Whether to batch up the application of IPTables rules during +# a host restart and apply all at the end of the init phase +# (boolean value) +#defer_iptables_apply=false + +# Where instances are stored on disk (string value) +#instances_path=$state_path/instances + +# Generate periodic compute.instance.exists notifications +# (boolean value) +#instance_usage_audit=false + +# Number of 1 second retries needed in live_migration (integer +# value) +#live_migration_retry_count=30 + +# Whether to start guests that were running before the host +# rebooted (boolean value) +#resume_guests_state_on_host_boot=false + +# Number of times to retry network allocation on failures +# (integer value) +#network_allocate_retries=0 + +# Maximum number of instance builds to run concurrently +# (integer value) +#max_concurrent_builds=10 + +# Number of times to retry block device allocation on failures +# (integer value) +#block_device_allocate_retries=60 + +# The number of times to attempt to reap an instance's files. +# (integer value) +#maximum_instance_delete_attempts=5 + +# Interval to pull network bandwidth usage info. Not supported +# on all hypervisors. Set to -1 to disable. 
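Setting this to 0
+# will run at the default rate. (integer value)
+#bandwidth_poll_interval=600
+
+# Illustrative only, not part of the generated sample: these
+# poll intervals share one convention (-1 disables, 0 runs at
+# the default rate), so a host that does not need bandwidth
+# polling could disable it outright, e.g.:
+#
+#   bandwidth_poll_interval=-1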
+
+# Interval to sync power states between the database and the
+# hypervisor. Set to -1 to disable. Setting this to 0 will run
+# at the default rate. (integer value)
+#sync_power_state_interval=600
+
+# Number of seconds between instance network information cache
+# updates (integer value)
+#heal_instance_info_cache_interval=60
+
+# Interval in seconds for reclaiming deleted instances
+# (integer value)
+#reclaim_instance_interval=0
+
+# Interval in seconds for gathering volume usages (integer
+# value)
+#volume_usage_poll_interval=0
+
+# Interval in seconds for polling shelved instances to
+# offload. Set to -1 to disable. Setting this to 0 will run at
+# the default rate. (integer value)
+#shelved_poll_interval=3600
+
+# Time in seconds before a shelved instance is eligible for
+# removal from a host. -1 never offload, 0 offload when
+# shelved (integer value)
+#shelved_offload_time=0
+
+# Interval in seconds for retrying failed instance file
+# deletes. Set to -1 to disable. Setting this to 0 will run at
+# the default rate. (integer value)
+#instance_delete_interval=300
+
+# Waiting time interval (seconds) between block device
+# allocation retries on failures (integer value)
+#block_device_allocate_retries_interval=3
+
+# Waiting time interval (seconds) between sending the
+# scheduler a list of current instance UUIDs to verify that
+# its view of instances is in sync with nova. If the CONF
+# option `scheduler_tracks_instance_changes` is False,
+# changing this option will have no effect. (integer value)
+#scheduler_instance_sync_interval=120
+
+# Action to take if a running deleted instance is detected.
+# Valid options are 'noop', 'log', 'shutdown', or 'reap'. Set
+# to 'noop' to take no action. (string value)
+#running_deleted_instance_action=reap
+
+# Number of seconds to wait between runs of the cleanup task.
+# (integer value)
+#running_deleted_instance_poll_interval=1800
+
+# Number of seconds after being deleted when a running
+# instance should be considered eligible for cleanup. (integer
+# value)
+#running_deleted_instance_timeout=0
+
+# Automatically hard reboot an instance if it has been stuck
+# in a rebooting state longer than N seconds. Set to 0 to
+# disable. (integer value)
+#reboot_timeout=0
+
+# Amount of time in seconds an instance can be in BUILD before
+# going into ERROR status. Set to 0 to disable. (integer
+# value)
+#instance_build_timeout=0
+
+# Automatically unrescue an instance after N seconds. Set to 0
+# to disable. (integer value)
+#rescue_timeout=0
+
+# Automatically confirm resizes after N seconds. Set to 0 to
+# disable. (integer value)
+#resize_confirm_window=0
+
+# Total amount of time to wait in seconds for an instance to
+# perform a clean shutdown. (integer value)
+#shutdown_timeout=60
+
+
+#
+# Options defined in nova.compute.monitors
+#
+
+# Monitor classes available to the compute which may be
+# specified more than once. (multi valued)
+#compute_available_monitors=nova.compute.monitors.all_monitors
+
+# A list of monitors that can be used for getting compute
+# metrics.
(list value) +#compute_monitors= + + +# +# Options defined in nova.compute.resource_tracker +# + +# Amount of disk in MB to reserve for the host (integer value) +#reserved_host_disk_mb=0 + +# Amount of memory in MB to reserve for the host (integer +# value) +#reserved_host_memory_mb=512 + +# Class that will manage stats for the local compute host +# (string value) +#compute_stats_class=nova.compute.stats.Stats + +# The names of the extra resources to track. (list value) +#compute_resources=vcpu + + +# +# Options defined in nova.compute.rpcapi +# + +# The topic compute nodes listen on (string value) +#compute_topic=compute + + +# +# Options defined in nova.conductor.tasks.live_migrate +# + +# Number of times to retry live-migration before failing. If +# == -1, try until out of hosts. If == 0, only try once, no +# retries. (integer value) +#migrate_max_retries=-1 + + +# +# Options defined in nova.console.manager +# + +# Driver to use for the console proxy (string value) +#console_driver=nova.console.xvp.XVPConsoleProxy + +# Stub calls to compute worker for tests (boolean value) +#stub_compute=false + +# Publicly visible name for this console host (string value) +#console_public_hostname=nova + + +# +# Options defined in nova.console.rpcapi +# + +# The topic console proxy nodes listen on (string value) +#console_topic=console + + +# +# Options defined in nova.console.xvp +# + +# XVP conf template (string value) +#console_xvp_conf_template=$pybasedir/nova/console/xvp.conf.template + +# Generated XVP conf file (string value) +#console_xvp_conf=/etc/xvp.conf + +# XVP master process pid file (string value) +#console_xvp_pid=/var/run/xvp.pid + +# XVP log file (string value) +#console_xvp_log=/var/log/xvp.log + +# Port for XVP to multiplex VNC connections on (integer value) +#console_xvp_multiplex_port=5900 + + +# +# Options defined in nova.consoleauth +# + +# The topic console auth proxy nodes listen on (string value) +#consoleauth_topic=consoleauth + + +# +# Options defined in nova.consoleauth.manager +# + +# How many seconds before deleting tokens (integer value) +#console_token_ttl=600 + + +# +# Options defined in nova.db.api +# + +# Services to be added to the available pool on create +# (boolean value) +#enable_new_services=true + +# Template string to be used to generate instance names +# (string value) +#instance_name_template=instance-%08x + +# Template string to be used to generate snapshot names +# (string value) +#snapshot_name_template=snapshot-%s + + +# +# Options defined in nova.db.base +# + +# The driver to use for database access (string value) +#db_driver=nova.db + + +# +# Options defined in nova.db.sqlalchemy.api +# + +# When set, compute API will consider duplicate hostnames +# invalid within the specified scope, regardless of case. +# Should be empty, "project" or "global". 
(string value)
+#osapi_compute_unique_server_name_scope=
+
+
+#
+# Options defined in nova.image.s3
+#
+
+# Parent directory for tempdir used for image decryption
+# (string value)
+#image_decryption_dir=/tmp
+
+# Hostname or IP for OpenStack to use when accessing the S3
+# api (string value)
+#s3_host=$my_ip
+
+# Port used when accessing the S3 api (integer value)
+#s3_port=3333
+
+# Access key to use for S3 server for images (string value)
+#s3_access_key=notchecked
+
+# Secret key to use for S3 server for images (string value)
+#s3_secret_key=notchecked
+
+# Whether to use SSL when talking to S3 (boolean value)
+#s3_use_ssl=false
+
+# Whether to affix the tenant id to the access key when
+# downloading from S3 (boolean value)
+#s3_affix_tenant=false
+
+
+#
+# Options defined in nova.ipv6.api
+#
+
+# Backend to use for IPv6 generation (string value)
+#ipv6_backend=rfc2462
+
+
+#
+# Options defined in nova.network
+#
+
+# The full class name of the network API class to use (string
+# value)
+#network_api_class=nova.network.api.API
+
+
+#
+# Options defined in nova.network.driver
+#
+
+# Driver to use for network creation (string value)
+#network_driver=nova.network.linux_net
+
+
+#
+# Options defined in nova.network.floating_ips
+#
+
+# Default pool for floating IPs (string value)
+#default_floating_pool=nova
+
+# Automatically assign floating IP to VM (boolean value)
+#auto_assign_floating_ip=false
+
+# Full class name for the DNS Manager for floating IPs (string
+# value)
+#floating_ip_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver
+
+# Full class name for the DNS Manager for instance IPs (string
+# value)
+#instance_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver
+
+# Full class name for the DNS Zone for instance IPs (string
+# value)
+#instance_dns_domain=
+
+
+#
+# Options defined in nova.network.ldapdns
+#
+
+# URL for LDAP server which will store DNS entries (string
+# value)
+#ldap_dns_url=ldap://ldap.example.com:389
+
+# User for LDAP DNS (string value)
+#ldap_dns_user=uid=admin,ou=people,dc=example,dc=org
+
+# Password for LDAP DNS (string value)
+#ldap_dns_password=password
+
+# Hostmaster for LDAP DNS driver Start of Authority
+# (string value)
+#ldap_dns_soa_hostmaster=hostmaster@example.org
+
+# DNS Servers for LDAP DNS driver (multi valued)
+#ldap_dns_servers=dns.example.org
+
+# Base DN for DNS entries in LDAP (string value)
+#ldap_dns_base_dn=ou=hosts,dc=example,dc=org
+
+# Refresh interval (in seconds) for LDAP DNS driver Start
+# of Authority (string value)
+#ldap_dns_soa_refresh=1800
+
+# Retry interval (in seconds) for LDAP DNS driver Start of
+# Authority (string value)
+#ldap_dns_soa_retry=3600
+
+# Expiry interval (in seconds) for LDAP DNS driver Start
+# of Authority (string value)
+#ldap_dns_soa_expiry=86400
+
+# Minimum interval (in seconds) for LDAP DNS driver Start
+# of Authority (string value)
+#ldap_dns_soa_minimum=7200
+
+
+#
+# Options defined in nova.network.linux_net
+#
+
+# Location of flagfiles for dhcpbridge (multi valued)
+#dhcpbridge_flagfile=/etc/nova/nova-dhcpbridge.conf
+
+# Location to keep network config files (string value)
+#networks_path=$state_path/networks
+
+# Interface for public IP addresses (string value)
+#public_interface=eth0
+
+# Location of nova-dhcpbridge (string value)
+#dhcpbridge=$bindir/nova-dhcpbridge
+
+# Public IP of network host (string value)
+#routing_source_ip=$my_ip
+
+# Lifetime of a DHCP lease in seconds (integer value)
+#dhcp_lease_time=86400
+
+# If set, uses specific DNS server for
dnsmasq. Can be +# specified multiple times. (multi valued) +#dns_server= + +# If set, uses the dns1 and dns2 from the network ref. as dns +# servers. (boolean value) +#use_network_dns_servers=false + +# A list of dmz ranges that should be accepted (list value) +#dmz_cidr= + +# Traffic to this range will always be snatted to the fallback +# ip, even if it would normally be bridged out of the node. +# Can be specified multiple times. (multi valued) +#force_snat_range= + +# Override the default dnsmasq settings with this file (string +# value) +#dnsmasq_config_file= + +# Driver used to create ethernet devices. (string value) +#linuxnet_interface_driver=nova.network.linux_net.LinuxBridgeInterfaceDriver + +# Name of Open vSwitch bridge used with linuxnet (string +# value) +#linuxnet_ovs_integration_bridge=br-int + +# Send gratuitous ARPs for HA setup (boolean value) +#send_arp_for_ha=false + +# Send this many gratuitous ARPs for HA setup (integer value) +#send_arp_for_ha_count=3 + +# Use single default gateway. Only first nic of vm will get +# default gateway from dhcp server (boolean value) +#use_single_default_gateway=false + +# An interface that bridges can forward to. If this is set to +# all then all traffic will be forwarded. Can be specified +# multiple times. (multi valued) +#forward_bridge_interface=all + +# The IP address for the metadata API server (string value) +#metadata_host=$my_ip + +# The port for the metadata API port (integer value) +#metadata_port=8775 + +# Regular expression to match the iptables rule that should +# always be on the top. (string value) +#iptables_top_regex= + +# Regular expression to match the iptables rule that should +# always be on the bottom. (string value) +#iptables_bottom_regex= + +# The table that iptables to jump to when a packet is to be +# dropped. (string value) +#iptables_drop_action=DROP + +# Amount of time, in seconds, that ovs_vsctl should wait for a +# response from the database. 0 is to wait forever. (integer +# value) +#ovs_vsctl_timeout=120 + +# If passed, use fake network devices and addresses (boolean +# value) +#fake_network=false + +# Number of times to retry ebtables commands on failure. +# (integer value) +#ebtables_exec_attempts=3 + +# Number of seconds to wait between ebtables retries. 
+# (floating point value) +#ebtables_retry_interval=1.0 + + +# +# Options defined in nova.network.manager +# + +# Bridge for simple network instances (string value) +#flat_network_bridge= + +# DNS server for simple network (string value) +#flat_network_dns=8.8.4.4 + +# Whether to attempt to inject network setup into guest +# (boolean value) +#flat_injected=false + +# FlatDhcp will bridge into this interface if set (string +# value) +#flat_interface= + +# First VLAN for private networks (integer value) +#vlan_start=100 + +# VLANs will bridge into this interface if set (string value) +#vlan_interface= + +# Number of networks to support (integer value) +#num_networks=1 + +# Public IP for the cloudpipe VPN servers (string value) +#vpn_ip=$my_ip + +# First Vpn port for private networks (integer value) +#vpn_start=1000 + +# Number of addresses in each private subnet (integer value) +#network_size=256 + +# Fixed IPv6 address block (string value) +#fixed_range_v6=fd00::/48 + +# Default IPv4 gateway (string value) +#gateway= + +# Default IPv6 gateway (string value) +#gateway_v6= + +# Number of addresses reserved for vpn clients (integer value) +#cnt_vpn_clients=0 + +# Seconds after which a deallocated IP is disassociated +# (integer value) +#fixed_ip_disassociate_timeout=600 + +# Number of attempts to create unique mac address (integer +# value) +#create_unique_mac_address_attempts=5 + +# If True, skip using the queue and make local calls (boolean +# value) +#fake_call=false + +# If True, unused gateway devices (VLAN and bridge) are +# deleted in VLAN network mode with multi hosted networks +# (boolean value) +#teardown_unused_network_gateway=false + +# If True, send a dhcp release on instance termination +# (boolean value) +#force_dhcp_release=true + +# If True, when a DNS entry must be updated, it sends a fanout +# cast to all network hosts to update their DNS entries in +# multi host mode (boolean value) +#update_dns_entries=false + +# Number of seconds to wait between runs of updates to DNS +# entries. (integer value) +#dns_update_periodic_interval=-1 + +# Domain to use for building the hostnames (string value) +#dhcp_domain=novalocal + +# Indicates underlying L3 management library (string value) +#l3_lib=nova.network.l3.LinuxNetL3 + + +# +# Options defined in nova.network.rpcapi +# + +# The topic network nodes listen on (string value) +#network_topic=network + +# Default value for multi_host in networks. Also, if set, some +# rpc network calls will be sent directly to host. (boolean +# value) +#multi_host=false + + +# +# Options defined in nova.network.security_group.openstack_driver +# + +# The full class name of the security API class (string value) +#security_group_api=nova + + +# +# Options defined in nova.objects.network +# + +# DEPRECATED: THIS VALUE SHOULD BE SET WHEN CREATING THE +# NETWORK. If True in multi_host mode, all compute hosts share +# the same dhcp address. The same IP address used for DHCP +# will be added on each nova-network node which is only +# visible to the vms on the same host. (boolean value) +#share_dhcp_address=false + +# DEPRECATED: THIS VALUE SHOULD BE SET WHEN CREATING THE +# NETWORK. MTU setting for network interface. 
(integer value) +#network_device_mtu= + + +# +# Options defined in nova.objectstore.s3server +# + +# Path to S3 buckets (string value) +#buckets_path=$state_path/buckets + +# IP address for S3 API to listen (string value) +#s3_listen=0.0.0.0 + +# Port for S3 API to listen (integer value) +#s3_listen_port=3333 + + +# +# From oslo.log +# + +# Print debugging output (set logging level to DEBUG instead of default WARNING +# level). (boolean value) +#debug = false + +# Print more verbose output (set logging level to INFO instead of default +# WARNING level). (boolean value) +#verbose = false + +# The name of a logging configuration file. This file is appended to any +# existing logging configuration files. For details about logging configuration +# files, see the Python logging module documentation. (string value) +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append = + +# DEPRECATED. A logging.Formatter log message format string which may use any +# of the available logging.LogRecord attributes. This option is deprecated. +# Please use logging_context_format_string and logging_default_format_string +# instead. (string value) +#log_format = + +# Format string for %%(asctime)s in log records. Default: %(default)s . (string +# value) +#log_date_format = %Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to output to. If no default is set, logging will +# go to stdout. (string value) +# Deprecated group/name - [DEFAULT]/logfile +#log_file = + +# (Optional) The base directory used for relative --log-file paths. (string +# value) +# Deprecated group/name - [DEFAULT]/logdir +#log_dir = + +# Use syslog for logging. Existing syslog format is DEPRECATED during I, and +# will change in J to honor RFC5424. (boolean value) +#use_syslog = false + +# (Optional) Enables or disables syslog rfc5424 format for logging. If enabled, +# prefixes the MSG part of the syslog message with APP-NAME (RFC5424). The +# format without the APP-NAME is deprecated in I, and will be removed in J. +# (boolean value) +#use_syslog_rfc_format = false + +# Syslog facility to receive log lines. (string value) +#syslog_log_facility = LOG_USER + +# Log output to standard error. (boolean value) +#use_stderr = true + +# Format string to use for log messages with context. (string value) +#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# Format string to use for log messages without context. (string value) +#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# Data to append to log format when level is DEBUG. (string value) +#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d + +# Prefix each line of exception output with this format. (string value) +#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s + +# List of logger=LEVEL pairs. (list value) +#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN + +# Enables or disables publication of error events. (boolean value) +#publish_errors = false + +# Enables or disables fatal status of deprecations. 
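(boolean value)
+#fatal_deprecations = false
+
+# Illustrative only, not part of the generated sample: when
+# debugging, the oslo.log switches above are often combined,
+# e.g.:
+#
+#   debug = true
+#   use_stderr = true
+#   default_log_levels = sqlalchemy=WARN,oslo.messaging=DEBUG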
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+
+#
+# Options defined in nova.pci.request
+#
+
+# An alias for a PCI passthrough device requirement. This
+# allows users to specify the alias in the extra_spec for a
+# flavor, without needing to repeat all the PCI property
+# requirements. For example: pci_alias = { "name":
+# "QuickAssist", "product_id": "0443", "vendor_id": "8086",
+# "device_type": "ACCEL" } defines an alias for the Intel
+# QuickAssist card. (multi valued)
+#pci_alias=
+
+
+#
+# Options defined in nova.pci.whitelist
+#
+
+# White list of PCI devices available to VMs. For example:
+# pci_passthrough_whitelist = [{"vendor_id": "8086",
+# "product_id": "0443"}] (multi valued)
+#pci_passthrough_whitelist=
+
+
+#
+# Options defined in nova.scheduler.driver
+#
+
+# The scheduler host manager class to use (string value)
+#scheduler_host_manager=nova.scheduler.host_manager.HostManager
+
+
+#
+# Options defined in nova.scheduler.filter_scheduler
+#
+
+# New instances will be scheduled on a host chosen randomly
+# from a subset of the N best hosts. This property defines the
+# subset size that a host is chosen from. A value of 1 chooses
+# the first host returned by the weighing functions. This
+# value must be at least 1. Any value less than 1 will be
+# ignored, and 1 will be used instead (integer value)
+#scheduler_host_subset_size=1
+
+
+#
+# Options defined in nova.scheduler.filters.aggregate_image_properties_isolation
+#
+
+# Force the filter to consider only keys matching the given
+# namespace. (string value)
+#aggregate_image_properties_isolation_namespace=
+
+# The separator used between the namespace and keys (string
+# value)
+#aggregate_image_properties_isolation_separator=.
+
+
+#
+# Options defined in nova.scheduler.filters.core_filter
+#
+
+# Virtual CPU to physical CPU allocation ratio which affects
+# all CPU filters. This configuration specifies a global ratio
+# for CoreFilter. For AggregateCoreFilter, it will fall back
+# to this configuration value if no per-aggregate setting
+# found. (floating point value)
+#cpu_allocation_ratio=16.0
+
+
+#
+# Options defined in nova.scheduler.filters.disk_filter
+#
+
+# Virtual disk to physical disk allocation ratio (floating
+# point value)
+#disk_allocation_ratio=1.0
+
+
+#
+# Options defined in nova.scheduler.filters.io_ops_filter
+#
+
+# Tells filters to ignore hosts that have this many or more
+# instances currently in build, resize, snapshot, migrate,
+# rescue or unshelve task states (integer value)
+#max_io_ops_per_host=8
+
+
+#
+# Options defined in nova.scheduler.filters.isolated_hosts_filter
+#
+
+# Images to run on isolated host (list value)
+#isolated_images=
+
+# Host reserved for specific images (list value)
+#isolated_hosts=
+
+# Whether to force isolated hosts to run only isolated images
+# (boolean value)
+#restrict_isolated_hosts_to_isolated_images=true
+
+
+#
+# Options defined in nova.scheduler.filters.num_instances_filter
+#
+
+# Ignore hosts that have too many instances (integer value)
+#max_instances_per_host=50
+
+
+#
+# Options defined in nova.scheduler.filters.ram_filter
+#
+
+# Virtual ram to physical ram allocation ratio which affects
+# all ram filters.
This configuration specifies a global ratio +# for RamFilter. For AggregateRamFilter, it will fall back to +# this configuration value if no per-aggregate setting found. +# (floating point value) +#ram_allocation_ratio=1.5 + + +# +# Options defined in nova.scheduler.host_manager +# + +# Filter classes available to the scheduler which may be +# specified more than once. An entry of +# "nova.scheduler.filters.all_filters" maps to all filters +# included with nova. (multi valued) +#scheduler_available_filters=nova.scheduler.filters.all_filters + +# Which filter class names to use for filtering hosts when not +# specified in the request. (list value) +#scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter + +# Which weight class names to use for weighing hosts (list +# value) +#scheduler_weight_classes=nova.scheduler.weights.all_weighers + +# Determines if the Scheduler tracks changes to instances to +# help with its filtering decisions. (boolean value) +#scheduler_tracks_instance_changes=true + + +# +# Options defined in nova.scheduler.ironic_host_manager +# + +# Which filter class names to use for filtering baremetal +# hosts when not specified in the request. (list value) +#baremetal_scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ExactRamFilter,ExactDiskFilter,ExactCoreFilter + +# Flag to decide whether to use +# baremetal_scheduler_default_filters or not. (boolean value) +#scheduler_use_baremetal_filters=false + + +# +# Options defined in nova.scheduler.manager +# + +# Default driver to use for the scheduler (string value) +#scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler + +# How often (in seconds) to run periodic tasks in the +# scheduler driver of your choice. Please note this is likely +# to interact with the value of service_down_time, but exactly +# how they interact will depend on your choice of scheduler +# driver. (integer value) +#scheduler_driver_task_period=60 + + +# +# Options defined in nova.scheduler.rpcapi +# + +# The topic scheduler nodes listen on (string value) +#scheduler_topic=scheduler + + +# +# Options defined in nova.scheduler.scheduler_options +# + +# Absolute path to scheduler configuration JSON file. (string +# value) +#scheduler_json_config_location= + + +# +# Options defined in nova.scheduler.utils +# + +# Maximum number of attempts to schedule an instance (integer +# value) +#scheduler_max_attempts=3 + + +# +# Options defined in nova.scheduler.weights.io_ops +# + +# Multiplier used for weighing host io ops. Negative numbers +# mean a preference to choose light workload compute hosts. +# (floating point value) +#io_ops_weight_multiplier=-1.0 + + +# +# Options defined in nova.scheduler.weights.ram +# + +# Multiplier used for weighing ram. Negative numbers mean to +# stack vs spread. (floating point value) +#ram_weight_multiplier=1.0 + + +# +# Options defined in nova.servicegroup.api +# + +# The driver for servicegroup service (valid options are: db, +# zk, mc) (string value) +#servicegroup_driver=db + + +# +# Options defined in nova.virt.configdrive +# + +# Config drive format. One of iso9660 (default) or vfat +# (string value) +#config_drive_format=iso9660 + +# Set to "always" to force injection to take place on a config +# drive. NOTE: The "always" will be deprecated in the Liberty +# release cycle. 
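(string value)
+#force_config_drive=
+
+# Illustrative only, not part of the generated sample: a cloud
+# that relies on the config drive rather than the metadata
+# service might force one onto every instance, e.g.:
+#
+#   force_config_drive=always
+#   config_drive_format=iso9660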
+
+# Name and optionally path of the tool used for ISO image
+# creation (string value)
+#mkisofs_cmd=genisoimage
+
+
+#
+# Options defined in nova.virt.disk.api
+#
+
+# Name of the mkfs commands for ephemeral device. The format
+# is <os_type>=<mkfs command> (multi valued)
+#virt_mkfs=
+
+# Attempt to resize the filesystem by accessing the image over
+# a block device. This is done by the host and may not be
+# necessary if the image contains a recent version of cloud-
+# init. Possible mechanisms require the nbd driver (for qcow
+# and raw), or loop (for raw). (boolean value)
+#resize_fs_using_block_device=false
+
+
+#
+# Options defined in nova.virt.disk.mount.nbd
+#
+
+# Amount of time, in seconds, to wait for NBD device start up.
+# (integer value)
+#timeout_nbd=10
+
+
+#
+# Options defined in nova.virt.driver
+#
+
+# Driver to use for controlling virtualization. Options
+# include: libvirt.LibvirtDriver, xenapi.XenAPIDriver,
+# fake.FakeDriver, baremetal.BareMetalDriver,
+# vmwareapi.VMwareVCDriver, hyperv.HyperVDriver (string value)
+#compute_driver=
+
+# The default format an ephemeral_volume will be formatted
+# with on creation. (string value)
+#default_ephemeral_format=
+
+# VM image preallocation mode: "none" => no storage
+# provisioning is done up front, "space" => storage is fully
+# allocated at instance start (string value)
+#preallocate_images=none
+
+# Whether to use cow images (boolean value)
+#use_cow_images=true
+
+# Fail instance boot if vif plugging fails (boolean value)
+#vif_plugging_is_fatal=true
+
+# Number of seconds to wait for neutron vif plugging events to
+# arrive before continuing or failing (see
+# vif_plugging_is_fatal). If this is set to zero and
+# vif_plugging_is_fatal is False, events should not be
+# expected to arrive at all. (integer value)
+#vif_plugging_timeout=300
+
+
+#
+# Options defined in nova.virt.firewall
+#
+
+# Firewall driver (defaults to hypervisor specific iptables
+# driver) (string value)
+#firewall_driver=
+
+# Whether to allow network traffic from same network (boolean
+# value)
+#allow_same_net_traffic=true
+
+
+#
+# Options defined in nova.virt.hardware
+#
+
+# Defines which pcpus that instance vcpus can use. For
+# example, "4-12,^8,15" (string value)
+#vcpu_pin_set=
+
+
+#
+# Options defined in nova.virt.imagecache
+#
+
+# Number of seconds to wait between runs of the image cache
+# manager. Set to -1 to disable. Setting this to 0 will run at
+# the default rate. (integer value)
+#image_cache_manager_interval=2400
+
+# Where cached images are stored under $instances_path. This
+# is NOT the full path - just a folder name. For per-compute-
+# host cached images, set to _base_$my_ip (string value)
+#image_cache_subdirectory_name=_base
+
+# Should unused base images be removed? (boolean value)
+#remove_unused_base_images=true
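+
+# Illustrative only, not part of the generated sample: a
+# compute host short on disk might reap unused base images
+# more aggressively, e.g.:
+#
+#   image_cache_manager_interval=600
+#   remove_unused_original_minimum_age_seconds=3600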
+
+# Unused unresized base images younger than this will not be
+# removed (integer value)
+#remove_unused_original_minimum_age_seconds=86400
+
+
+#
+# Options defined in nova.virt.images
+#
+
+# Force backing images to raw format (boolean value)
+#force_raw_images=true
+
+
+#
+# Options defined in nova.virt.netutils
+#
+
+# Template file for injected network (string value)
+#injected_network_template=$pybasedir/nova/virt/interfaces.template
+
+
+#
+# Options defined in nova.vnc
+#
+
+# Location of VNC console proxy, in the form
+# "http://127.0.0.1:6080/vnc_auto.html" (string value)
+#novncproxy_base_url=http://127.0.0.1:6080/vnc_auto.html
+
+# Location of nova xvp VNC console proxy, in the form
+# "http://127.0.0.1:6081/console" (string value)
+#xvpvncproxy_base_url=http://127.0.0.1:6081/console
+
+# IP address on which instance vncservers should listen
+# (string value)
+#vncserver_listen=127.0.0.1
+
+# The address to which proxy clients (like nova-xvpvncproxy)
+# should connect (string value)
+#vncserver_proxyclient_address=127.0.0.1
+
+# Enable VNC related features (boolean value)
+#vnc_enabled=true
+
+# Keymap for VNC (string value)
+#vnc_keymap=en-us
+
+
+#
+# Options defined in nova.vnc.xvp_proxy
+#
+
+# Port that the XCP VNC proxy should bind to (integer value)
+#xvpvncproxy_port=6081
+
+# Address that the XCP VNC proxy should bind to (string value)
+#xvpvncproxy_host=0.0.0.0
+
+
+#
+# Options defined in nova.volume
+#
+
+# The full class name of the volume API class to use (string
+# value)
+#volume_api_class=nova.volume.cinder.API
+
+
+#
+# Options defined in nova.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>,
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number; <port> results in listening on the
+# specified port number (and not enabling backdoor if that
+# port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range
+# of port numbers. The chosen port is displayed in the
+# service's log file. (string value)
+#backdoor_port=
+
+
+#
+# Options defined in nova.openstack.common.memorycache
+#
+
+# Memcached servers or None for in process cache. (list value)
+#memcached_servers=
+
+
+#
+# Options defined in nova.openstack.common.periodic_task
+#
+
+# Some periodic tasks can be run in a separate process. Should
+# we run them here? (boolean value)
+#run_external_periodic_tasks=true
+
+
+#
+# Options defined in nova.openstack.common.policy
+#
+
+# The JSON file that defines policies. (string value)
+#policy_file=policy.json
+
+# Default rule. Enforced when a requested rule is not found.
+# (string value)
+#policy_default_rule=default
+
+# Directories where policy configuration files are stored.
+# They can be relative to any directory in the search path
+# defined by the config_dir option, or absolute paths. The
+# file defined by policy_file must exist for these directories
+# to be searched. Missing or empty directories are ignored.
+# (multi valued)
+#policy_dirs=policy.d
+
+
+#
+# Options defined in nova.openstack.common.versionutils
+#
+
+# Enables or disables fatal status of deprecations. (boolean
+# value)
+#fatal_deprecations=false
+
+
+[api_database]
+
+#
+# Options defined in nova.db.sqlalchemy.api
+#
+
+# The SQLAlchemy connection string to use to connect to the
+# Nova API database. (string value)
+#connection=
+
+# If True, SQLite uses synchronous mode. (boolean value)
+#sqlite_synchronous=true
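+
+# Illustrative only, not part of the generated sample: the
+# connection string is a standard SQLAlchemy URL; the host and
+# credentials below are placeholders, e.g.:
+#
+#   connection=mysql://nova:secret@db.example.org/nova_api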
+
+# Timeout before idle SQL connections are reaped. (integer
+# value)
+#idle_timeout=3600
+
+# Maximum number of SQL connections to keep open in a pool.
+# (integer value)
+#max_pool_size=
+
+# Maximum number of database connection retries during
+# startup. Set to -1 to specify an infinite retry count.
+# (integer value)
+#max_retries=10
+
+# Interval between retries of opening a SQL connection.
+# (integer value)
+#retry_interval=10
+
+# If set, use this value for max_overflow with SQLAlchemy.
+# (integer value)
+#max_overflow=
+
+# Verbosity of SQL debugging information: 0=None,
+# 100=Everything. (integer value)
+#connection_debug=0
+
+# Add Python stack traces to SQL as comment strings. (boolean
+# value)
+#connection_trace=false
+
+# If set, use this value for pool_timeout with SQLAlchemy.
+# (integer value)
+#pool_timeout=
+
+
+[barbican]
+
+#
+# Options defined in nova.keymgr.barbican
+#
+
+# Info to match when looking for barbican in the service
+# catalog. Format is: separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#catalog_info=key-manager:barbican:public
+
+# Override service catalog lookup with template for barbican
+# endpoint e.g. http://localhost:9311/v1/%(project_id)s
+# (string value)
+#endpoint_template=
+
+# Region name of this node (string value)
+#os_region_name=
+
+
+#
+# Options defined in nova.volume.cinder
+#
+
+# Region name of this node (string value)
+#os_region_name=
+
+
+[cells]
+
+#
+# Options defined in nova.cells.manager
+#
+
+# Cells communication driver to use (string value)
+#driver=nova.cells.rpc_driver.CellsRPCDriver
+
+# Number of seconds after an instance was updated or deleted
+# to continue to update cells (integer value)
+#instance_updated_at_threshold=3600
+
+# Number of instances to update per periodic task run (integer
+# value)
+#instance_update_num_instances=1
+
+
+#
+# Options defined in nova.cells.messaging
+#
+
+# Maximum number of hops for cells routing. (integer value)
+#max_hop_count=10
+
+# Cells scheduler to use (string value)
+#scheduler=nova.cells.scheduler.CellsScheduler
+
+
+#
+# Options defined in nova.cells.opts
+#
+
+# Enable cell functionality (boolean value)
+#enable=false
+
+# The topic cells nodes listen on (string value)
+#topic=cells
+
+# Manager for cells (string value)
+#manager=nova.cells.manager.CellsManager
+
+# Name of this cell (string value)
+#name=nova
+
+# Key/Multi-value list with the capabilities of the cell (list
+# value)
+#capabilities=hypervisor=xenserver;kvm,os=linux;windows
+
+# Seconds to wait for response from a call to a cell. (integer
+# value)
+#call_timeout=60
+
+# Percentage of cell capacity to hold in reserve. Affects both
+# memory and disk utilization (floating point value)
+#reserve_percent=10.0
+
+# Type of cell: api or compute (string value)
+#cell_type=compute
+
+# Number of seconds after which a lack of capability and
+# capacity updates signals the child cell is to be treated as
+# a mute. (integer value)
+#mute_child_interval=300
+
+# Seconds between bandwidth updates for cells. (integer value)
+#bandwidth_update_interval=600
+
+
+#
+# Options defined in nova.cells.rpc_driver
+#
+
+# Base queue name to use when communicating between cells.
+# Various topics by message type will be appended to this.
+# (string value)
+#rpc_driver_queue_base=cells.intercell
+
+
+#
+# Options defined in nova.cells.scheduler
+#
+
+# Filter classes the cells scheduler should use. An entry of
+# "nova.cells.filters.all_filters" maps to all cells filters
+# included with nova. (list value)
+#scheduler_filter_classes=nova.cells.filters.all_filters
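+
+# Illustrative only, not part of the generated sample: a child
+# compute cell would typically be enabled and named, e.g.:
+#
+#   enable=true
+#   name=cell1
+#   cell_type=compute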
+
+# Weigher classes the cells scheduler should use. An entry of
+# "nova.cells.weights.all_weighers" maps to all cell weighers
+# included with nova. (list value)
+#scheduler_weight_classes=nova.cells.weights.all_weighers
+
+# How many retries when no cells are available. (integer
+# value)
+#scheduler_retries=10
+
+# How often to retry in seconds when no cells are available.
+# (integer value)
+#scheduler_retry_delay=2
+
+
+#
+# Options defined in nova.cells.state
+#
+
+# Interval, in seconds, for getting fresh cell information
+# from the database. (integer value)
+#db_check_interval=60
+
+# Configuration file from which to read cells configuration.
+# If given, overrides reading cells from the database. (string
+# value)
+#cells_config=
+
+
+#
+# Options defined in nova.cells.weights.mute_child
+#
+
+# Multiplier used to weigh mute children. (The value should be
+# negative.) (floating point value)
+#mute_weight_multiplier=-10.0
+
+# Weight value assigned to mute children. (The value should be
+# positive.) (floating point value)
+#mute_weight_value=1000.0
+
+
+#
+# Options defined in nova.cells.weights.ram_by_instance_type
+#
+
+# Multiplier used for weighing ram. Negative numbers mean to
+# stack vs spread. (floating point value)
+#ram_weight_multiplier=10.0
+
+
+#
+# Options defined in nova.cells.weights.weight_offset
+#
+
+# Multiplier used to weigh offset weigher. (floating point
+# value)
+#offset_weight_multiplier=1.0
+
+
+[cinder]
+
+#
+# Options defined in nova.volume.cinder
+#
+
+# Info to match when looking for cinder in the service
+# catalog. Format is: separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#catalog_info=volumev2:cinderv2:publicURL
+
+# Override service catalog lookup with template for cinder
+# endpoint e.g. http://localhost:8776/v1/%(project_id)s
+# (string value)
+#endpoint_template=
+
+# Number of cinderclient retries on failed http calls (integer
+# value)
+#http_retries=3
+
+# Allow attach between instance and volume in different
+# availability zones. (boolean value)
+#cross_az_attach=true
+
+
+[conductor]
+
+#
+# Options defined in nova.conductor.api
+#
+
+# Perform nova-conductor operations locally (boolean value)
+#use_local=false
+
+# The topic on which conductor nodes listen (string value)
+#topic=conductor
+
+# Full class name for the Manager for conductor (string value)
+#manager=nova.conductor.manager.ConductorManager
+
+# Number of workers for OpenStack Conductor service. The
+# default will be the number of CPUs available. (integer
+# value)
+#workers=
+
+
+[database]
+
+#
+# From oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+# Deprecated group/name - [DEFAULT]/sqlite_db
+#sqlite_db = oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+# Deprecated group/name - [DEFAULT]/sqlite_synchronous
+#sqlite_synchronous = true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend = sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the database. (string
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection =
+
+# The SQLAlchemy connection string to use to connect to the slave database.
+# (string value) +#slave_connection = + +# The SQL mode to be used for MySQL sessions. This option, including the +# default, overrides any server-set SQL mode. To use whatever SQL mode is set +# by the server configuration, set this to no value. Example: mysql_sql_mode= +# (string value) +#mysql_sql_mode = TRADITIONAL + +# Timeout before idle SQL connections are reaped. (integer value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout = 3600 + +# Minimum number of SQL connections to keep open in a pool. (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size = + +# Maximum number of database connection retries during startup. Set to -1 to +# specify an infinite retry count. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries = 10 + +# Interval between retries of opening a SQL connection. (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval = 10 + +# If set, use this value for max_overflow with SQLAlchemy. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow = + +# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug = 0 + +# Add Python stack traces to SQL as comment strings. (boolean value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace = false + +# If set, use this value for pool_timeout with SQLAlchemy. (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout = + +# Enable the experimental use of database reconnect on connection lost. +# (boolean value) +#use_db_reconnect = false + +# Seconds between retries of a database transaction. (integer value) +#db_retry_interval = 1 + +# If True, increases the interval between retries of a database operation up to +# db_max_retry_interval. (boolean value) +#db_inc_retry_interval = true + +# If db_inc_retry_interval is set, the maximum seconds between retries of a +# database operation. (integer value) +#db_max_retry_interval = 10 + +# Maximum retries in case of connection error or deadlock error before error is +# raised. Set to -1 to specify an infinite retry count. (integer value) +#db_max_retries = 20 + + +# +# Options defined in nova.db.sqlalchemy.api +# + +# The SQLAlchemy connection string to use to connect to the +# slave database. (string value) +#slave_connection= + +# The SQL mode to be used for MySQL sessions. This option, +# including the default, overrides any server-set SQL mode. To +# use whatever SQL mode is set by the server configuration, +# set this to no value. Example: mysql_sql_mode= (string +# value) +#mysql_sql_mode=TRADITIONAL + + +[ephemeral_storage_encryption] + +# +# Options defined in nova.compute.api +# + +# Whether to encrypt ephemeral storage (boolean value) +#enabled=false + +# The cipher and mode to be used to encrypt ephemeral storage. 
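+# Which ciphers are available depends on kernel support. See
+# /proc/crypto for the list of available options. (string
+# value)
+#cipher=aes-xts-plain64
+
+# Illustrative only, not part of the generated sample:
+# enabling ephemeral storage encryption with the defaults
+# spelled out, e.g.:
+#
+#   enabled=true
+#   cipher=aes-xts-plain64
+#   key_size=512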
+
+# The bit length of the encryption key to be used to encrypt
+# ephemeral storage (in XTS mode only half of the bits are
+# used for encryption key) (integer value)
+#key_size=512
+
+
+[glance]
+
+#
+# Options defined in nova.image.glance
+#
+
+# Default glance hostname or IP address (string value)
+#host=$my_ip
+
+# Default glance port (integer value)
+#port=9292
+
+# Default protocol to use when connecting to glance. Set to
+# https for SSL. (string value)
+#protocol=http
+
+# A list of the glance api servers available to nova. Prefix
+# with https:// for ssl-based glance api servers.
+# ([hostname|ip]:port) (list value)
+#api_servers=
+
+# Allow to perform insecure SSL (https) requests to glance
+# (boolean value)
+#api_insecure=false
+
+# Number of retries when uploading / downloading an image to /
+# from glance. (integer value)
+#num_retries=0
+
+# A list of url scheme that can be downloaded directly via the
+# direct_url. Currently supported schemes: [file]. (list
+# value)
+#allowed_direct_url_schemes=
+
+
+[guestfs]
+
+#
+# Options defined in nova.virt.disk.vfs.guestfs
+#
+
+# Enable guestfs debug (boolean value)
+#debug=false
+
+
+[hyperv]
+
+#
+# Options defined in nova.virt.hyperv.pathutils
+#
+
+# The name of a Windows share name mapped to the
+# "instances_path" dir and used by the resize feature to copy
+# files to the target host. If left blank, an administrative
+# share will be used, looking for the same "instances_path"
+# used locally (string value)
+#instances_path_share=
+
+
+#
+# Options defined in nova.virt.hyperv.utilsfactory
+#
+
+# Force V1 WMI utility classes (boolean value)
+#force_hyperv_utils_v1=false
+
+# Force V1 volume utility class (boolean value)
+#force_volumeutils_v1=false
+
+
+#
+# Options defined in nova.virt.hyperv.vif
+#
+
+# External virtual switch name. If not provided, the first
+# external virtual switch is used (string value)
+#vswitch_name=
+
+
+#
+# Options defined in nova.virt.hyperv.vmops
+#
+
+# Required for live migration among hosts with different CPU
+# features (boolean value)
+#limit_cpu_features=false
+
+# Sets the admin password in the config drive image (boolean
+# value)
+#config_drive_inject_password=false
+
+# Path of qemu-img command which is used to convert between
+# different image types (string value)
+#qemu_img_cmd=qemu-img.exe
+
+# Attaches the Config Drive image as a cdrom drive instead of
+# a disk drive (boolean value)
+#config_drive_cdrom=false
+
+# Enables metrics collections for an instance by using
+# Hyper-V's metric APIs. Collected data can be retrieved by
+# other apps and services, e.g.: Ceilometer. Requires Hyper-V
+# / Windows Server 2012 and above (boolean value)
+#enable_instance_metrics_collection=false
+
+# Enables dynamic memory allocation (ballooning) when set to a
+# value greater than 1. The value expresses the ratio between
+# the total RAM assigned to an instance and its startup RAM
+# amount. For example a ratio of 2.0 for an instance with
+# 1024MB of RAM implies 512MB of RAM allocated at startup
+# (floating point value)
+#dynamic_memory_ratio=1.0
+
+# Number of seconds to wait for instance to shut down after
+# soft reboot request is made. We fall back to hard reboot if
+# instance does not shutdown within this window. (integer
+# value)
+#wait_soft_reboot_seconds=60
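+
+# Illustrative only, not part of the generated sample: a
+# Hyper-V host using dynamic memory at a 2:1 ratio and
+# exposing instance metrics might set, e.g.:
+#
+#   dynamic_memory_ratio=2.0
+#   enable_instance_metrics_collection=true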
+
+
+#
+# Options defined in nova.virt.hyperv.volumeops
+#
+
+# The number of times to retry to attach a volume (integer
+# value)
+#volume_attach_retry_count=10
+
+# Interval between volume attachment attempts, in seconds
+# (integer value)
+#volume_attach_retry_interval=5
+
+# The number of times to retry checking for a disk mounted via
+# iSCSI. (integer value)
+#mounted_disk_query_retry_count=10
+
+# Interval between checks for a mounted iSCSI disk, in
+# seconds. (integer value)
+#mounted_disk_query_retry_interval=5
+
+
+[image_file_url]
+
+#
+# Options defined in nova.image.download.file
+#
+
+# List of file systems that are configured in this file in the
+# image_file_url:<list entry name> sections (list value)
+#filesystems=
+
+
+[ironic]
+
+#
+# Options defined in nova.virt.ironic.driver
+#
+
+# Version of Ironic API service endpoint. (integer value)
+#api_version=1
+
+# URL for Ironic API endpoint. (string value)
+#api_endpoint=
+
+# Ironic keystone admin name (string value)
+#admin_username=
+
+# Ironic keystone admin password. (string value)
+#admin_password=
+
+# Ironic keystone auth token. (string value)
+#admin_auth_token=
+
+# Keystone public API endpoint. (string value)
+#admin_url=
+
+# Log level override for ironicclient. Set this in order to
+# override the global "default_log_levels", "verbose", and
+# "debug" settings. DEPRECATED: use standard logging
+# configuration. (string value)
+#client_log_level=
+
+# Ironic keystone tenant name. (string value)
+#admin_tenant_name=
+
+# How many times to retry when a request conflicts. (integer
+# value)
+#api_max_retries=60
+
+# How often to retry in seconds when a request conflicts
+# (integer value)
+#api_retry_interval=2
+
+
+[keymgr]
+
+#
+# Options defined in nova.keymgr
+#
+
+# The full class name of the key manager API class (string
+# value)
+#api_class=nova.keymgr.conf_key_mgr.ConfKeyManager
+
+
+#
+# Options defined in nova.keymgr.conf_key_mgr
+#
+
+# Fixed key returned by key manager, specified in hex (string
+# value)
+#fixed_key=
+
+
+[keystone_authtoken]
+
+#
+# From keystonemiddleware.auth_token
+#
+
+# Complete public Identity API endpoint. (string value)
+#auth_uri =
+
+# API version of the admin Identity API endpoint. (string value)
+#auth_version =
+
+# Do not handle authorization requests within the middleware, but delegate the
+# authorization decision to downstream WSGI components. (boolean value)
+#delay_auth_decision = false
+
+# Request timeout value for communicating with Identity API server. (integer
+# value)
+#http_connect_timeout =
+
+# How many times to attempt reconnection when communicating with the Identity
+# API server. (integer value)
+#http_request_max_retries = 3
+
+# Env key for the swift cache. (string value)
+#cache =
+
+# Required if identity server requires client certificate (string value)
+#certfile =
+
+# Required if identity server requires client certificate (string value)
+#keyfile =
+
+# A PEM encoded Certificate Authority to use when verifying HTTPs connections.
+# Defaults to system CAs. (string value)
+#cafile =
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# Directory used to cache files related to PKI tokens. (string value)
+#signing_dir =
+
+# Optionally specify a list of memcached server(s) to use for caching. If left
+# undefined, tokens will instead be cached in-process.
(list value) +# Deprecated group/name - [DEFAULT]/memcache_servers +#memcached_servers = + +# In order to prevent excessive effort spent validating tokens, the middleware +# caches previously-seen tokens for a configurable duration (in seconds). Set +# to -1 to disable caching completely. (integer value) +#token_cache_time = 300 + +# Determines the frequency at which the list of revoked tokens is retrieved +# from the Identity service (in seconds). A high number of revocation events +# combined with a low cache duration may significantly reduce performance. +# (integer value) +#revocation_cache_time = 10 + +# (Optional) If defined, indicate whether token data should be authenticated or +# authenticated and encrypted. Acceptable values are MAC or ENCRYPT. If MAC, +# token data is authenticated (with HMAC) in the cache. If ENCRYPT, token data +# is encrypted and authenticated in the cache. If the value is not one of these +# options or empty, auth_token will raise an exception on initialization. +# (string value) +#memcache_security_strategy = + +# (Optional, mandatory if memcache_security_strategy is defined) This string is +# used for key derivation. (string value) +#memcache_secret_key = + +# (Optional) Number of seconds memcached server is considered dead before it is +# tried again. (integer value) +#memcache_pool_dead_retry = 300 + +# (Optional) Maximum total number of open connections to every memcached +# server. (integer value) +#memcache_pool_maxsize = 10 + +# (Optional) Socket timeout in seconds for communicating with a memcache +# server. (integer value) +#memcache_pool_socket_timeout = 3 + +# (Optional) Number of seconds a connection to memcached is held unused in the +# pool before it is closed. (integer value) +#memcache_pool_unused_timeout = 60 + +# (Optional) Number of seconds that an operation will wait to get a memcache +# client connection from the pool. (integer value) +#memcache_pool_conn_get_timeout = 10 + +# (Optional) Use the advanced (eventlet safe) memcache client pool. The +# advanced pool will only work under python 2.x. (boolean value) +#memcache_use_advanced_pool = false + +# (Optional) Indicate whether to set the X-Service-Catalog header. If False, +# middleware will not ask for service catalog on token validation and will not +# set the X-Service-Catalog header. (boolean value) +#include_service_catalog = true + +# Used to control the use and type of token binding. Can be set to: "disabled" +# to not check token binding. "permissive" (default) to validate binding +# information if the bind type is of a form known to the server and ignore it +# if not. "strict" like "permissive" but if the bind type is unknown the token +# will be rejected. "required" any form of token binding is needed to be +# allowed. Finally the name of a binding method that must be present in tokens. +# (string value) +#enforce_token_bind = permissive + +# If true, the revocation list will be checked for cached tokens. This requires +# that PKI tokens are configured on the identity server. (boolean value) +#check_revocations_for_cached = false + +# Hash algorithms to use for hashing PKI tokens. This may be a single algorithm +# or multiple. The algorithms are those supported by Python standard +# hashlib.new(). The hashes will be tried in the order given, so put the +# preferred one first for performance. The result of the first hash will be +# stored in the cache. This will typically be set to multiple values only while +# migrating from a less secure algorithm to a more secure one. 
Once all the old +# tokens are expired this option should be set to a single value for better +# performance. (list value) +#hash_algorithms = md5 + +# Prefix to prepend at the beginning of the path. Deprecated, use identity_uri. +# (string value) +#auth_admin_prefix = + +# Host providing the admin Identity API endpoint. Deprecated, use identity_uri. +# (string value) +#auth_host = 127.0.0.1 + +# Port of the admin Identity API endpoint. Deprecated, use identity_uri. +# (integer value) +#auth_port = 35357 + +# Protocol of the admin Identity API endpoint (http or https). Deprecated, use +# identity_uri. (string value) +#auth_protocol = https + +# Complete admin Identity API endpoint. This should specify the unversioned +# root endpoint e.g. https://localhost:35357/ (string value) +#identity_uri = + +# This option is deprecated and may be removed in a future release. Single +# shared secret with the Keystone configuration used for bootstrapping a +# Keystone installation, or otherwise bypassing the normal authentication +# process. This option should not be used, use `admin_user` and +# `admin_password` instead. (string value) +#admin_token = + +# Service username. (string value) +#admin_user = + +# Service user password. (string value) +#admin_password = + +# Service tenant name. (string value) +#admin_tenant_name = admin + + +[libvirt] + +# +# Options defined in nova.virt.libvirt.driver +# + +# Rescue ami image. This will not be used if an image id is +# provided by the user. (string value) +#rescue_image_id= + +# Rescue aki image (string value) +#rescue_kernel_id= + +# Rescue ari image (string value) +#rescue_ramdisk_id= + +# Libvirt domain type (valid options are: kvm, lxc, qemu, uml, +# xen and parallels) (string value) +#virt_type=kvm + +# Override the default libvirt URI (which is dependent on +# virt_type) (string value) +#connection_uri= + +# Inject the admin password at boot time, without an agent. +# (boolean value) +#inject_password=false + +# Inject the ssh public key at boot time (boolean value) +#inject_key=false + +# The partition to inject to : -2 => disable, -1 => inspect +# (libguestfs only), 0 => not partitioned, >0 => partition +# number (integer value) +#inject_partition=-2 + +# Sync virtual and real mouse cursors in Windows VMs (boolean +# value) +#use_usb_tablet=true + +# Migration target URI (any included "%s" is replaced with the +# migration target hostname) (string value) +#live_migration_uri=qemu+tcp://%s/system + +# Migration flags to be set for live migration (string value) +#live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED + +# Migration flags to be set for block migration (string value) +#block_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, VIR_MIGRATE_NON_SHARED_INC + +# Maximum bandwidth to be used during migration, in Mbps +# (integer value) +#live_migration_bandwidth=0 + +# Snapshot image format (valid options are : raw, qcow2, vmdk, +# vdi). Defaults to same as source image (string value) +#snapshot_image_format= + +# Override the default disk prefix for the devices attached to +# a server, which is dependent on virt_type. (valid options +# are: sd, xvd, uvd, vd) (string value) +#disk_prefix= + +# Number of seconds to wait for instance to shut down after +# soft reboot request is made. We fall back to hard reboot if +# instance does not shutdown within this window. 
(integer
+# value)
+#wait_soft_reboot_seconds=120
+
+# Set to "host-model" to clone the host CPU feature flags; to
+# "host-passthrough" to use the host CPU model exactly; to
+# "custom" to use a named CPU model; to "none" to not set any
+# CPU model. If virt_type="kvm|qemu", it will default to
+# "host-model", otherwise it will default to "none" (string
+# value)
+#cpu_mode=
+
+# Set to a named libvirt CPU model (see names listed in
+# /usr/share/libvirt/cpu_map.xml). Only has effect if
+# cpu_mode="custom" and virt_type="kvm|qemu" (string value)
+#cpu_model=
+
+# Location where libvirt driver will store snapshots before
+# uploading them to image service (string value)
+#snapshots_directory=$instances_path/snapshots
+
+# Location where the Xen hvmloader is kept (string value)
+#xen_hvmloader_path=/usr/lib/xen/boot/hvmloader
+
+# Specific cachemodes to use for different disk types, e.g.:
+# file=directsync,block=none (list value)
+#disk_cachemodes=
+
+# A path to a device that will be used as source of entropy on
+# the host. Permitted options are: /dev/random or /dev/hwrng
+# (string value)
+#rng_dev_path=
+
+# For qemu or KVM guests, set this option to specify a default
+# machine type per host architecture. You can find a list of
+# supported machine types in your environment by checking the
+# output of the "virsh capabilities" command. The format of the
+# value for this config option is host-arch=machine-type. For
+# example: x86_64=machinetype1,armv7l=machinetype2 (list
+# value)
+#hw_machine_type=
+
+# The data source used to populate the host "serial" UUID
+# exposed to guest in the virtual BIOS. Permitted options are
+# "hardware", "os", "none" or "auto" (default). (string value)
+#sysinfo_serial=auto
+
+# The period, in seconds, for memory usage statistics. A zero
+# or negative value disables memory usage statistics.
+# (integer value)
+#mem_stats_period_seconds=10
+
+# List of uid targets and ranges. Syntax is
+# guest-uid:host-uid:count. Maximum of 5 allowed. (list value)
+#uid_maps=
+
+# List of gid targets and ranges. Syntax is
+# guest-gid:host-gid:count. Maximum of 5 allowed. (list value)
+#gid_maps=
+
+
+#
+# Options defined in nova.virt.libvirt.imagebackend
+#
+
+# VM Images format. Acceptable values are: raw, qcow2, lvm,
+# rbd, default. If default is specified, then use_cow_images
+# flag is used instead of this one. (string value)
+#images_type=default
+
+# LVM Volume Group that is used for VM images, when you
+# specify images_type=lvm. (string value)
+#images_volume_group=
+
+# Create sparse logical volumes (with virtualsize) if this
+# flag is set to True. (boolean value)
+#sparse_logical_volumes=false
+
+# The RADOS pool in which rbd volumes are stored (string
+# value)
+#images_rbd_pool=rbd
+
+# Path to the ceph configuration file to use (string value)
+#images_rbd_ceph_conf=
+
+# Discard option for nova managed disks (valid options are:
+# ignore, unmap). Requires libvirt 1.0.6, QEMU 1.5 (raw
+# format) and QEMU 1.6 (qcow2 format) (string value)
+#hw_disk_discard=
+
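+# Example (illustrative values only, not defaults shipped by this
+# patch): to back instance disks with Ceph RBD instead of local
+# files, one might set the imagebackend options above to:
+#     images_type=rbd
+#     images_rbd_pool=ephemeral
+#     images_rbd_ceph_conf=/etc/ceph/ceph.conf
+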
+
+#
+# Options defined in nova.virt.libvirt.imagecache
+#
+
+# Allows image information files to be stored in non-standard
+# locations (string value)
+#image_info_filename_pattern=$instances_path/$image_cache_subdirectory_name/%(image)s.info
+
+# Should unused kernel images be removed? This is only safe to
+# enable if all compute nodes have been updated to support
+# this option. This will be enabled by default in the future.
+# (boolean value)
+#remove_unused_kernels=false
+
+# Unused resized base images younger than this will not be
+# removed (integer value)
+#remove_unused_resized_minimum_age_seconds=3600
+
+# Write a checksum for files in _base to disk (boolean value)
+#checksum_base_images=false
+
+# How frequently to checksum base images (integer value)
+#checksum_interval_seconds=3600
+
+
+#
+# Options defined in nova.virt.libvirt.lvm
+#
+
+# Method used to wipe old volumes (valid options are: none,
+# zero, shred) (string value)
+#volume_clear=zero
+
+# Size in MiB to wipe at start of old volumes. 0 => all
+# (integer value)
+#volume_clear_size=0
+
+
+#
+# Options defined in nova.virt.libvirt.utils
+#
+
+# Compress snapshot images when possible. This currently
+# applies exclusively to qcow2 images (boolean value)
+#snapshot_compression=false
+
+
+#
+# Options defined in nova.virt.libvirt.vif
+#
+
+# Use virtio for bridge interfaces with KVM/QEMU (boolean
+# value)
+#use_virtio_for_bridges=true
+
+
+#
+# Options defined in nova.virt.libvirt.volume
+#
+
+# Number of times to rescan iSCSI target to find volume
+# (integer value)
+#num_iscsi_scan_tries=5
+
+# Number of times to rescan iSER target to find volume
+# (integer value)
+#num_iser_scan_tries=5
+
+# The RADOS client name for accessing rbd volumes (string
+# value)
+#rbd_user=
+
+# The libvirt UUID of the secret for rbd_user volumes
+# (string value)
+#rbd_secret_uuid=
+
+# Directory where the NFS volume is mounted on the compute
+# node (string value)
+#nfs_mount_point_base=$state_path/mnt
+
+# Mount options passed to the NFS client. See the nfs man
+# page for details (string value)
+#nfs_mount_options=
+
+# Directory where the SMBFS shares are mounted on the compute
+# node (string value)
+#smbfs_mount_point_base=$state_path/mnt
+
+# Mount options passed to the SMBFS client. See mount.cifs man
+# page for details. Note that the libvirt-qemu uid and gid
+# must be specified. (string value)
+#smbfs_mount_options=
+
+# Number of times to rediscover AoE target to find volume
+# (integer value)
+#num_aoe_discover_tries=3
+
+# Directory where the glusterfs volume is mounted on the
+# compute node (string value)
+#glusterfs_mount_point_base=$state_path/mnt
+
+# Use multipath connection of the iSCSI volume (boolean value)
+#iscsi_use_multipath=false
+
+# Use multipath connection of the iSER volume (boolean value)
+#iser_use_multipath=false
+
+# Path or URL to Scality SOFS configuration file (string
+# value)
+#scality_sofs_config=
+
+# Base dir where Scality SOFS shall be mounted (string value)
+#scality_sofs_mount_point=$state_path/scality
+
+# Protocols listed here will be accessed directly from QEMU.
+# Currently supported protocols: [gluster] (list value)
+#qemu_allowed_storage_drivers=
+
+# Directory where the Quobyte volume is mounted on the compute
+# node (string value)
+#quobyte_mount_point_base=$state_path/mnt
+
+# Path to a Quobyte Client configuration file. (string value)
+#quobyte_client_cfg=
+
+# The iSCSI transport iface to use to connect to target in
+# case offload support is desired. Supported transports are
+# be2iscsi, bnx2i, cxgb3i, cxgb4i, qla4xxx and ocs. Default
+# format is transport_name.hwaddress and can be generated
+# manually or via iscsiadm -m iface (string value)
+# Deprecated group/name - [DEFAULT]/iscsi_transport
+#iscsi_iface=
+
+
+[metrics]
+
+#
+# Options defined in nova.scheduler.weights.metrics
+#
+
+# Multiplier used for weighing metrics. (floating point value)
+#weight_multiplier=1.0
+
+# How the metrics are going to be weighed. This should be in
+# the form of "<name1>=<ratio1>, <name2>=<ratio2>, ...", where
+# <nameX> is one of the metrics to be weighed, and <ratioX> is
+# the corresponding ratio. So for "name1=1.0, name2=-1.0" the
+# final weight would be name1.value * 1.0 + name2.value *
+# -1.0. (list value)
+#weight_setting=
+
+# How to treat unavailable metrics. When a metric is NOT
+# available for a host: if this is set to True, an exception
+# is raised, so it is recommended to use the scheduler filter
+# MetricFilter to filter out those hosts; if it is set to
+# False, the unavailable metric is treated as a negative
+# factor in the weighing process, and the returned value is
+# set by the option weight_of_unavailable. (boolean value)
+#required=true
+
+# The final weight value to be returned if required is set to
+# False and any one of the metrics set by weight_setting is
+# unavailable. (floating point value)
+#weight_of_unavailable=-10000.0
+
+
+[neutron]
+
+#
+# Options defined in nova.api.metadata.handler
+#
+
+# Set flag to indicate Neutron will proxy metadata requests
+# and resolve instance ids. (boolean value)
+#service_metadata_proxy=false
+
+# Shared secret used to validate proxied Neutron metadata
+# requests (string value)
+#metadata_proxy_shared_secret=
+
+
+#
+# Options defined in nova.network.neutronv2.api
+#
+
+# URL for connecting to neutron (string value)
+#url=http://127.0.0.1:9696
+
+# User id for connecting to neutron in admin context.
+# DEPRECATED: specify an auth_plugin and appropriate
+# credentials instead. (string value)
+#admin_user_id=
+
+# Username for connecting to neutron in admin context.
+# DEPRECATED: specify an auth_plugin and appropriate
+# credentials instead. (string value)
+#admin_username=
+
+# Password for connecting to neutron in admin context.
+# DEPRECATED: specify an auth_plugin and appropriate
+# credentials instead. (string value)
+#admin_password=
+
+# Tenant id for connecting to neutron in admin context.
+# DEPRECATED: specify an auth_plugin and appropriate
+# credentials instead. (string value)
+#admin_tenant_id=
+
+# Tenant name for connecting to neutron in admin context. This
+# option will be ignored if neutron_admin_tenant_id is set.
+# Note that with Keystone V3 tenant names are only unique
+# within a domain. DEPRECATED: specify an auth_plugin and
+# appropriate credentials instead. (string value)
+#admin_tenant_name=
+
+# Region name for connecting to neutron in admin context
+# (string value)
+#region_name=
+
+# Authorization URL for connecting to neutron in admin
+# context. DEPRECATED: specify an auth_plugin and appropriate
+# credentials instead. (string value)
+#admin_auth_url=http://localhost:5000/v2.0
+
+# Authorization strategy for connecting to neutron in admin
+# context. DEPRECATED: specify an auth_plugin and appropriate
+# credentials instead. If an auth_plugin is specified, strategy
+# will be ignored. (string value)
+#auth_strategy=keystone
+
+# Name of Integration Bridge used by Open vSwitch (string
+# value)
+#ovs_bridge=br-int
+
+# Number of seconds before querying neutron for extensions
+# (integer value)
+#extension_sync_interval=600
+
+# DEPRECATED: Allow an instance to have multiple vNICs
+# attached to the same Neutron network. This option is
+# deprecated in the 2015.1 release and will be removed in the
+# 2015.2 release, where the default behavior will be to always
+# allow multiple ports from the same network to be attached to
+# an instance.
(boolean value) +#allow_duplicate_networks=false + + +[osapi_v3] + +# +# Options defined in nova.api.openstack +# + +# Whether the V3 API is enabled or not (boolean value) +#enabled=false + +# A list of v3 API extensions to never load. Specify the +# extension aliases here. (list value) +#extensions_blacklist= + +# If the list is not empty then a v3 API extension will only +# be loaded if it exists in this list. Specify the extension +# aliases here. (list value) +#extensions_whitelist= + + +[rdp] + +# +# Options defined in nova.rdp +# + +# Location of RDP html5 console proxy, in the form +# "http://127.0.0.1:6083/" (string value) +#html5_proxy_base_url=http://127.0.0.1:6083/ + +# Enable RDP related features (boolean value) +#enabled=false + + +[serial_console] + +# +# Options defined in nova.cmd.serialproxy +# + +# Host on which to listen for incoming requests (string value) +#serialproxy_host=0.0.0.0 + +# Port on which to listen for incoming requests (integer +# value) +#serialproxy_port=6083 + + +# +# Options defined in nova.console.serial +# + +# Enable serial console related features (boolean value) +#enabled=false + +# Range of TCP ports to use for serial ports on compute hosts +# (string value) +#port_range=10000:20000 + +# Location of serial console proxy. (string value) +#base_url=ws://127.0.0.1:6083/ + +# IP address on which instance serial console should listen +# (string value) +#listen=127.0.0.1 + +# The address to which proxy clients (like nova-serialproxy) +# should connect (string value) +#proxyclient_address=127.0.0.1 + + +[spice] + +# +# Options defined in nova.cmd.spicehtml5proxy +# + +# Host on which to listen for incoming requests (string value) +#html5proxy_host=0.0.0.0 + +# Port on which to listen for incoming requests (integer +# value) +#html5proxy_port=6082 + + +# +# Options defined in nova.spice +# + +# Location of spice HTML5 console proxy, in the form +# "http://127.0.0.1:6082/spice_auto.html" (string value) +#html5proxy_base_url=http://127.0.0.1:6082/spice_auto.html + +# IP address on which instance spice server should listen +# (string value) +#server_listen=127.0.0.1 + +# The address to which proxy clients (like nova- +# spicehtml5proxy) should connect (string value) +#server_proxyclient_address=127.0.0.1 + +# Enable spice related features (boolean value) +#enabled=false + +# Enable spice guest agent support (boolean value) +#agent_enabled=true + +# Keymap for spice (string value) +#keymap=en-us + + +[ssl] + +# +# Options defined in nova.openstack.common.sslutils +# + +# CA certificate file to use to verify connecting clients. +# (string value) +#ca_file= + +# Certificate file to use when starting the server securely. +# (string value) +#cert_file= + +# Private key file to use when starting the server securely. 
+# (string value) +#key_file= + + +[trusted_computing] + +# +# Options defined in nova.scheduler.filters.trusted_filter +# + +# Attestation server HTTP (string value) +#attestation_server= + +# Attestation server Cert file for Identity verification +# (string value) +#attestation_server_ca_file= + +# Attestation server port (string value) +#attestation_port=8443 + +# Attestation web API URL (string value) +#attestation_api_url=/OpenAttestationWebServices/V1.0 + +# Attestation authorization blob - must change (string value) +#attestation_auth_blob= + +# Attestation status cache valid period length (integer value) +#attestation_auth_timeout=60 + +# Disable SSL cert verification for Attestation service +# (boolean value) +#attestation_insecure_ssl=false + + +[upgrade_levels] + +# +# Options defined in nova.baserpc +# + +# Set a version cap for messages sent to the base api in any +# service (string value) +#baseapi= + + +# +# Options defined in nova.cells.rpc_driver +# + +# Set a version cap for messages sent between cells services +# (string value) +#intercell= + + +# +# Options defined in nova.cells.rpcapi +# + +# Set a version cap for messages sent to local cells services +# (string value) +#cells= + + +# +# Options defined in nova.cert.rpcapi +# + +# Set a version cap for messages sent to cert services (string +# value) +#cert= + + +# +# Options defined in nova.compute.rpcapi +# + +# Set a version cap for messages sent to compute services. If +# you plan to do a live upgrade from havana to icehouse, you +# should set this option to "icehouse-compat" before beginning +# the live upgrade procedure. (string value) +#compute= + + +# +# Options defined in nova.conductor.rpcapi +# + +# Set a version cap for messages sent to conductor services +# (string value) +#conductor= + + +# +# Options defined in nova.console.rpcapi +# + +# Set a version cap for messages sent to console services +# (string value) +#console= + + +# +# Options defined in nova.consoleauth.rpcapi +# + +# Set a version cap for messages sent to consoleauth services +# (string value) +#consoleauth= + + +# +# Options defined in nova.network.rpcapi +# + +# Set a version cap for messages sent to network services +# (string value) +#network= + + +# +# Options defined in nova.scheduler.rpcapi +# + +# Set a version cap for messages sent to scheduler services +# (string value) +#scheduler= + + +[vmware] + +# +# Options defined in nova.virt.vmwareapi.driver +# + +# The PBM status. (boolean value) +#pbm_enabled=false + +# PBM service WSDL file location URL. e.g. +# file:///opt/SDK/spbm/wsdl/pbmService.wsdl Not setting this +# will disable storage policy based placement of instances. +# (string value) +#pbm_wsdl_location= + +# The PBM default policy. If pbm_wsdl_location is set and +# there is no defined storage policy for the specific request +# then this policy will be used. (string value) +#pbm_default_policy= + +# Hostname or IP address for connection to VMware VC host. +# (string value) +#host_ip= + +# Port for connection to VMware VC host. (integer value) +#host_port=443 + +# Username for connection to VMware VC host. (string value) +#host_username= + +# Password for connection to VMware VC host. (string value) +#host_password= + +# Name of a VMware Cluster ComputeResource. (multi valued) +#cluster_name= + +# Regex to match the name of a datastore. (string value) +#datastore_regex= + +# The interval used for polling of remote tasks. 
(floating
+# point value)
+#task_poll_interval=0.5
+
+# The number of times we retry on failures, e.g., socket
+# error, etc. (integer value)
+#api_retry_count=10
+
+# VNC starting port (integer value)
+#vnc_port=5900
+
+# Total number of VNC ports (integer value)
+#vnc_port_total=10000
+
+# Whether to use linked clone (boolean value)
+#use_linked_clone=true
+
+# Optional VIM Service WSDL Location, e.g.
+# http://<server>/vimService.wsdl. Optional override of the
+# default location for bug workarounds (string value)
+#wsdl_location=
+
+
+#
+# Options defined in nova.virt.vmwareapi.vif
+#
+
+# Physical ethernet adapter name for vlan networking (string
+# value)
+#vlan_interface=vmnic0
+
+# Name of Integration Bridge (string value)
+#integration_bridge=br-int
+
+
+#
+# Options defined in nova.virt.vmwareapi.vim_util
+#
+
+# The maximum number of ObjectContent data objects that should
+# be returned in a single result. A positive value will cause
+# the operation to suspend the retrieval when the count of
+# objects reaches the specified maximum. The server may still
+# limit the count to something less than the configured value.
+# Any remaining objects may be retrieved with additional
+# requests. (integer value)
+#maximum_objects=100
+
+
+#
+# Options defined in nova.virt.vmwareapi.vmops
+#
+
+# The prefix for where cached images are stored. This is NOT
+# the full path - just a folder prefix. This should only be
+# used when a datastore cache should be shared between compute
+# nodes. Note: this should only be used when the compute nodes
+# have a shared file system. (string value)
+#cache_prefix=
+
+
+[workarounds]
+
+#
+# Options defined in nova.utils
+#
+
+# This option allows a fallback to sudo for performance
+# reasons. For example see
+# https://bugs.launchpad.net/nova/+bug/1415106 (boolean value)
+#disable_rootwrap=false
+
+# When using libvirt 1.2.2, live snapshots fail intermittently
+# under load. This config option provides a mechanism to
+# disable live snapshots while this is resolved. See
+# https://bugs.launchpad.net/nova/+bug/1334398 (boolean value)
+#disable_libvirt_livesnapshot=true
+
+# Whether to destroy instances on startup when we suspect they
+# have previously been evacuated. This can result in data loss
+# if undesired. See https://launchpad.net/bugs/1419785
+# (boolean value)
+#destroy_after_evacuate=true
+
+
+[xenserver]
+
+#
+# Options defined in nova.virt.xenapi.agent
+#
+
+# Number of seconds to wait for agent reply (integer value)
+#agent_timeout=30
+
+# Number of seconds to wait for agent to be fully operational
+# (integer value)
+#agent_version_timeout=300
+
+# Number of seconds to wait for agent reply to resetnetwork
+# request (integer value)
+#agent_resetnetwork_timeout=60
+
+# Specifies the path in which the XenAPI guest agent should be
+# located. If the agent is present, network configuration is
+# not injected into the image. Used if
+# compute_driver=xenapi.XenAPIDriver and flat_injected=True
+# (string value)
+#agent_path=usr/sbin/xe-update-networking
+
+# Disables the use of the XenAPI agent in any image regardless
+# of what image properties are present. (boolean value)
+#disable_agent=false
+
+# Determines if the XenAPI agent should be used when the image
+# used does not contain a hint to declare if the agent is
+# present or not. The hint is a glance property
+# "xenapi_use_agent" that has the value "True" or "False".
+# Note that waiting for the agent when it is not present will
+# significantly increase server boot times.
(boolean value) +#use_agent_default=false + + +# +# Options defined in nova.virt.xenapi.client.session +# + +# Timeout in seconds for XenAPI login. (integer value) +#login_timeout=10 + +# Maximum number of concurrent XenAPI connections. Used only +# if compute_driver=xenapi.XenAPIDriver (integer value) +#connection_concurrent=5 + + +# +# Options defined in nova.virt.xenapi.driver +# + +# URL for connection to XenServer/Xen Cloud Platform. A +# special value of unix://local can be used to connect to the +# local unix socket. Required if +# compute_driver=xenapi.XenAPIDriver (string value) +#connection_url= + +# Username for connection to XenServer/Xen Cloud Platform. +# Used only if compute_driver=xenapi.XenAPIDriver (string +# value) +#connection_username=root + +# Password for connection to XenServer/Xen Cloud Platform. +# Used only if compute_driver=xenapi.XenAPIDriver (string +# value) +#connection_password= + +# The interval used for polling of coalescing vhds. Used only +# if compute_driver=xenapi.XenAPIDriver (floating point value) +#vhd_coalesce_poll_interval=5.0 + +# Ensure compute service is running on host XenAPI connects +# to. (boolean value) +#check_host=true + +# Max number of times to poll for VHD to coalesce. Used only +# if compute_driver=xenapi.XenAPIDriver (integer value) +#vhd_coalesce_max_attempts=20 + +# Base path to the storage repository (string value) +#sr_base_path=/var/run/sr-mount + +# The iSCSI Target Host (string value) +#target_host= + +# The iSCSI Target Port, default is port 3260 (string value) +#target_port=3260 + +# IQN Prefix (string value) +#iqn_prefix=iqn.2010-10.org.openstack + +# Used to enable the remapping of VBD dev (Works around an +# issue in Ubuntu Maverick) (boolean value) +#remap_vbd_dev=false + +# Specify prefix to remap VBD dev to (ex. /dev/xvdb -> +# /dev/sdb) (string value) +#remap_vbd_dev_prefix=sd + + +# +# Options defined in nova.virt.xenapi.image.bittorrent +# + +# Base URL for torrent files. (string value) +#torrent_base_url= + +# Probability that peer will become a seeder. (1.0 = 100%) +# (floating point value) +#torrent_seed_chance=1.0 + +# Number of seconds after downloading an image via BitTorrent +# that it should be seeded for other peers. (integer value) +#torrent_seed_duration=3600 + +# Cached torrent files not accessed within this number of +# seconds can be reaped (integer value) +#torrent_max_last_accessed=86400 + +# Beginning of port range to listen on (integer value) +#torrent_listen_port_start=6881 + +# End of port range to listen on (integer value) +#torrent_listen_port_end=6891 + +# Number of seconds a download can remain at the same progress +# percentage w/o being considered a stall (integer value) +#torrent_download_stall_cutoff=600 + +# Maximum number of seeder processes to run concurrently +# within a given dom0. (-1 = no limit) (integer value) +#torrent_max_seeder_processes_per_host=1 + + +# +# Options defined in nova.virt.xenapi.pool +# + +# To use for hosts with different CPUs (boolean value) +#use_join_force=true + + +# +# Options defined in nova.virt.xenapi.vif +# + +# Name of Integration Bridge used by Open vSwitch (string +# value) +#ovs_integration_bridge=xapi1 + + +# +# Options defined in nova.virt.xenapi.vm_utils +# + +# Cache glance images locally. `all` will cache all images, +# `some` will only cache images that have the image_property +# `cache_in_nova=True`, and `none` turns off caching entirely +# (string value) +#cache_images=all + +# Compression level for images, e.g., 9 for gzip -9. 
Range is +# 1-9, 9 being most compressed but most CPU intensive on dom0. +# (integer value) +#image_compression_level= + +# Default OS type (string value) +#default_os_type=linux + +# Time to wait for a block device to be created (integer +# value) +#block_device_creation_timeout=10 + +# Maximum size in bytes of kernel or ramdisk images (integer +# value) +#max_kernel_ramdisk_size=16777216 + +# Filter for finding the SR to be used to install guest +# instances on. To use the Local Storage in default +# XenServer/XCP installations set this flag to other-config +# :i18n-key=local-storage. To select an SR with a different +# matching criteria, you could set it to other- +# config:my_favorite_sr=true. On the other hand, to fall back +# on the Default SR, as displayed by XenCenter, set this flag +# to: default-sr:true (string value) +#sr_matching_filter=default-sr:true + +# Whether to use sparse_copy for copying data on a resize down +# (False will use standard dd). This speeds up resizes down +# considerably since large runs of zeros won't have to be +# rsynced (boolean value) +#sparse_copy=true + +# Maximum number of retries to unplug VBD. if <=0, should try +# once and no retry (integer value) +#num_vbd_unplug_retries=10 + +# Whether or not to download images via Bit Torrent +# (all|some|none). (string value) +#torrent_images=none + +# Name of network to use for booting iPXE ISOs (string value) +#ipxe_network_name= + +# URL to the iPXE boot menu (string value) +#ipxe_boot_menu_url= + +# Name and optionally path of the tool used for ISO image +# creation (string value) +#ipxe_mkisofs_cmd=mkisofs + + +# +# Options defined in nova.virt.xenapi.vmops +# + +# Number of seconds to wait for instance to go to running +# state (integer value) +#running_timeout=60 + +# The XenAPI VIF driver using XenServer Network APIs. (string +# value) +#vif_driver=nova.virt.xenapi.vif.XenAPIBridgeDriver + +# Dom0 plugin driver used to handle image uploads. (string +# value) +#image_upload_handler=nova.virt.xenapi.image.glance.GlanceStore + + +# +# Options defined in nova.virt.xenapi.volume_utils +# + +# Number of seconds to wait for an SR to settle if the VDI +# does not exist when first introduced (integer value) +#introduce_vdi_retry_wait=20 + + +[zookeeper] + +# +# Options defined in nova.servicegroup.drivers.zk +# + +# The ZooKeeper addresses for servicegroup service in the +# format of host1:port,host2:port,host3:port (string value) +#address= + +# The recv_timeout parameter for the zk session (integer +# value) +#recv_timeout=4000 + +# The prefix used in ZooKeeper to store ephemeral nodes +# (string value) +#sg_prefix=/servicegroups + +# Number of seconds to wait until retrying to join the session +# (integer value) +#sg_retry_interval=5 + + +[matchmaker_redis] + +# +# From oslo.messaging +# + +# Host to locate redis. (string value) +#host = 127.0.0.1 + +# Use this port to connect to redis host. (integer value) +#port = 6379 + +# Password for Redis server (optional). (string value) +#password = + + +[matchmaker_ring] + +# +# From oslo.messaging +# + +# Matchmaker ring file (JSON). (string value) +# Deprecated group/name - [DEFAULT]/matchmaker_ringfile +#ringfile = /etc/oslo/matchmaker_ring.json + + +[oslo_concurrency] + +# +# From oslo.concurrency +# + +# Enables or disables inter-process locks. (boolean value) +# Deprecated group/name - [DEFAULT]/disable_process_locking +#disable_process_locking = false + +# Directory to use for lock files. 
For security, the specified directory +# should only be writable by the user running the processes that need locking. +# Defaults to environment variable OSLO_LOCK_PATH. If external locks are used, +# a lock path must be set. (string value) +# Deprecated group/name - [DEFAULT]/lock_path +#lock_path = + + +[oslo_messaging_amqp] + +# +# From oslo.messaging +# + +# address prefix used when sending to a specific server (string value) +# Deprecated group/name - [amqp1]/server_request_prefix +#server_request_prefix = exclusive + +# address prefix used when broadcasting to all servers (string value) +# Deprecated group/name - [amqp1]/broadcast_prefix +#broadcast_prefix = broadcast + +# address prefix when sending to any server in group (string value) +# Deprecated group/name - [amqp1]/group_request_prefix +#group_request_prefix = unicast + +# Name for the AMQP container (string value) +# Deprecated group/name - [amqp1]/container_name +#container_name = + +# Timeout for inactive connections (in seconds) (integer value) +# Deprecated group/name - [amqp1]/idle_timeout +#idle_timeout = 0 + +# Debug: dump AMQP frames to stdout (boolean value) +# Deprecated group/name - [amqp1]/trace +#trace = false + +# CA certificate PEM file for verifing server certificate (string value) +# Deprecated group/name - [amqp1]/ssl_ca_file +#ssl_ca_file = + +# Identifying certificate PEM file to present to clients (string value) +# Deprecated group/name - [amqp1]/ssl_cert_file +#ssl_cert_file = + +# Private key PEM file used to sign cert_file certificate (string value) +# Deprecated group/name - [amqp1]/ssl_key_file +#ssl_key_file = + +# Password for decrypting ssl_key_file (if encrypted) (string value) +# Deprecated group/name - [amqp1]/ssl_key_password +#ssl_key_password = + +# Accept clients using either SSL or plain TCP (boolean value) +# Deprecated group/name - [amqp1]/allow_insecure_clients +#allow_insecure_clients = false + + +[oslo_messaging_qpid] + +# +# From oslo.messaging +# + +# Use durable queues in AMQP. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues = false + +# Auto-delete queues in AMQP. (boolean value) +# Deprecated group/name - [DEFAULT]/amqp_auto_delete +#amqp_auto_delete = false + +# Size of RPC connection pool. (integer value) +# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size +#rpc_conn_pool_size = 30 + +# Qpid broker hostname. (string value) +# Deprecated group/name - [DEFAULT]/qpid_hostname +#qpid_hostname = localhost + +# Qpid broker port. (integer value) +# Deprecated group/name - [DEFAULT]/qpid_port +#qpid_port = 5672 + +# Qpid HA cluster host:port pairs. (list value) +# Deprecated group/name - [DEFAULT]/qpid_hosts +#qpid_hosts = $qpid_hostname:$qpid_port + +# Username for Qpid connection. (string value) +# Deprecated group/name - [DEFAULT]/qpid_username +#qpid_username = + +# Password for Qpid connection. (string value) +# Deprecated group/name - [DEFAULT]/qpid_password +#qpid_password = + +# Space separated list of SASL mechanisms to use for auth. (string value) +# Deprecated group/name - [DEFAULT]/qpid_sasl_mechanisms +#qpid_sasl_mechanisms = + +# Seconds between connection keepalive heartbeats. (integer value) +# Deprecated group/name - [DEFAULT]/qpid_heartbeat +#qpid_heartbeat = 60 + +# Transport to use, either 'tcp' or 'ssl'. (string value) +# Deprecated group/name - [DEFAULT]/qpid_protocol +#qpid_protocol = tcp + +# Whether to disable the Nagle algorithm. 
(boolean value) +# Deprecated group/name - [DEFAULT]/qpid_tcp_nodelay +#qpid_tcp_nodelay = true + +# The number of prefetched messages held by receiver. (integer value) +# Deprecated group/name - [DEFAULT]/qpid_receiver_capacity +#qpid_receiver_capacity = 1 + +# The qpid topology version to use. Version 1 is what was originally used by +# impl_qpid. Version 2 includes some backwards-incompatible changes that allow +# broker federation to work. Users should update to version 2 when they are +# able to take everything down, as it requires a clean break. (integer value) +# Deprecated group/name - [DEFAULT]/qpid_topology_version +#qpid_topology_version = 1 + + +[oslo_messaging_rabbit] + +# +# From oslo.messaging +# + +# Use durable queues in AMQP. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues = false + +# Auto-delete queues in AMQP. (boolean value) +# Deprecated group/name - [DEFAULT]/amqp_auto_delete +#amqp_auto_delete = false + +# Size of RPC connection pool. (integer value) +# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size +#rpc_conn_pool_size = 30 + +# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and +# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some +# distributions. (string value) +# Deprecated group/name - [DEFAULT]/kombu_ssl_version +#kombu_ssl_version = + +# SSL key file (valid only if SSL enabled). (string value) +# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile +#kombu_ssl_keyfile = + +# SSL cert file (valid only if SSL enabled). (string value) +# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile +#kombu_ssl_certfile = + +# SSL certification authority file (valid only if SSL enabled). (string value) +# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs +#kombu_ssl_ca_certs = + +# How long to wait before reconnecting in response to an AMQP consumer cancel +# notification. (floating point value) +# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay +#kombu_reconnect_delay = 1.0 + +# The RabbitMQ broker address where a single node is used. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_host +#rabbit_host = localhost + +# The RabbitMQ broker port where a single node is used. (integer value) +# Deprecated group/name - [DEFAULT]/rabbit_port +#rabbit_port = 5672 + +# RabbitMQ HA cluster host:port pairs. (list value) +# Deprecated group/name - [DEFAULT]/rabbit_hosts +#rabbit_hosts = $rabbit_host:$rabbit_port + +# Connect over SSL for RabbitMQ. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_use_ssl +#rabbit_use_ssl = false + +# The RabbitMQ userid. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_userid +#rabbit_userid = guest + +# The RabbitMQ password. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_password +#rabbit_password = guest + +# The RabbitMQ login method. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_login_method +#rabbit_login_method = AMQPLAIN + +# The RabbitMQ virtual host. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_virtual_host +#rabbit_virtual_host = / + +# How frequently to retry connecting with RabbitMQ. (integer value) +#rabbit_retry_interval = 1 + +# How long to backoff for between retries when connecting to RabbitMQ. (integer +# value) +# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff +#rabbit_retry_backoff = 2 + +# Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry +# count). 
(integer value)
+# Deprecated group/name - [DEFAULT]/rabbit_max_retries
+#rabbit_max_retries = 0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you
+# must wipe the RabbitMQ database. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
+#rabbit_ha_queues = false
+
+# Number of seconds after which the Rabbit broker is considered down if
+# heartbeat's keep-alive fails (0 disables the heartbeat). (integer value)
+#heartbeat_timeout_threshold = 60
+
+# How many times during the heartbeat_timeout_threshold we check the
+# heartbeat. (integer value)
+#heartbeat_rate = 2
+
+# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value)
+# Deprecated group/name - [DEFAULT]/fake_rabbit
+#fake_rabbit = false
diff --git a/openstack/python-nova/centos/files/nova.logrotate b/openstack/python-nova/centos/files/nova.logrotate
new file mode 100644
index 00000000..6017ca60
--- /dev/null
+++ b/openstack/python-nova/centos/files/nova.logrotate
@@ -0,0 +1,7 @@
+/var/log/nova/*.log {
+    rotate 14
+    size 10M
+    missingok
+    compress
+    copytruncate
+}
diff --git a/openstack/python-nova/centos/files/nova_migration-rootwrap.conf b/openstack/python-nova/centos/files/nova_migration-rootwrap.conf
new file mode 100644
index 00000000..dd3dc726
--- /dev/null
+++ b/openstack/python-nova/centos/files/nova_migration-rootwrap.conf
@@ -0,0 +1,6 @@
+[DEFAULT]
+use_syslog=True
+syslog_log_facility=syslog
+syslog_log_level=ERROR
+filters_path=/etc/nova/migration/rootwrap.d
+
diff --git a/openstack/python-nova/centos/files/nova_migration-rootwrap_cold_migration b/openstack/python-nova/centos/files/nova_migration-rootwrap_cold_migration
new file mode 100644
index 00000000..ad56460a
--- /dev/null
+++ b/openstack/python-nova/centos/files/nova_migration-rootwrap_cold_migration
@@ -0,0 +1,9 @@
+[Filters]
+create_file: PathFilter, /usr/bin/touch, nova, /var/lib/nova/instances/
+remove_file: PathFilter, /usr/bin/rm, nova, /var/lib/nova/instances/
+create_dir: PathFilter, /usr/bin/mkdir, nova, -p, /var/lib/nova/instances/
+remove_dir: PathFilter, /usr/bin/rm, nova, -rf, /var/lib/nova/instances/
+copy_file_local_to_remote_recursive: PathFilter, /usr/bin/scp, nova, -r, -t, /var/lib/nova/instances/
+copy_file_remote_to_local_recursive: PathFilter, /usr/bin/scp, nova, -r, -f, /var/lib/nova/instances/
+copy_file_local_to_remote: PathFilter, /usr/bin/scp, nova, -t, /var/lib/nova/instances/
+copy_file_remote_to_local: PathFilter, /usr/bin/scp, nova, -f, /var/lib/nova/instances/
diff --git a/openstack/python-nova/centos/files/nova_migration-sudoers b/openstack/python-nova/centos/files/nova_migration-sudoers
new file mode 100644
index 00000000..eefdc0ba
--- /dev/null
+++ b/openstack/python-nova/centos/files/nova_migration-sudoers
@@ -0,0 +1,4 @@
+Defaults:nova_migration !requiretty
+
+nova_migration ALL = (nova) NOPASSWD: /usr/bin/nc -U /var/run/libvirt/libvirt-sock
+nova_migration ALL = (root) NOPASSWD: /usr/bin/nova-rootwrap /etc/nova/migration/rootwrap.conf *
diff --git a/openstack/python-nova/centos/files/nova_migration_authorized_keys b/openstack/python-nova/centos/files/nova_migration_authorized_keys
new file mode 100644
index 00000000..bd4c88d9
--- /dev/null
+++ b/openstack/python-nova/centos/files/nova_migration_authorized_keys
@@ -0,0 +1,4 @@
+# SSH authorized_keys file for OpenStack Nova migration
+#
+# This controls which hosts are allowed to migrate VMs to this host.
+# Append the SSH public keys of authorized hosts to this file.
\ No newline at end of file diff --git a/openstack/python-nova/centos/files/nova_migration_identity b/openstack/python-nova/centos/files/nova_migration_identity new file mode 100644 index 00000000..c81ef164 --- /dev/null +++ b/openstack/python-nova/centos/files/nova_migration_identity @@ -0,0 +1,6 @@ +# SSH identity file (private key) for Openstack Nova migration +# +# Generate an ssh key pair for this host. +# Add the private key (e.g id_rsa) to this file. +# Add the public key (e.g id_rsa.pub) to /etc/nova/migration/authorized_keys +# on the migration target hosts. \ No newline at end of file diff --git a/openstack/python-nova/centos/files/nova_setup_timer_advance b/openstack/python-nova/centos/files/nova_setup_timer_advance new file mode 100644 index 00000000..00c655c7 --- /dev/null +++ b/openstack/python-nova/centos/files/nova_setup_timer_advance @@ -0,0 +1,120 @@ +#!/bin/bash + +# The qemu command details and the 98-102% range is taken from +# find-lapictscdeadline-optimal.sh and run-tscdeadline-latency.sh +# from the tuned package available at +# https://git.fedorahosted.org/cgit/tuned.git/tree/profiles/realtime-virtual-host +# +# The upstream code has no copyright notice in the scripts, but the +# overall package is licensed under the GPLv2 +# +# Copyright(c) 2016-2017 Wind River Systems, Inc. All rights reserved. + +QEMU=/usr/libexec/qemu-kvm +ADVANCE_FILE="/sys/module/kvm/parameters/lapic_timer_advance_ns" +ADVANCE_CALIB="/etc/nova/calibrated_lapic_timer_advance_ns" +ADVANCE_GOENABLED="/var/run/.nova_timer_advance_enabled" + +function log () +{ + logger -p local1.info -t $0 $@ + echo $0: "$@" +} + + +rm -f ${ADVANCE_GOENABLED} + +if [ ! -f $ADVANCE_FILE ]; then + touch ${ADVANCE_GOENABLED} + exit 1 +fi + +# Use previous calibrated advance result +if [ -f $ADVANCE_CALIB ]; then + read -r advance < $ADVANCE_CALIB + if [[ "$advance" =~ ^[0-9]+$ ]]; then + echo $advance > $ADVANCE_FILE + log "using advance value of" $(cat $ADVANCE_FILE) + touch ${ADVANCE_GOENABLED} + exit 0 + fi +fi + +# Move ourselves to the nova global cpuset. This will ensure that +# we run on a CPU that isn't being used by management or vswitch. +VCPU_PIN_STR=$(grep vcpu_pin_set /etc/nova/nova.conf) +VCPU_PIN_STR=${VCPU_PIN_STR//\"/} +FLOAT_CPUS=${VCPU_PIN_STR##*=} +if [ -z "${FLOAT_CPUS}" ]; then + log "skip calibration, we have not configured yet" + exit 0 +fi +log "Calibrating with FLOAT_CPUS: ${FLOAT_CPUS}" +taskset --pid --cpu-list ${FLOAT_CPUS} $$ &> /dev/null + +dir=$(mktemp -d) + +advance=1500 +latency=1000000 + + +for i in $(seq 1500 500 7000); do + log "test advance ${i}" + echo $i > $ADVANCE_FILE + timeout --foreground --signal TERM 10s \ + chrt -f 1 stdbuf -oL ${QEMU} -enable-kvm -device pc-testdev \ + -device isa-debug-exit,iobase=0xf4,iosize=0x4 \ + -display none -serial stdio -device pci-testdev \ + -kernel /usr/share/qemu-kvm/tscdeadline_latency.flat \ + -cpu host | awk 'NF==2 && /latency:/ {print $2}' > ${dir}/out0 + # chomp last line since output may be incomplete + sed \$d < ${dir}/out0 > ${dir}/out + + # Calculate the average of all the latency numbers output by + # the test image. 
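+    # Illustrative numbers only (not from a real run): with a previous
+    # latency of 1000000 and a new average of 985000, the comparison
+    # further below computes value=$(( 985000 * 100 / 1000000 )) => 98,
+    # which falls in the 98..101 "close enough" band, so the loop keeps
+    # the current advance and stops. Bash arithmetic is integer-only,
+    # so both the average and the percentage truncate toward zero.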
+ A=0 + while read l; do + A=$(($A + $l)) + done < $dir/out + + lines=$(wc -l $dir/out | cut -f 1 -d " ") + if [ ${lines} -eq 0 ]; then + # this shouldn't happen + log "got no output from test, aborting" + break + fi + + ans=$(($A/$lines)) + + # Get the current latency as a percentage of the previous latency + value=$((${ans}*100/${latency})) + + if [ $value -ge 102 ]; then + # Latency has increased by too much, we don't want to use this + # much advance. I didn't see this in practice, this is just + # a sanity check. + advance=$((${i} - 500)) + log "latency too large, reverting to advance of ${advance}" + echo $advance > $ADVANCE_FILE + break + elif [ $value -ge 98 ]; then + # If we're close to the previous latency, then use the current + # advance. The algorithm has a tendency to underestimate a bit, + # so we don't want to use the previous advance value. + break + else + # We're substantially lower than the previous latency, so store + # the current advance and latency numbers and loop through again + # to see if it improves further with a bit higher advance. + latency=$ans + advance=$i + fi +done + +# Save calibrated result +cat $ADVANCE_FILE > $ADVANCE_CALIB +log "using advance value of" $(cat $ADVANCE_FILE) + +rm -rf $dir +touch ${ADVANCE_GOENABLED} +exit 0 diff --git a/openstack/python-nova/centos/files/openstack-nova-api.service b/openstack/python-nova/centos/files/openstack-nova-api.service new file mode 100644 index 00000000..9ed93c32 --- /dev/null +++ b/openstack/python-nova/centos/files/openstack-nova-api.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack Nova API Server +After=syslog.target network.target + +[Service] +Type=simple +#NotifyAccess=all +TimeoutStartSec=0 +#Restart=always +User=root +ExecStart=/usr/bin/nova-api + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/python-nova/centos/files/openstack-nova-cells.service b/openstack/python-nova/centos/files/openstack-nova-cells.service new file mode 100644 index 00000000..1dfbff80 --- /dev/null +++ b/openstack/python-nova/centos/files/openstack-nova-cells.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack Nova Cells Server +After=syslog.target network.target + +[Service] +Type=simple +#NotifyAccess=all +TimeoutStartSec=0 +#Restart=always +User=root +ExecStart=/usr/bin/nova-cells + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/python-nova/centos/files/openstack-nova-compute.service b/openstack/python-nova/centos/files/openstack-nova-compute.service new file mode 100644 index 00000000..9e3e0e56 --- /dev/null +++ b/openstack/python-nova/centos/files/openstack-nova-compute.service @@ -0,0 +1,20 @@ +[Unit] +Description=OpenStack Nova Compute Server +After=syslog.target network.target libvirtd.service + +[Service] +Environment=LIBGUESTFS_ATTACH_METHOD=appliance +Type=simple +#NotifyAccess=all +TimeoutStartSec=0 +#Restart=always +User=root +PIDFile=/var/run/nova/nova-compute.pid +ExecStart=/usr/bin/nova-compute +ExecStartPost=/bin/bash -c 'echo $MAINPID > /var/run/nova/nova-compute.pid' +ExecStop=/bin/kill -HUP $MAINPID +StandardOutput=null + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/python-nova/centos/files/openstack-nova-conductor.service b/openstack/python-nova/centos/files/openstack-nova-conductor.service new file mode 100644 index 00000000..5407ccaa --- /dev/null +++ b/openstack/python-nova/centos/files/openstack-nova-conductor.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack Nova Conductor Server +After=syslog.target network.target + +[Service] 
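+# Note: this unit runs as the unprivileged "nova" account (User=nova
+# below), unlike the API, cells and compute units above, which run as
+# root.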
+Type=simple +#NotifyAccess=all +TimeoutStartSec=0 +#Restart=always +User=nova +ExecStart=/usr/bin/nova-conductor + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/python-nova/centos/files/openstack-nova-console.service b/openstack/python-nova/centos/files/openstack-nova-console.service new file mode 100644 index 00000000..5c4070cc --- /dev/null +++ b/openstack/python-nova/centos/files/openstack-nova-console.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack Nova Console Proxy Server +After=syslog.target network.target + +[Service] +Type=simple +#NotifyAccess=all +TimeoutStartSec=0 +#Restart=always +User=root +ExecStart=/usr/bin/nova-console + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/python-nova/centos/files/openstack-nova-consoleauth.service b/openstack/python-nova/centos/files/openstack-nova-consoleauth.service new file mode 100644 index 00000000..1d194147 --- /dev/null +++ b/openstack/python-nova/centos/files/openstack-nova-consoleauth.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack Nova VNC console auth Server +After=syslog.target network.target + +[Service] +Type=simple +#NotifyAccess=all +TimeoutStartSec=0 +#Restart=always +User=root +ExecStart=/usr/bin/nova-consoleauth + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/python-nova/centos/files/openstack-nova-metadata-api.service b/openstack/python-nova/centos/files/openstack-nova-metadata-api.service new file mode 100644 index 00000000..c016ad8e --- /dev/null +++ b/openstack/python-nova/centos/files/openstack-nova-metadata-api.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack Nova Metadata API Server +After=syslog.target network.target + +[Service] +Type=simple +#NotifyAccess=all +TimeoutStartSec=0 +#Restart=always +User=root +ExecStart=/usr/bin/nova-api-metadata + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/python-nova/centos/files/openstack-nova-network.service b/openstack/python-nova/centos/files/openstack-nova-network.service new file mode 100644 index 00000000..cd0d329b --- /dev/null +++ b/openstack/python-nova/centos/files/openstack-nova-network.service @@ -0,0 +1,18 @@ +[Unit] +Description=OpenStack Nova Network Server +After=syslog.target network.target + +[Service] +Type=simple +#NotifyAccess=all +TimeoutStartSec=0 +#Restart=always +User=root +ExecStart=/usr/bin/nova-network + +# Don't kill dnsmasq on shutdown (#805947) +KillMode=process + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/python-nova/centos/files/openstack-nova-novncproxy.service b/openstack/python-nova/centos/files/openstack-nova-novncproxy.service new file mode 100644 index 00000000..50ce6751 --- /dev/null +++ b/openstack/python-nova/centos/files/openstack-nova-novncproxy.service @@ -0,0 +1,13 @@ +[Unit] +Description=OpenStack Nova NoVNC Proxy Server +After=syslog.target network.target + +[Service] +Type=simple +User=root +EnvironmentFile=-/etc/sysconfig/openstack-nova-novncproxy +ExecStart=/usr/bin/nova-novncproxy --web /usr/share/novnc/ $OPTIONS +#Restart=on-failure + +[Install] +WantedBy=multi-user.target diff --git a/openstack/python-nova/centos/files/openstack-nova-novncproxy.sysconfig b/openstack/python-nova/centos/files/openstack-nova-novncproxy.sysconfig new file mode 100644 index 00000000..8c905ed4 --- /dev/null +++ b/openstack/python-nova/centos/files/openstack-nova-novncproxy.sysconfig @@ -0,0 +1,2 @@ +# You may specify other parameters to the nova-novncproxy here +#OPTIONS= diff --git 
a/openstack/python-nova/centos/files/openstack-nova-os-compute-api.service b/openstack/python-nova/centos/files/openstack-nova-os-compute-api.service new file mode 100644 index 00000000..5b1a40b9 --- /dev/null +++ b/openstack/python-nova/centos/files/openstack-nova-os-compute-api.service @@ -0,0 +1,14 @@ +[Unit] +Description=OpenStack Nova Compute API Server +After=syslog.target network.target + +[Service] +Type=simple +#NotifyAccess=all +TimeoutStartSec=0 +#Restart=always +User=root +ExecStart=/usr/bin/nova-api-os-compute + +[Install] +WantedBy=multi-user.target diff --git a/openstack/python-nova/centos/files/openstack-nova-scheduler.service b/openstack/python-nova/centos/files/openstack-nova-scheduler.service new file mode 100644 index 00000000..822967db --- /dev/null +++ b/openstack/python-nova/centos/files/openstack-nova-scheduler.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack Nova Scheduler Server +After=syslog.target network.target + +[Service] +Type=simple +#NotifyAccess=all +TimeoutStartSec=0 +#Restart=always +User=root +ExecStart=/usr/bin/nova-scheduler + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/python-nova/centos/files/openstack-nova-serialproxy.service b/openstack/python-nova/centos/files/openstack-nova-serialproxy.service new file mode 100644 index 00000000..ba02b86b --- /dev/null +++ b/openstack/python-nova/centos/files/openstack-nova-serialproxy.service @@ -0,0 +1,13 @@ +[Unit] +Description=OpenStack Nova Serial Proxy Server +After=syslog.target network.target + +[Service] +Type=simple +User=root +ExecStart=/usr/bin/nova-serialproxy +#Restart=on-failure + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/python-nova/centos/files/openstack-nova-spicehtml5proxy.service b/openstack/python-nova/centos/files/openstack-nova-spicehtml5proxy.service new file mode 100644 index 00000000..8abbc755 --- /dev/null +++ b/openstack/python-nova/centos/files/openstack-nova-spicehtml5proxy.service @@ -0,0 +1,13 @@ +[Unit] +Description=OpenStack Nova Spice HTML5 Proxy Server +After=syslog.target network.target + +[Service] +Type=simple +User=root +ExecStart=/usr/bin/nova-spicehtml5proxy +#Restart=on-failure + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/python-nova/centos/files/openstack-nova-xvpvncproxy.service b/openstack/python-nova/centos/files/openstack-nova-xvpvncproxy.service new file mode 100644 index 00000000..0f9c2b82 --- /dev/null +++ b/openstack/python-nova/centos/files/openstack-nova-xvpvncproxy.service @@ -0,0 +1,13 @@ +[Unit] +Description=OpenStack Nova XVP VncProxy Server +After=syslog.target network.target + +[Service] +Type=simple +User=root +ExecStart=/usr/bin/nova-xvpvncproxy +#Restart=on-failure + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/python-nova/centos/files/policy.json b/openstack/python-nova/centos/files/policy.json new file mode 100644 index 00000000..2c63c085 --- /dev/null +++ b/openstack/python-nova/centos/files/policy.json @@ -0,0 +1,2 @@ +{ +} diff --git a/openstack/python-nova/centos/files/resctrl-show b/openstack/python-nova/centos/files/resctrl-show new file mode 100755 index 00000000..7b6957b2 --- /dev/null +++ b/openstack/python-nova/centos/files/resctrl-show @@ -0,0 +1,281 @@ +#!/usr/bin/env python + +import os +import sys +import re +import time +import copy +import uuid + +from oslo_utils import uuidutils + +from itertools import groupby +from operator import itemgetter + +import logging + + +# logger +logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - 
%(name)s - %(levelname)s - %(message)s')
+logging.getLogger('multiprocessing').setLevel(logging.DEBUG)
+LOG = logging.getLogger(__name__)
+
+# L3 CAT Support
+_L3_RESCTRL_SUPPORT = None
+_L3_CACHE = None
+RESCTRL_BASE = '/sys/fs/resctrl'
+
+
+def get_l3_cache_allocation_info():
+    """Get resctrl L3 cache allocation technology information.
+
+    :param: None
+    :return info[cache][field]: dictionary of fields, per cache type,
+            defined fields:
+            'cbm_mask' - hexadecimal bitmask of allocatable cache lanes
+            'min_cbm_bits' - minimum granularity of bits that can be specified
+            'num_cbm_bits' - number of allocatable cache lanes
+            'num_closids' - number of allocatable CLOS ids, e.g., COS
+            where <cache> is the cache type, e.g., 'L3', 'L3CODE', 'L3DATA'.
+
+    If resctrl is not available, this will return an empty dictionary.
+    """
+    global _L3_RESCTRL_SUPPORT
+    global _L3_CACHE
+    if _L3_CACHE is not None:
+        return _L3_CACHE
+
+    info_dir = RESCTRL_BASE + '/info'
+    if _L3_RESCTRL_SUPPORT is None:
+        _L3_CACHE = {}
+        if not os.path.isdir(info_dir):
+            LOG.info('L3 cache allocation technology not available')
+            _L3_RESCTRL_SUPPORT = False
+            return _L3_CACHE
+        else:
+            _L3_RESCTRL_SUPPORT = True
+    if not _L3_RESCTRL_SUPPORT:
+        _L3_CACHE = {}
+        return _L3_CACHE
+
+    known_types = {'int': int, 'str': str}
+    fields = [('cbm_mask', 'str'),
+              ('min_cbm_bits', 'int'),
+              ('num_closids', 'int')]
+    info_dirs = [name for name in os.listdir(info_dir)]
+    for cache in info_dirs:
+        _L3_CACHE[cache] = {}
+        for field, type_ in fields:
+            filename = RESCTRL_BASE + '/info/' + cache + '/' + field
+            try:
+                with open(filename, 'r') as f:
+                    value = f.readline().strip()
+                    _L3_CACHE[cache][field] = known_types[type_](value)
+            except Exception as e:
+                _L3_CACHE[cache][field] = None
+                LOG.error('Cannot parse file=%(file)s, error=%(err)s',
+                          {'file': filename, 'err': e})
+        if _L3_CACHE[cache]['cbm_mask'] is not None:
+            _L3_CACHE[cache]['num_cbm_bits'] = \
+                int(_L3_CACHE[cache]['cbm_mask'], 16).bit_length()
+        else:
+            _L3_CACHE[cache]['num_cbm_bits'] = None
+    return _L3_CACHE
+
+
+def get_l3_cache_allocation_schemata(uuid=None):
+    """Get resctrl L3 cache allocation technology schemata CBM corresponding
+    to instance uuid, or the default schemata if uuid not provided.
+    The CBM is a hexadecimal bitmask representing allocated cache lanes.
+
+    The contents of schemata have the following line-pattern:
+    <cache_type>:<bank>=<CBM>; ... <bank>=<CBM>
+
+    Example: L3 with cache type 'both', with two banks:
+    L3:0=ffffe;1=fffff
+
+    Example: L3 with cache type 'code' and 'data', i.e., CDP enabled
+    L3CODE:0=ffffe;1=fffff
+    L3DATA:0=ffffe;1=fffff
+
+    :param: uuid string
+    :return schemata[cache][bank]: dictionary of CBM per cache type, per bank
+    """
+    global _L3_RESCTRL_SUPPORT
+    re_schemata = re.compile(r"^\s*(\S+):(\d+=\w+;?.*)$")
+    schemata = {}
+
+    info_dir = RESCTRL_BASE + '/info'
+    if _L3_RESCTRL_SUPPORT is None:
+        if not os.path.isdir(info_dir):
+            LOG.info('L3 cache allocation technology not available')
+            _L3_RESCTRL_SUPPORT = False
+            return schemata
+        else:
+            _L3_RESCTRL_SUPPORT = True
+    if not _L3_RESCTRL_SUPPORT:
+        return schemata
+
+    if uuid is None:
+        filename = RESCTRL_BASE + '/schemata'
+    else:
+        filename = RESCTRL_BASE + '/' + uuid + '/schemata'
+    try:
+        with open(filename, 'r') as f:
+            for line in f:
+                m = re.search(re_schemata, line)
+                if m:
+                    cache_type = m.group(1)
+                    cache_cbm = m.group(2).split(';')
+                    schemata[cache_type] = {}
+                    for scheme in cache_cbm:
+                        bank, cbm = scheme.split('=')
+                        schemata[cache_type][int(bank)] = cbm
+    except Exception as e:
+        LOG.error('Cannot parse file=%(file)s, error=%(err)s',
+                  {'file': filename, 'err': e})
+
+    return schemata
+
+
+def get_all_l3_schemata():
+    """Get L3 CLOS schemata CBM for all resctrl uuids.
+
+    :param: None
+    :return schematas[uuid][cache][bank]: dictionary of CBM per uuid,
+            per cache type, per bank
+    """
+    global _L3_RESCTRL_SUPPORT
+    schematas = {}
+
+    info_dir = RESCTRL_BASE + '/info'
+    if _L3_RESCTRL_SUPPORT is None:
+        if not os.path.isdir(info_dir):
+            LOG.info('L3 cache allocation technology not available')
+            _L3_RESCTRL_SUPPORT = False
+            return schematas
+        else:
+            _L3_RESCTRL_SUPPORT = True
+    if not _L3_RESCTRL_SUPPORT:
+        return schematas
+
+    for name in os.listdir(RESCTRL_BASE):
+        # Note: arguments to os.path.join were reversed here, which made
+        # 'path' always resolve to RESCTRL_BASE itself; join the base first.
+        path = os.path.join(RESCTRL_BASE, name)
+        if os.path.isdir(path) and uuidutils.is_uuid_like(name):
+            schemata = get_l3_cache_allocation_schemata(uuid=name)
+            schematas[name] = copy.deepcopy(schemata)
+    return schematas
+
+
+def hextoset(mask=None):
+    """Convert hex string to equivalent set of enabled bits.
+
+    E.g., mask 'e' (binary 1110) yields set([1, 2, 3]).
+
+    :param: mask: hex string representing enabled bits
+    :return: set of enabled bits
+    """
+    s = set([])
+    if not mask:
+        return s
+    bits = '{0:b}'.format(int(mask, 16))
+    for i, c in enumerate(bits[::-1]):
+        if int(c):
+            s.add(i)
+    return s
+
+
+def settohex(setbits=None):
+    """Convert set of enabled bits to equivalent hex string.
+
+    E.g., set([1, 2, 3]) yields 'e'.
+
+    :param: setbits: set of enabled bits
+    :return: hex string representing enabled bits
+    """
+    if setbits is None:
+        return ''
+    mask = 0
+    for i in setbits:
+        mask += (1 << i)
+    s = '{0:x}'.format(mask)
+    return s
+
+
+def msb(x):
+    """Position of Most Significant Bit.
+
+    :param: x: integer
+    :return integer position of most significant bit
+    """
+    return x.bit_length() - 1
+
+
+def list_to_range(input_list=None):
+    """Convert a list into a string of comma-separated ranges.
+ E.g., [1,2,3,8,9,15] is converted to '1-3,8-9,15' + """ + if input_list is None: + return '' + if len(input_list) < 3: + return ','.join(str(x) for x in input_list) + else: + G = (list(x) for _, x in groupby(enumerate(input_list), + lambda (i, x): i - x)) + return ','.join( + '-'.join(map(str, (g[0][1], g[-1][1])[:len(g)])) for g in G) + + +def print_all_instance_schematas(l3_info=None, default_schemata=None, schematas=None): + if l3_info is None: + return + if default_schemata is None: + return + if schematas is None: + return + + cache_types = sorted(default_schemata.keys()) + cache_type0 = cache_types[0] + banks = sorted(default_schemata[cache_type0].keys()) + + cbm_mask = l3_info[cache_type0]['cbm_mask'] + closids_total = l3_info[cache_type0]['num_closids'] + num_cbm_bits = l3_info[cache_type0]['num_cbm_bits'] + uuid_len = len(str(uuid.uuid4())) + dum_name = "".ljust(uuid_len)[:uuid_len] + closids_used = 1 + len(schematas) + + print('%6s %4s : %*s : %8s : %20s : %4s : %s' + % ('cache', 'bank', uuid_len, 'uuid', + 'CBM', 'bitarray', 'size', 'setbits')) + for cache_type in cache_types: + for bank in banks: + default_s = hextoset(mask=default_schemata[cache_type][bank]) + default_h = settohex(setbits=default_s) + default_d = int(default_h, 16) + name = 'default' + print('%6s %4d : %*s : %08x : %s : %4d : %s' + % (cache_type, bank, uuid_len, name, default_d, + format(default_d, '020b'), bin(default_d).count('1'), + list_to_range(input_list=default_s))) + + for name, schemata in sorted(schematas.items(), + key=lambda x: msb(int(x[1][cache_type][bank], 16))): + + if schemata[cache_type][bank] == cbm_mask: + cbm_s = set() + else: + cbm_s = hextoset(mask=schemata[cache_type][bank]) + cbm_h = settohex(setbits=cbm_s) + cbm_d = int(cbm_h, 16) + print('%6s %4d : %s : %08x : %s : %4d : %s' + % (cache_type, bank, name, cbm_d, + format(cbm_d, '020b'), bin(cbm_d).count('1'), + list_to_range(input_list=cbm_s) or '-')) + print('CLOSIDS/type: %d total, %d used' % (closids_total, closids_used)) + +def main(): + l3_info = get_l3_cache_allocation_info() + if not _L3_RESCTRL_SUPPORT: + return + default_schemata = get_l3_cache_allocation_schemata() + schematas = get_all_l3_schemata() + print_all_instance_schematas(l3_info=l3_info, + default_schemata=default_schemata, + schematas=schematas) + +if __name__ == '__main__': + main() + sys.exit(0) diff --git a/openstack/python-nova/centos/openstack-nova.spec b/openstack/python-nova/centos/openstack-nova.spec new file mode 100644 index 00000000..93264163 --- /dev/null +++ b/openstack/python-nova/centos/openstack-nova.spec @@ -0,0 +1,896 @@ +%{!?upstream_version: %global upstream_version %{version}%{?milestone}} +%global with_doc %{!?_without_doc:1}%{?_without_doc:0} +%global with_trans %{!?_without_trans:1}%{?_without_trans:0} +%global distro RDO + +%global common_desc \ +OpenStack Compute (codename Nova) is open source software designed to \ +provision and manage large networks of virtual machines, creating a \ +redundant and scalable cloud computing platform. It gives you the \ +software, control panels, and APIs required to orchestrate a cloud, \ +including running instances, managing networks, and controlling access \ +through users and projects. OpenStack Compute strives to be both \ +hardware and hypervisor agnostic, currently supporting a variety of \ +standard hardware configurations and seven major hypervisors. 
+ +Name: openstack-nova +# Liberty semver reset +# https://review.openstack.org/#/q/I6a35fa0dda798fad93b804d00a46af80f08d475c,n,z +Epoch: 1 +Version: 16.0.2 +Release: 1%{?_tis_dist}.%{tis_patch_ver} +Summary: OpenStack Compute (nova) + +License: ASL 2.0 +URL: http://openstack.org/projects/compute/ +Source0: nova-%{version}.tar.gz + +# + +Source1: nova-dist.conf + +Source10: openstack-nova-api.service +Source12: openstack-nova-compute.service +Source13: openstack-nova-network.service +Source15: openstack-nova-scheduler.service +Source18: openstack-nova-xvpvncproxy.service +Source19: openstack-nova-console.service +Source20: openstack-nova-consoleauth.service +Source25: openstack-nova-metadata-api.service +Source26: openstack-nova-conductor.service +Source27: openstack-nova-cells.service +Source28: openstack-nova-spicehtml5proxy.service +Source29: openstack-nova-novncproxy.service +Source31: openstack-nova-serialproxy.service +Source32: openstack-nova-os-compute-api.service + +Source22: nova-ifc-template +Source24: nova-sudoers +Source30: openstack-nova-novncproxy.sysconfig +Source33: nova-placement-api.conf +Source34: policy.json + +Source35: nova_migration-sudoers +Source36: nova-ssh-config +Source37: nova-migration-wrapper +Source38: nova_migration_identity +Source39: nova_migration_authorized_keys +Source40: nova_migration-rootwrap.conf +Source41: nova_migration-rootwrap_cold_migration + +#WRS +Source60: nova_authorized_cmds +Source61: nova.conf +Source62: nova-purge-deleted-active +Source63: nova_setup_cpusets +Source64: openstack-nova-compute-setup.service +Source65: nova-compute.init +Source66: nova_clean_thinpool +Source67: nova-clean-thinpool.service +Source68: nova-restart +Source69: kvm_timer_advance_setup.service +Source70: nova_setup_timer_advance +Source71: nova-pci-interrupts +Source72: nova-placement-api +Source73: resctrl-show +Source74: collect_host_memory_info.sh + +BuildArch: noarch +BuildRequires: openstack-macros +BuildRequires: intltool +BuildRequires: python2-devel +BuildRequires: git +BuildRequires: python-sphinx +BuildRequires: python-oslo-cache +BuildRequires: python-openstackdocstheme +BuildRequires: python-os-traits +BuildRequires: python-setuptools +BuildRequires: python-netaddr +BuildRequires: python-pbr +BuildRequires: python-d2to1 +BuildRequires: python-six +BuildRequires: python-oslo-i18n +BuildRequires: python-cryptography >= 1.6 +BuildRequires: python-oslo-policy +# Required for unit tests +BuildRequires: python-barbicanclient +BuildRequires: python-ddt +BuildRequires: python-ironicclient +BuildRequires: python-mox3 +BuildRequires: python-os-testr +BuildRequires: python-os-vif +BuildRequires: python-oslo-rootwrap +BuildRequires: python-oslotest +BuildRequires: python-osprofiler +BuildRequires: python-requests-mock +BuildRequires: python-subunit +BuildRequires: python-testrepository +BuildRequires: python-testresources +BuildRequires: python-testscenarios +BuildRequires: python-tooz +BuildRequires: python-oslo-vmware +BuildRequires: python-cursive +# WRS +BuildRequires: tsconfig +BuildRequires: python-suds +BuildRequires: systemd-devel +BuildRequires: systemd + +Requires: openstack-nova-compute = %{epoch}:%{version}-%{release} +Requires: openstack-nova-scheduler = %{epoch}:%{version}-%{release} +Requires: openstack-nova-api = %{epoch}:%{version}-%{release} +Requires: openstack-nova-network = %{epoch}:%{version}-%{release} +Requires: openstack-nova-conductor = %{epoch}:%{version}-%{release} +Requires: openstack-nova-console = %{epoch}:%{version}-%{release} 
+Requires: openstack-nova-cells = %{epoch}:%{version}-%{release} +Requires: openstack-nova-novncproxy = %{epoch}:%{version}-%{release} +Requires: openstack-nova-placement-api = %{epoch}:%{version}-%{release} +Requires: openstack-nova-migration = %{epoch}:%{version}-%{release} + + +%description +%{common_desc} + +%package common +Summary: Components common to all OpenStack Nova services +Obsoletes: openstack-nova-cert <= 1:16.0.0-1 + +Requires: python-nova = %{epoch}:%{version}-%{release} +Requires(post): systemd +Requires(preun): systemd +Requires(postun): systemd +Requires(pre): shadow-utils +BuildRequires: systemd +# Required to build nova.conf.sample +BuildRequires: python-castellan >= 0.7.0 +BuildRequires: python-glanceclient +BuildRequires: python-keystonemiddleware +BuildRequires: python-lxml +BuildRequires: python-microversion-parse >= 0.1.3 +BuildRequires: python-os-brick +BuildRequires: python-oslo-db +BuildRequires: python-oslo-reports +BuildRequires: python-oslo-service +BuildRequires: python-oslo-versionedobjects +BuildRequires: python-paramiko +BuildRequires: python-websockify +# Required to compile translation files +BuildRequires: python-babel + +# remove old service subpackage +Obsoletes: %{name}-objectstore + + +%description common +%{common_desc} + +This package contains scripts, config and dependencies shared +between all the OpenStack nova services. + + +%package compute +Summary: OpenStack Nova Virtual Machine control service + +Requires: openstack-nova-common = %{epoch}:%{version}-%{release} +Requires: curl +Requires: iscsi-initiator-utils +Requires: iptables iptables-ipv6 +Requires: ipmitool +Requires: python-libguestfs +Requires: libvirt-python +Requires: libvirt-daemon-kvm +Requires: /usr/bin/virsh +%if 0%{?rhel}==0 +Requires: libvirt-daemon-lxc +%endif +Requires: openssh-clients +Requires: rsync +Requires: lvm2 +Requires: python-cinderclient >= 3.1.0 +Requires(pre): qemu-kvm >= 2.3.0 +Requires: genisoimage +Requires: bridge-utils +Requires: sg3_utils +Requires: sysfsutils +Requires: libosinfo +# WRS +Requires: host-guest-comm +Requires: guest-scale-helper + +%description compute +%{common_desc} + +This package contains the Nova service for controlling Virtual Machines. + + +%package network +Summary: OpenStack Nova Network control service + +Requires: openstack-nova-common = %{epoch}:%{version}-%{release} +Requires: radvd +Requires: bridge-utils +Requires: dnsmasq +Requires: dnsmasq-utils +Requires: ebtables +Requires: conntrack-tools + +%description network +%{common_desc} + +This package contains the Nova service for controlling networking. + + +%package scheduler +Summary: OpenStack Nova VM distribution service + +Requires: openstack-nova-common = %{epoch}:%{version}-%{release} + +%description scheduler +%{common_desc} + +This package contains the service for scheduling where +to run Virtual Machines in the cloud. + + +%package api +Summary: OpenStack Nova API services + +Requires: openstack-nova-common = %{epoch}:%{version}-%{release} +Requires: python-cinderclient >= 3.1.0 + +%description api +%{common_desc} + +This package contains the Nova services providing programmatic access. 
+
+%package conductor
+Summary: OpenStack Nova Conductor services
+
+Requires: openstack-nova-common = %{epoch}:%{version}-%{release}
+
+%description conductor
+%{common_desc}
+
+This package contains the Nova services providing database access for
+the compute service.
+
+%package console
+Summary: OpenStack Nova console access services
+
+Requires: openstack-nova-common = %{epoch}:%{version}-%{release}
+Requires: python-websockify >= 0.8.0
+
+%description console
+%{common_desc}
+
+This package contains the Nova services providing
+console access services to Virtual Machines.
+
+%package cells
+Summary: OpenStack Nova Cells services
+
+Requires: openstack-nova-common = %{epoch}:%{version}-%{release}
+
+%description cells
+%{common_desc}
+
+This package contains the Nova Cells service providing additional
+scaling and (geographic) distribution for compute services.
+
+%package novncproxy
+Summary: OpenStack Nova noVNC proxy service
+
+Requires: openstack-nova-common = %{epoch}:%{version}-%{release}
+Requires: novnc
+Requires: python-websockify >= 0.8.0
+
+
+%description novncproxy
+%{common_desc}
+
+This package contains the Nova noVNC Proxy service that can proxy
+VNC traffic over browser websockets connections.
+
+%package spicehtml5proxy
+Summary: OpenStack Nova Spice HTML5 console access service
+
+Requires: openstack-nova-common = %{epoch}:%{version}-%{release}
+Requires: python-websockify >= 0.8.0
+
+%description spicehtml5proxy
+%{common_desc}
+
+This package contains the Nova services providing the
+spice HTML5 console access service to Virtual Machines.
+
+%package serialproxy
+Summary: OpenStack Nova serial console access service
+
+Requires: openstack-nova-common = %{epoch}:%{version}-%{release}
+Requires: python-websockify >= 0.8.0
+
+%description serialproxy
+%{common_desc}
+
+This package contains the Nova services providing the
+serial console access service to Virtual Machines.
+
+%package placement-api
+Summary: OpenStack Nova Placement API service
+
+Requires: openstack-nova-common = %{epoch}:%{version}-%{release}
+Requires: httpd
+Requires: mod_wsgi
+
+%description placement-api
+%{common_desc}
+
+This package contains the Nova placement service, which will initially
+allow for the management of resource providers.
+
+%package migration
+Summary: OpenStack Nova Migration
+
+Requires: openstack-nova-compute = %{epoch}:%{version}-%{release}
+
+%description migration
+%{common_desc}
+
+This package contains scripts and config to support VM migration in Nova.
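+# Rough sketch of the migration mechanism (key and options below are
+# placeholders, not the packaged content): the identity/authorized_keys
+# pair installed by this spec is meant to act as an SSH forced command,
+# e.g. an authorized_keys entry of the form:
+#   command="%{_bindir}/nova-migration-wrapper" ssh-rsa AAAA...
+# so the nova_migration user can only ever run the wrapper, which in turn
+# limits what migration traffic may execute on the target host.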
+ +%package -n python-nova +Summary: Nova Python libraries + +Requires: openssl +# Require openssh for ssh-keygen +Requires: openssh +Requires: sudo + +Requires: python-paramiko >= 2.0 + +Requires: python-decorator >= 3.4.0 +Requires: python-enum34 +Requires: python-eventlet >= 0.18.2 +Requires: python-iso8601 >= 0.1.11 +Requires: python-netaddr >= 0.7.13 +Requires: python-lxml >= 2.3 +Requires: python-boto +Requires: python-ldap +Requires: python-stevedore >= 1.20.0 + +Requires: python-memcached + +Requires: python-sqlalchemy >= 1.0.10 +Requires: python-migrate >= 0.11.0 +Requires: python-alembic >= 0.8.0 + +Requires: python-paste +Requires: python-paste-deploy >= 1.5.0 +Requires: python-routes >= 2.3.1 +Requires: python-webob >= 1.7.1 + +Requires: python-babel >= 2.3.4 +Requires: python-castellan >= 0.7.0 +Requires: python-cryptography >= 1.6 +Requires: python-cursive >= 0.1.2 +Requires: python-glanceclient >= 1:2.8.0 +Requires: python-greenlet >= 0.3.2 +Requires: python-keystonemiddleware >= 4.12.0 +Requires: python-keystoneauth1 >= 3.1.0 +Requires: python-jinja2 +Requires: python-jsonschema >= 2.0.0 +Requires: python-microversion-parse >= 0.1.2 +Requires: python-netifaces >= 0.10.4 +Requires: python-neutronclient >= 6.3.0 +Requires: python-novaclient >= 2.30.1 +Requires: python-os-brick >= 1.15.2 +Requires: python-os-traits +Requires: python-oslo-cache >= 1.5.0 +Requires: python-oslo-concurrency >= 3.8.0 +Requires: python-oslo-config >= 2:4.0.0 +Requires: python-oslo-context >= 2.14.0 +Requires: python-oslo-db >= 4.24.0 +Requires: python-oslo-i18n >= 2.1.0 +Requires: python-oslo-log >= 3.22.0 +Requires: python-oslo-messaging >= 5.24.2 +Requires: python-oslo-middleware >= 3.27.0 +Requires: python-oslo-policy >= 1.23.0 +Requires: python-oslo-privsep >= 1.9.0 +Requires: python-oslo-reports >= 0.6.0 +Requires: python-oslo-rootwrap >= 5.0.0 +Requires: python-oslo-serialization >= 1.10.0 +Requires: python-oslo-service >= 1.10.0 +Requires: python-oslo-utils >= 3.20.0 +Requires: python-oslo-versionedobjects >= 1.17.0 +Requires: python-os-vif >= 1.7.0 +Requires: python-oslo-vmware >= 1.16.0 +Requires: python-pbr +Requires: python-prettytable >= 0.7.1 +Requires: python-psutil +Requires: python-requests >= 2.10.0 +Requires: python-rfc3986 >= 0.3.1 +Requires: python-six >= 1.9.0 +Requires: python-tooz + +%description -n python-nova +%{common_desc} + +This package contains the nova Python library. + +%package -n python-nova-tests +Summary: Nova tests +Requires: openstack-nova = %{epoch}:%{version}-%{release} + +%description -n python-nova-tests +%{common_desc} + +This package contains the nova Python library. + +%if 0%{?with_doc} +%package doc +Summary: Documentation for OpenStack Compute + +BuildRequires: graphviz + +# Required to build module documents +BuildRequires: python-boto +BuildRequires: python-eventlet +BuildRequires: python-barbicanclient +BuildRequires: python-cinderclient +BuildRequires: python-keystoneclient +BuildRequires: python-neutronclient +BuildRequires: python-os-win +BuildRequires: python-oslo-config +BuildRequires: python-oslo-log +BuildRequires: python-oslo-messaging +BuildRequires: python-oslo-utils +BuildRequires: python-redis +BuildRequires: python-rfc3986 >= 0.2.2 +BuildRequires: python-routes +BuildRequires: python-sqlalchemy +BuildRequires: python-webob +BuildRequires: python-zmq +# while not strictly required, quiets the build down when building docs. 
+BuildRequires: python-migrate, python-iso8601
+
+%description doc
+%{common_desc}
+
+This package contains documentation files for nova.
+%endif
+
+%prep
+%autosetup -n nova-%{upstream_version} -S git
+
+find . \( -name .gitignore -o -name .placeholder \) -delete
+
+find nova -name \*.py -exec sed -i '/\/usr\/bin\/env python/{d;q}' {} +
+
+# Remove the requirements file so that pbr hooks don't add it
+# to distutils requires_dist config
+%py_req_cleanup
+
+%build
+PYTHONPATH=. oslo-config-generator --config-file=etc/nova/nova-config-generator.conf
+# Generate a sample policy.yaml file for documentation purposes only
+PYTHONPATH=. oslopolicy-sample-generator --config-file=etc/nova/nova-policy-generator.conf
+export PBR_VERSION=%{version}
+
+%{__python2} setup.py build
+
+# Generate i18n files
+# (amoralej) we can remove '-D nova' once https://review.openstack.org/#/c/439500/ is merged
+%{__python2} setup.py compile_catalog -d build/lib/nova/locale -D nova
+
+# Avoid http://bugzilla.redhat.com/1059815. Remove when that is closed
+sed -i 's|group/name|group;name|; s|\[DEFAULT\]/|DEFAULT;|' etc/nova/nova.conf.sample
+
+# Programmatically update defaults in sample config
+# which is installed at /etc/nova/nova.conf
+
+# First we ensure all values are commented in appropriate format.
+# Since icehouse, there was an uncommented keystone_authtoken section
+# at the end of the file which mimics but also conflicted with our
+# distro editing that had been done for many releases.
+sed -i '/^[^#[]/{s/^/#/; s/ //g}; /^#[^ ]/s/ = /=/' etc/nova/nova.conf.sample
+
+# TODO: Make this more robust
+# Note it only edits the first occurrence, so assumes a section ordering in sample
+# and also doesn't support multi-valued variables like dhcpbridge_flagfile.
+while read name eq value; do
+    test "$name" && test "$value" || continue
+    sed -i "0,/^# *$name=/{s!^# *$name=.*!#$name=$value!}" etc/nova/nova.conf.sample
+done < %{SOURCE1}
+
+%install
+export PBR_VERSION=%{version}
+
+%{__python2} setup.py install -O1 --skip-build --root %{buildroot}
+
+# WRS
+# Install sql migration stuff that wasn't installed by setup.py
+install -m 640 nova/db/sqlalchemy/api_migrations/migrate_repo/migrate.cfg %{buildroot}%{python2_sitelib}/nova/db/sqlalchemy/api_migrations/migrate_repo/migrate.cfg
+install -m 640 nova/db/sqlalchemy/migrate_repo/migrate.cfg %{buildroot}%{python2_sitelib}/nova/db/sqlalchemy/migrate_repo/migrate.cfg
+install -m 640 nova/db/sqlalchemy/migrate_repo/versions/246_sqlite_upgrade.sql %{buildroot}%{python2_sitelib}/nova/db/sqlalchemy/migrate_repo/versions/246_sqlite_upgrade.sql
+install -d 755 %{buildroot}%{python2_sitelib}/nova/CA/.
+install -m 755 nova/CA/*.sh %{buildroot}%{python2_sitelib}/nova/CA/.
+install -m 644 nova/CA/*.tmpl %{buildroot}%{python2_sitelib}/nova/CA/.
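+# Illustrative sketch only (the option/value are hypothetical, not shipped):
+# the while-read loop in %build above folds packaged defaults from
+# nova-dist.conf (%{SOURCE1}) into the commented sample config. For a
+# nova-dist.conf line such as:
+#   log_dir = /var/log/nova
+# the loop reads name='log_dir', eq='=', value='/var/log/nova', and the sed
+# expression rewrites the first '# log_dir=...' line of nova.conf.sample to
+# '#log_dir=/var/log/nova', documenting the distro default while keeping
+# the option commented out.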
+
+# Remove this once sphinxcontrib.seqdiag becomes available
+sed -i -e '/sphinxcontrib.seqdiag/d' doc/source/conf.py
+sed -i -e 's#../../etc/nova/nova-config-generator.conf#etc/nova/nova-config-generator.conf#' doc/source/conf.py
+
+%if 0%{?with_doc}
+%{__python2} setup.py build_sphinx
+%endif
+
+%{__python2} setup.py build_sphinx --builder man
+mkdir -p %{buildroot}%{_mandir}/man1
+install -p -D -m 644 doc/build/man/*.1 %{buildroot}%{_mandir}/man1/
+
+# Setup directories
+install -d -m 755 %{buildroot}%{_sharedstatedir}/nova
+install -d -m 755 %{buildroot}%{_sharedstatedir}/nova/buckets
+install -d -m 755 %{buildroot}%{_sharedstatedir}/nova/instances
+install -d -m 755 %{buildroot}%{_sharedstatedir}/nova/keys
+install -d -m 755 %{buildroot}%{_sharedstatedir}/nova/networks
+install -d -m 755 %{buildroot}%{_sharedstatedir}/nova/tmp
+install -d -m 750 %{buildroot}%{_localstatedir}/log/nova
+install -d -m 700 %{buildroot}%{_sharedstatedir}/nova/.ssh
+
+# Install config files
+install -d -m 755 %{buildroot}%{_sysconfdir}/nova
+install -p -D -m 640 %{SOURCE1} %{buildroot}%{_datarootdir}/nova/nova-dist.conf
+install -p -D -m 640 %{SOURCE61} %{buildroot}%{_sysconfdir}/nova/nova.conf
+install -p -D -m 640 %{SOURCE61} %{buildroot}%{_sysconfdir}/nova/nova-ironic.conf
+install -p -D -m 640 etc/nova/rootwrap.conf %{buildroot}%{_sysconfdir}/nova/rootwrap.conf
+install -p -D -m 640 etc/nova/api-paste.ini %{buildroot}%{_sysconfdir}/nova/api-paste.ini
+install -p -D -m 640 %{SOURCE33} %{buildroot}%{_sysconfdir}/httpd/conf.d/00-nova-placement-api.conf
+install -d -m 755 %{buildroot}%{_sysconfdir}/nova/migration
+install -p -D -m 600 %{SOURCE38} %{buildroot}%{_sysconfdir}/nova/migration/identity
+install -p -D -m 644 %{SOURCE39} %{buildroot}%{_sysconfdir}/nova/migration/authorized_keys
+install -p -D -m 640 %{SOURCE40} %{buildroot}%{_sysconfdir}/nova/migration/rootwrap.conf
+install -d -m 755 %{buildroot}%{_sysconfdir}/nova/migration/rootwrap.d
+install -p -D -m 640 %{SOURCE41} %{buildroot}%{_sysconfdir}/nova/migration/rootwrap.d/cold_migration.filters
+
+# Install empty policy.json file to cover rpm updates with untouched policy files.
+install -p -D -m 640 %{SOURCE34} %{buildroot}%{_sysconfdir}/nova/policy.json
+
+# Install version info file
+cat > %{buildroot}%{_sysconfdir}/nova/release <<EOF
+EOF
+
+%pre common
+getent group nova >/dev/null || groupadd -r nova --gid 162
+if ! getent passwd nova >/dev/null; then
+    useradd -u 162 -r -g nova -G nova,nobody -d %{_sharedstatedir}/nova -s /sbin/nologin -c "OpenStack Nova Daemons" nova
+fi
+exit 0
+
+%pre compute
+usermod -a -G qemu nova
+usermod -a -G libvirt nova
+%pre migration
+getent group nova_migration >/dev/null || groupadd -r nova_migration
+getent passwd nova_migration >/dev/null || \
+    useradd -r -g nova_migration -d / -s /bin/bash -c "OpenStack Nova Migration" nova_migration
+exit 0
+
+%post compute
+%systemd_post %{name}-compute.service
+/usr/bin/systemctl enable %{name}-compute-setup.service
+/usr/bin/systemctl enable nova-clean-thinpool.service
+/usr/bin/systemctl enable kvm_timer_advance_setup.service
+%post network
+%systemd_post %{name}-network.service
+%post scheduler
+%systemd_post %{name}-scheduler.service
+%post api
+%systemd_post %{name}-api.service %{name}-metadata-api.service %{name}-os-compute-api.service
+%post conductor
+%systemd_post %{name}-conductor.service
+%post console
+%systemd_post %{name}-console.service %{name}-consoleauth.service %{name}-xvpvncproxy.service
+%post cells
+%systemd_post %{name}-cells.service
+%post novncproxy
+%systemd_post %{name}-novncproxy.service
+%post spicehtml5proxy
+%systemd_post %{name}-spicehtml5proxy.service
+%post serialproxy
+%systemd_post %{name}-serialproxy.service
+
+%preun compute
+%systemd_preun %{name}-compute.service
+if [ $1 -eq 0 ] ; then
+    # Package removal, not upgrade
+    /usr/bin/systemctl disable nova-clean-thinpool.service
+    /usr/bin/systemctl disable %{name}-compute-setup.service
+    /usr/bin/systemctl disable kvm_timer_advance_setup.service
+fi
+%preun network
+%systemd_preun %{name}-network.service
+%preun scheduler
+%systemd_preun %{name}-scheduler.service
+%preun api
+%systemd_preun %{name}-api.service %{name}-metadata-api.service %{name}-os-compute-api.service
+%preun conductor
+%systemd_preun %{name}-conductor.service
+%preun console
+%systemd_preun %{name}-console.service %{name}-consoleauth.service %{name}-xvpvncproxy.service
+%preun cells
+%systemd_preun %{name}-cells.service
+%preun novncproxy
+%systemd_preun %{name}-novncproxy.service
+%preun spicehtml5proxy
+%systemd_preun %{name}-spicehtml5proxy.service
+%preun serialproxy
+%systemd_preun %{name}-serialproxy.service
+
+%postun compute
+%systemd_postun_with_restart %{name}-compute.service
+%postun network
+%systemd_postun_with_restart %{name}-network.service
+%postun scheduler
+%systemd_postun_with_restart %{name}-scheduler.service
+%postun api
+%systemd_postun_with_restart %{name}-api.service %{name}-metadata-api.service %{name}-os-compute-api.service
+%postun conductor
+%systemd_postun_with_restart %{name}-conductor.service
+%postun console
+%systemd_postun_with_restart %{name}-console.service %{name}-consoleauth.service %{name}-xvpvncproxy.service
+%postun cells
+%systemd_postun_with_restart %{name}-cells.service
+%postun novncproxy
+%systemd_postun_with_restart %{name}-novncproxy.service
+%postun spicehtml5proxy
+%systemd_postun_with_restart %{name}-spicehtml5proxy.service
+%postun serialproxy
+%systemd_postun_with_restart %{name}-serialproxy.service
+
+%files
+
+%files common -f nova.lang
+%doc LICENSE
+%doc etc/nova/policy.yaml.sample
+%dir %{_datarootdir}/nova
+%attr(-, root, nova) %{_datarootdir}/nova/nova-dist.conf
+%{_datarootdir}/nova/interfaces.template
+%{_datarootdir}/nova/rootwrap/network.filters
+%dir %attr(0755, root, nova) %{_sysconfdir}/nova
+%{_sysconfdir}/nova/release
+%config(noreplace) %attr(-, root, nova) %{_sysconfdir}/nova/nova.conf
+%config(noreplace)
%attr(-, root, nova) %{_sysconfdir}/nova/nova-ironic.conf +%config(noreplace) %attr(0660, root, nova) %{_sysconfdir}/nova/api-paste.ini +%config(noreplace) %attr(0640, root, nova) %{_sysconfdir}/nova/rootwrap.conf +%config(noreplace) %attr(-, root, nova) %{_sysconfdir}/nova/policy.json +%config(noreplace) %{_sysconfdir}/sudoers.d/nova +%{_sysconfdir}/bash_completion.d/nova-manage.bash_completion + +%dir %attr(0750, nova, root) %{_localstatedir}/log/nova +%dir %attr(0755, nova, root) %{_localstatedir}/run/nova + +%{_bindir}/nova-manage +%{_bindir}/nova-policy +%{_bindir}/nova-rootwrap +%{_bindir}/nova-rootwrap-daemon +%{_bindir}/nova-status + +%{_mandir}/man1/nova*.1.gz + +%defattr(-, nova, nova, -) +%dir %{_sharedstatedir}/nova +%dir %{_sharedstatedir}/nova/buckets +%dir %{_sharedstatedir}/nova/instances +%dir %{_sharedstatedir}/nova/keys +%dir %{_sharedstatedir}/nova/networks +%dir %{_sharedstatedir}/nova/tmp + +%files compute +%{_bindir}/nova-compute +%{_bindir}/nova-idmapshift +%{_unitdir}/nova-compute.service +%{_datarootdir}/nova/rootwrap/compute.filters +%{_bindir}/nova_authorized_cmds +%{_unitdir}/nova-clean-thinpool.service +%{_unitdir}/openstack-nova-compute-setup.service +%{_bindir}/d_nova_setup_cpusets +%{_sysconfdir}/rc.d/init.d/nova-compute +%{_bindir}/nova_clean_thinpool +%{_unitdir}/kvm_timer_advance_setup.service +%{_bindir}/nova_setup_timer_advance +%{_bindir}/nova-pci-interrupts +%{_bindir}/resctrl-show +%{_bindir}/collect_host_memory_info.sh + +%files network +%{_bindir}/nova-network +%{_bindir}/nova-dhcpbridge +%{_unitdir}/openstack-nova-network.service + +%files scheduler +%{_bindir}/nova-scheduler +%{_unitdir}/openstack-nova-scheduler.service + +%files api +%{_bindir}/nova-api* +%{_bindir}/nova-metadata-wsgi +%{_unitdir}/openstack-nova-*api.service +%{_datarootdir}/nova/rootwrap/api-metadata.filters + +%files conductor +%{_bindir}/nova-conductor +%{_bindir}/nova-purge-deleted-active +%{_unitdir}/openstack-nova-conductor.service + +%files console +%{_bindir}/nova-console* +%{_bindir}/nova-xvpvncproxy +%{_unitdir}/openstack-nova-console*.service +%{_unitdir}/openstack-nova-xvpvncproxy.service + +%files cells +%{_bindir}/nova-cells +%{_unitdir}/openstack-nova-cells.service + +%files novncproxy +%{_bindir}/nova-novncproxy +%{_unitdir}/openstack-nova-novncproxy.service +%config(noreplace) %{_sysconfdir}/sysconfig/openstack-nova-novncproxy + +%files spicehtml5proxy +%{_bindir}/nova-spicehtml5proxy +%{_unitdir}/openstack-nova-spicehtml5proxy.service + +%files serialproxy +%{_bindir}/nova-serialproxy +%{_unitdir}/openstack-nova-serialproxy.service + +%files placement-api +%config(noreplace) %{_sysconfdir}/httpd/conf.d/00-nova-placement-api.conf +%{_bindir}/nova-placement-api + +%files migration +%{_bindir}/nova-migration-wrapper +%config(noreplace) %{_sysconfdir}/sudoers.d/nova_migration +%dir %attr(0700, nova, nova) %{_sharedstatedir}/nova/.ssh +%attr(0600, nova, nova) %{_sharedstatedir}/nova/.ssh/config +%dir %{_sysconfdir}/nova/migration +%config(noreplace) %attr(0640, root, nova_migration) %{_sysconfdir}/nova/migration/authorized_keys +%config(noreplace) %attr(0600, nova, nova) %{_sysconfdir}/nova/migration/identity +%config(noreplace) %attr(0640, root, root) %{_sysconfdir}/nova/migration/rootwrap.conf +%dir %{_sysconfdir}/nova/migration/rootwrap.d +%config(noreplace) %attr(0640, root, root) %{_sysconfdir}/nova/migration/rootwrap.d/cold_migration.filters + +%files -n python-nova +%doc LICENSE +%{python2_sitelib}/nova +%{python2_sitelib}/nova-*.egg-info +%exclude 
%{python2_sitelib}/nova/tests +%{_bindir}/nova-restart + +%files -n python-nova-tests +%license LICENSE +%{python2_sitelib}/nova/tests + +%if 0%{?with_doc} +%files doc +%doc LICENSE doc/build/html +%endif + +%changelog +* Wed Oct 25 2017 rdo-trunk 1:16.0.2-1 +- Update to 16.0.2 + +* Mon Sep 25 2017 rdo-trunk 1:16.0.1-1 +- Update to 16.0.1 + +* Wed Aug 30 2017 rdo-trunk 1:16.0.0-1 +- Update to 16.0.0 + +* Fri Aug 25 2017 Alfredo Moralejo 1:16.0.0-0.2.0rc2 +- Update to 16.0.0.0rc2 + +* Thu Aug 24 2017 Alfredo Moralejo 1:16.0.0-0.1.0rc1 +- Update to 16.0.0.0rc1 + diff --git a/openstack/python-nova/python-nova/collect_host_memory_info.sh b/openstack/python-nova/python-nova/collect_host_memory_info.sh new file mode 100755 index 00000000..5f1de1ab --- /dev/null +++ b/openstack/python-nova/python-nova/collect_host_memory_info.sh @@ -0,0 +1,45 @@ +#!/bin/bash +# +# Copyright (c) 2013-2018 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +# This script is intended to collect host memory information and append +# that info to collect_host_memory_info.log in /var/log/nova + +logfile="/var/log/nova/collect_host_memory_info.log" + +touch ${logfile} +echo "`date '+%F %T'`: Collect of host memory info" >> ${logfile} + +echo "process listing" >> ${logfile} +echo "---------------" >> ${logfile} +ps -eLf >> ${logfile} 2>> ${logfile} + +echo "lsof huge mounts" >> ${logfile} +echo "----------------" >> ${logfile} +lsof -n +c 15 | awk '($3 !~ /^[0-9]+$/ && /\/mnt\/huge/) || NR==1 {print $0;}' >> ${logfile} 2>> ${logfile} + +echo "numa maps" >> ${logfile} +echo "---------" >> ${logfile} +grep huge /proc/*/numa_maps >> ${logfile} 2>> ${logfile} + +tail -vn +1 /proc/meminfo >> ${logfile} 2>> ${logfile} +tail -vn +1 /sys/devices/system/node/node?/meminfo >> ${logfile} 2>> ${logfile} +tail -vn +1 /sys/devices/system/node/node?/hugepages/hugepages-*/*_hugepages >> ${logfile} 2>> ${logfile} + +echo "find /mnt/huge-2048kB|xargs ls -ld" >> ${logfile} +echo "----------------------------------" >> ${logfile} +find /mnt/huge-2048kB|xargs ls -ld >> ${logfile} 2>> ${logfile} + +echo "find /mnt/huge-1048576kB/|xargs ls -ld" >> ${logfile} +echo "--------------------------------------" >> ${logfile} +find /mnt/huge-1048576kB/|xargs ls -ld >> ${logfile} 2>> ${logfile} + +echo "Locked smaps" >> ${logfile} +echo "------------" >> ${logfile} +grep Locked: /proc/*/smaps 2>/dev/null| awk '($2 > 0) {a[$1]+=$2} END {for (i in a) print i,a[i]/1024.0, "MiB";}' >> ${logfile} 2>> ${logfile} + +date '+%F %T' >> ${logfile} 2>> ${logfile} + +exit 0 diff --git a/openstack/python-nova/python-nova/nova.conf b/openstack/python-nova/python-nova/nova.conf new file mode 100644 index 00000000..4496d729 --- /dev/null +++ b/openstack/python-nova/python-nova/nova.conf @@ -0,0 +1,62 @@ +[DEFAULT] +# firewall_driver = nova.virt.libvirt.firewall.IptablesFirewallDriver +firewall_driver = nova.virt.firewall.NoopFirewallDriver +compute_driver = libvirt.LibvirtDriver +libvirt_type = kvm +libvirt_cpu_mode = none +default_floating_pool = public +fixed_range = +force_dhcp_release = True +dhcpbridge_flagfile = /etc/nova/nova.conf +dhcpbridge_flagfile = /etc/nova/nova.conf +compute_scheduler_driver = nova.scheduler.filter_scheduler.FilterScheduler +rootwrap_config = /etc/nova/rootwrap.conf +api_paste_config = /etc/nova/api-paste.ini +allow_resize_to_same_host = true +auth_strategy = keystone +instances_path = /etc/nova/instances +debug = True +my_ip = 127.0.0.1 +glance_host = 127.0.0.1 +lock_path=/var/lock/nova/ +state_path=/var/run/nova/ 
+libvirt_images_type = default +#notification_driver=nova.openstack.common.notifier.server_group_notifier + +#Network +flat_interface = eth0 +flat_network_bridge = br1 +vlan_interface = eth0 +public_interface = br1 +network_manager = nova.network.manager.FlatDHCPManager +fixed_range= +force_dhcp_release=False +dhcpbridge=/usr/bin/nova-dhcpbridge + +# Neutron +neutron_url=http://127.0.0.1:9696 +neutron_auth_strategy=keystone +neutron_admin_tenant_name=service +neutron_admin_username=neutron +neutron_admin_password=password +neutron_admin_auth_url=http://127.0.0.1:5000/v2.0/ + +# nova-compute configuration for ceilometer +instance_usage_audit=True +instance_usage_audit_period=hour +notify_on_state_change=vm_and_task_state +#notification_driver=ceilometer.compute.nova_notifier + +# nova-compute configuration for ceph +libvirt_images_rbd_pool=cinder-volumes +libvirt_images_rbd_ceph_conf=/etc/ceph/ceph.conf +rbd_user=cinder-volume +#rbd_secret_uuid= + +[spice] +agent_enabled=False +enabled=False +html5proxy_base_url=http://127.0.0.1:6082/spice_auto.html +keymap=en-us +server_listen=127.0.0.1 +server_proxyclient_address=127.0.0.1 diff --git a/openstack/python-nova/python-nova/nova.init b/openstack/python-nova/python-nova/nova.init new file mode 100644 index 00000000..a9a6ef18 --- /dev/null +++ b/openstack/python-nova/python-nova/nova.init @@ -0,0 +1,157 @@ +#!/bin/sh + +### BEGIN INIT INFO +# Provides: nova-compute +# Required-Start: $remote_fs $syslog +# Required-Stop: $remote_fs $syslog +# Default-Start: 3 5 +# Default-Stop: 0 1 2 6 +# Short-Description: OpenStack Compute (Nova) - compute +# Description: OpenStack Compute (Nova) - compute +### END INIT INFO + +SUFFIX="compute" +DESC="nova-compute" +DAEMON="/usr/bin/nova-$SUFFIX" +PIDFILE="/var/run/nova/nova-$SUFFIX.pid" + +WHOAMI=$(basename $0) +NOVA_STARTUP_TAG=${NOVA_STARTUP_TAG:-"${WHOAMI}"} +function log () +{ + logger -p local1.info -t ${NOVA_STARTUP_TAG} $@ + echo "${NOVA_STARTUP_TAG} $@" +} + +if [ ! -d /var/run/nova ]; then + mkdir -p /var/run/nova + chown nova:root /var/run/nova/ +fi + +if [ ! -d /var/lock/nova ]; then + mkdir -p /var/lock/nova + chown nova:root /var/lock/nova/ +fi + +if ! [ -x ${DAEMON} ] ; then + exit 0 +fi + +# Ensure the program only works with intended target nova-compute, +# and not other nova-* binaries. +if [ "${WHOAMI}" != "${DESC}" ] ; then + log "Call $0 ignored, this service managed by OCF script." + exit 0 +fi + +start () +{ + if [ -e $PIDFILE ]; then + PIDDIR=/proc/$(cat $PIDFILE) + if [ -d ${PIDDIR} ]; then + echo "$DESC already running." + exit 1 + else + echo "Removing stale PID file $PIDFILE" + rm -f $PIDFILE + fi + fi + PIDDIR=`dirname $PIDFILE` + if [ ! -d $PIDDIR ]; then + mkdir -p $PIDDIR + chown nova $PIDDIR + fi + if [ ! -d /var/log/nova ]; then + mkdir /var/log/nova + fi + echo -n "Starting $DESC..." + + start-stop-daemon --start --quiet --background \ + --pidfile ${PIDFILE} --make-pidfile --exec ${DAEMON} + + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." + fi +} + +stop () +{ + echo -n "Stopping $DESC..." + start-stop-daemon --stop --quiet --pidfile $PIDFILE + if [ $? -eq 0 ]; then + echo "done." + else + echo "failed." 
+    fi
+    rm -f $PIDFILE
+}
+
+status()
+{
+    # Status function has a standard set of return codes to indicate daemon status
+    # http://refspecs.linuxbase.org/LSB_3.1.0/LSB-Core-generic/LSB-Core-generic/iniscrptact.html
+    pid=`cat $PIDFILE 2>/dev/null`
+    if [ -n "$pid" ]; then
+        if ps -p $pid &>/dev/null ; then
+            echo "$DESC is running"
+            exit 0
+        else
+            echo "$DESC is not running"
+            exit 1
+        fi
+    fi
+    echo "$DESC is not running"
+    exit 3
+}
+
+reset()
+{
+    . /etc/nova/openrc
+
+    # Nova compute
+    simple_delete "nova list --all-tenant" "nova delete" 1 "vm"
+
+    stop
+
+    # This is to make sure postgres is configured and running
+    if ! pidof postmaster > /dev/null; then
+        /etc/init.d/postgresql-init
+        /etc/init.d/postgresql start
+        sleep 5
+    fi
+
+    sudo -u postgres dropdb nova
+    sudo -u postgres createdb nova
+    sleep 2
+    nova-manage db sync
+
+    start
+}
+
+case "$1" in
+    start)
+        start
+        ;;
+    stop)
+        stop
+        ;;
+    restart|force-reload|reload)
+        stop
+        start
+        ;;
+    status)
+        status
+        ;;
+    reset)
+        reset
+        ;;
+    *)
+        echo "Usage: $0 {start|stop|force-reload|restart|reload|status|reset}"
+        exit 1
+        ;;
+esac
+
+exit 0
+
diff --git a/openstack/python-nova/python-nova/nova_authorized_cmds b/openstack/python-nova/python-nova/nova_authorized_cmds
new file mode 100644
index 00000000..fd9a7ddd
--- /dev/null
+++ b/openstack/python-nova/python-nova/nova_authorized_cmds
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Extract the command being executed. Basically we want everything up to the first blank.
+# There are lots of security issues still, but it's better than nothing.
+# Maybe later we can check the paths too.
+ORIG_CMD=${SSH_ORIGINAL_COMMAND%%[[:blank:]]*}
+case $ORIG_CMD in
+    scp|rsync|rm|touch|mkdir)
+        # Command is expected
+        $SSH_ORIGINAL_COMMAND
+        ;;
+    *)
+        # Command is not expected
+        logger -i -p auth.warning "$0: disallowing passwordless execution of command: ${SSH_ORIGINAL_COMMAND}"
+        exit 255
+        ;;
+esac
diff --git a/openstack/python-nova/python-nova/nova_clean_thinpool b/openstack/python-nova/python-nova/nova_clean_thinpool
new file mode 100644
index 00000000..a6637edc
--- /dev/null
+++ b/openstack/python-nova/python-nova/nova_clean_thinpool
@@ -0,0 +1,66 @@
+#!/bin/bash
+#
+# Copyright (c) 2016 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+
+# This script is intended to clean up the LVM thin pool used by
+# nova-compute. This is required so that packstack is free to resize the
+# instances_lv volume or to switch between volume-backed or image-backed
+# instance storage.
+#
+# It should only run on compute nodes, and it must be run before packstack.
+
+errlog()
+{
+    logger -t nova_clean_thinpool -p daemon.err "$@"
+}
+
+
+start()
+{
+    # We expect this as the volume group
+    VOL_GROUP="nova-local"
+
+    # We expect this as the thin pool
+    THIN_POOL="nova-local-pool"
+
+    # Test if any volume is the thin pool
+    lvs -o lv_name --noheadings | grep -e $THIN_POOL
+    if [ $? -eq 0 ] ; then
+        # Now test if any volumes are in the thin pool
+        lvs -o pool_lv --noheadings | grep -e $THIN_POOL
+        if [ $? -ne 0 ] ; then
+            # Thin pool is present and empty, delete it.
+            lvremove -f ${VOL_GROUP}/$THIN_POOL
+            if [ $? -ne 0 ] ; then
+                errlog "Problem deleting ${VOL_GROUP}/$THIN_POOL"
+            fi
+        else
+            errlog "Not deleting $THIN_POOL because it's not empty."
+ fi + fi +} + +stop () +{ + return +} + +case "$1" in + start) + start + ;; + stop) + stop + ;; + *) + echo "Usage: $0 {start|stop}" + exit 1 + ;; +esac + +exit 0 + diff --git a/openstack/python-nova/python-nova/nova_setup_cpusets b/openstack/python-nova/python-nova/nova_setup_cpusets new file mode 100644 index 00000000..3138540e --- /dev/null +++ b/openstack/python-nova/python-nova/nova_setup_cpusets @@ -0,0 +1,188 @@ +#!/bin/bash +# +# Copyright (c) 2013-2017 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +# This script is intended to set up the cpusets for use by nova-compute. +# It should only run on compute nodes, and it must be run after the +# /etc/nova/nova.conf file has been modified by packstack since it +# extracts the "vcpu_pin_set" value from that file. +# +# The intent of the script is to create a top-level "floating" cpuset which +# has access to all the nova CPUs and all NUMA nodes. For each NUMA node X +# on the host a child cpuset called "nodeX" will be created which has access +# to the CPUs/memory from that specific NUMA node. + +# Platform paths and flags +. /usr/bin/tsconfig + +# This is a bit hackish, but we already have the python helper functions +# available for nova, so we may as well use them. Also python has +# built-in support for set intersections. +# +# Arg 1 is the full range of CPUs assigned to nova +# Arg 2 is the range of CPUs on this numa node +# +# We want to print the intersection of the two ranges in range +# notation. +# +# Note: the py_script can't be indented otherwise python complains. +get_float_node_cpus() +{ +py_script=" +from nova import utils +float_cpurange = \"${1}\" +node_cpurange = \"${2}\" +float_cpus = utils.range_to_list(float_cpurange) +node_cpus = utils.range_to_list(node_cpurange) +float_node_cpus = list(set(float_cpus) & set(node_cpus)) +float_node_cpus.sort() +print utils.list_to_range(float_node_cpus) +" + python -c "$py_script" +} + +log() +{ + logger -t nova_setup_cpusets -p daemon.info "$@" +} + +errlog() +{ + logger -t nova_setup_cpusets -p daemon.err "$@" +} + + +start() +{ + # Do not continue if the host has not been configured. We can't do + # anything until the nova.conf file has been updated. + if [ ! -f ${INITIAL_COMPUTE_CONFIG_COMPLETE} ] + then + log "Initial compute configuration is not complete, nothing to do" + exit 0 + fi + + mkdir -p /dev/cpuset + if [ $? -ne 0 ]; then + errlog "unable to create /dev/cpuset" + exit 1 + fi + + if ! mountpoint -q /dev/cpuset + then + mount -t cpuset none /dev/cpuset + if [ $? -ne 0 ]; then + errlog "unable to mount /dev/cpuset" + exit 1 + fi + + fi + + # Figure out whether to use WRL or CentOS naming + if [ -f /dev/cpuset/cpuset.cpus ] + then + CPUS='cpuset.cpus' + MEMS='cpuset.mems' + else + CPUS='cpus' + MEMS='mems' + fi + + # Make the global "floating" cpuset + mkdir -p /dev/cpuset/floating + if [ $? -ne 0 ]; then + errlog "unable to create 'floating' cpuset" + exit 1 + fi + + # Assign the full set of NUMA nodes to the global floating nodeset + MEMNODES=$(cat /dev/cpuset/${MEMS}) + log "Assign nodes ${MEMNODES} to floating/${MEMS}" + cp /dev/cpuset/${MEMS} /dev/cpuset/floating/${MEMS} + if [ $? 
-ne 0 ]; then + errlog "unable to write to floating/${MEMS}" + fi + + # Assign the full set of nova CPUs to the global floating cpuset + VCPU_PIN_STR=$(grep vcpu_pin_set /etc/nova/nova.conf) + VCPU_PIN_STR=${VCPU_PIN_STR//\"/} + FLOAT_CPUS=${VCPU_PIN_STR##*=} + log "Assign cpus ${FLOAT_CPUS} to floating/${CPUS}" + /bin/echo $FLOAT_CPUS > /dev/cpuset/floating/${CPUS} + if [ $? -ne 0 ]; then + errlog "unable to write to floating/${CPUS}" + fi + + # Set ownership/permissions on the cpus/tasks files + chown nova.nova /dev/cpuset/floating/${CPUS} /dev/cpuset/floating/tasks + chmod 644 /dev/cpuset/floating/${CPUS} /dev/cpuset/floating/tasks + + # Now set up the per-NUMA-node cpusets + cd /sys/devices/system/node/ + NODES=`ls -d node*` + cd /dev/cpuset/floating + for NODE in $NODES + do + NODENUM=${NODE#node} + mkdir -p $NODE + if [ $? -ne 0 ]; then + errlog "unable to create ${NODE} cpuset" + continue + fi + + # Assign the full set of NUMA nodes to the cpuset so that VM memory + # may span numa nodes. Default linux memory allocation policy is to + # use the same node(s) where the task is affined if that is possible. + log "Assign nodes ${MEMNODES} to floating/${NODE}/${MEMS}" + cp /dev/cpuset/${MEMS} ${NODE}/${MEMS} + if [ $? -ne 0 ]; then + errlog "unable to write to ${NODE}/${MEMS}" + continue + fi + + # Now assign the subset of FLOAT_CPUS that are part of this + # NUMA node to this cpuset + NODE_CPUS=$(cat /sys/devices/system/node/${NODE}/cpulist) + FLOAT_NODE_CPUS=$(get_float_node_cpus $FLOAT_CPUS $NODE_CPUS) + if [ $? -ne 0 ]; then + errlog "unable to calculate FLOAT_NODE_CPUS for ${NODE}" + continue + fi + + log "Assign cpus ${FLOAT_NODE_CPUS} to floating/${NODE}/${CPUS}" + /bin/echo $FLOAT_NODE_CPUS > $NODE/${CPUS} + if [ $? -ne 0 ]; then + errlog "unable to write to ${NODE}/${CPUS}" + continue + fi + + # Set ownership/permissions on the cpus/tasks files + chown nova.nova ${NODE}/${CPUS} ${NODE}/tasks + chmod 644 ${NODE}/${CPUS} ${NODE}/tasks + done +} + +stop () +{ + return +} + +case "$1" in + start) + start + ;; + stop) + stop + ;; + *) + echo "Usage: $0 {start|stop}" + exit 1 + ;; +esac + +exit 0 + diff --git a/openstack/python-nova/python-nova/openstack-nova-compute-setup.service b/openstack/python-nova/python-nova/openstack-nova-compute-setup.service new file mode 100644 index 00000000..4fbfdeee --- /dev/null +++ b/openstack/python-nova/python-nova/openstack-nova-compute-setup.service @@ -0,0 +1,14 @@ +[Unit] +Description=OpenStack Nova Compute Server Pre-Startup +After=syslog.target compute-config-gate.service +Before=nova-compute.service goenabled-compute.service + +[Service] +Type=oneshot +RemainAfterExit=yes +User=root +ExecStart=/usr/bin/d_nova_setup_cpusets start + +[Install] +WantedBy=multi-user.target + diff --git a/openstack/python-novaclient/centos/build_srpm.data b/openstack/python-novaclient/centos/build_srpm.data new file mode 100644 index 00000000..cde47bcf --- /dev/null +++ b/openstack/python-novaclient/centos/build_srpm.data @@ -0,0 +1,4 @@ +TAR_NAME=python-novaclient +SRC_DIR=$CGCS_BASE/git/python-novaclient +TIS_BASE_SRCREV=a1c00740c5b709a7d2bc4289fa6e28eac7909b8f +TIS_PATCH_VER=GITREVCOUNT diff --git a/openstack/python-novaclient/centos/python-novaclient.spec b/openstack/python-novaclient/centos/python-novaclient.spec new file mode 100644 index 00000000..6ce40bfb --- /dev/null +++ b/openstack/python-novaclient/centos/python-novaclient.spec @@ -0,0 +1,186 @@ +%{!?upstream_version: %global upstream_version %{version}%{?milestone}} + +%global sname novaclient + +%if 
0%{?fedora} +%global with_python3 1 +%endif + +Name: python-novaclient +Epoch: 1 +Version: 9.1.1 +Release: 1%{?_tis_dist}.%{tis_patch_ver} +Summary: Python API and CLI for OpenStack Nova +License: ASL 2.0 +URL: https://launchpad.net/python-novaclient +Source0: %{name}-%{version}.tar.gz +BuildArch: noarch + +%description +This is a client for the OpenStack Nova API. There's a Python API (the +novaclient module), and a command-line script (nova). Each implements 100% of +the OpenStack Nova API. + +%package -n python2-%{sname} +Summary: Python API and CLI for OpenStack Nova +%{?python_provide:%python_provide python2-novaclient} + +BuildRequires: python2-devel +BuildRequires: python-pbr +BuildRequires: git +BuildRequires: python-setuptools +BuildRequires: python-dateutil + +Requires: python-babel >= 2.3.4 +Requires: python-iso8601 >= 0.1.11 +Requires: python-keystoneauth1 >= 3.1.0 +Requires: python-oslo-i18n >= 2.1.0 +Requires: python-oslo-serialization >= 1.10.0 +Requires: python-oslo-utils >= 3.20.0 +Requires: python-pbr >= 2.0.0 +Requires: python-prettytable >= 0.7.1 +Requires: python-requests +Requires: python-simplejson >= 2.2.0 +Requires: python-six >= 1.9.0 + +%description -n python2-%{sname} +This is a client for the OpenStack Nova API. There's a Python API (the +novaclient module), and a command-line script (nova). Each implements 100% of +the OpenStack Nova API. + +%if 0%{?with_python3} +%package -n python3-%{sname} +Summary: Python API and CLI for OpenStack Nova +%{?python_provide:%python_provide python3-novaclient} + +BuildRequires: python3-devel +BuildRequires: python3-pbr +BuildRequires: python3-setuptools + +Requires: python3-babel >= 2.3.4 +Requires: python3-iso8601 >= 0.1.11 +Requires: python3-keystoneauth1 >= 3.1.0 +Requires: python3-oslo-i18n >= 2.1.0 +Requires: python3-oslo-serialization >= 1.10.0 +Requires: python3-oslo-utils >= 3.20.0 +Requires: python3-pbr >= 2.0.0 +Requires: python3-prettytable >= 0.7.1 +Requires: python3-requests +Requires: python3-simplejson >= 2.2.0 +Requires: python3-six >= 1.9.0 + +%description -n python3-%{sname} +This is a client for the OpenStack Nova API. There's a Python API (the +novaclient module), and a command-line script (nova). Each implements 100% of +the OpenStack Nova API. +%endif + +%package doc +Summary: Documentation for OpenStack Nova API Client + +BuildRequires: python-sphinx +BuildRequires: python-openstackdocstheme +BuildRequires: python-oslo-utils +BuildRequires: python-keystoneauth1 +BuildRequires: python-oslo-serialization +BuildRequires: python-prettytable + +%description doc +This is a client for the OpenStack Nova API. There's a Python API (the +novaclient module), and a command-line script (nova). Each implements 100% of +the OpenStack Nova API. + +This package contains auto-generated documentation. 
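+# Illustrative usage only (commands assumed, nothing in this spec runs them):
+# a remote-client build would consume the sdk subpackage below roughly as:
+#   tar xzf /usr/share/remote-clients/%{name}/%{name}-%{version}.tgz
+#   cd %{name}-%{version} && python setup.py install
+# i.e., the tarball is the pristine source tree for rebuilding the client
+# outside this build system.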
+ +%package sdk +Summary: SDK files for %{name} + +%description sdk +Contains SDK files for %{name} package + +%prep +%autosetup -n %{name}-%{upstream_version} -S git + +# Let RPM handle the requirements +rm -f test-requirements.txt + +%build +export PBR_VERSION=%{version} +%py2_build +%if 0%{?with_python3} +%py3_build +%endif + +%install +export PBR_VERSION=%{version} +%if 0%{?with_python3} +%py3_install +mv %{buildroot}%{_bindir}/nova %{buildroot}%{_bindir}/nova-%{python3_version} +ln -s ./nova-%{python3_version} %{buildroot}%{_bindir}/nova-3 +# Delete tests +rm -fr %{buildroot}%{python3_sitelib}/novaclient/tests +%endif + +%py2_install +mv %{buildroot}%{_bindir}/nova %{buildroot}%{_bindir}/nova-%{python2_version} +ln -s ./nova-%{python2_version} %{buildroot}%{_bindir}/nova-2 + +ln -s ./nova-2 %{buildroot}%{_bindir}/nova + +mkdir -p %{buildroot}%{_sysconfdir}/bash_completion.d +install -pm 644 tools/nova.bash_completion \ + %{buildroot}%{_sysconfdir}/bash_completion.d/nova + +# Delete tests +rm -fr %{buildroot}%{python2_sitelib}/novaclient/tests + +%{__python2} setup.py build_sphinx -b html +%{__python2} setup.py build_sphinx -b man + +install -p -D -m 644 doc/build/man/nova.1 %{buildroot}%{_mandir}/man1/nova.1 + +# Fix hidden-file-or-dir warnings +rm -fr doc/build/html/.doctrees doc/build/html/.buildinfo + +# prep SDK package +mkdir -p %{buildroot}/usr/share/remote-clients/%{name} +tar zcf %{buildroot}/usr/share/remote-clients/%{name}/%{name}-%{version}.tgz --exclude='.gitignore' --exclude='.gitreview' -C .. %{name}-%{version} + +%files -n python2-%{sname} +%license LICENSE +%doc README.rst +%{python2_sitelib}/%{sname} +%{python2_sitelib}/*.egg-info +%{_sysconfdir}/bash_completion.d +%{_mandir}/man1/nova.1.gz +%{_bindir}/nova +%{_bindir}/nova-2 +%{_bindir}/nova-%{python2_version} + + +%if 0%{?with_python3} +%files -n python3-%{sname} +%license LICENSE +%doc README.rst +%{python3_sitelib}/%{sname} +%{python3_sitelib}/*.egg-info +%{_sysconfdir}/bash_completion.d +%{_mandir}/man1/nova.1.gz +%{_bindir}/nova-3 +%{_bindir}/nova-%{python3_version} +%endif + +%files doc +%doc doc/build/html +%license LICENSE + +%files sdk +/usr/share/remote-clients/%{name}/%{name}-%{version}.tgz + +%changelog +* Fri Oct 06 2017 rdo-trunk 1:9.1.1-1 +- Update to 9.1.1 + +* Mon Aug 14 2017 Alfredo Moralejo 1:9.1.0-1 +- Update to 9.1.0 + diff --git a/openstack/python-openstackclient/centos/build_srpm.data b/openstack/python-openstackclient/centos/build_srpm.data new file mode 100644 index 00000000..52b88651 --- /dev/null +++ b/openstack/python-openstackclient/centos/build_srpm.data @@ -0,0 +1 @@ +TIS_PATCH_VER=16 diff --git a/openstack/python-openstackclient/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch b/openstack/python-openstackclient/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch new file mode 100644 index 00000000..fb48c0b6 --- /dev/null +++ b/openstack/python-openstackclient/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch @@ -0,0 +1,13 @@ +diff --git a/SPECS/python-openstackclient.spec b/SPECS/python-openstackclient.spec +index 62817fd..2c25c75 100644 +--- a/SPECS/python-openstackclient.spec ++++ b/SPECS/python-openstackclient.spec +@@ -10,7 +10,7 @@ + + Name: python-openstackclient + Version: 3.12.0 +-Release: 1%{?dist} ++Release: 2.el7%{?_tis_dist}.%{tis_patch_ver} + Summary: OpenStack Command-line Client + + License: ASL 2.0 diff --git 
a/openstack/python-openstackclient/centos/meta_patches/0001-meta-US106901-Openstack-CLI-Adoption.patch b/openstack/python-openstackclient/centos/meta_patches/0001-meta-US106901-Openstack-CLI-Adoption.patch new file mode 100644 index 00000000..fba542e5 --- /dev/null +++ b/openstack/python-openstackclient/centos/meta_patches/0001-meta-US106901-Openstack-CLI-Adoption.patch @@ -0,0 +1,25 @@ +From 12504bfa15db581d310f3e059b5c8dce0df1be9f Mon Sep 17 00:00:00 2001 +From: Sen Yang +Date: Thu, 1 Feb 2018 12:05:55 -0500 +Subject: [PATCH 1/1] US106901 Openstack-CLI-Adoption for + Glance/Cinder/Ceilometer + +--- + SPECS/python-openstackclient.spec | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/SPECS/python-openstackclient.spec b/SPECS/python-openstackclient.spec +index 1cf210a..6774444 100644 +--- a/SPECS/python-openstackclient.spec ++++ b/SPECS/python-openstackclient.spec +@@ -24,6 +24,7 @@ Patch0003: CGTS-7814-warning-only-when-the-admin-password-chang.patch + Patch0004: 0001-CGTS-7947-add-os-keystone-region-name-to-identity-cl.patch + Patch0005: 0001-US101470-Openstackclient-implementation-of-novaclien.patch + Patch0006: 0002-US101470-Openstackclient-implementation-of-novaclien.patch ++Patch0007: 0001-US106901-Openstack-CLI-Adoption.patch + + BuildArch: noarch + +-- +1.8.3.1 + diff --git a/openstack/python-openstackclient/centos/meta_patches/0001-meta-us101470.patch b/openstack/python-openstackclient/centos/meta_patches/0001-meta-us101470.patch new file mode 100644 index 00000000..b2f3cd4a --- /dev/null +++ b/openstack/python-openstackclient/centos/meta_patches/0001-meta-us101470.patch @@ -0,0 +1,24 @@ +From 47de75f37476e81f627222417e652ac77b2a17ab Mon Sep 17 00:00:00 2001 +From: Sen Yang +Date: Wed, 20 Dec 2017 23:53:02 -0500 +Subject: [PATCH 1/1] meta us101470 + +--- + SPECS/python-openstackclient.spec | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/SPECS/python-openstackclient.spec b/SPECS/python-openstackclient.spec +index 9bca398..3e184a9 100644 +--- a/SPECS/python-openstackclient.spec ++++ b/SPECS/python-openstackclient.spec +@@ -22,6 +22,7 @@ Patch0001: 0001-neutron-extensions.patch + Patch0002: openstackClient_Passwordchange_warning.patch + Patch0003: CGTS-7814-warning-only-when-the-admin-password-chang.patch + Patch0004: 0001-CGTS-7947-add-os-keystone-region-name-to-identity-cl.patch ++Patch0005: 0001-US101470-Openstackclient-implementation-of-novaclien.patch + + BuildArch: noarch + +-- +1.8.3.1 + diff --git a/openstack/python-openstackclient/centos/meta_patches/0002-meta-US106901-Openstack-CLI-Adoption.patch b/openstack/python-openstackclient/centos/meta_patches/0002-meta-US106901-Openstack-CLI-Adoption.patch new file mode 100644 index 00000000..2ea5be5d --- /dev/null +++ b/openstack/python-openstackclient/centos/meta_patches/0002-meta-US106901-Openstack-CLI-Adoption.patch @@ -0,0 +1,25 @@ +From ddf5ea208a3ba122558e355fe8535bcba024d2c8 Mon Sep 17 00:00:00 2001 +From: Sen Yang +Date: Mon, 5 Feb 2018 15:37:29 -0500 +Subject: [PATCH 1/1] US106901 Openstack CLI Adoption: Cinder/Glance/Ceilometer + + CI part2 +--- + SPECS/python-openstackclient.spec | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/SPECS/python-openstackclient.spec b/SPECS/python-openstackclient.spec +index 6774444..4b1682e 100644 +--- a/SPECS/python-openstackclient.spec ++++ b/SPECS/python-openstackclient.spec +@@ -25,6 +25,7 @@ Patch0004: 0001-CGTS-7947-add-os-keystone-region-name-to-identity-cl.patc + Patch0005: 0001-US101470-Openstackclient-implementation-of-novaclien.patch + Patch0006: 
0002-US101470-Openstackclient-implementation-of-novaclien.patch + Patch0007: 0001-US106901-Openstack-CLI-Adoption.patch ++Patch0008: 0002-US106901-Openstack-CLI-Adoption.patch + + BuildArch: noarch + +-- +1.8.3.1 + diff --git a/openstack/python-openstackclient/centos/meta_patches/0002-meta-us101470.patch b/openstack/python-openstackclient/centos/meta_patches/0002-meta-us101470.patch new file mode 100644 index 00000000..31974e51 --- /dev/null +++ b/openstack/python-openstackclient/centos/meta_patches/0002-meta-us101470.patch @@ -0,0 +1,24 @@ +From c330094f36df9e38c497d6cbfb03574e6ad368a0 Mon Sep 17 00:00:00 2001 +From: Sen Yang +Date: Fri, 5 Jan 2018 15:25:06 -0500 +Subject: [PATCH 1/1] US101470 part2 + +--- + SPECS/python-openstackclient.spec | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/SPECS/python-openstackclient.spec b/SPECS/python-openstackclient.spec +index 3e184a9..1cf210a 100644 +--- a/SPECS/python-openstackclient.spec ++++ b/SPECS/python-openstackclient.spec +@@ -23,6 +23,7 @@ Patch0002: openstackClient_Passwordchange_warning.patch + Patch0003: CGTS-7814-warning-only-when-the-admin-password-chang.patch + Patch0004: 0001-CGTS-7947-add-os-keystone-region-name-to-identity-cl.patch + Patch0005: 0001-US101470-Openstackclient-implementation-of-novaclien.patch ++Patch0006: 0002-US101470-Openstackclient-implementation-of-novaclien.patch + + BuildArch: noarch + +-- +1.8.3.1 + diff --git a/openstack/python-openstackclient/centos/meta_patches/0002-spec-remote-clients-sdk.patch b/openstack/python-openstackclient/centos/meta_patches/0002-spec-remote-clients-sdk.patch new file mode 100644 index 00000000..f7f9d5e5 --- /dev/null +++ b/openstack/python-openstackclient/centos/meta_patches/0002-spec-remote-clients-sdk.patch @@ -0,0 +1,42 @@ +diff --git a/SPECS/python-openstackclient.spec b/SPECS/python-openstackclient.spec +index 2c25c75..874e8a4 100644 +--- a/SPECS/python-openstackclient.spec ++++ b/SPECS/python-openstackclient.spec +@@ -162,6 +162,13 @@ It is a thin wrapper to the stock python-*client modules that implement the + actual REST API client actions. + %endif + ++%package sdk ++Summary: SDK files for %{name} ++ ++%description sdk ++Contains SDK files for %{name} package ++ ++ + %prep + %autosetup -n %{name}-%{upstream_version} -S git + +@@ -206,6 +213,12 @@ mv %{buildroot}%{python2_sitelib}/openstackclient/locale %{buildroot}%{_datadir} + rm -rf %{buildroot}%{python3_sitelib}/openstackclient/locale + %endif + ++ ++# prep SDK package ++mkdir -p %{buildroot}/usr/share/remote-clients/%{name} ++tar zcf %{buildroot}/usr/share/remote-clients/%{name}/%{name}-%{version}.tgz --exclude='.gitignore' --exclude='.gitreview' -C .. 
%{name}-%{version} ++ ++ + # Find language files + %find_lang openstackclient --all-name + +@@ -242,6 +255,10 @@ rm -rf .testrepository + %{python3_sitelib}/openstackclient + %{python3_sitelib}/*.egg-info + %endif ++ ++%files sdk ++/usr/share/remote-clients/%{name}/%{name}-%{version}.tgz ++ + %changelog + * Mon Aug 21 2017 Alfredo Moralejo 3.12.0-1 + - Update to 3.12.0 diff --git a/openstack/python-openstackclient/centos/meta_patches/0003-meta-US106901-Openstack-CLI-Adoption.patch b/openstack/python-openstackclient/centos/meta_patches/0003-meta-US106901-Openstack-CLI-Adoption.patch new file mode 100644 index 00000000..5b00ebea --- /dev/null +++ b/openstack/python-openstackclient/centos/meta_patches/0003-meta-US106901-Openstack-CLI-Adoption.patch @@ -0,0 +1,24 @@ +From 9d598a1ee8f1a98ce5f14d76f88123d4a7614e1c Mon Sep 17 00:00:00 2001 +From: Sen Yang +Date: Thu, 15 Feb 2018 11:52:09 -0500 +Subject: [PATCH 1/1] US106901 Openstack-CLI-Adoption + +--- + SPECS/python-openstackclient.spec | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/SPECS/python-openstackclient.spec b/SPECS/python-openstackclient.spec +index 4b1682e..6b582ce 100644 +--- a/SPECS/python-openstackclient.spec ++++ b/SPECS/python-openstackclient.spec +@@ -26,6 +26,7 @@ Patch0005: 0001-US101470-Openstackclient-implementation-of-novaclien.patc + Patch0006: 0002-US101470-Openstackclient-implementation-of-novaclien.patch + Patch0007: 0001-US106901-Openstack-CLI-Adoption.patch + Patch0008: 0002-US106901-Openstack-CLI-Adoption.patch ++Patch0009: 0003-US106901-Openstack-CLI-Adoption.patch + + BuildArch: noarch + +-- +1.8.3.1 + diff --git a/openstack/python-openstackclient/centos/meta_patches/0003-meta-dont-remove-requirements-txt.patch b/openstack/python-openstackclient/centos/meta_patches/0003-meta-dont-remove-requirements-txt.patch new file mode 100644 index 00000000..a2ab1217 --- /dev/null +++ b/openstack/python-openstackclient/centos/meta_patches/0003-meta-dont-remove-requirements-txt.patch @@ -0,0 +1,13 @@ +diff --git a/SPECS/python-openstackclient.spec b/SPECS/python-openstackclient.spec +index 874e8a4..8cea3b8 100644 +--- a/SPECS/python-openstackclient.spec ++++ b/SPECS/python-openstackclient.spec +@@ -173,7 +173,7 @@ Contains SDK files for %{name} package + %autosetup -n %{name}-%{upstream_version} -S git + + # We handle requirements ourselves, pkg_resources only bring pain +-rm -rf requirements.txt test-requirements.txt ++rm -rf test-requirements.txt + + %build + %py2_build diff --git a/openstack/python-openstackclient/centos/meta_patches/0004-added-missing-build-require-dateutil.patch b/openstack/python-openstackclient/centos/meta_patches/0004-added-missing-build-require-dateutil.patch new file mode 100644 index 00000000..1ba5a430 --- /dev/null +++ b/openstack/python-openstackclient/centos/meta_patches/0004-added-missing-build-require-dateutil.patch @@ -0,0 +1,12 @@ +diff --git a/SPECS/python-openstackclient.spec b/SPECS/python-openstackclient.spec +index 8cea3b8..dbb1daa 100644 +--- a/SPECS/python-openstackclient.spec ++++ b/SPECS/python-openstackclient.spec +@@ -59,6 +59,7 @@ BuildRequires: python-reno + BuildRequires: python-requestsexceptions + BuildRequires: python-openstacksdk + BuildRequires: python-osprofiler ++BuildRequires: python-dateutil + + Requires: python-pbr + Requires: python-babel diff --git a/openstack/python-openstackclient/centos/meta_patches/0005-meta-patch-for-neutron-extensions.patch b/openstack/python-openstackclient/centos/meta_patches/0005-meta-patch-for-neutron-extensions.patch new file mode 
100644 index 00000000..d248296f --- /dev/null +++ b/openstack/python-openstackclient/centos/meta_patches/0005-meta-patch-for-neutron-extensions.patch @@ -0,0 +1,14 @@ +diff --git a/SPECS/python-openstackclient.spec b/SPECS/python-openstackclient.spec +index dbb1daa..3ef7f0d 100644 +--- a/SPECS/python-openstackclient.spec ++++ b/SPECS/python-openstackclient.spec +@@ -17,6 +17,9 @@ License: ASL 2.0 + URL: http://github.com/openstack/%{name} + Source0: https://tarballs.openstack.org/%{name}/%{name}-%{upstream_version}.tar.gz + ++# WRS patches ++Patch0001: 0001-neutron-extensions.patch ++ + BuildArch: noarch + + %description diff --git a/openstack/python-openstackclient/centos/meta_patches/0006-openstackClient_Passwordchange_warning.patch b/openstack/python-openstackclient/centos/meta_patches/0006-openstackClient_Passwordchange_warning.patch new file mode 100644 index 00000000..fc3f3b25 --- /dev/null +++ b/openstack/python-openstackclient/centos/meta_patches/0006-openstackClient_Passwordchange_warning.patch @@ -0,0 +1,12 @@ +diff --git a/SPECS/python-openstackclient.spec b/SPECS/python-openstackclient.spec +index 3ef7f0d..9f624f1 100644 +--- a/SPECS/python-openstackclient.spec ++++ b/SPECS/python-openstackclient.spec +@@ -19,6 +19,7 @@ Source0: https://tarballs.openstack.org/%{name}/%{name}-%{upstream_vers + + # WRS patches + Patch0001: 0001-neutron-extensions.patch ++Patch0002: openstackClient_Passwordchange_warning.patch + + BuildArch: noarch + diff --git a/openstack/python-openstackclient/centos/meta_patches/0007-CGTS-7814-warning-only-when-the-admin-password-chang.patch b/openstack/python-openstackclient/centos/meta_patches/0007-CGTS-7814-warning-only-when-the-admin-password-chang.patch new file mode 100644 index 00000000..d054e184 --- /dev/null +++ b/openstack/python-openstackclient/centos/meta_patches/0007-CGTS-7814-warning-only-when-the-admin-password-chang.patch @@ -0,0 +1,12 @@ +diff --git a/SPECS/python-openstackclient.spec b/SPECS/python-openstackclient.spec +index 9f624f1..4791547 100644 +--- a/SPECS/python-openstackclient.spec ++++ b/SPECS/python-openstackclient.spec +@@ -20,6 +20,7 @@ Source0: https://tarballs.openstack.org/%{name}/%{name}-%{upstream_vers + # WRS patches + Patch0001: 0001-neutron-extensions.patch + Patch0002: openstackClient_Passwordchange_warning.patch ++Patch0003: CGTS-7814-warning-only-when-the-admin-password-chang.patch + + BuildArch: noarch + diff --git a/openstack/python-openstackclient/centos/meta_patches/0008-CGTS-7947-add-os-keystone-region-name-to-identity-cl.patch b/openstack/python-openstackclient/centos/meta_patches/0008-CGTS-7947-add-os-keystone-region-name-to-identity-cl.patch new file mode 100644 index 00000000..f8e0b385 --- /dev/null +++ b/openstack/python-openstackclient/centos/meta_patches/0008-CGTS-7947-add-os-keystone-region-name-to-identity-cl.patch @@ -0,0 +1,26 @@ +From e407dce42d83c82407318474b1e86184d7abdfb5 Mon Sep 17 00:00:00 2001 +From: Andy Ning +Date: Thu, 2 Nov 2017 10:45:40 -0400 +Subject: [PATCH 1/1] CGTS-7947: add --os-keystone-region-name to identity + client + +Signed-off-by: Andy Ning +--- + SPECS/python-openstackclient.spec | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/SPECS/python-openstackclient.spec b/SPECS/python-openstackclient.spec +index 7bf7a9b..968920a 100644 +--- a/SPECS/python-openstackclient.spec ++++ b/SPECS/python-openstackclient.spec +@@ -12,6 +12,7 @@ Source0: https://tarballs.openstack.org/%{name}/%{name}-%{upstream_vers + Patch0001: 0001-neutron-extensions.patch + Patch0002: 
openstackClient_Passwordchange_warning.patch + Patch0003: CGTS-7814-warning-only-when-the-admin-password-chang.patch ++Patch0004: 0001-CGTS-7947-add-os-keystone-region-name-to-identity-cl.patch + + BuildArch: noarch + +-- +1.8.3.1 + diff --git a/openstack/python-openstackclient/centos/meta_patches/1000-remove-version-requirements.patch b/openstack/python-openstackclient/centos/meta_patches/1000-remove-version-requirements.patch new file mode 100644 index 00000000..b2f7b8a7 --- /dev/null +++ b/openstack/python-openstackclient/centos/meta_patches/1000-remove-version-requirements.patch @@ -0,0 +1,33 @@ +diff --git a/SPECS/python-openstackclient.spec b/SPECS/python-openstackclient.spec +index 4791547..1db72f9 100644 +--- a/SPECS/python-openstackclient.spec ++++ b/SPECS/python-openstackclient.spec +@@ -69,17 +69,17 @@ BuildRequires: python-dateutil + Requires: python-pbr + Requires: python-babel + Requires: python-cliff +-Requires: python-openstacksdk >= 0.9.17 +-Requires: python-oslo-i18n >= 2.1.0 +-Requires: python-oslo-utils >= 3.20.0 +-Requires: python-glanceclient >= 1:2.8.0 +-Requires: python-keystoneauth1 >= 3.1.0 +-Requires: python-keystoneclient >= 1:3.8.0 +-Requires: python-novaclient >= 1:9.0.0 +-Requires: python-cinderclient >= 3.1.0 +-Requires: python-neutronclient >= 6.3.0 +-Requires: python-six >= 1.9.0 +-Requires: python-osc-lib >= 1.7.0 ++Requires: python-openstacksdk ++Requires: python-oslo-i18n ++Requires: python-oslo-utils ++Requires: python-glanceclient ++Requires: python-keystoneauth1 ++Requires: python-keystoneclient ++Requires: python-novaclient ++Requires: python-cinderclient ++Requires: python-neutronclient ++Requires: python-six ++Requires: python-osc-lib + Requires: python-%{client}-lang = %{version}-%{release} + + diff --git a/openstack/python-openstackclient/centos/meta_patches/1001-Turn-off-openstackclient-check.patch b/openstack/python-openstackclient/centos/meta_patches/1001-Turn-off-openstackclient-check.patch new file mode 100644 index 00000000..03399d62 --- /dev/null +++ b/openstack/python-openstackclient/centos/meta_patches/1001-Turn-off-openstackclient-check.patch @@ -0,0 +1,36 @@ +From be8dcc12e9a6719cb5031fd009bd2b587dfd33a9 Mon Sep 17 00:00:00 2001 +From: Al Bailey +Date: Thu, 19 Oct 2017 14:19:56 -0500 +Subject: [PATCH] Turn off openstackclient check + +--- + SPECS/python-openstackclient.spec | 13 +++++++------ + 1 file changed, 7 insertions(+), 6 deletions(-) + +diff --git a/SPECS/python-openstackclient.spec b/SPECS/python-openstackclient.spec +index 1db72f9..a04299e 100644 +--- a/SPECS/python-openstackclient.spec ++++ b/SPECS/python-openstackclient.spec +@@ -228,12 +228,13 @@ tar zcf %{buildroot}/usr/share/remote-clients/%{name}/%{name}-%{version}.tgz --e + # Find language files + %find_lang openstackclient --all-name + +-%check +-%{__python2} setup.py test +-%if 0%{?with_python3} +-rm -rf .testrepository +-%{__python3} setup.py test +-%endif ++# WRS disable check. 
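++# (The upstream %check stanza is kept below, commented out rather than
++# deleted, so the original test invocation stays visible in the spec.)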
++#%check ++#%{__python2} setup.py test ++#%if 0%{?with_python3} ++#rm -rf .testrepository ++#%{__python3} setup.py test ++#%endif + + %files -n python2-%{client} + %license LICENSE +-- +1.8.3.1 + diff --git a/openstack/python-openstackclient/centos/meta_patches/1002-require-python-ceilometerclient.patch b/openstack/python-openstackclient/centos/meta_patches/1002-require-python-ceilometerclient.patch new file mode 100644 index 00000000..be8ff070 --- /dev/null +++ b/openstack/python-openstackclient/centos/meta_patches/1002-require-python-ceilometerclient.patch @@ -0,0 +1,20 @@ +diff --git a/SPECS/python-openstackclient.spec b/SPECS/python-openstackclient.spec +index 6b582ce..5d75107 100644 +--- a/SPECS/python-openstackclient.spec ++++ b/SPECS/python-openstackclient.spec +@@ -55,6 +55,7 @@ BuildRequires: python-keystoneclient + BuildRequires: python-novaclient + BuildRequires: python-cinderclient + BuildRequires: python-neutronclient ++BuildRequires: python-ceilometerclient + BuildRequires: python-mock + BuildRequires: python-requests-mock + BuildRequires: python-os-client-config +@@ -84,6 +85,7 @@ Requires: python-keystoneclient + Requires: python-novaclient + Requires: python-cinderclient + Requires: python-neutronclient ++Requires: python-ceilometerclient + Requires: python-six + Requires: python-osc-lib + Requires: python-%{client}-lang = %{version}-%{release} diff --git a/openstack/python-openstackclient/centos/meta_patches/PATCH_ORDER b/openstack/python-openstackclient/centos/meta_patches/PATCH_ORDER new file mode 100644 index 00000000..624ca310 --- /dev/null +++ b/openstack/python-openstackclient/centos/meta_patches/PATCH_ORDER @@ -0,0 +1,16 @@ +0001-Update-package-versioning-for-TIS-format.patch +0002-spec-remote-clients-sdk.patch +0003-meta-dont-remove-requirements-txt.patch +0004-added-missing-build-require-dateutil.patch +0005-meta-patch-for-neutron-extensions.patch +0006-openstackClient_Passwordchange_warning.patch +0007-CGTS-7814-warning-only-when-the-admin-password-chang.patch +0008-CGTS-7947-add-os-keystone-region-name-to-identity-cl.patch +1000-remove-version-requirements.patch +1001-Turn-off-openstackclient-check.patch +0001-meta-us101470.patch +0002-meta-us101470.patch +0001-meta-US106901-Openstack-CLI-Adoption.patch +0002-meta-US106901-Openstack-CLI-Adoption.patch +0003-meta-US106901-Openstack-CLI-Adoption.patch +1002-require-python-ceilometerclient.patch diff --git a/openstack/python-openstackclient/centos/patches/0001-CGTS-7947-add-os-keystone-region-name-to-identity-cl.patch b/openstack/python-openstackclient/centos/patches/0001-CGTS-7947-add-os-keystone-region-name-to-identity-cl.patch new file mode 100644 index 00000000..d8a92ed1 --- /dev/null +++ b/openstack/python-openstackclient/centos/patches/0001-CGTS-7947-add-os-keystone-region-name-to-identity-cl.patch @@ -0,0 +1,27 @@ +From 54d7b55413d3374911957f6327ebd73f70ee24b8 Mon Sep 17 00:00:00 2001 +From: rpm-build +Date: Thu, 2 Nov 2017 10:40:15 -0400 +Subject: [PATCH 1/1] CGTS-7947: add --os-keystone-region-name to identity + client + +Signed-off-by: rpm-build +--- + openstackclient/identity/client.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/openstackclient/identity/client.py b/openstackclient/identity/client.py +index 0292aac..e2f4756 100644 +--- a/openstackclient/identity/client.py ++++ b/openstackclient/identity/client.py +@@ -53,7 +53,7 @@ def make_client(instance): + + client = identity_client( + session=instance.session, +- region_name=instance.region_name, ++ 
region_name=instance.keystone_region_name, + **kwargs + ) + +-- +1.8.3.1 + diff --git a/openstack/python-openstackclient/centos/patches/0001-US101470-Openstackclient-implementation-of-novaclien.patch b/openstack/python-openstackclient/centos/patches/0001-US101470-Openstackclient-implementation-of-novaclien.patch new file mode 100644 index 00000000..d1bd60fb --- /dev/null +++ b/openstack/python-openstackclient/centos/patches/0001-US101470-Openstackclient-implementation-of-novaclien.patch @@ -0,0 +1,232 @@ +From 47122b746e16ffb3d35d14aa4f51f0f759400b71 Mon Sep 17 00:00:00 2001 +From: Sen Yang +Date: Mon, 18 Dec 2017 10:08:08 -0500 +Subject: [PATCH 1/1] US101470 Openstackclient implementation of novaclient cli + with wrs extension + + In this commit the following OSC equivalence of novaclient cli with + wrs extension is implemented: + openstack pcidevice list - nova device-list + openstack pcidevice show - nova device-show + openstack providernet pci show - nova providernet-show +--- + openstackclient/compute/v2/wrs_pci.py | 88 ++++++++++++++++++++++++ + openstackclient/compute/v2/wrs_providernets.py | 46 +++++++++++++ + python_openstackclient.egg-info/SOURCES.txt | 4 +- + python_openstackclient.egg-info/entry_points.txt | 3 + + setup.cfg | 3 + + 5 files changed, 143 insertions(+), 1 deletion(-) + create mode 100644 openstackclient/compute/v2/wrs_pci.py + create mode 100644 openstackclient/compute/v2/wrs_providernets.py + +diff --git a/openstackclient/compute/v2/wrs_pci.py b/openstackclient/compute/v2/wrs_pci.py +new file mode 100644 +index 0000000..c3fba81 +--- /dev/null ++++ b/openstackclient/compute/v2/wrs_pci.py +@@ -0,0 +1,88 @@ ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++# ++# Copyright (c) 2015-2017 Wind River Systems, Inc. 
++# ++# ++# ++# ++ ++ ++"""Compute v2 wrs_pci action implementations""" ++ ++from osc_lib.cli import parseractions ++from osc_lib.command import command ++from osc_lib import utils ++import six ++ ++from openstackclient.i18n import _ ++ ++ ++class ListPciDevices(command.Lister): ++ """List pci device""" ++ ++ def get_parser(self, prog_name): ++ parser = super(ListPciDevices, self).get_parser(prog_name) ++ parser.add_argument( ++ "--device", ++ metavar="", ++ help=_("PCI devices matching a particular device id or alias.") ++ ) ++ return parser ++ ++ def take_action(self, parsed_args): ++ compute_client = self.app.client_manager.compute ++ ++ columns = ( ++ "PCI Alias", ++ "Device Id", ++ "Vendor Id", ++ "Class Id", ++ "pci_pfs_configured", ++ "pci_pfs_used", ++ "pci_vfs_configured", ++ "pci_vfs_used" ++ ) ++ ++ data = compute_client.wrs_pci.list(parsed_args.device) ++ ++ return (columns, ++ (utils.get_item_properties( ++ s, columns, ++ ) for s in data)) ++ ++ ++class ShowPciDevices(command.ShowOne): ++ """Show details of a given PCI device.""" ++ ++ def get_parser(self, prog_name): ++ parser = super(ShowPciDevices, self).get_parser(prog_name) ++ parser.add_argument( ++ "device", ++ metavar="", ++ help=_("Device alias or device id of the PCI device.") ++ ) ++ parser.add_argument( ++ "--host", ++ metavar="", ++ help=_("Limit matches to PCI devices from a particular host") ++ ) ++ return parser ++ ++ def take_action(self, parsed_args): ++ compute_client = self.app.client_manager.compute ++ deviceInfo = compute_client.wrs_pci.get(parsed_args.device, ++ parsed_args.host) ++ ++ data = deviceInfo._info.copy() ++ return zip(*sorted(six.iteritems(data))) ++ +diff --git a/openstackclient/compute/v2/wrs_providernets.py b/openstackclient/compute/v2/wrs_providernets.py +new file mode 100644 +index 0000000..b487fb6 +--- /dev/null ++++ b/openstackclient/compute/v2/wrs_providernets.py +@@ -0,0 +1,46 @@ ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++# ++# Copyright (c) 2015-2017 Wind River Systems, Inc. 
++# ++# ++# ++# ++ ++ ++"""Compute v2 wrs_providernet action implementations""" ++ ++from osc_lib.cli import parseractions ++from osc_lib.command import command ++import six ++ ++from openstackclient.i18n import _ ++ ++ ++class ShowProvidernetPci(command.ShowOne): ++ """Show details of a given provider network""" ++ ++ def get_parser(self, prog_name): ++ parser = super(ShowProvidernetPci, self).get_parser(prog_name) ++ parser.add_argument( ++ "providernet", ++ metavar="", ++ help=_("Id of the provider network") ++ ) ++ return parser ++ ++ def take_action(self, parsed_args): ++ compute_client = self.app.client_manager.compute ++ ++ providernet = compute_client.wrs_providernets.get(parsed_args.providernet) ++ stats = providernet._info.copy() ++ return zip(*sorted(six.iteritems(stats))) +diff --git a/python_openstackclient.egg-info/SOURCES.txt b/python_openstackclient.egg-info/SOURCES.txt +index 82dae1c..66b9513 100644 +--- a/python_openstackclient.egg-info/SOURCES.txt ++++ b/python_openstackclient.egg-info/SOURCES.txt +@@ -187,6 +187,8 @@ openstackclient/compute/v2/server_group.py + openstackclient/compute/v2/server_image.py + openstackclient/compute/v2/service.py + openstackclient/compute/v2/usage.py ++openstackclient/compute/v2/wrs_pci.py ++openstackclient/compute/v2/wrs_providernets.py + openstackclient/identity/__init__.py + openstackclient/identity/client.py + openstackclient/identity/common.py +@@ -894,4 +896,4 @@ releasenotes/source/unreleased.rst + releasenotes/source/_static/.placeholder + releasenotes/source/_templates/.placeholder + tools/fast8.sh +-tools/tox_install.sh +\ No newline at end of file ++tools/tox_install.sh +diff --git a/python_openstackclient.egg-info/entry_points.txt b/python_openstackclient.egg-info/entry_points.txt +index 99aaf5e..9a717b8 100644 +--- a/python_openstackclient.egg-info/entry_points.txt ++++ b/python_openstackclient.egg-info/entry_points.txt +@@ -45,6 +45,8 @@ compute_service_list = openstackclient.compute.v2.service:ListService + compute_service_set = openstackclient.compute.v2.service:SetService + console_log_show = openstackclient.compute.v2.console:ShowConsoleLog + console_url_show = openstackclient.compute.v2.console:ShowConsoleURL ++pcidevice_list = openstackclient.compute.v2.wrs_pci:ListPciDevices ++pcidevice_show = openstackclient.compute.v2.wrs_pci:ShowPciDevices + flavor_create = openstackclient.compute.v2.flavor:CreateFlavor + flavor_delete = openstackclient.compute.v2.flavor:DeleteFlavor + flavor_list = openstackclient.compute.v2.flavor:ListFlavor +@@ -65,6 +67,7 @@ keypair_create = openstackclient.compute.v2.keypair:CreateKeypair + keypair_delete = openstackclient.compute.v2.keypair:DeleteKeypair + keypair_list = openstackclient.compute.v2.keypair:ListKeypair + keypair_show = openstackclient.compute.v2.keypair:ShowKeypair ++providernet_pci_show = openstackclient.compute.v2.wrs_providernets:ShowProvidernetPci + server_add_fixed_ip = openstackclient.compute.v2.server:AddFixedIP + server_add_floating_ip = openstackclient.compute.v2.server:AddFloatingIP + server_add_port = openstackclient.compute.v2.server:AddPort +diff --git a/setup.cfg b/setup.cfg +index 1bff735..f162417 100644 +--- a/setup.cfg ++++ b/setup.cfg +@@ -71,6 +71,8 @@ openstack.compute.v2 = + flavor_show = openstackclient.compute.v2.flavor:ShowFlavor + flavor_set = openstackclient.compute.v2.flavor:SetFlavor + flavor_unset = openstackclient.compute.v2.flavor:UnsetFlavor ++ pcidevice_list = openstackclient.compute.v2.wrs_pci:ListPciDevices ++ pcidevice_show = 
openstackclient.compute.v2.wrs_pci:ShowPciDevices + host_list = openstackclient.compute.v2.host:ListHost + host_set = openstackclient.compute.v2.host:SetHost + host_show = openstackclient.compute.v2.host:ShowHost +@@ -85,6 +87,7 @@ openstack.compute.v2 = + keypair_delete = openstackclient.compute.v2.keypair:DeleteKeypair + keypair_list = openstackclient.compute.v2.keypair:ListKeypair + keypair_show = openstackclient.compute.v2.keypair:ShowKeypair ++ providernet_pci_show = openstackclient.compute.v2.wrs_providernets:ShowProvidernetPci + server_add_fixed_ip = openstackclient.compute.v2.server:AddFixedIP + server_add_floating_ip = openstackclient.compute.v2.server:AddFloatingIP + server_add_port = openstackclient.compute.v2.server:AddPort +-- +1.8.3.1 + diff --git a/openstack/python-openstackclient/centos/patches/0001-US106901-Openstack-CLI-Adoption.patch b/openstack/python-openstackclient/centos/patches/0001-US106901-Openstack-CLI-Adoption.patch new file mode 100644 index 00000000..72e144fc --- /dev/null +++ b/openstack/python-openstackclient/centos/patches/0001-US106901-Openstack-CLI-Adoption.patch @@ -0,0 +1,80 @@ +From 9ab2fc77ed85aee4c6b63b0113e309136de419f0 Mon Sep 17 00:00:00 2001 +From: rpm-build +Date: Thu, 1 Feb 2018 10:20:53 -0500 +Subject: [PATCH 1/1] US106901 Openstack CLI Adoption: + Neutron/Cinder/Glance/Ceilometer + + CI partI - OSC cli support of "--cache-raw" and "--wait" i + options for "nova image-create" +--- + openstackclient/image/v2/image.py | 33 +++++++++++++++++++++++++++++++++ + 1 file changed, 33 insertions(+) + +diff --git a/openstackclient/image/v2/image.py b/openstackclient/image/v2/image.py +index c2c5c59..f5d4acf 100644 +--- a/openstackclient/image/v2/image.py ++++ b/openstackclient/image/v2/image.py +@@ -12,6 +12,12 @@ + # License for the specific language governing permissions and limitations + # under the License. + # ++# Copyright (c) 2013-2018 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + + """Image V2 Action Implementations""" + +@@ -253,6 +259,25 @@ class CreateImage(command.ShowOne): + dest=deadopt.replace('-', '_'), + help=argparse.SUPPRESS, + ) ++ #WRS extension ++ parser.add_argument( ++ "--cache-raw", ++ default=False, ++ action="store_true", ++ help=_("Convert the image to RAW in the background" ++ " and store it for fast access."), ++ ) ++ #WRS extension ++ parser.add_argument( ++ "--wait", ++ metavar="", ++ nargs='?', ++ type=int, ++ default=None, ++ const=0, ++ help=_("Wait for the convertion of the image to RAW" ++ " to finish before returning the image."), ++ ) + return parser + + def take_action(self, parsed_args): +@@ -302,6 +327,9 @@ class CreateImage(command.ShowOne): + kwargs['visibility'] = 'community' + if parsed_args.shared: + kwargs['visibility'] = 'shared' ++ #WRS extension ++ if parsed_args.cache_raw: ++ kwargs['cache_raw'] = 'True' + # Handle deprecated --owner option + project_arg = parsed_args.project + if parsed_args.owner: +@@ -361,6 +389,11 @@ class CreateImage(command.ShowOne): + with fp: + try: + image_client.images.upload(image.id, fp) ++ #WRS extension ++ # If cache_raw and wait options were chosen, wait until ++ # image is cached. 
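++            # (argparse note: --wait is declared with nargs='?', const=0 and
++            # default=None, so an absent flag leaves wait as None and skips
++            # this call, while a bare --wait passes 0; e.g.
++            #   openstack image create --file img.qcow2 --cache-raw --wait 300 demo
++            # wait_for_caching itself is not defined in this hunk and is
++            # assumed to be the glance client helper imported as gc_utils
++            # elsewhere in this module.)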
++ if parsed_args.cache_raw is True and parsed_args.wait is not None: ++ gc_utils.wait_for_caching(parsed_args.wait, image_client, image.id) + except Exception: + # If the upload fails for some reason attempt to remove the + # dangling queued image made by the create() call above but +-- +1.8.3.1 + diff --git a/openstack/python-openstackclient/centos/patches/0001-neutron-extensions.patch b/openstack/python-openstackclient/centos/patches/0001-neutron-extensions.patch new file mode 100644 index 00000000..ffe1886b --- /dev/null +++ b/openstack/python-openstackclient/centos/patches/0001-neutron-extensions.patch @@ -0,0 +1,2119 @@ +commit bf6c3b52f676888203082e25e404ed5e9503648e +Author: rpm-build +Date: Mon Feb 12 12:11:43 2018 -0500 + + neutron extensions + +diff --git a/openstackclient/network/v2/host.py b/openstackclient/network/v2/host.py +new file mode 100644 +index 0000000..f9dab8b +--- /dev/null ++++ b/openstackclient/network/v2/host.py +@@ -0,0 +1,191 @@ ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++# ++# Copyright (c) 2016 Wind River Systems, Inc. ++# ++# ++# ++# ++# ++ ++"""Host action implementations""" ++ ++from osc_lib.command import command ++from osc_lib import exceptions ++from osc_lib import utils ++from openstackclient.network import common ++from openstackclient.network import sdk_utils ++ ++_showhost_formatters = { ++ 'agents': utils.format_list_of_dicts ++} ++ ++_listhost_formatters = { ++ 'agents': len ++} ++ ++ ++def _get_columns(item): ++ column_map = {} ++ return sdk_utils.get_osc_show_columns_for_sdk_resource(item, column_map) ++ ++ ++def _get_attrs(client_manager, parsed_args): ++ attrs = {key: parsed_args[key] for key in ['availability', 'id', 'name'] ++ if key in parsed_args and parsed_args[key] is not None} ++ return attrs ++ ++ ++class ListHost(common.NetworkAndComputeLister): ++ """List host""" ++ ++ def update_parser_common(self, parser): ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ columns = ( ++ 'id', ++ 'name', ++ 'availability', ++ 'agents', ++ 'subnets', ++ 'routers', ++ 'ports' ++ ) ++ column_headers = ( ++ 'Id', ++ 'Name', ++ 'Availability', ++ 'Agents', ++ 'Subnets', ++ 'Routers', ++ 'Ports' ++ ) ++ ++ args = {} ++ ++ data = client.hosts(**args) ++ ++ return (column_headers, ++ (utils.get_item_properties( ++ s, columns, ++ formatters=_listhost_formatters, ++ ) for s in data)) ++ ++ def take_action_compute(self, client, parsed_args): ++ raise exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return ++ ++ ++class ShowHost(common.NetworkAndComputeShowOne): ++ """Show host details""" ++ ++ def update_parser_common(self, parser): ++ parser.add_argument( ++ 'host', ++ metavar="", ++ help=("ID or name of host to look up") ++ ) ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ obj = client.find_host(parsed_args.host, ignore_missing=False) ++ display_columns, columns = _get_columns(obj) ++ data = utils.get_item_properties(obj, columns, ++ 
formatters=_showhost_formatters) ++ return (display_columns, data) ++ ++ def take_action_compute(self, client, parsed_args): ++ raise exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return ++ ++ ++class CreateHost(common.NetworkAndComputeShowOne): ++ """Create a host record""" ++ ++ def update_parser_common(self, parser): ++ parser.add_argument('--availability', metavar="availability", ++ help='Set host availability status to up or down', ++ required=False) ++ parser.add_argument('--id', metavar="id", ++ help='Create a new host record', ++ required=False) ++ parser.add_argument('name', metavar='NAME', ++ help='System hostname of given host') ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ attrs = _get_attrs(self.app.client_manager, vars(parsed_args)) ++ ++ # the neutron equivalent command defaults to availability=down ++ # when not specified ++ if "availability" not in attrs: ++ attrs['availability'] = "down" ++ ++ obj = client.create_host(**attrs) ++ display_columns, columns = _get_columns(obj) ++ data = utils.get_item_properties(obj, columns, ++ formatters=_listhost_formatters) ++ return (display_columns, data) ++ ++ def take_action_compute(self, client, parsed_args): ++ raise exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return ++ ++ ++class DeleteHost(common.NetworkAndComputeDelete): ++ """Delete host""" ++ ++ # Used by base class to find resources in parsed_args. ++ resource = 'name' ++ r = None ++ ++ def update_parser_common(self, parser): ++ parser.add_argument('name', metavar='NAME', nargs="+", ++ help='System hostname of given host') ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ obj = client.find_host(self.r) ++ client.delete_host(obj) ++ ++ def take_action_compute(self, client, parsed_args): ++ raise exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return ++ ++ ++class UpdateHost(command.Command): ++ """Set host properties""" ++ ++ def get_parser(self, prog_name): ++ parser = super(UpdateHost, self).get_parser(prog_name) ++ parser.add_argument('--availability', metavar="availability", ++ help='Set host availability status to up or down', ++ required=False) ++ parser.add_argument('host', metavar='HOST', ++ help='System hostname of given host') ++ return parser ++ ++ def take_action(self, parsed_args): ++ client = self.app.client_manager.network ++ obj = client.find_host(parsed_args.host, ignore_missing=False) ++ attrs = _get_attrs(self.app.client_manager, vars(parsed_args)) ++ ++ if attrs == {}: ++ msg = "Nothing specified to be set" ++ raise exceptions.CommandError(msg) ++ client.update_host(obj, **attrs) ++ return +diff --git a/openstackclient/network/v2/network.py b/openstackclient/network/v2/network.py +index 4c1725c..9a8355b 100644 +--- a/openstackclient/network/v2/network.py ++++ b/openstackclient/network/v2/network.py +@@ -58,6 +58,7 @@ def _get_columns_network(item): + 'ipv6_address_scope_id': 'ipv6_address_scope', + 'tenant_id': 'project_id', + 'tags': 'tags', ++ 'qos': 'wrs-tm:qos', + } + return sdk_utils.get_osc_show_columns_for_sdk_resource(item, column_map) + +@@ -123,6 +124,8 @@ def _get_attrs_network(client_manager, parsed_args): + attrs['provider:physical_network'] = parsed_args.physical_network + if parsed_args.segmentation_id: + attrs['provider:segmentation_id'] = parsed_args.segmentation_id ++ if hasattr(parsed_args, 'wrs-tm:qos'): ++ attrs['wrs-tm:qos'] = getattr(parsed_args, 'wrs-tm:qos') 
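++    # ('wrs-tm:qos' contains '-' and ':' and is not a valid Python
++    # identifier, so the value stored under dest='wrs-tm:qos' has to be
++    # read back with hasattr()/getattr() rather than attribute access.)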
+ if parsed_args.qos_policy is not None: + network_client = client_manager.network + _qos_policy = network_client.find_qos_policy(parsed_args.qos_policy, +@@ -167,6 +170,11 @@ def _add_additional_network_options(parser): + dest='segmentation_id', + help=_("VLAN ID for VLAN networks or Tunnel ID for " + "GENEVE/GRE/VXLAN networks")) ++ parser.add_argument( ++ '--wrs-tm:qos', ++ metavar='', ++ dest='wrs-tm:qos', ++ help=_("wrs-tm:qos of the network")) + + + # TODO(sindhu): Use the SDK resource mapped attribute names once the +diff --git a/openstackclient/network/v2/port.py b/openstackclient/network/v2/port.py +index 9536fe8..4a29daf 100644 +--- a/openstackclient/network/v2/port.py ++++ b/openstackclient/network/v2/port.py +@@ -62,6 +62,9 @@ def _get_columns(item): + 'is_admin_state_up': 'admin_state_up', + 'is_port_security_enabled': 'port_security_enabled', + 'tenant_id': 'project_id', ++ 'mtu': 'wrs-binding:mtu', ++ 'vif_model': 'wrs-binding:vif_model', ++ 'mac_filtering': 'wrs-binding:mac_filtering', + } + return sdk_utils.get_osc_show_columns_for_sdk_resource(item, column_map) + +diff --git a/openstackclient/network/v2/portforwarding.py b/openstackclient/network/v2/portforwarding.py +new file mode 100644 +index 0000000..0f70e84 +--- /dev/null ++++ b/openstackclient/network/v2/portforwarding.py +@@ -0,0 +1,259 @@ ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++# ++# Copyright (c) 2016 Wind River Systems, Inc. 
++# ++# ++# ++# ++# ++ ++"""Port forwarding action implementations""" ++ ++import argparse ++from osc_lib.command import command ++from osc_lib import exceptions ++from osc_lib import utils ++from openstackclient.i18n import _ ++from openstackclient.identity import common as identity_common ++from openstackclient.network import common ++from openstackclient.network import sdk_utils ++ ++_formatters = {} ++ ++ ++def _get_columns(item): ++ column_map = {} ++ invisible_columns = ["name"] ++ return sdk_utils.get_osc_show_columns_for_sdk_resource(item, column_map, ++ invisible_columns) ++ ++ ++def _get_attrs(client_manager, parsed_args): ++ attr_names = ['inside_addr', 'inside_port', 'outside_port', ++ 'protocol', 'description', 'router_id'] ++ attrs = {key: parsed_args[key] for key in attr_names ++ if key in parsed_args and parsed_args[key] is not None} ++ ++ if 'project' in parsed_args and parsed_args["project"] is not None: ++ identity_client = client_manager.identity ++ project_id = identity_common.find_project( ++ identity_client, ++ parsed_args["project"] ++ ).id ++ # TODO(dtroyer): Remove tenant_id when we clean up the SDK refactor ++ attrs['tenant_id'] = project_id ++ attrs['project_id'] = project_id ++ ++ return attrs ++ ++ ++class ListPortforwarding(common.NetworkAndComputeLister): ++ """List portforwarding""" ++ ++ def update_parser_common(self, parser): ++ parser.add_argument( ++ '--project', ++ metavar='', ++ help=_("Owner's project (name or ID)") ++ ) ++ parser.add_argument( ++ '--router-id', ++ metavar='', ++ help=_("Router's ID") ++ ) ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ columns = ( ++ 'id', ++ 'router_id', ++ 'inside_addr', ++ 'inside_port', ++ 'outside_port', ++ 'protocol', ++ ) ++ column_headers = ( ++ 'ID', ++ 'Router ID', ++ 'Inside Address', ++ 'Inside Port', ++ 'Outside Port', ++ 'Protocol' ++ ) ++ ++ args = _get_attrs(self.app.client_manager, vars(parsed_args)) ++ ++ data = client.portforwardings(**args) ++ ++ return (column_headers, ++ (utils.get_item_properties( ++ s, columns, ++ formatters=_formatters, ++ ) for s in data)) ++ ++ def take_action_compute(self, client, parsed_args): ++ raise exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return ++ ++ ++class ShowPortforwarding(common.NetworkAndComputeShowOne): ++ """Show portforwarding details""" ++ ++ def update_parser_common(self, parser): ++ parser.add_argument( ++ 'portforwarding', ++ metavar="", ++ help=("Portforwarding to display (ID)") ++ ) ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ obj = client.find_portforwarding(parsed_args.portforwarding, ++ ignore_missing=False) ++ display_columns, columns = _get_columns(obj) ++ data = utils.get_item_properties(obj, columns, formatters=_formatters) ++ return (display_columns, data) ++ ++ def take_action_compute(self, client, parsed_args): ++ raise exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return ++ ++ ++class CreatePortforwarding(common.NetworkAndComputeShowOne): ++ """Create new portforwarding""" ++ ++ def update_parser_common(self, parser): ++ parser.add_argument( ++ '--inside-addr', ++ help='Private IP address.') ++ parser.add_argument( ++ '--inside_addr', ++ help=argparse.SUPPRESS) ++ parser.add_argument( ++ '--inside-port', ++ help='Private layer4 protocol port.') ++ parser.add_argument( ++ '--inside_port', ++ help=argparse.SUPPRESS) ++ parser.add_argument( ++ '--outside-port', ++ help='Public layer4 protocol port.') 
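++        # (Each dashed option in this parser is paired with a hidden
++        # underscore-spelled alias (help=argparse.SUPPRESS), presumably kept
++        # for compatibility with the older neutron CLI option spellings.)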
++ parser.add_argument( ++ '--outside_port', ++ help=argparse.SUPPRESS) ++ parser.add_argument( ++ '--protocol', ++ help='Layer4 protocol port number.') ++ parser.add_argument( ++ '--project', ++ metavar='', ++ help=_("Owner's project (name or ID)") ++ ) ++ identity_common.add_project_domain_option_to_parser(parser) ++ parser.add_argument( ++ '--description', ++ help='User specified text description') ++ parser.add_argument( ++ 'router_id', metavar='ROUTERID', ++ help='Router instance identifier.') ++ ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ attrs = _get_attrs(self.app.client_manager, vars(parsed_args)) ++ router = client.find_router(attrs['router_id'], ignore_missing=False) ++ attrs['router_id'] = router.id ++ obj = client.create_portforwarding(**attrs) ++ display_columns, columns = _get_columns(obj) ++ data = utils.get_item_properties(obj, columns, formatters=_formatters) ++ return (display_columns, data) ++ ++ def take_action_compute(self, client, parsed_args): ++ raise exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return ++ ++ ++class DeletePortforwarding(common.NetworkAndComputeDelete): ++ """Delete portforwarding""" ++ ++ # Used by base class to find resources in parsed_args. ++ resource = 'portforwarding' ++ r = None ++ ++ def update_parser_common(self, parser): ++ parser.add_argument( ++ 'portforwarding', ++ metavar="", ++ nargs="+", ++ help=("Portforwarding to delete (ID)") ++ ) ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ obj = client.find_portforwarding(self.r) ++ client.delete_portforwarding(obj) ++ ++ def take_action_compute(self, client, parsed_args): ++ raise exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return ++ ++ ++class UpdatePortforwarding(command.Command): ++ """Set portforwarding properties""" ++ ++ def get_parser(self, prog_name): ++ parser = super(UpdatePortforwarding, self).get_parser(prog_name) ++ parser.add_argument( ++ '--inside-addr', ++ help='Private IP address.') ++ parser.add_argument( ++ '--inside_addr', ++ help=argparse.SUPPRESS) ++ parser.add_argument( ++ '--inside-port', ++ help='Private layer4 protocol port.') ++ parser.add_argument( ++ '--inside_port', ++ help=argparse.SUPPRESS) ++ parser.add_argument( ++ '--outside-port', ++ help='Public layer4 protocol port.') ++ parser.add_argument( ++ '--outside_port', ++ help=argparse.SUPPRESS) ++ parser.add_argument( ++ '--protocol', ++ help='Layer4 protocol port number.') ++ parser.add_argument( ++ '--description', ++ help='User specified text description') ++ parser.add_argument( ++ 'portforwarding', metavar='PORTFORWARDING', ++ help='Portforwarding to delete (ID)') ++ return parser ++ ++ def take_action(self, parsed_args): ++ client = self.app.client_manager.network ++ obj = client.find_portforwarding(parsed_args.portforwarding, ++ ignore_missing=False) ++ attrs = _get_attrs(self.app.client_manager, vars(parsed_args)) ++ ++ if attrs == {}: ++ msg = "Nothing specified to be set" ++ raise exceptions.CommandError(msg) ++ client.update_portforwarding(obj, **attrs) ++ return +diff --git a/openstackclient/network/v2/providernet.py b/openstackclient/network/v2/providernet.py +new file mode 100644 +index 0000000..635eb64 +--- /dev/null ++++ b/openstackclient/network/v2/providernet.py +@@ -0,0 +1,302 @@ ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. 
You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++# ++# Copyright (c) 2016 Wind River Systems, Inc. ++# ++# ++# ++# ++# ++ ++"""Providernet action implementations""" ++ ++import argparse ++ ++from osc_lib.command import command ++from osc_lib import exceptions ++from osc_lib import utils ++from openstackclient.network import common ++from openstackclient.network import sdk_utils ++ ++ ++def add_boolean_argument(parser, name, **kwargs): ++ for keyword in ('metavar', 'choices'): ++ kwargs.pop(keyword, None) ++ default = kwargs.pop('default', argparse.SUPPRESS) ++ parser.add_argument( ++ name, ++ metavar='{True,False}', ++ choices=['True', 'true', 'False', 'false'], ++ default=default, ++ **kwargs) ++ ++ ++def _format_ranges(item): ++ item = utils.format_list_of_dicts(item) ++ # we want to remove some fields ++ # to match the output to neutron providernet-list ++ separator = ', ' ++ item = item.split(separator) ++ item = [s for s in item if "name" in s or "maximum" in s or "minimum" in s] ++ ++ return separator.join(item) ++ ++# the providernet list command does not display some values in the ranges field ++_filtered_ranges_formatters = { ++ 'ranges': _format_ranges ++} ++ ++_formatters = { ++ 'ranges': utils.format_list_of_dicts ++} ++ ++_net_list_on_providernet_formatters = { ++ 'vxlan': utils.format_dict ++} ++ ++ ++def _get_columns(item): ++ column_map = {} ++ return sdk_utils.get_osc_show_columns_for_sdk_resource(item, column_map) ++ ++ ++def _get_attrs(client_manager, parsed_args): ++ attrs = {key: parsed_args[key] for key in ++ ["name", "type", "vlan_transparent", "description", "mtu"] ++ if key in parsed_args} ++ if "mtu" in attrs and attrs["mtu"] is None: ++ del attrs["mtu"] ++ ++ return attrs ++ ++ ++class ListProvidernet(common.NetworkAndComputeLister): ++ """List providernets""" ++ ++ def update_parser_common(self, parser): ++ parser.add_argument( ++ '--type', ++ dest='type', ++ help='List all providernets of type') ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ columns = ( ++ 'id', ++ 'name', ++ 'type', ++ 'mtu', ++ 'ranges' ++ ) ++ column_headers = ( ++ 'ID', ++ 'Name', ++ 'Type', ++ 'MTU', ++ 'Ranges' ++ ) ++ ++ args = _get_attrs(self.app.client_manager, vars(parsed_args)) ++ ++ data = client.providernets(**args) ++ ++ return (column_headers, ++ (utils.get_item_properties( ++ s, columns, ++ formatters=_filtered_ranges_formatters, ++ ) for s in data)) ++ ++ def take_action_compute(self, client, parsed_args): ++ raise exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return ++ ++ ++class ShowProvidernet(common.NetworkAndComputeShowOne): ++ """Show providernet details""" ++ ++ def update_parser_common(self, parser): ++ parser.add_argument( ++ 'providernet', ++ metavar="", ++ help=("Providernet to display (name or ID)") ++ ) ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ obj = client.find_providernet(parsed_args.providernet, ++ ignore_missing=False) ++ display_columns, columns = _get_columns(obj) ++ data = utils.get_item_properties(obj, columns, formatters=_formatters) ++ return (display_columns, data) ++ ++ def 
take_action_compute(self, client, parsed_args): ++ raise exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return ++ ++ ++class CreateProvidernet(common.NetworkAndComputeShowOne): ++ """Create new providernet""" ++ ++ def update_parser_common(self, parser): ++ parser.add_argument( ++ '--description', ++ dest='description', ++ help='Set user-defined description field for a provider network') ++ parser.add_argument( ++ '--type', required=True, ++ dest='type', default='flat', ++ choices=['flat', 'vlan', 'vxlan'], ++ help='Set network type for a provider network') ++ parser.add_argument( ++ '--mtu', dest='mtu', type=int, ++ help='Maximum transmit unit on provider network') ++ add_boolean_argument( ++ parser, ++ '--vlan-transparent', ++ default='False', ++ help='Allow VLAN tagged packets on tenant networks') ++ parser.add_argument( ++ 'name', metavar='NAME', ++ help='Set user-defined name for a provider network') ++ ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ attrs = _get_attrs(self.app.client_manager, vars(parsed_args)) ++ obj = client.create_providernet(**attrs) ++ display_columns, columns = _get_columns(obj) ++ data = utils.get_item_properties(obj, columns, formatters=_formatters) ++ return (display_columns, data) ++ ++ def take_action_compute(self, client, parsed_args): ++ raise exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return ++ ++ ++class DeleteProvidernet(common.NetworkAndComputeDelete): ++ """Delete providernet""" ++ ++ # Used by base class to find resources in parsed_args. ++ resource = 'providernet' ++ r = None ++ ++ def update_parser_common(self, parser): ++ parser.add_argument( ++ 'providernet', ++ metavar="", ++ nargs="+", ++ help=("Providernet to delete (name or ID)") ++ ) ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ obj = client.find_providernet(self.r) ++ client.delete_providernet(obj) ++ ++ def take_action_compute(self, client, parsed_args): ++ raise exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return ++ ++ ++class UpdateProvidernet(command.Command): ++ """Set providernet properties""" ++ ++ def get_parser(self, prog_name): ++ parser = super(UpdateProvidernet, self).get_parser(prog_name) ++ parser.add_argument( ++ '--description', ++ dest='description', ++ help='Set user-defined description field for a provider network') ++ parser.add_argument( ++ '--mtu', dest='mtu', type=int, ++ help='Maximum transmit unit on provider network') ++ add_boolean_argument( ++ parser, ++ '--vlan-transparent', ++ help='Allow VLAN tagged packets on tenant networks') ++ parser.add_argument( ++ 'providernet', metavar='PROVIDERNET', ++ help='Set user-defined name for a provider network') ++ ++ return parser ++ ++ def take_action(self, parsed_args): ++ client = self.app.client_manager.network ++ ++ obj = client.find_providernet(parsed_args.providernet, ++ ignore_missing=False) ++ attrs = _get_attrs(self.app.client_manager, vars(parsed_args)) ++ if attrs == {}: ++ msg = "Nothing specified to be set" ++ raise exceptions.CommandError(msg) ++ ++ client.update_providernet(obj, **attrs) ++ return ++ ++ ++class NetListOnProvidernet(common.NetworkAndComputeLister): ++ """List the networks on a provider network.""" ++ ++ def update_parser_common(self, parser): ++ parser.add_argument( ++ 'providernet', ++ metavar="", ++ help=("Providernet to display (name or ID)") ++ ) ++ return parser ++ ++ def take_action_network(self, client, 
parsed_args): ++ obj = client.find_providernet(parsed_args.providernet, ++ ignore_missing=False) ++ providernet_id = obj.id ++ ++ columns = ( ++ 'id', ++ 'name', ++ 'vlan_id', ++ 'providernet_type', ++ 'segmentation_id', ++ 'vxlan', ++ ) ++ column_headers = ( ++ 'ID', ++ 'Name', ++ 'VLAN ID', ++ 'Providernet Type', ++ 'Segmentation ID', ++ 'Providernet Attributes' ++ ) ++ ++ args = {} ++ ++ # cheated a bit here, doing the same request as a providernet list, ++ # except using providernet_id/providernet-bindings ++ # as the base path. Openstack client framwork does not support what ++ # we need in terms of editing the address at the ++ # time of implementing this ++ data = client.net_list_on_providernet(providernet_id + ++ "/providernet-bindings", **args) ++ ++ return (column_headers, ++ (utils.get_item_properties( ++ s, columns, ++ formatters=_net_list_on_providernet_formatters, ++ ) for s in data)) ++ ++ def take_action_compute(self, client, parsed_args): ++ raise exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return +diff --git a/openstackclient/network/v2/providernet_connectivity_test.py b/openstackclient/network/v2/providernet_connectivity_test.py +new file mode 100644 +index 0000000..e879ec2 +--- /dev/null ++++ b/openstackclient/network/v2/providernet_connectivity_test.py +@@ -0,0 +1,220 @@ ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++# ++# Copyright (c) 2016 Wind River Systems, Inc. 
++# ++# ++# ++# ++# ++ ++"""Providernet connectivity test action implementations""" ++ ++import itertools ++from osc_lib import exceptions ++from osc_lib import utils ++from openstackclient.network import common ++from openstackclient.network import sdk_utils ++ ++_formatters = { ++} ++ ++ ++def _get_columns(item): ++ column_map = {} ++ invisible_columns = ["host_id", "host_name", "id", "message", "name", ++ "providernet_id", "providernet_name", ++ "segmentation_id", "status", "type", "updated_at"] ++ return sdk_utils.get_osc_show_columns_for_sdk_resource(item, column_map, ++ invisible_columns) ++ ++ ++def _get_attrs(client_manager, parsed_args, client): ++ attrs = {key: parsed_args[key] for key in ++ ["providernet", "host", "segmentation_id", "audit_uuid"] ++ if key in parsed_args and parsed_args[key] is not None} ++ if "providernet" in attrs: ++ providernet = client.find_providernet(attrs.pop("providernet"), ++ ignore_missing=False) ++ attrs["providernet_id"] = providernet.id ++ if "host" in attrs: ++ host = client.find_host(attrs.pop("host"), ignore_missing=False) ++ attrs["host_id"] = host.id ++ return attrs ++ ++ ++# copied from neutron client and modified to fit data formats here ++def _list_segments(segments): ++ """Takes a list of segments, and outputs them as a string""" ++ msg = ", ".join([str(x or "*") for x in sorted(segments)]) ++ return msg ++ ++ ++# copied from neutron client and modified to fit data formats here ++def _group_segmentation_id_list(segmentation_ids): ++ """Takes a list of integers and groups them into ranges""" ++ if len(segmentation_ids) < 1: ++ return "" ++ try: ++ sorted_segmentation_ids = sorted( ++ [int(segmentation_id) for segmentation_id in segmentation_ids] ++ ) ++ except Exception: ++ return _list_segments(segmentation_ids) ++ grouped_ids = [tuple(g[1]) for g in itertools.groupby( ++ enumerate(sorted_segmentation_ids), lambda (i, n): i - n ++ )] ++ msg = ", ".join( ++ [(("%s-%s" % (g[0][1], g[-1][1])) if g[0][1] != g[-1][1] ++ else ("%s" % g[0][1])) for g in grouped_ids] ++ ) ++ return msg ++ ++ ++# copied from neutron client and modified to fit data formats here ++def _format_connectivity_results(data): ++ """Takes a list of results, and formats them for reporting ++ ++ order assumed: providernet_id, providernet_name, type, host_name, ++ segmentation_id, status, message ++ """ ++ ++ parsed_results = {} ++ has_message = False ++ for result in data: ++ providernet_id = result.providernet_id ++ providernet_name = result.providernet_name ++ providernet_type = result.type ++ hostname = result.host_name ++ if hasattr(result, "segmentation_id"): ++ segmentation_id = result.segmentation_id ++ else: ++ segmentation_id = None ++ status = result.status ++ message = result.message ++ if message: ++ has_message = True ++ test = (providernet_id, providernet_name, providernet_type, ++ hostname, status, message) ++ if test not in parsed_results: ++ parsed_results[test] = [] ++ parsed_results[test].append(segmentation_id) ++ ++ formatted_results = [] ++ for test, results in parsed_results.iteritems(): ++ (providernet_id, providernet_name, providernet_type, ++ hostname, status, message) = test ++ formatted_segmentation_ids = \ ++ _group_segmentation_id_list(results) ++ ++ if has_message: ++ formatted_result = (providernet_id, ++ providernet_name, ++ providernet_type, ++ hostname, ++ formatted_segmentation_ids, ++ status, ++ message ++ ) ++ else: ++ formatted_result = (providernet_id, ++ providernet_name, ++ providernet_type, ++ hostname, ++ formatted_segmentation_ids, 
++ status ++ ) ++ formatted_results.append(formatted_result) ++ ++ return tuple(formatted_results), has_message ++ ++ ++class ListProvidernetConnectivityTest(common.NetworkAndComputeLister): ++ """List providernet connectivity tests""" ++ ++ def update_parser_common(self, parser): ++ parser.add_argument( ++ '--audit-uuid', ++ dest='audit_uuid', default=None, ++ help='List only for this audit-uuid') ++ parser.add_argument( ++ '--providernet', ++ dest='providernet', default=None, ++ help='List only for this providernet') ++ parser.add_argument( ++ '--host', ++ dest='host', default=None, ++ help='List only for this host') ++ parser.add_argument( ++ '--segmentation-id', ++ dest='segmentation_id', default=None, ++ help='List only for this segmentation-id') ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ column_headers = ( ++ 'Providernet ID', ++ 'Providernet Name', ++ 'Type', ++ 'Host Name', ++ 'Segmentation IDs', ++ 'Status' ++ ) ++ args = _get_attrs(self.app.client_manager, vars(parsed_args), client) ++ ++ data = client.providernet_connectivity_tests(**args) ++ formatted_data, has_message = _format_connectivity_results(data) ++ ++ # replicate behavior from neutron command: ++ # dont show message column if it does not exist ++ if has_message: ++ column_headers = column_headers + ('Message',) ++ ++ return (column_headers, ++ formatted_data) ++ ++ def take_action_compute(self, client, parsed_args): ++ raise exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return ++ ++ ++class CreateProvidernetConnectivityTest(common.NetworkAndComputeShowOne): ++ """Create new providernet connectivity test""" ++ ++ def update_parser_common(self, parser): ++ parser.add_argument( ++ '--providernet', ++ dest='providernet', default=None, ++ help=('Schedule audit for given providernet')) ++ parser.add_argument( ++ '--host', ++ dest='host', default=None, ++ help='Schedule audits for all providernets on host') ++ parser.add_argument( ++ '--segmentation-id', ++ dest='segmentation_id', default=None, ++ help='Schedule for this segmentation ID') ++ ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ attrs = _get_attrs(self.app.client_manager, vars(parsed_args), client) ++ obj = client.create_providernet_connectivity_test(**attrs) ++ display_columns, columns = _get_columns(obj) ++ data = utils.get_item_properties(obj, columns, formatters=_formatters) ++ return (display_columns, data) ++ ++ def take_action_compute(self, client, parsed_args): ++ raise exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return +diff --git a/openstackclient/network/v2/providernet_range.py b/openstackclient/network/v2/providernet_range.py +new file mode 100644 +index 0000000..8748c4c +--- /dev/null ++++ b/openstackclient/network/v2/providernet_range.py +@@ -0,0 +1,272 @@ ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++# ++# Copyright (c) 2016 Wind River Systems, Inc. 
++# ++# ++# ++# ++# ++ ++"""Providernet range action implementations""" ++ ++import argparse ++from osc_lib.command import command ++from osc_lib import exceptions ++from osc_lib import utils ++from openstackclient.i18n import _ ++from openstackclient.identity import common as identity_common ++from openstackclient.network import common ++from openstackclient.network import sdk_utils ++ ++_formatters = { ++ 'vxlan': utils.format_dict ++} ++ ++ ++def _get_columns(item): ++ column_map = {} ++ return sdk_utils.get_osc_show_columns_for_sdk_resource(item, column_map) ++ ++ ++def _get_attrs(client_manager, parsed_args): ++ attrs = {key: parsed_args[key] for key in ++ ["shared", "description", "name", "group", "ttl", "port", "mode", ++ "providernet_id", "providernet_range_id"] ++ if key in parsed_args} ++ if "range" in parsed_args and parsed_args["range"] is not None: ++ attrs["maximum"] = parsed_args["range"]["maximum"] ++ attrs["minimum"] = parsed_args["range"]["minimum"] ++ if "port" in attrs and attrs["port"] is None: ++ del attrs["port"] ++ if "ttl" in attrs and attrs["ttl"] is None: ++ del attrs["ttl"] ++ if "group" in attrs and attrs["group"] is None: ++ del attrs["group"] ++ if "mode" in attrs and attrs["mode"] is None: ++ del attrs["mode"] ++ if 'project' in parsed_args and parsed_args["project"] is not None: ++ identity_client = client_manager.identity ++ project_id = identity_common.find_project( ++ identity_client, ++ parsed_args["project"] ++ ).id ++ # TODO(dtroyer): Remove tenant_id when we clean up the SDK refactor ++ attrs['tenant_id'] = project_id ++ attrs['project_id'] = project_id ++ ++ return attrs ++ ++ ++def _id_range_value(value): ++ range_list = value.split('-') ++ if (len(range_list) != 2): ++ raise argparse.ArgumentTypeError( ++ 'Expecting MIN_VALUE-MAX_VALUE in range list') ++ return {'minimum': range_list[0], ++ 'maximum': range_list[1]} ++ ++ ++class ListProvidernetRange(common.NetworkAndComputeLister): ++ """List providernet ranges""" ++ ++ def update_parser_common(self, parser): ++ parser.add_argument( ++ '--project', ++ metavar='', ++ help=_("Owner's project (name or ID)") ++ ) ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ columns = ( ++ 'id', ++ 'name', ++ 'providernet_name', ++ 'providernet_type', ++ 'minimum', ++ 'maximum', ++ 'vxlan' ++ ) ++ column_headers = ( ++ 'ID', ++ 'Name', ++ 'Providernet', ++ 'Type', ++ 'Minimum', ++ 'Maximum', ++ 'Attributes' ++ ) ++ ++ args = _get_attrs(self.app.client_manager, vars(parsed_args)) ++ ++ data = client.providernet_ranges(**args) ++ ++ return (column_headers, ++ (utils.get_item_properties( ++ s, columns, ++ formatters=_formatters, ++ ) for s in data)) ++ ++ def take_action_compute(self, client, parsed_args): ++ raise exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return ++ ++ ++class ShowProvidernetRange(common.NetworkAndComputeShowOne): ++ """Show providernet range details""" ++ ++ def update_parser_common(self, parser): ++ parser.add_argument( ++ 'providernet_range', ++ metavar="", ++ help=("Providernet range to display (name or ID)") ++ ) ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ obj = client.find_providernet_range(parsed_args.providernet_range, ++ ignore_missing=False) ++ display_columns, columns = _get_columns(obj) ++ data = utils.get_item_properties(obj, columns, formatters=_formatters) ++ return (display_columns, data) ++ ++ def take_action_compute(self, client, parsed_args): ++ raise 
exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return ++ ++ ++class CreateProvidernetRange(common.NetworkAndComputeShowOne): ++ """Create new providernet range""" ++ ++ def update_parser_common(self, parser): ++ parser.add_argument( ++ '--shared', ++ dest='shared', action='store_true', default=False, ++ help=('Set whether a provider network segmentation id range ' ++ 'may be shared between tenants')) ++ parser.add_argument( ++ '--project', ++ metavar='', ++ help=_("Owner's project (name or ID)") ++ ) ++ identity_common.add_project_domain_option_to_parser(parser) ++ parser.add_argument( ++ '--description', ++ dest='description', ++ help='Set user-defined description field for a provider network') ++ parser.add_argument( ++ '--range', metavar='MIN_VALUE-MAX_VALUE', required=True, ++ dest='range', type=_id_range_value, ++ help='Segmentation id value range') ++ parser.add_argument( ++ '--name', required=True, ++ dest='name', ++ help=('Set user-defined name for a provider network ' ++ 'segmentation id range')) ++ parser.add_argument( ++ '--group', ++ dest='group', ++ help='Multicast IP addresses for VXLAN endpoints') ++ parser.add_argument( ++ '--ttl', dest='ttl', type=int, ++ help='Time-to-live value for VXLAN provider networks') ++ parser.add_argument( ++ '--port', dest='port', type=int, ++ help=('Destination UDP port value to use for ' ++ 'VXLAN provider networks')) ++ parser.add_argument( ++ '--mode', ++ dest='mode', default='dynamic', ++ choices=['dynamic', 'static', 'evpn'], ++ help='Set vxlan learning mode') ++ parser.add_argument( ++ 'providernet_id', metavar='PROVIDERNET', ++ help='Provider network this segmentation id range belongs to') ++ ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ attrs = _get_attrs(self.app.client_manager, vars(parsed_args)) ++ obj = client.find_providernet(parsed_args.providernet_id, ++ ignore_missing=False) ++ attrs["providernet_id"] = obj.id ++ obj = client.create_providernet_range(**attrs) ++ display_columns, columns = _get_columns(obj) ++ data = utils.get_item_properties(obj, columns, formatters=_formatters) ++ return (display_columns, data) ++ ++ def take_action_compute(self, client, parsed_args): ++ raise exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return ++ ++ ++class DeleteProvidernetRange(common.NetworkAndComputeDelete): ++ """Delete providernet range""" ++ ++ # Used by base class to find resources in parsed_args. 
++ resource = 'providernet_range' ++ r = None ++ ++ def update_parser_common(self, parser): ++ parser.add_argument( ++ 'providernet_range', ++ metavar="", ++ nargs="+", ++ help=("Providernet to Delete (name or ID)") ++ ) ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ obj = client.find_providernet_range(self.r) ++ client.delete_providernet_range(obj) ++ ++ def take_action_compute(self, client, parsed_args): ++ raise exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return ++ ++ ++class UpdateProvidernetRange(command.Command): ++ """Set providernet range properties""" ++ ++ def get_parser(self, prog_name): ++ parser = super(UpdateProvidernetRange, self).get_parser(prog_name) ++ parser.add_argument( ++ '--description', ++ dest='description', ++ help='Set user-defined description field for a provider network') ++ parser.add_argument( ++ '--range', metavar='MIN_VALUE-MAX_VALUE', ++ dest='range', type=_id_range_value, ++ help='Segmentation id value range') ++ parser.add_argument( ++ 'providernet_range_id', metavar='PROVIDERNET_RANGE', ++ help='Name or ID of this providernet range') ++ return parser ++ ++ def take_action(self, parsed_args): ++ client = self.app.client_manager.network ++ obj = client.find_providernet_range(parsed_args.providernet_range_id, ++ ignore_missing=False) ++ ++ attrs = _get_attrs(self.app.client_manager, vars(parsed_args)) ++ del attrs['providernet_range_id'] ++ ++ if attrs == {}: ++ msg = "Nothing specified to be set" ++ raise exceptions.CommandError(msg) ++ client.update_providernet_range(obj, **attrs) ++ return +diff --git a/openstackclient/network/v2/providernet_type.py b/openstackclient/network/v2/providernet_type.py +new file mode 100644 +index 0000000..2abd8c4 +--- /dev/null ++++ b/openstackclient/network/v2/providernet_type.py +@@ -0,0 +1,60 @@ ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++# ++# Copyright (c) 2016 Wind River Systems, Inc. 
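The `--range` parser above (`_id_range_value`) accepts any two '-'-separated tokens and returns them as strings. A slightly stricter variant, purely illustrative since the patch deliberately keeps neutron's permissive behavior, would validate that both bounds are integers and correctly ordered:

    import argparse


    def id_range_value(value):
        try:
            minimum, maximum = (int(v) for v in value.split('-'))
        except ValueError:
            raise argparse.ArgumentTypeError(
                'Expecting MIN_VALUE-MAX_VALUE in range list')
        if minimum > maximum:
            raise argparse.ArgumentTypeError(
                'MIN_VALUE must not exceed MAX_VALUE')
        return {'minimum': minimum, 'maximum': maximum}


    print(id_range_value('100-200'))  # -> {'minimum': 100, 'maximum': 200}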
++# ++# ++# ++# ++# ++ ++"""Providernet type action implementations""" ++ ++from osc_lib import exceptions ++from osc_lib import utils ++from openstackclient.network import common ++from openstackclient.network import sdk_utils ++ ++_formatters = { ++} ++ ++ ++class ListProvidernetType(common.NetworkAndComputeLister): ++ """List providernet types""" ++ ++ def update_parser_common(self, parser): ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ columns = ( ++ 'type', ++ 'description' ++ ) ++ column_headers = ( ++ 'Type', ++ 'Description' ++ ) ++ ++ args = {} ++ ++ data = client.providernet_types(**args) ++ ++ return (column_headers, ++ (utils.get_item_properties( ++ s, columns, ++ formatters=_formatters, ++ ) for s in data)) ++ ++ def take_action_compute(self, client, parsed_args): ++ raise exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return +diff --git a/openstackclient/network/v2/qos.py b/openstackclient/network/v2/qos.py +new file mode 100644 +index 0000000..dc5ab93 +--- /dev/null ++++ b/openstackclient/network/v2/qos.py +@@ -0,0 +1,253 @@ ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++# ++# Copyright (c) 2016 Wind River Systems, Inc. ++# ++# ++# ++# ++# ++ ++"""QOS action implementations""" ++ ++import argparse ++from osc_lib.command import command ++from osc_lib import exceptions ++from osc_lib import utils ++from openstackclient.i18n import _ ++from openstackclient.identity import common as identity_common ++from openstackclient.network import common ++from openstackclient.network import sdk_utils ++ ++_formatters = {} ++ ++TYPE_QOS_DSCP = "dscp" ++TYPE_QOS_RATELIMIT = "ratelimit" ++TYPE_QOS_SCHEDULER = "scheduler" ++ ++ ++def _get_columns(item): ++ column_map = {} ++ return sdk_utils.get_osc_show_columns_for_sdk_resource(item, column_map) ++ ++ ++def _args2body_policies(qos, type, policies): ++ qos['policies'][type] = {} ++ for parg in policies: ++ if parg.count('=') != 1: ++ raise exceptions.CommandError("Policies must be specified" ++ " in the format a=b") ++ args = parg.split('=') ++ qos['policies'][type][args[0]] = args[1] ++ ++ ++def _get_attrs(client_manager, parsed_args): ++ attrs = {key: parsed_args[key] for key in ++ ["name", "description"] if key in parsed_args} ++ if "description" in attrs and attrs["description"] is None: ++ del attrs["description"] ++ attrs['policies'] = {} ++ if "dscp" in parsed_args and parsed_args["dscp"] is not None: ++ _args2body_policies(attrs, TYPE_QOS_DSCP, ++ parsed_args['dscp']) ++ ++ if "ratelimit" in parsed_args and parsed_args["ratelimit"] is not None: ++ _args2body_policies(attrs, TYPE_QOS_RATELIMIT, ++ parsed_args['ratelimit']) ++ ++ if "scheduler" in parsed_args and parsed_args["scheduler"] is not None: ++ _args2body_policies(attrs, TYPE_QOS_SCHEDULER, ++ parsed_args['scheduler']) ++ ++ if "project" in parsed_args and parsed_args["project"] is not None: ++ identity_client = client_manager.identity ++ project_id = identity_common.find_project( ++ identity_client, ++ 
parsed_args["project"] ++ ).id ++ # TODO(dtroyer): Remove tenant_id when we clean up the SDK refactor ++ attrs['tenant_id'] = project_id ++ attrs['project_id'] = project_id ++ ++ return attrs ++ ++ ++def _id_range_value(value): ++ range_list = value.split('-') ++ if (len(range_list) != 2): ++ raise argparse.ArgumentTypeError( ++ 'Expecting MIN_VALUE-MAX_VALUE in range list') ++ return {'minimum': range_list[0], ++ 'maximum': range_list[1]} ++ ++ ++class ListQos(common.NetworkAndComputeLister): ++ """List qos""" ++ ++ def update_parser_common(self, parser): ++ parser.add_argument( ++ '--project', ++ metavar='', ++ help=_("Owner's project (name or ID)") ++ ) ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ columns = ( ++ 'id', ++ 'name', ++ 'description' ++ ) ++ column_headers = ( ++ 'ID', ++ 'Name', ++ 'Description' ++ ) ++ ++ args = _get_attrs(self.app.client_manager, vars(parsed_args)) ++ ++ data = client.qoses(**args) ++ ++ return (column_headers, ++ (utils.get_item_properties( ++ s, columns, ++ formatters=_formatters, ++ ) for s in data)) ++ ++ def take_action_compute(self, client, parsed_args): ++ raise exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return ++ ++ ++class ShowQos(common.NetworkAndComputeShowOne): ++ """Show qos details""" ++ ++ def update_parser_common(self, parser): ++ parser.add_argument( ++ 'qos', ++ metavar="", ++ help=("Qos to display (name or ID)") ++ ) ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ obj = client.find_qos(parsed_args.qos, ignore_missing=False) ++ display_columns, columns = _get_columns(obj) ++ data = utils.get_item_properties(obj, columns, formatters=_formatters) ++ return (display_columns, data) ++ ++ def take_action_compute(self, client, parsed_args): ++ raise exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return ++ ++ ++class CreateQos(common.NetworkAndComputeShowOne): ++ """Create new qos""" ++ ++ def update_parser_common(self, parser): ++ parser.add_argument('--name', metavar='NAME', ++ help='Name of QoS policy') ++ parser.add_argument( ++ '--project', ++ metavar='', ++ help=_("Owner's project (name or ID)") ++ ) ++ identity_common.add_project_domain_option_to_parser(parser) ++ parser.add_argument('--description', metavar='DESCRIPTION', ++ help="Description of QoS policy", required=False) ++ parser.add_argument('--dscp', metavar="POLICY", ++ help='Set of policies for dscp', ++ nargs='+', required=False) ++ parser.add_argument('--ratelimit', metavar="POLICY", ++ help='Set of policies for ratelimit', ++ nargs='+', required=False) ++ parser.add_argument('--scheduler', metavar="POLICY", ++ help='Set of policies for scheduler', ++ nargs='+', required=False) ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ attrs = _get_attrs(self.app.client_manager, vars(parsed_args)) ++ obj = client.create_qos(**attrs) ++ display_columns, columns = _get_columns(obj) ++ data = utils.get_item_properties(obj, columns, formatters=_formatters) ++ return (display_columns, data) ++ ++ def take_action_compute(self, client, parsed_args): ++ raise exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return ++ ++ ++class DeleteQos(common.NetworkAndComputeDelete): ++ """Delete qos""" ++ ++ # Used by base class to find resources in parsed_args. 
++ resource = 'qos' ++ r = None ++ ++ def update_parser_common(self, parser): ++ parser.add_argument( ++ 'qos', ++ metavar="", ++ nargs="+", ++ help=("QOS to delete (name or ID)") ++ ) ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ obj = client.find_qos(self.r) ++ client.delete_qos(obj) ++ ++ def take_action_compute(self, client, parsed_args): ++ raise exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return ++ ++ ++class UpdateQos(command.Command): ++ """Set qos properties""" ++ ++ def get_parser(self, prog_name): ++ parser = super(UpdateQos, self).get_parser(prog_name) ++ parser.add_argument('--name', metavar='NAME', ++ help='Name of QoS policy') ++ parser.add_argument('--description', metavar='DESCRIPTION', ++ help="Description of QoS policy", required=False) ++ parser.add_argument('--dscp', metavar="POLICY", ++ help='Set of policies for dscp', ++ nargs='+', required=False) ++ parser.add_argument('--ratelimit', metavar="POLICY", ++ help='Set of policies for ratelimit', ++ nargs='+', required=False) ++ parser.add_argument('--scheduler', metavar="POLICY", ++ help='Set of policies for scheduler', ++ nargs='+', required=False) ++ parser.add_argument( ++ 'qos', ++ metavar="", ++ help=("QOS to delete (name or ID)") ++ ) ++ return parser ++ ++ def take_action(self, parsed_args): ++ client = self.app.client_manager.network ++ obj = client.find_qos(parsed_args.qos, ignore_missing=False) ++ attrs = _get_attrs(self.app.client_manager, vars(parsed_args)) ++ ++ if attrs == {}: ++ msg = "Nothing specified to be set" ++ raise exceptions.CommandError(msg) ++ client.update_qos(obj, **attrs) ++ return +diff --git a/openstackclient/network/v2/router.py b/openstackclient/network/v2/router.py +index 4f90853..4eb5908 100644 +--- a/openstackclient/network/v2/router.py ++++ b/openstackclient/network/v2/router.py +@@ -68,6 +68,7 @@ def _get_columns(item): + 'is_ha': 'ha', + 'is_distributed': 'distributed', + 'is_admin_state_up': 'admin_state_up', ++ 'host': 'wrs-net:host', + } + return sdk_utils.get_osc_show_columns_for_sdk_resource(item, column_map) + +diff --git a/openstackclient/network/v2/setting.py b/openstackclient/network/v2/setting.py +new file mode 100644 +index 0000000..d404325 +--- /dev/null ++++ b/openstackclient/network/v2/setting.py +@@ -0,0 +1,183 @@ ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++# ++# Copyright (c) 2016 Wind River Systems, Inc. 
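The qos commands above fold repeated `--dscp`/`--ratelimit`/`--scheduler` values into a nested policies dict via `_args2body_policies`. A self-contained sketch of the same parsing (the helper name and sample keys here are ours):

    def parse_policies(qos_type, policy_args):
        policies = {}
        for parg in policy_args:
            if parg.count('=') != 1:
                raise ValueError("Policies must be specified in the format a=b")
            key, value = parg.split('=')
            policies[key] = value
        return {qos_type: policies}


    print(parse_policies('dscp', ['dscp=16', 'mode=untrusted']))
    # -> {'dscp': {'dscp': '16', 'mode': 'untrusted'}}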
++# ++# ++# ++# ++# ++ ++"""Settings action implementations""" ++ ++from osc_lib.command import command ++from osc_lib import exceptions ++from osc_lib import utils ++from openstackclient.i18n import _ ++from openstackclient.identity import common as identity_common ++from openstackclient.network import common ++from openstackclient.network import sdk_utils ++ ++_formatters = {} ++ ++ ++def _get_columns(item): ++ column_map = {"id": "project_id"} ++ invisible_columns = ["name"] ++ return sdk_utils.get_osc_show_columns_for_sdk_resource(item, column_map, ++ invisible_columns) ++ ++ ++def _get_attrs(client_manager, parsed_args): ++ attrs = {key: parsed_args[key] for key in ["mac_filtering"] ++ if key in parsed_args} ++ ++ if 'project' in parsed_args and parsed_args["project"] is not None: ++ identity_client = client_manager.identity ++ project_id = identity_common.find_project( ++ identity_client, ++ parsed_args["project"] ++ ).id ++ attrs['project_id'] = project_id ++ ++ return attrs ++ ++ ++class ListSetting(common.NetworkAndComputeLister): ++ """List settings of all projects who have non-default setting values""" ++ ++ def update_parser_common(self, parser): ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ columns = ( ++ 'mac_filtering', ++ 'project_id' ++ ) ++ column_headers = ( ++ 'Mac Filtering', ++ 'Project ID' ++ ) ++ ++ args = {} ++ ++ data = client.settings(**args) ++ ++ return (column_headers, ++ (utils.get_item_properties( ++ s, columns, ++ formatters=_formatters, ++ ) for s in data)) ++ ++ def take_action_compute(self, client, parsed_args): ++ raise exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return ++ ++ ++class ShowSetting(common.NetworkAndComputeShowOne): ++ """Show settings of a given project""" ++ ++ def update_parser_common(self, parser): ++ parser.add_argument( ++ '--project', ++ metavar='', ++ help=_("Owner's project (name or ID)"), ++ required=False ++ ) ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ client = self.app.client_manager.network ++ # if no project id is specified, operate on current project ++ args = _get_attrs(self.app.client_manager, vars(parsed_args)) ++ if not "project_id" in args: ++ args["project_id"] = client.find_tenant().project_id ++ project_id = args["project_id"] ++ ++ obj = client.find_setting(project_id, ignore_missing=False) ++ ++ display_columns, columns = _get_columns(obj) ++ data = utils.get_item_properties(obj, columns, formatters=_formatters) ++ return (display_columns, data) ++ ++ def take_action_compute(self, client, parsed_args): ++ raise exceptions.CommandError("This command needs access to" ++ " a network endpoint.") ++ return ++ ++ ++# this one uses NetworkAndComputeCommand because settings can be deleted ++# without a project id ++class DeleteSetting(common.NetworkAndComputeCommand): ++ """Delete setting""" ++ ++ def update_parser_common(self, parser): ++ parser.add_argument( ++ '--project', ++ metavar='', ++ help=_("Owner's project (name or ID)"), ++ required=False ++ ) ++ return parser ++ ++ def take_action_network(self, client, parsed_args): ++ client = self.app.client_manager.network ++ # if no project id is specified, operate on current project ++ args = _get_attrs(self.app.client_manager, vars(parsed_args)) ++ if not "project_id" in args: ++ args["project_id"] = client.find_tenant().project_id ++ project_id = args["project_id"] ++ ++ client.delete_setting(project_id) ++ return ++ ++ def take_action_compute(self, client, 
parsed_args):
++        raise exceptions.CommandError("This command needs "
++                                      "access to a network endpoint.")
++        return
++
++
++class UpdateSetting(command.Command):
++    """Set setting properties"""
++
++    def get_parser(self, prog_name):
++        parser = super(UpdateSetting, self).get_parser(prog_name)
++        parser.add_argument(
++            '--project',
++            metavar='<project>',
++            help=_("Owner's project (name or ID)"),
++            required=False
++        )
++        parser.add_argument('--mac-filtering', metavar='mac_filtering',
++                            help="Enable/Disable source MAC filtering"
++                                 " on all ports",
++                            required=True)
++        return parser
++
++    def take_action(self, parsed_args):
++        client = self.app.client_manager.network
++        # if no project id is specified, operate on current project
++        args = _get_attrs(self.app.client_manager, vars(parsed_args))
++        if "project_id" not in args:
++            args["project_id"] = client.find_tenant().project_id
++        project_id = args["project_id"]
++        del args['project_id']
++
++        client.find_setting(project_id, ignore_missing=False)
++
++        if args == {}:
++            msg = "Nothing specified to be set"
++            raise exceptions.CommandError(msg)
++
++        client.update_setting(project_id, **args)
++        return
+diff --git a/openstackclient/network/v2/subnet.py b/openstackclient/network/v2/subnet.py
+index b96dff7..864c832 100644
+--- a/openstackclient/network/v2/subnet.py
++++ b/openstackclient/network/v2/subnet.py
+@@ -13,7 +13,9 @@
+
+ """Subnet action implementations"""
+
++import argparse
+ import copy
++import functools
+ import logging
+
+ from osc_lib.cli import parseractions
+@@ -27,6 +29,10 @@ from openstackclient.network import sdk_utils
+ from openstackclient.network.v2 import _tag
+
+
++MIN_VLAN_TAG = 1
++MAX_VLAN_TAG = 4094
++
++
+ LOG = logging.getLogger(__name__)
+
+
+@@ -233,9 +239,36 @@ def _get_attrs(client_manager, parsed_args, is_create=True):
+         attrs['service_types'] = parsed_args.service_types
+     if parsed_args.description is not None:
+         attrs['description'] = parsed_args.description
++
++    # wrs extensions
++    if ('vlan_id' in parsed_args and
++            parsed_args.vlan_id is not None):
++        attrs['wrs-net:vlan_id'] = parsed_args.vlan_id
++    if ('network_type' in parsed_args and
++            parsed_args.network_type is not None):
++        attrs['wrs-provider:network_type'] = parsed_args.network_type
++    if ('physical_network' in parsed_args and
++            parsed_args.physical_network is not None):
++        attrs['wrs-provider:physical_network'] = parsed_args.physical_network
++    if ('segmentation_id' in parsed_args and
++            parsed_args.segmentation_id is not None):
++        attrs['wrs-provider:segmentation_id'] = parsed_args.segmentation_id
++    if ('unmanaged' in parsed_args and
++            parsed_args.unmanaged is not False):
++        attrs['wrs-net:managed'] = False
++
+     return attrs
+
+
++def _check_vlan_id(value):
++    vlan_id = int(value)
++    if vlan_id < MIN_VLAN_TAG or vlan_id > MAX_VLAN_TAG:
++        raise argparse.ArgumentTypeError(
++            "VLAN ID must be between {} and {}".format(
++                MIN_VLAN_TAG, MAX_VLAN_TAG))
++    return vlan_id
++
++
+ # TODO(abhiraut): Use the SDK resource mapped attribute names once the
+ # OSC minimum requirements include SDK 1.0.
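`_check_vlan_id` above doubles as an argparse `type=` callable, so invalid values are rejected at parse time rather than in `take_action`. A quick standalone demonstration with the same constants (this snippet is illustrative, not part of the patch):

    import argparse

    MIN_VLAN_TAG = 1
    MAX_VLAN_TAG = 4094


    def check_vlan_id(value):
        vlan_id = int(value)
        if vlan_id < MIN_VLAN_TAG or vlan_id > MAX_VLAN_TAG:
            raise argparse.ArgumentTypeError(
                "VLAN ID must be between {} and {}".format(
                    MIN_VLAN_TAG, MAX_VLAN_TAG))
        return vlan_id


    parser = argparse.ArgumentParser()
    parser.add_argument('--vlan-id', type=check_vlan_id)
    print(parser.parse_args(['--vlan-id', '100']).vlan_id)  # -> 100
    # parser.parse_args(['--vlan-id', '5000']) exits with:
    #   error: argument --vlan-id: VLAN ID must be between 1 and 4094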
+ class CreateSubnet(command.ShowOne): +@@ -337,6 +370,27 @@ class CreateSubnet(command.ShowOne): + metavar='', + help=_("Set subnet description") + ) ++ parser.add_argument( ++ '--wrs-net:vlan_id', ++ dest='vlan_id', ++ type=_check_vlan_id, ++ help='VLAN ID of the subnet') ++ parser.add_argument( ++ '--wrs-provider:network_type', ++ dest='network_type', ++ help='Provider network type (admin only)') ++ parser.add_argument( ++ '--wrs-provider:physical_network', ++ dest='physical_network', ++ help='Provider network name (admin only)') ++ parser.add_argument( ++ '--wrs-provider:segmentation_id', ++ dest='segmentation_id', ++ help='Provider network segmentation id (admin only)') ++ parser.add_argument( ++ '--unmanaged', ++ action='store_true', ++ help='Disable IP allocation on this subnet') + _get_common_parse_arguments(parser) + _tag.add_tag_option_to_parser_for_create(parser, _('subnet')) + return parser +@@ -497,14 +551,16 @@ class ListSubnet(command.Lister): + _tag.get_tag_filtering_args(parsed_args, filters) + data = network_client.subnets(**filters) + +- headers = ('ID', 'Name', 'Network', 'Subnet') +- columns = ('id', 'name', 'network_id', 'cidr') ++ headers = ('ID', 'Name', 'Network', 'Subnet', 'Allocation Pools', ++ 'WRS-Net:VLAN ID') ++ columns = ('id', 'name', 'network_id', 'cidr', 'allocation_pools', ++ 'wrs-net:vlan_id') + if parsed_args.long: + headers += ('Project', 'DHCP', 'Name Servers', +- 'Allocation Pools', 'Host Routes', 'IP Version', ++ 'Host Routes', 'IP Version', + 'Gateway', 'Service Types', 'Tags') + columns += ('project_id', 'is_dhcp_enabled', 'dns_nameservers', +- 'allocation_pools', 'host_routes', 'ip_version', ++ 'host_routes', 'ip_version', + 'gateway_ip', 'service_types', 'tags') + + return (headers, +diff --git a/openstackclient/tests/unit/network/v2/fakes.py b/openstackclient/tests/unit/network/v2/fakes.py +index eadab58..0d53fc5 100644 +--- a/openstackclient/tests/unit/network/v2/fakes.py ++++ b/openstackclient/tests/unit/network/v2/fakes.py +@@ -1244,7 +1244,8 @@ class FakeSecurityGroupRule(object): + + @staticmethod + def get_security_group_rules(security_group_rules=None, count=2): +- """Get an iterable Mock object with a list of faked security group rules. ++ """Get an iterable Mock object with a list of faked security group ++ rules. + + If security group rules list is provided, then initialize the Mock + object with the list. Otherwise create one. +@@ -1298,6 +1299,7 @@ class FakeSubnet(object): + 'subnetpool_id': None, + 'description': 'subnet-description-' + uuid.uuid4().hex, + 'tags': [], ++ 'wrs-net:vlan_id': '1', + } + + # Overwrite default attributes. 
+diff --git a/openstackclient/tests/unit/network/v2/test_subnet.py b/openstackclient/tests/unit/network/v2/test_subnet.py +index c96d680..a4c8914 100644 +--- a/openstackclient/tests/unit/network/v2/test_subnet.py ++++ b/openstackclient/tests/unit/network/v2/test_subnet.py +@@ -126,6 +126,7 @@ class TestCreateSubnet(TestSubnet): + 'service_types', + 'subnetpool_id', + 'tags', ++ 'wrs-net:vlan_id', + ) + + data = ( +@@ -147,6 +148,7 @@ class TestCreateSubnet(TestSubnet): + utils.format_list(_subnet.service_types), + _subnet.subnetpool_id, + utils.format_list(_subnet.tags), ++ getattr(_subnet, 'wrs-net:vlan_id', ''), + ) + + data_subnet_pool = ( +@@ -168,6 +170,7 @@ class TestCreateSubnet(TestSubnet): + utils.format_list(_subnet_from_pool.service_types), + _subnet_from_pool.subnetpool_id, + utils.format_list(_subnet.tags), ++ getattr(_subnet_from_pool, 'wrs-net:vlan_id', ''), + ) + + data_ipv6 = ( +@@ -189,6 +192,7 @@ class TestCreateSubnet(TestSubnet): + utils.format_list(_subnet_ipv6.service_types), + _subnet_ipv6.subnetpool_id, + utils.format_list(_subnet.tags), ++ getattr(_subnet_ipv6, 'wrs-net:vlan_id', ''), + ) + + def setUp(self): +@@ -589,12 +593,13 @@ class TestListSubnet(TestSubnet): + 'Name', + 'Network', + 'Subnet', ++ 'Allocation Pools', ++ 'WRS-Net:VLAN ID', + ) + columns_long = columns + ( + 'Project', + 'DHCP', + 'Name Servers', +- 'Allocation Pools', + 'Host Routes', + 'IP Version', + 'Gateway', +@@ -609,6 +614,8 @@ class TestListSubnet(TestSubnet): + subnet.name, + subnet.network_id, + subnet.cidr, ++ subnet_v2._format_allocation_pools(subnet.allocation_pools), ++ getattr(subnet, 'wrs-net:vlan_id', ''), + )) + + data_long = [] +@@ -618,10 +625,11 @@ class TestListSubnet(TestSubnet): + subnet.name, + subnet.network_id, + subnet.cidr, ++ subnet_v2._format_allocation_pools(subnet.allocation_pools), ++ getattr(subnet, 'wrs-net:vlan_id', ''), + subnet.tenant_id, + subnet.enable_dhcp, + utils.format_list(subnet.dns_nameservers), +- subnet_v2._format_allocation_pools(subnet.allocation_pools), + utils.format_list(subnet.host_routes), + subnet.ip_version, + subnet.gateway_ip, +@@ -1093,6 +1101,7 @@ class TestShowSubnet(TestSubnet): + 'service_types', + 'subnetpool_id', + 'tags', ++ 'wrs-net:vlan_id', + ) + + data = ( +@@ -1114,6 +1123,7 @@ class TestShowSubnet(TestSubnet): + utils.format_list(_subnet.service_types), + _subnet.subnetpool_id, + utils.format_list(_subnet.tags), ++ getattr(_subnet, 'wrs-net:vlan_id', ''), + ) + + def setUp(self): +diff --git a/setup.cfg b/setup.cfg +index 0afa479..1bff735 100644 +--- a/setup.cfg ++++ b/setup.cfg +@@ -398,6 +398,39 @@ openstack.network.v2 = + subnet_pool_set = openstackclient.network.v2.subnet_pool:SetSubnetPool + subnet_pool_show = openstackclient.network.v2.subnet_pool:ShowSubnetPool + subnet_pool_unset = openstackclient.network.v2.subnet_pool:UnsetSubnetPool ++ providernet_list = openstackclient.network.v2.providernet:ListProvidernet ++ providernet_show = openstackclient.network.v2.providernet:ShowProvidernet ++ providernet_create = openstackclient.network.v2.providernet:CreateProvidernet ++ providernet_update = openstackclient.network.v2.providernet:UpdateProvidernet ++ providernet_delete = openstackclient.network.v2.providernet:DeleteProvidernet ++ providernet_range_list = openstackclient.network.v2.providernet_range:ListProvidernetRange ++ providernet_range_show = openstackclient.network.v2.providernet_range:ShowProvidernetRange ++ providernet_range_create = openstackclient.network.v2.providernet_range:CreateProvidernetRange ++ 
providernet_range_update = openstackclient.network.v2.providernet_range:UpdateProvidernetRange ++ providernet_range_delete = openstackclient.network.v2.providernet_range:DeleteProvidernetRange ++ qos_list = openstackclient.network.v2.qos:ListQos ++ qos_show = openstackclient.network.v2.qos:ShowQos ++ qos_create = openstackclient.network.v2.qos:CreateQos ++ qos_update = openstackclient.network.v2.qos:UpdateQos ++ qos_delete = openstackclient.network.v2.qos:DeleteQos ++ portforwarding_list = openstackclient.network.v2.portforwarding:ListPortforwarding ++ portforwarding_show = openstackclient.network.v2.portforwarding:ShowPortforwarding ++ portforwarding_create = openstackclient.network.v2.portforwarding:CreatePortforwarding ++ portforwarding_update = openstackclient.network.v2.portforwarding:UpdatePortforwarding ++ portforwarding_delete = openstackclient.network.v2.portforwarding:DeletePortforwarding ++ setting_list = openstackclient.network.v2.setting:ListSetting ++ setting_show = openstackclient.network.v2.setting:ShowSetting ++ setting_update = openstackclient.network.v2.setting:UpdateSetting ++ setting_delete = openstackclient.network.v2.setting:DeleteSetting ++ net_host_list = openstackclient.network.v2.host:ListHost ++ net_host_show = openstackclient.network.v2.host:ShowHost ++ net_host_create = openstackclient.network.v2.host:CreateHost ++ net_host_update = openstackclient.network.v2.host:UpdateHost ++ net_host_delete = openstackclient.network.v2.host:DeleteHost ++ providernet_type_list = openstackclient.network.v2.providernet_type:ListProvidernetType ++ providernet_connectivity_test_list = openstackclient.network.v2.providernet_connectivity_test:ListProvidernetConnectivityTest ++ providernet_connectivity_test_schedule = openstackclient.network.v2.providernet_connectivity_test:CreateProvidernetConnectivityTest ++ net_list_on_providernet = openstackclient.network.v2.providernet:NetListOnProvidernet + openstack.object_store.v1 = + object_store_account_set = openstackclient.object.v1.account:SetAccount + object_store_account_show = openstackclient.object.v1.account:ShowAccount diff --git a/openstack/python-openstackclient/centos/patches/0002-US101470-Openstackclient-implementation-of-novaclien.patch b/openstack/python-openstackclient/centos/patches/0002-US101470-Openstackclient-implementation-of-novaclien.patch new file mode 100644 index 00000000..35b657ea --- /dev/null +++ b/openstack/python-openstackclient/centos/patches/0002-US101470-Openstackclient-implementation-of-novaclien.patch @@ -0,0 +1,273 @@ +From 8fe2869469d3b853e24140e4ffb487d672232b1b Mon Sep 17 00:00:00 2001 +From: rpm-build +Date: Fri, 5 Jan 2018 15:01:24 -0500 +Subject: [PATCH] US101470 OSC equivalence of nova cli with WRS extension + +CI part2 +--- + openstackclient/compute/v2/server.py | 37 +++++++++++ + openstackclient/compute/v2/server_group.py | 99 ++++++++++++++++++++++++++++-- + setup.cfg | 2 + + 3 files changed, 132 insertions(+), 6 deletions(-) + +diff --git a/openstackclient/compute/v2/server.py b/openstackclient/compute/v2/server.py +index 151c678..c7ffab9 100644 +--- a/openstackclient/compute/v2/server.py ++++ b/openstackclient/compute/v2/server.py +@@ -12,6 +12,13 @@ + # License for the specific language governing permissions and limitations + # under the License. + # ++# Copyright (c) 2013-2018 Wind River Systems, Inc. 
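The setup.cfg block above is how all of these commands become visible to OSC: each `name = module:Class` line under `[entry_points]` is a setuptools entry point that cliff's command manager loads on demand. A rough sketch of the discovery mechanism, assuming the patched package is installed (illustrative only):

    import pkg_resources

    # Underscores in entry-point names map to spaces on the command line,
    # so 'providernet_list' backs "openstack providernet list".
    for ep in pkg_resources.iter_entry_points('openstack.network.v2'):
        if ep.name == 'providernet_list':
            cmd_class = ep.load()
            print(cmd_class)  # the ListProvidernet command class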
++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# ++ + + """Compute v2 Server action implementations""" + +@@ -1714,6 +1721,36 @@ class ResumeServer(command.Command): + ).resume() + + ++# WRS:extension ++class ScaleServer(command.Command): ++ _description = _("Scale server properties") ++ ++ def get_parser(self, prog_name): ++ parser = super(ScaleServer, self).get_parser(prog_name) ++ parser.add_argument( ++ 'server', ++ metavar='', ++ help=_('Name or ID of server'), ++ ) ++ parser.add_argument( ++ 'resource', ++ metavar='', ++ help=_('Resource to scale. Currently only "cpu"'), ++ ) ++ parser.add_argument( ++ 'direction', ++ metavar='', ++ help=_('Direction to scale ("up" or "down")'), ++ ) ++ return parser ++ ++ def take_action(self, parsed_args): ++ compute_client = self.app.client_manager.compute ++ compute_client.servers.scale(parsed_args.server, ++ parsed_args.resource, ++ parsed_args.direction) ++ ++ + class SetServer(command.Command): + _description = _("Set server properties") + +diff --git a/openstackclient/compute/v2/server_group.py b/openstackclient/compute/v2/server_group.py +index c6e2161..551ed8e 100644 +--- a/openstackclient/compute/v2/server_group.py ++++ b/openstackclient/compute/v2/server_group.py +@@ -12,6 +12,12 @@ + # License for the specific language governing permissions and limitations + # under the License. + # ++# Copyright (c) 2013-2018 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + + """Compute v2 Server Group action implementations""" + +@@ -22,6 +28,7 @@ from osc_lib import exceptions + from osc_lib import utils + + from openstackclient.i18n import _ ++from novaclient import api_versions + + + LOG = logging.getLogger(__name__) +@@ -35,12 +42,40 @@ _formatters = { + + def _get_columns(info): + columns = list(info.keys()) +- if 'metadata' in columns: +- # NOTE(RuiChen): The metadata of server group is always empty since API +- # compatible, so hide it in order to avoid confusion. +- columns.remove('metadata') + return tuple(sorted(columns)) + ++#WRS:extension ++def _extract_metadata(args): ++ metadata = {} ++ for server_group in args.metadata: ++ for metadatum in server_group: ++ if metadatum.find('=') > -1: ++ (key, value) = metadatum.split('=', 1) ++ else: ++ key = metadatum ++ value = None ++ metadata[key] = value ++ return metadata ++ ++# WRS:extension - type checking for key-value pair ++# returns text instead of tuple like above ++def _key_value_type(text): ++ try: ++ (k, v) = text.split('=', 1) ++ return text ++ except ValueError: ++ msg = "%r is not in the format of key=value" % text ++ raise argparse.ArgumentTypeError(msg) ++ ++ ++# WRS:extension - type checking for CSV key-value pairs ++def _csv_key_value_type(text): ++ try: ++ return map(_key_value_type, text.split(',')) ++ except Exception as e: ++ raise exceptions.CommandError( ++ "Invalid csv key-value argument '%s'. 
%s" % (text, unicode(e))) ++ + + class CreateServerGroup(command.ShowOne): + _description = _("Create a new server group.") +@@ -52,6 +87,15 @@ class CreateServerGroup(command.ShowOne): + metavar='', + help=_("New server group name") + ) ++ # WRS:extension ++ parser.add_argument( ++ '--metadata', ++ metavar='', ++ action='append', ++ default=[], ++ type=_csv_key_value_type, ++ help=_("Metadata for this server group") ++ ) + parser.add_argument( + '--policy', + metavar='', +@@ -66,17 +110,21 @@ class CreateServerGroup(command.ShowOne): + def take_action(self, parsed_args): + compute_client = self.app.client_manager.compute + info = {} ++ ++ #WRS:extension ++ meta = _extract_metadata(parsed_args) ++ compute_client.api_version = api_versions.APIVersion("2.53") + server_group = compute_client.server_groups.create( + name=parsed_args.name, ++ metadata=meta, + policies=[parsed_args.policy]) +- info.update(server_group._info) + ++ info.update(server_group._info) + columns = _get_columns(info) + data = utils.get_dict_properties(info, columns, + formatters=_formatters) + return columns, data + +- + class DeleteServerGroup(command.Command): + _description = _("Delete existing server group(s).") + +@@ -133,8 +181,13 @@ class ListServerGroup(command.Lister): + + def take_action(self, parsed_args): + compute_client = self.app.client_manager.compute ++ ++ #WRS:extension ++ compute_client.api_version = api_versions.APIVersion("2.53") ++ + data = compute_client.server_groups.list(parsed_args.all_projects) + ++ #WRS:extension list project_id, user_id fields and metadata fields + if parsed_args.long: + column_headers = columns = ( + 'ID', +@@ -143,12 +196,14 @@ class ListServerGroup(command.Lister): + 'Members', + 'Project Id', + 'User Id', ++ 'Metadata', + ) + else: + column_headers = columns = ( + 'ID', + 'Name', + 'Policies', ++ 'Metadata', + ) + + return (column_headers, +@@ -175,11 +230,43 @@ class ShowServerGroup(command.ShowOne): + + def take_action(self, parsed_args): + compute_client = self.app.client_manager.compute ++ #WRS:extension ++ compute_client.api_version = api_versions.APIVersion("2.53") ++ + group = utils.find_resource(compute_client.server_groups, + parsed_args.server_group) ++ + info = {} + info.update(group._info) + columns = _get_columns(info) + data = utils.get_dict_properties(info, columns, + formatters=_formatters) + return columns, data ++ ++#WRS:extension ++class SetServerGroupMetadata(command.Command): ++ _description = _("Set metadata of a server group") ++ ++ def get_parser(self, prog_name): ++ parser = super(SetServerGroupMetadata, self).get_parser(prog_name) ++ parser.add_argument( ++ 'id', ++ metavar='', ++ help=_("Unique ID of the server group") ++ ) ++ parser.add_argument( ++ 'metadata', ++ metavar='', ++ action='append', ++ default=[], ++ type=_csv_key_value_type, ++ help=_("Metadata to set/unset") ++ ) ++ return parser ++ ++ def take_action(self, parsed_args): ++ compute_client = self.app.client_manager.compute ++ metadata = _extract_metadata(parsed_args) ++ compute_client.api_version = api_versions.APIVersion("2.53") ++ compute_client.server_groups.set_metadata(parsed_args.id, metadata) ++ +diff --git a/setup.cfg b/setup.cfg +index f162417..627ff87 100644 +--- a/setup.cfg ++++ b/setup.cfg +@@ -110,6 +110,7 @@ openstack.compute.v2 = + server_resize = openstackclient.compute.v2.server:ResizeServer + server_restore = openstackclient.compute.v2.server:RestoreServer + server_resume = openstackclient.compute.v2.server:ResumeServer ++ server_scale = 
openstackclient.compute.v2.server:ScaleServer + server_set = openstackclient.compute.v2.server:SetServer + server_shelve = openstackclient.compute.v2.server:ShelveServer + server_show = openstackclient.compute.v2.server:ShowServer +@@ -129,6 +130,7 @@ openstack.compute.v2 = + server_group_create = openstackclient.compute.v2.server_group:CreateServerGroup + server_group_delete = openstackclient.compute.v2.server_group:DeleteServerGroup + server_group_list = openstackclient.compute.v2.server_group:ListServerGroup ++ server_group_set_metadata = openstackclient.compute.v2.server_group:SetServerGroupMetadata + server_group_show = openstackclient.compute.v2.server_group:ShowServerGroup + server_image_create = openstackclient.compute.v2.server_image:CreateServerImage + usage_list = openstackclient.compute.v2.usage:ListUsage +-- +1.8.3.1 + diff --git a/openstack/python-openstackclient/centos/patches/0002-US106901-Openstack-CLI-Adoption.patch b/openstack/python-openstackclient/centos/patches/0002-US106901-Openstack-CLI-Adoption.patch new file mode 100644 index 00000000..e4598a6f --- /dev/null +++ b/openstack/python-openstackclient/centos/patches/0002-US106901-Openstack-CLI-Adoption.patch @@ -0,0 +1,330 @@ +From c4f2db9901b54217e1737987a19a8b48209f40ab Mon Sep 17 00:00:00 2001 +From: rpm-build +Date: Mon, 5 Feb 2018 14:06:32 -0500 +Subject: [PATCH 1/1] US106901 Openstack CLI Adoption: Cinder/Glance/Ceilometer + + CI partII: OSC equivalence of cinder cli with WRS extension +--- + openstackclient/volume/v1/volume.py | 58 ++++++++++++++++++++++++++++ + openstackclient/volume/v1/volume_snapshot.py | 26 +++++++++++++ + openstackclient/volume/v2/volume.py | 57 +++++++++++++++++++++++++++ + openstackclient/volume/v2/volume_snapshot.py | 27 +++++++++++++ + setup.cfg | 11 +++++- + 5 files changed, 178 insertions(+), 1 deletion(-) + +diff --git a/openstackclient/volume/v1/volume.py b/openstackclient/volume/v1/volume.py +index b29429e..85caf3e 100644 +--- a/openstackclient/volume/v1/volume.py ++++ b/openstackclient/volume/v1/volume.py +@@ -12,6 +12,13 @@ + # License for the specific language governing permissions and limitations + # under the License. + # ++# Copyright (c) 2013-2018 Wind River Systems, Inc. 
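The server-group `--metadata` option above accepts repeated, comma-separated key=value groups via `_csv_key_value_type` and flattens them with `_extract_metadata`. Those helpers lean on Python 2 behavior (`map()` returning a list, `unicode`); a Python 3-safe sketch of the same parsing, with helper names of our choosing:

    def csv_key_value(text):
        # One --metadata argument may carry several comma-separated pairs.
        pairs = text.split(',')
        for pair in pairs:
            if '=' not in pair:
                raise ValueError("%r is not in the format of key=value" % pair)
        return pairs


    def extract_metadata(groups):
        metadata = {}
        for group in groups:
            for item in group:
                key, _, value = item.partition('=')
                metadata[key] = value or None
        return metadata


    print(extract_metadata([csv_key_value("foo=bar,baz=1")]))
    # -> {'foo': 'bar', 'baz': '1'}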
++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# ++ + + """Volume v1 Volume action implementations""" + +@@ -25,6 +32,7 @@ from osc_lib import utils + import six + + from openstackclient.i18n import _ ++from cinderclient import utils as cinder_utils + + + LOG = logging.getLogger(__name__) +@@ -624,3 +632,53 @@ class UnsetVolume(command.Command): + volume.id, + parsed_args.property, + ) ++ ++ ++# WRS extension ++class ExportVolume(command.Command): ++ _description = _("Export volume to a file.") ++ ++ def get_parser(self, prog_name): ++ parser = super(ExportVolume, self).get_parser(prog_name) ++ parser.add_argument( ++ 'volume', ++ metavar='', ++ help=_('Name or ID of the volume to export'), ++ ) ++ return parser ++ ++ def take_action(self, parsed_args): ++ volume_client = self.app.client_manager.volume ++ volume = utils.find_resource( ++ volume_client.volumes, parsed_args.volume) ++ volume_client.volumes.export(volume) ++ ++ ++# WRS extension ++class ImportVolume(command.Command): ++ _description = _("Import a volume from a file.") ++ ++ def get_parser(self, prog_name): ++ parser = super(ImportVolume, self).get_parser(prog_name) ++ parser.add_argument( ++ 'file_name', ++ metavar='', ++ help=_('Name of the file to import'), ++ ) ++ return parser ++ ++ def take_action(self, parsed_args): ++ volume_client = self.app.client_manager.volume ++ ++ # Parse the volume ID from the filename which is in this format: ++ # volume--.tgz ++ if(parsed_args.file_name.find("volume-") != 0 or ++ parsed_args.file_name.rfind(".tgz") == -1 or ++ len(parsed_args.file_name) < 28): ++ raise exceptions.CommandError( ++ "Invalid filename - volume files must have the following format: " ++ "volume--.tgz") ++ ++ volume_id = parsed_args.file_name[7:-20] ++ volume = cinder_utils.find_volume(volume_client, volume_id) ++ volume_client.volumes.import_volume(volume, parsed_args.file_name) +diff --git a/openstackclient/volume/v1/volume_snapshot.py b/openstackclient/volume/v1/volume_snapshot.py +index 3e83da5..a4d44a8 100644 +--- a/openstackclient/volume/v1/volume_snapshot.py ++++ b/openstackclient/volume/v1/volume_snapshot.py +@@ -12,6 +12,12 @@ + # License for the specific language governing permissions and limitations + # under the License. + # ++# Copyright (c) 2013-2018 Wind River Systems, Inc. 
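`ImportVolume` above recovers the volume ID purely from the filename: the slice `[7:-20]` drops the 7-character "volume-" prefix and a 20-character suffix, and the `len(...) < 28` check guarantees at least one ID character remains. The suffix length implies a trailing "-YYYYMMDD-HHMMSS.tgz" timestamp, which is an inference from the slice arithmetic rather than something the patch states. A standalone sketch:

    def parse_volume_id(file_name):
        if (not file_name.startswith("volume-")
                or not file_name.endswith(".tgz")
                or len(file_name) < 28):
            raise ValueError("volume files must have the format "
                             "volume-<id>-<timestamp>.tgz")
        # 7 chars of "volume-" + 20 chars of "-<timestamp>.tgz"
        return file_name[7:-20]


    name = "volume-3fa85f64-5717-4562-b3fc-2c963f66afa6-20180101-120000.tgz"
    print(parse_volume_id(name))  # -> 3fa85f64-5717-4562-b3fc-2c963f66afa6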
++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + + """Volume v1 Snapshot action implementations""" + +@@ -25,6 +31,7 @@ from osc_lib import utils + import six + + from openstackclient.i18n import _ ++from cinderclient import utils as cinder_utils + + + LOG = logging.getLogger(__name__) +@@ -352,3 +359,22 @@ class UnsetVolumeSnapshot(command.Command): + snapshot.id, + parsed_args.property, + ) ++ ++# WRS extension ++class ExportVolumeSnapshot(command.Command): ++ _description = _("Export a snapshot to a file.") ++ ++ def get_parser(self, prog_name): ++ parser = super(ExportVolumeSnapshot, self).get_parser(prog_name) ++ parser.add_argument( ++ 'snapshot', ++ metavar='', ++ help=_('Name or ID of the snapshot to export'), ++ ) ++ return parser ++ ++ def take_action(self, parsed_args): ++ volume_client = self.app.client_manager.volume ++ snapshot = cinder_utils.find_resource( ++ volume_client.volume_snapshots, parsed_args.snapshot) ++ volume_client.volume_snapshots.export(snapshot) +diff --git a/openstackclient/volume/v2/volume.py b/openstackclient/volume/v2/volume.py +index 61f846b..c2ea7a4 100644 +--- a/openstackclient/volume/v2/volume.py ++++ b/openstackclient/volume/v2/volume.py +@@ -11,6 +11,12 @@ + # License for the specific language governing permissions and limitations + # under the License. + # ++# Copyright (c) 2013-2018 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + + """Volume V2 Volume action implementations""" + +@@ -25,6 +31,7 @@ import six + + from openstackclient.i18n import _ + from openstackclient.identity import common as identity_common ++from cinderclient import utils as cinder_utils + + + LOG = logging.getLogger(__name__) +@@ -784,3 +791,53 @@ class UnsetVolume(command.Command): + if result > 0: + raise exceptions.CommandError(_("One or more of the " + "unset operations failed")) ++ ++ ++# WRS extension ++class ExportVolume(command.Command): ++ _description = _("Export volume to a file.") ++ ++ def get_parser(self, prog_name): ++ parser = super(ExportVolume, self).get_parser(prog_name) ++ parser.add_argument( ++ 'volume', ++ metavar='', ++ help=_('Name or ID of the volume to export'), ++ ) ++ return parser ++ ++ def take_action(self, parsed_args): ++ volume_client = self.app.client_manager.volume ++ volume = cinder_utils.find_resource( ++ volume_client.volumes, parsed_args.volume) ++ volume_client.volumes.export(volume) ++ ++ ++# WRS extension ++class ImportVolume(command.Command): ++ _description = _("Import a volume from a file.") ++ ++ def get_parser(self, prog_name): ++ parser = super(ImportVolume, self).get_parser(prog_name) ++ parser.add_argument( ++ 'file_name', ++ metavar='', ++ help=_('Name of the file to import'), ++ ) ++ return parser ++ ++ def take_action(self, parsed_args): ++ volume_client = self.app.client_manager.volume ++ ++ # Parse the volume ID from the filename which is in this format: ++ # volume--.tgz ++ if(parsed_args.file_name.find("volume-") != 0 or ++ parsed_args.file_name.rfind(".tgz") == -1 or ++ len(parsed_args.file_name) < 28): ++ raise exceptions.CommandError( ++ "Invalid filename - volume files must have the following format: " ++ "volume--.tgz") ++ ++ volume_id = parsed_args.file_name[7:-20] ++ volume = cinder_utils.find_volume(volume_client, volume_id) ++ volume_client.volumes.import_volume(volume, parsed_args.file_name) +diff --git a/openstackclient/volume/v2/volume_snapshot.py b/openstackclient/volume/v2/volume_snapshot.py +index fe96941..704ef75 100644 +--- 
a/openstackclient/volume/v2/volume_snapshot.py ++++ b/openstackclient/volume/v2/volume_snapshot.py +@@ -11,6 +11,12 @@ + # License for the specific language governing permissions and limitations + # under the License. + # ++# Copyright (c) 2013-2018 Wind River Systems, Inc. ++# SPDX-License-Identifier: Apache-2.0 ++# ++# ++# ++# + + """Volume v2 snapshot action implementations""" + +@@ -25,6 +31,7 @@ import six + + from openstackclient.i18n import _ + from openstackclient.identity import common as identity_common ++from cinderclient import utils as cinder_utils + + + LOG = logging.getLogger(__name__) +@@ -437,3 +444,23 @@ class UnsetVolumeSnapshot(command.Command): + snapshot.id, + parsed_args.property, + ) ++ ++ ++# WRS extension ++class ExportVolumeSnapshot(command.Command): ++ _description = _("Export a snapshot to a file.") ++ ++ def get_parser(self, prog_name): ++ parser = super(ExportVolumeSnapshot, self).get_parser(prog_name) ++ parser.add_argument( ++ 'snapshot', ++ metavar='', ++ help=_('Name or ID of the snapshot to export'), ++ ) ++ return parser ++ ++ def take_action(self, parsed_args): ++ volume_client = self.app.client_manager.volume ++ snapshot = cinder_utils.find_resource( ++ volume_client.volume_snapshots, parsed_args.snapshot) ++ volume_client.volume_snapshots.export(snapshot) +diff --git a/setup.cfg b/setup.cfg +index 627ff87..11f7261 100644 +--- a/setup.cfg ++++ b/setup.cfg +@@ -468,6 +468,8 @@ openstack.volume.v1 = + snapshot_unset = openstackclient.volume.v1.snapshot:UnsetSnapshot + volume_create = openstackclient.volume.v1.volume:CreateVolume + volume_delete = openstackclient.volume.v1.volume:DeleteVolume ++ volume_export = openstackclient.volume.v1.volume:ExportVolume ++ volume_import = openstackclient.volume.v1.volume:ImportVolume + volume_list = openstackclient.volume.v1.volume:ListVolume + volume_migrate = openstackclient.volume.v1.volume:MigrateVolume + volume_set = openstackclient.volume.v1.volume:SetVolume +@@ -480,6 +482,7 @@ openstack.volume.v1 = + volume_backup_show = openstackclient.volume.v1.backup:ShowVolumeBackup + volume_snapshot_create = openstackclient.volume.v1.volume_snapshot:CreateVolumeSnapshot + volume_snapshot_delete = openstackclient.volume.v1.volume_snapshot:DeleteVolumeSnapshot ++ volume_snapshot_export = openstackclient.volume.v1.volume_snapshot:ExportVolumeSnapshot + volume_snapshot_list = openstackclient.volume.v1.volume_snapshot:ListVolumeSnapshot + volume_snapshot_set = openstackclient.volume.v1.volume_snapshot:SetVolumeSnapshot + volume_snapshot_show = openstackclient.volume.v1.volume_snapshot:ShowVolumeSnapshot +@@ -529,7 +532,9 @@ openstack.volume.v2 = + snapshot_show = openstackclient.volume.v2.snapshot:ShowSnapshot + snapshot_unset = openstackclient.volume.v2.snapshot:UnsetSnapshot + volume_create = openstackclient.volume.v2.volume:CreateVolume +- volume_delete = openstackclient.volume.v2.volume:DeleteVolume ++ volume_delete = openstackclient.volume.v2.volume:DeleteVolume ++ volume_export = openstackclient.volume.v2.volume:ExportVolume ++ volume_import = openstackclient.volume.v2.volume:ImportVolume + volume_list = openstackclient.volume.v2.volume:ListVolume + volume_migrate = openstackclient.volume.v2.volume:MigrateVolume + volume_set = openstackclient.volume.v2.volume:SetVolume +@@ -545,6 +550,7 @@ openstack.volume.v2 = + volume_host_set = openstackclient.volume.v2.volume_host:SetVolumeHost + volume_snapshot_create = openstackclient.volume.v2.volume_snapshot:CreateVolumeSnapshot + volume_snapshot_delete = 
openstackclient.volume.v2.volume_snapshot:DeleteVolumeSnapshot ++ volume_snapshot_export = openstackclient.volume.v2.volume_snapshot:ExportVolumeSnapshot + volume_snapshot_list = openstackclient.volume.v2.volume_snapshot:ListVolumeSnapshot + volume_snapshot_set = openstackclient.volume.v2.volume_snapshot:SetVolumeSnapshot + volume_snapshot_show = openstackclient.volume.v2.volume_snapshot:ShowVolumeSnapshot +@@ -584,6 +590,8 @@ openstack.volume.v3 = + consistency_group_snapshot_show = openstackclient.volume.v2.consistency_group_snapshot:ShowConsistencyGroupSnapshot + volume_create = openstackclient.volume.v2.volume:CreateVolume + volume_delete = openstackclient.volume.v2.volume:DeleteVolume ++ volume_export = openstackclient.volume.v2.volume:ExportVolume ++ volume_import = openstackclient.volume.v2.volume:ImportVolume + volume_list = openstackclient.volume.v2.volume:ListVolume + volume_migrate = openstackclient.volume.v2.volume:MigrateVolume + volume_set = openstackclient.volume.v2.volume:SetVolume +@@ -598,6 +606,7 @@ openstack.volume.v3 = + volume_host_set = openstackclient.volume.v2.volume_host:SetVolumeHost + volume_snapshot_create = openstackclient.volume.v2.volume_snapshot:CreateVolumeSnapshot + volume_snapshot_delete = openstackclient.volume.v2.volume_snapshot:DeleteVolumeSnapshot ++ volume_snapshot_export = openstackclient.volume.v2.volume_snapshot:ExportVolumeSnapshot + volume_snapshot_list = openstackclient.volume.v2.volume_snapshot:ListVolumeSnapshot + volume_snapshot_set = openstackclient.volume.v2.volume_snapshot:SetVolumeSnapshot + volume_snapshot_show = openstackclient.volume.v2.volume_snapshot:ShowVolumeSnapshot +-- +1.8.3.1 + diff --git a/openstack/python-openstackclient/centos/patches/0003-US106901-Openstack-CLI-Adoption.patch b/openstack/python-openstackclient/centos/patches/0003-US106901-Openstack-CLI-Adoption.patch new file mode 100644 index 00000000..2cebc220 --- /dev/null +++ b/openstack/python-openstackclient/centos/patches/0003-US106901-Openstack-CLI-Adoption.patch @@ -0,0 +1,435 @@ +From 6f1c69328515b4aa1591c999dedbbf88eefacfbe Mon Sep 17 00:00:00 2001 +From: rpm-build +Date: Wed, 14 Feb 2018 22:19:17 -0500 +Subject: [PATCH 1/1] US106901: OSC equivalence for Ceilometer OSC + equivalence achieved for the following ceilometer commands + + ceilometer clis: + ceilometer metertype-list + ceilometer pipeline-list + ceilometer pipeline-show + ceilometer pipeline-update + + OSC equivalence: + openstack telemetry metertype list + openstack telemetry pipeline list + openstack telemetry pipeline show + openstack telemetry pipeline update +--- + openstackclient/common/extension.py | 22 +++++ + openstackclient/common/module.py | 2 +- + openstackclient/telemetry/__init__.py | 0 + openstackclient/telemetry/client.py | 68 ++++++++++++++ + openstackclient/telemetry/v2/__init__.py | 0 + openstackclient/telemetry/v2/metertype.py | 67 ++++++++++++++ + openstackclient/telemetry/v2/pipeline.py | 146 ++++++++++++++++++++++++++++++ + requirements.txt | 1 + + setup.cfg | 6 ++ + 9 files changed, 311 insertions(+), 1 deletion(-) + create mode 100644 openstackclient/telemetry/__init__.py + create mode 100644 openstackclient/telemetry/client.py + create mode 100644 openstackclient/telemetry/v2/__init__.py + create mode 100644 openstackclient/telemetry/v2/metertype.py + create mode 100644 openstackclient/telemetry/v2/pipeline.py + +diff --git a/openstackclient/common/extension.py b/openstackclient/common/extension.py +index 139f43a..c1b0347 100644 +--- a/openstackclient/common/extension.py ++++ 
b/openstackclient/common/extension.py +@@ -12,6 +12,12 @@ + # License for the specific language governing permissions and limitations + # under the License. + # ++# Copyright (c) 2013-2018 Wind River Systems, Inc. ++# ++# ++# ++# ++# + + """Extension action implementations""" + +@@ -55,6 +61,13 @@ class ListExtension(command.Lister): + default=False, + help=_('List extensions for the Block Storage API'), + ) ++ # WRS extension ++ parser.add_argument( ++ '--telemetry', ++ action='store_true', ++ default=False, ++ help=_('List extensions for Ceilometer API'), ++ ) + parser.add_argument( + '--long', + action='store_true', +@@ -111,6 +124,15 @@ class ListExtension(command.Lister): + message = _("Failed to retrieve extensions list " + "from Network API") + LOG.warning(message) ++ # WRS extension ++ if parsed_args.telemetry or show_all: ++ telemetry_client = self.app.client_manager.telemetry ++ try: ++ data += telemetry_client.list_extensions.show_all() ++ except Exception: ++ message = _("Extensions list not supported by " ++ "Ceilometer API") ++ LOG.warning(message) + + extension_tuples = ( + utils.get_item_properties( +diff --git a/openstackclient/common/module.py b/openstackclient/common/module.py +index 20497f2..12d2038 100644 +--- a/openstackclient/common/module.py ++++ b/openstackclient/common/module.py +@@ -35,7 +35,7 @@ class ListCommand(command.Lister): + '--group', + metavar='', + help=_('Show commands filtered by a command group, for example: ' +- 'identity, volume, compute, image, network and ' ++ 'identity, volume, compute, image, network, telemetry and ' + 'other keywords'), + ) + return parser +diff --git a/openstackclient/telemetry/__init__.py b/openstackclient/telemetry/__init__.py +new file mode 100644 +index 0000000..e69de29 +diff --git a/openstackclient/telemetry/client.py b/openstackclient/telemetry/client.py +new file mode 100644 +index 0000000..0698a20 +--- /dev/null ++++ b/openstackclient/telemetry/client.py +@@ -0,0 +1,68 @@ ++# Copyright 2012-2018 OpenStack Foundation ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++# ++# Copyright (c) 2013-2018 Wind River Systems, Inc. 
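The `--telemetry` branch added to the extension listing above depends on a `telemetry` entry in the ClientManager, which the new client module below provides using the standard osc-lib plugin pattern. Condensed to its core (the real file below also adds logging, an `endpoint_type` kwarg, and the `--os-telemetry-api-version` option):

    from osc_lib import utils

    API_NAME = 'telemetry'
    API_VERSIONS = {'2': 'ceilometerclient.v2.client.Client'}


    def make_client(instance):
        # Resolve the versioned client class by dotted path, then build it
        # on the ClientManager's shared keystoneauth session.
        client_class = utils.get_client_class(
            API_NAME, instance._api_version[API_NAME], API_VERSIONS)
        return client_class(session=instance.session,
                            region_name=instance.region_name)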
++# ++# ++# ++# ++# ++ ++# WRS extension ++ ++ ++from osc_lib import utils ++from openstackclient.i18n import _ ++ ++import logging ++LOG = logging.getLogger(__name__) ++ ++DEFAULT_API_VERSION = '2' ++API_VERSION_OPTION = 'telemetry_api_version' ++API_NAME = "telemetry" ++API_VERSIONS = { ++ "2": "ceilometerclient.v2.client.Client", ++} ++ ++ ++def make_client(instance): ++ """Returns an ceilometer service client""" ++ ceilometer_client = utils.get_client_class( ++ API_NAME, ++ instance._api_version[API_NAME], ++ API_VERSIONS) ++ LOG.debug('Instantiating ceilometer client: %s', ceilometer_client) ++ ++ # Remember interface only if interface is set ++ kwargs = utils.build_kwargs_dict('endpoint_type', instance.interface) ++ ++ client = ceilometer_client( ++ session=instance.session, ++ region_name=instance.region_name, ++ **kwargs ++ ) ++ ++ return client ++ ++ ++def build_option_parser(parser): ++ """Hook to add global options""" ++ parser.add_argument( ++ '--os-telemetry-api-version', ++ metavar='', ++ default=utils.env('TELEMETRY_API_VERSION'), ++ help=_('Ceilometer API version, default=%s (Env: TELEMETRY_API_VERSION)') % ++ DEFAULT_API_VERSION, ++ ) ++ return parser +diff --git a/openstackclient/telemetry/v2/__init__.py b/openstackclient/telemetry/v2/__init__.py +new file mode 100644 +index 0000000..e69de29 +diff --git a/openstackclient/telemetry/v2/metertype.py b/openstackclient/telemetry/v2/metertype.py +new file mode 100644 +index 0000000..5472123 +--- /dev/null ++++ b/openstackclient/telemetry/v2/metertype.py +@@ -0,0 +1,67 @@ ++# Copyright 2012-2018 OpenStack Foundation ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++# ++# Copyright (c) 2013-2018 Wind River Systems, Inc. ++# ++# ++# ++# ++# ++# Copyright (c) 2013-2018 Wind River Systems, Inc. ++# ++# ++# ++# ++# ++# WRS extension ++ ++from osc_lib.command import command ++from osc_lib import utils ++ ++from openstackclient.i18n import _ ++ ++ ++class ListMetertype(command.Lister): ++ """List the user's meter types.""" ++ ++ def get_parser(self, prog_name): ++ parser = super(ListMetertype, self).get_parser(prog_name) ++ parser.add_argument( ++ "-q", "--query", ++ metavar="", ++ help=_("key[op]data_type::value; list. 
data_type is optional, " ++ "but if supplied must be string, integer, float, or boolean.") ++ ) ++ parser.add_argument( ++ "-l", "--limit", ++ metavar="", ++ help=_("Maximum number of meters to return.") ++ ) ++ return parser ++ ++ def take_action(self, parsed_args): ++ ceilometer_client = self.app.client_manager.telemetry ++ ++ columns = ( ++ "Name", ++ "Type", ++ "Unit", ++ ) ++ ++ metertypes = ceilometer_client.metertypes.list() ++ ++ return (columns, ++ (utils.get_item_properties( ++ s, columns, ++ ) for s in metertypes)) +diff --git a/openstackclient/telemetry/v2/pipeline.py b/openstackclient/telemetry/v2/pipeline.py +new file mode 100644 +index 0000000..cbe077a +--- /dev/null ++++ b/openstackclient/telemetry/v2/pipeline.py +@@ -0,0 +1,146 @@ ++# Copyright 2012-2018 OpenStack Foundation ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++# ++# Copyright (c) 2013-2018 Wind River Systems, Inc. ++# ++# ++# ++# ++# ++ ++# WRS extension ++ ++from osc_lib.command import command ++from osc_lib import utils ++import six ++ ++from openstackclient.i18n import _ ++ ++from oslo_utils import strutils ++from ceilometerclient import exc ++from ceilometerclient.common import utils as ceilometer_utils ++ ++ ++class ListPipeline(command.Lister): ++ """List the pipelines .""" ++ ++ def get_parser(self, prog_name): ++ parser = super(ListPipeline, self).get_parser(prog_name) ++ parser.add_argument( ++ "-q", "--query", ++ metavar="", ++ help=_("key[op]value; list.") ++ ) ++ return parser ++ ++ def take_action(self, parsed_args): ++ ceilometer_client = self.app.client_manager.telemetry ++ ++ columns = ( ++ "Name", ++ "Enabled", ++ "Location", ++ "Max Bytes", ++ "Backup Count", ++ "Compress", ++ ) ++ ++ pipelines = ceilometer_client.pipelines.list() ++ ++ return (columns, ++ (utils.get_item_properties( ++ s, columns, ++ ) for s in pipelines)) ++ ++ ++class ShowPipeline(command.ShowOne): ++ """Show details of a given pipeline.""" ++ ++ def get_parser(self, prog_name): ++ parser = super(ShowPipeline, self).get_parser(prog_name) ++ parser.add_argument( ++ "-n", "--name", ++ metavar="", ++ help=_("Name of the pipeline to show.") ++ ) ++ return parser ++ ++ def take_action(self, parsed_args): ++ ceilometer_client = self.app.client_manager.telemetry ++ pipelines = ceilometer_client.pipelines.get(parsed_args.name) ++ data = pipelines._info.copy() ++ return zip(*sorted(six.iteritems(data))) ++ ++ ++def _show_pipeline(pipeline): ++ fields = ['name', 'enabled', 'location', 'max_bytes', ++ 'backup_count', 'compress'] ++ data = dict([(f, getattr(pipeline, f, '')) for f in fields]) ++ ceilometer_utils.print_dict(data, wrap=72) ++ ++ ++class UpdatePipeline(command.Command): ++ """Update output values for an existing csv pipeline""" ++ ++ def get_parser(self, prog_name): ++ parser = super(UpdatePipeline, self).get_parser(prog_name) ++ parser.add_argument( ++ "-n", "--name", ++ required=True, ++ metavar="", ++ help=_("Name of the pipeline to update.") ++ ) ++ parser.add_argument( ++ "--enabled", ++ 
type=strutils.bool_from_string, ++ metavar="{True|False}", ++ help=_("True if enabling this pipeline") ++ ) ++ parser.add_argument( ++ "--location", ++ metavar="", ++ help=_("Full Path of the output file.") ++ ) ++ parser.add_argument( ++ "--backup_count", ++ type=int, ++ metavar="", ++ help=_("Number of backup files to keep.") ++ ) ++ parser.add_argument( ++ "--max_bytes", ++ type=int, ++ metavar="", ++ help=_("Maximum size of the file in bytes.") ++ ) ++ parser.add_argument( ++ "--compress", ++ type=strutils.bool_from_string, ++ metavar="{True|False}", ++ help=_("True if compressing backups.") ++ ) ++ return parser ++ ++ def take_action(self, parsed_args): ++ """Update output values for an existing csv pipeline.""" ++ ceilometer_client = self.app.client_manager.telemetry ++ ++ fields = dict(filter(lambda x: not (x[1] is None), vars(parsed_args).items())) ++ fields = ceilometer_utils.key_with_slash_to_nested_dict(fields) ++ fields.pop('name') ++ try: ++ pipeline = ceilometer_client.pipelines.update(parsed_args.name, **fields) ++ except exc.HTTPNotFound: ++ raise exc.CommandError('Pipeline not found: %s' % parsed_args.name) ++ _show_pipeline(pipeline) +diff --git a/requirements.txt b/requirements.txt +index 24e4672..56a5705 100644 +--- a/requirements.txt ++++ b/requirements.txt +@@ -15,3 +15,4 @@ python-glanceclient>=2.7.0 # Apache-2.0 + python-keystoneclient>=3.8.0 # Apache-2.0 + python-novaclient>=9.0.0 # Apache-2.0 + python-cinderclient>=3.0.0 # Apache-2.0 ++python-ceilometerclient>=2.9.0 # Apache-2.0 +diff --git a/setup.cfg b/setup.cfg +index 11f7261..1b8e006 100644 +--- a/setup.cfg ++++ b/setup.cfg +@@ -37,6 +37,7 @@ openstack.cli.base = + network = openstackclient.network.client + object_store = openstackclient.object.client + volume = openstackclient.volume.client ++ telemetry = openstackclient.telemetry.client + openstack.common = + availability_zone_list = openstackclient.common.availability_zone:ListAvailabilityZone + configuration_show = openstackclient.common.configuration:ShowConfiguration +@@ -632,6 +633,11 @@ openstack.volume.v3 = + volume_transfer_request_delete = openstackclient.volume.v2.volume_transfer_request:DeleteTransferRequest + volume_transfer_request_list = openstackclient.volume.v2.volume_transfer_request:ListTransferRequest + volume_transfer_request_show = openstackclient.volume.v2.volume_transfer_request:ShowTransferRequest ++openstack.telemetry.v2 = ++ telemetry_metertype_list = openstackclient.telemetry.v2.metertype:ListMetertype ++ telemetry_pipeline_list = openstackclient.telemetry.v2.pipeline:ListPipeline ++ telemetry_pipeline_show = openstackclient.telemetry.v2.pipeline:ShowPipeline ++ telemetry_pipeline_update = openstackclient.telemetry.v2.pipeline:UpdatePipeline + + [pbr] + autodoc_tree_index_modules = True +-- +1.8.3.1 + diff --git a/openstack/python-openstackclient/centos/patches/CGTS-7814-warning-only-when-the-admin-password-chang.patch b/openstack/python-openstackclient/centos/patches/CGTS-7814-warning-only-when-the-admin-password-chang.patch new file mode 100644 index 00000000..b2dfb0e5 --- /dev/null +++ b/openstack/python-openstackclient/centos/patches/CGTS-7814-warning-only-when-the-admin-password-chang.patch @@ -0,0 +1,42 @@ +From a2f2306ae12c65d0758a63781ace5d3e6cf0df7f Mon Sep 17 00:00:00 2001 +From: jmckenna +Date: Fri, 6 Oct 2017 10:40:12 -0400 +Subject: [PATCH] CGTS-7814: warning only when the admin password + change is accepted + +Currently the admin password change CLI warning appears even when the new +password change is not accepted. 
The patch fixed this by moving +the warning from keystoneclient to openstackclient, after the +password update call. + +diff --git a/openstackclient/identity/v3/user.py b/openstackclient/identity/v3/user.py +index 201d217..e94ec1f 100644 +--- a/openstackclient/identity/v3/user.py ++++ b/openstackclient/identity/v3/user.py +@@ -392,7 +392,7 @@ class SetUser(command.Command): + kwargs['enabled'] = False + + identity_client.users.update(user.id, **kwargs) +- if user.name == 'admin' : ++ if user.name == 'admin' and 'password' in kwargs : + print("Warning: '%s' password changed. Please wait 5 minutes " + "before Locking/Unlocking the controllers for the password " + "change to come into effect\n" %(user.name)) +@@ -456,6 +456,17 @@ class SetPasswordUser(command.Command): + + identity_client.users.update_password(current_password, password) + ++ # retrieve the authentication information from the cached token session ++ session_auth = identity_client.session.auth ++ username = "" ++ if "username" in session_auth.__dict__: ++ username = session_auth.username ++ elif "_username" in session_auth.__dict__: ++ username = session_auth._username ++ if username == 'admin' : ++ print("Warning: '%s' password changed. Please wait 5 minutes " ++ "before Locking/Unlocking the controllers for the password " ++ "change to come into effect\n" %(username)) + + class ShowUser(command.ShowOne): + _description = _("Display user details") diff --git a/openstack/python-openstackclient/centos/patches/openstackClient_Passwordchange_warning.patch b/openstack/python-openstackclient/centos/patches/openstackClient_Passwordchange_warning.patch new file mode 100644 index 00000000..a226b0df --- /dev/null +++ b/openstack/python-openstackclient/centos/patches/openstackClient_Passwordchange_warning.patch @@ -0,0 +1,22 @@ +From be8b4cdf2939d5cd2bf3f2a38534f0d760f63413 Mon Sep 17 00:00:00 2001 +From: jmckenna +Date: Fri, 6 Oct 2017 10:38:33 -0400 +Subject: [PATCH] CGTS-6766: Openstack client logging a warning message when + 'admin' user password is changed using 'openstack user set' CLI command + + +diff --git a/openstackclient/identity/v3/user.py b/openstackclient/identity/v3/user.py +index 5f4fb54..201d217 100644 +--- a/openstackclient/identity/v3/user.py ++++ b/openstackclient/identity/v3/user.py +@@ -392,6 +392,10 @@ class SetUser(command.Command): + kwargs['enabled'] = False + + identity_client.users.update(user.id, **kwargs) ++ if user.name == 'admin' : ++ print("Warning: '%s' password changed. 
Please wait 5 minutes " ++ "before Locking/Unlocking the controllers for the password " ++ "change to come into effect\n" %(user.name)) + + + class SetPasswordUser(command.Command): diff --git a/openstack/python-openstackclient/centos/srpm_path b/openstack/python-openstackclient/centos/srpm_path new file mode 100644 index 00000000..f8078d19 --- /dev/null +++ b/openstack/python-openstackclient/centos/srpm_path @@ -0,0 +1 @@ +mirror:Source/python-openstackclient-3.12.0-1.el7.src.rpm diff --git a/openstack/python-openstackdocstheme/centos/build_srpm.data b/openstack/python-openstackdocstheme/centos/build_srpm.data new file mode 100644 index 00000000..8aeb5536 --- /dev/null +++ b/openstack/python-openstackdocstheme/centos/build_srpm.data @@ -0,0 +1 @@ +TIS_PATCH_VER=1 diff --git a/openstack/python-openstackdocstheme/centos/meta_patches/0001-WRS-apply-WRS-patches.patch b/openstack/python-openstackdocstheme/centos/meta_patches/0001-WRS-apply-WRS-patches.patch new file mode 100644 index 00000000..dee2a440 --- /dev/null +++ b/openstack/python-openstackdocstheme/centos/meta_patches/0001-WRS-apply-WRS-patches.patch @@ -0,0 +1,33 @@ +From e81c260407459ce9707da6876c92129d86041380 Mon Sep 17 00:00:00 2001 +From: Al Bailey +Date: Fri, 27 Oct 2017 12:05:28 -0500 +Subject: [PATCH] WRS apply WRS patches + +--- + SPECS/python-openstackdocstheme.spec | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/SPECS/python-openstackdocstheme.spec b/SPECS/python-openstackdocstheme.spec +index 1eae0c4..316b88b 100644 +--- a/SPECS/python-openstackdocstheme.spec ++++ b/SPECS/python-openstackdocstheme.spec +@@ -6,7 +6,7 @@ + + Name: python-%{pypi_name} + Version: 1.11.0 +-Release: 1%{?dist} ++Release: 1%{?_tis_dist}.%{tis_patch_ver} + Summary: OpenStack Docs Theme + + License: ASL 2.0 +@@ -14,6 +14,7 @@ URL: http://docs.openstack.org/ + Source0: https://pypi.io/packages/source/o/%{pypi_name}/%{pypi_name}-%{version}.tar.gz + Patch0001: 0001-Remove-all-Google-Analytics-tracking.patch + Patch0002: 0002-Remove-external-references.patch ++Patch0003: 0003-WRS.-Fix-when-building-outside-git-tree.patch + BuildArch: noarch + + %package -n python2-%{pypi_name} +-- +1.8.3.1 + diff --git a/openstack/python-openstackdocstheme/centos/meta_patches/PATCH_ORDER b/openstack/python-openstackdocstheme/centos/meta_patches/PATCH_ORDER new file mode 100644 index 00000000..cf9f520f --- /dev/null +++ b/openstack/python-openstackdocstheme/centos/meta_patches/PATCH_ORDER @@ -0,0 +1 @@ +0001-WRS-apply-WRS-patches.patch diff --git a/openstack/python-openstackdocstheme/centos/patches/0003-WRS.-Fix-when-building-outside-git-tree.patch b/openstack/python-openstackdocstheme/centos/patches/0003-WRS.-Fix-when-building-outside-git-tree.patch new file mode 100644 index 00000000..cc394699 --- /dev/null +++ b/openstack/python-openstackdocstheme/centos/patches/0003-WRS.-Fix-when-building-outside-git-tree.patch @@ -0,0 +1,32 @@ +From 83649dfe12bf4b6263b2df663af1f87e54e7571a Mon Sep 17 00:00:00 2001 +From: rpm-build +Date: Fri, 27 Oct 2017 12:03:11 -0500 +Subject: [PATCH] WRS. 
Fix when building outside git tree + +--- + openstackdocstheme/__init__.py | 9 ++++++--- + 1 file changed, 6 insertions(+), 3 deletions(-) + +diff --git a/openstackdocstheme/__init__.py b/openstackdocstheme/__init__.py +index d8fb2f5..e5974a7 100644 +--- a/openstackdocstheme/__init__.py ++++ b/openstackdocstheme/__init__.py +@@ -49,9 +49,12 @@ def _html_page_context(app, pagename, templatename, context, doctree): + global _html_context_data + if _html_context_data is None: + _html_context_data = {} +- _html_context_data['gitsha'] = subprocess.check_output( +- ['git', 'rev-parse', 'HEAD'], +- ).decode('utf-8').strip() ++ try: ++ _html_context_data['gitsha'] = subprocess.check_output( ++ ['git', 'rev-parse', 'HEAD'], ++ ).decode('utf-8').strip() ++ except: ++ _html_context_data['gitsha'] = 'unknown' + repo_name = app.config.repository_name + if repo_name: + _html_context_data['giturl'] = _giturl.format(repo_name) +-- +1.8.3.1 + diff --git a/openstack/python-openstackdocstheme/centos/srpm_path b/openstack/python-openstackdocstheme/centos/srpm_path new file mode 100644 index 00000000..395136ad --- /dev/null +++ b/openstack/python-openstackdocstheme/centos/srpm_path @@ -0,0 +1 @@ +mirror:Source/python-openstackdocstheme-1.11.0-1.el7.src.rpm diff --git a/openstack/python-openstacksdk/centos/build_srpm.data b/openstack/python-openstacksdk/centos/build_srpm.data new file mode 100644 index 00000000..8f4752a6 --- /dev/null +++ b/openstack/python-openstacksdk/centos/build_srpm.data @@ -0,0 +1,3 @@ +SRC_DIR="$CGCS_BASE/git/python-openstacksdk" +TIS_BASE_SRCREV=0e9d3757eddd26a92121bf50a4ec3bf75a63de1c +TIS_PATCH_VER=GITREVCOUNT diff --git a/openstack/python-openstacksdk/centos/python-openstacksdk.spec b/openstack/python-openstacksdk/centos/python-openstacksdk.spec new file mode 100644 index 00000000..5edc4f93 --- /dev/null +++ b/openstack/python-openstacksdk/centos/python-openstacksdk.spec @@ -0,0 +1,206 @@ +%{!?upstream_version: %global upstream_version %{version}%{?milestone}} +%if 0%{?fedora} >= 24 +%global with_python3 1 +%endif + +# Disable docs while openstackdocstheme is packaged +%global with_doc 0 + +%global pypi_name openstacksdk + +Name: python-%{pypi_name} +Version: 0.9.17 +Release: 0%{?_tis_dist}.%{tis_patch_ver} +Summary: An SDK for building applications to work with OpenStack + +License: ASL 2.0 +URL: http://www.openstack.org/ +Source0: %{name}-%{version}%{?milestone}.tar.gz +BuildArch: noarch + + +%description +A collection of libraries for building applications to work with OpenStack +clouds. 
+ +%package -n python2-%{pypi_name} +Summary: An SDK for building applications to work with OpenStack +%{?python_provide:%python_provide python2-%{pypi_name}} + +BuildRequires: python2-devel +BuildRequires: python-pbr >= 2.0.0 +BuildRequires: python-sphinx +BuildRequires: python-oslo-sphinx +BuildRequires: python-requests +BuildRequires: python-keystoneauth1 +BuildRequires: python-oslo-utils +BuildRequires: python-deprecation +BuildRequires: python-os-client-config +# Test requirements +BuildRequires: python-coverage +BuildRequires: python-iso8601 >= 0.1.11 +BuildRequires: python-jsonpatch >= 1.1 +BuildRequires: python-subunit +BuildRequires: python-os-testr +BuildRequires: python-mock +BuildRequires: python-testrepository +BuildRequires: python-testscenarios +BuildRequires: python-testtools + +Requires: python-deprecation +Requires: python-jsonpatch >= 1.1 +Requires: python-iso8601 >= 0.1.11 +Requires: python-keystoneauth1 >= 3.1.0 +Requires: python-os-client-config >= 1.28.0 +Requires: python-oslo-utils +Requires: python-six +Requires: python-stevedore + +%description -n python2-%{pypi_name} +A collection of libraries for building applications to work with OpenStack +clouds. + +%package -n python2-%{pypi_name}-tests +Summary: An SDK for building applications to work with OpenStack - test files + +Requires: python2-%{pypi_name} = %{version}-%{release} + +%description -n python2-%{pypi_name}-tests +A collection of libraries for building applications to work with OpenStack +clouds - test files + +%package sdk +Summary: SDK files for %{name} + +%description sdk +Contains SDK files for %{name} package + + +%if 0%{?with_python3} +%package -n python3-%{pypi_name} +Summary: An SDK for building applications to work with OpenStack +%{?python_provide:%python_provide python3-%{pypi_name}} + +BuildRequires: python3-devel +BuildRequires: python3-pbr >= 1.8 +BuildRequires: python3-sphinx +BuildRequires: python3-oslo-sphinx +BuildRequires: python3-requests +BuildRequires: python3-keystoneauth1 +BuildRequires: python3-oslo-utils +BuildRequires: python3-os-client-config +# Test requirements +BuildRequires: python3-coverage +BuildRequires: python3-subunit +BuildRequires: python3-os-testr +BuildRequires: python3-mock +BuildRequires: python3-testrepository +BuildRequires: python3-testscenarios +BuildRequires: python3-testtools + +Requires: python3-keystoneauth1 +Requires: python3-os-client-config +Requires: python3-oslo-utils +Requires: python3-six +Requires: python3-stevedore + +%description -n python3-%{pypi_name} +A collection of libraries for building applications to work with OpenStack +clouds. + +%package -n python3-%{pypi_name}-tests +Summary: An SDK for building applications to work with OpenStack - test files + +Requires: python3-%{pypi_name} = %{version}-%{release} + +%description -n python3-%{pypi_name}-tests +A collection of libraries for building applications to work with OpenStack +clouds - test files + +%endif + + +%if 0%{?with_doc} +%package -n python-%{pypi_name}-doc +Summary: An SDK for building applications to work with OpenStack - documentation + +%description -n python-%{pypi_name}-doc +A collection of libraries for building applications to work with OpenStack +clouds - documentation. 
+%endif + +%prep +%autosetup -n %{name}-%{upstream_version} -S git + +%build +export PBR_VERSION=%{version} +%py2_build + +%if 0%{?with_python3} +%{py3_build} +%endif + +%if 0%{?with_doc} +# generate html docs +sphinx-build -b html doc/source html +# remove the sphinx-build leftovers +rm -rf html/.{doctrees,buildinfo} +%endif + +%install +export PBR_VERSION=%{version} +%py2_install + +%if 0%{?with_python3} +%{py3_install} +%endif + +# prep SDK package +mkdir -p %{buildroot}/usr/share/remote-clients +tar zcf %{buildroot}/usr/share/remote-clients/%{name}-%{version}.tgz --exclude='.gitignore' --exclude='.gitreview' -C .. %{name}-%{version} + +%check +export PBR_VERSION=%{version} +%{__python2} setup.py test + +%if 0%{?with_python3} +rm -rf .testrepository +export PBR_VERSION=%{version} +%{__python3} setup.py test +%endif + + +%files -n python2-%{pypi_name} +%doc README.rst +%license LICENSE +%{python2_sitelib}/openstack +%{python2_sitelib}/%{pypi_name}-*.egg-info +%exclude %{python2_sitelib}/openstack/tests + +%files -n python2-%{pypi_name}-tests +%{python2_sitelib}/openstack/tests + +%if 0%{?with_doc} +%files -n python-%{pypi_name}-doc +%doc html +%license LICENSE +%endif + +%if 0%{?with_python3} +%files -n python3-%{pypi_name} +%doc README.rst +%license LICENSE +%{python3_sitelib}/openstack +%{python3_sitelib}/%{pypi_name}-*.egg-info +%exclude %{python3_sitelib}/openstack/tests + +%files -n python3-%{pypi_name}-tests +%{python3_sitelib}/openstack/tests +%endif + +%files sdk +/usr/share/remote-clients/%{name}-%{version}.tgz + +%changelog +* Mon Sep 12 2016 Haikel Guemar 0.9.5-1 +- Update to 0.9.5 diff --git a/openstack/python-osc-lib/centos/build_srpm.data b/openstack/python-osc-lib/centos/build_srpm.data new file mode 100644 index 00000000..70b4b5dc --- /dev/null +++ b/openstack/python-osc-lib/centos/build_srpm.data @@ -0,0 +1 @@ +TIS_PATCH_VER=2 diff --git a/openstack/python-osc-lib/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch b/openstack/python-osc-lib/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch new file mode 100644 index 00000000..65036ad1 --- /dev/null +++ b/openstack/python-osc-lib/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch @@ -0,0 +1,26 @@ +From 0abdbd359af7c24e82fc4776084aa28c5e43a05e Mon Sep 17 00:00:00 2001 +From: Andy Ning +Date: Tue, 28 Nov 2017 09:16:37 -0500 +Subject: [PATCH 1/1] Update-package-versioning-for-TIS-format. 
+ +Signed-off-by: Andy Ning +--- + SPECS/python-osc-lib.spec | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/SPECS/python-osc-lib.spec b/SPECS/python-osc-lib.spec +index 8e85dbd..568ad55 100644 +--- a/SPECS/python-osc-lib.spec ++++ b/SPECS/python-osc-lib.spec +@@ -12,7 +12,7 @@ + + Name: python-%{library} + Version: 1.7.0 +-Release: 1%{?dist} ++Release: 1.el7%{?_tis_dist}.%{tis_patch_ver} + Summary: OpenStack library for writing OSC plugins + License: ASL 2.0 + URL: https://github.com/openstack/%{library}/ +-- +1.8.3.1 + diff --git a/openstack/python-osc-lib/centos/meta_patches/0002-CGTS-7947-add-os-keystone-region-name-option-to-open.patch b/openstack/python-osc-lib/centos/meta_patches/0002-CGTS-7947-add-os-keystone-region-name-option-to-open.patch new file mode 100644 index 00000000..33506a59 --- /dev/null +++ b/openstack/python-osc-lib/centos/meta_patches/0002-CGTS-7947-add-os-keystone-region-name-option-to-open.patch @@ -0,0 +1,28 @@ +From 496e39e401113c5ca9c4e53ff506518015c26149 Mon Sep 17 00:00:00 2001 +From: Andy Ning +Date: Tue, 28 Nov 2017 09:35:02 -0500 +Subject: [PATCH 1/1] CGTS-7947: add --os-keystone-region-name option to + openstack + +Signed-off-by: Andy Ning +--- + SPECS/python-osc-lib.spec | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/SPECS/python-osc-lib.spec b/SPECS/python-osc-lib.spec +index 568ad55..e035b22 100644 +--- a/SPECS/python-osc-lib.spec ++++ b/SPECS/python-osc-lib.spec +@@ -19,6 +19,9 @@ URL: https://github.com/openstack/%{library}/ + + Source0: https://tarballs.openstack.org/%{library}/%{library}-%{upstream_version}.tar.gz + ++# WRS patches ++Patch0001: 0001-CGTS-7947-add-os-keystone-region-name-option-to-open.patch ++ + BuildArch: noarch + + %package -n python2-%{library} +-- +1.8.3.1 + diff --git a/openstack/python-osc-lib/centos/meta_patches/0003-CGTS-8470-update-remote-client-to-include-osc-lib.patch b/openstack/python-osc-lib/centos/meta_patches/0003-CGTS-8470-update-remote-client-to-include-osc-lib.patch new file mode 100644 index 00000000..8c5134b4 --- /dev/null +++ b/openstack/python-osc-lib/centos/meta_patches/0003-CGTS-8470-update-remote-client-to-include-osc-lib.patch @@ -0,0 +1,52 @@ +From 7389923ee13be16dffcd7b1f68a5794ef5ecc248 Mon Sep 17 00:00:00 2001 +From: Andy Ning +Date: Wed, 6 Dec 2017 17:18:36 -0500 +Subject: [PATCH 1/1] CGTS-8470: update remote client to include osc-lib + +Currently the remote client built from the SDK doesn't include osc-lib. +The patch adds osc-lib, since it is patched. +--- + SPECS/python-osc-lib.spec | 13 +++++++++++++ + 1 file changed, 13 insertions(+) + +diff --git a/SPECS/python-osc-lib.spec b/SPECS/python-osc-lib.spec +index e035b22..2ae8629 100644 +--- a/SPECS/python-osc-lib.spec ++++ b/SPECS/python-osc-lib.spec +@@ -94,6 +94,12 @@ osc-lib is a package of common support modules for writing OSC plugins. + + This package contains the documentation. + ++%package sdk ++Summary: SDK files for %{library} ++ ++%description sdk ++Contains SDK files for %{library} package ++ + %if 0%{?with_python3} + %package -n python3-%{library} + Summary: OpenStack Example library +@@ -192,6 +198,10 @@ rm -rf .testrepository + %endif + %{__python2} setup.py test + ++# prep SDK package ++mkdir -p %{buildroot}/usr/share/remote-clients/%{name} ++tar zcf %{buildroot}/usr/share/remote-clients/%{name}/%{name}-%{version}.tgz --exclude='.gitignore' --exclude='.gitreview' -C .. 
%{library}-%{upstream_version} ++ + %files -n python2-%{library} + %license LICENSE + %{python2_sitelib}/%{module} +@@ -218,6 +228,9 @@ rm -rf .testrepository + %{python3_sitelib}/%{module}/tests + %endif # with_python3 + ++%files sdk ++/usr/share/remote-clients/%{name}/%{name}-%{version}.tgz ++ + %changelog + * Sat Aug 12 2017 Alfredo Moralejo 1.7.0-1 + - Update to 1.7.0 +-- +1.8.3.1 + diff --git a/openstack/python-osc-lib/centos/meta_patches/PATCH_ORDER b/openstack/python-osc-lib/centos/meta_patches/PATCH_ORDER new file mode 100644 index 00000000..b2ddb07d --- /dev/null +++ b/openstack/python-osc-lib/centos/meta_patches/PATCH_ORDER @@ -0,0 +1,4 @@ +0001-Update-package-versioning-for-TIS-format.patch +0002-CGTS-7947-add-os-keystone-region-name-option-to-open.patch +0003-CGTS-8470-update-remote-client-to-include-osc-lib.patch + diff --git a/openstack/python-osc-lib/centos/patches/0001-CGTS-7947-add-os-keystone-region-name-option-to-open.patch b/openstack/python-osc-lib/centos/patches/0001-CGTS-7947-add-os-keystone-region-name-option-to-open.patch new file mode 100644 index 00000000..0d23fa38 --- /dev/null +++ b/openstack/python-osc-lib/centos/patches/0001-CGTS-7947-add-os-keystone-region-name-option-to-open.patch @@ -0,0 +1,45 @@ +From 83a5e9b3fc561d9e9c01c71e122ef6407f99a03a Mon Sep 17 00:00:00 2001 +From: Andy Ning +Date: Tue, 28 Nov 2017 09:28:38 -0500 +Subject: [PATCH 1/1] CGTS-7947: add --os-keystone-region-name option to + openstack + +The new option only applies to the identity client. +--- + osc_lib/clientmanager.py | 1 + + osc_lib/shell.py | 7 +++++++ + 2 files changed, 8 insertions(+) + +diff --git a/osc_lib/clientmanager.py b/osc_lib/clientmanager.py +index 6017115..ed4f521 100644 +--- a/osc_lib/clientmanager.py ++++ b/osc_lib/clientmanager.py +@@ -91,6 +91,7 @@ class ClientManager(object): + self._app_name = app_name + self._app_version = app_version + self.region_name = self._cli_options.region_name ++ self.keystone_region_name = self._cli_options.keystone_region_name + self.interface = self._cli_options.interface + + self.timing = self._cli_options.timing +diff --git a/osc_lib/shell.py b/osc_lib/shell.py +index 6e645a9..afc5da6 100644 +--- a/osc_lib/shell.py ++++ b/osc_lib/shell.py +@@ -201,6 +201,13 @@ class OpenStackShell(app.App): + help=_('Authentication region name (Env: OS_REGION_NAME)'), + ) + parser.add_argument( ++ '--os-keystone-region-name', ++ metavar='', ++ dest='keystone_region_name', ++ default=utils.env('OS_KEYSTONE_REGION_NAME'), ++ help=_('Keystone Authentication region name (Env: OS_KEYSTONE_REGION_NAME)'), ++ ) ++ parser.add_argument( + '--os-cacert', + metavar='', + dest='cacert', +-- +1.8.3.1 + diff --git a/openstack/python-osc-lib/centos/srpm_path b/openstack/python-osc-lib/centos/srpm_path new file mode 100644 index 00000000..48539645 --- /dev/null +++ b/openstack/python-osc-lib/centos/srpm_path @@ -0,0 +1 @@ +mirror:Source/python-osc-lib-1.7.0-1.el7.src.rpm diff --git a/openstack/python-oslo-concurrency/centos/build_srpm.data b/openstack/python-oslo-concurrency/centos/build_srpm.data new file mode 100644 index 00000000..70b4b5dc --- /dev/null +++ b/openstack/python-oslo-concurrency/centos/build_srpm.data @@ -0,0 +1 @@ +TIS_PATCH_VER=2 diff --git a/openstack/python-oslo-concurrency/centos/meta_patches/PATCH_ORDER b/openstack/python-oslo-concurrency/centos/meta_patches/PATCH_ORDER new file mode 100644 index 00000000..f6ac15ed --- /dev/null +++ b/openstack/python-oslo-concurrency/centos/meta_patches/PATCH_ORDER @@ -0,0 +1,2 @@ 
+update-package-versioning-for-TIS-format.patch +spec-add-fair-lock.patch diff --git a/openstack/python-oslo-concurrency/centos/meta_patches/spec-add-fair-lock.patch b/openstack/python-oslo-concurrency/centos/meta_patches/spec-add-fair-lock.patch new file mode 100644 index 00000000..80e10302 --- /dev/null +++ b/openstack/python-oslo-concurrency/centos/meta_patches/spec-add-fair-lock.patch @@ -0,0 +1,15 @@ +diff --git a/SPECS/python-oslo-concurrency.spec b/SPECS/python-oslo-concurrency.spec +index a4a3883..cc9e799 100644 +--- a/SPECS/python-oslo-concurrency.spec ++++ b/SPECS/python-oslo-concurrency.spec +@@ -16,6 +16,10 @@ Summary: OpenStack Oslo concurrency library + License: ASL 2.0 + URL: https://launchpad.net/oslo + Source0: https://tarballs.openstack.org/%{pypi_name}/%{pypi_name}-%{upstream_version}.tar.gz ++ ++# WRS ++Patch0001: add-fair-lock.patch ++ + BuildArch: noarch + + %description diff --git a/openstack/python-oslo-concurrency/centos/meta_patches/update-package-versioning-for-TIS-format.patch b/openstack/python-oslo-concurrency/centos/meta_patches/update-package-versioning-for-TIS-format.patch new file mode 100644 index 00000000..614debb9 --- /dev/null +++ b/openstack/python-oslo-concurrency/centos/meta_patches/update-package-versioning-for-TIS-format.patch @@ -0,0 +1,13 @@ +diff --git a/SPECS/python-oslo-concurrency.spec b/SPECS/python-oslo-concurrency.spec +index dee2b71..a4a3883 100644 +--- a/SPECS/python-oslo-concurrency.spec ++++ b/SPECS/python-oslo-concurrency.spec +@@ -10,7 +10,7 @@ + + Name: python-oslo-concurrency + Version: 3.21.0 +-Release: 1%{?dist} ++Release: 1.el7%{?_tis_dist}.%{tis_patch_ver} + Summary: OpenStack Oslo concurrency library + + License: ASL 2.0 diff --git a/openstack/python-oslo-concurrency/centos/patches/add-fair-lock.patch b/openstack/python-oslo-concurrency/centos/patches/add-fair-lock.patch new file mode 100644 index 00000000..57eaef8a --- /dev/null +++ b/openstack/python-oslo-concurrency/centos/patches/add-fair-lock.patch @@ -0,0 +1,106 @@ +diff --git a/oslo_concurrency/lockutils.py b/oslo_concurrency/lockutils.py +index ea67571..4519463 100644 +--- a/oslo_concurrency/lockutils.py ++++ b/oslo_concurrency/lockutils.py +@@ -87,6 +87,43 @@ ReaderWriterLock = fasteners.ReaderWriterLock + """ + + ++class FairLocks(object): ++ """A garbage collected container of fair locks. ++ ++ This collection internally uses a weak value dictionary so that when a ++ lock is no longer in use (by any threads) it will automatically be ++ removed from this container by the garbage collector. ++ """ ++ ++ def __init__(self): ++ self._locks = weakref.WeakValueDictionary() ++ self._lock = threading.Lock() ++ ++ def get(self, name): ++ """Gets (or creates) a lock with a given name. ++ ++ :param name: The lock name to get/create (used to associate ++ previously created names with the same lock). ++ ++ Returns an newly constructed lock (or an existing one if it was ++ already created for the given name). ++ """ ++ with self._lock: ++ try: ++ return self._locks[name] ++ except KeyError: ++ rwlock = ReaderWriterLock() ++ self._locks[name] = rwlock ++ return rwlock ++ ++ ++fair_locks = FairLocks() ++ ++ ++def internal_fair_lock(name): ++ return fair_locks.get(name) ++ ++ + class Semaphores(object): + """A garbage collected container of semaphores. 
+ +@@ -170,7 +207,7 @@ def internal_lock(name, semaphores=None): + + @contextlib.contextmanager + def lock(name, lock_file_prefix=None, external=False, lock_path=None, +- do_log=True, semaphores=None, delay=0.01): ++ do_log=True, semaphores=None, delay=0.01, fair=False): + """Context based lock + + This function yields a `threading.Semaphore` instance (if we don't use +@@ -200,16 +237,22 @@ def lock(name, lock_file_prefix=None, external=False, lock_path=None, + + :param delay: Delay between acquisition attempts (in seconds). + ++ :param fair: Whether or not we want a "fair" lock where contending lockers ++ will get the lock in the order in which they tried to acquire it. ++ + .. versionchanged:: 0.2 + Added *do_log* optional parameter. + + .. versionchanged:: 0.3 + Added *delay* and *semaphores* optional parameters. + """ +- int_lock = internal_lock(name, semaphores=semaphores) ++ if fair: ++ int_lock = internal_fair_lock(name).write_lock() ++ else: ++ int_lock = internal_lock(name, semaphores=semaphores) + with int_lock: + if do_log: +- LOG.debug('Acquired semaphore "%(lock)s"', {'lock': name}) ++ LOG.debug('Acquired lock "%(lock)s"', {'lock': name}) + try: + if external and not CONF.oslo_concurrency.disable_process_locking: + ext_lock = external_lock(name, lock_file_prefix, lock_path) +@@ -222,11 +265,11 @@ def lock(name, lock_file_prefix=None, external=False, lock_path=None, + yield int_lock + finally: + if do_log: +- LOG.debug('Releasing semaphore "%(lock)s"', {'lock': name}) ++ LOG.debug('Releasing lock "%(lock)s"', {'lock': name}) + + + def synchronized(name, lock_file_prefix=None, external=False, lock_path=None, +- semaphores=None, delay=0.01): ++ semaphores=None, delay=0.01, fair=False): + """Synchronization decorator. + + Decorating a method like so:: +@@ -261,7 +304,8 @@ def synchronized(name, lock_file_prefix=None, external=False, lock_path=None, + t2 = None + try: + with lock(name, lock_file_prefix, external, lock_path, +- do_log=False, semaphores=semaphores, delay=delay): ++ do_log=False, semaphores=semaphores, delay=delay, ++ fair=fair): + t2 = timeutils.now() + LOG.debug('Lock "%(name)s" acquired by "%(function)s" :: ' + 'waited %(wait_secs)0.3fs', diff --git a/openstack/python-oslo-concurrency/centos/srpm_path b/openstack/python-oslo-concurrency/centos/srpm_path new file mode 100644 index 00000000..65225c97 --- /dev/null +++ b/openstack/python-oslo-concurrency/centos/srpm_path @@ -0,0 +1 @@ +mirror:Source/python-oslo-concurrency-3.21.0-1.el7.src.rpm diff --git a/openstack/python-oslo-messaging/centos/build_srpm.data b/openstack/python-oslo-messaging/centos/build_srpm.data new file mode 100644 index 00000000..0eac83bb --- /dev/null +++ b/openstack/python-oslo-messaging/centos/build_srpm.data @@ -0,0 +1 @@ +TIS_PATCH_VER=4 diff --git a/openstack/python-oslo-messaging/centos/meta_patches/0004-disable-check-on-build.patch b/openstack/python-oslo-messaging/centos/meta_patches/0004-disable-check-on-build.patch new file mode 100644 index 00000000..966274e4 --- /dev/null +++ b/openstack/python-oslo-messaging/centos/meta_patches/0004-disable-check-on-build.patch @@ -0,0 +1,27 @@ +From a31e3b402f19767e1390edf17400e38f6592c829 Mon Sep 17 00:00:00 2001 +From: jmckenna +Date: Thu, 7 Dec 2017 09:12:43 -0500 +Subject: [PATCH] Disable check on build + + +diff --git a/SPECS/python-oslo-messaging.spec b/SPECS/python-oslo-messaging.spec +index 627a1be..648718a 100644 +--- a/SPECS/python-oslo-messaging.spec ++++ b/SPECS/python-oslo-messaging.spec +@@ -268,11 +268,11 @@ rm -fr 
doc/build/html/.buildinfo + %check + # Temporarily disabling tests until we have + # mock >= 1.2 and pika_pool +-%{__python2} setup.py test || +-%if 0%{?with_python3} +-rm -rf .testrepository +-%{__python3} setup.py test || +-%endif ++# %{__python2} setup.py test || ++# %if 0%{?with_python3} ++# rm -rf .testrepository ++# %{__python3} setup.py test || ++# %endif + + %files -n python2-%{pkg_name} + %license LICENSE diff --git a/openstack/python-oslo-messaging/centos/meta_patches/PATCH_ORDER b/openstack/python-oslo-messaging/centos/meta_patches/PATCH_ORDER new file mode 100644 index 00000000..96a79b65 --- /dev/null +++ b/openstack/python-oslo-messaging/centos/meta_patches/PATCH_ORDER @@ -0,0 +1,4 @@ +update-package-versioning-for-tis-format.patch +spec-rabbit-increase-heartbeat-rate-to-decrease-polling-interval.patch +fix-pifpaf-build-error.patch +0004-disable-check-on-build.patch diff --git a/openstack/python-oslo-messaging/centos/meta_patches/fix-pifpaf-build-error.patch b/openstack/python-oslo-messaging/centos/meta_patches/fix-pifpaf-build-error.patch new file mode 100644 index 00000000..0a3b8461 --- /dev/null +++ b/openstack/python-oslo-messaging/centos/meta_patches/fix-pifpaf-build-error.patch @@ -0,0 +1,42 @@ +From 7473db64725de270d899a5ec35801c25f5ba54b1 Mon Sep 17 00:00:00 2001 +From: jmckenna +Date: Thu, 7 Dec 2017 09:10:31 -0500 +Subject: [PATCH] refactor patch "fix pifpaf build error" + + +diff --git a/SPECS/python-oslo-messaging.spec b/SPECS/python-oslo-messaging.spec +index abc5dce..627a1be 100644 +--- a/SPECS/python-oslo-messaging.spec ++++ b/SPECS/python-oslo-messaging.spec +@@ -52,6 +52,7 @@ BuildRequires: python-tenacity + BuildRequires: python-testrepository + BuildRequires: python-testscenarios + BuildRequires: python-testtools ++BuildRequires: python2-pifpaf + + + Requires: python-amqp >= 2.1.0 +@@ -135,6 +136,7 @@ Requires: python-oslotest + Requires: python-testrepository + Requires: python-testscenarios + Requires: python-testtools ++Requires: python2-pifpaf + + %description -n python2-%{pkg_name}-tests + Tests for the OpenStack common messaging library. +@@ -165,6 +167,7 @@ BuildRequires: python3-tenacity + BuildRequires: python3-testrepository + BuildRequires: python3-testscenarios + BuildRequires: python3-testtools ++BuildRequires: python3-pifpaf + + Requires: python3-amqp >= 2.1.0 + Requires: python3-debtcollector >= 1.2.0 +@@ -217,6 +220,7 @@ Requires: python3-oslotest + Requires: python3-testrepository + Requires: python3-testscenarios + Requires: python3-testtools ++Requires: python3-pifpaf + + %description -n python3-%{pkg_name}-tests + Tests for the OpenStack common messaging library. 
diff --git a/openstack/python-oslo-messaging/centos/meta_patches/spec-rabbit-increase-heartbeat-rate-to-decrease-polling-interval.patch b/openstack/python-oslo-messaging/centos/meta_patches/spec-rabbit-increase-heartbeat-rate-to-decrease-polling-interval.patch new file mode 100644 index 00000000..eaf17222 --- /dev/null +++ b/openstack/python-oslo-messaging/centos/meta_patches/spec-rabbit-increase-heartbeat-rate-to-decrease-polling-interval.patch @@ -0,0 +1,28 @@ +From ed0e39e1f647b44b19cfe444009ab9e64c47f070 Mon Sep 17 00:00:00 2001 +From: Scott Little +Date: Mon, 2 Oct 2017 14:45:55 -0400 +Subject: [PATCH 2/2] WRS: + spec-rabbit-increase-heartbeat-rate-to-decrease-polling-interval.patch + +--- + SPECS/python-oslo-messaging.spec | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/SPECS/python-oslo-messaging.spec b/SPECS/python-oslo-messaging.spec +index 361f93e..55e8567 100644 +--- a/SPECS/python-oslo-messaging.spec ++++ b/SPECS/python-oslo-messaging.spec +@@ -14,6 +14,10 @@ Summary: OpenStack common messaging library + License: ASL 2.0 + URL: https://launchpad.net/oslo + Source0: https://tarballs.openstack.org/%{pypi_name}/%{pypi_name}-%{upstream_version}.tar.gz ++ ++# WRS ++Patch0001: rabbit-increase-heartbeat-rate-to-decrease-poll-interval.patch ++ + BuildArch: noarch + + BuildRequires: git +-- +1.9.1 + diff --git a/openstack/python-oslo-messaging/centos/meta_patches/update-package-versioning-for-tis-format.patch b/openstack/python-oslo-messaging/centos/meta_patches/update-package-versioning-for-tis-format.patch new file mode 100644 index 00000000..cb82830b --- /dev/null +++ b/openstack/python-oslo-messaging/centos/meta_patches/update-package-versioning-for-tis-format.patch @@ -0,0 +1,26 @@ +From 76a1a25bbd4e50e46b9ab4c0601e8f3ba9c7dc9e Mon Sep 17 00:00:00 2001 +From: Allain Legacy +Date: Fri, 2 Jun 2017 14:57:47 -0400 +Subject: [PATCH 1/2] update package versioning for tis format + +Signed-off-by: Allain Legacy +--- + SPECS/python-oslo-messaging.spec | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/SPECS/python-oslo-messaging.spec b/SPECS/python-oslo-messaging.spec +index 59562ac..929e8e0 100644 +--- a/SPECS/python-oslo-messaging.spec ++++ b/SPECS/python-oslo-messaging.spec +@@ -8,7 +8,7 @@ + + Name: python-oslo-messaging + Version: 5.30.0 +-Release: 1%{?dist} ++Release: 1.el7%{?_tis_dist}.%{tis_patch_ver} + Summary: OpenStack common messaging library + + License: ASL 2.0 +-- +2.12.1 + diff --git a/openstack/python-oslo-messaging/centos/patches/rabbit-increase-heartbeat-rate-to-decrease-poll-interval.patch b/openstack/python-oslo-messaging/centos/patches/rabbit-increase-heartbeat-rate-to-decrease-poll-interval.patch new file mode 100644 index 00000000..df386e73 --- /dev/null +++ b/openstack/python-oslo-messaging/centos/patches/rabbit-increase-heartbeat-rate-to-decrease-poll-interval.patch @@ -0,0 +1,37 @@ +From c5cc972c692d3fc05afc54d0368511e7699d241f Mon Sep 17 00:00:00 2001 +From: rpm-build +Date: Fri, 2 Jun 2017 14:52:44 -0400 +Subject: [PATCH] rabbit: increase heartbeat rate to decrease poll interval + +The poll_timeout is tied to the heartbeat_rate value when the +heartbeat_timeout_threshold is non-zero. It works out to be: + + threshold / rate / 2 + +Therefore the default is 60 / 2 / 2 = 15. This causes the recv() to block for +up to 15 seconds unless there are incoming RPC messages. This is problematic +for graceful shutdown of services as the stop() request may block if the recv() +is blocked. 
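+
+Worked numbers (editor's illustration, not part of the original commit
+message), assuming the default heartbeat_timeout_threshold of 60 seconds:
+
+    poll_timeout = threshold / rate / 2
+    rate = 2  (old default):  60 / 2 / 2  = 15 seconds per recv() poll
+    rate = 10 (this patch):   60 / 10 / 2 = 3 seconds per recv() poll
+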
To ensure that the recv() does not block for a long time we are +reducing the interval by controlling the rate. + +Signed-off-by: rpm-build +--- + oslo_messaging/_drivers/impl_rabbit.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/oslo_messaging/_drivers/impl_rabbit.py b/oslo_messaging/_drivers/impl_rabbit.py +index da1acee..eeb9257 100644 +--- a/oslo_messaging/_drivers/impl_rabbit.py ++++ b/oslo_messaging/_drivers/impl_rabbit.py +@@ -191,7 +191,7 @@ rabbit_opts = [ + "considered down if heartbeat's keep-alive fails " + "(0 disable the heartbeat). EXPERIMENTAL"), + cfg.IntOpt('heartbeat_rate', +- default=2, ++ default=10, + help='How often times during the heartbeat_timeout_threshold ' + 'we check the heartbeat.'), + +-- +2.12.1 + diff --git a/openstack/python-oslo-messaging/centos/srpm_path b/openstack/python-oslo-messaging/centos/srpm_path new file mode 100644 index 00000000..db0845d4 --- /dev/null +++ b/openstack/python-oslo-messaging/centos/srpm_path @@ -0,0 +1 @@ +mirror:Source/python-oslo-messaging-5.30.0-1.el7.src.rpm diff --git a/openstack/python-oslo-service/centos/build_srpm.data b/openstack/python-oslo-service/centos/build_srpm.data new file mode 100644 index 00000000..70b4b5dc --- /dev/null +++ b/openstack/python-oslo-service/centos/build_srpm.data @@ -0,0 +1 @@ +TIS_PATCH_VER=2 diff --git a/openstack/python-oslo-service/centos/meta_patches/PATCH_ORDER b/openstack/python-oslo-service/centos/meta_patches/PATCH_ORDER new file mode 100644 index 00000000..2e2ced69 --- /dev/null +++ b/openstack/python-oslo-service/centos/meta_patches/PATCH_ORDER @@ -0,0 +1,2 @@ +update-package-versioning-for-tis-format.patch +spec-loopingcall-permit-aborting-while-sleeping.patch diff --git a/openstack/python-oslo-service/centos/meta_patches/spec-loopingcall-permit-aborting-while-sleeping.patch b/openstack/python-oslo-service/centos/meta_patches/spec-loopingcall-permit-aborting-while-sleeping.patch new file mode 100644 index 00000000..2535c9b5 --- /dev/null +++ b/openstack/python-oslo-service/centos/meta_patches/spec-loopingcall-permit-aborting-while-sleeping.patch @@ -0,0 +1,37 @@ +From 0005b00e62641792f7cb5a647c4720601f4081db Mon Sep 17 00:00:00 2001 +From: Scott Little +Date: Mon, 2 Oct 2017 14:42:44 -0400 +Subject: [PATCH 2/2] WRS: + spec-loopingcall-permit-aborting-while-sleeping.patch + +--- + SPECS/python-oslo-service.spec | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/SPECS/python-oslo-service.spec b/SPECS/python-oslo-service.spec +index ebf5ccf..7d138d0 100644 +--- a/SPECS/python-oslo-service.spec ++++ b/SPECS/python-oslo-service.spec +@@ -14,6 +14,10 @@ Summary: Oslo service library + License: ASL 2.0 + URL: http://launchpad.net/oslo + Source0: https://pypi.io/packages/source/o/%{pypi_name}/%{pypi_name}-%{version}.tar.gz ++ ++# WRS ++Patch0001: loopingcall-permit-aborting-while-sleeping.patch ++ + BuildArch: noarch + + %package -n python2-%{pname} +@@ -131,6 +135,8 @@ Library for running OpenStack services + + %prep + %setup -q -n %{pypi_name}-%{upstream_version} ++# Apply WRS patches ++%patch0001 -p1 + + %build + %py2_build +-- +1.9.1 + diff --git a/openstack/python-oslo-service/centos/meta_patches/update-package-versioning-for-tis-format.patch b/openstack/python-oslo-service/centos/meta_patches/update-package-versioning-for-tis-format.patch new file mode 100644 index 00000000..6eb3efe8 --- /dev/null +++ b/openstack/python-oslo-service/centos/meta_patches/update-package-versioning-for-tis-format.patch @@ -0,0 +1,27 @@ +From 
e38fcd3820d8c48d23d3b17a20d78d600c8e0347 Mon Sep 17 00:00:00 2001 +From: Scott Little +Date: Mon, 2 Oct 2017 14:42:44 -0400 +Subject: [PATCH 1/2] WRS: update-package-versioning-for-tis-format.patch + +Conflicts: + SPECS/python-oslo-service.spec +--- + SPECS/python-oslo-service.spec | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/SPECS/python-oslo-service.spec b/SPECS/python-oslo-service.spec +index 172e484..ebf5ccf 100644 +--- a/SPECS/python-oslo-service.spec ++++ b/SPECS/python-oslo-service.spec +@@ -8,7 +8,7 @@ + + Name: python-%{pname} + Version: 1.16.1 +-Release: 1%{?dist} ++Release: 1.el7%{?_tis_dist}.%{tis_patch_ver} + Summary: Oslo service library + + License: ASL 2.0 +-- +1.9.1 + diff --git a/openstack/python-oslo-service/centos/patches/loopingcall-permit-aborting-while-sleeping.patch b/openstack/python-oslo-service/centos/patches/loopingcall-permit-aborting-while-sleeping.patch new file mode 100644 index 00000000..441e61df --- /dev/null +++ b/openstack/python-oslo-service/centos/patches/loopingcall-permit-aborting-while-sleeping.patch @@ -0,0 +1,177 @@ +From 4e07594552a7249c45bf12fb66c79faedace1721 Mon Sep 17 00:00:00 2001 +From: Allain Legacy +Date: Wed, 31 May 2017 16:18:19 -0400 +Subject: [PATCH] Permit aborting loopingcall while sleeping + +Some of the openstack services implement worker tasks that are based on +the oslo-service LoopingCallBase objects. They do this as a way to have +a task that runs periodically as a greenthread within a child worker +process. For example, the neutron-server runs AgentStatusCheckWorker() +objects as base service workers in its child worker processes. + +When the parent server process handles a SIGTERM signal it attempts to +stop all services launched on each of the child worker processes (i.e., +ProcessLauncher.stop()). That results in a stop() being called on each +of the underlying base services and then a wait() to ensure that they +complete before shutdown. + +If any service that is implemented on a LoopingCallBase related object +is suspended on a greenthread.sleep() the previous call to stop() will +have no effect and so the wait() will block until the sleep() finishes. +For tasks that either have a frequent FixedLoopingBase interface or a +short initial_delay this may not be a problem, but for those with a long +delay this could mean that the wait() blocks for minutes before the +process is allowed to shutdown. + +To solve this the LoopingCallBase calls to greenthread.sleep() are being +replaced with a threading.Event() object's wait() method. This allows a +caller of stop() to interrupt the sleep and expedite the shutdown. 
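+
+A minimal sketch of the new pattern (editor's illustration, not part of
+the original commit message; the names mirror the LoopingCallBase change
+in the diff below):
+
+    import threading
+
+    abort = threading.Event()
+
+    def _sleep(timeout):
+        # Returns as soon as abort.set() is called from stop();
+        # otherwise it waits out the full timeout, like a plain sleep().
+        return abort.wait(timeout)
+
+    def stop():
+        abort.set()
+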
+ +Closes-Bug: #1660210 + +Change-Id: I5835f9595826df5349e4cc8b1da8529bb960ee04 +Signed-off-by: Allain Legacy +--- + oslo_service/loopingcall.py | 19 +++++++++++++------ + oslo_service/tests/test_loopingcall.py | 14 +++++++------- + 2 files changed, 20 insertions(+), 13 deletions(-) + +diff --git a/oslo_service/loopingcall.py b/oslo_service/loopingcall.py +index 72eaab1..d1c12fd 100644 +--- a/oslo_service/loopingcall.py ++++ b/oslo_service/loopingcall.py +@@ -17,6 +17,7 @@ + + import random + import sys ++import threading + + from eventlet import event + from eventlet import greenthread +@@ -84,19 +85,25 @@ class LoopingCallBase(object): + self.args = args + self.kw = kw + self.f = f +- self._running = False + self._thread = None + self.done = None ++ self.abort = threading.Event() ++ ++ @property ++ def _running(self): ++ return not self.abort.is_set() + + def stop(self): +- self._running = False ++ self.abort.set() + + def wait(self): + return self.done.wait() + + def _on_done(self, gt, *args, **kwargs): + self._thread = None +- self._running = False ++ ++ def _sleep(self, timeout): ++ return self.abort.wait(timeout) + + def _start(self, idle_for, initial_delay=None, stop_on_exception=True): + """Start the looping +@@ -113,8 +120,8 @@ class LoopingCallBase(object): + """ + if self._thread is not None: + raise RuntimeError(self._RUN_ONLY_ONE_MESSAGE) +- self._running = True + self.done = event.Event() ++ self.abort.clear() + self._thread = greenthread.spawn( + self._run_loop, idle_for, + initial_delay=initial_delay, stop_on_exception=stop_on_exception) +@@ -128,7 +135,7 @@ class LoopingCallBase(object): + func = self.f if stop_on_exception else _safe_wrapper(self.f, kind, + func_name) + if initial_delay: +- greenthread.sleep(initial_delay) ++ self._sleep(initial_delay) + try: + watch = timeutils.StopWatch() + while self._running: +@@ -142,7 +149,7 @@ class LoopingCallBase(object): + 'for %(idle).02f seconds', + {'func_name': func_name, 'idle': idle, + 'kind': kind}) +- greenthread.sleep(idle) ++ self._sleep(idle) + except LoopingCallDone as e: + self.done.send(e.retvalue) + except Exception: +diff --git a/oslo_service/tests/test_loopingcall.py b/oslo_service/tests/test_loopingcall.py +index c149506..b3c7842 100644 +--- a/oslo_service/tests/test_loopingcall.py ++++ b/oslo_service/tests/test_loopingcall.py +@@ -276,7 +276,7 @@ class DynamicLoopingCallTestCase(test_base.BaseTestCase): + else: + self.num_runs = self.num_runs - 1 + +- @mock.patch('eventlet.greenthread.sleep') ++ @mock.patch('oslo_service.loopingcall.LoopingCallBase._sleep') + def test_timeout_task_without_return(self, sleep_mock): + self.num_runs = 1 + timer = loopingcall.DynamicLoopingCall( +@@ -285,7 +285,7 @@ class DynamicLoopingCallTestCase(test_base.BaseTestCase): + timer.start(periodic_interval_max=5).wait() + sleep_mock.assert_has_calls([mock.call(5)]) + +- @mock.patch('eventlet.greenthread.sleep') ++ @mock.patch('oslo_service.loopingcall.LoopingCallBase._sleep') + def test_interval_adjustment(self, sleep_mock): + self.num_runs = 2 + +@@ -294,7 +294,7 @@ class DynamicLoopingCallTestCase(test_base.BaseTestCase): + + sleep_mock.assert_has_calls([mock.call(5), mock.call(1)]) + +- @mock.patch('eventlet.greenthread.sleep') ++ @mock.patch('oslo_service.loopingcall.LoopingCallBase._sleep') + def test_initial_delay(self, sleep_mock): + self.num_runs = 1 + +@@ -306,7 +306,7 @@ class DynamicLoopingCallTestCase(test_base.BaseTestCase): + + class TestBackOffLoopingCall(test_base.BaseTestCase): + 
@mock.patch('random.SystemRandom.gauss') +- @mock.patch('eventlet.greenthread.sleep') ++ @mock.patch('oslo_service.loopingcall.LoopingCallBase._sleep') + def test_exponential_backoff(self, sleep_mock, random_mock): + def false(): + return False +@@ -330,7 +330,7 @@ class TestBackOffLoopingCall(test_base.BaseTestCase): + self.assertEqual(expected_times, sleep_mock.call_args_list) + + @mock.patch('random.SystemRandom.gauss') +- @mock.patch('eventlet.greenthread.sleep') ++ @mock.patch('oslo_service.loopingcall.LoopingCallBase._sleep') + def test_no_backoff(self, sleep_mock, random_mock): + random_mock.return_value = 1 + func = mock.Mock() +@@ -345,7 +345,7 @@ class TestBackOffLoopingCall(test_base.BaseTestCase): + self.assertTrue(retvalue, 'return value') + + @mock.patch('random.SystemRandom.gauss') +- @mock.patch('eventlet.greenthread.sleep') ++ @mock.patch('oslo_service.loopingcall.LoopingCallBase._sleep') + def test_no_sleep(self, sleep_mock, random_mock): + # Any call that executes properly the first time shouldn't sleep + random_mock.return_value = 1 +@@ -358,7 +358,7 @@ class TestBackOffLoopingCall(test_base.BaseTestCase): + self.assertTrue(retvalue, 'return value') + + @mock.patch('random.SystemRandom.gauss') +- @mock.patch('eventlet.greenthread.sleep') ++ @mock.patch('oslo_service.loopingcall.LoopingCallBase._sleep') + def test_max_interval(self, sleep_mock, random_mock): + def false(): + return False +-- +2.12.1 + diff --git a/openstack/python-oslo-service/centos/srpm_path b/openstack/python-oslo-service/centos/srpm_path new file mode 100644 index 00000000..3cacd013 --- /dev/null +++ b/openstack/python-oslo-service/centos/srpm_path @@ -0,0 +1 @@ +mirror:Source/python-oslo-service-1.16.1-1.el7.src.rpm diff --git a/openstack/python-wsme/centos/build_srpm.data b/openstack/python-wsme/centos/build_srpm.data new file mode 100644 index 00000000..d3f64f33 --- /dev/null +++ b/openstack/python-wsme/centos/build_srpm.data @@ -0,0 +1 @@ +TIS_PATCH_VER=3 diff --git a/openstack/python-wsme/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch b/openstack/python-wsme/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch new file mode 100644 index 00000000..a3425412 --- /dev/null +++ b/openstack/python-wsme/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch @@ -0,0 +1,25 @@ +From 645004eec0d66fdb5b76738a8077ea93ac7f7148 Mon Sep 17 00:00:00 2001 +From: Don Penney +Date: Mon, 26 Sep 2016 17:32:55 -0400 +Subject: [PATCH] Update package versioning for TIS format + +--- + SPECS/python-wsme.spec | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/SPECS/python-wsme.spec b/SPECS/python-wsme.spec +index 8f7fd22..943c1cd 100644 +--- a/SPECS/python-wsme.spec ++++ b/SPECS/python-wsme.spec +@@ -12,7 +12,7 @@ + + Name: python-%{lpypi_name} + Version: 0.8.0 +-Release: 1%{?dist} ++Release: 1.el7%{?_tis_dist}.%{tis_patch_ver} + Summary: Web Services Made Easy + + License: MIT +-- +1.8.3.1 + diff --git a/openstack/python-wsme/centos/meta_patches/0003-Remove-TOX-calls-from-build.patch b/openstack/python-wsme/centos/meta_patches/0003-Remove-TOX-calls-from-build.patch new file mode 100644 index 00000000..7c637e6e --- /dev/null +++ b/openstack/python-wsme/centos/meta_patches/0003-Remove-TOX-calls-from-build.patch @@ -0,0 +1,29 @@ +From 8e5e778228717039e39be1fb65c63a6fe7c3cd5d Mon Sep 17 00:00:00 2001 +From: Al Bailey +Date: Tue, 6 Mar 2018 09:56:17 -0600 +Subject: [PATCH] Remove TOX calls from build + +--- + SPECS/python-wsme.spec | 6 ------ + 1 
file changed, 6 deletions(-) + +diff --git a/SPECS/python-wsme.spec b/SPECS/python-wsme.spec +index 4a2c360..602bff0 100644 +--- a/SPECS/python-wsme.spec ++++ b/SPECS/python-wsme.spec +@@ -106,12 +106,6 @@ manipulate the request and the response objects. + %{__python3} setup.py install --skip-build --root %{buildroot} + %endif + +-%check +-%{__python2} setup.py test +-%if 0%{?with_python3} +-%{__python3} setup.py test +-%endif +- + %files -n python2-%{lpypi_name} + %doc README.rst examples/ + %license LICENSE +-- +1.8.3.1 + diff --git a/openstack/python-wsme/centos/meta_patches/PATCH_ORDER b/openstack/python-wsme/centos/meta_patches/PATCH_ORDER new file mode 100644 index 00000000..6ef03247 --- /dev/null +++ b/openstack/python-wsme/centos/meta_patches/PATCH_ORDER @@ -0,0 +1,3 @@ +meta-patch-ClientSideError-logging-verbosity-fix.patch +0001-Update-package-versioning-for-TIS-format.patch +0003-Remove-TOX-calls-from-build.patch diff --git a/openstack/python-wsme/centos/meta_patches/meta-patch-ClientSideError-logging-verbosity-fix.patch b/openstack/python-wsme/centos/meta_patches/meta-patch-ClientSideError-logging-verbosity-fix.patch new file mode 100644 index 00000000..72253695 --- /dev/null +++ b/openstack/python-wsme/centos/meta_patches/meta-patch-ClientSideError-logging-verbosity-fix.patch @@ -0,0 +1,36 @@ +From f45245a7435af40a0166e8c5e5fb68383d7774d8 Mon Sep 17 00:00:00 2001 +From: Kam Nasim +Date: Tue, 3 Jan 2017 13:36:15 -0500 +Subject: [PATCH] meta patch for ClientSideError logging verbosity fix. + +--- + SPECS/python-wsme.spec | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/SPECS/python-wsme.spec b/SPECS/python-wsme.spec +index 943c1cd..7f392b4 100644 +--- a/SPECS/python-wsme.spec ++++ b/SPECS/python-wsme.spec +@@ -18,6 +18,10 @@ Summary: Web Services Made Easy + License: MIT + URL: https://pypi.python.org/pypi/WSME + Source0: http://pypi.python.org/packages/source/W/%{pypi_name}/%{pypi_name}-%{version}.tar.gz ++ ++# WRS ++Patch0001: 0001-log-client-side-errors.patch ++ + BuildArch: noarch + + %description +@@ -87,6 +91,8 @@ manipulate the request and the response objects. + %prep + %setup -q -n %{pypi_name}-%{version} + ++# Apply WRS patches ++%patch0001 -p1 + + %build + %{__python2} setup.py build +-- +1.8.3.1 + diff --git a/openstack/python-wsme/centos/patches/0001-log-client-side-errors.patch b/openstack/python-wsme/centos/patches/0001-log-client-side-errors.patch new file mode 100644 index 00000000..efa839ea --- /dev/null +++ b/openstack/python-wsme/centos/patches/0001-log-client-side-errors.patch @@ -0,0 +1,41 @@ +From cf0c831d31b970bf920f3152d50d849193e9154a Mon Sep 17 00:00:00 2001 +From: Kam Nasim +Date: Tue, 3 Jan 2017 13:09:29 -0500 +Subject: [PATCH] change ClientSideError logging verbosity + +Regression introduced in 16.10. Reverts the following + upstream commit since WSME is used by SysInv-api to return ClientSideErrors, + and in the case of CLI commands, no log history for such errors would be + available. + +Reverting commit 94cd1751c7b028898a38fda0689cfce15e2a96e2 + + Author: Chris Dent + Date: Thu Apr 9 14:04:32 2015 +0100 + + Change client-side error logging to debug + + A client-side error (that is something akin to a 4xx HTTP response + code) is something that is common, it is not something that should + cause WARNING level log messages. This change switches to using + DEBUG so that it is easier to filter out the noisy messages. 
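+
+Editor's note (not part of the original commit): the net effect of the
+revert below is that log.warning("Client-side error: %s" % r['faultstring'])
+replaces the log.debug() call, so client-side (4xx) faults returned by
+sysinv-api are recorded at the default log level instead of only when
+debug logging is enabled.
+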
+--- + wsme/api.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/wsme/api.py b/wsme/api.py +index 4aab9a4..2ba70e0 100644 +--- a/wsme/api.py ++++ b/wsme/api.py +@@ -218,7 +218,7 @@ def format_exception(excinfo, debug=False): + else six.text_type(error)) + r = dict(faultcode="Client", + faultstring=faultstring) +- log.debug("Client-side error: %s" % r['faultstring']) ++ log.warning("Client-side error: %s" % r['faultstring']) + r['debuginfo'] = None + return r + else: +-- +1.8.3.1 + diff --git a/openstack/python-wsme/centos/srpm_path b/openstack/python-wsme/centos/srpm_path new file mode 100644 index 00000000..8f04b17e --- /dev/null +++ b/openstack/python-wsme/centos/srpm_path @@ -0,0 +1 @@ +mirror:Source/python-wsme-0.8.0-1.el7.src.rpm diff --git a/openstack/rabbitmq-server/centos/build_srpm.data b/openstack/rabbitmq-server/centos/build_srpm.data new file mode 100644 index 00000000..4968cc7f --- /dev/null +++ b/openstack/rabbitmq-server/centos/build_srpm.data @@ -0,0 +1,3 @@ +COPY_LIST="rabbitmq-server/rabbitmq-server.ocf centos/patches/*" +TIS_PATCH_VER=6 +BUILD_IS_SLOW=3 diff --git a/openstack/rabbitmq-server/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch b/openstack/rabbitmq-server/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch new file mode 100644 index 00000000..f8d7d3eb --- /dev/null +++ b/openstack/rabbitmq-server/centos/meta_patches/0001-Update-package-versioning-for-TIS-format.patch @@ -0,0 +1,31 @@ +From 9eb2a001b8f80d1a66d4ec78d882d7d6cb4dff51 Mon Sep 17 00:00:00 2001 +From: Scott Little +Date: Tue, 24 Jan 2017 12:16:38 -0500 +Subject: [PATCH 4/5] WRS: 0001-Update-package-versioning-for-TIS-format.patch + +Conflicts: + SPECS/rabbitmq-server.spec +--- + SPECS/rabbitmq-server.spec | 4 +--- + 1 file changed, 1 insertion(+), 3 deletions(-) + +diff --git a/SPECS/rabbitmq-server.spec b/SPECS/rabbitmq-server.spec +index 3095f87..b5d53a2 100644 +--- a/SPECS/rabbitmq-server.spec ++++ b/SPECS/rabbitmq-server.spec +@@ -2,11 +2,9 @@ + # We want to install into /usr/lib, even on 64-bit platforms + %global _rabbit_libdir %{_exec_prefix}/lib/rabbitmq + +-%define tis_version r1 +- + Name: rabbitmq-server + Version: 3.6.5 +-Release: 1.%{tis_version}%{?dist} ++Release: 1.el7%{?_tis_dist}.%{tis_patch_ver} + License: MPLv1.1 + Group: Development/Libraries + Source0: http://www.rabbitmq.com/releases/rabbitmq-server/v%{version}/%{name}-%{version}.tar.xz +-- +1.8.3.1 + diff --git a/openstack/rabbitmq-server/centos/meta_patches/0006-Update-rabbitmq-server.logrotate.patch b/openstack/rabbitmq-server/centos/meta_patches/0006-Update-rabbitmq-server.logrotate.patch new file mode 100644 index 00000000..76990dda --- /dev/null +++ b/openstack/rabbitmq-server/centos/meta_patches/0006-Update-rabbitmq-server.logrotate.patch @@ -0,0 +1,25 @@ +From 0934088d588521e4f2ac2dbd28ae2de919368d71 Mon Sep 17 00:00:00 2001 +From: Don Penney +Date: Wed, 7 Feb 2018 13:34:44 -0500 +Subject: [PATCH] Update rabbitmq-server.logrotate + +--- + SOURCES/rabbitmq-server.logrotate | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/SOURCES/rabbitmq-server.logrotate b/SOURCES/rabbitmq-server.logrotate +index 832cdcf..8de4c84 100644 +--- a/SOURCES/rabbitmq-server.logrotate ++++ b/SOURCES/rabbitmq-server.logrotate +@@ -7,6 +7,7 @@ + notifempty + sharedscripts + postrotate +- /usr/sbin/rabbitmqctl -q rotate_logs ++ pgrep beam.smp >/dev/null && /usr/sbin/rabbitmqctl -q rotate_logs ++ true + endscript + } +-- +1.8.3.1 + diff --git 
a/openstack/rabbitmq-server/centos/meta_patches/Allow-rabbitmqctl-to-run-as-root.patch b/openstack/rabbitmq-server/centos/meta_patches/Allow-rabbitmqctl-to-run-as-root.patch new file mode 100644 index 00000000..634967e9 --- /dev/null +++ b/openstack/rabbitmq-server/centos/meta_patches/Allow-rabbitmqctl-to-run-as-root.patch @@ -0,0 +1,27 @@ +From cc25d3100a677e0c69a11975fd30ce72937ad575 Mon Sep 17 00:00:00 2001 +From: Scott Little +Date: Tue, 24 Jan 2017 12:16:38 -0500 +Subject: [PATCH 3/5] WRS: Allow-rabbitmqctl-to-run-as-root.patch + +--- + SOURCES/rabbitmq-script-wrapper | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/SOURCES/rabbitmq-script-wrapper b/SOURCES/rabbitmq-script-wrapper +index 3d2fc49..d039b23 100644 +--- a/SOURCES/rabbitmq-script-wrapper ++++ b/SOURCES/rabbitmq-script-wrapper +@@ -42,7 +42,9 @@ elif [ `id -u` = `id -u rabbitmq` -o "$SCRIPT" = "rabbitmq-plugins" ] ; then + fi + exec /usr/lib/rabbitmq/bin/${SCRIPT} "$@" + elif [ `id -u` = 0 ] ; then +- su rabbitmq -s /bin/sh -c "/usr/lib/rabbitmq/bin/${SCRIPT} ${CMDLINE}" ++ # WRS. Allow to run as root ++ # su rabbitmq -s /bin/sh -c "/usr/lib/rabbitmq/bin/${SCRIPT} ${CMDLINE}" ++ /bin/sh -c "/usr/lib/rabbitmq/bin/${SCRIPT} ${CMDLINE}" + else + /usr/lib/rabbitmq/bin/${SCRIPT} + echo +-- +1.8.3.1 + diff --git a/openstack/rabbitmq-server/centos/meta_patches/PATCH_ORDER b/openstack/rabbitmq-server/centos/meta_patches/PATCH_ORDER new file mode 100644 index 00000000..7e991e6b --- /dev/null +++ b/openstack/rabbitmq-server/centos/meta_patches/PATCH_ORDER @@ -0,0 +1,6 @@ +spec-comments-for-ocf.patch +packstack-fixes.patch +Allow-rabbitmqctl-to-run-as-root.patch +0001-Update-package-versioning-for-TIS-format.patch +Set-root-home-for-rabbitmqctl.patch +0006-Update-rabbitmq-server.logrotate.patch diff --git a/openstack/rabbitmq-server/centos/meta_patches/Set-root-home-for-rabbitmqctl.patch b/openstack/rabbitmq-server/centos/meta_patches/Set-root-home-for-rabbitmqctl.patch new file mode 100644 index 00000000..22f7ab33 --- /dev/null +++ b/openstack/rabbitmq-server/centos/meta_patches/Set-root-home-for-rabbitmqctl.patch @@ -0,0 +1,24 @@ +From d6ed4a9b31a25ae7b803b99f49bb88bb9648ce06 Mon Sep 17 00:00:00 2001 +From: Scott Little +Date: Tue, 24 Jan 2017 12:16:38 -0500 +Subject: [PATCH 5/5] WRS: Set-root-home-for-rabbitmqctl.patch + +--- + SOURCES/rabbitmq-script-wrapper | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/SOURCES/rabbitmq-script-wrapper b/SOURCES/rabbitmq-script-wrapper +index d039b23..6f7450e 100644 +--- a/SOURCES/rabbitmq-script-wrapper ++++ b/SOURCES/rabbitmq-script-wrapper +@@ -44,6 +44,7 @@ elif [ `id -u` = `id -u rabbitmq` -o "$SCRIPT" = "rabbitmq-plugins" ] ; then + elif [ `id -u` = 0 ] ; then + # WRS. 
Allow to run as root + # su rabbitmq -s /bin/sh -c "/usr/lib/rabbitmq/bin/${SCRIPT} ${CMDLINE}" ++ export HOME=${HOME:-/root} + /bin/sh -c "/usr/lib/rabbitmq/bin/${SCRIPT} ${CMDLINE}" + else + /usr/lib/rabbitmq/bin/${SCRIPT} +-- +1.8.3.1 + diff --git a/openstack/rabbitmq-server/centos/meta_patches/packstack-fixes.patch b/openstack/rabbitmq-server/centos/meta_patches/packstack-fixes.patch new file mode 100644 index 00000000..e4435b7c --- /dev/null +++ b/openstack/rabbitmq-server/centos/meta_patches/packstack-fixes.patch @@ -0,0 +1,37 @@ +From 251b011a5ccd75bfeb3ea56bb4d414491b560874 Mon Sep 17 00:00:00 2001 +From: Scott Little +Date: Tue, 24 Jan 2017 12:16:38 -0500 +Subject: [PATCH 2/5] WRS: packstack-fixes.patch + +Conflicts: + SPECS/rabbitmq-server.spec +--- + SPECS/rabbitmq-server.spec | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/SPECS/rabbitmq-server.spec b/SPECS/rabbitmq-server.spec +index f1c3cab..3095f87 100644 +--- a/SPECS/rabbitmq-server.spec ++++ b/SPECS/rabbitmq-server.spec +@@ -28,6 +28,9 @@ Patch6: rabbitmq-server-0006-rabbit_prelaunch-must-use-RABBITMQ_SERVER_ERL_ARGS. + Patch101: rabbitmq-common-0001-Avoid-RPC-roundtrips-while-listing-items.patch + Patch102: rabbitmq-common-0002-Use-proto_dist-from-command-line.patch + ++# WRS Patches ++Patch201: update-rabbitmq-server.service.patch ++ + URL: http://www.rabbitmq.com/ + BuildArch: noarch + BuildRequires: erlang >= %{erlang_minver}, python-simplejson, xmlto, libxslt, python, zip +@@ -72,6 +75,8 @@ cd deps/rabbit_common + %patch102 -p1 + cd ../.. + ++%patch201 -p1 ++ + # We have to remove it until common_test subpackage lands RHOS + rm -f \ + deps/amqp_client/src/rabbit_ct_client_helpers.erl \ +-- +1.8.3.1 + diff --git a/openstack/rabbitmq-server/centos/meta_patches/spec-comments-for-ocf.patch b/openstack/rabbitmq-server/centos/meta_patches/spec-comments-for-ocf.patch new file mode 100644 index 00000000..7a96ef02 --- /dev/null +++ b/openstack/rabbitmq-server/centos/meta_patches/spec-comments-for-ocf.patch @@ -0,0 +1,49 @@ +From 45e477c179ff48c209c7b3210e285e6cddc22137 Mon Sep 17 00:00:00 2001 +From: Scott Little +Date: Tue, 24 Jan 2017 12:16:38 -0500 +Subject: [PATCH 1/5] WRS: spec-comments-for-ocf.patch + +Conflicts: + SPECS/rabbitmq-server.spec +--- + SPECS/rabbitmq-server.spec | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) + +diff --git a/SPECS/rabbitmq-server.spec b/SPECS/rabbitmq-server.spec +index 3a7d940..f1c3cab 100644 +--- a/SPECS/rabbitmq-server.spec ++++ b/SPECS/rabbitmq-server.spec +@@ -2,10 +2,11 @@ + # We want to install into /usr/lib, even on 64-bit platforms + %global _rabbit_libdir %{_exec_prefix}/lib/rabbitmq + ++%define tis_version r1 + + Name: rabbitmq-server + Version: 3.6.5 +-Release: 1%{?dist} ++Release: 1.%{tis_version}%{?dist} + License: MPLv1.1 + Group: Development/Libraries + Source0: http://www.rabbitmq.com/releases/rabbitmq-server/v%{version}/%{name}-%{version}.tar.xz +@@ -15,6 +16,8 @@ Source0: http://www.rabbitmq.com/releases/rabbitmq-server/v%{version}/%{name}-%{ + Source2: rabbitmq-script-wrapper + # curl -O https://raw.githubusercontent.com/lemenkov/rabbitmq-server/cdfc661/packaging/RPMS/Fedora/rabbitmq-server.logrotate + Source3: rabbitmq-server.logrotate ++# WRS modified ocf is located in wrs_patches ++Source4: rabbitmq-server.ocf + Source5: rabbitmq-server.tmpfiles + Patch1: rabbitmq-server-0001-Remove-excessive-sd_notify-code.patch + Patch2: rabbitmq-server-0002-Add-systemd-notification-support.patch +@@ -106,7 +109,7 @@ for app in rabbitmq-defaults rabbitmq-env 
rabbitmq-plugins rabbitmq-server rabbi + ln -s %{_rabbit_libdir}/lib/rabbitmq_server-%{version}/sbin/${app} %{buildroot}%{_rabbit_libdir}/bin/${app} + done + +-install -p -D -m 0755 scripts/rabbitmq-server.ocf %{buildroot}%{_exec_prefix}/lib/ocf/resource.d/rabbitmq/rabbitmq-server ++install -p -D -m 0755 %{S:4} %{buildroot}%{_exec_prefix}/lib/ocf/resource.d/rabbitmq/rabbitmq-server + install -p -D -m 0755 scripts/rabbitmq-server-ha.ocf %{buildroot}%{_exec_prefix}/lib/ocf/resource.d/rabbitmq/rabbitmq-server-ha + + install -p -D -m 0644 %{S:3} %{buildroot}%{_sysconfdir}/logrotate.d/rabbitmq-server +-- +1.8.3.1 + diff --git a/openstack/rabbitmq-server/centos/patches/update-rabbitmq-server.service.patch b/openstack/rabbitmq-server/centos/patches/update-rabbitmq-server.service.patch new file mode 100644 index 00000000..186ec845 --- /dev/null +++ b/openstack/rabbitmq-server/centos/patches/update-rabbitmq-server.service.patch @@ -0,0 +1,32 @@ +From 8840ebc0baa702f612d8dddcdd3271758e2e4c4d Mon Sep 17 00:00:00 2001 +From: Don Penney +Date: Mon, 27 Jun 2016 14:42:41 -0400 +Subject: [PATCH 1/1] Update rabbitmq-server.service + +--- + docs/rabbitmq-server.service.example | 5 ++--- + 1 file changed, 2 insertions(+), 3 deletions(-) + +diff --git a/docs/rabbitmq-server.service.example b/docs/rabbitmq-server.service.example +index 1aa6549..92aa231 100644 +--- a/docs/rabbitmq-server.service.example ++++ b/docs/rabbitmq-server.service.example +@@ -1,4 +1,3 @@ +-# systemd unit example + [Unit] + Description=RabbitMQ broker + After=network.target epmd@0.0.0.0.socket +@@ -6,8 +5,8 @@ Wants=network.target epmd@0.0.0.0.socket + + [Service] + Type=notify +-User=rabbitmq +-Group=rabbitmq ++User=root ++Group=root + NotifyAccess=all + TimeoutStartSec=3600 + WorkingDirectory=/var/lib/rabbitmq +-- +1.8.3.1 + diff --git a/openstack/rabbitmq-server/centos/srpm_path b/openstack/rabbitmq-server/centos/srpm_path new file mode 100644 index 00000000..ffbf7b3b --- /dev/null +++ b/openstack/rabbitmq-server/centos/srpm_path @@ -0,0 +1 @@ +mirror:Source/rabbitmq-server-3.6.5-1.el7.src.rpm diff --git a/openstack/rabbitmq-server/rabbitmq-server/rabbitmq-server b/openstack/rabbitmq-server/rabbitmq-server/rabbitmq-server new file mode 100644 index 00000000..59c7de68 --- /dev/null +++ b/openstack/rabbitmq-server/rabbitmq-server/rabbitmq-server @@ -0,0 +1,251 @@ +#!/bin/sh +# +# rabbitmq-server RabbitMQ broker +# +# chkconfig: - 80 05 +# description: Enable AMQP service provided by RabbitMQ +# + +### BEGIN INIT INFO +# Provides: rabbitmq-server +# Required-Start: $remote_fs $network +# Required-Stop: $remote_fs $network +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Description: RabbitMQ broker +# Short-Description: Enable AMQP service provided by RabbitMQ broker +### END INIT INFO + +DAEMON=/usr/bin/rabbitmq-server +CONTROL=/usr/bin/rabbitmqctl +USER=root +ROTATE_SUFFIX= +INIT_LOG_DIR=/var/log/rabbitmq +PID_FILE=/var/run/rabbitmq/pid +HOME=/root +export HOME + +LOCK_FILE= + +test -x $DAEMON || exit 0 +test -x $CONTROL || exit 0 + +RETVAL=0 + +. /lib/lsb/init-functions + +status_of_proc () { + local pidfile daemon name retval pid pidfromfile + + pidfile= + daemon=_x_ + name=_x_ + while [ $# -gt 0 ]; do + case $1 in + -p) shift ; pidfile=$1 ;; + *) + if [ "$daemon" = "_x_" ]; then + daemon="$1" + elif [ "$name" = "_x_" ]; then + name="$1" + fi + ;; + esac + shift + done + + if [ ! -e $pidfile ]; then + for d in $daemon; do + rc=`pidof $d > /dev/null` + if [ $? 
= 0 ] ; then + return 0 + fi + done + return 1 + fi + + pidfromfile=`cat $pidfile` + kill -0 $pidfromfile 2> /dev/null + retval=$? + if [ "$daemon" = "_x_" ]; then + # only the pid from the file is used + if [ $retval = 0 ]; then + log_success_msg "$name is running" + else + log_failure_msg "$name is not running" + fi + fi + + for pid in `pidof $daemon` ; do + if test "$pidfromfile" -eq "$pid" 2> /dev/null; then + log_success_msg "$name is running" + return 0 + fi + done + log_failure_msg "$name is not running" + return 1 +} + +ensure_pid_dir () { + PID_DIR=`dirname ${PID_FILE}` + if [ ! -d ${PID_DIR} ] ; then + mkdir -p ${PID_DIR} + chown -R ${USER}:${USER} ${PID_DIR} + chmod 755 ${PID_DIR} + fi +} + +remove_pid () { + rm -f ${PID_FILE} + rmdir `dirname ${PID_FILE}` || : +} + +start_rabbitmq () { + status_rabbitmq quiet || true + if [ $RETVAL = 0 ] ; then + echo RabbitMQ is currently running + else + RETVAL=0 + ensure_pid_dir + + # There seems to be a race condition around the creation of the Erlang cookie file, + # where both the rabbitmq-server launch and the subsequent "rabbitmqctl wait" will + # try to create the file if it doesn't exist. This can result in a rare failure during + # initial configuration that reports the cookie file does not have the correct + # permissions. To avoid any possible race condition, we'll make a call to + # "rabbitmqctl status" first, which will create the cookie for us. + /sbin/rabbitmqctl status >/dev/null 2>&1 + + # RabbitMQ uses Erlang timers. It appears the Erlang module will abort if it detects discrepancies + # in timers, which is believed to occur sometimes when the process is switched to a different processor + # (it compares a time value against an earlier value retrieved from a possibly different processor). + # To avoid this potential issue, we'll restrict rabbitmq-server to a single CPU for this instance. + # Note: Since SM does not use this script for launching rabbitmq-server, this will only affect the initial + # launch by packstack/puppet for configuration purposes. + RABBITMQ_PID_FILE=$PID_FILE start-stop-daemon \ + --pidfile=$PID_FILE \ + --background \ + --start \ + --startas /bin/bash -- \ + -c "exec taskset -c 0 $DAEMON >> ${INIT_LOG_DIR}/startup_log 2>> ${INIT_LOG_DIR}/startup_err" + + # Wait a second to give rabbit a chance to get started, then check it + sleep 1 + taskset -c 0 $CONTROL wait $PID_FILE >> ${INIT_LOG_DIR}/wait_log 2>&1 + RETVAL=$? + case "$RETVAL" in + 0) + echo SUCCESS + if [ -n "$LOCK_FILE" ] ; then + touch $LOCK_FILE + fi + echo "`date` - rabbitmq successfully started" >> ${INIT_LOG_DIR}/startup_log + # only emit the event on boot + [ -n "$UPSTART_JOB" ] && initctl emit --no-wait rabbitmq-server-running || true + ;; + *) + remove_pid + echo FAILED - check ${INIT_LOG_DIR}/startup_\{log, _err\} + echo "`date` - rabbitmq failed to start" >> ${INIT_LOG_DIR}/startup_err + RETVAL=1 + ;; + esac + fi +} + +stop_rabbitmq () { + status_rabbitmq quiet || true + if [ $RETVAL = 0 ] ; then + if [ ! -e $PID_FILE ] ; then + for d in beam beam.smp; do + rc=`pidof $d 2> /dev/null` + if [ $? = 0 ] ; then + kill $rc + return + fi + done + fi + start-stop-daemon --pidfile=$PID_FILE --stop + RETVAL=$?
+ if [ $RETVAL = 0 ] ; then + remove_pid + if [ -n "$LOCK_FILE" ] ; then + rm -f $LOCK_FILE + fi + echo "`date` - rabbitmq successfully stopped" >> ${INIT_LOG_DIR}/shutdown_log + else + echo FAILED - check ${INIT_LOG_DIR}/shutdown_log, _err + echo "`date` - rabbitmq failed to stop" >> ${INIT_LOG_DIR}/shutdown_err + fi + else + echo RabbitMQ is not running + RETVAL=0 + fi +} + +status_rabbitmq() { + if [ "$1" = "quiet" ] ; then + status_of_proc -p $PID_FILE "beam beam.smp" rabbitmq-server > /dev/null + else + status_of_proc -p $PID_FILE "beam beam.smp" rabbitmq-server || exit 1 + fi + RETVAL=$? +} + +rotate_logs_rabbitmq() { + $CONTROL rotate_logs ${ROTATE_SUFFIX} + if [ $? != 0 ] ; then + RETVAL=1 + fi +} + +restart_running_rabbitmq () { + status_rabbitmq quiet + if [ $RETVAL = 0 ] ; then + restart_rabbitmq + else + echo RabbitMQ is not running + RETVAL=0 + fi +} + +restart_rabbitmq() { + stop_rabbitmq + start_rabbitmq +} + +case "$1" in + start) + echo -n "Starting $DESC: " + start_rabbitmq + echo "$NAME." + ;; + stop) + echo -n "Stopping $DESC: " + stop_rabbitmq + echo "$NAME." + ;; + status) + status_rabbitmq + ;; + rotate-logs) + echo -n "Rotating log files for $DESC: " + rotate_logs_rabbitmq + ;; + force-reload|reload|restart) + echo -n "Restarting $DESC: " + restart_rabbitmq + echo "$NAME." + ;; + condrestart|try-restart) + echo -n "Restarting $DESC: " + restart_running_rabbitmq + echo "$NAME." + ;; + *) + echo "Usage: $0 {start|stop|status|rotate-logs|restart|condrestart|try-restart|reload|force-reload}" >&2 + RETVAL=1 + ;; +esac + +exit $RETVAL diff --git a/openstack/rabbitmq-server/rabbitmq-server/rabbitmq-server.ocf b/openstack/rabbitmq-server/rabbitmq-server/rabbitmq-server.ocf new file mode 100644 index 00000000..dde9e292 --- /dev/null +++ b/openstack/rabbitmq-server/rabbitmq-server/rabbitmq-server.ocf @@ -0,0 +1,413 @@ +#!/bin/sh +## The contents of this file are subject to the Mozilla Public License +## Version 1.1 (the "License"); you may not use this file except in +## compliance with the License. You may obtain a copy of the License +## at http://www.mozilla.org/MPL/ +## +## Software distributed under the License is distributed on an "AS IS" +## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +## the License for the specific language governing rights and +## limitations under the License. +## +## The Original Code is RabbitMQ. +## +## The Initial Developer of the Original Code is VMware, Inc. +## Copyright (c) 2007-2013 VMware, Inc. All rights reserved. +## + +## +## OCF Resource Agent compliant rabbitmq-server resource script. +## + +## OCF instance parameters +## OCF_RESKEY_server +## OCF_RESKEY_ctl +## OCF_RESKEY_nodename +## OCF_RESKEY_ip +## OCF_RESKEY_port +## OCF_RESKEY_config_file +## OCF_RESKEY_log_base +## OCF_RESKEY_mnesia_base +## OCF_RESKEY_server_start_args +## OCF_RESKEY_pid_file +## WRS +# OCF_RESKEY_env_config_file +# OCF_RESKEY_dist_port + +####################################################################### +# Initialization: + +: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/resource.d/heartbeat} +. ${OCF_FUNCTIONS_DIR}/.ocf-shellfuncs + +####################################################################### + +. 
/etc/platform/platform.conf + +OCF_RESKEY_server_default="/usr/sbin/rabbitmq-server" +OCF_RESKEY_ctl_default="/usr/sbin/rabbitmqctl" +OCF_RESKEY_nodename_default="rabbit@localhost" +OCF_RESKEY_log_base_default="/var/log/rabbitmq" +OCF_RESKEY_pid_file_default="/var/run/rabbitmq/pid" +: ${OCF_RESKEY_server=${OCF_RESKEY_server_default}} +: ${OCF_RESKEY_ctl=${OCF_RESKEY_ctl_default}} +: ${OCF_RESKEY_nodename=${OCF_RESKEY_nodename_default}} +: ${OCF_RESKEY_log_base=${OCF_RESKEY_log_base_default}} +: ${OCF_RESKEY_pid_file=${OCF_RESKEY_pid_file_default}} + +meta_data() { + cat <<END +<?xml version="1.0"?> +<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd"> +<resource-agent name="rabbitmq-server"> +<version>1.0</version> + +<longdesc lang="en"> +Resource agent for RabbitMQ-server +</longdesc> + +<shortdesc lang="en">Resource agent for RabbitMQ-server</shortdesc> + +<parameters> +<parameter name="server" unique="0" required="0"> +<longdesc lang="en"> +The path to the rabbitmq-server script +</longdesc> +<shortdesc lang="en">Path to rabbitmq-server</shortdesc> +<content type="string" default="${OCF_RESKEY_server_default}" /> +</parameter> + +<parameter name="ctl" unique="0" required="0"> +<longdesc lang="en"> +The path to the rabbitmqctl script +</longdesc> +<shortdesc lang="en">Path to rabbitmqctl</shortdesc> +<content type="string" default="${OCF_RESKEY_ctl_default}" /> +</parameter> + +<parameter name="nodename" unique="0" required="0"> +<longdesc lang="en"> +The node name for rabbitmq-server +</longdesc> +<shortdesc lang="en">Node name</shortdesc> +<content type="string" default="${OCF_RESKEY_nodename_default}" /> +</parameter> + +<parameter name="ip" unique="0" required="0"> +<longdesc lang="en"> +The IP address for rabbitmq-server to listen on +</longdesc> +<shortdesc lang="en">IP Address</shortdesc> +<content type="string" default="" /> +</parameter> + +<parameter name="port" unique="0" required="0"> +<longdesc lang="en"> +The IP Port for rabbitmq-server to listen on +</longdesc> +<shortdesc lang="en">IP Port</shortdesc> +<content type="string" default="" /> +</parameter> + +<parameter name="config_file" unique="0" required="0"> +<longdesc lang="en"> +Location of the config file (without the .config suffix) +</longdesc> +<shortdesc lang="en">Config file path (without the .config suffix)</shortdesc> +<content type="string" default="" /> +</parameter> + +<parameter name="log_base" unique="0" required="0"> +<longdesc lang="en"> +Location of the directory under which logs will be created +</longdesc> +<shortdesc lang="en">Log base path</shortdesc> +<content type="string" default="${OCF_RESKEY_log_base_default}" /> +</parameter> + +<parameter name="mnesia_base" unique="0" required="0"> +<longdesc lang="en"> +Location of the directory under which mnesia will store data +</longdesc> +<shortdesc lang="en">Mnesia base path</shortdesc> +<content type="string" default="" /> +</parameter> + +<parameter name="server_start_args" unique="0" required="0"> +<longdesc lang="en"> +Additional arguments provided to the server on startup +</longdesc> +<shortdesc lang="en">Server start arguments</shortdesc> +<content type="string" default="" /> +</parameter> + +<parameter name="pid_file" unique="0" required="0"> +<longdesc lang="en"> +Location of the file in which the pid will be stored +</longdesc> +<shortdesc lang="en">Pid file path</shortdesc> +<content type="string" default="${OCF_RESKEY_pid_file_default}" /> +</parameter> +</parameters> + +<actions> +<action name="start" timeout="600" /> +<action name="stop" timeout="120" /> +<action name="status" timeout="20" interval="10" /> +<action name="monitor" timeout="20" interval="10" /> +<action name="validate-all" timeout="30" /> +<action name="meta-data" timeout="5" /> +</actions> +</resource-agent> +END +} + +rabbit_usage() { + cat <<END +usage: $0 {start|stop|status|monitor|validate-all|meta-data} + +Expects to have a fully populated OCF RA-compliant environment set. +END +} + +rabbit_status() { + rabbitmqctl_action "status" +} + +rabbitmqctl_action() { + local rc + local action + action=$1 + $RABBITMQ_CTL $NODENAME_ARG $action > /dev/null 2> /dev/null + rc=$? + case "$rc" in + 0) + ocf_log debug "RabbitMQ server is running normally" + return $OCF_SUCCESS + ;; + 2) + ocf_log debug "RabbitMQ server is not running" + return $OCF_NOT_RUNNING + ;; + *) + ocf_log err "Unexpected return from rabbitmqctl $NODENAME_ARG $action: $rc" + exit $OCF_ERR_GENERIC + esac +} + +rabbit_start() { + local rc + + if rabbit_status; then + ocf_log info "Resource already running." + return $OCF_SUCCESS + fi + + export_vars + + # Increase the maximum number of file descriptors that can be open at + # once - required for large systems. + ulimit -n 8192 + if [ "${system_type}" = "All-in-one" ]; then + # Rabbit/beam related tasks should be on platform cores from the get-go. + # If they are affined to all cores during the initialization sequence of AIO, + # the system will end up with many extra beam threads that are not in use. + source /etc/init.d/cpumap_functions.sh + PLATFORM_CPULIST=$(get_platform_cpu_list) + setsid sh -c "exec taskset -c ${PLATFORM_CPULIST} $RABBITMQ_SERVER >> ${RABBITMQ_LOG_BASE}/startup_log 2>> ${RABBITMQ_LOG_BASE}/startup_err" & + else + setsid sh -c "$RABBITMQ_SERVER >> ${RABBITMQ_LOG_BASE}/startup_log 2>> ${RABBITMQ_LOG_BASE}/startup_err" & + fi + + # Wait for the server to come up. + # Let the CRM/LRM time us out if required + rabbit_wait $RABBITMQ_PID_FILE + rc=$? + if [ "$rc" != $OCF_SUCCESS ]; then + remove_pid + ocf_log info "rabbitmq-server start failed: $rc" + exit $OCF_ERR_GENERIC + fi + + return $OCF_SUCCESS +} + +rabbit_stop() { + local rc + + if ! rabbit_status; then + ocf_log info "Resource not running." + return $OCF_SUCCESS + fi + + $RABBITMQ_CTL stop + rc=$? + + if [ "$rc" != 0 ]; then + ocf_log err "rabbitmq-server stop command failed: $RABBITMQ_CTL stop, $rc" + return $rc + fi + + # Spin waiting for the server to shut down. + # Let the CRM/LRM time us out if required + stop_wait=1 + while [ $stop_wait = 1 ]; do + rabbit_status + rc=$?
+ if [ "$rc" = $OCF_NOT_RUNNING ]; then + remove_pid + stop_wait=0 + break + elif [ "$rc" != $OCF_SUCCESS ]; then + ocf_log info "rabbitmq-server stop failed: $rc" + exit $OCF_ERR_GENERIC + fi + sleep 1 + done + + return $OCF_SUCCESS +} + +rabbit_monitor() { + rabbit_status + return $? +} + +case $__OCF_ACTION in + meta-data) + meta_data + exit $OCF_SUCCESS + ;; + usage|help) + rabbit_usage + exit $OCF_SUCCESS + ;; +esac + +if ocf_is_probe; then + rabbit_validate_partial +else + rabbit_validate_full +fi + +export_vars + +case $__OCF_ACTION in + start) + rabbit_start + ;; + stop) + rabbit_stop + ;; + status|monitor) + rabbit_monitor + ;; + validate-all) + exit $OCF_SUCCESS + ;; + *) + rabbit_usage + exit $OCF_ERR_UNIMPLEMENTED + ;; +esac + +exit $?