upstream/openstack/python-heat/python-heat/templates/hot/scenarios/VMAutoScaling.yaml

################################################################################
# Copyright (c) 2014-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
################################################################################
#
#   Objective:
#      Demonstrates dynamically resizing the guest vcpus of a VM, without
#      service impact, when the policy criteria described in the template
#      are met.
#
#   Pre-Reqs:
#      Normal lab setup.
#      Ability to launch a VM
#      A keypair called: controller-0 (nova keypair-list)
#      A flavor with the following special attributes (see the sample
#      commands after this list):
#         hw:wrs:min_vcpus = 1
#         hw:cpu_policy = dedicated
#         a minimum of 2 vcpus
#      A glance image called: wrl6 (glance image-list)
#      A network called: private-net0 (neutron net-list)
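#
#      A suitable flavor could be created along the following lines. This is
#      an illustrative sketch only: the flavor name, RAM and disk sizes are
#      placeholders, not values required by this template.
#         nova flavor-create scaling.flavor auto 1024 2 2
#         nova flavor-key scaling.flavor set hw:cpu_policy=dedicated hw:wrs:min_vcpus=1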
#
#   Mandatory Template Parameters:
#      None
#
#   Tenant Considerations:
#      A tenant can use this template provided the keypair, subnet and
#      network it references are accessible to that tenant.
#
#   Sample CLI syntax:
#      heat stack-create -f VMAutoScaling.yaml STACK
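#
#      With the unified OpenStack client (if available) the equivalent is:
#         openstack stack create -t VMAutoScaling.yaml STACK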
#
#   Expected Outcome:
#      A new stack (heat stack-list)
#      A new nova VM (nova list)
#      Two aodh alarms corresponding to the high and low watermarks
#      (aodh alarm list)
#
################################################################################
heat_template_version: 2015-04-30
description: >
  Creates a HEAT autoscaling stack and a VM instance which is able to scale
  up/down the number of guest vcpus using vcpu_util samples from the guest
  instance.
parameters:
  KEYPAIR:
    description: Name of existing KeyPair to enable SSH access to VMs
    type: string
    default: controller-0
    constraints:
      - custom_constraint: nova.keypair
  KEYPAIR_ADMIN_USER:
    description: Name of user account to inject ssh keys from keypair
    type: string
    default: 'ec2-user'
  FLAVOR:
    description: Nova flavor to use. (nova flavor-list)
    type: string
    default: m1.small
    constraints:
      - custom_constraint: nova.flavor
  IMAGE:
    description: the name or uuid of the server image in glance
    type: string
    default: wrl6
    constraints:
      - custom_constraint: glance.image
  NETWORK:
    description: name of private network to use (neutron net-list)
    type: string
    default: private-net0
    constraints:
      - custom_constraint: neutron.network
  METER_NAME:
    description: Gnocchi metric to query when determining autoscaling
    type: string
    default: vcpu_util
  RESOURCE_TYPE:
    description: Gnocchi resource type to use for the query
    type: string
    default: instance
  SCALE_UP_VALUE:
    description: Metric value that will trigger a scale up if exceeded
    type: string
    default: '60'
  SCALE_DOWN_VALUE:
    description: Metric value that will trigger a scale down if below
    type: string
    default: '5'
  VM_NAME:
    description: Name to label the VM instance
    type: string
    default: 'guest-1'
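# All of the parameters above have defaults, so none are mandatory. Any of
# them can be overridden at stack creation time; for example (the flavor and
# VM names below are placeholders):
#   heat stack-create -f VMAutoScaling.yaml -P FLAVOR=small.2vcpu -P VM_NAME=guest-2 STACK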
resources:
  Srv:
    type: OS::Nova::Server
    properties:
      name: { get_param: VM_NAME }
      image: { get_param: IMAGE }
      flavor: { get_param: FLAVOR }
      key_name: { get_param: KEYPAIR }
      admin_user: { get_param: KEYPAIR_ADMIN_USER }
      metadata:
        {"metering.server_group": { get_param: "OS::stack_id" }}
      networks:
        - { network: { get_param: NETWORK }, vif-model: virtio }
  CPUAlarmHigh:
    type: OS::Aodh::GnocchiAggregationByResourcesAlarm
    properties:
      description: Scale up if the 5 minute average of the meter is above the threshold
      metric: { get_param: METER_NAME }
      aggregation_method: mean
      granularity: 300
      evaluation_periods: '1'
      threshold: { get_param: SCALE_UP_VALUE }
      resource_type: { get_param: RESOURCE_TYPE }
      repeat_actions: 'True'
      comparison_operator: gt
      alarm_actions:
        - { get_attr: [SrvScaleUpPolicy, AlarmUrl] }
      # The gnocchi alarm resource automatically prepends metadata.metering
      # or metadata.user_metadata to the query, based on the meter type.
      query:
        str_replace:
          template: '{"=": {"server_group": "stack_id"}}'
          params:
            stack_id: { get_param: "OS::stack_id" }
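  # Once the stack is created, the final alarm definitions (including the
  # query as stored by aodh) can be inspected with, for example:
  #   aodh alarm list
  #   aodh alarm show <alarm-id>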
  CPUAlarmLow:
    type: OS::Aodh::GnocchiAggregationByResourcesAlarm
    properties:
      description: Scale down if the 5 minute average of the meter is below the threshold
      metric: { get_param: METER_NAME }
      aggregation_method: mean
      granularity: 300
      evaluation_periods: '1'
      threshold: { get_param: SCALE_DOWN_VALUE }
      resource_type: { get_param: RESOURCE_TYPE }
      repeat_actions: 'True'
      comparison_operator: lt
      alarm_actions:
        - { get_attr: [SrvScaleDownPolicy, AlarmUrl] }
      # The gnocchi alarm resource automatically prepends metadata.metering
      # or metadata.user_metadata to the query, based on the meter type.
      query:
        str_replace:
          template: '{"=": {"server_group": "stack_id"}}'
          params:
            stack_id: { get_param: "OS::stack_id" }
  SrvScaleUpPolicy:
    type: OS::WR::ScalingPolicy
    properties:
      ServerName: { get_resource: Srv }
      ScalingResource: 'cpu'
      ScalingDirection: 'up'
      Cooldown: '60'
  SrvScaleDownPolicy:
    type: OS::WR::ScalingPolicy
    properties:
      ServerName: { get_resource: Srv }
      ScalingResource: 'cpu'
      ScalingDirection: 'down'
      Cooldown: '60'
outputs:
  gnocchi_query:
    value:
      str_replace:
        template: >
          gnocchi measures aggregation --resource-type resourcetype
          --query '"server_group"="stackval"'
          --granularity 300 --aggregation mean -m metric
        params:
          resourcetype: { get_param: RESOURCE_TYPE }
          metric: { get_param: METER_NAME }
          stackval: { get_param: "OS::stack_id" }
    description: >
      This is a Gnocchi query for statistics on the vcpu_util measurements
      for the OS::Nova::Server instance in this stack. The --resource-type
      option selects the type of Gnocchi resource. The --query parameter
      filters resources according to their attributes. When a VM's metadata
      includes an item of the form metering.server_group=X, the corresponding
      Gnocchi resource has an attribute named server_group that can be
      queried with 'server_group="X"'. In this case the stack gives its VM
      metadata of the form metering.server_group=X, where X is this stack's
      ID.
  manual_scale_up:
    value:
      str_replace:
        template: >
          curl -X POST "scale_up_url"
        params:
          scale_up_url: { get_attr: [SrvScaleUpPolicy, AlarmUrl] }
  manual_scale_down:
    value:
      str_replace:
        template: >
          curl -X POST "scale_down_url"
        params:
          scale_down_url: { get_attr: [SrvScaleDownPolicy, AlarmUrl] }
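
# The manual_scale_up / manual_scale_down outputs above resolve to pre-signed
# alarm URLs. After the stack is created they can be retrieved and triggered
# by hand, for example (STACK is the stack name used at creation time):
#   heat output-show STACK manual_scale_up
#   curl -X POST "<url returned by the previous command>"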