Fix additional puppet-lint warnings and errors

This update addresses the following errors and warnings
from puppet-lint:
- 140chars
- case_without_default
- ensure_first_param
- inherits_across_namespaces
- parameter_order
- single_quote_string_with_variables
- variable_is_lowercase
- variable_scope
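
For example, the single_quote_string_with_variables fixes replace
single quotes with double quotes wherever a string interpolates a
variable, since Puppet treats ${...} inside single quotes as
literal text (taken from the cinder change below):

    # before: the resource title contains the literal text '${cinder_disk}'
    -> file_line { 'blacklist ${cinder_disk} automount':

    # after: the disk name is interpolated into the title
    -> file_line { "blacklist ${cinder_disk} automount":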

In the case of variable_is_lowercase, the compute.pp manifest
has variables that embed hugepage sizes, such as 2M and 1G, in
their names (for example, $nr_hugepages_2M). These have been
left as-is, wrapped in lint:ignore comments for this check,
because the uppercase size suffix is part of the variable's
meaning.
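
The check is disabled only around the affected classes, using
puppet-lint's block ignore directives, as in compute.pp below:

    # lint:ignore:variable_is_lowercase
    class platform::compute::hugepage::params (
      $nr_hugepages_2M = undef,
      $nr_hugepages_1G = undef,
      ...
    # lint:endignore:variable_is_lowercase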

For the 140chars check, certain long lines have been left as-is,
with lint:ignore comments, because they contain long commands
that cannot easily be split. These can be revisited in a future
update to break up the lines and remove the lint:ignore
directives.
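
For a single long line, the directive is appended inline, as in
the kubernetes manifest below:

    command => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf ...', # lint:ignore:140chars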

Change-Id: I37809bacb43818e0956f9f434c30c48e05017325
Story: 2004515
Task: 28685
Signed-off-by: Don Penney <don.penney@windriver.com>
Author: Don Penney <don.penney@windriver.com>
Date: 2018-12-27 16:23:13 -06:00
Commit: 9a3264acaa (parent: e6c0e0af8c)
27 changed files with 88 additions and 66 deletions

@@ -311,7 +311,7 @@ class openstack::cinder::lvm::filesystem::drbd (
     owner => 'root',
     group => 'root',
   }
-  -> file_line { 'blacklist ${cinder_disk} automount':
+  -> file_line { "blacklist ${cinder_disk} automount":
     ensure => present,
     line   => $cinder_disk,
     path   => '/etc/udev/mount.blacklist',
@@ -455,10 +455,10 @@ class openstack::cinder::lvm(
 }
 define openstack::cinder::backend::ceph(
-  $backend_enabled = false,
   $backend_name,
-  $rbd_user = 'cinder',
   $rbd_pool,
+  $backend_enabled = false,
+  $rbd_user = 'cinder',
   $rbd_ceph_conf = '/etc/ceph/ceph.conf'
 ) {
@@ -581,9 +581,9 @@ class openstack::cinder::haproxy
 define openstack::cinder::api::backend(
-  $type_enabled = false,
+  $backend_name,
   $type_name,
-  $backend_name
+  $type_enabled = false,
 ) {
   # Run it on the active controller, otherwise the prefetch step tries to query
   # cinder and can fail
@@ -681,7 +681,7 @@ class openstack::cinder::pre {
 class openstack::cinder::post
-  inherits openstack::cinder::params {
+  inherits ::openstack::cinder::params {
   # Ensure that phases are marked as complete
   if $is_initial_cinder {

@@ -1,5 +1,14 @@
-class openstack::client
-  inherits ::platform::client::params {
+class openstack::client {
+  include ::platform::client::params
+
+  $admin_username = $::platform::client::params::admin_username
+  $identity_auth_url = $::platform::client::params::identity_auth_url
+  $identity_region = $::platform::client::params::identity_region
+  $identity_api_version = $::platform::client::params::identity_api_version
+  $admin_user_domain = $::platform::client::params::admin_user_domain
+  $admin_project_domain = $::platform::client::params::admin_project_domain
+  $admin_project_name = $::platform::client::params::admin_project_name
+  $keystone_identity_region = $::platform::client::params::keystone_identity_region
+
   include ::platform::client::credentials::params
   $keyring_file = $::platform::client::credentials::params::keyring_file

@@ -1,7 +1,7 @@
 class openstack::glance::params (
+  $api_host,
   $service_enabled = true,
   $api_port = 9292,
-  $api_host,
   $region_name = undef,
   $service_type = 'image',
   $glance_directory = '/opt/cgcs/glance',

@@ -54,7 +54,9 @@ class openstack::heat
   # skip the check if cinder region name has not been configured
   if ($::openstack::cinder::params::region_name != undef and
       $::openstack::cinder::params::region_name != $::platform::params::region_2_name) {
-    $shared_service_cinder = [$::openstack::cinder::params::service_type, $::openstack::cinder::params::service_type_v2, $::openstack::cinder::params::service_type_v3]
+    $shared_service_cinder = [$::openstack::cinder::params::service_type,
+                              $::openstack::cinder::params::service_type_v2,
+                              $::openstack::cinder::params::service_type_v3]
   } else {
     $shared_service_cinder = []
   }

@@ -1,9 +1,11 @@
 class openstack::horizon::params (
+  $secret_key,
+  $openstack_host,
   $enable_https = false,
   $lockout_period = 300,
   $lockout_retries = 3,
-  $secret_key,
   $horizon_ssl = false,
   $horizon_cert = undef,
   $horizon_key = undef,
@@ -13,8 +15,6 @@ class openstack::horizon::params (
   $neutron_enable_firewall = false,
   $neutron_enable_vpn = false,
-  $openstack_host,
   $tpm_object = undef,
   $tpm_engine = '/usr/lib64/openssl/engines/libtpm2.so',
 ) { }

@@ -1,10 +1,10 @@
 class openstack::keystone::params(
   $api_version,
-  $api_port = 5000,
-  $admin_port = 5000,
   $identity_uri,
   $auth_uri,
   $host_url,
+  $api_port = 5000,
+  $admin_port = 5000,
   $region_name = undef,
   $system_controller_region = undef,
   $service_name = 'openstack-keystone',
@@ -298,7 +298,7 @@ class openstack::keystone::endpointgroup
     mode    => '0640',
     content => template('openstack/keystone-systemcontroller-filter.erb'),
   }
-  -> exec { 'endpointgroup-${reference_region}-command':
+  -> exec { "endpointgroup-${reference_region}-command":
     cwd       => '/etc/keystone',
     logoutput => true,
     provider  => shell,
@@ -306,7 +306,7 @@ class openstack::keystone::endpointgroup
     command => template('openstack/keystone-defaultregion.erb'),
     path    => ['/usr/bin/', '/bin/', '/sbin/', '/usr/sbin/'],
   }
-  -> exec { 'endpointgroup-${system_controller_region}-command':
+  -> exec { "endpointgroup-${system_controller_region}-command":
     cwd       => '/etc/keystone',
     logoutput => true,
     provider  => shell,

@@ -1,4 +1,7 @@
 class openstack::murano::params (
+  $tcp_listen_options,
+  $rabbit_tcp_listen_options,
+  $rabbit_cipher_list,
   $api_port = 8082,
   $auth_password = 'guest',
   $auth_user = 'guest',
@@ -11,9 +14,6 @@ class openstack::murano::params (
   $rabbit_normal_port = '5672',
   $rabbit_ssl_port = '5671',
   $rabbit_certs_dir = '/etc/ssl/private/murano-rabbit',
-  $tcp_listen_options,
-  $rabbit_tcp_listen_options,
-  $rabbit_cipher_list,
   $tlsv2 = 'tlsv1.2',
   $tlsv1 = 'tlsv1.1',
   $ssl_fail_if_no_peer_cert = true,

@@ -279,9 +279,9 @@ class openstack::neutron::firewall
     }
   } else {
     platform::firewall::rule { 'ryu-bgp-port':
+      ensure       => absent,
       service_name => 'neutron',
       ports        => $bgp_port,
-      ensure       => absent
     }
   }
 }

@@ -149,7 +149,7 @@ class openstack::nova::compute (
   }
   if ! $host_private_key_file {
-    fail("Unable to determine name of private key file. Type specified was '${host_key_type}' but should be one of: ssh-rsa, ssh-dsa, ssh-ecdsa.")
+    fail("Unable to determine name of private key file. Type specified was '${host_key_type}' but should be one of: ssh-rsa, ssh-dsa, ssh-ecdsa.") # lint:ignore:140chars
   }
   $host_public_key_file = $host_key_type ? {
@@ -160,7 +160,7 @@ class openstack::nova::compute (
   }
   if ! $host_public_key_file {
-    fail("Unable to determine name of public key file. Type specified was '${host_key_type}' but should be one of: ssh-rsa, ssh-dsa, ssh-ecdsa.")
+    fail("Unable to determine name of public key file. Type specified was '${host_key_type}' but should be one of: ssh-rsa, ssh-dsa, ssh-ecdsa.") # lint:ignore:140chars
   }
   file { '/etc/ssh':
@@ -192,7 +192,7 @@ class openstack::nova::compute (
   }
   if ! $migration_private_key_file {
-    fail("Unable to determine name of private key file. Type specified was '${migration_key_type}' but should be one of: ssh-rsa, ssh-dsa, ssh-ecdsa.")
+    fail("Unable to determine name of private key file. Type specified was '${migration_key_type}' but should be one of: ssh-rsa, ssh-dsa, ssh-ecdsa.") # lint:ignore:140chars
   }
   $migration_auth_options = [

@@ -22,7 +22,17 @@ class platform::compute::grub::params (
   $cpu_options = '',
   $m_hugepages = 'hugepagesz=2M hugepages=0',
   $default_pgsz = 'default_hugepagesz=2M',
-  $keys = ['kvm-intel.eptad', 'default_hugepagesz', 'hugepagesz', 'hugepages', 'isolcpus', 'nohz_full', 'rcu_nocbs', 'kthread_cpus', 'irqaffinity'],
+  $keys = [
+    'kvm-intel.eptad',
+    'default_hugepagesz',
+    'hugepagesz',
+    'hugepages',
+    'isolcpus',
+    'nohz_full',
+    'rcu_nocbs',
+    'kthread_cpus',
+    'irqaffinity',
+  ],
 ) {
   if $::is_broadwell_processor {
@@ -179,6 +189,7 @@ class platform::compute::hugetlbf {
   }
 }
+# lint:ignore:variable_is_lowercase
 class platform::compute::hugepage::params (
   $nr_hugepages_2M = undef,
   $nr_hugepages_1G = undef,
@@ -242,6 +253,7 @@ class platform::compute::allocate
     }
   }
 }
+# lint:endignore:variable_is_lowercase
 class platform::compute::extend
   inherits ::platform::compute::hugepage::params {

@@ -1,12 +1,12 @@
 class platform::drbd::params (
-  $automount = false,
-  $ha_primary = false,
-  $initial_setup = false,
-  $fs_type = 'ext4',
   $link_speed,
   $link_util,
   $num_parallel,
   $rtt_ms,
+  $automount = false,
+  $ha_primary = false,
+  $initial_setup = false,
+  $fs_type = 'ext4',
   $cpumask = false,
 ) {
   include ::platform::params

@@ -1,4 +1,5 @@
 define platform::firewall::rule (
+  $service_name,
   $chain = 'INPUT',
   $destination = undef,
   $ensure = present,
@@ -7,7 +8,6 @@ define platform::firewall::rule (
   $outiface = undef,
   $ports = undef,
   $proto = 'tcp',
-  $service_name,
   $table = undef,
   $tosource = undef,
 ) {

@@ -1,7 +1,7 @@
 class platform::haproxy::params (
-  $enable_https = false,
   $private_ip_address,
   $public_ip_address,
+  $enable_https = false,
   $global_options = undef,
   $tpm_object = undef,

@@ -10,7 +10,7 @@ class platform::helm
   # TODO(jrichard): Upversion tiller image to v2.11.1 once released.
   -> exec { 'load tiller docker image':
-    command   => 'docker image pull gcr.io/kubernetes-helm/tiller@sha256:022ce9d4a99603be1d30a4ca96a7fa57a45e6f2ef11172f4333c18aaae407f5b',
+    command   => 'docker image pull gcr.io/kubernetes-helm/tiller@sha256:022ce9d4a99603be1d30a4ca96a7fa57a45e6f2ef11172f4333c18aaae407f5b', # lint:ignore:140chars
     logoutput => true,
   }
@@ -26,14 +26,14 @@ class platform::helm
   }
   -> exec { 'create cluster role binding for tiller service account':
-    command   => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller',
+    command   => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller', # lint:ignore:140chars
     logoutput => true,
   }
   # TODO(jrichard): Upversion tiller image to v2.11.1 once released.
   -> exec { 'initialize helm':
     environment => [ 'KUBECONFIG=/etc/kubernetes/admin.conf', 'HOME=/home/wrsroot' ],
-    command     => 'helm init --skip-refresh --service-account tiller --node-selectors "node-role.kubernetes.io/master"="" --tiller-image=gcr.io/kubernetes-helm/tiller@sha256:022ce9d4a99603be1d30a4ca96a7fa57a45e6f2ef11172f4333c18aaae407f5b',
+    command     => 'helm init --skip-refresh --service-account tiller --node-selectors "node-role.kubernetes.io/master"="" --tiller-image=gcr.io/kubernetes-helm/tiller@sha256:022ce9d4a99603be1d30a4ca96a7fa57a45e6f2ef11172f4333c18aaae407f5b', # lint:ignore:140chars
     logoutput   => true,
     user        => 'wrsroot',
     group       => 'wrs',

@@ -142,13 +142,13 @@ class platform::kubernetes::master::init
     # kubernetes 1.12 uses coredns rather than kube-dns.
     # Restrict the dns pod to master nodes
     -> exec { 'restrict coredns to master nodes':
-      command   => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf -n kube-system patch deployment coredns -p \'{"spec":{"template":{"spec":{"nodeSelector":{"node-role.kubernetes.io/master":""}}}}}\'',
+      command   => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf -n kube-system patch deployment coredns -p \'{"spec":{"template":{"spec":{"nodeSelector":{"node-role.kubernetes.io/master":""}}}}}\'', # lint:ignore:140chars
       logoutput => true,
     }
     # Remove the taint from the master node
     -> exec { 'remove taint from master node':
-      command   => "kubectl --kubeconfig=/etc/kubernetes/admin.conf taint node ${::platform::params::hostname} node-role.kubernetes.io/master-",
+      command   => "kubectl --kubeconfig=/etc/kubernetes/admin.conf taint node ${::platform::params::hostname} node-role.kubernetes.io/master-", # lint:ignore:140chars
       logoutput => true,
     }
   } else {
@@ -224,13 +224,13 @@ class platform::kubernetes::master::init
     # Restrict the dns pod to master nodes. It seems that each time
     # kubeadm init is run, it undoes any changes to the deployment.
     -> exec { 'restrict coredns to master nodes':
-      command   => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf -n kube-system patch deployment coredns -p \'{"spec":{"template":{"spec":{"nodeSelector":{"node-role.kubernetes.io/master":""}}}}}\'',
+      command   => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf -n kube-system patch deployment coredns -p \'{"spec":{"template":{"spec":{"nodeSelector":{"node-role.kubernetes.io/master":""}}}}}\'', # lint:ignore:140chars
       logoutput => true,
     }
     # Remove the taint from the master node
     -> exec { 'remove taint from master node':
-      command   => "kubectl --kubeconfig=/etc/kubernetes/admin.conf taint node ${::platform::params::hostname} node-role.kubernetes.io/master-",
+      command   => "kubectl --kubeconfig=/etc/kubernetes/admin.conf taint node ${::platform::params::hostname} node-role.kubernetes.io/master-", # lint:ignore:140chars
       logoutput => true,
     }
   }

@@ -24,6 +24,9 @@ class platform::memcached::params(
     $controller_1_hostname: {
       $listen_ip = $::platform::network::mgmt::params::controller1_address
     }
+    default: {
+      fail("Hostname must be either ${controller_0_hostname} or ${controller_1_hostname}")
+    }
   }
 }

@@ -1,6 +1,6 @@
 class platform::ntp (
-  $servers = [],
   $ntpdate_timeout,
+  $servers = [],
   $enabled = true,
 ) {
   if $enabled or ($::personality == 'controller'){

@@ -1,15 +1,15 @@
 class platform::params (
-  $config_path = undef,
+  $hostname,
   $controller_hostname,
+  $pxeboot_hostname,
+  $config_path = undef,
   $controller_0_hostname = undef,
   $controller_1_hostname = undef,
   $controller_upgrade = false,
-  $hostname,
   $mate_hostname = undef,
   $mate_ipaddress = undef,
   $nfs_proto = 'udp',
   $nfs_rw_size = 1024,
-  $pxeboot_hostname,
   $region_1_name = undef,
   $region_2_name = undef,
   $region_config = false,

@@ -91,12 +91,12 @@ class platform::remotelogging::proxy(
   } else {
     platform::firewall::rule { 'remotelogging-nat':
+      ensure       => absent,
       service_name => $service_name,
       table        => $table,
       chain        => $chain,
       outiface     => $oam_interface,
       jump         => $jump,
-      ensure       => absent
     }
   }
 }

@@ -150,8 +150,10 @@ class platform::sm
   $platform_nfs_subnet_url = "${platform_nfs_ip_network_url}/${platform_nfs_ip_param_mask}"
   $cgcs_nfs_subnet_url = "${cgcs_nfs_ip_network_url}/${cgcs_nfs_ip_param_mask}"
+  # lint:ignore:140chars
   $nfs_server_mgmt_exports = "${cgcs_nfs_subnet_url}:${cgcs_fs_directory},${platform_nfs_subnet_url}:${platform_fs_directory},${platform_nfs_subnet_url}:${extension_fs_directory}"
   $nfs_server_mgmt_mounts = "${cgcs_fs_device}:${cgcs_fs_directory},${platform_fs_device}:${platform_fs_directory},${extension_fs_device}:${extension_fs_directory}"
+  # lint:endignore:140chars
   ################## Openstack Parameters ######################
@@ -306,6 +308,8 @@ class platform::sm
     $barbican_enabled = $::openstack::barbican::params::service_enabled
   }
+  # lint:ignore:140chars
   if $system_mode == 'simplex' {
     exec { 'Deprovision oam-ip service group member':
       command => 'sm-deprovision service-group-member oam-services oam-ip',
@@ -1619,6 +1623,8 @@ class platform::sm
       }
     }
   }
+  # lint:endignore:140chars
 }

@@ -47,10 +47,10 @@ define platform::vswitch::ovs::bridge(
 define platform::vswitch::ovs::port(
-  $type = 'port',
   $bridge,
-  $attributes = [],
   $interfaces,
+  $type = 'port',
+  $attributes = [],
 ) {
   exec { "ovs-add-port: ${title}":
     command => template('platform/ovs.add-port.erb'),
@@ -72,8 +72,8 @@ define platform::vswitch::ovs::address(
 define platform::vswitch::ovs::flow(
   $bridge,
-  $attributes = [],
   $actions,
+  $attributes = [],
 ) {
   exec { "ovs-add-flow: ${title}":
     command => template('platform/ovs.add-flow.erb'),

@@ -25,15 +25,8 @@ setenv =
   GEM_HOME = {envdir}
   GEM_PATH = {envdir}
 skip_tests = \
-  --no-140chars \
-  --no-autoloader_layout-check \
-  --no-case_without_default \
-  --no-documentation-check \
-  --no-ensure_first_param \
-  --no-inherits_across_namespaces \
-  --no-parameter_order \
-  --no-single_quote_string_with_variables \
-  --no-variable_is_lowercase-check
+  --no-documentation-check
 commands =
   gem install --no-document puppet-lint
   bash -c "find {toxinidir} -name \*.pp -print0 | xargs -0 puppet-lint --fail-on-warnings {[testenv:puppetlint]skip_tests}"

@@ -14,8 +14,10 @@
 #
 class dcmanager::keystone::auth (
   $password,
-  $auth_name = 'dcmanager',
   $auth_domain,
+  $admin_project_name,
+  $admin_project_domain,
+  $auth_name = 'dcmanager',
   $email = 'dcmanager@localhost',
   $tenant = 'admin',
   $region = 'SystemController',
@@ -28,8 +30,6 @@ class dcmanager::keystone::auth (
   $public_url = 'http://127.0.0.1:8119/v1',
   $admin_url = 'http://127.0.0.1:8119/v1',
   $internal_url = 'http://127.0.0.1:8119/v1',
-  $admin_project_name,
-  $admin_project_domain,
 ) {
   $real_service_name = pick($service_name, $auth_name)

@@ -5,8 +5,8 @@
 #
 class nfv::keystone::auth (
-  $auth_name = 'vim',
   $password,
+  $auth_name = 'vim',
   $tenant = 'services',
   $email = 'vim@localhost',
   $region = 'RegionOne',

@@ -20,7 +20,6 @@ class nova_api_proxy (
   anchor { 'proxy-start': }
   package { 'nova_api_proxy':
     ensure  => $package_ensure,
-    name    => 'nova-api-proxy',
     require => Anchor['proxy-start'],
   }

@@ -5,8 +5,8 @@
 #
 class patching::keystone::auth (
-  $auth_name = 'patching',
   $password,
+  $auth_name = 'patching',
   $tenant = 'services',
   $email = 'patching@localhost',
   $region = 'RegionOne',

@@ -25,9 +25,7 @@ setenv =
   GEM_HOME = {envdir}
   GEM_PATH = {envdir}
 skip_tests = \
-  --no-documentation-check \
-  --no-parameter_order \
-  --no-variable_scope
+  --no-documentation-check
 commands =
   gem install --no-document puppet-lint
   bash -c "find {toxinidir} -name \*.pp -print0 | xargs -0 puppet-lint --fail-on-warnings {[testenv:puppetlint]skip_tests}"