Remove Resource Monitor (aka rmon) from the load

All rmon resource monitoring has been moved to collectd.

This update removes rmon from mtce and the load.

Story: 2002823
Task: 30045

Test Plan:
PASS: Build and install a standard system.
PASS: Inspect the mtce RPM list and verify no rmon packages remain.
PASS: Inspect logs and verify there are no remaining rmon references.
PASS: Check pmon.d and verify pmon no longer monitors an rmon process (see the verification sketch below).
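
The last three checks can be scripted. Below is a minimal sketch of such a verification pass, assuming a standard rpm-based host and the usual /etc/pmon.d/ process-monitor directory; the exact package and conf file names are assumptions, not taken from this change.

import glob
import subprocess

def rmon_rpms():
    # "Inspect mtce rpm list": look for any installed package still named *rmon*.
    out = subprocess.check_output(["rpm", "-qa"]).decode()
    return [p for p in out.splitlines() if "rmon" in p]

def rmon_pmon_confs():
    # "Check pmon.d": pmon should no longer be configured to monitor an rmon process.
    return glob.glob("/etc/pmon.d/*rmon*")

leftovers = rmon_rpms() + rmon_pmon_confs()
print("PASS: no rmon artifacts found" if not leftovers else "FAIL: %s" % leftovers)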

Depends-On: https://review.openstack.org/#/c/643739
Change-Id: I7572a1d0a9cf746abfba3d67352534d96f60c5a7
Signed-off-by: Eric MacDonald <eric.macdonald@windriver.com>
Author: Eric MacDonald <eric.macdonald@windriver.com>
Date:   2019-03-18 12:15:34 -04:00
Parent: 066dfcd8f7
Commit: b00c4dd415

5 changed files with 10 additions and 25 deletions


@@ -828,7 +828,6 @@ def mtce_restart():
     """Restart maintenance processes to handle interface changes"""
     restart_service("mtcClient")
     restart_service("hbsClient")
-    restart_service("rmon")
     restart_service("pmon")


@@ -35,17 +35,6 @@ class platform::mtce
   }
   $boot_device = $::boot_disk_device_path
-  file { '/etc/rmonfiles.d':
-    ensure => directory,
-    mode   => '0755',
-  }
-  -> file { '/etc/rmonfiles.d/static.conf':
-    ensure  => present,
-    mode    => '0644',
-    content => template('mtce/static_conf.erb'),
-  }
 }


@@ -306,7 +306,7 @@ class platform::sm
   }
   exec { 'Configure Postgres FileSystem':
-    command => "sm-configure service_instance pg-fs pg-fs \"rmon_rsc_name=database-storage,device=${pg_fs_device},directory=${pg_fs_directory},options=noatime,nodiratime,fstype=ext4,check_level=20\"",
+    command => "sm-configure service_instance pg-fs pg-fs \"device=${pg_fs_device},directory=${pg_fs_directory},options=noatime,nodiratime,fstype=ext4,check_level=20\"",
   }
   exec { 'Configure Postgres':
@@ -318,7 +318,7 @@ class platform::sm
   }
   exec { 'Configure Rabbit FileSystem':
-    command => "sm-configure service_instance rabbit-fs rabbit-fs \"rmon_rsc_name=messaging-storage,device=${rabbit_fs_device},directory=${rabbit_fs_directory},options=noatime,nodiratime,fstype=ext4,check_level=20\"",
+    command => "sm-configure service_instance rabbit-fs rabbit-fs \"device=${rabbit_fs_device},directory=${rabbit_fs_directory},options=noatime,nodiratime,fstype=ext4,check_level=20\"",
   }
   exec { 'Configure Rabbit':
@@ -349,7 +349,7 @@ class platform::sm
   }
   exec { 'Configure CGCS FileSystem':
-    command => "sm-configure service_instance cgcs-fs cgcs-fs \"rmon_rsc_name=cloud-storage,device=${cgcs_fs_device},directory=${cgcs_fs_directory},options=noatime,nodiratime,fstype=ext4,check_level=20\"",
+    command => "sm-configure service_instance cgcs-fs cgcs-fs \"device=${cgcs_fs_device},directory=${cgcs_fs_directory},options=noatime,nodiratime,fstype=ext4,check_level=20\"",
   }
   exec { 'Configure CGCS Export FileSystem':
@@ -361,7 +361,7 @@ class platform::sm
   }
   exec { 'Configure Extension FileSystem':
-    command => "sm-configure service_instance extension-fs extension-fs \"rmon_rsc_name=extension-storage,device=${extension_fs_device},directory=${extension_fs_directory},options=noatime,nodiratime,fstype=ext4,check_level=20\"",
+    command => "sm-configure service_instance extension-fs extension-fs \"device=${extension_fs_device},directory=${extension_fs_directory},options=noatime,nodiratime,fstype=ext4,check_level=20\"",
   }
   exec { 'Configure Extension Export FileSystem':
@@ -374,7 +374,7 @@ class platform::sm
   }
   exec { 'Configure Patch-vault FileSystem':
-    command => "sm-configure service_instance patch-vault-fs patch-vault-fs \"rmon_rsc_name=patch-vault-storage,device=${patch_fs_device},directory=${patch_fs_directory},options=noatime,nodiratime,fstype=ext4,check_level=20\"",
+    command => "sm-configure service_instance patch-vault-fs patch-vault-fs \"device=${patch_fs_device},directory=${patch_fs_directory},options=noatime,nodiratime,fstype=ext4,check_level=20\"",
   }
 }
@@ -386,7 +386,7 @@ class platform::sm
     command => 'sm-provision service helmrepository-fs',
   }
   -> exec { 'Configure Helm Chart Repository FileSystem':
-    command => "sm-configure service_instance helmrepository-fs helmrepository-fs \"rmon_rsc_name=helm-charts-storage,device=${helmrepo_fs_source_dir},directory=${helmrepo_fs_target_dir},options=bind,noatime,nodiratime,fstype=ext4,check_level=20\"",
+    command => "sm-configure service_instance helmrepository-fs helmrepository-fs \"device=${helmrepo_fs_source_dir},directory=${helmrepo_fs_target_dir},options=bind,noatime,nodiratime,fstype=ext4,check_level=20\"",
   }
   exec { 'Configure ETCD DRBD':
@@ -710,7 +710,7 @@ class platform::sm
   }
   exec { 'Configure Platform FileSystem':
-    command => "sm-configure service_instance platform-fs platform-fs \"rmon_rsc_name=platform-storage,device=${platform_fs_device},directory=${platform_fs_directory},options=noatime,nodiratime,fstype=ext4,check_level=20\"",
+    command => "sm-configure service_instance platform-fs platform-fs \"device=${platform_fs_device},directory=${platform_fs_directory},options=noatime,nodiratime,fstype=ext4,check_level=20\"",
   }
   exec { 'Configure Platform Export FileSystem':
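
Every sm.pp hunk above applies the same mechanical edit: the now-unused rmon_rsc_name=<name> key is dropped from the comma-separated parameter string handed to sm-configure service_instance, while all other keys are preserved. A small illustrative sketch of that transformation follows; the device and directory values are made up for the example.

def drop_param(param_str, key):
    # Remove a single key=value entry from an sm-configure parameter string.
    parts = [p for p in param_str.split(",") if not p.startswith(key + "=")]
    return ",".join(parts)

old = ("rmon_rsc_name=database-storage,device=/dev/example,"
       "directory=/var/lib/postgresql,options=noatime,nodiratime,"
       "fstype=ext4,check_level=20")
print(drop_param(old, "rmon_rsc_name"))
# device=/dev/example,directory=/var/lib/postgresql,options=noatime,nodiratime,fstype=ext4,check_level=20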


@@ -55,8 +55,6 @@ rewrite r_rewrite_set{
     set("<%= @system_name %> platform.log ${HOST}", value("HOST") condition(filter(f_local1)));
     set("<%= @system_name %> pmond.log ${HOST}", value("HOST") condition(filter(f_pmon)));
     set("<%= @system_name %> postgres.log ${HOST}", value("HOST") condition(filter(f_local0)));
-    set("<%= @system_name %> rmond.log ${HOST}", value("HOST") condition(filter(f_rmon)));
-    set("<%= @system_name %> rmond_notify.log ${HOST}", value("HOST") condition(filter(f_rmon_notify)));
     set("<%= @system_name %> sm.log ${HOST}", value("HOST") condition(filter(f_local3)));
     set("<%= @system_name %> sysinv-api.log ${HOST}", value("HOST") condition(filter(f_sysinvapi)));
     set("<%= @system_name %> sysinv.log ${HOST}", value("HOST") condition(filter(f_sysinv)));


@@ -7880,10 +7880,9 @@ class ConductorManager(service.PeriodicService):
         return rc
-    # Retry in case of errors or racing issues with rmon autoextend. Rmon is pooling at
-    # 10s intervals and autoextend is fast. Therefore retrying a few times and waiting
-    # between each retry should provide enough protection in the unlikely case
-    # LVM's own locking mechanism is unreliable.
+    # Retrying a few times and waiting between each retry should provide
+    # enough protection in the unlikely case LVM's own locking mechanism
+    # is unreliable.
     @retry(stop_max_attempt_number=5, wait_fixed=1000,
            retry_on_result=(lambda x: True if x == constants.CINDER_RESIZE_FAILURE else False))
     def _resize_cinder_volumes(self, delayed=False):
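
The @retry decorator kept by this hunk comes from the retrying library: stop_max_attempt_number caps the number of attempts, wait_fixed is the delay between attempts in milliseconds, and retry_on_result re-invokes the function whenever its return value satisfies the predicate. Below is a self-contained sketch of the same pattern, with a stand-in for constants.CINDER_RESIZE_FAILURE.

from retrying import retry

CINDER_RESIZE_FAILURE = "cinder-resize-failed"  # stand-in constant for the example
attempts = {"count": 0}

# Retry up to 5 times, waiting 1000 ms between attempts, whenever the wrapped
# function returns the failure sentinel (same shape as the decorator above).
@retry(stop_max_attempt_number=5, wait_fixed=1000,
       retry_on_result=lambda result: result == CINDER_RESIZE_FAILURE)
def resize_volumes():
    attempts["count"] += 1
    # Simulate two transient failures before succeeding on the third attempt.
    return CINDER_RESIZE_FAILURE if attempts["count"] < 3 else "ok"

print(resize_volumes(), "after", attempts["count"], "attempts")  # -> ok after 3 attempts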