Updating some of the dcmanager and audit logs

On startup of any process, the entire configuration was
being logged, which meant we were logging the entire
configuration to the file (almost 200 lines per process).
- Lowering that log to DEBUG.
- Added an extra 'Starting...' log entry to help indicate
when a process start/restart occurs.

The update_subcloud_endpoint_status log was not including
enough information, as it is called several times in a row
for each endpoint.

Two audit logs that are created every 30 seconds have been reduced
from info to debug.

Closes-Bug: 1928335
Change-Id: I7ada5abf87c2f28f5826c02345f8dd3197eae665
Signed-off-by: albailey <Al.Bailey@windriver.com>
This commit is contained in:
albailey 2021-05-14 13:48:23 -05:00
parent 5a5127196a
commit 077ab926e3
11 changed files with 39 additions and 29 deletions

View File

@ -66,8 +66,9 @@ def main():
app.serve(service, CONF, workers)
LOG.info("Configuration:")
CONF.log_opt_values(LOG, std_logging.INFO)
LOG.info("Starting...")
LOG.debug("Configuration:")
CONF.log_opt_values(LOG, std_logging.DEBUG)
app.wait()

View File

@ -31,7 +31,6 @@ from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dcorch.common import consts as dcorch_consts
from dcmanager.common import consts
from dcmanager.common.i18n import _
from dcmanager.common import utils
LOG = logging.getLogger(__name__)
@ -65,7 +64,7 @@ class PatchAudit(object):
"""Manages tasks related to patch audits."""
def __init__(self, context, dcmanager_rpc_client):
LOG.info(_('PatchAudit initialization...'))
LOG.debug('PatchAudit initialization...')
self.context = context
self.dcmanager_rpc_client = dcmanager_rpc_client
self.audit_count = 0

View File

@ -267,7 +267,7 @@ class SubcloudAuditManager(manager.Manager):
"""Audit availability of subclouds loop."""
# We will be running in our own green thread here.
LOG.info('Triggered subcloud audit.')
LOG.debug('Triggered subcloud audit.')
self.audit_count += 1
# Determine whether to trigger a state update to each subcloud.
@ -336,27 +336,29 @@ class SubcloudAuditManager(manager.Manager):
# It might make sense to split it out.
if audit.patch_audit_requested or audit.load_audit_requested:
audit_patch = True
LOG.info("DB says patch audit needed")
LOG.debug("DB says patch audit needed")
break
if not audit_firmware:
for audit in subcloud_audits:
if audit.firmware_audit_requested:
LOG.info("DB says firmware audit needed")
LOG.debug("DB says firmware audit needed")
audit_firmware = True
break
if not audit_kubernetes:
for audit in subcloud_audits:
if audit.kubernetes_audit_requested:
LOG.info("DB says kubernetes audit needed")
LOG.debug("DB says kubernetes audit needed")
audit_kubernetes = True
break
LOG.info("Triggered subcloud audit: patch=(%s) firmware=(%s) kube=(%s)"
% (audit_patch, audit_firmware, audit_kubernetes))
patch_audit_data, firmware_audit_data, kubernetes_audit_data = \
self._get_audit_data(audit_patch, audit_firmware, audit_kubernetes)
LOG.info("patch_audit_data: %s, "
"firmware_audit_data: %s, "
"kubernetes_audit_data: %s, " % (patch_audit_data,
firmware_audit_data,
kubernetes_audit_data))
LOG.debug("patch_audit_data: %s, "
"firmware_audit_data: %s, "
"kubernetes_audit_data: %s, " % (patch_audit_data,
firmware_audit_data,
kubernetes_audit_data))
# We want a chunksize of at least 1 so add the number of workers.
chunksize = (len(subcloud_audits) + CONF.audit_worker_workers) / CONF.audit_worker_workers

View File

@ -69,8 +69,9 @@ def main():
app.serve(service, CONF, workers)
LOG.info("Configuration:")
CONF.log_opt_values(LOG, std_logging.INFO)
LOG.info("Starting...")
LOG.debug("Configuration:")
CONF.log_opt_values(LOG, std_logging.DEBUG)
app.wait()

View File

@ -54,8 +54,9 @@ def main():
launcher = service.launch(cfg.CONF,
srv, workers=CONF.audit_workers)
LOG.info("Configuration:")
cfg.CONF.log_opt_values(LOG, logging.INFO)
LOG.info("Starting...")
LOG.debug("Configuration:")
cfg.CONF.log_opt_values(LOG, logging.DEBUG)
launcher.wait()

View File

@ -54,8 +54,9 @@ def main():
launcher = service.launch(cfg.CONF,
srv, workers=CONF.audit_worker_workers)
LOG.info("Configuration:")
cfg.CONF.log_opt_values(LOG, logging.INFO)
LOG.info("Starting...")
LOG.debug("Configuration:")
cfg.CONF.log_opt_values(LOG, logging.DEBUG)
launcher.wait()

View File

@ -57,8 +57,9 @@ def main():
launcher = service.launch(cfg.CONF,
srv, workers=cfg.CONF.workers)
LOG.info("Configuration:")
cfg.CONF.log_opt_values(LOG, logging.INFO)
LOG.info("Starting...")
LOG.debug("Configuration:")
cfg.CONF.log_opt_values(LOG, logging.DEBUG)
# the following periodic tasks are intended serve as HA checking
# srv.create_periodic_tasks()

View File

@ -53,8 +53,9 @@ def main():
launcher = service.launch(CONF,
srv, workers=cfg.CONF.orch_workers)
LOG.info("Configuration:")
CONF.log_opt_values(LOG, logging.INFO)
LOG.info("Starting...")
LOG.debug("Configuration:")
CONF.log_opt_values(LOG, logging.DEBUG)
launcher.wait()

View File

@ -163,8 +163,9 @@ class DCManagerService(service.Service):
SYNC_STATUS_OUT_OF_SYNC,
alarmable=True):
# Updates subcloud endpoint sync status
LOG.info("Handling update_subcloud_endpoint_status request for: %s" %
subcloud_name)
LOG.info("Handling update_subcloud_endpoint_status request for "
"subcloud: (%s) endpoint: (%s) status:(%s) "
% (subcloud_name, endpoint_type, sync_status))
self.subcloud_manager. \
update_subcloud_endpoint_status(context,

View File

@ -61,8 +61,9 @@ def main():
app.serve(service, CONF, workers)
LOG.info("Configuration:")
CONF.log_opt_values(LOG, std_logging.INFO)
LOG.info("Starting...")
LOG.debug("Configuration:")
CONF.log_opt_values(LOG, std_logging.DEBUG)
app.wait()

View File

@ -111,8 +111,9 @@ def main():
app.serve(service, CONF, workers)
LOG.info("Configuration:")
CONF.log_opt_values(LOG, std_logging.INFO)
LOG.info("Starting...")
LOG.debug("Configuration:")
CONF.log_opt_values(LOG, std_logging.DEBUG)
app.wait()