diff --git a/.zuul.yaml b/.zuul.yaml index 61555c1447..6a77165e12 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -13,7 +13,6 @@ - sysinv-tox-pylint - sysinv-tox-bandit - controllerconfig-tox-flake8 - - controllerconfig-tox-py27 - controllerconfig-tox-pylint - cgtsclient-tox-py27 - cgtsclient-tox-pep8 @@ -27,7 +26,6 @@ - sysinv-tox-pylint - sysinv-tox-bandit - controllerconfig-tox-flake8 - - controllerconfig-tox-py27 - controllerconfig-tox-pylint - cgtsclient-tox-py27 - cgtsclient-tox-pep8 @@ -112,18 +110,6 @@ tox_envlist: flake8 tox_extra_args: -c controllerconfig/controllerconfig/tox.ini -- job: - name: controllerconfig-tox-py27 - parent: tox - description: Run py27 tests for controllerconfig - required-projects: - - starlingx/fault - files: - - controllerconfig/* - vars: - tox_envlist: py27 - tox_extra_args: -c controllerconfig/controllerconfig/tox.ini - - job: name: controllerconfig-tox-pylint parent: tox diff --git a/controllerconfig/centos/build_srpm.data b/controllerconfig/centos/build_srpm.data index 04bab29441..64925df6c3 100755 --- a/controllerconfig/centos/build_srpm.data +++ b/controllerconfig/centos/build_srpm.data @@ -1,2 +1,2 @@ SRC_DIR="controllerconfig" -TIS_PATCH_VER=151 +TIS_PATCH_VER=152 diff --git a/controllerconfig/centos/controllerconfig.spec b/controllerconfig/centos/controllerconfig.spec index 2548ec7299..406b17f2e1 100644 --- a/controllerconfig/centos/controllerconfig.spec +++ b/controllerconfig/centos/controllerconfig.spec @@ -57,10 +57,7 @@ mkdir -p $RPM_BUILD_ROOT/wheels install -m 644 dist/*.whl $RPM_BUILD_ROOT/wheels/ install -d -m 755 %{buildroot}%{local_bindir} -install -p -D -m 700 scripts/keyringstaging %{buildroot}%{local_bindir}/keyringstaging install -p -D -m 700 scripts/openstack_update_admin_password %{buildroot}%{local_bindir}/openstack_update_admin_password -install -p -D -m 700 scripts/install_clone.py %{buildroot}%{local_bindir}/install_clone -install -p -D -m 700 scripts/finish_install_clone.sh 
%{buildroot}%{local_bindir}/finish_install_clone.sh install -d -m 755 %{buildroot}%{local_goenabledd} install -p -D -m 700 scripts/config_goenabled_check.sh %{buildroot}%{local_goenabledd}/config_goenabled_check.sh @@ -74,13 +71,12 @@ install -p -D -m 755 upgrade-scripts/* %{buildroot}%{local_etc_upgraded}/ install -d -m 755 %{buildroot}%{local_etc_systemd} install -p -D -m 664 scripts/controllerconfig.service %{buildroot}%{local_etc_systemd}/controllerconfig.service -#install -p -D -m 664 scripts/config.service %{buildroot}%{local_etc_systemd}/config.service %post systemctl enable controllerconfig.service %clean -rm -rf $RPM_BUILD_ROOT +rm -rf $RPM_BUILD_ROOT %files %defattr(-,root,root,-) diff --git a/controllerconfig/controllerconfig/controllerconfig/__init__.py b/controllerconfig/controllerconfig/controllerconfig/__init__.py index 138851db64..9fc91a45df 100644 --- a/controllerconfig/controllerconfig/controllerconfig/__init__.py +++ b/controllerconfig/controllerconfig/controllerconfig/__init__.py @@ -1,34 +1,10 @@ # -# Copyright (c) 2015-2019 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # -from controllerconfig.common.validator import validate # noqa: F401 -from controllerconfig.common.configobjects import Network # noqa: F401 -from controllerconfig.common.configobjects import DEFAULT_CONFIG # noqa: F401 -from controllerconfig.common.configobjects import REGION_CONFIG # noqa: F401 -from controllerconfig.common.configobjects import DEFAULT_NAMES # noqa: F401 -from controllerconfig.common.configobjects import HP_NAMES # noqa: F401 -from controllerconfig.common.configobjects import SUBCLOUD_CONFIG # noqa: F401 -from controllerconfig.common.configobjects import MGMT_TYPE # noqa: F401 -from controllerconfig.common.configobjects import INFRA_TYPE # noqa: F401 -from controllerconfig.common.configobjects import OAM_TYPE # noqa: F401 -from controllerconfig.common.configobjects import NETWORK_PREFIX_NAMES # noqa: F401 -from controllerconfig.common.configobjects import HOST_XML_ATTRIBUTES # noqa: F401 -from controllerconfig.common.configobjects import DEFAULT_DOMAIN_NAME # noqa: F401 from controllerconfig.common.exceptions import ConfigError # noqa: F401 -from controllerconfig.common.exceptions import ConfigFail # noqa: F401 from controllerconfig.common.exceptions import ValidateFail # noqa: F401 -from controllerconfig.utils import is_valid_vlan # noqa: F401 -from controllerconfig.utils import is_mtu_valid # noqa: F401 from controllerconfig.utils import validate_network_str # noqa: F401 from controllerconfig.utils import validate_address_str # noqa: F401 -from controllerconfig.utils import validate_address # noqa: F401 -from controllerconfig.utils import is_valid_url # noqa: F401 -from controllerconfig.utils import is_valid_domain_or_ip # noqa: F401 -from controllerconfig.utils import ip_version_to_string # noqa: F401 -from controllerconfig.utils import lag_mode_to_str # noqa: F401 -from controllerconfig.utils import validate_openstack_password # noqa: F401 -from controllerconfig.utils import 
validate_nameserver_address_str # noqa: F401 -from controllerconfig.utils import extract_openstack_password_rules_from_file # noqa: F401 diff --git a/controllerconfig/controllerconfig/controllerconfig/backup_restore.py b/controllerconfig/controllerconfig/controllerconfig/backup_restore.py deleted file mode 100644 index 5b238be9b9..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/backup_restore.py +++ /dev/null @@ -1,1690 +0,0 @@ -# -# Copyright (c) 2014-2017 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -""" -Backup & Restore -""" - -from __future__ import print_function -import copy -import filecmp -import fileinput -import os -import glob -import shutil -import stat -import subprocess -import tarfile -import tempfile -import textwrap -import time - -from fm_api import constants as fm_constants -from fm_api import fm_api -from sysinv.common import constants as sysinv_constants - -from controllerconfig.common import log -from controllerconfig.common import constants -from controllerconfig.common.exceptions import BackupFail -from controllerconfig.common.exceptions import RestoreFail -from controllerconfig.common.exceptions import KeystoneFail -from controllerconfig.common.exceptions import SysInvFail -from controllerconfig import openstack -import tsconfig.tsconfig as tsconfig -from controllerconfig import utils -from controllerconfig import sysinv_api as sysinv -from six.moves import input -from os import environ - -LOG = log.get_logger(__name__) - -DEVNULL = open(os.devnull, 'w') -RESTORE_COMPLETE = "restore-complete" -RESTORE_RERUN_REQUIRED = "restore-rerun-required" - -# Backup/restore related constants -backup_in_progress = tsconfig.BACKUP_IN_PROGRESS_FLAG -restore_in_progress = tsconfig.RESTORE_IN_PROGRESS_FLAG -restore_patching_complete = '/etc/platform/.restore_patching_complete' -node_is_patched = '/var/run/node_is_patched' -keyring_permdir = os.path.join('/opt/platform/.keyring', tsconfig.SW_VERSION) 
-ceph_permdir = os.path.join(tsconfig.CONFIG_PATH, 'ceph-config') -ldap_permdir = '/var/lib/openldap-data' -patching_permdir = '/opt/patching' -patching_repo_permdir = '/www/pages/updates' -home_permdir = '/home' -extension_permdir = '/opt/extension' -patch_vault_permdir = '/opt/patch-vault' -mariadb_pod = 'mariadb-server-0' - -kube_config = environ.get('KUBECONFIG') -if kube_config is None: - kube_config = '/etc/kubernetes/admin.conf' - - -kube_cmd_prefix = 'kubectl --kubeconfig=%s ' % kube_config -kube_cmd_prefix += 'exec -i %s -n openstack -- bash -c ' % mariadb_pod - -mysql_prefix = '\'exec mysql -uroot -p"$MYSQL_ROOT_PASSWORD" ' -mysqldump_prefix = '\'exec mysqldump -uroot -p"$MYSQL_ROOT_PASSWORD" ' - - -def get_backup_databases(): - """ - Retrieve database lists for backup. - :return: backup_databases and backup_database_skip_tables - """ - - # Databases common to all configurations - REGION_LOCAL_DATABASES = ('postgres', 'template1', 'sysinv', - 'fm', 'barbican') - REGION_SHARED_DATABASES = ('keystone',) - - # Indicates which tables have to be dropped for a certain database. - DB_TABLE_SKIP_MAPPING = { - 'fm': ('alarm',), - 'dcorch': ('orch_job', - 'orch_request', - 'resource', - 'subcloud_resource'), } - - if tsconfig.region_config == 'yes': - BACKUP_DATABASES = REGION_LOCAL_DATABASES - else: - # Add additional databases for non-region configuration and for the - # primary region in region deployments. - BACKUP_DATABASES = REGION_LOCAL_DATABASES + REGION_SHARED_DATABASES - - # Add distributed cloud databases - if tsconfig.distributed_cloud_role == \ - sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER: - BACKUP_DATABASES += ('dcmanager', 'dcorch') - - # We generate the tables to be skipped for each database - # mentioned in BACKUP_DATABASES. 
We explicitly list - # skip tables in DB_TABLE_SKIP_MAPPING - BACKUP_DB_SKIP_TABLES = dict( - [[x, DB_TABLE_SKIP_MAPPING.get(x, ())] for x in BACKUP_DATABASES]) - - return BACKUP_DATABASES, BACKUP_DB_SKIP_TABLES - - -def get_os_backup_databases(): - """ - Retrieve openstack database lists from MariaDB for backup. - :return: os_backup_databases - """ - - skip_dbs = ("Database", "information_schema", "performance_schema", - "mysql", "horizon", "panko", "gnocchi") - - try: - db_cmd = kube_cmd_prefix + mysql_prefix + '-e"show databases" \'' - - proc = subprocess.Popen([db_cmd], shell=True, - stdout=subprocess.PIPE, stderr=DEVNULL) - - os_backup_dbs = set(line[:-1] for line in proc.stdout - if line[:-1] not in skip_dbs) - - proc.communicate() - - return os_backup_dbs - - except subprocess.CalledProcessError: - raise BackupFail("Failed to get openstack databases from MariaDB.") - - -def check_load_versions(archive, staging_dir): - match = False - try: - member = archive.getmember('etc/build.info') - archive.extract(member, path=staging_dir) - match = filecmp.cmp('/etc/build.info', staging_dir + '/etc/build.info') - shutil.rmtree(staging_dir + '/etc') - except Exception as e: - LOG.exception(e) - raise RestoreFail("Unable to verify load version in backup file. " - "Invalid backup file.") - - if not match: - LOG.error("Load version mismatch.") - raise RestoreFail("Load version of backup does not match the " - "version of the installed load.") - - -def get_subfunctions(filename): - """ - Retrieves the subfunctions from a platform.conf file. - :param filename: file to retrieve subfunctions from - :return: a list of the subfunctions or None if no subfunctions exist - """ - matchstr = 'subfunction=' - - with open(filename, 'r') as f: - for line in f: - if matchstr in line: - parsed = line.split('=') - return parsed[1].rstrip().split(",") - return - - -def check_load_subfunctions(archive, staging_dir): - """ - Verify that the subfunctions in the backup match the installed load. 
- :param archive: backup archive - :param staging_dir: staging directory - :return: raises exception if the subfunctions do not match - """ - match = False - backup_subfunctions = None - try: - member = archive.getmember('etc/platform/platform.conf') - archive.extract(member, path=staging_dir) - backup_subfunctions = get_subfunctions(staging_dir + - '/etc/platform/platform.conf') - shutil.rmtree(staging_dir + '/etc') - if set(backup_subfunctions) ^ set(tsconfig.subfunctions): - # The set of subfunctions do not match - match = False - else: - match = True - except Exception: - LOG.exception("Unable to verify subfunctions in backup file") - raise RestoreFail("Unable to verify subfunctions in backup file. " - "Invalid backup file.") - - if not match: - LOG.error("Subfunction mismatch - backup: %s, installed: %s" % - (str(backup_subfunctions), str(tsconfig.subfunctions))) - raise RestoreFail("Subfunctions in backup load (%s) do not match the " - "subfunctions of the installed load (%s)." % - (str(backup_subfunctions), - str(tsconfig.subfunctions))) - - -def file_exists_in_archive(archive, file_path): - """ Check if file exists in archive """ - try: - archive.getmember(file_path) - return True - - except KeyError: - LOG.info("File %s is not in archive." 
% file_path) - return False - - -def filter_directory(archive, directory): - for tarinfo in archive: - if tarinfo.name.split('/')[0] == directory: - yield tarinfo - - -def backup_etc_size(): - """ Backup etc size estimate """ - try: - total_size = utils.directory_get_size('/etc') - return total_size - except OSError: - LOG.error("Failed to estimate backup etc size.") - raise BackupFail("Failed to estimate backup etc size") - - -def backup_etc(archive): - """ Backup etc """ - try: - archive.add('/etc', arcname='etc') - - except tarfile.TarError: - LOG.error("Failed to backup etc.") - raise BackupFail("Failed to backup etc") - - -def restore_etc_file(archive, dest_dir, etc_file): - """ Restore etc file """ - try: - # Change the name of this file to remove the leading path - member = archive.getmember('etc/' + etc_file) - # Copy the member to avoid changing the name for future operations on - # this member. - temp_member = copy.copy(member) - temp_member.name = os.path.basename(temp_member.name) - archive.extract(temp_member, path=dest_dir) - - except tarfile.TarError: - LOG.error("Failed to restore etc file.") - raise RestoreFail("Failed to restore etc file") - - -def restore_etc_ssl_dir(archive, configpath=constants.CONFIG_WORKDIR): - """ Restore the etc SSL dir """ - - def filter_etc_ssl_private(members): - for tarinfo in members: - if 'etc/ssl/private' in tarinfo.name: - yield tarinfo - - if file_exists_in_archive(archive, 'config/server-cert.pem'): - restore_config_file( - archive, configpath, 'server-cert.pem') - - if file_exists_in_archive(archive, 'etc/ssl/private'): - # NOTE: This will include all TPM certificate files if TPM was - # enabled on the backed up system. However in that case, this - # restoration is only done for the first controller and TPM - # will need to be reconfigured once duplex controller (if any) - # is restored. 
- archive.extractall(path='/', - members=filter_etc_ssl_private(archive)) - - -def restore_ceph_external_config_files(archive, staging_dir): - # Restore ceph-config. - if file_exists_in_archive(archive, "config/ceph-config"): - restore_config_dir(archive, staging_dir, 'ceph-config', ceph_permdir) - - # Copy the file to /etc/ceph. - # There might be no files to copy, so don't check the return code. - cp_command = ('cp -Rp ' + os.path.join(ceph_permdir, '*') + - ' /etc/ceph/') - subprocess.call(cp_command, shell=True) - - -def backup_config_size(config_permdir): - """ Backup configuration size estimate """ - try: - return(utils.directory_get_size(config_permdir)) - - except OSError: - LOG.error("Failed to estimate backup configuration size.") - raise BackupFail("Failed to estimate backup configuration size") - - -def backup_config(archive, config_permdir): - """ Backup configuration """ - try: - # The config dir is versioned, but we're only grabbing the current - # release - archive.add(config_permdir, arcname='config') - - except tarfile.TarError: - LOG.error("Failed to backup config.") - raise BackupFail("Failed to backup configuration") - - -def restore_config_file(archive, dest_dir, config_file): - """ Restore configuration file """ - try: - # Change the name of this file to remove the leading path - member = archive.getmember('config/' + config_file) - # Copy the member to avoid changing the name for future operations on - # this member. - temp_member = copy.copy(member) - temp_member.name = os.path.basename(temp_member.name) - archive.extract(temp_member, path=dest_dir) - - except tarfile.TarError: - LOG.error("Failed to restore config file %s." 
% config_file) - raise RestoreFail("Failed to restore configuration") - - -def restore_configuration(archive, staging_dir): - """ Restore configuration """ - try: - os.makedirs(constants.CONFIG_WORKDIR, stat.S_IRWXU | stat.S_IRGRP | - stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH) - except OSError: - LOG.error("Failed to create config directory: %s", - constants.CONFIG_WORKDIR) - raise RestoreFail("Failed to restore configuration files") - - # Restore cgcs_config file from original installation for historical - # purposes. Not used to restore the system as the information in this - # file is out of date (not updated after original installation). - restore_config_file(archive, constants.CONFIG_WORKDIR, 'cgcs_config') - - # Restore platform.conf file and update as necessary. The file will be - # created in a temporary location and then moved into place when it is - # complete to prevent access to a partially created file. - restore_etc_file(archive, staging_dir, 'platform/platform.conf') - temp_platform_conf_file = os.path.join(tsconfig.PLATFORM_CONF_PATH, - 'platform.conf.temp') - shutil.copyfile(os.path.join(staging_dir, 'platform.conf'), - temp_platform_conf_file) - install_uuid = utils.get_install_uuid() - for line in fileinput.FileInput(temp_platform_conf_file, inplace=1): - if line.startswith("INSTALL_UUID="): - # The INSTALL_UUID must be updated to match the new INSTALL_UUID - # which was generated when this controller was installed prior to - # doing the restore. - print("INSTALL_UUID=%s" % install_uuid) - elif line.startswith("management_interface=") or \ - line.startswith("oam_interface=") or \ - line.startswith("cluster_host_interface=") or \ - line.startswith("UUID="): - # Strip out any entries that are host specific as the backup can - # be done on either controller. The application of the - # platform_conf manifest will add these back in. - pass - else: - print(line, end='') - fileinput.close() - # Move updated platform.conf file into place. 
- os.rename(temp_platform_conf_file, tsconfig.PLATFORM_CONF_FILE) - - # Kick tsconfig to reload the platform.conf file - tsconfig._load() - - # Restore branding - restore_config_dir(archive, staging_dir, 'branding', '/opt/branding/') - - # Restore banner customization - restore_config_dir(archive, staging_dir, 'banner/etc', '/opt/banner') - - # Restore ssh configuration - restore_config_dir(archive, staging_dir, 'ssh_config', - constants.CONFIG_WORKDIR + '/ssh_config') - - # Configure hostname - utils.configure_hostname('controller-0') - - # Restore hosts file - restore_etc_file(archive, '/etc', 'hosts') - restore_etc_file(archive, constants.CONFIG_WORKDIR, 'hosts') - - # Restore certificate files - restore_etc_ssl_dir(archive) - - -def filter_pxelinux(archive): - for tarinfo in archive: - if tarinfo.name.find('config/pxelinux.cfg') == 0: - yield tarinfo - - -def restore_dnsmasq(archive, config_permdir): - """ Restore dnsmasq """ - try: - etc_files = ['hosts'] - - perm_files = ['hosts', - 'dnsmasq.hosts', 'dnsmasq.leases', - 'dnsmasq.addn_hosts'] - - for etc_file in etc_files: - restore_config_file(archive, '/etc', etc_file) - - for perm_file in perm_files: - restore_config_file(archive, config_permdir, perm_file) - - # Extract distributed cloud addn_hosts file if present in archive. 
- if file_exists_in_archive( - archive, 'config/dnsmasq.addn_hosts_dc'): - restore_config_file(archive, config_permdir, - 'dnsmasq.addn_hosts_dc') - - tmpdir = tempfile.mkdtemp(prefix="pxerestore_") - - archive.extractall(tmpdir, - members=filter_pxelinux(archive)) - - if os.path.exists(tmpdir + '/config/pxelinux.cfg'): - shutil.rmtree(config_permdir + 'pxelinux.cfg', ignore_errors=True) - shutil.move(tmpdir + '/config/pxelinux.cfg', config_permdir) - - shutil.rmtree(tmpdir, ignore_errors=True) - - except (shutil.Error, subprocess.CalledProcessError, tarfile.TarError): - LOG.error("Failed to restore dnsmasq config.") - raise RestoreFail("Failed to restore dnsmasq files") - - -def backup_puppet_data_size(puppet_permdir): - """ Backup puppet data size estimate """ - try: - return(utils.directory_get_size(puppet_permdir)) - - except OSError: - LOG.error("Failed to estimate backup puppet data size.") - raise BackupFail("Failed to estimate backup puppet data size") - - -def backup_puppet_data(archive, puppet_permdir): - """ Backup puppet data """ - try: - # The puppet dir is versioned, but we're only grabbing the current - # release - archive.add(puppet_permdir, arcname='hieradata') - - except tarfile.TarError: - LOG.error("Failed to backup puppet data.") - raise BackupFail("Failed to backup puppet data") - - -def restore_static_puppet_data(archive, puppet_workdir): - """ Restore static puppet data """ - try: - member = archive.getmember('hieradata/static.yaml') - archive.extract(member, path=os.path.dirname(puppet_workdir)) - - member = archive.getmember('hieradata/secure_static.yaml') - archive.extract(member, path=os.path.dirname(puppet_workdir)) - - except tarfile.TarError: - LOG.error("Failed to restore static puppet data.") - raise RestoreFail("Failed to restore static puppet data") - - except OSError: - pass - - -def restore_puppet_data(archive, puppet_workdir, controller_0_address): - """ Restore puppet data """ - try: - member = 
archive.getmember('hieradata/system.yaml') - archive.extract(member, path=os.path.dirname(puppet_workdir)) - - member = archive.getmember('hieradata/secure_system.yaml') - archive.extract(member, path=os.path.dirname(puppet_workdir)) - - # Only restore controller-0 hieradata - controller_0_hieradata = 'hieradata/%s.yaml' % controller_0_address - member = archive.getmember(controller_0_hieradata) - archive.extract(member, path=os.path.dirname(puppet_workdir)) - - except tarfile.TarError: - LOG.error("Failed to restore puppet data.") - raise RestoreFail("Failed to restore puppet data") - - except OSError: - pass - - -def backup_armada_manifest_size(armada_permdir): - """ Backup armada manifest size estimate """ - try: - return(utils.directory_get_size(armada_permdir)) - - except OSError: - LOG.error("Failed to estimate backup armada manifest size.") - raise BackupFail("Failed to estimate backup armada manifest size") - - -def backup_armada_manifest_data(archive, armada_permdir): - """ Backup armada manifest data """ - try: - archive.add(armada_permdir, arcname='armada') - - except tarfile.TarError: - LOG.error("Failed to backup armada manifest data.") - raise BackupFail("Failed to backup armada manifest data") - - -def restore_armada_manifest_data(archive, armada_permdir): - """ Restore armada manifest data """ - try: - shutil.rmtree(armada_permdir, ignore_errors=True) - members = filter_directory(archive, 'armada') - temp_members = list() - # remove armada and armada/ from the member path since they are - # extracted to armada_permdir: /opt/platform/armada/release - for m in members: - temp_member = copy.copy(m) - lst = temp_member.name.split('armada/') - if len(lst) > 1: - temp_member.name = lst[1] - temp_members.append(temp_member) - archive.extractall(path=armada_permdir, members=temp_members) - - except (tarfile.TarError, OSError): - LOG.error("Failed to restore armada manifest.") - shutil.rmtree(armada_permdir, ignore_errors=True) - raise RestoreFail("Failed to 
restore armada manifest") - - -def backup_keyring_size(keyring_permdir): - """ Backup keyring size estimate """ - try: - return(utils.directory_get_size(keyring_permdir)) - - except OSError: - LOG.error("Failed to estimate backup keyring size.") - raise BackupFail("Failed to estimate backup keyring size") - - -def backup_keyring(archive, keyring_permdir): - """ Backup keyring configuration """ - try: - archive.add(keyring_permdir, arcname='.keyring') - - except tarfile.TarError: - LOG.error("Failed to backup keyring.") - raise BackupFail("Failed to backup keyring configuration") - - -def restore_keyring(archive, keyring_permdir): - """ Restore keyring configuration """ - try: - shutil.rmtree(keyring_permdir, ignore_errors=False) - members = filter_directory(archive, '.keyring') - temp_members = list() - # remove .keyring and .keyring/ from the member path since they are - # extracted to keyring_permdir: /opt/platform/.keyring/release - for m in members: - temp_member = copy.copy(m) - lst = temp_member.name.split('.keyring/') - if len(lst) > 1: - temp_member.name = lst[1] - temp_members.append(temp_member) - archive.extractall(path=keyring_permdir, members=temp_members) - - except (tarfile.TarError, shutil.Error): - LOG.error("Failed to restore keyring.") - shutil.rmtree(keyring_permdir, ignore_errors=True) - raise RestoreFail("Failed to restore keyring configuration") - - -def prefetch_keyring(archive): - """ Prefetch keyring configuration for manifest use """ - keyring_tmpdir = '/tmp/.keyring' - python_keyring_tmpdir = '/tmp/python_keyring' - try: - shutil.rmtree(keyring_tmpdir, ignore_errors=True) - shutil.rmtree(python_keyring_tmpdir, ignore_errors=True) - archive.extractall( - path=os.path.dirname(keyring_tmpdir), - members=filter_directory(archive, - os.path.basename(keyring_tmpdir))) - - shutil.move(keyring_tmpdir + '/python_keyring', python_keyring_tmpdir) - - except (tarfile.TarError, shutil.Error): - LOG.error("Failed to restore keyring.") - 
shutil.rmtree(keyring_tmpdir, ignore_errors=True) - shutil.rmtree(python_keyring_tmpdir, ignore_errors=True) - raise RestoreFail("Failed to restore keyring configuration") - - -def cleanup_prefetched_keyring(): - """ Cleanup fetched keyring """ - try: - keyring_tmpdir = '/tmp/.keyring' - python_keyring_tmpdir = '/tmp/python_keyring' - - shutil.rmtree(keyring_tmpdir, ignore_errors=True) - shutil.rmtree(python_keyring_tmpdir, ignore_errors=True) - - except shutil.Error: - LOG.error("Failed to cleanup keyring.") - raise RestoreFail("Failed to cleanup fetched keyring") - - -def backup_ldap_size(): - """ Backup ldap size estimate """ - try: - total_size = 0 - - proc = subprocess.Popen( - ['slapcat -d 0 -F /etc/openldap/schema | wc -c'], - shell=True, stdout=subprocess.PIPE) - - for line in proc.stdout: - total_size = int(line) - break - - proc.communicate() - - return total_size - - except subprocess.CalledProcessError: - LOG.error("Failed to estimate backup ldap size.") - raise BackupFail("Failed to estimate backup ldap size") - - -def backup_ldap(archive, staging_dir): - """ Backup ldap configuration """ - try: - ldap_staging_dir = staging_dir + '/ldap' - os.mkdir(ldap_staging_dir, 0o655) - - subprocess.check_call([ - 'slapcat', '-d', '0', '-F', '/etc/openldap/schema', - '-l', (ldap_staging_dir + '/ldap.db')], stdout=DEVNULL) - - archive.add(ldap_staging_dir + '/ldap.db', arcname='ldap.db') - - except (OSError, subprocess.CalledProcessError, tarfile.TarError): - LOG.error("Failed to backup ldap database.") - raise BackupFail("Failed to backup ldap configuration") - - -def restore_ldap(archive, ldap_permdir, staging_dir): - """ Restore ldap configuration """ - try: - ldap_staging_dir = staging_dir + '/ldap' - archive.extract('ldap.db', path=ldap_staging_dir) - - utils.stop_lsb_service('openldap') - - subprocess.call(['rm', '-rf', ldap_permdir], stdout=DEVNULL) - os.mkdir(ldap_permdir, 0o755) - - subprocess.check_call(['slapadd', '-F', '/etc/openldap/schema', - '-l', 
ldap_staging_dir + '/ldap.db'], - stdout=DEVNULL, stderr=DEVNULL) - - except (subprocess.CalledProcessError, OSError, tarfile.TarError): - LOG.error("Failed to restore ldap database.") - raise RestoreFail("Failed to restore ldap configuration") - - finally: - utils.start_lsb_service('openldap') - - -def backup_mariadb_size(): - """ Backup MariaDB size estimate """ - try: - total_size = 0 - - os_backup_dbs = get_os_backup_databases() - - # Backup data for databases. - for db_elem in os_backup_dbs: - - db_cmd = kube_cmd_prefix + mysqldump_prefix - db_cmd += ' %s\' | wc -c' % db_elem - - proc = subprocess.Popen([db_cmd], shell=True, - stdout=subprocess.PIPE, stderr=DEVNULL) - - total_size += int(proc.stdout.readline()) - proc.communicate() - - return total_size - - except subprocess.CalledProcessError: - LOG.error("Failed to estimate MariaDB database size.") - raise BackupFail("Failed to estimate MariaDB database size") - - -def backup_mariadb(archive, staging_dir): - """ Backup MariaDB data """ - try: - mariadb_staging_dir = staging_dir + '/mariadb' - os.mkdir(mariadb_staging_dir, 0o655) - - os_backup_dbs = get_os_backup_databases() - - # Backup data for databases. - for db_elem in os_backup_dbs: - db_cmd = kube_cmd_prefix + mysqldump_prefix - db_cmd += ' %s\' > %s/%s.sql.data' % (db_elem, - mariadb_staging_dir, db_elem) - - subprocess.check_call([db_cmd], shell=True, stderr=DEVNULL) - - archive.add(mariadb_staging_dir, arcname='mariadb') - - except (OSError, subprocess.CalledProcessError, tarfile.TarError): - LOG.error("Failed to backup MariaDB databases.") - raise BackupFail("Failed to backup MariaDB database.") - - -def extract_mariadb_data(archive): - """ Extract and store MariaDB data """ - try: - # We store MariaDB data in /opt/backups/mariadb for now. - # After MariaDB service is up, we will populate the - # database using these data. 
- archive.extractall(path=constants.BACKUPS_PATH, - members=filter_directory(archive, 'mariadb')) - except (OSError, tarfile.TarError) as e: - LOG.error("Failed to extract and store MariaDB data. Error: %s", e) - raise RestoreFail("Failed to extract and store MariaDB data.") - - -def create_helm_overrides_directory(): - """ - Create helm overrides directory - During restore, application-apply will be done without - first running application-upload where the helm overrides - directory is created. So we need to create the helm overrides - directory before running application-apply. - """ - try: - os.mkdir(constants.HELM_OVERRIDES_PERMDIR, 0o755) - except OSError: - LOG.error("Failed to create helm overrides directory") - raise BackupFail("Failed to create helm overrides directory") - - -def restore_mariadb(): - """ - Restore MariaDB - - This function is called after MariaDB service is up - """ - try: - mariadb_staging_dir = constants.BACKUPS_PATH + '/mariadb' - # Restore data for databases. - for data in glob.glob(mariadb_staging_dir + '/*.sql.data'): - db_elem = data.split('/')[-1].split('.')[0] - create_db = "create database %s" % db_elem - - # Create the database - db_cmd = kube_cmd_prefix + mysql_prefix + '-e"%s" \'' % create_db - subprocess.check_call([db_cmd], shell=True, stderr=DEVNULL) - - # Populate data - db_cmd = 'cat %s | ' % data - db_cmd = db_cmd + kube_cmd_prefix + mysql_prefix - db_cmd += '%s\' ' % db_elem - subprocess.check_call([db_cmd], shell=True, stderr=DEVNULL) - - shutil.rmtree(mariadb_staging_dir, ignore_errors=True) - - except (OSError, subprocess.CalledProcessError) as e: - LOG.error("Failed to restore MariaDB data. Error: %s", e) - raise RestoreFail("Failed to restore MariaDB data.") - - -def backup_postgres_size(): - """ Backup postgres size estimate """ - try: - total_size = 0 - - # Backup roles, table spaces and schemas for databases. 
- proc = subprocess.Popen([('sudo -u postgres pg_dumpall --clean ' + - '--schema-only | wc -c')], shell=True, - stdout=subprocess.PIPE, stderr=DEVNULL) - - for line in proc.stdout: - total_size = int(line) - break - - proc.communicate() - - # get backup database - backup_databases, backup_db_skip_tables = get_backup_databases() - - # Backup data for databases. - for _, db_elem in enumerate(backup_databases): - - db_cmd = 'sudo -u postgres pg_dump --format=plain --inserts ' - db_cmd += '--disable-triggers --data-only %s ' % db_elem - - for _, table_elem in enumerate(backup_db_skip_tables[db_elem]): - db_cmd += '--exclude-table=%s ' % table_elem - - db_cmd += '| wc -c' - - proc = subprocess.Popen([db_cmd], shell=True, - stdout=subprocess.PIPE, stderr=DEVNULL) - - for line in proc.stdout: - total_size += int(line) - break - - proc.communicate() - - return total_size - - except subprocess.CalledProcessError: - LOG.error("Failed to estimate backup database size.") - raise BackupFail("Failed to estimate backup database size") - - -def backup_postgres(archive, staging_dir): - """ Backup postgres configuration """ - try: - postgres_staging_dir = staging_dir + '/postgres' - os.mkdir(postgres_staging_dir, 0o655) - - # Backup roles, table spaces and schemas for databases. - subprocess.check_call([('sudo -u postgres pg_dumpall --clean ' + - '--schema-only' + - '> %s/%s' % (postgres_staging_dir, - 'postgres.sql.config'))], - shell=True, stderr=DEVNULL) - - # get backup database - backup_databases, backup_db_skip_tables = get_backup_databases() - - # Backup data for databases. 
- for _, db_elem in enumerate(backup_databases): - - db_cmd = 'sudo -u postgres pg_dump --format=plain --inserts ' - db_cmd += '--disable-triggers --data-only %s ' % db_elem - - for _, table_elem in enumerate(backup_db_skip_tables[db_elem]): - db_cmd += '--exclude-table=%s ' % table_elem - - db_cmd += '> %s/%s.sql.data' % (postgres_staging_dir, db_elem) - - subprocess.check_call([db_cmd], shell=True, stderr=DEVNULL) - - archive.add(postgres_staging_dir, arcname='postgres') - - except (OSError, subprocess.CalledProcessError, tarfile.TarError): - LOG.error("Failed to backup postgres databases.") - raise BackupFail("Failed to backup database configuration") - - -def restore_postgres(archive, staging_dir): - """ Restore postgres configuration """ - try: - postgres_staging_dir = staging_dir + '/postgres' - archive.extractall(path=staging_dir, - members=filter_directory(archive, 'postgres')) - - utils.start_service("postgresql") - - # Restore roles, table spaces and schemas for databases. - subprocess.check_call(["sudo", "-u", "postgres", "psql", "-f", - postgres_staging_dir + - '/postgres.sql.config', "postgres"], - stdout=DEVNULL, stderr=DEVNULL) - - # Restore data for databases. - for data in glob.glob(postgres_staging_dir + '/*.sql.data'): - db_elem = data.split('/')[-1].split('.')[0] - subprocess.check_call(["sudo", "-u", "postgres", "psql", "-f", - data, db_elem], - stdout=DEVNULL) - - except (OSError, subprocess.CalledProcessError, tarfile.TarError) as e: - LOG.error("Failed to restore postgres databases. 
Error: %s", e) - raise RestoreFail("Failed to restore database configuration") - - finally: - utils.stop_service('postgresql') - - -def filter_config_dir(archive, directory): - for tarinfo in archive: - if tarinfo.name.find('config/' + directory) == 0: - yield tarinfo - - -def restore_config_dir(archive, staging_dir, config_dir, dest_dir): - """ Restore configuration directory if it exists """ - try: - archive.extractall(staging_dir, - members=filter_config_dir(archive, config_dir)) - - # Copy files from backup to dest dir - if (os.path.exists(staging_dir + '/config/' + config_dir) and - os.listdir(staging_dir + '/config/' + config_dir)): - subprocess.call(["mkdir", "-p", dest_dir]) - - try: - for f in glob.glob( - staging_dir + '/config/' + config_dir + '/*'): - subprocess.check_call(["cp", "-p", f, dest_dir]) - except IOError: - LOG.warning("Failed to copy %s files" % config_dir) - - except (subprocess.CalledProcessError, tarfile.TarError): - LOG.info("No custom %s config was found during restore." 
% config_dir) - - -def backup_std_dir_size(directory): - """ Backup standard directory size estimate """ - try: - return utils.directory_get_size(directory) - - except OSError: - LOG.error("Failed to estimate backup size for %s" % directory) - raise BackupFail("Failed to estimate backup size for %s" % directory) - - -def backup_std_dir(archive, directory): - """ Backup standard directory """ - try: - archive.add(directory, arcname=os.path.basename(directory)) - - except tarfile.TarError: - LOG.error("Failed to backup %s" % directory) - raise BackupFail("Failed to backup %s" % directory) - - -def restore_std_dir(archive, directory): - """ Restore standard directory """ - try: - shutil.rmtree(directory, ignore_errors=True) - # Verify that archive contains this directory - try: - archive.getmember(os.path.basename(directory)) - except KeyError: - LOG.error("Archive does not contain directory %s" % directory) - raise RestoreFail("Invalid backup file - missing directory %s" % - directory) - archive.extractall( - path=os.path.dirname(directory), - members=filter_directory(archive, os.path.basename(directory))) - - except (shutil.Error, tarfile.TarError): - LOG.error("Failed to restore %s" % directory) - raise RestoreFail("Failed to restore %s" % directory) - - -def configure_loopback_interface(archive): - """ Restore and apply configuration for loopback interface """ - utils.remove_interface_config_files() - restore_etc_file( - archive, utils.NETWORK_SCRIPTS_PATH, - 'sysconfig/network-scripts/' + utils.NETWORK_SCRIPTS_LOOPBACK) - utils.restart_networking() - - -def backup_ceph_crush_map(archive, staging_dir): - """ Backup ceph crush map """ - try: - ceph_staging_dir = os.path.join(staging_dir, 'ceph') - os.mkdir(ceph_staging_dir, 0o655) - crushmap_file = os.path.join(ceph_staging_dir, - sysinv_constants.CEPH_CRUSH_MAP_BACKUP) - subprocess.check_call(['ceph', 'osd', 'getcrushmap', - '-o', crushmap_file], stdout=DEVNULL, - stderr=DEVNULL) - archive.add(crushmap_file, 
arcname='ceph/' + - sysinv_constants.CEPH_CRUSH_MAP_BACKUP) - except Exception as e: - LOG.error('Failed to backup ceph crush map. Reason: {}'.format(e)) - raise BackupFail('Failed to backup ceph crush map') - - -def restore_ceph_crush_map(archive): - """ Restore ceph crush map """ - if not file_exists_in_archive(archive, 'ceph/' + - sysinv_constants.CEPH_CRUSH_MAP_BACKUP): - return - - try: - crush_map_file = 'ceph/' + sysinv_constants.CEPH_CRUSH_MAP_BACKUP - if file_exists_in_archive(archive, crush_map_file): - member = archive.getmember(crush_map_file) - # Copy the member to avoid changing the name for future - # operations on this member. - temp_member = copy.copy(member) - temp_member.name = os.path.basename(temp_member.name) - archive.extract(temp_member, - path=sysinv_constants.SYSINV_CONFIG_PATH) - - except tarfile.TarError as e: - LOG.error('Failed to restore crush map file. Reason: {}'.format(e)) - raise RestoreFail('Failed to restore crush map file') - - -def check_size(archive_dir): - """Check if there is enough space to create backup.""" - backup_overhead_bytes = 1024 ** 3 # extra GB for staging directory - - backup_size = (backup_overhead_bytes + - backup_etc_size() + - backup_config_size(tsconfig.CONFIG_PATH) + - backup_puppet_data_size(constants.HIERADATA_PERMDIR) + - backup_keyring_size(keyring_permdir) + - backup_ldap_size() + - backup_postgres_size() + - backup_std_dir_size(home_permdir) + - backup_std_dir_size(patching_permdir) + - backup_std_dir_size(patching_repo_permdir) + - backup_std_dir_size(extension_permdir) + - backup_std_dir_size(patch_vault_permdir) + - backup_armada_manifest_size(constants.ARMADA_PERMDIR) + - backup_std_dir_size(constants.HELM_CHARTS_PERMDIR) + - backup_mariadb_size() - ) - - archive_dir_free_space = \ - utils.filesystem_get_free_space(archive_dir) - - if backup_size > archive_dir_free_space: - print("Archive directory (%s) does not have enough free " - "space (%s), estimated backup size is %s." 
% - (archive_dir, utils.print_bytes(archive_dir_free_space), - utils.print_bytes(backup_size))) - - raise BackupFail("Not enough free space for backup.") - - -def backup(backup_name, archive_dir, clone=False): - """Backup configuration.""" - - if not os.path.isdir(archive_dir): - raise BackupFail("Archive directory (%s) not found." % archive_dir) - - if not utils.is_active("management-ip"): - raise BackupFail( - "Backups can only be performed from the active controller.") - - if os.path.isfile(backup_in_progress): - raise BackupFail("Backup already in progress.") - else: - open(backup_in_progress, 'w') - - fmApi = fm_api.FaultAPIs() - entity_instance_id = "%s=%s" % (fm_constants.FM_ENTITY_TYPE_HOST, - sysinv_constants.CONTROLLER_HOSTNAME) - fault = fm_api.Fault(alarm_id=fm_constants.FM_ALARM_ID_BACKUP_IN_PROGRESS, - alarm_state=fm_constants.FM_ALARM_STATE_SET, - entity_type_id=fm_constants.FM_ENTITY_TYPE_HOST, - entity_instance_id=entity_instance_id, - severity=fm_constants.FM_ALARM_SEVERITY_MINOR, - reason_text=("System Backup in progress."), - # operational - alarm_type=fm_constants.FM_ALARM_TYPE_7, - # congestion - probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_8, - proposed_repair_action=("No action required."), - service_affecting=False) - - fmApi.set_fault(fault) - - staging_dir = None - system_tar_path = None - warnings = '' - try: - os.chdir('/') - - if not clone: - check_size(archive_dir) - - print ("\nPerforming backup (this might take several minutes):") - staging_dir = tempfile.mkdtemp(dir=archive_dir) - - system_tar_path = os.path.join(archive_dir, - backup_name + '_system.tgz') - system_archive = tarfile.open(system_tar_path, "w:gz") - - step = 1 - total_steps = 16 - - # Step 1: Backup etc - backup_etc(system_archive) - utils.progress(total_steps, step, 'backup etc', 'DONE') - step += 1 - - # Step 2: Backup configuration - backup_config(system_archive, tsconfig.CONFIG_PATH) - utils.progress(total_steps, step, 'backup configuration', 'DONE') - step 
+= 1 - - # Step 3: Backup puppet data - backup_puppet_data(system_archive, constants.HIERADATA_PERMDIR) - utils.progress(total_steps, step, 'backup puppet data', 'DONE') - step += 1 - - # Step 4: Backup armada data - backup_armada_manifest_data(system_archive, constants.ARMADA_PERMDIR) - utils.progress(total_steps, step, 'backup armada data', 'DONE') - step += 1 - - # Step 5: Backup helm charts data - backup_std_dir(system_archive, constants.HELM_CHARTS_PERMDIR) - utils.progress(total_steps, step, 'backup helm charts', 'DONE') - step += 1 - - # Step 6: Backup keyring - backup_keyring(system_archive, keyring_permdir) - utils.progress(total_steps, step, 'backup keyring', 'DONE') - step += 1 - - # Step 7: Backup ldap - backup_ldap(system_archive, staging_dir) - utils.progress(total_steps, step, 'backup ldap', 'DONE') - step += 1 - - # Step 8: Backup postgres - backup_postgres(system_archive, staging_dir) - utils.progress(total_steps, step, 'backup postgres', 'DONE') - step += 1 - - # Step 9: Backup mariadb - backup_mariadb(system_archive, staging_dir) - utils.progress(total_steps, step, 'backup mariadb', 'DONE') - step += 1 - - # Step 10: Backup home - backup_std_dir(system_archive, home_permdir) - utils.progress(total_steps, step, 'backup home directory', 'DONE') - step += 1 - - # Step 11: Backup patching - if not clone: - backup_std_dir(system_archive, patching_permdir) - utils.progress(total_steps, step, 'backup patching', 'DONE') - step += 1 - - # Step 12: Backup patching repo - if not clone: - backup_std_dir(system_archive, patching_repo_permdir) - utils.progress(total_steps, step, 'backup patching repo', 'DONE') - step += 1 - - # Step 13: Backup extension filesystem - backup_std_dir(system_archive, extension_permdir) - utils.progress(total_steps, step, 'backup extension filesystem ' - 'directory', 'DONE') - step += 1 - - # Step 14: Backup patch-vault filesystem - if os.path.exists(patch_vault_permdir): - backup_std_dir(system_archive, patch_vault_permdir) - 
utils.progress(total_steps, step, 'backup patch-vault filesystem ' - 'directory', 'DONE') - step += 1 - - # Step 15: Backup ceph crush map - backup_ceph_crush_map(system_archive, staging_dir) - utils.progress(total_steps, step, 'backup ceph crush map', 'DONE') - step += 1 - - # Step 16: Create archive - system_archive.close() - utils.progress(total_steps, step, 'create archive', 'DONE') - step += 1 - - except Exception: - if system_tar_path and os.path.isfile(system_tar_path): - os.remove(system_tar_path) - - raise - finally: - fmApi.clear_fault(fm_constants.FM_ALARM_ID_BACKUP_IN_PROGRESS, - entity_instance_id) - os.remove(backup_in_progress) - if staging_dir: - shutil.rmtree(staging_dir, ignore_errors=True) - - system_msg = "System backup file created" - if not clone: - system_msg += ": " + system_tar_path - - print(system_msg) - if warnings != '': - print("WARNING: The following problems occurred:") - print(textwrap.fill(warnings, 80)) - - -def create_restore_runtime_config(filename): - """ Create any runtime parameters needed for Restore.""" - config = {} - # We need to re-enable Openstack password rules, which - # were previously disabled while the controller manifests - # were applying during a Restore - config['classes'] = ['keystone::security_compliance'] - utils.create_manifest_runtime_config(filename, config) - - -def restore_system(backup_file, include_storage_reinstall=False, clone=False): - """Restoring system configuration.""" - - if (os.path.exists(constants.CGCS_CONFIG_FILE) or - os.path.exists(tsconfig.CONFIG_PATH) or - os.path.exists(constants.INITIAL_CONFIG_COMPLETE_FILE)): - print(textwrap.fill( - "Configuration has already been done. " - "A system restore operation can only be done " - "immediately after the load has been installed.", 80)) - print('') - raise RestoreFail("System configuration already completed") - - if not os.path.isabs(backup_file): - raise RestoreFail("Backup file (%s) not found. Full path is " - "required." 
% backup_file) - - if os.path.isfile(restore_in_progress): - raise RestoreFail("Restore already in progress.") - else: - open(restore_in_progress, 'w') - - # Add newline to console log for install-clone scenario - newline = clone - staging_dir = None - - try: - try: - with open(os.devnull, "w") as fnull: - subprocess.check_call(["vgdisplay", "cgts-vg"], - stdout=fnull, - stderr=fnull) - except subprocess.CalledProcessError: - LOG.error("The cgts-vg volume group was not found") - raise RestoreFail("Volume groups not configured") - - print("\nRestoring system (this will take several minutes):") - # Use /scratch for the staging dir for now, - # until /opt/backups is available - staging_dir = tempfile.mkdtemp(dir='/scratch') - # Permission change required or postgres restore fails - subprocess.call(['chmod', 'a+rx', staging_dir], stdout=DEVNULL) - os.chdir('/') - - step = 1 - total_steps = 26 - - # Step 1: Open archive and verify installed load matches backup - try: - archive = tarfile.open(backup_file) - except tarfile.TarError as e: - LOG.exception(e) - raise RestoreFail("Error opening backup file. Invalid backup " - "file.") - check_load_versions(archive, staging_dir) - check_load_subfunctions(archive, staging_dir) - utils.progress(total_steps, step, 'open archive', 'DONE', newline) - step += 1 - - # Patching is potentially a multi-phase step. - # If the controller is impacted by patches from the backup, - # it must be rebooted before continuing the restore. - # If this is the second pass through, we can skip over this. 
- if not os.path.isfile(restore_patching_complete) and not clone: - # Step 2: Restore patching - restore_std_dir(archive, patching_permdir) - utils.progress(total_steps, step, 'restore patching', 'DONE', - newline) - step += 1 - - # Step 3: Restore patching repo - restore_std_dir(archive, patching_repo_permdir) - utils.progress(total_steps, step, 'restore patching repo', 'DONE', - newline) - step += 1 - - # Step 4: Apply patches - try: - subprocess.check_output(["sw-patch", "install-local"]) - except subprocess.CalledProcessError: - LOG.error("Failed to install patches") - raise RestoreFail("Failed to install patches") - utils.progress(total_steps, step, 'install patches', 'DONE', - newline) - step += 1 - - open(restore_patching_complete, 'w') - - # If the controller was impacted by patches, we need to reboot. - if os.path.isfile(node_is_patched): - if not clone: - print("\nThis controller has been patched. " + - "A reboot is required.") - print("After the reboot is complete, " + - "re-execute the restore command.") - while True: - user_input = input( - "Enter 'reboot' to reboot controller: ") - if user_input == 'reboot': - break - LOG.info("This controller has been patched. Rebooting now") - print("\nThis controller has been patched. Rebooting now\n\n") - time.sleep(5) - os.remove(restore_in_progress) - if staging_dir: - shutil.rmtree(staging_dir, ignore_errors=True) - subprocess.call("reboot") - - else: - # We need to restart the patch controller and agent, since - # we setup the repo and patch store outside its control - with open(os.devnull, "w") as devnull: - subprocess.call( - ["systemctl", - "restart", - "sw-patch-controller-daemon.service"], - stdout=devnull, stderr=devnull) - subprocess.call( - ["systemctl", - "restart", - "sw-patch-agent.service"], - stdout=devnull, stderr=devnull) - if clone: - # No patches were applied, return to cloning code - # to run validation code. 
- return RESTORE_RERUN_REQUIRED - else: - # Add the skipped steps - step += 3 - - if os.path.isfile(node_is_patched): - # If we get here, it means the node was patched by the user - # AFTER the restore applied patches and rebooted, but didn't - # reboot. - # This means the patch lineup no longer matches what's in the - # backup, but we can't (and probably shouldn't) prevent that. - # However, since this will ultimately cause the node to fail - # the goenabled step, we can fail immediately and force the - # user to reboot. - print ("\nThis controller has been patched, but not rebooted.") - print ("Please reboot before continuing the restore process.") - raise RestoreFail("Controller node patched without rebooting") - - # Flag can now be cleared - if os.path.exists(restore_patching_complete): - os.remove(restore_patching_complete) - - # Prefetch keyring - prefetch_keyring(archive) - - # Step 5: Restore configuration - restore_configuration(archive, staging_dir) - # In AIO SX systems, the loopback interface is used as the management - # interface. However, the application of the interface manifest will - # not configure the necessary addresses on the loopback interface (see - # apply_network_config.sh for details). So, we need to configure the - # loopback interface here. - if tsconfig.system_mode == sysinv_constants.SYSTEM_MODE_SIMPLEX: - configure_loopback_interface(archive) - # Write the simplex flag - utils.write_simplex_flag() - utils.progress(total_steps, step, 'restore configuration', 'DONE', - newline) - step += 1 - - # Step 6: Apply restore bootstrap manifest - controller_0_address = utils.get_address_from_hosts_file( - 'controller-0') - restore_static_puppet_data(archive, constants.HIERADATA_WORKDIR) - try: - utils.apply_manifest(controller_0_address, - sysinv_constants.CONTROLLER, - 'bootstrap', - constants.HIERADATA_WORKDIR) - except Exception as e: - LOG.exception(e) - raise RestoreFail( - 'Failed to apply bootstrap manifest. 
' - 'See /var/log/puppet/latest/puppet.log for details.') - - utils.progress(total_steps, step, 'apply bootstrap manifest', 'DONE', - newline) - step += 1 - - # Step 7: Restore puppet data - restore_puppet_data(archive, constants.HIERADATA_WORKDIR, - controller_0_address) - utils.progress(total_steps, step, 'restore puppet data', 'DONE', - newline) - step += 1 - - # Step 8: Persist configuration - utils.persist_config() - utils.progress(total_steps, step, 'persist configuration', 'DONE', - newline) - step += 1 - - # Step 9: Apply controller manifest - try: - utils.apply_manifest(controller_0_address, - sysinv_constants.CONTROLLER, - 'controller', - constants.HIERADATA_PERMDIR) - except Exception as e: - LOG.exception(e) - raise RestoreFail( - 'Failed to apply controller manifest. ' - 'See /var/log/puppet/latest/puppet.log for details.') - utils.progress(total_steps, step, 'apply controller manifest', 'DONE', - newline) - step += 1 - - # Step 10: Apply runtime controller manifests - restore_filename = os.path.join(staging_dir, 'restore.yaml') - create_restore_runtime_config(restore_filename) - try: - utils.apply_manifest(controller_0_address, - sysinv_constants.CONTROLLER, - 'runtime', - constants.HIERADATA_PERMDIR, - runtime_filename=restore_filename) - except Exception as e: - LOG.exception(e) - raise RestoreFail( - 'Failed to apply runtime controller manifest. 
' - 'See /var/log/puppet/latest/puppet.log for details.') - utils.progress(total_steps, step, - 'apply runtime controller manifest', 'DONE', - newline) - step += 1 - - # Move the staging dir under /opt/backups, now that it's setup - shutil.rmtree(staging_dir, ignore_errors=True) - staging_dir = tempfile.mkdtemp(dir=constants.BACKUPS_PATH) - # Permission change required or postgres restore fails - subprocess.call(['chmod', 'a+rx', staging_dir], stdout=DEVNULL) - - # Step 11: Apply banner customization - utils.apply_banner_customization() - utils.progress(total_steps, step, 'apply banner customization', 'DONE', - newline) - step += 1 - - # Step 12: Restore dnsmasq and pxeboot config - restore_dnsmasq(archive, tsconfig.CONFIG_PATH) - utils.progress(total_steps, step, 'restore dnsmasq', 'DONE', newline) - step += 1 - - # Step 13: Restore keyring - restore_keyring(archive, keyring_permdir) - utils.progress(total_steps, step, 'restore keyring', 'DONE', newline) - step += 1 - - # Step 14: Restore ldap - restore_ldap(archive, ldap_permdir, staging_dir) - utils.progress(total_steps, step, 'restore ldap', 'DONE', newline) - step += 1 - - # Step 15: Restore postgres - restore_postgres(archive, staging_dir) - utils.progress(total_steps, step, 'restore postgres', 'DONE', newline) - step += 1 - - # Step 16: Extract and store mariadb data - extract_mariadb_data(archive) - utils.progress(total_steps, step, 'extract mariadb', 'DONE', newline) - step += 1 - - # Step 17: Restore ceph crush map - restore_ceph_crush_map(archive) - utils.progress(total_steps, step, 'restore ceph crush map', 'DONE', - newline) - step += 1 - - # Step 18: Restore home - restore_std_dir(archive, home_permdir) - utils.progress(total_steps, step, 'restore home directory', 'DONE', - newline) - step += 1 - - # Step 19: Restore extension filesystem - restore_std_dir(archive, extension_permdir) - utils.progress(total_steps, step, 'restore extension filesystem ' - 'directory', 'DONE', newline) - step += 1 - - # 
Step 20: Restore patch-vault filesystem - if file_exists_in_archive(archive, - os.path.basename(patch_vault_permdir)): - restore_std_dir(archive, patch_vault_permdir) - utils.progress(total_steps, step, 'restore patch-vault filesystem ' - 'directory', 'DONE', newline) - - step += 1 - - # Step 21: Restore external ceph configuration files. - restore_ceph_external_config_files(archive, staging_dir) - utils.progress(total_steps, step, 'restore CEPH external config', - 'DONE', newline) - step += 1 - - # Step 22: Restore Armada manifest - restore_armada_manifest_data(archive, constants.ARMADA_PERMDIR) - utils.progress(total_steps, step, 'restore armada manifest', - 'DONE', newline) - step += 1 - - # Step 23: Restore Helm charts - restore_std_dir(archive, constants.HELM_CHARTS_PERMDIR) - utils.progress(total_steps, step, 'restore helm charts', - 'DONE', newline) - step += 1 - - # Step 24: Create Helm overrides directory - create_helm_overrides_directory() - utils.progress(total_steps, step, 'create helm overrides directory', - 'DONE', newline) - step += 1 - - # Step 25: Shutdown file systems - archive.close() - shutil.rmtree(staging_dir, ignore_errors=True) - utils.shutdown_file_systems() - utils.progress(total_steps, step, 'shutdown file systems', 'DONE', - newline) - step += 1 - - # Step 26: Recover services - utils.mtce_restart() - utils.mark_config_complete() - time.sleep(120) - - for service in ['sysinv-conductor', 'sysinv-inv']: - if not utils.wait_sm_service(service): - raise RestoreFail("Services have failed to initialize.") - - utils.progress(total_steps, step, 'recover services', 'DONE', newline) - step += 1 - - if tsconfig.system_mode != sysinv_constants.SYSTEM_MODE_SIMPLEX: - - print("\nRestoring node states (this will take several minutes):") - - with openstack.OpenStack() as client: - # On ceph setups storage nodes take about 90 seconds - # to become locked. 
Setting the timeout to 120 seconds - # for such setups - lock_timeout = 60 - storage_hosts = sysinv.get_hosts(client.admin_token, - client.conf['region_name'], - personality='storage') - if storage_hosts: - lock_timeout = 120 - - failed_lock_host = False - skip_hosts = ['controller-0'] - if not include_storage_reinstall: - if storage_hosts: - install_uuid = utils.get_install_uuid() - for h in storage_hosts: - skip_hosts.append(h.name) - - # Update install_uuid on the storage node - client.sysinv.ihost.update_install_uuid( - h.uuid, - install_uuid) - - skip_hosts_count = len(skip_hosts) - - # Wait for nodes to be identified as disabled before attempting - # to lock hosts. Even if after 3 minute nodes are still not - # identified as disabled, we still continue the restore. - if not client.wait_for_hosts_disabled( - exempt_hostnames=skip_hosts, - timeout=180): - LOG.info("At least one node is not in a disabling state. " - "Continuing.") - - print("\nLocking nodes:") - try: - failed_hosts = client.lock_hosts(skip_hosts, - utils.progress, - timeout=lock_timeout) - # Don't power off nodes that could not be locked - if len(failed_hosts) > 0: - skip_hosts.append(failed_hosts) - - except (KeystoneFail, SysInvFail) as e: - LOG.exception(e) - failed_lock_host = True - - if not failed_lock_host: - print("\nPowering-off nodes:") - try: - client.power_off_hosts(skip_hosts, - utils.progress, - timeout=60) - except (KeystoneFail, SysInvFail) as e: - LOG.exception(e) - # this is somehow expected - - if failed_lock_host or len(skip_hosts) > skip_hosts_count: - if include_storage_reinstall: - print(textwrap.fill( - "Failed to lock at least one node. " + - "Please lock the unlocked nodes manually.", 80 - )) - else: - print(textwrap.fill( - "Failed to lock at least one node. 
" + - "Please lock the unlocked controller-1 or " + - "worker nodes manually.", 80 - )) - - if not clone: - print(textwrap.fill( - "Before continuing to the next step in the restore, " + - "please ensure all nodes other than controller-0 " + - "and storage nodes, if they are not being " + - "reinstalled, are powered off. Please refer to the " + - "system administration guide for more details.", 80 - )) - - finally: - os.remove(restore_in_progress) - if staging_dir: - shutil.rmtree(staging_dir, ignore_errors=True) - cleanup_prefetched_keyring() - - fmApi = fm_api.FaultAPIs() - entity_instance_id = "%s=%s" % (fm_constants.FM_ENTITY_TYPE_HOST, - sysinv_constants.CONTROLLER_HOSTNAME) - fault = fm_api.Fault( - alarm_id=fm_constants.FM_ALARM_ID_BACKUP_IN_PROGRESS, - alarm_state=fm_constants.FM_ALARM_STATE_MSG, - entity_type_id=fm_constants.FM_ENTITY_TYPE_HOST, - entity_instance_id=entity_instance_id, - severity=fm_constants.FM_ALARM_SEVERITY_MINOR, - reason_text=("System Restore complete."), - # other - alarm_type=fm_constants.FM_ALARM_TYPE_0, - # unknown - probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_UNKNOWN, - proposed_repair_action=(""), - service_affecting=False) - - fmApi.set_fault(fault) - - if utils.get_system_type() == sysinv_constants.TIS_AIO_BUILD: - print("\nApplying worker manifests for %s. " % - (utils.get_controller_hostname())) - print("Node will reboot on completion.") - - sysinv.do_worker_config_complete(utils.get_controller_hostname()) - - # show in-progress log on console every 30 seconds - # until self reboot or timeout - - time.sleep(30) - for i in range(1, 10): - print("worker manifest apply in progress ... 
") - time.sleep(30) - - raise RestoreFail("Timeout running worker manifests, " - "reboot did not occur") - - return RESTORE_COMPLETE diff --git a/controllerconfig/controllerconfig/controllerconfig/clone.py b/controllerconfig/controllerconfig/controllerconfig/clone.py deleted file mode 100644 index 41c8fe3d65..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/clone.py +++ /dev/null @@ -1,712 +0,0 @@ -# -# Copyright (c) 2017 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -""" -Clone a Configured System and Install the image on another -identical hardware or the same hardware. -""" - -from __future__ import print_function -import os -import re -import glob -import time -import shutil -import netaddr -import tempfile -import fileinput -import subprocess - -from controllerconfig.common import constants -from sysinv.common import constants as si_const -from controllerconfig import sysinv_api -import tsconfig.tsconfig as tsconfig -from controllerconfig.common import log -from controllerconfig.common.exceptions import CloneFail -from controllerconfig.common.exceptions import BackupFail -from controllerconfig import utils -from controllerconfig import backup_restore - -DEBUG = False -LOG = log.get_logger(__name__) -DEVNULL = open(os.devnull, 'w') -CLONE_ARCHIVE_DIR = "clone-archive" -CLONE_ISO_INI = ".cloneiso.ini" -NAME = "name" -INSTALLED = "installed_at" -RESULT = "result" -IN_PROGRESS = "in-progress" -FAIL = "failed" -OK = "ok" - - -def clone_status(): - """ Check status of last install-clone. 
""" - INI_FILE1 = os.path.join("/", CLONE_ARCHIVE_DIR, CLONE_ISO_INI) - INI_FILE2 = os.path.join(tsconfig.PLATFORM_CONF_PATH, CLONE_ISO_INI) - name = "unknown" - result = "unknown" - installed_at = "unknown time" - for ini_file in [INI_FILE1, INI_FILE2]: - if os.path.exists(ini_file): - with open(ini_file) as f: - s = f.read() - for line in s.split("\n"): - if line.startswith(NAME): - name = line.split("=")[1].strip() - elif line.startswith(RESULT): - result = line.split("=")[1].strip() - elif line.startswith(INSTALLED): - installed_at = line.split("=")[1].strip() - break # one file was found, skip the other file - if result != "unknown": - if result == OK: - print("\nInstallation of cloned image [{}] was successful at {}\n" - .format(name, installed_at)) - elif result == FAIL: - print("\nInstallation of cloned image [{}] failed at {}\n" - .format(name, installed_at)) - else: - print("\ninstall-clone is in progress.\n") - else: - print("\nCloned image is not installed on this node.\n") - - -def check_size(archive_dir): - """ Check if there is enough space to create iso. """ - overhead_bytes = 1024 ** 3 # extra GB for staging directory - # Size of the cloned iso is directly proportional to the - # installed package repository (note that patches are a part of - # the system archive size below). - # 1G overhead size added (above) will accomodate the temporary - # workspace (updating system archive etc) needed to create the iso. 
- feed_dir = os.path.join('/www', 'pages', 'feed', - 'rel-' + tsconfig.SW_VERSION) - overhead_bytes += backup_restore.backup_std_dir_size(feed_dir) - - clone_size = ( - overhead_bytes + - backup_restore.backup_etc_size() + - backup_restore.backup_config_size(tsconfig.CONFIG_PATH) + - backup_restore.backup_puppet_data_size(constants.HIERADATA_PERMDIR) + - backup_restore.backup_keyring_size(backup_restore.keyring_permdir) + - backup_restore.backup_ldap_size() + - backup_restore.backup_postgres_size() + - backup_restore.backup_std_dir_size(backup_restore.home_permdir) + - backup_restore.backup_std_dir_size(backup_restore.patching_permdir) + - backup_restore.backup_std_dir_size( - backup_restore.patching_repo_permdir) + - backup_restore.backup_std_dir_size(backup_restore.extension_permdir) + - backup_restore.backup_std_dir_size( - backup_restore.patch_vault_permdir) + - backup_restore.backup_armada_manifest_size( - constants.ARMADA_PERMDIR) + - backup_restore.backup_std_dir_size( - constants.HELM_CHARTS_PERMDIR) + - backup_restore.backup_mariadb_size()) - - archive_dir_free_space = \ - utils.filesystem_get_free_space(archive_dir) - - if clone_size > archive_dir_free_space: - print("\nArchive directory (%s) does not have enough free " - "space (%s), estimated size to create image is %s." 
% - (archive_dir, - utils.print_bytes(archive_dir_free_space), - utils.print_bytes(clone_size))) - raise CloneFail("Not enough free space.\n") - - -def update_bootloader_default(bl_file, host): - """ Update bootloader files for cloned image """ - if not os.path.exists(bl_file): - LOG.error("{} does not exist".format(bl_file)) - raise CloneFail("{} does not exist".format(os.path.basename(bl_file))) - - # Tags should be in sync with common-bsp/files/centos.syslinux.cfg - # and common-bsp/files/grub.cfg - STANDARD_STANDARD = '0' - STANDARD_EXTENDED = 'S0' - AIO_STANDARD = '2' - AIO_EXTENDED = 'S2' - AIO_LL_STANDARD = '4' - AIO_LL_EXTENDED = 'S4' - if "grub.cfg" in bl_file: - STANDARD_STANDARD = 'standard>serial>' + \ - si_const.SYSTEM_SECURITY_PROFILE_STANDARD - STANDARD_EXTENDED = 'standard>serial>' + \ - si_const.SYSTEM_SECURITY_PROFILE_EXTENDED - AIO_STANDARD = 'aio>serial>' + \ - si_const.SYSTEM_SECURITY_PROFILE_STANDARD - AIO_EXTENDED = 'aio>serial>' + \ - si_const.SYSTEM_SECURITY_PROFILE_EXTENDED - AIO_LL_STANDARD = 'aio-lowlat>serial>' + \ - si_const.SYSTEM_SECURITY_PROFILE_STANDARD - AIO_LL_EXTENDED = 'aio-lowlat>serial>' + \ - si_const.SYSTEM_SECURITY_PROFILE_EXTENDED - SUBMENUITEM_TBOOT = 'tboot' - SUBMENUITEM_SECUREBOOT = 'secureboot' - - timeout_line = None - default_line = None - default_label_num = STANDARD_STANDARD - if utils.get_system_type() == si_const.TIS_AIO_BUILD: - if si_const.LOWLATENCY in tsconfig.subfunctions: - default_label_num = AIO_LL_STANDARD - else: - default_label_num = AIO_STANDARD - if (tsconfig.security_profile == - si_const.SYSTEM_SECURITY_PROFILE_EXTENDED): - default_label_num = STANDARD_EXTENDED - if utils.get_system_type() == si_const.TIS_AIO_BUILD: - if si_const.LOWLATENCY in tsconfig.subfunctions: - default_label_num = AIO_LL_EXTENDED - else: - default_label_num = AIO_EXTENDED - if "grub.cfg" in bl_file: - if host.tboot is not None: - if host.tboot == "true": - default_label_num = default_label_num + '>' + \ - SUBMENUITEM_TBOOT 
- else: - default_label_num = default_label_num + '>' + \ - SUBMENUITEM_SECUREBOOT - - try: - with open(bl_file) as f: - s = f.read() - for line in s.split("\n"): - if line.startswith("timeout"): - timeout_line = line - elif line.startswith("default"): - default_line = line - - if "grub.cfg" in bl_file: - replace = "default='{}'\ntimeout=10".format(default_label_num) - else: # isolinux format - replace = "default {}\ntimeout 10".format(default_label_num) - - if default_line and timeout_line: - s = s.replace(default_line, "") - s = s.replace(timeout_line, replace) - elif default_line: - s = s.replace(default_line, replace) - elif timeout_line: - s = s.replace(timeout_line, replace) - else: - s = replace + s - - s = re.sub(r'boot_device=[^\s]*', - 'boot_device=%s' % host.boot_device, - s) - s = re.sub(r'rootfs_device=[^\s]*', - 'rootfs_device=%s' % host.rootfs_device, - s) - s = re.sub(r'console=[^\s]*', - 'console=%s' % host.console, - s) - - with open(bl_file, "w") as f: - LOG.info("rewriting {}: label={} find=[{}][{}] replace=[{}]" - .format(bl_file, default_label_num, timeout_line, - default_line, replace.replace('\n', ''))) - f.write(s) - - except Exception as e: - LOG.error("update_bootloader_default failed: {}".format(e)) - raise CloneFail("Failed to update bootloader files") - - -def get_online_cpus(): - """ Get max cpu id """ - with open('/sys/devices/system/cpu/online') as f: - s = f.read() - max_cpu_id = s.split('-')[-1].strip() - LOG.info("Max cpu id:{} [{}]".format(max_cpu_id, s.strip())) - return max_cpu_id - return "" - - -def get_total_mem(): - """ Get total memory size """ - with open('/proc/meminfo') as f: - s = f.read() - for line in s.split("\n"): - if line.startswith("MemTotal:"): - mem_total = line.split()[1] - LOG.info("MemTotal:[{}]".format(mem_total)) - return mem_total - return "" - - -def get_disk_size(disk): - """ Get the disk size """ - disk_size = "" - try: - disk_size = subprocess.check_output( - ['lsblk', '--nodeps', '--output', 
'SIZE', - '--noheadings', '--bytes', disk]) - except Exception as e: - LOG.exception(e) - LOG.error("Failed to get disk size [{}]".format(disk)) - raise CloneFail("Failed to get disk size") - return disk_size.strip() - - -def create_ini_file(clone_archive_dir, iso_name): - """Create clone ini file.""" - interfaces = "" - my_hostname = utils.get_controller_hostname() - macs = sysinv_api.get_mac_addresses(my_hostname) - for intf in macs.keys(): - interfaces += intf + " " - - disk_paths = "" - for _, _, files in os.walk('/dev/disk/by-path'): - for f in files: - if f.startswith("pci-") and "part" not in f and "usb" not in f: - disk_size = get_disk_size('/dev/disk/by-path/' + f) - disk_paths += f + "#" + disk_size + " " - break # no need to go into sub-dirs. - - LOG.info("create ini: {} {}".format(macs, files)) - with open(os.path.join(clone_archive_dir, CLONE_ISO_INI), 'w') as f: - f.write('[clone_iso]\n') - f.write('name=' + iso_name + '\n') - f.write('host=' + my_hostname + '\n') - f.write('created_at=' + time.strftime("%Y-%m-%d %H:%M:%S %Z") - + '\n') - f.write('interfaces=' + interfaces + '\n') - f.write('disks=' + disk_paths + '\n') - f.write('cpus=' + get_online_cpus() + '\n') - f.write('mem=' + get_total_mem() + '\n') - LOG.info("create ini: ({}) ({})".format(interfaces, disk_paths)) - - -def create_iso(iso_name, archive_dir): - """ Create iso image. This is modelled after - the cgcs-root/build-tools/build-iso tool. """ - try: - controller_0 = sysinv_api.get_host_data('controller-0') - except Exception as e: - e_log = "Failed to retrieve controller-0 inventory details." 
- LOG.exception(e_log) - raise CloneFail(e_log) - - iso_dir = os.path.join(archive_dir, 'isolinux') - clone_archive_dir = os.path.join(iso_dir, CLONE_ARCHIVE_DIR) - output = None - tmpdir = None - total_steps = 6 - step = 1 - print ("\nCreating ISO:") - - # Add the correct kick-start file to the image - ks_file = "controller_ks.cfg" - if utils.get_system_type() == si_const.TIS_AIO_BUILD: - if si_const.LOWLATENCY in tsconfig.subfunctions: - ks_file = "smallsystem_lowlatency_ks.cfg" - else: - ks_file = "smallsystem_ks.cfg" - - try: - # prepare the iso files - images_dir = os.path.join(iso_dir, 'images') - os.mkdir(images_dir, 0o644) - pxe_dir = os.path.join('/pxeboot', - 'rel-' + tsconfig.SW_VERSION) - os.symlink(pxe_dir + '/installer-bzImage', - iso_dir + '/vmlinuz') - os.symlink(pxe_dir + '/installer-initrd', - iso_dir + '/initrd.img') - utils.progress(total_steps, step, 'preparing files', 'DONE') - step += 1 - - feed_dir = os.path.join('/www', 'pages', 'feed', - 'rel-' + tsconfig.SW_VERSION) - os.symlink(feed_dir + '/Packages', iso_dir + '/Packages') - os.symlink(feed_dir + '/repodata', iso_dir + '/repodata') - os.symlink(feed_dir + '/LiveOS', iso_dir + '/LiveOS') - shutil.copy2(feed_dir + '/isolinux.cfg', iso_dir) - update_bootloader_default(iso_dir + '/isolinux.cfg', controller_0) - shutil.copyfile('/usr/share/syslinux/isolinux.bin', - iso_dir + '/isolinux.bin') - os.symlink('/usr/share/syslinux/vesamenu.c32', - iso_dir + '/vesamenu.c32') - for filename in glob.glob(os.path.join(feed_dir, '*ks.cfg')): - shutil.copy(os.path.join(feed_dir, filename), iso_dir) - utils.progress(total_steps, step, 'preparing files', 'DONE') - step += 1 - - efiboot_dir = os.path.join(iso_dir, 'EFI', 'BOOT') - os.makedirs(efiboot_dir, 0o644) - l_efi_dir = os.path.join('/boot', 'efi', 'EFI') - shutil.copy2(l_efi_dir + '/BOOT/BOOTX64.EFI', efiboot_dir) - shutil.copy2(l_efi_dir + '/centos/MokManager.efi', efiboot_dir) - shutil.copy2(l_efi_dir + '/centos/grubx64.efi', efiboot_dir) - 
shutil.copy2('/pxeboot/EFI/grub.cfg', efiboot_dir) - update_bootloader_default(efiboot_dir + '/grub.cfg', controller_0) - shutil.copytree(l_efi_dir + '/centos/fonts', - efiboot_dir + '/fonts') - # copy EFI boot image and update the grub.cfg file - efi_img = images_dir + '/efiboot.img' - shutil.copy2(pxe_dir + '/efiboot.img', efi_img) - tmpdir = tempfile.mkdtemp(dir=archive_dir) - output = subprocess.check_output( - ["mount", "-t", "vfat", "-o", "loop", - efi_img, tmpdir], - stderr=subprocess.STDOUT) - # replace the grub.cfg file with the updated file - efi_grub_f = os.path.join(tmpdir, 'EFI', 'BOOT', 'grub.cfg') - os.remove(efi_grub_f) - shutil.copy2(efiboot_dir + '/grub.cfg', efi_grub_f) - subprocess.call(['umount', tmpdir]) - shutil.rmtree(tmpdir, ignore_errors=True) - tmpdir = None - - epoch_time = "%.9f" % time.time() - disc_info = [epoch_time, tsconfig.SW_VERSION, "x86_64"] - with open(iso_dir + '/.discinfo', 'w') as f: - f.write('\n'.join(disc_info)) - - # copy the latest install_clone executable - shutil.copy2('/usr/bin/install_clone', iso_dir) - subprocess.check_output("cat /pxeboot/post_clone_iso_ks.cfg >> " + - iso_dir + "/" + ks_file, shell=True) - utils.progress(total_steps, step, 'preparing files', 'DONE') - step += 1 - - # copy patches - iso_patches_dir = os.path.join(iso_dir, 'patches') - iso_patch_repo_dir = os.path.join(iso_patches_dir, 'repodata') - iso_patch_pkgs_dir = os.path.join(iso_patches_dir, 'Packages') - iso_patch_metadata_dir = os.path.join(iso_patches_dir, 'metadata') - iso_patch_applied_dir = os.path.join(iso_patch_metadata_dir, 'applied') - iso_patch_committed_dir = os.path.join(iso_patch_metadata_dir, - 'committed') - - os.mkdir(iso_patches_dir, 0o755) - os.mkdir(iso_patch_repo_dir, 0o755) - os.mkdir(iso_patch_pkgs_dir, 0o755) - os.mkdir(iso_patch_metadata_dir, 0o755) - os.mkdir(iso_patch_applied_dir, 0o755) - os.mkdir(iso_patch_committed_dir, 0o755) - - repodata = '/www/pages/updates/rel-%s/repodata/' % tsconfig.SW_VERSION - pkgsdir 
= '/www/pages/updates/rel-%s/Packages/' % tsconfig.SW_VERSION - patch_applied_dir = '/opt/patching/metadata/applied/' - patch_committed_dir = '/opt/patching/metadata/committed/' - subprocess.check_call(['rsync', '-a', repodata, - '%s/' % iso_patch_repo_dir]) - if os.path.exists(pkgsdir): - subprocess.check_call(['rsync', '-a', pkgsdir, - '%s/' % iso_patch_pkgs_dir]) - if os.path.exists(patch_applied_dir): - subprocess.check_call(['rsync', '-a', patch_applied_dir, - '%s/' % iso_patch_applied_dir]) - if os.path.exists(patch_committed_dir): - subprocess.check_call(['rsync', '-a', patch_committed_dir, - '%s/' % iso_patch_committed_dir]) - utils.progress(total_steps, step, 'preparing files', 'DONE') - step += 1 - - create_ini_file(clone_archive_dir, iso_name) - - os.chmod(iso_dir + '/isolinux.bin', 0o664) - iso_file = os.path.join(archive_dir, iso_name + ".iso") - output = subprocess.check_output( - ["nice", "mkisofs", - "-o", iso_file, "-R", "-D", - "-A", "oe_iso_boot", "-V", "oe_iso_boot", - "-f", "-quiet", - "-b", "isolinux.bin", "-c", "boot.cat", "-no-emul-boot", - "-boot-load-size", "4", "-boot-info-table", - "-eltorito-alt-boot", "-e", "images/efiboot.img", - "-no-emul-boot", - iso_dir], - stderr=subprocess.STDOUT) - LOG.info("{} created: [{}]".format(iso_file, output)) - utils.progress(total_steps, step, 'iso created', 'DONE') - step += 1 - - output = subprocess.check_output( - ["nice", "isohybrid", - "--uefi", - iso_file], - stderr=subprocess.STDOUT) - LOG.debug("isohybrid: {}".format(output)) - - output = subprocess.check_output( - ["nice", "implantisomd5", - iso_file], - stderr=subprocess.STDOUT) - LOG.debug("implantisomd5: {}".format(output)) - utils.progress(total_steps, step, 'checksum implanted', 'DONE') - print("Cloned iso image created: {}".format(iso_file)) - - except Exception as e: - LOG.exception(e) - e_log = "ISO creation ({}) failed".format(iso_name) - if output: - e_log += ' [' + output + ']' - LOG.error(e_log) - raise CloneFail("ISO creation 
failed.") - - finally: - if tmpdir: - subprocess.call(['umount', tmpdir], stderr=DEVNULL) - shutil.rmtree(tmpdir, ignore_errors=True) - - -def find_and_replace_in_file(target, find, replace): - """ Find and replace a string in a file. """ - found = None - try: - for line in fileinput.FileInput(target, inplace=1): - if find in line: - # look for "find" string within word boundaries - fpat = r'\b' + find + r'\b' - line = re.sub(fpat, replace, line) - found = True - print(line, end='') - - except Exception as e: - LOG.error("Failed to replace [{}] with [{}] in [{}]: {}" - .format(find, replace, target, str(e))) - found = None - finally: - fileinput.close() - return found - - -def find_and_replace(target_list, find, replace): - """ Find and replace a string in all files in a directory. """ - found = False - file_list = [] - for target in target_list: - if os.path.isfile(target): - if find_and_replace_in_file(target, find, replace): - found = True - file_list.append(target) - elif os.path.isdir(target): - try: - output = subprocess.check_output( - ['grep', '-rl', find, target]) - if output: - for line in output.split('\n'): - if line and find_and_replace_in_file( - line, find, replace): - found = True - file_list.append(line) - except Exception: - pass # nothing found in that directory - if not found: - LOG.error("[{}] not found in backup".format(find)) - else: - LOG.info("Replaced [{}] with [{}] in {}".format( - find, replace, file_list)) - - -def remove_from_archive(archive, unwanted): - """ Remove a file from the archive. """ - try: - subprocess.check_call(["tar", "--delete", - "--file=" + archive, - unwanted]) - except subprocess.CalledProcessError as e: - LOG.error("Delete of {} failed: {}".format(unwanted, e.output)) - raise CloneFail("Failed to modify backup archive") - - -def update_oamip_in_archive(tmpdir): - """ Update OAM IP in system archive file. 
""" - oam_list = sysinv_api.get_oam_ip() - if not oam_list: - raise CloneFail("Failed to get OAM IP") - for oamfind in [oam_list.oam_start_ip, oam_list.oam_end_ip, - oam_list.oam_subnet, oam_list.oam_floating_ip, - oam_list.oam_c0_ip, oam_list.oam_c1_ip]: - if not oamfind: - continue - ip = netaddr.IPNetwork(oamfind) - find_str = "" - if ip.version == 4: - # if ipv4, use 192.0.x.x as the temporary oam ip - find_str = str(ip.ip) - ipstr_list = find_str.split('.') - ipstr_list[0] = '192' - ipstr_list[1] = '0' - repl_ipstr = ".".join(ipstr_list) - else: - # if ipv6, use 2001:db8:x as the temporary oam ip - find_str = str(ip.ip) - ipstr_list = find_str.split(':') - ipstr_list[0] = '2001' - ipstr_list[1] = 'db8' - repl_ipstr = ":".join(ipstr_list) - if repl_ipstr: - find_and_replace( - [os.path.join(tmpdir, 'etc/hosts'), - os.path.join(tmpdir, 'etc/sysconfig/network-scripts'), - os.path.join(tmpdir, 'etc/nfv/vim/config.ini'), - os.path.join(tmpdir, 'etc/haproxy/haproxy.cfg'), - os.path.join(tmpdir, 'etc/heat/heat.conf'), - os.path.join(tmpdir, 'etc/keepalived/keepalived.conf'), - os.path.join(tmpdir, 'etc/vswitch/vswitch.ini'), - os.path.join(tmpdir, 'etc/nova/nova.conf'), - os.path.join(tmpdir, 'config/hosts'), - os.path.join(tmpdir, 'hieradata'), - os.path.join(tmpdir, 'postgres/keystone.sql.data'), - os.path.join(tmpdir, 'postgres/sysinv.sql.data')], - find_str, repl_ipstr) - else: - LOG.error("Failed to modify OAM IP:[{}]" - .format(oamfind)) - raise CloneFail("Failed to modify OAM IP") - - -def update_mac_in_archive(tmpdir): - """ Update MAC addresses in system archive file. 
""" - hostname = utils.get_controller_hostname() - macs = sysinv_api.get_mac_addresses(hostname) - for intf, mac in macs.items(): - find_and_replace( - [os.path.join(tmpdir, 'postgres/sysinv.sql.data')], - mac, "CLONEISOMAC_{}{}".format(hostname, intf)) - - if (tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX or - tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX_DIRECT): - hostname = utils.get_mate_controller_hostname() - macs = sysinv_api.get_mac_addresses(hostname) - for intf, mac in macs.items(): - find_and_replace( - [os.path.join(tmpdir, 'postgres/sysinv.sql.data')], - mac, "CLONEISOMAC_{}{}".format(hostname, intf)) - - -def update_disk_serial_id_in_archive(tmpdir): - """ Update disk serial id in system archive file. """ - hostname = utils.get_controller_hostname() - disk_sids = sysinv_api.get_disk_serial_ids(hostname) - for d_dnode, d_sid in disk_sids.items(): - find_and_replace( - [os.path.join(tmpdir, 'postgres/sysinv.sql.data')], - d_sid, "CLONEISODISKSID_{}{}".format(hostname, d_dnode)) - - if (tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX or - tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX_DIRECT): - hostname = utils.get_mate_controller_hostname() - disk_sids = sysinv_api.get_disk_serial_ids(hostname) - for d_dnode, d_sid in disk_sids.items(): - find_and_replace( - [os.path.join(tmpdir, 'postgres/sysinv.sql.data')], - d_sid, "CLONEISODISKSID_{}{}".format(hostname, d_dnode)) - - -def update_sysuuid_in_archive(tmpdir): - """ Update system uuid in system archive file. 
""" - sysuuid = sysinv_api.get_system_uuid() - find_and_replace( - [os.path.join(tmpdir, 'postgres/sysinv.sql.data')], - sysuuid, "CLONEISO_SYSTEM_UUID") - - -def update_backup_archive(backup_name, archive_dir): - """ Update backup archive file to be included in clone-iso """ - path_to_archive = os.path.join(archive_dir, backup_name) - tmpdir = tempfile.mkdtemp(dir=archive_dir) - try: - subprocess.check_call( - ['gunzip', path_to_archive + '.tgz'], - stdout=DEVNULL, stderr=DEVNULL) - # 70-persistent-net.rules with the correct MACs will be - # generated on the linux boot on the cloned side. Remove - # the stale file from original side. - remove_from_archive(path_to_archive + '.tar', - 'etc/udev/rules.d/70-persistent-net.rules') - # Extract only a subset of directories which have files to be - # updated for oam-ip and MAC addresses. After updating the files - # these directories are added back to the archive. - subprocess.check_call( - ['tar', '-x', - '--directory=' + tmpdir, - '-f', path_to_archive + '.tar', - 'etc', 'postgres', 'config', - 'hieradata'], - stdout=DEVNULL, stderr=DEVNULL) - update_oamip_in_archive(tmpdir) - update_mac_in_archive(tmpdir) - update_disk_serial_id_in_archive(tmpdir) - update_sysuuid_in_archive(tmpdir) - subprocess.check_call( - ['tar', '--update', - '--directory=' + tmpdir, - '-f', path_to_archive + '.tar', - 'etc', 'postgres', 'config', - 'hieradata'], - stdout=DEVNULL, stderr=DEVNULL) - subprocess.check_call(['gzip', path_to_archive + '.tar']) - shutil.move(path_to_archive + '.tar.gz', path_to_archive + '.tgz') - - except Exception as e: - LOG.error("Update of backup archive {} failed {}".format( - path_to_archive, str(e))) - raise CloneFail("Failed to update backup archive") - - finally: - if not DEBUG: - shutil.rmtree(tmpdir, ignore_errors=True) - - -def validate_controller_state(): - """ Cloning allowed now? 
""" - # Check if this Controller is enabled and provisioned - try: - if not sysinv_api.controller_enabled_provisioned( - utils.get_controller_hostname()): - raise CloneFail("Controller is not enabled/provisioned") - if (tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX or - tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX_DIRECT): - if not sysinv_api.controller_enabled_provisioned( - utils.get_mate_controller_hostname()): - raise CloneFail("Mate controller is not enabled/provisioned") - except CloneFail: - raise - except Exception: - raise CloneFail("Controller is not enabled/provisioned") - - if utils.get_system_type() != si_const.TIS_AIO_BUILD: - raise CloneFail("Cloning supported only on All-in-one systems") - - if len(sysinv_api.get_alarms()) > 0: - raise CloneFail("There are active alarms on this system!") - - -def clone(backup_name, archive_dir): - """ Do Cloning """ - validate_controller_state() - LOG.info("Cloning [{}] at [{}]".format(backup_name, archive_dir)) - check_size(archive_dir) - - isolinux_dir = os.path.join(archive_dir, 'isolinux') - clone_archive_dir = os.path.join(isolinux_dir, CLONE_ARCHIVE_DIR) - if os.path.exists(isolinux_dir): - LOG.info("deleting old iso_dir %s" % isolinux_dir) - shutil.rmtree(isolinux_dir, ignore_errors=True) - os.makedirs(clone_archive_dir, 0o644) - - try: - backup_restore.backup(backup_name, clone_archive_dir, clone=True) - LOG.info("system backup done") - update_backup_archive(backup_name + '_system', clone_archive_dir) - create_iso(backup_name, archive_dir) - except BackupFail as e: - raise CloneFail(e.message) - except CloneFail as e: - raise - finally: - if not DEBUG: - shutil.rmtree(isolinux_dir, ignore_errors=True) diff --git a/controllerconfig/controllerconfig/controllerconfig/common/configobjects.py b/controllerconfig/controllerconfig/controllerconfig/common/configobjects.py deleted file mode 100644 index 1866a7c996..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/common/configobjects.py 
+++ /dev/null @@ -1,371 +0,0 @@ -""" -Copyright (c) 2015-2019 Wind River Systems, Inc. - -SPDX-License-Identifier: Apache-2.0 - -""" - -from netaddr import IPRange -from controllerconfig.common.exceptions import ConfigFail -from controllerconfig.common.exceptions import ValidateFail -from controllerconfig.utils import is_mtu_valid -from controllerconfig.utils import is_valid_vlan -from controllerconfig.utils import validate_network_str -from controllerconfig.utils import validate_address_str - -DEFAULT_CONFIG = 0 -REGION_CONFIG = 1 -SUBCLOUD_CONFIG = 2 - -MGMT_TYPE = 0 -INFRA_TYPE = 1 -OAM_TYPE = 2 -CLUSTER_TYPE = 3 -NETWORK_PREFIX_NAMES = [ - ('MGMT', 'INFRA', 'OAM', 'CLUSTER'), - ('CLM', 'BLS', 'CAN', 'CLUSTER') -] - -HOST_XML_ATTRIBUTES = ['hostname', 'personality', 'subfunctions', - 'mgmt_mac', 'mgmt_ip', - 'bm_ip', 'bm_type', 'bm_username', - 'bm_password', 'boot_device', 'rootfs_device', - 'install_output', 'console', 'vsc_controllers', - 'power_on', 'location'] - -# Network naming types -DEFAULT_NAMES = 0 -HP_NAMES = 1 - -# well-known default domain name -DEFAULT_DOMAIN_NAME = 'Default' - - -class LogicalInterface(object): - """ Represents configuration for a logical interface. - """ - def __init__(self): - self.name = None - self.mtu = None - self.lag_interface = False - self.lag_mode = None - self.ports = None - - def parse_config(self, system_config, logical_interface): - # Ensure logical interface config is present - if not system_config.has_section(logical_interface): - raise ConfigFail("Missing config for logical interface %s." % - logical_interface) - self.name = logical_interface - - # Parse/validate the MTU - self.mtu = system_config.getint(logical_interface, 'INTERFACE_MTU') - if not is_mtu_valid(self.mtu): - raise ConfigFail("Invalid MTU value for %s. 
" - "Valid values: 576 - 9216" % logical_interface) - - # Parse the ports - self.ports = [_f for _f in - [x.strip() for x in - system_config.get(logical_interface, - 'INTERFACE_PORTS').split(',')] - if _f] - - # Parse/validate the LAG config - lag_interface = system_config.get(logical_interface, - 'LAG_INTERFACE') - if lag_interface.lower() == 'y': - self.lag_interface = True - if len(self.ports) != 2: - raise ConfigFail( - "Invalid number of ports (%d) supplied for LAG " - "interface %s" % (len(self.ports), logical_interface)) - self.lag_mode = system_config.getint(logical_interface, 'LAG_MODE') - if self.lag_mode < 1 or self.lag_mode > 6: - raise ConfigFail( - "Invalid LAG_MODE value of %d for %s. Valid values: 1-6" % - (self.lag_mode, logical_interface)) - elif lag_interface.lower() == 'n': - if len(self.ports) > 1: - raise ConfigFail( - "More than one interface supplied for non-LAG " - "interface %s" % logical_interface) - if len(self.ports) == 0: - raise ConfigFail( - "No interfaces supplied for non-LAG " - "interface %s" % logical_interface) - else: - raise ConfigFail( - "Invalid LAG_INTERFACE value of %s for %s. Valid values: " - "Y or N" % (lag_interface, logical_interface)) - - -class Network(object): - """ Represents configuration for a network. 
- """ - def __init__(self): - self.vlan = None - self.cidr = None - self.multicast_cidr = None - self.start_address = None - self.end_address = None - self.start_end_in_config = False - self.floating_address = None - self.address_0 = None - self.address_1 = None - self.dynamic_allocation = False - self.gateway_address = None - self.logical_interface = None - - def parse_config(self, system_config, config_type, network_type, - min_addresses=0, multicast_addresses=0, optional=False, - naming_type=DEFAULT_NAMES, - logical_interface_required=True): - network_prefix = NETWORK_PREFIX_NAMES[naming_type][network_type] - network_name = network_prefix + '_NETWORK' - - if naming_type == HP_NAMES: - attr_prefix = network_prefix + '_' - else: - attr_prefix = '' - - # Ensure network config is present - if not system_config.has_section(network_name): - if not optional: - raise ConfigFail("Missing config for network %s." % - network_name) - else: - # Optional interface - just return - return - - # Parse/validate the VLAN - if system_config.has_option(network_name, attr_prefix + 'VLAN'): - self.vlan = system_config.getint(network_name, - attr_prefix + 'VLAN') - if self.vlan: - if not is_valid_vlan(self.vlan): - raise ConfigFail( - "Invalid %s value of %d for %s. 
Valid values: 1-4094" % - (attr_prefix + 'VLAN', self.vlan, network_name)) - - # Parse/validate the cidr - cidr_str = system_config.get(network_name, attr_prefix + 'CIDR') - try: - self.cidr = validate_network_str( - cidr_str, min_addresses) - except ValidateFail as e: - raise ConfigFail( - "Invalid %s value of %s for %s.\nReason: %s" % - (attr_prefix + 'CIDR', cidr_str, network_name, e)) - - # Parse/validate the multicast subnet - if 0 < multicast_addresses and \ - system_config.has_option(network_name, - attr_prefix + 'MULTICAST_CIDR'): - multicast_cidr_str = system_config.get(network_name, attr_prefix + - 'MULTICAST_CIDR') - try: - self.multicast_cidr = validate_network_str( - multicast_cidr_str, multicast_addresses, multicast=True) - except ValidateFail as e: - raise ConfigFail( - "Invalid %s value of %s for %s.\nReason: %s" % - (attr_prefix + 'MULTICAST_CIDR', multicast_cidr_str, - network_name, e)) - - if self.cidr.version != self.multicast_cidr.version: - raise ConfigFail( - "Invalid %s value of %s for %s. Multicast " - "subnet and network IP families must be the same." 
% - (attr_prefix + 'MULTICAST_CIDR', multicast_cidr_str, - network_name)) - - # Parse/validate the hardwired controller addresses - floating_address_str = None - address_0_str = None - address_1_str = None - - if min_addresses == 1: - if (system_config.has_option( - network_name, attr_prefix + 'IP_FLOATING_ADDRESS') or - system_config.has_option( - network_name, attr_prefix + 'IP_UNIT_0_ADDRESS') or - system_config.has_option( - network_name, attr_prefix + 'IP_UNIT_1_ADDRESS') or - system_config.has_option( - network_name, attr_prefix + 'IP_START_ADDRESS') or - system_config.has_option( - network_name, attr_prefix + 'IP_END_ADDRESS')): - raise ConfigFail( - "Only one IP address is required for OAM " - "network, use 'IP_ADDRESS' to specify the OAM IP " - "address") - floating_address_str = system_config.get( - network_name, attr_prefix + 'IP_ADDRESS') - try: - self.floating_address = validate_address_str( - floating_address_str, self.cidr) - except ValidateFail as e: - raise ConfigFail( - "Invalid %s value of %s for %s.\nReason: %s" % - (attr_prefix + 'IP_ADDRESS', - floating_address_str, network_name, e)) - self.address_0 = self.floating_address - self.address_1 = self.floating_address - else: - if system_config.has_option( - network_name, attr_prefix + 'IP_FLOATING_ADDRESS'): - floating_address_str = system_config.get( - network_name, attr_prefix + 'IP_FLOATING_ADDRESS') - try: - self.floating_address = validate_address_str( - floating_address_str, self.cidr) - except ValidateFail as e: - raise ConfigFail( - "Invalid %s value of %s for %s.\nReason: %s" % - (attr_prefix + 'IP_FLOATING_ADDRESS', - floating_address_str, network_name, e)) - - if system_config.has_option( - network_name, attr_prefix + 'IP_UNIT_0_ADDRESS'): - address_0_str = system_config.get( - network_name, attr_prefix + 'IP_UNIT_0_ADDRESS') - try: - self.address_0 = validate_address_str( - address_0_str, self.cidr) - except ValidateFail as e: - raise ConfigFail( - "Invalid %s value of %s for 
%s.\nReason: %s" % - (attr_prefix + 'IP_UNIT_0_ADDRESS', - address_0_str, network_name, e)) - - if system_config.has_option( - network_name, attr_prefix + 'IP_UNIT_1_ADDRESS'): - address_1_str = system_config.get( - network_name, attr_prefix + 'IP_UNIT_1_ADDRESS') - try: - self.address_1 = validate_address_str( - address_1_str, self.cidr) - except ValidateFail as e: - raise ConfigFail( - "Invalid %s value of %s for %s.\nReason: %s" % - (attr_prefix + 'IP_UNIT_1_ADDRESS', - address_1_str, network_name, e)) - - # Parse/validate the start/end addresses - start_address_str = None - end_address_str = None - if system_config.has_option( - network_name, attr_prefix + 'IP_START_ADDRESS'): - start_address_str = system_config.get( - network_name, attr_prefix + 'IP_START_ADDRESS') - try: - self.start_address = validate_address_str( - start_address_str, self.cidr) - except ValidateFail as e: - raise ConfigFail( - "Invalid %s value of %s for %s.\nReason: %s" % - (attr_prefix + 'IP_START_ADDRESS', - start_address_str, network_name, e)) - - if system_config.has_option( - network_name, attr_prefix + 'IP_END_ADDRESS'): - end_address_str = system_config.get( - network_name, attr_prefix + 'IP_END_ADDRESS') - try: - self.end_address = validate_address_str( - end_address_str, self.cidr) - except ValidateFail as e: - raise ConfigFail( - "Invalid %s value of %s for %s.\nReason: %s " % - (attr_prefix + 'IP_END_ADDRESS', - end_address_str, network_name, e)) - - if start_address_str or end_address_str: - if not end_address_str: - raise ConfigFail("Missing attribute %s for %s_NETWORK" % - (attr_prefix + 'IP_END_ADDRESS', - network_name)) - if not start_address_str: - raise ConfigFail("Missing attribute %s for %s_NETWORK" % - (attr_prefix + 'IP_START_ADDRESS', - network_name)) - if not self.start_address < self.end_address: - raise ConfigFail( - "Start address %s not less than end address %s for %s." 
- % (str(self.start_address), str(self.end_address), - network_name)) - if not IPRange(start_address_str, end_address_str).size >= \ - min_addresses: - raise ConfigFail("Address range for %s must contain at " - "least %d addresses." % - (network_name, min_addresses)) - self.start_end_in_config = True - - if floating_address_str or address_0_str or address_1_str: - if not floating_address_str: - raise ConfigFail("Missing attribute %s for %s_NETWORK" % - (attr_prefix + 'IP_FLOATING_ADDRESS', - network_name)) - if not address_0_str: - raise ConfigFail("Missing attribute %s for %s_NETWORK" % - (attr_prefix + 'IP_UNIT_0_ADDRESS', - network_name)) - if not address_1_str: - raise ConfigFail("Missing attribute %s for %s_NETWORK" % - (attr_prefix + 'IP_UNIT_1_ADDRESS', - network_name)) - - if start_address_str and floating_address_str: - raise ConfigFail("Overspecified network: Can only set %s " - "and %s OR %s, %s, and %s for " - "%s_NETWORK" % - (attr_prefix + 'IP_START_ADDRESS', - attr_prefix + 'IP_END_ADDRESS', - attr_prefix + 'IP_FLOATING_ADDRESS', - attr_prefix + 'IP_UNIT_0_ADDRESS', - attr_prefix + 'IP_UNIT_1_ADDRESS', - network_name)) - - if config_type == DEFAULT_CONFIG: - if not self.start_address: - self.start_address = self.cidr[2] - if not self.end_address: - self.end_address = self.cidr[-2] - - # Parse/validate the dynamic IP address allocation - if system_config.has_option(network_name, - 'DYNAMIC_ALLOCATION'): - dynamic_allocation = system_config.get(network_name, - 'DYNAMIC_ALLOCATION') - if dynamic_allocation.lower() == 'y': - self.dynamic_allocation = True - elif dynamic_allocation.lower() == 'n': - self.dynamic_allocation = False - else: - raise ConfigFail( - "Invalid DYNAMIC_ALLOCATION value of %s for %s. 
" - "Valid values: Y or N" % - (dynamic_allocation, network_name)) - - # Parse/validate the gateway (optional) - if system_config.has_option(network_name, attr_prefix + 'GATEWAY'): - gateway_address_str = system_config.get( - network_name, attr_prefix + 'GATEWAY') - try: - self.gateway_address = validate_address_str( - gateway_address_str, self.cidr) - except ValidateFail as e: - raise ConfigFail( - "Invalid %s value of %s for %s.\nReason: %s" % - (attr_prefix + 'GATEWAY', - gateway_address_str, network_name, e)) - - # Parse/validate the logical interface - if logical_interface_required or system_config.has_option( - network_name, attr_prefix + 'LOGICAL_INTERFACE'): - logical_interface_name = system_config.get( - network_name, attr_prefix + 'LOGICAL_INTERFACE') - self.logical_interface = LogicalInterface() - self.logical_interface.parse_config(system_config, - logical_interface_name) diff --git a/controllerconfig/controllerconfig/controllerconfig/common/constants.py b/controllerconfig/controllerconfig/controllerconfig/common/constants.py index 6f0059c7a8..8581c3fbdc 100644 --- a/controllerconfig/controllerconfig/controllerconfig/common/constants.py +++ b/controllerconfig/controllerconfig/controllerconfig/common/constants.py @@ -1,10 +1,9 @@ # -# Copyright (c) 2016-2019 Wind River Systems, Inc. +# Copyright (c) 2016-2020 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # -from sysinv.common import constants as sysinv_constants from tsconfig import tsconfig @@ -15,70 +14,9 @@ CONFIG_PERMDIR = tsconfig.CONFIG_PATH HIERADATA_WORKDIR = '/tmp/hieradata' HIERADATA_PERMDIR = tsconfig.PUPPET_PATH + 'hieradata' -ARMADA_PERMDIR = tsconfig.ARMADA_PATH -HELM_CHARTS_PERMDIR = tsconfig.PLATFORM_PATH + '/helm_charts' -HELM_OVERRIDES_PERMDIR = tsconfig.HELM_OVERRIDES_PATH - KEYRING_WORKDIR = '/tmp/python_keyring' KEYRING_PERMDIR = tsconfig.KEYRING_PATH INITIAL_CONFIG_COMPLETE_FILE = '/etc/platform/.initial_config_complete' -CONFIG_FAIL_FILE = '/var/run/.config_fail' -COMMON_CERT_FILE = "/etc/ssl/private/server-cert.pem" -FIREWALL_RULES_FILE = '/etc/platform/iptables.rules' -OPENSTACK_PASSWORD_RULES_FILE = '/etc/keystone/password-rules.conf' -INSTALLATION_FAILED_FILE = '/etc/platform/installation_failed' BACKUPS_PATH = '/opt/backups' - -INTERFACES_LOG_FILE = "/tmp/configure_interfaces.log" - -LINK_MTU_DEFAULT = "1500" - -CINDER_LVM_THIN = "thin" -CINDER_LVM_THICK = "thick" - -DEFAULT_DATABASE_STOR_SIZE = \ - sysinv_constants.DEFAULT_DATABASE_STOR_SIZE -DEFAULT_SMALL_DATABASE_STOR_SIZE = \ - sysinv_constants.DEFAULT_SMALL_DATABASE_STOR_SIZE -DEFAULT_SMALL_BACKUP_STOR_SIZE = \ - sysinv_constants.DEFAULT_SMALL_BACKUP_STOR_SIZE -DEFAULT_VIRTUAL_DATABASE_STOR_SIZE = \ - sysinv_constants.DEFAULT_VIRTUAL_DATABASE_STOR_SIZE -DEFAULT_VIRTUAL_BACKUP_STOR_SIZE = \ - sysinv_constants.DEFAULT_VIRTUAL_BACKUP_STOR_SIZE -DEFAULT_EXTENSION_STOR_SIZE = \ - sysinv_constants.DEFAULT_EXTENSION_STOR_SIZE -DEFAULT_PLATFORM_STOR_SIZE = \ - sysinv_constants.DEFAULT_PLATFORM_STOR_SIZE - -SYSTEM_CONFIG_TIMEOUT = 420 -SERVICE_ENABLE_TIMEOUT = 180 -MINIMUM_ROOT_DISK_SIZE = 500 -MAXIMUM_CGCS_LV_SIZE = 500 -LDAP_CONTROLLER_CONFIGURE_TIMEOUT = 30 -SYSADMIN_MAX_PASSWORD_AGE = 45 # 45 days - -LAG_MODE_ACTIVE_BACKUP = "active-backup" -LAG_MODE_BALANCE_XOR = "balance-xor" -LAG_MODE_8023AD = "802.3ad" - -LAG_TXHASH_LAYER2 = "layer2" - 
-LAG_MIIMON_FREQUENCY = 100 - -LOOPBACK_IFNAME = 'lo' - -DEFAULT_MULTICAST_SUBNET_IPV4 = '239.1.1.0/28' -DEFAULT_MULTICAST_SUBNET_IPV6 = 'ff08::1:1:0/124' - -DEFAULT_MGMT_ON_LOOPBACK_SUBNET_IPV4 = '192.168.204.0/28' - -DEFAULT_REGION_NAME = "RegionOne" -DEFAULT_SERVICE_PROJECT_NAME = "services" - -SSH_WARNING_MESSAGE = "WARNING: Command should only be run from the " \ - "console. Continuing with this terminal may cause " \ - "loss of connectivity and configuration failure." -SSH_ERROR_MESSAGE = "ERROR: Command should only be run from the console." diff --git a/controllerconfig/controllerconfig/controllerconfig/common/crypt.py b/controllerconfig/controllerconfig/controllerconfig/common/crypt.py deleted file mode 100644 index ce53d73f80..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/common/crypt.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2019 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -""" -Routines for URL-safe encrypting/decrypting - -Cloned from git/glance/common -""" - -import base64 -import os -import random - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers import algorithms -from cryptography.hazmat.primitives.ciphers import Cipher -from cryptography.hazmat.primitives.ciphers import modes -from oslo_utils import encodeutils -import six -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range - - -def urlsafe_encrypt(key, plaintext, blocksize=16): - """Encrypts plaintext. - - Resulting ciphertext will contain URL-safe characters. - If plaintext is Unicode, encode it to UTF-8 before encryption. - - :param key: AES secret key - :param plaintext: Input text to be encrypted - :param blocksize: Non-zero integer multiple of AES blocksize in bytes (16) - :returns: Resulting ciphertext - """ - - def pad(text): - """Pads text to be encrypted""" - pad_length = (blocksize - len(text) % blocksize) - # NOTE(rosmaita): I know this looks stupid, but we can't just - # use os.urandom() to get the bytes because we use char(0) as - # a delimiter - pad = b''.join(six.int2byte(random.SystemRandom().randint(1, 0xFF)) - for i in range(pad_length - 1)) - # We use chr(0) as a delimiter between text and padding - return text + b'\0' + pad - - plaintext = encodeutils.to_utf8(plaintext) - key = encodeutils.to_utf8(key) - # random initial 16 bytes for CBC - init_vector = os.urandom(16) - backend = default_backend() - cypher = Cipher(algorithms.AES(key), modes.CBC(init_vector), - backend=backend) - encryptor = cypher.encryptor() - padded = encryptor.update( - pad(six.binary_type(plaintext))) + encryptor.finalize() - encoded = base64.urlsafe_b64encode(init_vector + padded) - if six.PY3: - encoded = encoded.decode('ascii') - return encoded - - -def urlsafe_decrypt(key, ciphertext): - """Decrypts URL-safe base64 encoded ciphertext. 
- - On Python 3, the result is decoded from UTF-8. - - :param key: AES secret key - :param ciphertext: The encrypted text to decrypt - - :returns: Resulting plaintext - """ - # Cast from unicode - ciphertext = encodeutils.to_utf8(ciphertext) - key = encodeutils.to_utf8(key) - ciphertext = base64.urlsafe_b64decode(ciphertext) - backend = default_backend() - cypher = Cipher(algorithms.AES(key), modes.CBC(ciphertext[:16]), - backend=backend) - decryptor = cypher.decryptor() - padded = decryptor.update(ciphertext[16:]) + decryptor.finalize() - text = padded[:padded.rfind(b'\0')] - if six.PY3: - text = text.decode('utf-8') - return text diff --git a/controllerconfig/controllerconfig/controllerconfig/common/dcmanager.py b/controllerconfig/controllerconfig/controllerconfig/common/dcmanager.py deleted file mode 100755 index c88c69cc1e..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/common/dcmanager.py +++ /dev/null @@ -1,44 +0,0 @@ -# -# Copyright (c) 2017-2019 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -""" -DC Manager Interactions -""" - -from controllerconfig.common import log - -from Crypto.Hash import MD5 -from controllerconfig.common import crypt - -import json - - -LOG = log.get_logger(__name__) - - -class UserList(object): - """ - User List - """ - def __init__(self, user_data, hash_string): - # Decrypt the data using input hash_string to generate - # the key - h = MD5.new() - h.update(hash_string) - encryption_key = h.hexdigest() - user_data_decrypted = crypt.urlsafe_decrypt(encryption_key, - user_data) - - self._data = json.loads(user_data_decrypted) - - def get_password(self, name): - """ - Search the users for the password - """ - for user in self._data: - if user['name'] == name: - return user['password'] - return None diff --git a/controllerconfig/controllerconfig/controllerconfig/common/exceptions.py b/controllerconfig/controllerconfig/controllerconfig/common/exceptions.py index 66a4b7e1c3..b42526bff3 100644 --- a/controllerconfig/controllerconfig/controllerconfig/common/exceptions.py +++ b/controllerconfig/controllerconfig/controllerconfig/common/exceptions.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2014-2019 Wind River Systems, Inc. +# Copyright (c) 2014-2020 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -20,56 +20,21 @@ class ConfigError(Exception): return self.message or "" -class ConfigFail(ConfigError): - """General configuration error.""" - pass - - class ValidateFail(ConfigError): """Validation of data failed.""" pass -class BackupFail(ConfigError): - """Backup error.""" - pass - - class UpgradeFail(ConfigError): """Upgrade error.""" pass -class BackupWarn(ConfigError): - """Backup warning.""" - pass - - -class RestoreFail(ConfigError): - """Backup error.""" - pass - - class KeystoneFail(ConfigError): """Keystone error.""" pass -class SysInvFail(ConfigError): - """System Inventory error.""" - pass - - -class UserQuit(ConfigError): - """User initiated quit operation.""" - pass - - -class CloneFail(ConfigError): - """Clone error.""" - pass - - class TidyStorageFail(ConfigError): """Tidy storage error.""" pass diff --git a/controllerconfig/controllerconfig/controllerconfig/common/keystone.py b/controllerconfig/controllerconfig/controllerconfig/common/keystone.py index 34e86063ec..f0ef3f408f 100755 --- a/controllerconfig/controllerconfig/controllerconfig/common/keystone.py +++ b/controllerconfig/controllerconfig/controllerconfig/common/keystone.py @@ -12,10 +12,9 @@ import datetime import iso8601 from controllerconfig.common.exceptions import KeystoneFail -from controllerconfig.common import log +from oslo_log import log - -LOG = log.get_logger(__name__) +LOG = log.getLogger(__name__) class Token(object): diff --git a/controllerconfig/controllerconfig/controllerconfig/common/log.py b/controllerconfig/controllerconfig/controllerconfig/common/log.py deleted file mode 100644 index d3844d5e72..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/common/log.py +++ /dev/null @@ -1,49 +0,0 @@ -# -# Copyright (c) 2014 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -""" -Logging -""" - -import logging -import logging.handlers - -_loggers = {} - - -def get_logger(name): - """ Get a logger or create one """ - - if name not in _loggers: - _loggers[name] = logging.getLogger(name) - - return _loggers[name] - - -def setup_logger(logger): - """ Setup a logger """ - - # Send logs to /var/log/platform.log - syslog_facility = logging.handlers.SysLogHandler.LOG_LOCAL1 - - formatter = logging.Formatter("configassistant[%(process)d] " + - "%(pathname)s:%(lineno)s " + - "%(levelname)8s [%(name)s] %(message)s") - - handler = logging.handlers.SysLogHandler(address='/dev/log', - facility=syslog_facility) - handler.setLevel(logging.INFO) - handler.setFormatter(formatter) - - logger.addHandler(handler) - logger.setLevel(logging.INFO) - - -def configure(): - """ Setup logging """ - - for logger in _loggers: - setup_logger(_loggers[logger]) diff --git a/controllerconfig/controllerconfig/controllerconfig/common/rest_api_utils.py b/controllerconfig/controllerconfig/controllerconfig/common/rest_api_utils.py index 45e06f849a..8122216957 100755 --- a/controllerconfig/controllerconfig/controllerconfig/common/rest_api_utils.py +++ b/controllerconfig/controllerconfig/controllerconfig/common/rest_api_utils.py @@ -1,5 +1,5 @@ """ -Copyright (c) 2015-2017 Wind River Systems, Inc. +Copyright (c) 2015-2020 Wind River Systems, Inc. 
SPDX-License-Identifier: Apache-2.0 @@ -7,16 +7,15 @@ SPDX-License-Identifier: Apache-2.0 import json from controllerconfig.common.exceptions import KeystoneFail -from controllerconfig.common import dcmanager from controllerconfig.common import keystone -from controllerconfig.common import log from six.moves import http_client as httplib from six.moves.urllib import request as urlrequest from six.moves.urllib.error import HTTPError from six.moves.urllib.error import URLError +from oslo_log import log -LOG = log.get_logger(__name__) +LOG = log.getLogger(__name__) def rest_api_request(token, method, api_cmd, api_cmd_headers=None, @@ -324,16 +323,3 @@ def delete_project(token, api_url, id): api_cmd = api_url + "/projects/" + id response = rest_api_request(token, "DELETE", api_cmd,) return keystone.Project(response) - - -def get_subcloud_config(token, api_url, subcloud_name, - hash_string): - """ - Ask DC Manager for our subcloud configuration - """ - api_cmd = api_url + "/subclouds/" + subcloud_name + "/config" - response = rest_api_request(token, "GET", api_cmd) - config = dict() - config['users'] = dcmanager.UserList(response['users'], hash_string) - - return config diff --git a/controllerconfig/controllerconfig/controllerconfig/common/validator.py b/controllerconfig/controllerconfig/controllerconfig/common/validator.py deleted file mode 100644 index fb0d1da019..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/common/validator.py +++ /dev/null @@ -1,1189 +0,0 @@ -""" -Copyright (c) 2015-2019 Wind River Systems, Inc. 
- -SPDX-License-Identifier: Apache-2.0 - -""" -from controllerconfig.common.configobjects import DEFAULT_NAMES -from controllerconfig.common.configobjects import NETWORK_PREFIX_NAMES -from controllerconfig.common.configobjects import OAM_TYPE -from controllerconfig.common.configobjects import MGMT_TYPE -from controllerconfig.common.configobjects import Network -from controllerconfig.common.configobjects import REGION_CONFIG -from controllerconfig.common.configobjects import DEFAULT_DOMAIN_NAME -from controllerconfig.common.configobjects import HP_NAMES -from controllerconfig.common.configobjects import SUBCLOUD_CONFIG -from controllerconfig.common.configobjects import CLUSTER_TYPE -from netaddr import IPRange -from controllerconfig.utils import lag_mode_to_str -from controllerconfig.utils import validate_network_str -from controllerconfig.utils import check_network_overlap -from controllerconfig.utils import is_mtu_valid -from controllerconfig.utils import get_service -from controllerconfig.utils import get_optional -from controllerconfig.utils import validate_address_str -from controllerconfig.utils import validate_nameserver_address_str -from controllerconfig.utils import is_valid_url -from controllerconfig.utils import is_valid_domain_or_ip -from controllerconfig.utils import is_valid_bool_str -from controllerconfig.common.exceptions import ConfigFail -from controllerconfig.common.exceptions import ValidateFail - - -# Constants -TiS_VERSION = "xxxSW_VERSIONxxx" - -# Minimum values for partition sizes -MIN_DATABASE_STORAGE = 20 -MIN_IMAGE_STORAGE = 10 -MIN_IMAGE_CONVERSIONS_VOLUME = 20 - -SYSADMIN_PASSWD_NO_AGING = 99999 - -# System mode -SYSTEM_MODE_DUPLEX = "duplex" -SYSTEM_MODE_SIMPLEX = "simplex" -SYSTEM_MODE_DUPLEX_DIRECT = "duplex-direct" - -DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER = 'systemcontroller' -DISTRIBUTED_CLOUD_ROLE_SUBCLOUD = 'subcloud' - -# System type -SYSTEM_TYPE_AIO = "All-in-one" -SYSTEM_TYPE_STANDARD = "Standard" - - -class 
ConfigValidator(object): - - def __init__(self, system_config, cgcs_config, config_type, offboard, - naming_type=DEFAULT_NAMES): - """ - :param system_config: system configuration - :param cgcs_config: if not None config data should be returned - :param config_type: indicates whether it is system, region or subcloud - config - :param offboard: if true only perform general error checking - :return: - """ - self.conf = system_config - self.cgcs_conf = cgcs_config - self.config_type = config_type - self.naming_type = naming_type - self.offboard = offboard - self.next_lag_index = 0 - self.configured_networks = [] - self.configured_vlans = [] - self.pxeboot_network_configured = False - self.pxeboot_section_name = None - self.management_interface = None - self.cluster_interface = None - self.mgmt_network = None - self.cluster_network = None - self.oam_network = None - self.vswitch_type = None - self.system_mode = None - self.system_type = None - self.system_dc_role = None - - def is_simplex_cpe(self): - return self.system_mode == SYSTEM_MODE_SIMPLEX - - def is_subcloud(self): - return self.system_dc_role == DISTRIBUTED_CLOUD_ROLE_SUBCLOUD - - def set_system_mode(self, mode): - self.system_mode = mode - - def set_system_dc_role(self, dc_role): - self.system_dc_role = dc_role - - def set_oam_config(self, use_lag, external_oam_interface_name): - if self.cgcs_conf is not None: - self.cgcs_conf.add_section('cEXT_OAM') - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_MTU', - self.oam_network.logical_interface.mtu) - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_SUBNET', - self.oam_network.cidr) - if use_lag: - self.cgcs_conf.set('cEXT_OAM', 'LAG_EXTERNAL_OAM_INTERFACE', - 'yes') - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_BOND_MEMBER_0', - self.oam_network.logical_interface.ports[0]) - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_BOND_MEMBER_1', - self.oam_network.logical_interface.ports[1]) - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_BOND_POLICY', - 
lag_mode_to_str(self.oam_network. - logical_interface.lag_mode)) - else: - self.cgcs_conf.set('cEXT_OAM', 'LAG_EXTERNAL_OAM_INTERFACE', - 'no') - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_INTERFACE', - external_oam_interface_name) - if self.oam_network.vlan: - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_VLAN', - str(self.oam_network.vlan)) - external_oam_interface_name += '.' + str(self.oam_network.vlan) - - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_INTERFACE_NAME', - external_oam_interface_name) - if self.oam_network.gateway_address: - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_GATEWAY_ADDRESS', - str(self.oam_network.gateway_address)) - if self.system_mode == SYSTEM_MODE_SIMPLEX: - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_FLOATING_ADDRESS', - str(self.oam_network.floating_address)) - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_0_ADDRESS', - str(self.oam_network.address_0)) - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_1_ADDRESS', - str(self.oam_network.address_1)) - else: - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_FLOATING_ADDRESS', - str(self.oam_network.floating_address or - self.oam_network.start_address)) - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_0_ADDRESS', - str(self.oam_network.address_0 or - self.oam_network.start_address + 1)) - self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_1_ADDRESS', - str(self.oam_network.address_1 or - self.oam_network.start_address + 2)) - - def process_oam_on_its_own_interface(self): - use_lag = False - oam_prefix = NETWORK_PREFIX_NAMES[self.naming_type][OAM_TYPE] - # OAM on its own LAG interface - if self.oam_network.logical_interface.lag_interface: - if self.oam_network.logical_interface.lag_mode not in (1, 2, 4): - raise ConfigFail( - "Unsupported LAG mode (%d) for %s interface" - " - use LAG mode 1, 2, or 4 instead" % - (self.oam_network.logical_interface.lag_mode, oam_prefix)) - use_lag = True - external_oam_interface = 'bond' + str(self.next_lag_index) - else: - # CAN on its own non-LAG interface - 
external_oam_interface = ( - self.oam_network.logical_interface.ports[0]) - return use_lag, external_oam_interface - - def validate_oam_common(self): - # validate OAM network - self.oam_network = Network() - if self.is_simplex_cpe(): - min_addresses = 1 - else: - min_addresses = 3 - try: - self.oam_network.parse_config(self.conf, self.config_type, - OAM_TYPE, - min_addresses=min_addresses, - multicast_addresses=0, - naming_type=self.naming_type) - except ConfigFail: - raise - except Exception as e: - raise ConfigFail("Error parsing configuration file: %s" % e) - - def validate_aio_simplex_mgmt(self): - # AIO simplex management network configuration - mgmt_prefix = NETWORK_PREFIX_NAMES[self.naming_type][MGMT_TYPE] - self.mgmt_network = Network() - - min_addresses = 16 - - try: - self.mgmt_network.parse_config(self.conf, self.config_type, - MGMT_TYPE, - min_addresses=min_addresses, - multicast_addresses=0, - naming_type=self.naming_type, - logical_interface_required=False) - - except ConfigFail: - raise - except Exception as e: - raise ConfigFail("Error parsing configuration file: %s" % e) - - if self.mgmt_network.vlan or self.mgmt_network.multicast_cidr or \ - self.mgmt_network.start_end_in_config or \ - self.mgmt_network.floating_address or \ - self.mgmt_network.address_0 or self.mgmt_network.address_1 or \ - self.mgmt_network.dynamic_allocation or \ - self.mgmt_network.gateway_address or \ - self.mgmt_network.logical_interface: - raise ConfigFail("For AIO simplex, only the %s network CIDR can " - "be specified" % mgmt_prefix) - - if self.mgmt_network.cidr.version == 6: - raise ConfigFail("IPv6 management network not supported on " - "simplex configuration.") - - if self.cgcs_conf is not None: - self.cgcs_conf.add_section('cMGMT') - self.cgcs_conf.set('cMGMT', 'MANAGEMENT_SUBNET', - self.mgmt_network.cidr) - - def validate_aio_network(self, subcloud=False): - if not subcloud: - # AIO-SX subcloud supports MGMT_NETWORK & PXEBOOT_NETWORK - if 
self.conf.has_section('PXEBOOT_NETWORK'): - raise ConfigFail("PXEBoot Network configuration is not " - "supported.") - if self.conf.has_section('MGMT_NETWORK'): - self.validate_aio_simplex_mgmt() - if self.conf.has_section('BOARD_MANAGEMENT_NETWORK'): - raise ConfigFail("Board Management Network configuration is not " - "supported.") - # validate OAM network - oam_prefix = NETWORK_PREFIX_NAMES[self.naming_type][OAM_TYPE] - self.validate_oam_common() - (use_lag, external_oam_interface_name) = ( - self.process_oam_on_its_own_interface()) - - # Ensure that the gateway was configured - if self.oam_network.gateway_address is None: - raise ConfigFail( - "No gateway specified - %s_GATEWAY must be specified" - % oam_prefix) - - # Check overlap with management network - if self.mgmt_network is not None: - try: - self.configured_networks.append(self.mgmt_network.cidr) - check_network_overlap(self.oam_network.cidr, - self.configured_networks) - except ValidateFail: - raise ConfigFail("%s CIDR %s overlaps with another configured " - "network" % - (oam_prefix, str(self.mgmt_network.cidr))) - - self.set_oam_config(use_lag, external_oam_interface_name) - - def validate_version(self): - if self.offboard: - version = TiS_VERSION - else: - from tsconfig.tsconfig import SW_VERSION - version = SW_VERSION - - if not self.conf.has_option('VERSION', 'RELEASE'): - raise ConfigFail( - "Version information is missing from this config file. Please" - " refer to the installation documentation for details on " - "the correct contents of the configuration file.") - ini_version = self.conf.get('VERSION', 'RELEASE') - if version != ini_version: - raise ConfigFail( - "The configuration file given is of a different version (%s) " - "than the installed software (%s). Please refer to the " - "installation documentation for details on the correct " - "contents of the configuration file and update it with " - "any changes required for this release." 
% - (ini_version, version)) - - def validate_system(self): - # timezone section - timezone = 'UTC' - if self.conf.has_option('SYSTEM', 'TIMEZONE'): - timezone = self.conf.get('SYSTEM', 'TIMEZONE') - - # system type section - if self.conf.has_option("SYSTEM", "SYSTEM_TYPE"): - self.system_type = self.conf.get("SYSTEM", "SYSTEM_TYPE") - available_system_types = [ - SYSTEM_TYPE_STANDARD, - SYSTEM_TYPE_AIO - ] - if self.system_type not in available_system_types: - raise ConfigFail("Available options for SYSTEM_TYPE are: %s" % - available_system_types) - elif not self.offboard: - from tsconfig.tsconfig import system_type - self.system_type = system_type - - # system mode section - if self.conf.has_option("SYSTEM", "SYSTEM_MODE"): - self.system_mode = self.conf.get("SYSTEM", "SYSTEM_MODE") - available_system_modes = [SYSTEM_MODE_DUPLEX] - if self.system_type != SYSTEM_TYPE_STANDARD: - available_system_modes.append(SYSTEM_MODE_SIMPLEX) - available_system_modes.append(SYSTEM_MODE_DUPLEX_DIRECT) - if self.system_mode not in available_system_modes: - raise ConfigFail("Available options for SYSTEM_MODE are: %s" % - available_system_modes) - else: - if self.system_type == SYSTEM_TYPE_STANDARD: - self.system_mode = SYSTEM_MODE_DUPLEX - else: - self.system_mode = SYSTEM_MODE_DUPLEX_DIRECT - - if self.conf.has_option("SYSTEM", "DISTRIBUTED_CLOUD_ROLE"): - self.system_dc_role = \ - self.conf.get("SYSTEM", "DISTRIBUTED_CLOUD_ROLE") - if self.config_type == SUBCLOUD_CONFIG: - available_dc_role = [DISTRIBUTED_CLOUD_ROLE_SUBCLOUD] - elif self.config_type != REGION_CONFIG: - available_dc_role = [DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER] - else: - raise ConfigFail("DISTRIBUTED_CLOUD_ROLE option is " - "not avaialbe for this configuration") - - if self.system_dc_role not in available_dc_role: - raise ConfigFail( - "Available options for DISTRIBUTED_CLOUD_ROLE are: %s" % - available_dc_role) - - if (self.system_dc_role == - DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER and - self.system_type == 
SYSTEM_TYPE_AIO): - raise ConfigFail("An All-in-one controller cannot be " - "configured as Distributed Cloud " - "System Controller") - elif self.config_type == SUBCLOUD_CONFIG: - self.system_dc_role = DISTRIBUTED_CLOUD_ROLE_SUBCLOUD - else: - self.system_dc_role = None - - if self.cgcs_conf is not None: - self.cgcs_conf.add_section("cSYSTEM") - self.cgcs_conf.set("cSYSTEM", "TIMEZONE", timezone) - self.cgcs_conf.set("cSYSTEM", "SYSTEM_MODE", self.system_mode) - if self.system_dc_role is not None: - self.cgcs_conf.set("cSYSTEM", "DISTRIBUTED_CLOUD_ROLE", - self.system_dc_role) - - def validate_storage(self): - if (self.conf.has_option('STORAGE', 'DATABASE_STORAGE') or - self.conf.has_option('STORAGE', 'IMAGE_STORAGE') or - self.conf.has_option('STORAGE', 'BACKUP_STORAGE') or - self.conf.has_option('STORAGE', 'IMAGE_CONVERSIONS_VOLUME') or - self.conf.has_option('STORAGE', 'SHARED_INSTANCE_STORAGE') or - self.conf.has_option('STORAGE', 'CINDER_BACKEND') or - self.conf.has_option('STORAGE', 'CINDER_DEVICE') or - self.conf.has_option('STORAGE', 'CINDER_LVM_TYPE') or - self.conf.has_option('STORAGE', 'CINDER_STORAGE')): - msg = "DATABASE_STORAGE, IMAGE_STORAGE, BACKUP_STORAGE, " + \ - "IMAGE_CONVERSIONS_VOLUME, SHARED_INSTANCE_STORAGE, " + \ - "CINDER_BACKEND, CINDER_DEVICE, CINDER_LVM_TYPE, " + \ - "CINDER_STORAGE " + \ - "are not valid entries in config file." 
- raise ConfigFail(msg) - - def validate_pxeboot(self): - # PXEBoot network configuration - start_end_in_config = False - - if self.config_type in [REGION_CONFIG, SUBCLOUD_CONFIG]: - self.pxeboot_section_name = 'REGION2_PXEBOOT_NETWORK' - else: - self.pxeboot_section_name = 'PXEBOOT_NETWORK' - - if self.conf.has_section(self.pxeboot_section_name): - pxeboot_cidr_str = self.conf.get(self.pxeboot_section_name, - 'PXEBOOT_CIDR') - try: - pxeboot_subnet = validate_network_str(pxeboot_cidr_str, 16) - if pxeboot_subnet.version != 4: - raise ValidateFail("Invalid PXEBOOT_NETWORK IP version - " - "only IPv4 supported") - self.configured_networks.append(pxeboot_subnet) - pxeboot_start_address = None - pxeboot_end_address = None - if self.conf.has_option(self.pxeboot_section_name, - "IP_START_ADDRESS"): - start_addr_str = self.conf.get(self.pxeboot_section_name, - "IP_START_ADDRESS") - pxeboot_start_address = validate_address_str( - start_addr_str, pxeboot_subnet - ) - - if self.conf.has_option(self.pxeboot_section_name, - "IP_END_ADDRESS"): - end_addr_str = self.conf.get(self.pxeboot_section_name, - "IP_END_ADDRESS") - pxeboot_end_address = validate_address_str( - end_addr_str, pxeboot_subnet - ) - - if pxeboot_start_address or pxeboot_end_address: - if not pxeboot_end_address: - raise ConfigFail("Missing attribute %s for %s" % - ('IP_END_ADDRESS', - self.pxeboot_section_name)) - - if not pxeboot_start_address: - raise ConfigFail("Missing attribute %s for %s" % - ('IP_START_ADDRESS', - self.pxeboot_section_name)) - - if not pxeboot_start_address < pxeboot_end_address: - raise ConfigFail("Start address %s not " - "less than end address %s for %s." - % (start_addr_str, - end_addr_str, - self.pxeboot_section_name)) - - min_addresses = 8 - if not IPRange(start_addr_str, end_addr_str).size >= \ - min_addresses: - raise ConfigFail("Address range for %s must contain " - "at least %d addresses." 
% - (self.pxeboot_section_name, - min_addresses)) - start_end_in_config = True - - self.pxeboot_network_configured = True - except ValidateFail as e: - raise ConfigFail("Invalid PXEBOOT_CIDR value of %s for %s." - "\nReason: %s" % - (pxeboot_cidr_str, - self.pxeboot_section_name, e)) - - if self.cgcs_conf is not None: - self.cgcs_conf.add_section('cPXEBOOT') - if self.pxeboot_network_configured: - self.cgcs_conf.set('cPXEBOOT', 'PXEBOOT_SUBNET', - str(pxeboot_subnet)) - if start_end_in_config: - self.cgcs_conf.set("cPXEBOOT", - "PXEBOOT_START_ADDRESS", - start_addr_str) - self.cgcs_conf.set("cPXEBOOT", - "PXEBOOT_END_ADDRESS", - end_addr_str) - - pxeboot_floating_addr = pxeboot_start_address - pxeboot_controller_addr_0 = pxeboot_start_address + 1 - pxeboot_controller_addr_1 = pxeboot_controller_addr_0 + 1 - else: - pxeboot_floating_addr = pxeboot_subnet[2] - pxeboot_controller_addr_0 = pxeboot_subnet[3] - pxeboot_controller_addr_1 = pxeboot_subnet[4] - self.cgcs_conf.set('cPXEBOOT', - 'CONTROLLER_PXEBOOT_FLOATING_ADDRESS', - str(pxeboot_floating_addr)) - self.cgcs_conf.set('cPXEBOOT', 'CONTROLLER_PXEBOOT_ADDRESS_0', - str(pxeboot_controller_addr_0)) - self.cgcs_conf.set('cPXEBOOT', 'CONTROLLER_PXEBOOT_ADDRESS_1', - str(pxeboot_controller_addr_1)) - self.cgcs_conf.set('cPXEBOOT', 'PXECONTROLLER_FLOATING_HOSTNAME', - 'pxecontroller') - - def validate_mgmt(self): - # Management network configuration - mgmt_prefix = NETWORK_PREFIX_NAMES[self.naming_type][MGMT_TYPE] - self.mgmt_network = Network() - - if self.config_type == SUBCLOUD_CONFIG: - min_addresses = 5 - else: - min_addresses = 8 - - try: - self.mgmt_network.parse_config(self.conf, self.config_type, - MGMT_TYPE, - min_addresses=min_addresses, - multicast_addresses=16, - naming_type=self.naming_type) - except ConfigFail: - raise - except Exception as e: - raise ConfigFail("Error parsing configuration file: %s" % e) - - if self.mgmt_network.floating_address: - raise ConfigFail("%s network cannot specify individual 
unit " - "addresses" % mgmt_prefix) - - if not self.mgmt_network.multicast_cidr: - # The MULTICAST_CIDR is optional for subclouds (default is used) - if self.config_type != SUBCLOUD_CONFIG: - raise ConfigFail("%s MULTICAST_CIDR attribute is missing." - % mgmt_prefix) - - try: - check_network_overlap(self.mgmt_network.cidr, - self.configured_networks) - self.configured_networks.append(self.mgmt_network.cidr) - except ValidateFail: - raise ConfigFail("%s CIDR %s overlaps with another configured " - "network" % - (mgmt_prefix, str(self.mgmt_network.cidr))) - - if (self.system_dc_role == - DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER): - # For Distributed Cloud SystemController, we require the setting - # of the IP_START_ADDRESS/IP_END_ADDRESS config settings so as to - # raise awareness that some space in MGMT subnet must be set aside - # for gateways to reach subclouds. - - if not self.mgmt_network.start_end_in_config: - raise ConfigFail("IP_START_ADDRESS and IP_END_ADDRESS required" - " for %s network as this configuration " - "requires address space left for gateway " - "address(es)" % mgmt_prefix) - else: - # Warn user that some space in the management subnet must - # be reserved for the system controller gateway address(es) - # used to communicate with the subclouds. - 2 because of - # subnet and broadcast addresses. - address_range = \ - IPRange(str(self.mgmt_network.start_address), - str(self.mgmt_network.end_address)).size - - if address_range >= (self.mgmt_network.cidr.size - 2): - raise ConfigFail( - "Address range for %s network too large, no addresses" - " left for gateway(s), required in this " - "configuration." 
% mgmt_prefix) - - if self.mgmt_network.logical_interface.lag_interface: - supported_lag_mode = [1, 4] - if (self.mgmt_network.logical_interface.lag_mode not in - supported_lag_mode): - raise ConfigFail("Unsupported LAG mode (%d) for %s interface" - " - use LAG mode %s instead" % - (self.mgmt_network.logical_interface.lag_mode, - mgmt_prefix, supported_lag_mode)) - - self.management_interface = 'bond' + str(self.next_lag_index) - management_interface_name = self.management_interface - self.next_lag_index += 1 - else: - self.management_interface = ( - self.mgmt_network.logical_interface.ports[0]) - management_interface_name = self.management_interface - - if self.mgmt_network.vlan: - if not self.pxeboot_network_configured: - raise ConfigFail( - "Management VLAN cannot be configured because " - "PXEBOOT_NETWORK is not configured.") - self.configured_vlans.append(self.mgmt_network.vlan) - management_interface_name += '.' + str(self.mgmt_network.vlan) - elif self.pxeboot_network_configured: - raise ConfigFail( - "Management VLAN must be configured because " - "%s configured." % self.pxeboot_section_name) - - if not self.is_simplex_cpe() and self.mgmt_network.cidr.version == 6 \ - and not self.pxeboot_network_configured: - raise ConfigFail("IPv6 management network cannot be configured " - "because PXEBOOT_NETWORK is not configured.") - - mtu = self.mgmt_network.logical_interface.mtu - if not is_mtu_valid(mtu): - raise ConfigFail( - "Invalid MTU value of %s for %s. 
" - "Valid values: 576 - 9216" - % (mtu, self.mgmt_network.logical_interface.name)) - - if self.cgcs_conf is not None: - self.cgcs_conf.add_section('cMGMT') - self.cgcs_conf.set('cMGMT', 'MANAGEMENT_MTU', - self.mgmt_network.logical_interface.mtu) - self.cgcs_conf.set('cMGMT', 'MANAGEMENT_SUBNET', - self.mgmt_network.cidr) - if self.mgmt_network.logical_interface.lag_interface: - self.cgcs_conf.set('cMGMT', 'LAG_MANAGEMENT_INTERFACE', 'yes') - self.cgcs_conf.set( - 'cMGMT', 'MANAGEMENT_BOND_MEMBER_0', - self.mgmt_network.logical_interface.ports[0]) - self.cgcs_conf.set( - 'cMGMT', 'MANAGEMENT_BOND_MEMBER_1', - self.mgmt_network.logical_interface.ports[1]) - self.cgcs_conf.set('cMGMT', 'MANAGEMENT_BOND_POLICY', - lag_mode_to_str(self.mgmt_network. - logical_interface.lag_mode)) - else: - self.cgcs_conf.set('cMGMT', 'LAG_MANAGEMENT_INTERFACE', 'no') - self.cgcs_conf.set('cMGMT', 'MANAGEMENT_INTERFACE', - self.management_interface) - - if self.mgmt_network.vlan: - self.cgcs_conf.set('cMGMT', 'MANAGEMENT_VLAN', - str(self.mgmt_network.vlan)) - - self.cgcs_conf.set('cMGMT', 'MANAGEMENT_INTERFACE_NAME', - management_interface_name) - - if self.mgmt_network.gateway_address: - self.cgcs_conf.set('cMGMT', 'MANAGEMENT_GATEWAY_ADDRESS', - str(self.mgmt_network.gateway_address)) - - self.cgcs_conf.set('cMGMT', 'CONTROLLER_FLOATING_ADDRESS', - str(self.mgmt_network.start_address)) - self.cgcs_conf.set('cMGMT', 'CONTROLLER_0_ADDRESS', - str(self.mgmt_network.start_address + 1)) - self.cgcs_conf.set('cMGMT', 'CONTROLLER_1_ADDRESS', - str(self.mgmt_network.start_address + 2)) - first_nfs_ip = self.mgmt_network.start_address + 3 - self.cgcs_conf.set('cMGMT', 'NFS_MANAGEMENT_ADDRESS_1', - str(first_nfs_ip)) - self.cgcs_conf.set('cMGMT', 'NFS_MANAGEMENT_ADDRESS_2', - str(first_nfs_ip + 1)) - self.cgcs_conf.set('cMGMT', 'CONTROLLER_FLOATING_HOSTNAME', - 'controller') - self.cgcs_conf.set('cMGMT', 'CONTROLLER_HOSTNAME_PREFIX', - 'controller-') - self.cgcs_conf.set('cMGMT', 
'OAMCONTROLLER_FLOATING_HOSTNAME', - 'oamcontroller') - if self.mgmt_network.dynamic_allocation: - self.cgcs_conf.set('cMGMT', 'DYNAMIC_ADDRESS_ALLOCATION', - "yes") - else: - self.cgcs_conf.set('cMGMT', 'DYNAMIC_ADDRESS_ALLOCATION', - "no") - if self.mgmt_network.start_address and \ - self.mgmt_network.end_address: - self.cgcs_conf.set('cMGMT', 'MANAGEMENT_START_ADDRESS', - self.mgmt_network.start_address) - self.cgcs_conf.set('cMGMT', 'MANAGEMENT_END_ADDRESS', - self.mgmt_network.end_address) - if self.mgmt_network.multicast_cidr: - self.cgcs_conf.set('cMGMT', 'MANAGEMENT_MULTICAST_SUBNET', - self.mgmt_network.multicast_cidr) - - def validate_cluster(self): - # Kubernetes cluster network configuration - cluster_prefix = NETWORK_PREFIX_NAMES[self.naming_type][CLUSTER_TYPE] - if not self.conf.has_section(cluster_prefix + '_NETWORK'): - return - self.cluster_network = Network() - try: - self.cluster_network.parse_config(self.conf, self.config_type, - CLUSTER_TYPE, - min_addresses=8, - naming_type=self.naming_type) - except ConfigFail: - raise - except Exception as e: - raise ConfigFail("Error parsing configuration file: %s" % e) - - if self.cluster_network.floating_address: - raise ConfigFail("%s network cannot specify individual unit " - "addresses" % cluster_prefix) - - try: - check_network_overlap(self.cluster_network.cidr, - self.configured_networks) - self.configured_networks.append(self.cluster_network.cidr) - except ValidateFail: - raise ConfigFail("%s CIDR %s overlaps with another configured " - "network" % - (cluster_prefix, str(self.cluster_network.cidr))) - - if self.cluster_network.logical_interface.lag_interface: - supported_lag_mode = [1, 2, 4] - if (self.cluster_network.logical_interface.lag_mode not in - supported_lag_mode): - raise ConfigFail( - "Unsupported LAG mode (%d) for %s interface" - " - use LAG mode %s instead" % - (self.cluster_network.logical_interface.lag_mode, - cluster_prefix, supported_lag_mode)) - - self.cluster_interface = 'bond' + 
str(self.next_lag_index) - cluster_interface_name = self.cluster_interface - self.next_lag_index += 1 - else: - self.cluster_interface = ( - self.cluster_network.logical_interface.ports[0]) - cluster_interface_name = self.cluster_interface - - if self.cluster_network.vlan: - if any(self.cluster_network.vlan == vlan for vlan in - self.configured_vlans): - raise ConfigFail( - "%s_NETWORK VLAN conflicts with another configured " - "VLAN" % cluster_prefix) - self.configured_vlans.append(self.cluster_network.vlan) - cluster_interface_name += '.' + str(self.cluster_network.vlan) - - mtu = self.cluster_network.logical_interface.mtu - if not is_mtu_valid(mtu): - raise ConfigFail( - "Invalid MTU value of %s for %s. " - "Valid values: 576 - 9216" - % (mtu, self.cluster_network.logical_interface.name)) - - if self.cgcs_conf is not None: - self.cgcs_conf.add_section('cCLUSTER') - self.cgcs_conf.set('cCLUSTER', 'CLUSTER_MTU', - self.cluster_network.logical_interface.mtu) - self.cgcs_conf.set('cCLUSTER', 'CLUSTER_SUBNET', - self.cluster_network.cidr) - if self.cluster_network.logical_interface.lag_interface: - self.cgcs_conf.set('cCLUSTER', 'LAG_CLUSTER_INTERFACE', 'yes') - self.cgcs_conf.set( - 'cCLUSTER', 'CLUSTER_BOND_MEMBER_0', - self.cluster_network.logical_interface.ports[0]) - self.cgcs_conf.set( - 'cCLUSTER', 'CLUSTER_BOND_MEMBER_1', - self.cluster_network.logical_interface.ports[1]) - self.cgcs_conf.set('cCLUSTER', 'CLUSTER_BOND_POLICY', - lag_mode_to_str(self.cluster_network. 
- logical_interface.lag_mode)) - else: - self.cgcs_conf.set('cCLUSTER', 'LAG_CLUSTER_INTERFACE', 'no') - self.cgcs_conf.set('cCLUSTER', 'CLUSTER_INTERFACE', - self.cluster_interface) - - if self.cluster_network.vlan: - self.cgcs_conf.set('cCLUSTER', 'CLUSTER_VLAN', - str(self.cluster_network.vlan)) - - self.cgcs_conf.set('cCLUSTER', 'CLUSTER_INTERFACE_NAME', - cluster_interface_name) - - if self.cluster_network.dynamic_allocation: - self.cgcs_conf.set('cCLUSTER', 'DYNAMIC_ADDRESS_ALLOCATION', - "yes") - else: - self.cgcs_conf.set('cCLUSTER', 'DYNAMIC_ADDRESS_ALLOCATION', - "no") - - def validate_oam(self): - # OAM network configuration - oam_prefix = NETWORK_PREFIX_NAMES[self.naming_type][OAM_TYPE] - mgmt_prefix = NETWORK_PREFIX_NAMES[self.naming_type][MGMT_TYPE] - self.validate_oam_common() - try: - check_network_overlap(self.oam_network.cidr, - self.configured_networks) - self.configured_networks.append(self.oam_network.cidr) - except ValidateFail: - raise ConfigFail( - "%s CIDR %s overlaps with another configured network" % - (oam_prefix, str(self.oam_network.cidr))) - - use_lag = False - if (self.oam_network.logical_interface.name == - self.mgmt_network.logical_interface.name): - # CAN sharing CLM interface - external_oam_interface = self.management_interface - elif (self.cluster_network and - (self.oam_network.logical_interface.name == - self.cluster_network.logical_interface.name)): - # CAN sharing BLS interface - external_oam_interface = self.cluster_interface - else: - (use_lag, external_oam_interface) = ( - self.process_oam_on_its_own_interface()) - external_oam_interface_name = external_oam_interface - - if self.oam_network.vlan: - if any(self.oam_network.vlan == vlan for vlan in - self.configured_vlans): - raise ConfigFail( - "%s_NETWORK VLAN conflicts with another configured VLAN" % - oam_prefix) - self.configured_vlans.append(self.oam_network.vlan) - elif external_oam_interface in (self.management_interface, - self.cluster_interface): - raise 
ConfigFail( - "VLAN required for %s_NETWORK since it uses the same interface" - " as another network" % oam_prefix) - - # Ensure that exactly one gateway was configured - if (self.mgmt_network.gateway_address is None and self.oam_network. - gateway_address is None): - raise ConfigFail( - "No gateway specified - either the %s_GATEWAY or %s_GATEWAY " - "must be specified" % (mgmt_prefix, oam_prefix)) - elif self.mgmt_network.gateway_address and ( - self.oam_network.gateway_address): - # In subcloud configs we support both a management and OAM gateway - if self.config_type != SUBCLOUD_CONFIG: - raise ConfigFail( - "Two gateways specified - only one of the %s_GATEWAY or " - "%s_GATEWAY can be specified" % (mgmt_prefix, oam_prefix)) - self.set_oam_config(use_lag, external_oam_interface_name) - - def validate_sdn(self): - if self.conf.has_section('SDN'): - raise ConfigFail("SDN Configuration is no longer supported") - - def validate_dns(self): - # DNS config is optional - if not self.conf.has_section('DNS'): - return - if self.cgcs_conf is not None: - self.cgcs_conf.add_section('cDNS') - for x in range(0, 3): - if self.conf.has_option('DNS', 'NAMESERVER_' + str(x + 1)): - dns_address_str = self.conf.get('DNS', 'NAMESERVER_' + str( - x + 1)) - try: - dns_address = validate_nameserver_address_str( - dns_address_str) - if self.cgcs_conf is not None: - self.cgcs_conf.set('cDNS', 'NAMESERVER_' + str(x + 1), - str(dns_address)) - except ValidateFail as e: - raise ConfigFail( - "Invalid DNS NAMESERVER value of %s.\nReason: %s" % - (dns_address_str, e)) - - def validate_docker_proxy(self): - if not self.conf.has_section('DOCKER_PROXY'): - return - if self.cgcs_conf is not None: - self.cgcs_conf.add_section('cDOCKER_PROXY') - # check http_proxy - if self.conf.has_option('DOCKER_PROXY', 'DOCKER_HTTP_PROXY'): - docker_http_proxy_str = self.conf.get( - 'DOCKER_PROXY', 'DOCKER_HTTP_PROXY') - if is_valid_url(docker_http_proxy_str): - if self.cgcs_conf is not None: - 
self.cgcs_conf.set('cDOCKER_PROXY', 'DOCKER_HTTP_PROXY', - docker_http_proxy_str) - else: - raise ConfigFail( - "Invalid DOCKER_HTTP_PROXY value of %s." % - docker_http_proxy_str) - # check https_proxy - if self.conf.has_option('DOCKER_PROXY', 'DOCKER_HTTPS_PROXY'): - docker_https_proxy_str = self.conf.get( - 'DOCKER_PROXY', 'DOCKER_HTTPS_PROXY') - if is_valid_url(docker_https_proxy_str): - if self.cgcs_conf is not None: - self.cgcs_conf.set('cDOCKER_PROXY', 'DOCKER_HTTPS_PROXY', - docker_https_proxy_str) - else: - raise ConfigFail( - "Invalid DOCKER_HTTPS_PROXY value of %s." % - docker_https_proxy_str) - # check no_proxy - if self.conf.has_option('DOCKER_PROXY', 'DOCKER_NO_PROXY'): - docker_no_proxy_list_str = self.conf.get( - 'DOCKER_PROXY', 'DOCKER_NO_PROXY') - docker_no_proxy_list = docker_no_proxy_list_str.split(',') - for no_proxy_str in docker_no_proxy_list: - if not is_valid_domain_or_ip(no_proxy_str): - raise ConfigFail( - "Invalid DOCKER_NO_PROXY value of %s." % - no_proxy_str) - if self.cgcs_conf is not None: - self.cgcs_conf.set('cDOCKER_PROXY', 'DOCKER_NO_PROXY', - docker_no_proxy_list_str) - - def validate_docker_registry(self): - if not self.conf.has_section('DOCKER_REGISTRY'): - return - if self.cgcs_conf is not None: - self.cgcs_conf.add_section('cDOCKER_REGISTRY') - # check k8s_registry - if self.conf.has_option('DOCKER_REGISTRY', 'DOCKER_K8S_REGISTRY'): - docker_k8s_registry_str = self.conf.get( - 'DOCKER_REGISTRY', 'DOCKER_K8S_REGISTRY') - if is_valid_domain_or_ip(docker_k8s_registry_str): - if self.cgcs_conf is not None: - self.cgcs_conf.set('cDOCKER_REGISTRY', - 'DOCKER_K8S_REGISTRY', - docker_k8s_registry_str) - else: - raise ConfigFail( - "Invalid DOCKER_K8S_REGISTRY value of %s." 
% - docker_k8s_registry_str) - # check gcr_registry - if self.conf.has_option('DOCKER_REGISTRY', 'DOCKER_GCR_REGISTRY'): - docker_gcr_registry_str = self.conf.get( - 'DOCKER_REGISTRY', 'DOCKER_GCR_REGISTRY') - if is_valid_domain_or_ip(docker_gcr_registry_str): - if self.cgcs_conf is not None: - self.cgcs_conf.set('cDOCKER_REGISTRY', - 'DOCKER_GCR_REGISTRY', - docker_gcr_registry_str) - else: - raise ConfigFail( - "Invalid DOCKER_GCR_REGISTRY value of %s." % - docker_gcr_registry_str) - # check quay_registry - if self.conf.has_option('DOCKER_REGISTRY', 'DOCKER_QUAY_REGISTRY'): - docker_quay_registry_str = self.conf.get( - 'DOCKER_REGISTRY', 'DOCKER_QUAY_REGISTRY') - if is_valid_domain_or_ip(docker_quay_registry_str): - if self.cgcs_conf is not None: - self.cgcs_conf.set('cDOCKER_REGISTRY', - 'DOCKER_QUAY_REGISTRY', - docker_quay_registry_str) - else: - raise ConfigFail( - "Invalid DOCKER_QUAY_REGISTRY value of %s." % - docker_quay_registry_str) - # check docker_registry - if self.conf.has_option('DOCKER_REGISTRY', 'DOCKER_DOCKER_REGISTRY'): - docker_docker_registry_str = self.conf.get( - 'DOCKER_REGISTRY', 'DOCKER_DOCKER_REGISTRY') - if is_valid_domain_or_ip(docker_docker_registry_str): - if self.cgcs_conf is not None: - self.cgcs_conf.set('cDOCKER_REGISTRY', - 'DOCKER_DOCKER_REGISTRY', - docker_docker_registry_str) - else: - raise ConfigFail( - "Invalid DOCKER_DOCKER_REGISTRY value of %s." % - docker_docker_registry_str) - # check is_secure_registry - if self.conf.has_option('DOCKER_REGISTRY', 'IS_SECURE_REGISTRY'): - docker_is_secure_registry_str = self.conf.get( - 'DOCKER_REGISTRY', 'IS_SECURE_REGISTRY') - if is_valid_bool_str(docker_is_secure_registry_str): - if self.cgcs_conf is not None: - self.cgcs_conf.set('cDOCKER_REGISTRY', - 'IS_SECURE_REGISTRY', - docker_is_secure_registry_str) - else: - raise ConfigFail( - "Invalid IS_SECURE_REGISTRY value of %s." 
% - docker_is_secure_registry_str) - - def validate_ntp(self): - if self.conf.has_section('NTP'): - raise ConfigFail("NTP Configuration is no longer supported") - - def validate_region(self, config_type=REGION_CONFIG): - region_1_name = self.conf.get('SHARED_SERVICES', 'REGION_NAME') - region_2_name = self.conf.get('REGION_2_SERVICES', 'REGION_NAME') - if region_1_name == region_2_name: - raise ConfigFail( - "The Region Names must be unique.") - - if not (self.conf.has_option('REGION_2_SERVICES', 'CREATE') and - self.conf.get('REGION_2_SERVICES', 'CREATE') == 'Y'): - password_fields = [ - 'PATCHING', 'SYSINV', 'FM', 'BARBICAN', 'NFV', 'MTCE' - ] - for pw in password_fields: - if not self.conf.has_option('REGION_2_SERVICES', - pw + '_PASSWORD'): - raise ConfigFail("User password for %s is required and " - "missing." % pw) - - admin_user_name = self.conf.get('SHARED_SERVICES', 'ADMIN_USER_NAME') - if self.conf.has_option('SHARED_SERVICES', - 'ADMIN_USER_DOMAIN'): - admin_user_domain = self.conf.get('SHARED_SERVICES', - 'ADMIN_USER_DOMAIN') - else: - admin_user_domain = DEFAULT_DOMAIN_NAME - - # for now support both ADMIN_PROJECT_NAME and ADMIN_TENANT_NAME - if self.conf.has_option('SHARED_SERVICES', 'ADMIN_PROJECT_NAME'): - admin_project_name = self.conf.get('SHARED_SERVICES', - 'ADMIN_PROJECT_NAME') - else: - admin_project_name = self.conf.get('SHARED_SERVICES', - 'ADMIN_TENANT_NAME') - if self.conf.has_option('SHARED_SERVICES', - 'ADMIN_PROJECT_DOMAIN'): - admin_project_domain = self.conf.get('SHARED_SERVICES', - 'ADMIN_PROJECT_DOMAIN') - else: - admin_project_domain = DEFAULT_DOMAIN_NAME - - # for now support both SERVICE_PROJECT_NAME and SERVICE_TENANT_NAME - if self.conf.has_option('SHARED_SERVICES', 'SERVICE_PROJECT_NAME'): - service_project_name = self.conf.get('SHARED_SERVICES', - 'SERVICE_PROJECT_NAME') - else: - service_project_name = self.conf.get('SHARED_SERVICES', - 'SERVICE_TENANT_NAME') - keystone_service_name = get_service(self.conf, 
'SHARED_SERVICES', - 'KEYSTONE_SERVICE_NAME') - keystone_service_type = get_service(self.conf, 'SHARED_SERVICES', - 'KEYSTONE_SERVICE_TYPE') - - # validate the patch service name and type - get_service(self.conf, 'REGION_2_SERVICES', 'PATCHING_SERVICE_NAME') - get_service(self.conf, 'REGION_2_SERVICES', 'PATCHING_SERVICE_TYPE') - patch_user_name = self.conf.get('REGION_2_SERVICES', - 'PATCHING_USER_NAME') - patch_password = get_optional(self.conf, 'REGION_2_SERVICES', - 'PATCHING_PASSWORD') - sysinv_user_name = self.conf.get('REGION_2_SERVICES', - 'SYSINV_USER_NAME') - sysinv_password = get_optional(self.conf, 'REGION_2_SERVICES', - 'SYSINV_PASSWORD') - sysinv_service_name = get_service(self.conf, 'REGION_2_SERVICES', - 'SYSINV_SERVICE_NAME') - sysinv_service_type = get_service(self.conf, 'REGION_2_SERVICES', - 'SYSINV_SERVICE_TYPE') - - # validate nfv service name and type - get_service(self.conf, 'REGION_2_SERVICES', 'NFV_SERVICE_NAME') - get_service(self.conf, 'REGION_2_SERVICES', 'NFV_SERVICE_TYPE') - nfv_user_name = self.conf.get('REGION_2_SERVICES', 'NFV_USER_NAME') - nfv_password = get_optional(self.conf, 'REGION_2_SERVICES', - 'NFV_PASSWORD') - - # validate mtce user - mtce_user_name = self.conf.get('REGION_2_SERVICES', 'MTCE_USER_NAME') - mtce_password = get_optional(self.conf, 'REGION_2_SERVICES', - 'MTCE_PASSWORD') - - # validate fm service name and type - get_service(self.conf, 'REGION_2_SERVICES', 'FM_SERVICE_NAME') - get_service(self.conf, 'REGION_2_SERVICES', 'FM_SERVICE_TYPE') - fm_user_name = self.conf.get('REGION_2_SERVICES', 'FM_USER_NAME') - fm_password = get_optional(self.conf, 'REGION_2_SERVICES', - 'FM_PASSWORD') - - # validate barbican service name and type - get_service(self.conf, 'REGION_2_SERVICES', 'BARBICAN_SERVICE_NAME') - get_service(self.conf, 'REGION_2_SERVICES', 'BARBICAN_SERVICE_TYPE') - barbican_user_name = self.conf.get('REGION_2_SERVICES', - 'BARBICAN_USER_NAME') - barbican_password = get_optional(self.conf, 
'REGION_2_SERVICES', - 'BARBICAN_PASSWORD') - - if self.conf.has_option('REGION_2_SERVICES', 'USER_DOMAIN_NAME'): - user_domain = self.conf.get('REGION_2_SERVICES', - 'USER_DOMAIN_NAME') - else: - user_domain = DEFAULT_DOMAIN_NAME - if self.conf.has_option('REGION_2_SERVICES', 'PROJECT_DOMAIN_NAME'): - project_domain = self.conf.get('REGION_2_SERVICES', - 'PROJECT_DOMAIN_NAME') - else: - project_domain = DEFAULT_DOMAIN_NAME - - system_controller_subnet = None - system_controller_floating_ip = None - if config_type == SUBCLOUD_CONFIG: - system_controller_subnet = self.conf.get( - 'SHARED_SERVICES', 'SYSTEM_CONTROLLER_SUBNET') - system_controller_floating_ip = self.conf.get( - 'SHARED_SERVICES', 'SYSTEM_CONTROLLER_FLOATING_ADDRESS') - - # Create cgcs_config file if specified - if self.cgcs_conf is not None: - self.cgcs_conf.add_section('cREGION') - self.cgcs_conf.set('cREGION', 'REGION_CONFIG', 'True') - self.cgcs_conf.set('cREGION', 'REGION_1_NAME', region_1_name) - self.cgcs_conf.set('cREGION', 'REGION_2_NAME', region_2_name) - self.cgcs_conf.set('cREGION', 'ADMIN_USER_NAME', admin_user_name) - self.cgcs_conf.set('cREGION', 'ADMIN_USER_DOMAIN', - admin_user_domain) - self.cgcs_conf.set('cREGION', 'ADMIN_PROJECT_NAME', - admin_project_name) - self.cgcs_conf.set('cREGION', 'ADMIN_PROJECT_DOMAIN', - admin_project_domain) - self.cgcs_conf.set('cREGION', 'SERVICE_PROJECT_NAME', - service_project_name) - self.cgcs_conf.set('cREGION', 'KEYSTONE_SERVICE_NAME', - keystone_service_name) - self.cgcs_conf.set('cREGION', 'KEYSTONE_SERVICE_TYPE', - keystone_service_type) - self.cgcs_conf.set('cREGION', 'PATCHING_USER_NAME', - patch_user_name) - self.cgcs_conf.set('cREGION', 'PATCHING_PASSWORD', patch_password) - self.cgcs_conf.set('cREGION', 'SYSINV_USER_NAME', sysinv_user_name) - self.cgcs_conf.set('cREGION', 'SYSINV_PASSWORD', sysinv_password) - self.cgcs_conf.set('cREGION', 'SYSINV_SERVICE_NAME', - sysinv_service_name) - self.cgcs_conf.set('cREGION', 'SYSINV_SERVICE_TYPE', - 
sysinv_service_type) - self.cgcs_conf.set('cREGION', 'NFV_USER_NAME', nfv_user_name) - self.cgcs_conf.set('cREGION', 'NFV_PASSWORD', nfv_password) - self.cgcs_conf.set('cREGION', 'MTCE_USER_NAME', mtce_user_name) - self.cgcs_conf.set('cREGION', 'MTCE_PASSWORD', mtce_password) - self.cgcs_conf.set('cREGION', 'FM_USER_NAME', fm_user_name) - self.cgcs_conf.set('cREGION', 'FM_PASSWORD', fm_password) - self.cgcs_conf.set('cREGION', 'BARBICAN_USER_NAME', - barbican_user_name) - self.cgcs_conf.set('cREGION', 'BARBICAN_PASSWORD', - barbican_password) - - self.cgcs_conf.set('cREGION', 'USER_DOMAIN_NAME', - user_domain) - self.cgcs_conf.set('cREGION', 'PROJECT_DOMAIN_NAME', - project_domain) - if config_type == SUBCLOUD_CONFIG: - self.cgcs_conf.set('cREGION', 'SYSTEM_CONTROLLER_SUBNET', - system_controller_subnet) - self.cgcs_conf.set('cREGION', - 'SYSTEM_CONTROLLER_FLOATING_ADDRESS', - system_controller_floating_ip) - - def validate_security(self): - if self.conf.has_section('SECURITY'): - raise ConfigFail("The section SECURITY is " - "no longer supported.") - - def validate_licensing(self): - if self.conf.has_section('LICENSING'): - raise ConfigFail("The section LICENSING is no longer supported.") - - def validate_authentication(self): - if self.config_type in [REGION_CONFIG, SUBCLOUD_CONFIG]: - password = self.conf.get('SHARED_SERVICES', 'ADMIN_PASSWORD') - else: - password = self.conf.get('AUTHENTICATION', 'ADMIN_PASSWORD') - if self.cgcs_conf is not None: - self.cgcs_conf.add_section('cAUTHENTICATION') - self.cgcs_conf.set('cAUTHENTICATION', 'ADMIN_PASSWORD', password) - - -def validate(system_config, config_type=REGION_CONFIG, cgcs_config=None, - offboard=False): - """ - Perform general errors checking on a system configuration file - :param system_config: system configuration - :param config_type: indicates whether it is system, region or subcloud - configuration - :param cgcs_config: if not None config data should be returned - :param offboard: if true only perform 
general error checking - :return: None - """ - if config_type == REGION_CONFIG and system_config.has_section( - 'CLM_NETWORK'): - naming_type = HP_NAMES - else: - naming_type = DEFAULT_NAMES - validator = ConfigValidator(system_config, cgcs_config, config_type, - offboard, naming_type) - # Version configuration - validator.validate_version() - # System configuration - validator.validate_system() - # Storage configuration - validator.validate_storage() - # SDN configuration - validator.validate_sdn() - - if validator.is_simplex_cpe(): - if validator.is_subcloud(): - # For AIO-SX subcloud, mgmt n/w will be on a separate physical - # interface or could be on a VLAN interface (on PXEBOOT n/w). - validator.validate_aio_network(subcloud=True) - validator.validate_pxeboot() - validator.validate_mgmt() - else: - validator.validate_aio_network() - else: - # PXEBoot network configuration - validator.validate_pxeboot() - # Management network configuration - validator.validate_mgmt() - # OAM network configuration - validator.validate_oam() - # Kubernetes Cluster network configuration - validator.validate_cluster() - # Neutron configuration - leave blank to use defaults - # DNS configuration - validator.validate_dns() - # Docker Proxy configuration - validator.validate_docker_proxy() - # Docker Registry configuration - validator.validate_docker_registry() - # NTP configuration - validator.validate_ntp() - # Region configuration - if config_type in [REGION_CONFIG, SUBCLOUD_CONFIG]: - validator.validate_region(config_type) - # Security configuration - validator.validate_security() - # Licensing configuration - validator.validate_licensing() - # Authentication configuration - validator.validate_authentication() diff --git a/controllerconfig/controllerconfig/controllerconfig/configassistant.py b/controllerconfig/controllerconfig/controllerconfig/configassistant.py deleted file mode 100644 index 5468853b06..0000000000 --- 
a/controllerconfig/controllerconfig/controllerconfig/configassistant.py +++ /dev/null @@ -1,4746 +0,0 @@ -""" -Copyright (c) 2014-2019 Wind River Systems, Inc. - -SPDX-License-Identifier: Apache-2.0 - -""" - -from six.moves import configparser -import datetime -import errno -import getpass -import hashlib -import keyring -import netifaces -import os -import re -import stat -import subprocess -import textwrap -import time - -import pyudev -from controllerconfig import ConfigFail -from controllerconfig import ValidateFail -from controllerconfig import is_valid_vlan -from controllerconfig import is_mtu_valid -from controllerconfig import validate_network_str -from controllerconfig import validate_address_str -from controllerconfig import validate_address -from controllerconfig import ip_version_to_string -from controllerconfig import validate_nameserver_address_str -from controllerconfig import is_valid_url -from controllerconfig import is_valid_domain_or_ip -from controllerconfig import validate_openstack_password -from controllerconfig import DEFAULT_DOMAIN_NAME -from netaddr import IPNetwork -from netaddr import IPAddress -from netaddr import IPRange -from netaddr import AddrFormatError -from sysinv.common import constants as sysinv_constants -from tsconfig.tsconfig import SW_VERSION - -from controllerconfig import openstack -from controllerconfig import sysinv_api as sysinv -from controllerconfig import utils -from controllerconfig import progress - -from controllerconfig.common import constants -from controllerconfig.common import log -from controllerconfig.common.exceptions import KeystoneFail -from controllerconfig.common.exceptions import SysInvFail -from controllerconfig.common.exceptions import UserQuit -from six.moves import input - -LOG = log.get_logger(__name__) - -DEVNULL = open(os.devnull, 'w') - - -def interface_exists(name): - """Check whether an interface exists.""" - return name in netifaces.interfaces() - - -def timestamped(dname, 
fmt='{dname}_%Y-%m-%d-%H-%M-%S'): - return datetime.datetime.now().strftime(fmt).format(dname=dname) - - -def prompt_for(prompt_text, default_input, validator): - """ - :param prompt_text: text for the prompt - :param default_input: default input if user hit enter directly - :param validator: validator function to validate user input, - validator should return error message in case - of invalid input, or None if input is valid. - :return: return a valid user input - """ - error_msg = None - while True: - user_input = input(prompt_text) - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = default_input - - if validator: - error_msg = validator(user_input) - - if error_msg is not None: - print(error_msg) - else: - break - - return user_input - - -def is_interface_up(interface_name): - arg = '/sys/class/net/' + interface_name + '/operstate' - try: - if (subprocess.check_output(['cat', arg]).rstrip() == - 'up'): - return True - else: - return False - except subprocess.CalledProcessError: - LOG.error("Command cat %s failed" % arg) - return False - - -def device_node_to_device_path(dev_node): - device_path = None - cmd = ["find", "-L", "/dev/disk/by-path/", "-samefile", dev_node] - - try: - out = subprocess.check_output(cmd) - except subprocess.CalledProcessError as e: - LOG.error("Could not retrieve device information: %s" % e) - return device_path - - device_path = out.rstrip() - return device_path - - -def parse_fdisk(device_node): - """Cloned/modified from sysinv""" - # Run command - fdisk_command = ('fdisk -l %s 2>/dev/null | grep "Disk %s:"' % - (device_node, device_node)) - fdisk_process = subprocess.Popen(fdisk_command, stdout=subprocess.PIPE, - shell=True) - fdisk_output = fdisk_process.stdout.read() - - # Parse output - secnd_half = fdisk_output.split(',')[1] - size_bytes = secnd_half.split()[0].strip() - - # Convert bytes to GiB (1 GiB = 1024*1024*1024 bytes) - int_size = int(size_bytes) - size_gib = int_size / 1073741824 - 
- return int(size_gib) - - -def get_rootfs_node(): - """Cloned from sysinv""" - cmdline_file = '/proc/cmdline' - device = None - - with open(cmdline_file, 'r') as f: - for line in f: - for param in line.split(): - params = param.split("=", 1) - if params[0] == "root": - if "UUID=" in params[1]: - key, uuid = params[1].split("=") - symlink = "/dev/disk/by-uuid/%s" % uuid - device = os.path.basename(os.readlink(symlink)) - else: - device = os.path.basename(params[1]) - - if device is not None: - if sysinv_constants.DEVICE_NAME_NVME in device: - re_line = re.compile(r'^(nvme[0-9]*n[0-9]*)') - else: - re_line = re.compile(r'^(\D*)') - match = re_line.search(device) - if match: - return os.path.join("/dev", match.group(1)) - - return - - -def find_boot_device(): - """ Determine boot device """ - boot_device = None - - context = pyudev.Context() - - # Get the boot partition - # Unfortunately, it seems we can only get it from the logfile. - # We'll parse the device used from a line like the following: - # BIOSBoot.create: device: /dev/sda1 ; status: False ; type: biosboot ; - # or - # EFIFS.create: device: /dev/sda1 ; status: False ; type: efi ; - # - logfile = '/var/log/anaconda/storage.log' - - re_line = re.compile(r'(BIOSBoot|EFIFS).create: device: ([^\s;]*)') - boot_partition = None - with open(logfile, 'r') as f: - for line in f: - match = re_line.search(line) - if match: - boot_partition = match.group(2) - break - if boot_partition is None: - raise ConfigFail("Failed to determine the boot partition") - - # Find the boot partition and get its parent - for device in context.list_devices(DEVTYPE='partition'): - if device.device_node == boot_partition: - boot_device = device.find_parent('block').device_node - break - - if boot_device is None: - raise ConfigFail("Failed to determine the boot device") - - return boot_device - - -def get_device_from_function(get_disk_function): - device_node = get_disk_function() - device_path = device_node_to_device_path(device_node) - 
device = device_path if device_path else os.path.basename(device_node) - - return device - - -def get_console_info(): - """ Determine console info """ - cmdline_file = '/proc/cmdline' - - re_line = re.compile(r'^.*\s+console=([^\s]*)') - - with open(cmdline_file, 'r') as f: - for line in f: - match = re_line.search(line) - if match: - console_info = match.group(1) - return console_info - return '' - - -def get_orig_install_mode(): - """ Determine original install mode, text vs graphical """ - # Post-install, the only way to detemine the original install mode - # will be to check the anaconda install log for the parameters passed - logfile = '/var/log/anaconda/anaconda.log' - - search_str = 'Display mode = t' - try: - subprocess.check_call(['grep', '-q', search_str, logfile]) - return 'text' - except subprocess.CalledProcessError: - return 'graphical' - - -def get_root_disk_size(): - """ Get size of the root disk """ - context = pyudev.Context() - rootfs_node = get_rootfs_node() - size_gib = 0 - - for device in context.list_devices(DEVTYPE='disk'): - # /dev/nvmeXn1 259 are for NVME devices - major = device['MAJOR'] - if (major == '8' or major == '3' or major == '253' or - major == '259'): - devname = device['DEVNAME'] - if devname == rootfs_node: - try: - size_gib = parse_fdisk(devname) - except Exception as e: - LOG.error("Could not retrieve disk size - %s " % e) - # Do not break config script, just return size 0 - break - break - return size_gib - - -def net_device_cmp(a, b): - # Sorting function for net devices - # Break device name "devX" into "dev" and "X", in order - # to numerically sort devices with same "dev" prefix. - # For example, this ensures a device named enp0s10 comes - # after enp0s3. 
- - pattern = re.compile("^(.*?)([0-9]*)$") - a_match = pattern.match(a) - b_match = pattern.match(b) - - if a_match.group(1) == b_match.group(1): - a_num = int(a_match.group(2)) if a_match.group(2).isdigit() else 0 - b_num = int(b_match.group(2)) if b_match.group(2).isdigit() else 0 - return a_num - b_num - elif a_match.group(1) < b_match.group(1): - return -1 - return 1 - - -def get_net_device_list(): - devlist = [] - context = pyudev.Context() - for device in context.list_devices(SUBSYSTEM='net'): - # Skip the loopback device - if device.sys_name != "lo": - devlist.append(str(device.sys_name)) - - return sorted(devlist, cmp=net_device_cmp) - - -def get_tboot_info(): - """ Determine whether we were booted with a tboot value """ - cmdline_file = '/proc/cmdline' - - # tboot=true, tboot=false, or no tboot parameter expected - re_line = re.compile(r'^.*\s+tboot=([^\s]*)') - - with open(cmdline_file, 'r') as f: - for line in f: - match = re_line.search(line) - if match: - tboot = match.group(1) - return tboot - return '' - - -class ConfigAssistant(): - """Allow user to do the initial configuration.""" - - def __init__(self, labmode=False, kubernetes=False, **kwargs): - """Constructor - - The values assigned here are used as the defaults if the user does not - supply a new value. - """ - - self.labmode = labmode - self.kubernetes = True - - self.config_uuid = "install" - - self.net_devices = get_net_device_list() - if len(self.net_devices) < 2: - raise ConfigFail("Two or more network devices are required") - - if os.path.exists(constants.INSTALLATION_FAILED_FILE): - msg = "Installation failed. 
For more info, see:\n" - with open(constants.INSTALLATION_FAILED_FILE, 'r') as f: - msg += f.read() - raise ConfigFail(msg) - - # system config - self.system_type = utils.get_system_type() - self.security_profile = utils.get_security_profile() - - if self.system_type == sysinv_constants.TIS_AIO_BUILD: - self.system_mode = sysinv_constants.SYSTEM_MODE_DUPLEX_DIRECT - else: - self.system_mode = sysinv_constants.SYSTEM_MODE_DUPLEX - self.system_dc_role = None - - self.rootfs_node = get_rootfs_node() - - # PXEBoot network config - self.separate_pxeboot_network = False - self.pxeboot_subnet = IPNetwork("192.168.202.0/24") - self.controller_pxeboot_floating_address = IPNetwork("192.168.202.2") - self.controller_pxeboot_address_0 = IPAddress("192.168.202.3") - self.controller_pxeboot_address_1 = IPAddress("192.168.202.4") - self.controller_pxeboot_hostname_suffix = "-pxeboot" - self.private_pxeboot_subnet = IPNetwork("169.254.202.0/24") - self.pxecontroller_floating_hostname = "pxecontroller" - self.pxeboot_start_address = None - self.pxeboot_end_address = None - self.use_entire_pxeboot_subnet = True - - # Management network config - self.management_interface_configured = False - self.management_interface_name = self.net_devices[1] - self.management_interface = self.net_devices[1] - self.management_vlan = "" - self.management_mtu = constants.LINK_MTU_DEFAULT - self.next_lag_index = 0 - self.lag_management_interface = False - self.lag_management_interface_member0 = self.net_devices[1] - self.lag_management_interface_member1 = "" - self.lag_management_interface_policy = constants.LAG_MODE_8023AD - self.lag_management_interface_txhash = constants.LAG_TXHASH_LAYER2 - self.lag_management_interface_miimon = constants.LAG_MIIMON_FREQUENCY - self.management_subnet = IPNetwork("192.168.204.0/24") - self.management_gateway_address = None - self.controller_floating_address = IPAddress("192.168.204.2") - self.controller_address_0 = IPAddress("192.168.204.3") - 
self.controller_address_1 = IPAddress("192.168.204.4") - self.nfs_management_address_1 = IPAddress("192.168.204.5") - self.nfs_management_address_2 = IPAddress("192.168.204.6") - self.storage_address_0 = "" - self.storage_address_1 = "" - self.controller_floating_hostname = "controller" - self.controller_hostname_prefix = "controller-" - self.storage_hostname_prefix = "storage-" - self.use_entire_mgmt_subnet = True - self.dynamic_address_allocation = True - self.management_start_address = IPAddress("192.168.204.2") - self.management_end_address = IPAddress("192.168.204.254") - self.management_multicast_subnet = \ - IPNetwork(constants.DEFAULT_MULTICAST_SUBNET_IPV4) - - # External OAM Network config - self.external_oam_interface_configured = False - self.external_oam_interface_name = self.net_devices[0] - self.external_oam_interface = self.net_devices[0] - self.external_oam_vlan = "" - self.external_oam_mtu = constants.LINK_MTU_DEFAULT - self.lag_external_oam_interface = False - self.lag_external_oam_interface_member0 = self.net_devices[0] - self.lag_external_oam_interface_member1 = "" - self.lag_external_oam_interface_policy = \ - constants.LAG_MODE_ACTIVE_BACKUP - self.lag_external_oam_interface_txhash = "" - self.lag_external_oam_interface_miimon = \ - constants.LAG_MIIMON_FREQUENCY - self.external_oam_subnet = IPNetwork("10.10.10.0/24") - self.external_oam_gateway_address = IPAddress("10.10.10.1") - self.external_oam_floating_address = IPAddress("10.10.10.2") - self.external_oam_address_0 = IPAddress("10.10.10.3") - self.external_oam_address_1 = IPAddress("10.10.10.4") - self.oamcontroller_floating_hostname = "oamcontroller" - - # Kubernetes cluster network config - self.cluster_host_interface_configured = False - self.cluster_host_interface_name = self.management_interface_name - self.cluster_host_interface = self.management_interface - self.cluster_host_vlan = "" - self.cluster_host_mtu = constants.LINK_MTU_DEFAULT - self.lag_cluster_host_interface = False - 
self.lag_cluster_host_interface_member0 = "" - self.lag_cluster_host_interface_member1 = "" - self.lag_cluster_host_interface_policy = \ - constants.LAG_MODE_ACTIVE_BACKUP - self.lag_cluster_host_interface_txhash = "" - self.lag_cluster_host_interface_miimon = \ - constants.LAG_MIIMON_FREQUENCY - self.cluster_host_subnet = IPNetwork("192.168.206.0/24") - - # Will be configurable in the future - self.cluster_pod_subnet = IPNetwork("172.16.0.0/16") - self.cluster_service_subnet = IPNetwork("10.96.0.0/12") - - # Docker Proxy config - self.enable_docker_proxy = False - self.docker_http_proxy = "" - self.docker_https_proxy = "" - self.docker_no_proxy = "" - - # Docker registry config - self.docker_use_default_registry = True - self.docker_k8s_registry = "" - self.docker_gcr_registry = "" - self.docker_quay_registry = "" - self.docker_docker_registry = "" - self.is_secure_registry = True - - # SDN config - self.enable_sdn = False - - # DNS config - self.nameserver_addresses = ["8.8.8.8", "8.8.4.4", ""] - - # HTTPS - self.enable_https = False - # Network config - self.vswitch_type = "none" - - # Authentication config - self.admin_username = "admin" - self.admin_password = "" - self.os_password_rules_file = constants.OPENSTACK_PASSWORD_RULES_FILE - self.openstack_passwords = [] - - # Region config - self.region_config = False - self.region_services_create = False - self.shared_services = [] - self.external_oam_start_address = "" - self.external_oam_end_address = "" - self.region_1_name = "" - self.region_2_name = "" - self.admin_user_domain = DEFAULT_DOMAIN_NAME - self.admin_project_name = "" - self.admin_project_domain = DEFAULT_DOMAIN_NAME - self.service_project_name = constants.DEFAULT_SERVICE_PROJECT_NAME - self.service_user_domain = DEFAULT_DOMAIN_NAME - self.service_project_domain = DEFAULT_DOMAIN_NAME - self.keystone_auth_uri = "" - self.keystone_identity_uri = "" - self.keystone_admin_uri = "" - self.keystone_internal_uri = "" - self.keystone_public_uri = "" - 
self.keystone_service_name = "" - self.keystone_service_type = "" - self.patching_ks_user_name = "" - self.patching_ks_password = "" - self.sysinv_ks_user_name = "" - self.sysinv_ks_password = "" - self.sysinv_service_name = "" - self.sysinv_service_type = "" - self.mtce_ks_user_name = "" - self.mtce_ks_password = "" - self.nfv_ks_user_name = "" - self.nfv_ks_password = "" - self.fm_ks_user_name = "" - self.fm_ks_password = "" - self.barbican_ks_user_name = "" - self.barbican_ks_password = "" - - self.ldap_region_name = "" - self.ldap_service_name = "" - self.ldap_service_uri = "" - - # Subcloud config (only valid when region configured) - self.system_controller_subnet = None - - # LDAP config - self.ldapadmin_password = "" - self.ldapadmin_hashed_pw = "" - - # Time Zone config - self.timezone = "UTC" - - # saved service passwords, indexed by service name - self._service_passwords = {} - - @staticmethod - def set_time(): - """Allow user to set the system date and time.""" - - print("System date and time:") - print("---------------------\n") - print(textwrap.fill( - "The system date and time must be set now. Note that UTC " - "time must be used and that the date and time must be set as " - "accurately as possible, even if NTP/PTP is to be configured " - "later.", 80)) - print('') - - now = datetime.datetime.utcnow() - date_format = '%Y-%m-%d %H:%M:%S' - print("Current system date and time (UTC): " + - now.strftime(date_format)) - - while True: - user_input = input( - "\nIs the current date and time correct? 
[y/n]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input.lower() == 'y': - print("Current system date and time will be used.") - return - elif user_input.lower() == 'n': - break - else: - print("Invalid choice") - - new_time = None - while True: - user_input = input("\nEnter new system date and time (UTC) " + - "in YYYY-MM-DD HH:MM:SS format: \n") - if user_input.lower() == 'q': - raise UserQuit - else: - try: - new_time = datetime.datetime.strptime(user_input, - date_format) - break - except ValueError: - print("Invalid date and time specified") - continue - - # Set the system clock - try: - subprocess.check_call(["date", "-s", new_time.isoformat()]) - - except subprocess.CalledProcessError: - LOG.error("Failed to set system date and time") - raise ConfigFail("Failed to set system date and time") - - # Set the hardware clock in UTC time - try: - subprocess.check_call(["hwclock", "-wu"]) - except subprocess.CalledProcessError: - LOG.error("Failed to set the hardware clock") - raise ConfigFail("Failed to set the hardware clock") - - @staticmethod - def set_timezone(self): - """Allow user to set the system timezone.""" - - print("\nSystem timezone:") - print("----------------\n") - print(textwrap.fill( - "The system timezone must be set now. The timezone " - "must be a valid timezone from /usr/share/zoneinfo " - "(e.g. 
UTC, Asia/Hong_Kong, etc...)", 80)) - print('') - - while True: - user_input = input( - "Please input the timezone[" + self.timezone + "]:") - - if user_input == 'Q' or user_input == 'q': - raise UserQuit - elif user_input == "": - break - else: - if not os.path.isfile("/usr/share/zoneinfo/%s" % user_input): - print("Invalid timezone specified, please try again.") - continue - self.timezone = user_input - break - return - - def subcloud_config(self): - return (self.system_dc_role == - sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SUBCLOUD) - - def get_next_lag_name(self): - """Return next available name for LAG interface.""" - name = 'bond' + str(self.next_lag_index) - self.next_lag_index += 1 - return name - - def get_sysadmin_sig(self): - """ Get signature for sysadmin user. """ - - # NOTE (knasim): only compute the signature for the entries we're - # tracking and propagating {password, aging}. This is prevent - # config-outdated alarms for shadow fields that get modified - # and we don't track and propagate - re_line = re.compile(r'(sysadmin:.*?)\s') - with open('/etc/shadow') as shadow_file: - for line in shadow_file: - match = re_line.search(line) - if match: - # Isolate password(2nd field) and aging(5th field) - entry = match.group(1).split(':') - entrystr = entry[1] + ":" + entry[4] - self.sysadmin_sig = hashlib.md5(entrystr).hexdigest() - self.passwd_hash = entry[1] - - def input_system_mode_config(self): - """Allow user to input system mode""" - print("\nSystem Configuration:") - print("---------------------\n") - print("System mode. Available options are:\n") - print(textwrap.fill( - "1) duplex-direct - two node redundant configuration. " - "Management and cluster-host networks " - "are directly connected to peer ports", 80)) - print(textwrap.fill( - "2) duplex - two node redundant configuration. 
", 80)) - - print(textwrap.fill( - "3) simplex - single node non-redundant configuration.", 80)) - - value_mapping = { - "1": sysinv_constants.SYSTEM_MODE_DUPLEX_DIRECT, - "2": sysinv_constants.SYSTEM_MODE_DUPLEX, - '3': sysinv_constants.SYSTEM_MODE_SIMPLEX - } - user_input = prompt_for( - "System mode [duplex-direct]: ", '1', - lambda text: "Invalid choice" if text not in value_mapping - else None - ) - self.system_mode = value_mapping[user_input.lower()] - - def input_dc_selection(self): - """Allow user to input dc role""" - print("\nDistributed Cloud Configuration:") - print("--------------------------------\n") - - value_mapping = { - "y": sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER, - "n": None, - } - user_input = prompt_for( - "Configure Distributed Cloud System Controller [y/N]: ", 'n', - lambda text: "Invalid choice" if text.lower() not in value_mapping - else None - ) - self.system_dc_role = value_mapping[user_input.lower()] - - def check_storage_config(self): - """Check basic storage config.""" - - if get_root_disk_size() < constants.MINIMUM_ROOT_DISK_SIZE: - print(textwrap.fill( - "Warning: Root Disk %s size is less than %d GiB. " - "Please consult the Software Installation Guide " - "for details." % - (self.rootfs_node, constants.MINIMUM_ROOT_DISK_SIZE), 80)) - print('') - - def is_interface_in_bond(self, interface_name): - """ - Determine if the supplied interface is configured as a member - in a bond. 
- - :param interface_name: interface to check - :return: True or False - """ - # In the case of bond with a single member - if interface_name == "": - return False - - if ((self.management_interface_configured and - self.lag_management_interface and - (interface_name == self.lag_management_interface_member0 or - interface_name == self.lag_management_interface_member1)) - or - (self.external_oam_interface_configured and - self.lag_external_oam_interface and - (interface_name == self.lag_external_oam_interface_member0 or - interface_name == self.lag_external_oam_interface_member1)) - or - (self.cluster_host_interface_configured and - self.lag_cluster_host_interface and - (interface_name == self.lag_cluster_host_interface_member0 or - interface_name == self.lag_cluster_host_interface_member1))): - return True - else: - return False - - def is_interface_in_use(self, interface_name): - """ - Determine if the supplied interface is already configured for use - - :param interface_name: interface to check - :return: True or False - """ - if ((self.management_interface_configured and - interface_name == self.management_interface) or - (self.external_oam_interface_configured and - interface_name == self.external_oam_interface) or - (self.cluster_host_interface_configured and - interface_name == self.cluster_host_interface)): - return True - else: - return False - - def is_valid_pxeboot_address(self, ip_address): - """Determine whether a pxeboot address is valid.""" - if ip_address.version != 4: - print("Invalid IP version - only IPv4 supported") - return False - elif ip_address == self.pxeboot_subnet.network: - print("Cannot use network address") - return False - elif ip_address == self.pxeboot_subnet.broadcast: - print("Cannot use broadcast address") - return False - elif ip_address.is_multicast(): - print("Invalid network address - multicast address not allowed") - return False - elif ip_address.is_loopback(): - print("Invalid network address - loopback address not 
allowed") - return False - elif ip_address not in self.pxeboot_subnet: - print("Address must be in the PXEBoot subnet") - return False - else: - return True - - def default_pxeboot_config(self): - """Set pxeboot to default private network.""" - - # Use private subnet for pxe booting - self.separate_pxeboot_network = False - self.pxeboot_subnet = self.private_pxeboot_subnet - self.controller_pxeboot_floating_address = \ - IPAddress(self.pxeboot_subnet[2]) - self.controller_pxeboot_address_0 = \ - IPAddress(self.pxeboot_subnet[3]) - self.controller_pxeboot_address_1 = \ - IPAddress(self.pxeboot_subnet[4]) - - self.pxeboot_start_address = self.pxeboot_subnet[2] - self.pxeboot_end_address = self.pxeboot_subnet[-2] - - def input_pxeboot_config(self): - """Allow user to input pxeboot config and perform validation.""" - - print("\nPXEBoot Network:") - print("----------------\n") - - print(textwrap.fill( - "The PXEBoot network is used for initial booting and installation" - " of each node. IP addresses on this network are reachable only " - "within the data center.", 80)) - print('') - print(textwrap.fill( - "The default configuration combines the PXEBoot network and the " - "management network. 
If a separate PXEBoot network is used, it " - "will share the management interface, which requires the " - "management network to be placed on a VLAN.", 80)) - - while True: - print('') - user_input = input( - "Configure a separate PXEBoot network [y/N]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input.lower() == 'y': - self.separate_pxeboot_network = True - break - elif user_input.lower() == 'n': - self.separate_pxeboot_network = False - break - elif user_input == "": - break - else: - print("Invalid choice") - continue - - if self.separate_pxeboot_network: - while True: - user_input = input("PXEBoot subnet [" + - str(self.pxeboot_subnet) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = self.pxeboot_subnet - - try: - ip_input = IPNetwork(user_input) - if ip_input.version != 4: - print("Invalid IP version - only IPv4 supported") - continue - elif ip_input.ip != ip_input.network: - print("Invalid network address") - continue - elif ip_input.size < 16: - print("PXEBoot subnet too small " - "- must have at least 16 addresses") - continue - - if ip_input.size < 255: - print("WARNING: Subnet allows only %d addresses." 
- % ip_input.size) - - self.pxeboot_subnet = ip_input - break - except AddrFormatError: - print("Invalid subnet - please enter a valid IPv4 subnet") - - value_mapping = { - "y": True, - "n": False, - } - - user_input = prompt_for( - "Use entire PXEBoot subnet [Y/n]: ", 'Y', - lambda text: "Invalid choice" - if text.lower() not in value_mapping - else None - ) - self.use_entire_pxeboot_subnet = value_mapping[user_input.lower()] - - if not self.use_entire_pxeboot_subnet: - def validate_input_address(text, error_header): - try: - validate_address_str(text, self.pxeboot_subnet) - return None - except ValidateFail as e: - return "%s\n Reason: %s" % (error_header, e) - - while True: - self.pxeboot_start_address = self.pxeboot_subnet[2] - self.pxeboot_end_address = self.pxeboot_subnet[-2] - input_str = prompt_for( - "PXEBoot network start address [" + - str(self.pxeboot_start_address) + - "]: ", str(self.pxeboot_start_address), - lambda text: validate_input_address( - text, "Invalid start address.") - ) - self.pxeboot_start_address = IPAddress(input_str) - - input_str = prompt_for( - "PXEBoot network end address [" + - str(self.pxeboot_end_address) + - "]: ", str(self.pxeboot_end_address), - lambda text: validate_input_address( - text, "Invalid end address.") - ) - self.pxeboot_end_address = IPAddress(input_str) - - if not self.pxeboot_start_address < \ - self.pxeboot_end_address: - print("Start address not less than end address. ") - continue - - address_range = IPRange( - str(self.pxeboot_start_address), - str(self.pxeboot_end_address)) - - min_addresses = 8 - if not address_range.size >= min_addresses: - print( - "Address range must contain at least " - "%d addresses." 
% min_addresses) - continue - - print('') - break - else: - self.pxeboot_start_address = self.pxeboot_subnet[2] - self.pxeboot_end_address = self.pxeboot_subnet[-2] - else: - # Use private subnet for pxe booting - self.pxeboot_subnet = self.private_pxeboot_subnet - self.pxeboot_start_address = self.pxeboot_subnet[2] - self.pxeboot_end_address = self.pxeboot_subnet[-2] - - ip_input = self.pxeboot_start_address - if not self.is_valid_pxeboot_address(ip_input): - raise ConfigFail("Unable to create controller PXEBoot " - "floating address") - self.controller_pxeboot_floating_address = ip_input - - default_controller0_pxeboot_ip = \ - self.controller_pxeboot_floating_address + 1 - ip_input = IPAddress(default_controller0_pxeboot_ip) - if not self.is_valid_pxeboot_address(ip_input): - raise ConfigFail("Unable to create controller-0 PXEBoot " - "address") - self.controller_pxeboot_address_0 = ip_input - - default_controller1_pxeboot_ip = self.controller_pxeboot_address_0 + 1 - ip_input = IPAddress(default_controller1_pxeboot_ip) - if not self.is_valid_pxeboot_address(ip_input): - raise ConfigFail("Unable to create controller-1 PXEBoot " - "address") - self.controller_pxeboot_address_1 = ip_input - - def input_management_config(self): - """Allow user to input management config and perform validation.""" - - print("\nManagement Network:") - print("-------------------\n") - - print(textwrap.fill( - "The management network is used for internal communication " - "between platform components. IP addresses on this network " - "are reachable only within the data center.", 80)) - - while True: - print('') - print(textwrap.fill( - "A management bond interface provides redundant " - "connections for the management network.", 80)) - if self.system_mode == sysinv_constants.SYSTEM_MODE_DUPLEX_DIRECT: - print(textwrap.fill( - "It is strongly recommended to configure Management " - "interface link aggregation, for All-in-one duplex-direct." 
- )) - print('') - user_input = input( - "Management interface link aggregation [y/N]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input.lower() == 'y': - self.lag_management_interface = True - break - elif user_input.lower() == 'n': - self.lag_management_interface = False - break - elif user_input == "": - break - else: - print("Invalid choice") - continue - - while True: - if self.lag_management_interface: - self.management_interface = self.get_next_lag_name() - - user_input = input("Management interface [" + - str(self.management_interface) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = self.management_interface - elif self.lag_management_interface: - print(textwrap.fill( - "Warning: The default name for the management bond " - "interface (%s) cannot be changed." % - self.management_interface, 80)) - print('') - user_input = self.management_interface - - if self.is_interface_in_bond(user_input): - print(textwrap.fill( - "Interface is already configured as part of an " - "aggregated interface.", 80)) - continue - elif self.lag_management_interface: - self.management_interface = user_input - self.management_interface_name = user_input - break - elif interface_exists(user_input): - self.management_interface = user_input - self.management_interface_name = user_input - break - else: - print("Interface does not exist") - continue - - while True: - user_input = input("Management interface MTU [" + - str(self.management_mtu) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = self.management_mtu - - if is_mtu_valid(user_input): - self.management_mtu = user_input - break - else: - print("MTU is invalid/unsupported") - continue - - while True: - if not self.lag_management_interface: - break - - print('') - print("Specify one of the bonding policies. 
Possible values are:") - print(" 1) 802.3ad (LACP) policy") - print(" 2) Active-backup policy") - - user_input = input( - "\nManagement interface bonding policy [" + - str(self.lag_management_interface_policy) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == '1': - self.lag_management_interface_policy = \ - constants.LAG_MODE_8023AD - break - elif user_input == '2': - self.lag_management_interface_policy = \ - constants.LAG_MODE_ACTIVE_BACKUP - self.lag_management_interface_txhash = None - break - elif user_input == "": - break - else: - print("Invalid choice") - continue - - while True: - if not self.lag_management_interface: - break - - print(textwrap.fill( - "A maximum of 2 physical interfaces can be attached to the " - "management interface.", 80)) - print('') - - user_input = input( - "First management interface member [" + - str(self.lag_management_interface_member0) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = self.lag_management_interface_member0 - - if self.is_interface_in_bond(user_input): - print(textwrap.fill( - "Interface is already configured as part of an " - "aggregated interface.", 80)) - continue - elif self.is_interface_in_use(user_input): - print("Interface is already in use") - continue - elif interface_exists(user_input): - self.lag_management_interface_member0 = user_input - else: - print("Interface does not exist") - self.lag_management_interface_member0 = "" - continue - - user_input = input( - "Second management interface member [" + - str(self.lag_management_interface_member1) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == self.lag_management_interface_member0: - print("Cannot use member 0 as member 1") - continue - elif user_input == "": - user_input = self.lag_management_interface_member1 - - if self.is_interface_in_bond(user_input): - print(textwrap.fill( - "Interface is already configured as part of an " - "aggregated 
interface.", 80)) - continue - elif self.is_interface_in_use(user_input): - print("Interface is already in use") - continue - elif interface_exists(user_input): - self.lag_management_interface_member1 = user_input - break - else: - print("Interface does not exist") - self.lag_management_interface_member1 = "" - user_input = input( - "Do you want a single physical member in the bond " - "interface [y/n]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input.lower() == 'y': - break - elif user_input.lower() == 'n': - continue - - if self.separate_pxeboot_network: - print('') - print(textwrap.fill( - "A management VLAN is required because a separate PXEBoot " - "network was configured on the management interface.", 80)) - print('') - - while True: - user_input = input( - "Management VLAN Identifier [" + - str(self.management_vlan) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif is_valid_vlan(user_input): - self.management_vlan = user_input - self.management_interface_name = \ - self.management_interface + '.' + self.management_vlan - break - else: - print("VLAN is invalid/unsupported") - continue - - min_addresses = 8 - while True: - user_input = input("Management subnet [" + - str(self.management_subnet) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = self.management_subnet - - try: - tmp_management_subnet = validate_network_str(user_input, - min_addresses) - if (tmp_management_subnet.version == 6 and - not self.separate_pxeboot_network): - print("Using IPv6 management network requires " + - "use of separate PXEBoot network") - continue - self.management_subnet = tmp_management_subnet - self.management_start_address = self.management_subnet[2] - self.management_end_address = self.management_subnet[-2] - if self.management_subnet.size < 255: - print("WARNING: Subnet allows only %d addresses.\n" - "This will not allow you to provision a Cinder LVM" - " or Ceph backend." 
% self.management_subnet.size) - while True: - user_input = raw_input( - "Do you want to continue with the current " - "configuration? [Y/n]: ") - if user_input.lower() == 'q' or \ - user_input.lower() == 'n': - raise UserQuit - elif user_input.lower() == 'y' or user_input == "": - break - else: - print("Invalid choice") - continue - break - except ValidateFail as e: - print("{}".format(e)) - - if (self.system_dc_role != - sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER): - while True: - user_input = input( - "Use entire management subnet [Y/n]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input.lower() == 'y': - self.use_entire_mgmt_subnet = True - break - elif user_input.lower() == 'n': - self.use_entire_mgmt_subnet = False - break - elif user_input == "": - break - else: - print("Invalid choice") - continue - else: - self.use_entire_mgmt_subnet = False - print(textwrap.fill( - "Configured as Distributed Cloud System Controller," - " disallowing use of entire management subnet. " - "Ensure management ip range does not include System" - " Controller gateway address(es)", 80)) - - if not self.use_entire_mgmt_subnet: - while True: - self.management_start_address = self.management_subnet[2] - self.management_end_address = self.management_subnet[-2] - while True: - user_input = input( - "Management network start address [" + - str(self.management_start_address) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = self.management_start_address - - try: - self.management_start_address = validate_address_str( - user_input, self.management_subnet) - break - except ValidateFail as e: - print("Invalid start address. 
\n Reason: %s" % e) - - while True: - user_input = input( - "Management network end address [" + - str(self.management_end_address) + "]: ") - if user_input == 'Q' or user_input == 'q': - raise UserQuit - elif user_input == "": - user_input = self.management_end_address - - try: - self.management_end_address = validate_address_str( - user_input, self.management_subnet) - break - except ValidateFail as e: - print("Invalid management end address. \n" - "Reason: %s" % e) - - if not self.management_start_address < \ - self.management_end_address: - print("Start address not less than end address. ") - print('') - continue - - address_range = IPRange(str(self.management_start_address), - str(self.management_end_address)) - if not address_range.size >= min_addresses: - print( - "Address range must contain at least %d addresses. " % - min_addresses) - continue - - sc = sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER - if (self.system_dc_role == sc): - # Warn user that some space in the management subnet must - # be reserved for the system controller gateway address(es) - # used to communicate with the subclouds. - 2 because of - # subnet and broadcast addresses. - if address_range.size >= (self.management_subnet.size - 2): - print(textwrap.fill( - "Address range too large, no addresses left " - "for System Controller gateway(s). ", 80)) - continue - break - while True: - print('') - if self.kubernetes: - print(textwrap.fill( - "IP addresses can be assigned to hosts dynamically or " - "a static IP address can be specified for each host. " - "This choice applies to both the management network " - "and cluster-host network. ", 80)) - else: - print(textwrap.fill( - "IP addresses can be assigned to hosts dynamically or " - "a static IP address can be specified for each host. 
" - "This choice applies to the management network ", 80)) - print(textwrap.fill( - "Warning: Selecting 'N', or static IP address allocation, " - "disables automatic provisioning of new hosts in System " - "Inventory, requiring the user to manually provision using " - "the 'system host-add' command. ", 80)) - user_input = input( - "Dynamic IP address allocation [Y/n]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input.lower() == 'y': - self.dynamic_address_allocation = True - break - elif user_input.lower() == 'n': - self.dynamic_address_allocation = False - break - elif user_input == "": - break - else: - print("Invalid choice") - continue - - default_controller0_mgmt_float_ip = self.management_start_address - ip_input = IPAddress(default_controller0_mgmt_float_ip) - try: - validate_address(ip_input, self.management_subnet) - except ValidateFail: - raise ConfigFail("Unable to create controller-0 Management " - "floating address") - self.controller_floating_address = ip_input - - default_controller0_mgmt_ip = self.controller_floating_address + 1 - ip_input = IPAddress(default_controller0_mgmt_ip) - try: - validate_address(ip_input, self.management_subnet) - except ValidateFail: - raise ConfigFail("Unable to create controller-0 Management " - "address") - self.controller_address_0 = ip_input - - default_controller1_mgmt_ip = self.controller_address_0 + 1 - ip_input = IPAddress(default_controller1_mgmt_ip) - try: - validate_address(ip_input, self.management_subnet) - except ValidateFail: - raise ConfigFail("Unable to create controller-1 Management " - "address") - self.controller_address_1 = ip_input - - first_nfs_ip = self.controller_address_1 + 1 - - """ create default Management NFS addresses """ - default_nfs_ip = IPAddress(first_nfs_ip) - try: - validate_address(default_nfs_ip, self.management_subnet) - except ValidateFail: - raise ConfigFail("Unable to create NFS Management address 1") - self.nfs_management_address_1 = default_nfs_ip - - 
default_nfs_ip = IPAddress(self.nfs_management_address_1 + 1) - try: - validate_address(default_nfs_ip, self.management_subnet) - except ValidateFail: - raise ConfigFail("Unable to create NFS Management address 2") - self.nfs_management_address_2 = default_nfs_ip - - while True: - if self.management_subnet.version == 6: - # Management subnet is IPv6, so update the default value - self.management_multicast_subnet = \ - IPNetwork(constants.DEFAULT_MULTICAST_SUBNET_IPV6) - - user_input = input("Management Network Multicast subnet [" + - str(self.management_multicast_subnet) + - "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = self.management_multicast_subnet - - try: - ip_input = IPNetwork(user_input) - if not self.is_valid_management_multicast_subnet(ip_input): - continue - self.management_multicast_subnet = ip_input - break - except AddrFormatError: - print("Invalid subnet - " - "please enter a valid IPv4 or IPv6 subnet" - ) - - """ Management interface configuration complete""" - self.management_interface_configured = True - - def input_aio_simplex_management_config(self, management_subnet=None): - """Allow user to input AIO simplex management config and perform - validation.""" - - if management_subnet is not None: - self.management_subnet = management_subnet - else: - print("\nManagement Network:") - print("-------------------\n") - - print(textwrap.fill( - "The management network is used for internal communication " - "between platform components. 
IP addresses on this network " - "are reachable only within the host.", 80)) - print('') - - self.management_subnet = IPNetwork( - constants.DEFAULT_MGMT_ON_LOOPBACK_SUBNET_IPV4) - min_addresses = 16 - while True: - user_input = input("Management subnet [" + - str(self.management_subnet) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = self.management_subnet - - try: - tmp_management_subnet = validate_network_str(user_input, - min_addresses) - if tmp_management_subnet.version == 6: - print("IPv6 management network not supported on " + - "simplex configuration") - continue - self.management_subnet = tmp_management_subnet - break - except ValidateFail as e: - print("{}".format(e)) - - self.management_interface = constants.LOOPBACK_IFNAME - self.management_interface_name = constants.LOOPBACK_IFNAME - self.management_start_address = self.management_subnet[2] - self.management_end_address = self.management_subnet[-2] - self.controller_floating_address = self.management_start_address - self.controller_address_0 = self.management_start_address + 1 - self.controller_address_1 = self.management_start_address + 2 - - """ create default Management NFS addresses """ - self.nfs_management_address_1 = self.controller_address_1 + 1 - self.nfs_management_address_2 = self.controller_address_1 + 2 - - """ Management interface configuration complete""" - self.management_interface_configured = True - - def is_valid_external_oam_subnet(self, ip_subnet): - """Determine whether an OAM subnet is valid.""" - if ip_subnet.size < 8: - print("Subnet too small - must have at least 8 addresses") - return False - elif ip_subnet.ip != ip_subnet.network: - print("Invalid network address") - return False - elif ip_subnet.version == 6 and ip_subnet.prefixlen < 64: - print("IPv6 minimum prefix length is 64") - return False - elif ip_subnet.is_multicast(): - print("Invalid network address - multicast address not allowed") - return False - elif 
ip_subnet.is_loopback(): - print("Invalid network address - loopback address not allowed") - return False - elif ((self.separate_pxeboot_network and - ip_subnet.ip in self.pxeboot_subnet) or - (ip_subnet.ip in self.management_subnet) or - (self.cluster_host_interface and - ip_subnet.ip in self.cluster_host_subnet)): - print("External OAM subnet overlaps with an already " - "configured subnet") - return False - else: - return True - - def is_valid_external_oam_address(self, ip_address): - """Determine whether an OAM address is valid.""" - if ip_address == self.external_oam_subnet.network: - print("Cannot use network address") - return False - elif ip_address == self.external_oam_subnet.broadcast: - print("Cannot use broadcast address") - return False - elif ip_address.is_multicast(): - print("Invalid network address - multicast address not allowed") - return False - elif ip_address.is_loopback(): - print("Invalid network address - loopback address not allowed") - return False - elif ip_address not in self.external_oam_subnet: - print("Address must be in the external OAM subnet") - return False - else: - return True - - def input_aio_simplex_oam_ip_address(self): - """Allow user to input external OAM IP and perform validation.""" - while True: - user_input = input( - "External OAM address [" + - str(self.external_oam_gateway_address + 1) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = self.external_oam_gateway_address + 1 - - try: - ip_input = IPAddress(user_input) - if not self.is_valid_external_oam_address(ip_input): - continue - self.external_oam_floating_address = ip_input - self.external_oam_address_0 = ip_input - self.external_oam_address_1 = ip_input - break - except (AddrFormatError, ValueError): - print("Invalid address - " - "please enter a valid %s address" % - ip_version_to_string(self.external_oam_subnet.version) - ) - - def input_oam_ip_address(self): - """Allow user to input external OAM IP and perform 
validation.""" - while True: - user_input = input( - "External OAM floating address [" + - str(self.external_oam_gateway_address + 1) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = self.external_oam_gateway_address + 1 - - try: - ip_input = IPAddress(user_input) - if not self.is_valid_external_oam_address(ip_input): - continue - self.external_oam_floating_address = ip_input - break - except (AddrFormatError, ValueError): - print("Invalid address - " - "please enter a valid %s address" % - ip_version_to_string(self.external_oam_subnet.version) - ) - - while True: - user_input = input("External OAM address for first " - "controller node [" + - str(self.external_oam_floating_address + 1) - + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = self.external_oam_floating_address + 1 - - try: - ip_input = IPAddress(user_input) - if not self.is_valid_external_oam_address(ip_input): - continue - self.external_oam_address_0 = ip_input - break - except (AddrFormatError, ValueError): - print("Invalid address - " - "please enter a valid %s address" % - ip_version_to_string(self.external_oam_subnet.version) - ) - - while True: - user_input = input("External OAM address for second " - "controller node [" + - str(self.external_oam_address_0 + 1) + - "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = self.external_oam_address_0 + 1 - - try: - ip_input = IPAddress(user_input) - if not self.is_valid_external_oam_address(ip_input): - continue - self.external_oam_address_1 = ip_input - break - except (AddrFormatError, ValueError): - print("Invalid address - " - "please enter a valid %s address" % - ip_version_to_string(self.external_oam_subnet.version) - ) - - def input_external_oam_config(self): - """Allow user to input external OAM config and perform validation.""" - - print("\nExternal OAM Network:") - print("---------------------\n") - 
print(textwrap.fill( - "The external OAM network is used for management of the " - "cloud. It also provides access to the " - "platform APIs. IP addresses on this network are reachable " - "outside the data center.", 80)) - print('') - - ext_oam_vlan_required = False - - while True: - print(textwrap.fill( - "An external OAM bond interface provides redundant " - "connections for the OAM network.", 80)) - print('') - user_input = input( - "External OAM interface link aggregation [y/N]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input.lower() == 'y': - self.lag_external_oam_interface = True - break - elif user_input.lower() == 'n': - self.lag_external_oam_interface = False - break - elif user_input == "": - break - else: - print("Invalid choice") - continue - - while True: - if self.lag_external_oam_interface: - self.external_oam_interface = self.get_next_lag_name() - - user_input = input("External OAM interface [" + - str(self.external_oam_interface) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = self.external_oam_interface - elif self.lag_external_oam_interface: - print(textwrap.fill( - "Warning: The default name for the external OAM bond " - "interface (%s) cannot be changed." 
% - self.external_oam_interface, 80)) - print('') - user_input = self.external_oam_interface - - if self.is_interface_in_bond(user_input): - print(textwrap.fill( - "Interface is already configured as part of an " - "aggregated interface.", 80)) - continue - elif self.lag_external_oam_interface: - self.external_oam_interface = user_input - self.external_oam_interface_name = user_input - break - elif (interface_exists(user_input) or - user_input == self.management_interface): - self.external_oam_interface = user_input - self.external_oam_interface_name = user_input - if ((self.management_interface_configured and - user_input == self.management_interface)): - ext_oam_vlan_required = True - break - else: - print("Interface does not exist") - continue - - while True: - user_input = input( - "Configure an external OAM VLAN [y/N]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input.lower() == 'y': - while True: - user_input = input( - "External OAM VLAN Identifier [" + - str(self.external_oam_vlan) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif is_valid_vlan(user_input): - if ((user_input == self.management_vlan) or - (user_input == self.cluster_host_vlan)): - print(textwrap.fill( - "Invalid VLAN Identifier. Configured VLAN " - "Identifier is already in use by another " - "network.", 80)) - continue - self.external_oam_vlan = user_input - self.external_oam_interface_name = \ - self.external_oam_interface + '.' 
+ \ - self.external_oam_vlan - break - else: - print("VLAN is invalid/unsupported") - continue - break - elif user_input.lower() in ('n', ''): - if ext_oam_vlan_required: - print(textwrap.fill( - "An external oam VLAN is required since the " - "configured external oam interface is the " - "same as either the configured management " - "or cluster-host interface.", 80)) - continue - self.external_oam_vlan = "" - break - else: - print("Invalid choice") - continue - - while True: - user_input = input("External OAM interface MTU [" + - str(self.external_oam_mtu) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = self.external_oam_mtu - - if (self.management_interface_configured and - self.external_oam_interface == - self.management_interface and - self.external_oam_vlan and - user_input > self.management_mtu): - print("External OAM VLAN MTU must not be larger than " - "underlying management interface MTU") - continue - elif is_mtu_valid(user_input): - self.external_oam_mtu = user_input - break - else: - print("MTU is invalid/unsupported") - continue - - while True: - if not self.lag_external_oam_interface: - break - - print('') - print("Specify one of the bonding policies. 
Possible values are:") - print(" 1) Active-backup policy") - print(" 2) Balanced XOR policy") - print(" 3) 802.3ad (LACP) policy") - - user_input = input( - "\nExternal OAM interface bonding policy [" + - str(self.lag_external_oam_interface_policy) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == '1': - self.lag_external_oam_interface_policy = \ - constants.LAG_MODE_ACTIVE_BACKUP - break - elif user_input == '2': - self.lag_external_oam_interface_policy = \ - constants.LAG_MODE_BALANCE_XOR - self.lag_external_oam_interface_txhash = \ - constants.LAG_TXHASH_LAYER2 - break - elif user_input == '3': - self.lag_external_oam_interface_policy = \ - constants.LAG_MODE_8023AD - self.lag_external_oam_interface_txhash = \ - constants.LAG_TXHASH_LAYER2 - break - elif user_input == "": - break - else: - print("Invalid choice") - continue - - while True: - if not self.lag_external_oam_interface: - break - - print(textwrap.fill( - "A maximum of 2 physical interfaces can be attached to the " - "external OAM interface.", 80)) - print('') - - user_input = input( - "First external OAM interface member [" + - str(self.lag_external_oam_interface_member0) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = self.lag_external_oam_interface_member0 - - if self.is_interface_in_bond(user_input): - print(textwrap.fill( - "Interface is already configured as part of an " - "aggregated interface.", 80)) - continue - elif self.is_interface_in_use(user_input): - print("Interface is already in use") - continue - elif interface_exists(user_input): - self.lag_external_oam_interface_member0 = user_input - else: - print("Interface does not exist") - self.lag_external_oam_interface_member0 = "" - continue - - user_input = input( - "Second external oam interface member [" + - str(self.lag_external_oam_interface_member1) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = 
self.lag_external_oam_interface_member1 - - if self.is_interface_in_bond(user_input): - print(textwrap.fill( - "Interface is already configured as part of an " - "aggregated interface.", 80)) - continue - elif self.is_interface_in_use(user_input): - print("Interface is already in use") - continue - elif user_input == self.lag_external_oam_interface_member0: - print("Cannot use member 0 as member 1") - continue - if interface_exists(user_input): - self.lag_external_oam_interface_member1 = user_input - break - else: - print("Interface does not exist") - self.lag_external_oam_interface_member1 = "" - user_input = input( - "Do you want a single physical member in the bond " - "interface [y/n]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input.lower() == 'y': - break - elif user_input.lower() == 'n': - continue - - while True: - user_input = input("External OAM subnet [" + - str(self.external_oam_subnet) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = self.external_oam_subnet - - try: - ip_input = IPNetwork(user_input) - if not self.is_valid_external_oam_subnet(ip_input): - continue - self.external_oam_subnet = ip_input - break - except AddrFormatError: - print("Invalid subnet - " - "please enter a valid IPv4 or IPv6 subnet" - ) - - while True: - user_input = input("External OAM gateway address [" + - str(self.external_oam_subnet[1]) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = self.external_oam_subnet[1] - - try: - ip_input = IPAddress(user_input) - if not self.is_valid_external_oam_address(ip_input): - continue - self.external_oam_gateway_address = ip_input - break - except (AddrFormatError, ValueError): - print("Invalid address - " - "please enter a valid %s address" % - ip_version_to_string(self.external_oam_subnet.version) - ) - - if self.system_mode == sysinv_constants.SYSTEM_MODE_SIMPLEX: - self.input_aio_simplex_oam_ip_address() - else: - 
self.input_oam_ip_address() - - """ External OAM interface configuration complete""" - self.external_oam_interface_configured = True - - def is_valid_cluster_host_address(self, ip_address): - """Determine whether cluster host address is valid.""" - if ip_address == self.cluster_host_subnet.network: - print("Cannot use network address") - return False - elif ip_address == self.cluster_host_subnet.broadcast: - print("Cannot use broadcast address") - return False - elif ip_address.is_multicast(): - print("Invalid network address - multicast address not allowed") - return False - elif ip_address.is_loopback(): - print("Invalid network address - loopback address not allowed") - return False - elif ip_address not in self.cluster_host_subnet: - print("Address must be in the cluster host subnet") - return False - else: - return True - - def input_cluster_host_config(self): - """Allow user to input cluster-host config and perform validation.""" - - print("\nCluster Host Network:") - print("-----------------------\n") - - print((textwrap.fill( - "The cluster host network is used for internal communication " - "between Kubernetes clusters. 
" - "IP addresses on this network are reachable only within the data " - "center.", 80))) - print('') - print(textwrap.fill( - "If a separate cluster host interface is not configured the " - "management network will be used.", 80)) - print('') - - while True: - print('') - print(textwrap.fill( - "An cluster host bond interface provides redundant " - "connections for the cluster host network.", 80)) - print('') - user_input = input( - "Cluster host interface link aggregation [y/N]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input.lower() == 'y': - self.lag_cluster_host_interface = True - break - elif user_input.lower() in ('n', ''): - self.lag_cluster_host_interface = False - break - else: - print("Invalid choice") - continue - - while True: - if self.lag_cluster_host_interface: - self.cluster_host_interface = self.get_next_lag_name() - - user_input = input("Cluster host interface [" + - str(self.management_interface) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == '': - user_input = self.management_interface - if user_input == '': - print("Invalid interface") - continue - elif self.lag_cluster_host_interface: - print(textwrap.fill( - "Warning: The default name for the cluster host bond " - "interface (%s) cannot be changed." 
% - self.cluster_host_interface, 80)) - print('') - user_input = self.cluster_host_interface - - if self.is_interface_in_bond(user_input): - print(textwrap.fill( - "Interface is already configured as part of an " - "aggregated interface.", 80)) - continue - elif self.lag_cluster_host_interface: - self.cluster_host_interface = user_input - self.cluster_host_interface_name = user_input - break - elif interface_exists(user_input): - self.cluster_host_interface = user_input - self.cluster_host_interface_name = user_input - break - else: - print("Interface does not exist") - continue - - while True: - user_input = input( - "Configure a cluster host VLAN [y/N]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input.lower() == 'y': - while True: - user_input = input( - "Cluster host VLAN Identifier [" + - str(self.cluster_host_vlan) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif is_valid_vlan(user_input): - self.cluster_host_vlan = user_input - self.cluster_host_interface_name = \ - self.cluster_host_interface + '.' 
+ \ - self.cluster_host_vlan - break - else: - print("VLAN is invalid/unsupported") - continue - break - elif user_input.lower() in ('n', ''): - self.cluster_host_vlan = "" - break - else: - print("Invalid choice") - continue - - while True: - if self.cluster_host_interface == self.management_interface: - break - user_input = input("Cluster host interface MTU [" + - str(self.cluster_host_mtu) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = self.cluster_host_mtu - - if (self.management_interface_configured and - self.cluster_host_interface == - self.management_interface and - self.cluster_host_vlan and - user_input > self.management_mtu): - print("Cluster host VLAN MTU must not be larger than " - "underlying management interface MTU") - continue - elif is_mtu_valid(user_input): - self.cluster_host_mtu = user_input - break - else: - print("MTU is invalid/unsupported") - continue - - while True: - if self.cluster_host_interface == self.management_interface: - break - if not self.lag_cluster_host_interface: - break - print('') - print("Specify one of the bonding policies. 
Possible values are:") - print(" 1) Active-backup policy") - print(" 2) Balanced XOR policy") - print(" 3) 802.3ad (LACP) policy") - - user_input = input( - "\nCluster host interface bonding policy [" + - str(self.lag_cluster_host_interface_policy) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == '1': - self.lag_cluster_host_interface_policy = \ - constants.LAG_MODE_ACTIVE_BACKUP - self.lag_cluster_host_interface_txhash = None - break - elif user_input == '2': - self.lag_cluster_host_interface_policy = \ - constants.LAG_MODE_BALANCE_XOR - self.lag_cluster_host_interface_txhash = \ - constants.LAG_TXHASH_LAYER2 - break - elif user_input == '3': - self.lag_cluster_host_interface_policy = \ - constants.LAG_MODE_8023AD - self.lag_cluster_host_interface_txhash = \ - constants.LAG_TXHASH_LAYER2 - break - elif user_input == "": - break - else: - print("Invalid choice") - continue - - while True: - if not self.lag_cluster_host_interface: - break - if self.cluster_host_interface == self.management_interface: - break - - print(textwrap.fill( - "A maximum of 2 physical interfaces can be attached to the " - "cluster host interface.", 80)) - print('') - - user_input = input( - "First cluster host interface member [" + - str(self.lag_cluster_host_interface_member0) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = self.lag_cluster_host_interface_member0 - - if self.is_interface_in_bond(user_input): - print(textwrap.fill( - "Interface is already configured as part of an " - "aggregated interface.", 80)) - continue - elif self.is_interface_in_use(user_input): - print("Interface is already in use") - continue - elif interface_exists(user_input): - self.lag_cluster_host_interface_member0 = user_input - else: - print("Interface does not exist") - self.lag_cluster_host_interface_member0 = "" - continue - - user_input = input( - "Second cluster host interface member [" + - 
str(self.lag_cluster_host_interface_member1) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = self.lag_cluster_host_interface_member1 - - if self.is_interface_in_bond(user_input): - print(textwrap.fill( - "Interface is already configured as part of an " - "aggregated interface.", 80)) - continue - elif self.is_interface_in_use(user_input): - print("Interface is already in use") - continue - elif interface_exists(user_input): - if user_input == self.lag_cluster_host_interface_member0: - print("Cannot use member 0 as member 1") - continue - else: - self.lag_cluster_host_interface_member1 = user_input - break - else: - print("Interface does not exist") - self.lag_cluster_host_interface_member1 = "" - user_input = input( - "Do you want a single physical member in the bond " - "interface [y/n]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input.lower() == 'y': - break - elif user_input.lower() in ('n', ''): - continue - else: - print("Invalid choice") - continue - - min_addresses = 8 - while True: - user_input = input("Cluster subnet [" + - str(self.cluster_host_subnet) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input == "": - user_input = self.cluster_host_subnet - - try: - ip_input = IPNetwork(user_input) - if ip_input.ip != ip_input.network: - print("Invalid network address") - continue - elif ip_input.size < min_addresses: - print("Cluster subnet too small - " - "must have at least 16 addresses") - continue - elif ip_input.version == 6 and ip_input.prefixlen < 64: - print("IPv6 minimum prefix length is 64") - continue - elif ((self.separate_pxeboot_network and - ip_input.ip in self.pxeboot_subnet) or - ip_input.ip in self.management_subnet): - print("Cluster host subnet overlaps with an already " - "configured subnet") - continue - - if ip_input.size < 255: - print("WARNING: Subnet allows only %d addresses." 
- % ip_input.size) - - self.cluster_host_subnet = ip_input - break - except AddrFormatError: - print("Invalid subnet - please enter a valid IPv4 subnet") - - """ Cluster host interface configuration complete""" - self.cluster_host_interface_configured = True - - def get_dns_servers(self): - """Produce a comma separated list of DNS servers.""" - servers = [str(s) for s in self.nameserver_addresses if s] - return ",".join(servers) - - def input_dns_config(self): - """Allow user to input DNS config and perform validation.""" - - print("\nDomain Name System (DNS):") - print("-------------------------\n") - print(textwrap.fill( - "Configuring DNS servers accessible through the external " - "OAM network allows domain names to be mapped to IP " - "addresses.", 80)) - print(textwrap.fill( - "The configuration of at least one DNS server is mandatory. To " - "skip the configuration of one or more nameservers (1 to 3 are " - "allowed), enter C to continue to the next configuration item.", - 80)) - print('') - - if self.external_oam_subnet.version == 6: - self.nameserver_addresses = ["2001:4860:4860::8888", - "2001:4860:4860::8844", ""] - - for server in range(0, len(self.nameserver_addresses)): - while True: - user_input = raw_input( - "Nameserver " + str(server + 1) + " [" + - str(self.nameserver_addresses[server]) + "]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input.lower() == 'c': - if server == 0: - print("At least one DNS server is required.") - continue - for x in range(server, len(self.nameserver_addresses)): - self.nameserver_addresses[x] = "" - return - elif user_input == "": - user_input = self.nameserver_addresses[server] - # Pressing enter with a blank default will continue - if user_input == "": - return - - try: - try: - ip_input = validate_nameserver_address_str( - user_input, self.external_oam_subnet.version) - except ValidateFail as e: - print('{}'.format(e)) - continue - self.nameserver_addresses[server] = ip_input - break - except 
(AddrFormatError, ValueError): - print("Invalid address - please enter a valid IPv4 " - "address") - - def input_docker_proxy_config(self): - """Allow user to input docker proxy config.""" - - print("\nDocker Proxy:") - print("-------------\n") - print(textwrap.fill( - "Docker proxy is needed if host OAM network is behind a proxy.", - 80)) - print('') - while True: - user_input = input( - "Configure docker proxy [y/N]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input.lower() == 'y': - while True: - user_input = input( - "HTTP proxy (http://example.proxy:port): ") - if user_input.lower() == 'q': - raise UserQuit - if user_input: - if is_valid_url(user_input): - self.docker_http_proxy = user_input - break - else: - print("Please enter a valid url") - continue - else: - self.docker_http_proxy = "" - break - - while True: - user_input = input( - "HTTPS proxy (https://example.proxy:port): ") - if user_input.lower() == 'q': - raise UserQuit - if user_input: - if is_valid_url(user_input): - self.docker_https_proxy = user_input - break - else: - print("Please enter a valid url") - continue - else: - self.docker_https_proxy = "" - break - - if not self.docker_http_proxy and not self.docker_https_proxy: - print("At least one proxy required") - continue - else: - self.enable_docker_proxy = True - - while True: - # TODO: Current Docker version 18.03.1-ce utilizes go-lang - # net library for proxy setting. 
The go-lang net lib - # doesn't support CIDR notation until this commit: - # - # https://github.com/golang/net/commit/ - # c21de06aaf072cea07f3a65d6970e5c7d8b6cd6d - # - # After docker upgrades to a version that CIDR notation - # supported pre_set_no_proxy will be simplified to subnets - if self.system_mode == \ - sysinv_constants.SYSTEM_MODE_SIMPLEX: - pre_set_no_proxy = "localhost,127.0.0.1," + \ - str(self.controller_floating_address) + "," + \ - str(self.controller_address_0) + "," + \ - str(self.controller_address_1) + "," + \ - str(self.external_oam_address_0) - else: - pre_set_no_proxy = "localhost,127.0.0.1," + \ - str(self.controller_floating_address) + "," + \ - str(self.controller_address_0) + "," + \ - str(self.controller_address_1) + "," + \ - str(self.external_oam_floating_address) + "," + \ - str(self.external_oam_address_0) + "," + \ - str(self.external_oam_address_1) - - user_input = input( - "Additional NO proxy besides '" + - pre_set_no_proxy + - "'\n(Comma-separated addresses, " + - "wildcard/subnet not allowed)\n:") - if user_input.lower() == 'q': - raise UserQuit - if user_input: - input_addr_list = user_input.split(",") - valid_address = True - for input_addr in input_addr_list: - if not is_valid_domain_or_ip(input_addr): - print("Input address '%s' is invalid" % - input_addr) - valid_address = False - break - if valid_address: - self.docker_no_proxy = pre_set_no_proxy + \ - "," + user_input - break - else: - continue - else: - self.docker_no_proxy = pre_set_no_proxy - break - break - elif user_input.lower() in ('n', ''): - self.enable_docker_proxy = False - break - else: - print("Invalid choice") - continue - - def input_docker_registry_config(self): - """Allow user to input docker registry config.""" - - print("\nDocker Registry:") - print("----------------\n") - print("Configure docker registries to pull images from.\n" - "Default registries are:\n" - "k8s.gcr.io, gcr.io, quay.io, docker.io\n" - ) - while True: - user_input = input( - "Use 
default docker registries [Y/n]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input.lower() == 'n': - # unexpected newline displayed if textwrap.fill with - # '\n' included - print("\nEach registry can be specified as one of the" - "following:\n" - " - domain (e.g. example.domain)\n" - " - domain with port (e.g. example.domain:5000)\n" - " - IPv4 address (e.g. 1.2.3.4)\n" - " - IPv4 address with port (e.g. 1.2.3.4:5000)\n" - ) - while True: - user_input = input( - "Use a unified registry replacing all " - "default registries [y/n]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input.lower() == 'y': - # Input a unified registry to avoid - # inputing the same registry repeatly - while True: - user_input = input( - "Enter a unified registry: ") - if user_input.lower() == 'q': - raise UserQuit - if is_valid_domain_or_ip(user_input): - self.docker_k8s_registry = user_input - self.docker_gcr_registry = user_input - self.docker_quay_registry = user_input - self.docker_docker_registry = user_input - self.docker_use_default_registry = False - break - else: - print("Please enter a valid registry address") - continue - - # Only if a unified registry set, it could be - # an insecure registry - while True: - user_input = input( - "Is '" + self.docker_k8s_registry + - "' a secure registry (https) [Y/n]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input.lower() in ('y', ''): - self.is_secure_registry = True - break - elif user_input.lower() == 'n': - self.is_secure_registry = False - break - else: - print("Invalid choice") - continue - break - - elif user_input.lower() == 'n': - # Input alternative registries separately - while True: - user_input = input( - "Alternative registry to k8s.gcr.io: ") - if user_input.lower() == 'q': - raise UserQuit - if is_valid_domain_or_ip(user_input): - self.docker_k8s_registry = user_input - break - else: - print("Please enter a valid registry address") - continue - - while True: - 
user_input = input( - "Alternative registry to gcr.io: ") - if user_input.lower() == 'q': - raise UserQuit - if is_valid_domain_or_ip(user_input): - self.docker_gcr_registry = user_input - break - else: - print("Please enter a valid registry address") - continue - - while True: - user_input = input( - "Alternative registry to quay.io: ") - if user_input.lower() == 'q': - raise UserQuit - if is_valid_domain_or_ip(user_input): - self.docker_quay_registry = user_input - break - else: - print("Please enter a valid registry address") - continue - - while True: - user_input = input( - "Alternative registry to docker.io: ") - if user_input.lower() == 'q': - raise UserQuit - if is_valid_domain_or_ip(user_input): - self.docker_docker_registry = user_input - break - else: - print("Please enter a valid registry address") - continue - - if (self.docker_k8s_registry or - self.docker_gcr_registry or - self.docker_quay_registry or - self.docker_docker_registry): - self.docker_use_default_registry = False - break - else: - print("At least one registry is required") - continue - else: - print("Invalid choice") - continue - break - - elif user_input.lower() in ('y', ''): - self.docker_use_default_registry = True - break - else: - print("Invalid choice") - continue - - def input_authentication_config(self): - """Allow user to input authentication config and perform validation. 
- """ - - print("\nCloud Authentication:") - print("-------------------------------\n") - print(textwrap.fill( - "Configure a password for the Cloud admin user " - "The Password must have a minimum length of 7 character, " - "and conform to password complexity rules", 80)) - - password_input = "" - while True: - user_input = getpass.getpass("Create admin user password: ") - if user_input.lower() == 'q': - raise UserQuit - - password_input = user_input - if len(password_input) < 1: - print("Password cannot be empty") - continue - - user_input = getpass.getpass("Repeat admin user password: ") - if user_input.lower() == 'q': - raise UserQuit - - if user_input != password_input: - print("Password did not match") - continue - else: - print("\n") - self.admin_password = user_input - # the admin password will be validated - self.add_password_for_validation('ADMIN_PASSWORD', - self.admin_password) - if self.process_validation_passwords(console=True): - break - - def default_config(self): - """Use default configuration suitable for testing in virtual env.""" - - self.admin_password = "Li69nux*" - self.management_interface_configured = True - self.external_oam_interface_configured = True - self.default_pxeboot_config() - if not self.kubernetes: - self.nameserver_addresses = ["", "", ""] - - if utils.is_cpe(): - self.system_mode = sysinv_constants.SYSTEM_MODE_DUPLEX - - def input_config(self): - """Allow user to input configuration.""" - print("System Configuration") - print("====================") - print("Enter Q at any prompt to abort...\n") - - self.set_time() - self.set_timezone(self) - if utils.is_cpe(): - self.input_system_mode_config() - self.check_storage_config() - if self.system_mode == sysinv_constants.SYSTEM_MODE_SIMPLEX: - self.default_pxeboot_config() - self.input_aio_simplex_management_config() - else: - # An AIO system cannot function as a Distributed Cloud System - # Controller - if utils.get_system_type() != sysinv_constants.TIS_AIO_BUILD: - 
self.input_dc_selection() - self.input_pxeboot_config() - self.input_management_config() - if self.kubernetes: - self.input_cluster_host_config() - self.input_external_oam_config() - if self.kubernetes: - self.input_dns_config() - # Docker proxy is only used in kubernetes config - self.input_docker_proxy_config() - self.input_docker_registry_config() - self.input_authentication_config() - - def is_valid_management_multicast_subnet(self, ip_subnet): - """Determine whether the mgmt multicast subnet is valid.""" - # The multicast subnet must belong to the same Address Family - # as the management network - if ip_subnet.version != self.management_subnet.version: - print(textwrap.fill( - "Invalid network address - Management Multicast Subnet and " - " Network IP Families must be the same.", 80)) - return False - elif ip_subnet.size < 16: - print("Subnet too small - must have at least 16 addresses") - return False - elif ip_subnet.ip != ip_subnet.network: - print("Invalid network address") - return False - elif ip_subnet.version == 6 and ip_subnet.prefixlen < 64: - print("IPv6 minimum prefix length is 64") - return False - elif not ip_subnet.is_multicast(): - print("Invalid network address - must be multicast") - return False - else: - return True - - def input_config_from_file(self, configfile, restore=False): - """Read configuration from answer or config file. - - WARNING: Any changes made here need to be reflected in the code - that translates region config to this format in regionconfig.py. 
- """ - if not os.path.isfile(configfile): - print("Specified answer or config file not found") - raise ConfigFail("Answer or Config file not found") - - config = configparser.RawConfigParser() - config_sections = [] - - try: - config.read(configfile) - config_sections = config.sections() - - self.system_mode = config.get('cSYSTEM', 'SYSTEM_MODE') - if config.has_option('cSYSTEM', 'DISTRIBUTED_CLOUD_ROLE'): - self.system_dc_role = \ - config.get('cSYSTEM', 'DISTRIBUTED_CLOUD_ROLE') - - if config.has_option('cMETA', 'CONFIG_UUID'): - self.config_uuid = config.get('cMETA', 'CONFIG_UUID') - - if config.has_option('cREGION', 'REGION_CONFIG'): - self.region_config = config.getboolean( - 'cREGION', 'REGION_CONFIG') - - if config.has_option('cREGION', 'REGION_SERVICES_CREATE'): - self.region_services_create = config.getboolean( - 'cREGION', 'REGION_SERVICES_CREATE') - - # Timezone configuration - if config.has_option('cSYSTEM', 'TIMEZONE'): - self.timezone = config.get('cSYSTEM', 'TIMEZONE') - - # Storage configuration - if (config.has_option('cSTOR', 'DATABASE_STORAGE') or - config.has_option('cSTOR', 'IMAGE_STORAGE') or - config.has_option('cSTOR', 'BACKUP_STORAGE') or - config.has_option('cSTOR', 'IMAGE_CONVERSIONS_VOLUME') or - config.has_option('cSTOR', 'SHARED_INSTANCE_STORAGE') or - config.has_option('cSTOR', 'CINDER_BACKEND') or - config.has_option('cSTOR', 'CINDER_DEVICE') or - config.has_option('cSTOR', 'CINDER_LVM_TYPE') or - config.has_option('cSTOR', 'CINDER_STORAGE')): - msg = "DATABASE_STORAGE, IMAGE_STORAGE, BACKUP_STORAGE, " + \ - "IMAGE_CONVERSIONS_VOLUME, SHARED_INSTANCE_STORAGE, " + \ - "CINDER_BACKEND, CINDER_DEVICE, CINDER_LVM_TYPE, " + \ - "CINDER_STORAGE " + \ - "are not valid entries in config file." 
- raise ConfigFail(msg) - - # PXEBoot network configuration - if config.has_option('cPXEBOOT', 'PXEBOOT_SUBNET'): - self.separate_pxeboot_network = True - self.pxeboot_subnet = IPNetwork(config.get( - 'cPXEBOOT', 'PXEBOOT_SUBNET')) - if config.has_option('cPXEBOOT', 'PXEBOOT_START_ADDRESS'): - self.pxeboot_start_address = IPAddress(config.get( - 'cPXEBOOT', 'PXEBOOT_START_ADDRESS')) - if config.has_option('cPXEBOOT', 'PXEBOOT_END_ADDRESS'): - self.pxeboot_end_address = IPAddress(config.get( - 'cPXEBOOT', 'PXEBOOT_END_ADDRESS')) - if not self.pxeboot_start_address and \ - not self.pxeboot_end_address: - self.pxeboot_start_address = self.pxeboot_subnet[2] - self.pxeboot_end_address = self.pxeboot_subnet[-2] - self.use_entire_pxeboot_subnet = True - else: - self.use_entire_pxeboot_subnet = False - self.controller_pxeboot_address_0 = IPAddress(config.get( - 'cPXEBOOT', 'CONTROLLER_PXEBOOT_ADDRESS_0')) - self.controller_pxeboot_address_1 = IPAddress(config.get( - 'cPXEBOOT', 'CONTROLLER_PXEBOOT_ADDRESS_1')) - self.controller_pxeboot_floating_address = IPAddress( - config.get('cPXEBOOT', - 'CONTROLLER_PXEBOOT_FLOATING_ADDRESS')) - else: - self.default_pxeboot_config() - # Allow this to be optional for backwards compatibility - if config.has_option('cPXEBOOT', - 'PXECONTROLLER_FLOATING_HOSTNAME'): - self.pxecontroller_floating_hostname = config.get( - 'cPXEBOOT', 'PXECONTROLLER_FLOATING_HOSTNAME') - - # Management network configuration - if self.system_mode == sysinv_constants.SYSTEM_MODE_SIMPLEX and \ - not self.subcloud_config(): - # For AIO-SX, only the management subnet is configurable - # (unless this is a subcloud). 
- if config.has_option('cMGMT', 'MANAGEMENT_SUBNET'): - management_subnet = IPNetwork(config.get( - 'cMGMT', 'MANAGEMENT_SUBNET')) - else: - management_subnet = IPNetwork( - constants.DEFAULT_MGMT_ON_LOOPBACK_SUBNET_IPV4) - self.input_aio_simplex_management_config( - management_subnet=management_subnet) - else: - self.management_interface_name = config.get( - 'cMGMT', 'MANAGEMENT_INTERFACE_NAME') - self.management_interface = config.get( - 'cMGMT', 'MANAGEMENT_INTERFACE') - self.management_mtu = config.get( - 'cMGMT', 'MANAGEMENT_MTU') - self.management_subnet = IPNetwork(config.get( - 'cMGMT', 'MANAGEMENT_SUBNET')) - if config.has_option('cMGMT', 'MANAGEMENT_GATEWAY_ADDRESS'): - self.management_gateway_address = IPAddress(config.get( - 'cMGMT', 'MANAGEMENT_GATEWAY_ADDRESS')) - else: - self.management_gateway_address = None - self.lag_management_interface = config.getboolean( - 'cMGMT', 'LAG_MANAGEMENT_INTERFACE') - if self.separate_pxeboot_network: - self.management_vlan = config.get('cMGMT', - 'MANAGEMENT_VLAN') - if self.lag_management_interface: - self.lag_management_interface_member0 = config.get( - 'cMGMT', 'MANAGEMENT_BOND_MEMBER_0') - self.lag_management_interface_member1 = config.get( - 'cMGMT', 'MANAGEMENT_BOND_MEMBER_1') - self.lag_management_interface_policy = config.get( - 'cMGMT', 'MANAGEMENT_BOND_POLICY') - self.controller_address_0 = IPAddress(config.get( - 'cMGMT', 'CONTROLLER_0_ADDRESS')) - self.controller_address_1 = IPAddress(config.get( - 'cMGMT', 'CONTROLLER_1_ADDRESS')) - self.controller_floating_address = IPAddress(config.get( - 'cMGMT', 'CONTROLLER_FLOATING_ADDRESS')) - if config.has_option('cMGMT', 'NFS_MANAGEMENT_ADDRESS_1'): - self.nfs_management_address_1 = IPAddress(config.get( - 'cMGMT', 'NFS_MANAGEMENT_ADDRESS_1')) - else: - self.nfs_management_address_1 = '' - if config.has_option('cMGMT', 'NFS_MANAGEMENT_ADDRESS_2'): - self.nfs_management_address_2 = IPAddress(config.get( - 'cMGMT', 'NFS_MANAGEMENT_ADDRESS_2')) - else: - 
self.nfs_management_address_2 = '' - self.controller_floating_hostname = config.get( - 'cMGMT', 'CONTROLLER_FLOATING_HOSTNAME') - self.controller_hostname_prefix = config.get( - 'cMGMT', 'CONTROLLER_HOSTNAME_PREFIX') - self.oamcontroller_floating_hostname = config.get( - 'cMGMT', 'OAMCONTROLLER_FLOATING_HOSTNAME') - - if config.has_option('cMGMT', 'MANAGEMENT_MULTICAST_SUBNET'): - self.management_multicast_subnet = IPNetwork(config.get( - 'cMGMT', 'MANAGEMENT_MULTICAST_SUBNET')) - else: - if self.management_subnet.version == 6: - # Management subnet is IPv6, so set the default value - self.management_multicast_subnet = \ - IPNetwork(constants.DEFAULT_MULTICAST_SUBNET_IPV6) - else: - self.management_multicast_subnet = \ - IPNetwork(constants.DEFAULT_MULTICAST_SUBNET_IPV4) - - self.management_interface_configured = True - if config.has_option('cMGMT', 'DYNAMIC_ADDRESS_ALLOCATION'): - self.dynamic_address_allocation = config.getboolean( - 'cMGMT', 'DYNAMIC_ADDRESS_ALLOCATION') - else: - self.dynamic_address_allocation = True - if config.has_option('cMGMT', 'MANAGEMENT_START_ADDRESS'): - self.management_start_address = IPAddress(config.get( - 'cMGMT', 'MANAGEMENT_START_ADDRESS')) - if config.has_option('cMGMT', 'MANAGEMENT_END_ADDRESS'): - self.management_end_address = IPAddress(config.get( - 'cMGMT', 'MANAGEMENT_END_ADDRESS')) - if not self.management_start_address and \ - not self.management_end_address: - self.management_start_address = self.management_subnet[2] - self.management_end_address = self.management_subnet[-2] - self.use_entire_mgmt_subnet = True - - # Cluster network configuration - if self.kubernetes: - if config.has_section('cCLUSTER'): - self.cluster_host_interface_name = config.get( - 'cCLUSTER', 'CLUSTER_INTERFACE_NAME') - self.cluster_host_interface = config.get( - 'cCLUSTER', 'CLUSTER_INTERFACE') - self.cluster_host_mtu = config.get( - 'cCLUSTER', 'CLUSTER_MTU') - self.cluster_host_vlan = '' - if config.has_option('cCLUSTER', 'CLUSTER_VLAN'): - 
cvalue = config.get('cCLUSTER', 'CLUSTER_VLAN') - if cvalue != 'NC': - self.cluster_host_vlan = cvalue - self.lag_cluster_host_interface = config.getboolean( - 'cCLUSTER', 'LAG_CLUSTER_INTERFACE') - if self.lag_cluster_host_interface: - self.lag_cluster_host_interface_member0 = config.get( - 'cCLUSTER', 'CLUSTER_BOND_MEMBER_0') - self.lag_cluster_host_interface_member1 = config.get( - 'cCLUSTER', 'CLUSTER_BOND_MEMBER_1') - self.lag_cluster_host_interface_policy = config.get( - 'cCLUSTER', 'CLUSTER_BOND_POLICY') - self.cluster_host_subnet = IPNetwork(config.get( - 'cCLUSTER', 'CLUSTER_SUBNET')) - else: - self.cluster_host_interface_name = \ - self.management_interface_name - self.cluster_host_interface = self.management_interface - self.cluster_host_vlan = self.management_vlan - self.cluster_host_interface_configured = True - - # External OAM network configuration - self.external_oam_interface_name = config.get( - 'cEXT_OAM', 'EXTERNAL_OAM_INTERFACE_NAME') - self.external_oam_interface = config.get( - 'cEXT_OAM', 'EXTERNAL_OAM_INTERFACE') - self.external_oam_mtu = config.get( - 'cEXT_OAM', 'EXTERNAL_OAM_MTU') - self.external_oam_vlan = '' - if config.has_option('cEXT_OAM', 'EXTERNAL_OAM_VLAN'): - cvalue = config.get('cEXT_OAM', 'EXTERNAL_OAM_VLAN') - if cvalue != 'NC': - self.external_oam_vlan = cvalue - self.external_oam_subnet = IPNetwork(config.get( - 'cEXT_OAM', 'EXTERNAL_OAM_SUBNET')) - self.lag_external_oam_interface = config.getboolean( - 'cEXT_OAM', 'LAG_EXTERNAL_OAM_INTERFACE') - if self.lag_external_oam_interface: - self.lag_external_oam_interface_member0 = config.get( - 'cEXT_OAM', 'EXTERNAL_OAM_BOND_MEMBER_0') - self.lag_external_oam_interface_member1 = config.get( - 'cEXT_OAM', 'EXTERNAL_OAM_BOND_MEMBER_1') - self.lag_external_oam_interface_policy = config.get( - 'cEXT_OAM', 'EXTERNAL_OAM_BOND_POLICY') - else: - self.lag_external_oam_interface_member0 = None - self.lag_external_oam_interface_member1 = None - self.lag_external_oam_interface_policy = None 
- self.lag_external_oam_interface_txhash = None - - if config.has_option('cEXT_OAM', 'EXTERNAL_OAM_GATEWAY_ADDRESS'): - self.external_oam_gateway_address = IPAddress(config.get( - 'cEXT_OAM', 'EXTERNAL_OAM_GATEWAY_ADDRESS')) - else: - self.external_oam_gateway_address = None - self.external_oam_floating_address = IPAddress(config.get( - 'cEXT_OAM', 'EXTERNAL_OAM_FLOATING_ADDRESS')) - self.external_oam_address_0 = IPAddress(config.get( - 'cEXT_OAM', 'EXTERNAL_OAM_0_ADDRESS')) - self.external_oam_address_1 = IPAddress(config.get( - 'cEXT_OAM', 'EXTERNAL_OAM_1_ADDRESS')) - - self.external_oam_interface_configured = True - - # DNS configuration - if self.kubernetes: - if config.has_section('cDNS'): - self.nameserver_addresses = ["", "", ""] - for x in range(0, len(self.nameserver_addresses)): - if config.has_option('cDNS', - 'NAMESERVER_' + str(x + 1)): - cvalue = config.get('cDNS', - 'NAMESERVER_' + str(x + 1)) - if cvalue != "NC" and cvalue != "": - self.nameserver_addresses[x] = \ - IPAddress(cvalue) - - # Docker Proxy Configuration - if config.has_section('cDOCKER_PROXY'): - self.enable_docker_proxy = True - if config.has_option('cDOCKER_PROXY', - 'DOCKER_HTTP_PROXY'): - self.docker_http_proxy = config.get( - 'cDOCKER_PROXY', 'DOCKER_HTTP_PROXY') - if config.has_option('cDOCKER_PROXY', - 'DOCKER_HTTPS_PROXY'): - self.docker_https_proxy = config.get( - 'cDOCKER_PROXY', 'DOCKER_HTTPS_PROXY') - if config.has_option('cDOCKER_PROXY', - 'DOCKER_NO_PROXY'): - self.docker_no_proxy = config.get( - 'cDOCKER_PROXY', 'DOCKER_NO_PROXY') - - # Docker Registry Configuration - if config.has_section('cDOCKER_REGISTRY'): - self.docker_use_default_registry = False - if config.has_option('cDOCKER_REGISTRY', - 'DOCKER_K8S_REGISTRY'): - self.docker_k8s_registry = config.get( - 'cDOCKER_REGISTRY', 'DOCKER_K8S_REGISTRY') - if config.has_option('cDOCKER_REGISTRY', - 'DOCKER_GCR_REGISTRY'): - self.docker_gcr_registry = config.get( - 'cDOCKER_REGISTRY', 'DOCKER_GCR_REGISTRY') - if 
config.has_option('cDOCKER_REGISTRY', - 'DOCKER_QUAY_REGISTRY'): - self.docker_quay_registry = config.get( - 'cDOCKER_REGISTRY', 'DOCKER_QUAY_REGISTRY') - if config.has_option('cDOCKER_REGISTRY', - 'DOCKER_DOCKER_REGISTRY'): - self.docker_docker_registry = config.get( - 'cDOCKER_REGISTRY', 'DOCKER_DOCKER_REGISTRY') - if config.has_option('cDOCKER_REGISTRY', - 'IS_SECURE_REGISTRY'): - self.is_secure_registry = config.getboolean( - 'cDOCKER_REGISTRY', 'IS_SECURE_REGISTRY') - else: - self.is_secure_registry = True - - # SDN Network configuration - if config.has_option('cSDN', 'ENABLE_SDN'): - raise ConfigFail("The option ENABLE_SDN is no longer " - "supported.") - - # Authentication configuration - if config.has_section('cAUTHENTICATION'): - if config.has_option('cAUTHENTICATION', 'ADMIN_PASSWORD'): - self.admin_password = config.get( - 'cAUTHENTICATION', 'ADMIN_PASSWORD') - - if self.admin_password == "" and not restore: - print("Admin password must be set in answer file") - raise ConfigFail("Admin password not set in answer file") - # the admin password will be validated - self.add_password_for_validation('ADMIN_PASSWORD', - self.admin_password) - - if config.has_option('cUSERS', 'SYSADMIN_SIG'): - raise ConfigFail("The option SYSADMIN_SIG is " - "no longer supported.") - - # Licensing configuration - if config.has_option('cLICENSING', 'LICENSE_FILE'): - raise ConfigFail("The option LICENSE_FILE is " - "no longer supported") - - # Security configuration - if config.has_option('cSECURITY', 'CONFIG_SYSADMIN_PW_AGE'): - raise ConfigFail("The option CONFIG_SYSADMIN_PW_AGE is " - "no longer supported.") - if config.has_option('cSECURITY', 'ENABLE_HTTPS'): - raise ConfigFail("The option ENABLE_HTTPS is " - "no longer supported.") - if config.has_option('cSECURITY', 'FIREWALL_RULES_FILE'): - raise ConfigFail("The option FIREWALL_RULES_FILE is " - "no longer supported") - - # Region configuration - if self.region_config: - self.region_1_name = config.get( - 'cREGION', 
'REGION_1_NAME') - self.region_2_name = config.get( - 'cREGION', 'REGION_2_NAME') - self.admin_username = config.get( - 'cREGION', 'ADMIN_USER_NAME') - if config.has_option('cREGION', 'ADMIN_USER_DOMAIN'): - self.admin_user_domain = config.get( - 'cREGION', 'ADMIN_USER_DOMAIN') - if config.has_option('cREGION', 'ADMIN_PROJECT_NAME'): - self.admin_project_name = config.get( - 'cREGION', 'ADMIN_PROJECT_NAME') - else: - self.admin_project_name = config.get( - 'cREGION', 'ADMIN_TENANT_NAME') - if config.has_option('cREGION', 'ADMIN_PROJECT_DOMAIN'): - self.admin_project_domain = config.get( - 'cREGION', 'ADMIN_PROJECT_DOMAIN') - if config.has_option('cREGION', 'SERVICE_PROJECT_NAME'): - self.service_project_name = config.get( - 'cREGION', 'SERVICE_PROJECT_NAME') - else: - self.service_project_name = config.get( - 'cREGION', 'SERVICE_TENANT_NAME') - if config.has_option('cREGION', 'USER_DOMAIN_NAME'): - self.service_user_domain = config.get( - 'cREGION', 'USER_DOMAIN_NAME') - if config.has_option('cREGION', 'PROJECT_DOMAIN_NAME'): - self.service_project_domain = config.get( - 'cREGION', 'PROJECT_DOMAIN_NAME') - self.keystone_auth_uri = config.get( - 'cREGION', 'KEYSTONE_AUTH_URI') - self.keystone_identity_uri = config.get( - 'cREGION', 'KEYSTONE_IDENTITY_URI') - self.keystone_admin_uri = config.get( - 'cREGION', 'KEYSTONE_ADMIN_URI') - self.keystone_internal_uri = config.get( - 'cREGION', 'KEYSTONE_INTERNAL_URI') - self.keystone_public_uri = config.get( - 'cREGION', 'KEYSTONE_PUBLIC_URI') - self.keystone_service_name = config.get( - 'cREGION', 'KEYSTONE_SERVICE_NAME') - self.keystone_service_type = config.get( - 'cREGION', 'KEYSTONE_SERVICE_TYPE') - if config.has_option('cREGION', 'LDAP_REGION_NAME'): - self.ldap_region_name = config.get( - 'cREGION', 'LDAP_REGION_NAME') - if config.has_option('cREGION', 'LDAP_SERVICE_NAME'): - self.ldap_service_name = config.get( - 'cREGION', 'LDAP_SERVICE_NAME') - if config.has_option('cREGION', 'LDAP_SERVICE_URI'): - 
self.ldap_service_uri = config.get( - 'cREGION', 'LDAP_SERVICE_URI') - self.patching_ks_user_name = config.get( - 'cREGION', 'PATCHING_USER_NAME') - self.patching_ks_password = config.get( - 'cREGION', 'PATCHING_PASSWORD') - self.add_password_for_validation('PATCHING_PASSWORD', - self.patching_ks_password) - self.sysinv_ks_user_name = config.get( - 'cREGION', 'SYSINV_USER_NAME') - self.sysinv_ks_password = config.get( - 'cREGION', 'SYSINV_PASSWORD') - self.add_password_for_validation('SYSINV_PASSWORD', - self.sysinv_ks_password) - self.sysinv_service_name = config.get( - 'cREGION', 'SYSINV_SERVICE_NAME') - self.sysinv_service_type = config.get( - 'cREGION', 'SYSINV_SERVICE_TYPE') - self.mtce_ks_user_name = config.get( - 'cREGION', 'MTCE_USER_NAME') - self.mtce_ks_password = config.get( - 'cREGION', 'MTCE_PASSWORD') - self.add_password_for_validation('MTCE_PASSWORD', - self.mtce_ks_password) - - self.nfv_ks_user_name = config.get( - 'cREGION', 'NFV_USER_NAME') - self.nfv_ks_password = config.get( - 'cREGION', 'NFV_PASSWORD') - self.add_password_for_validation('NFV_PASSWORD', - self.nfv_ks_password) - self.fm_ks_user_name = config.get( - 'cREGION', 'FM_USER_NAME') - self.fm_ks_password = config.get( - 'cREGION', 'FM_PASSWORD') - self.add_password_for_validation('FM_PASSWORD', - self.fm_ks_password) - - self.barbican_ks_user_name = config.get( - 'cREGION', 'BARBICAN_USER_NAME') - self.barbican_ks_password = config.get( - 'cREGION', 'BARBICAN_PASSWORD') - self.add_password_for_validation('BARBICAN_PASSWORD', - self.barbican_ks_password) - - self.shared_services.append(self.keystone_service_type) - - if self.subcloud_config(): - self.system_controller_subnet = IPNetwork(config.get( - 'cREGION', 'SYSTEM_CONTROLLER_SUBNET')) - self.system_controller_floating_ip = config.get( - 'cREGION', 'SYSTEM_CONTROLLER_FLOATING_ADDRESS') - - except Exception: - print("Error parsing answer file") - raise - - return config_sections - - def display_config(self): - """Display 
configuration that will be applied.""" - print("\nThe following configuration will be applied:") - - print("\nSystem Configuration") - print("--------------------") - print("Time Zone: " + str(self.timezone)) - print("System mode: %s" % self.system_mode) - if self.system_type != sysinv_constants.TIS_AIO_BUILD: - dc_role_true = "no" - if (self.system_dc_role == - sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER): - dc_role_true = "yes" - print("Distributed Cloud System Controller: %s" % dc_role_true) - - print("\nPXEBoot Network Configuration") - print("-----------------------------") - if not self.separate_pxeboot_network: - print("Separate PXEBoot network not configured") - else: - print("PXEBoot subnet: " + str(self.pxeboot_subnet.cidr)) - print("PXEBoot floating address: " + - str(self.controller_pxeboot_floating_address)) - print("Controller 0 PXEBoot address: " + - str(self.controller_pxeboot_address_0)) - print("Controller 1 PXEBoot address: " + - str(self.controller_pxeboot_address_1)) - if not self.use_entire_pxeboot_subnet: - print("PXEBoot start address: " + - str(self.pxeboot_start_address)) - print("PXEBoot end address: " + str(self.pxeboot_end_address)) - print("PXEBoot Controller floating hostname: " + - str(self.pxecontroller_floating_hostname)) - - print("\nManagement Network Configuration") - print("--------------------------------") - print("Management interface name: " + self.management_interface_name) - print("Management interface: " + self.management_interface) - if self.management_vlan: - print("Management vlan: " + self.management_vlan) - print("Management interface MTU: " + self.management_mtu) - if self.lag_management_interface: - print("Management ae member 0: " + - self.lag_management_interface_member0) - print("Management ae member 1: " + - self.lag_management_interface_member1) - print("Management ae policy : " + - self.lag_management_interface_policy) - print("Management subnet: " + str(self.management_subnet.cidr)) - if 
self.management_gateway_address: - print("Management gateway address: " + - str(self.management_gateway_address)) - print("Controller floating address: " + - str(self.controller_floating_address)) - print("Controller 0 address: " + str(self.controller_address_0)) - print("Controller 1 address: " + str(self.controller_address_1)) - print("NFS Management Address 1: " + - str(self.nfs_management_address_1)) - print("NFS Management Address 2: " + - str(self.nfs_management_address_2)) - print("Controller floating hostname: " + - str(self.controller_floating_hostname)) - print("Controller hostname prefix: " + self.controller_hostname_prefix) - print("OAM Controller floating hostname: " + - str(self.oamcontroller_floating_hostname)) - if not self.use_entire_mgmt_subnet: - print("Management start address: " + - str(self.management_start_address)) - print("Management end address: " + - str(self.management_end_address)) - if self.dynamic_address_allocation: - print("Dynamic IP address allocation is selected") - print("Management multicast subnet: " + - str(self.management_multicast_subnet)) - - if self.kubernetes: - print("\nKubernetes Cluster Network Configuration") - print("----------------------------------------") - print("Cluster pod network subnet: " + - str(self.cluster_pod_subnet.cidr)) - print("Cluster service network subnet: " + - str(self.cluster_service_subnet.cidr)) - print("Cluster host interface name: " + - self.cluster_host_interface_name) - print("Cluster host interface: " + self.cluster_host_interface) - if self.cluster_host_vlan: - print("Cluster host vlan: " + self.cluster_host_vlan) - print("Cluster host interface MTU: " + self.cluster_host_mtu) - if self.lag_cluster_host_interface: - print("Cluster host ae member 0: " + - self.lag_cluster_host_interface_member0) - print("Cluster host ae member 1: " + - self.lag_cluster_host_interface_member1) - print("Cluster host ae policy : " + - self.lag_cluster_host_interface_policy) - print("Cluster host subnet: " 
+ - str(self.cluster_host_subnet.cidr)) - - print("\nExternal OAM Network Configuration") - print("----------------------------------") - print("External OAM interface name: " + - self.external_oam_interface_name) - print("External OAM interface: " + self.external_oam_interface) - if self.external_oam_vlan: - print("External OAM vlan: " + self.external_oam_vlan) - print("External OAM interface MTU: " + self.external_oam_mtu) - if self.lag_external_oam_interface: - print("External OAM ae member 0: " + - self.lag_external_oam_interface_member0) - print("External OAM ae member 1: " + - self.lag_external_oam_interface_member1) - print("External OAM ae policy : " + - self.lag_external_oam_interface_policy) - print("External OAM subnet: " + str(self.external_oam_subnet)) - if self.external_oam_gateway_address: - print("External OAM gateway address: " + - str(self.external_oam_gateway_address)) - if self.system_mode != sysinv_constants.SYSTEM_MODE_SIMPLEX: - print("External OAM floating address: " + - str(self.external_oam_floating_address)) - print("External OAM 0 address: " + - str(self.external_oam_address_0)) - print("External OAM 1 address: " + - str(self.external_oam_address_1)) - else: - print("External OAM address: " + str(self.external_oam_address_0)) - - if self.kubernetes: - print("\nDNS Configuration") - print("-----------------") - dns_config = False - for x in range(0, len(self.nameserver_addresses)): - if self.nameserver_addresses[x]: - print("Nameserver " + str(x + 1) + ": " + - str(self.nameserver_addresses[x])) - dns_config = True - if not dns_config: - print("External DNS servers not configured") - if self.enable_docker_proxy: - print("\nDocker Proxy Configuration") - print("--------------------------") - if self.docker_http_proxy: - print("Docker HTTP proxy: " + self.docker_http_proxy) - if self.docker_https_proxy: - print("Docker HTTPS proxy: " + self.docker_https_proxy) - if self.docker_no_proxy: - print("Docker NO proxy: " + self.docker_no_proxy) - 
if not self.docker_use_default_registry: - print("\nDocker Registry Configuration") - print("-----------------------------") - if self.docker_k8s_registry: - print("Alternative registry to k8s.gcr.io: " + - self.docker_k8s_registry) - if self.docker_gcr_registry: - print("Alternative registry to gcr.io: " + - self.docker_gcr_registry) - if self.docker_quay_registry: - print("Alternative registry to quay.io: " + - self.docker_quay_registry) - if self.docker_docker_registry: - print("Alternative registry to docker.io: " + - self.docker_docker_registry) - print("Is registries secure: " + - str(self.is_secure_registry)) - - if self.region_config: - print("\nRegion Configuration") - print("--------------------") - print("Region 1 name: " + self.region_1_name) - print("Region 2 name: " + self.region_2_name) - print("Admin user name: " + self.admin_username) - print("Admin user domain: " + self.admin_user_domain) - print("Admin project name: " + self.admin_project_name) - print("Admin project domain: " + self.admin_project_domain) - print("Service project name: " + self.service_project_name) - print("Service user domain: " + self.service_user_domain) - print("Service project domain: " + self.service_project_domain) - print("Keystone auth URI: " + self.keystone_auth_uri) - print("Keystone identity URI: " + self.keystone_identity_uri) - print("Keystone admin URI: " + self.keystone_admin_uri) - print("Keystone internal URI: " + self.keystone_internal_uri) - print("Keystone public URI: " + self.keystone_public_uri) - print("Keystone service name: " + self.keystone_service_name) - print("Keystone service type: " + self.keystone_service_type) - print("LDAP service name: " + self.ldap_service_name) - print("LDAP region: " + self.ldap_region_name) - print("LDAP service URI:" + self.ldap_service_uri) - print("Patching user name: " + self.patching_ks_user_name) - print("Sysinv user name: " + self.sysinv_ks_user_name) - print("Sysinv service name: " + self.sysinv_service_name) - 
print("Sysinv service type: " + self.sysinv_service_type) - - if self.subcloud_config(): - print("\nSubcloud Configuration") - print("----------------------") - print("System controller subnet: " + - str(self.system_controller_subnet.cidr)) - print("System controller floating ip: " + - str(self.system_controller_floating_ip)) - - def write_config_file(self): - """Write configuration to a text file for later reference.""" - try: - os.makedirs(constants.CONFIG_WORKDIR, stat.S_IRWXU | stat.S_IRGRP | - stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH) - except OSError as exc: - if exc.errno == errno.EEXIST and os.path.isdir( - constants.CONFIG_WORKDIR): - pass - else: - LOG.error("Failed to create config directory: %s", - constants.CONFIG_WORKDIR) - raise ConfigFail("Failed to write configuration file") - - try: - with open(constants.CGCS_CONFIG_FILE, 'w') as f: - # System configuration - f.write("[cSYSTEM]\n") - f.write("# System Configuration\n") - f.write("SYSTEM_MODE=" + str(self.system_mode) + "\n") - if self.system_dc_role is not None: - f.write("DISTRIBUTED_CLOUD_ROLE=" + - str(self.system_dc_role) + "\n") - # Time Zone configuration - f.write("TIMEZONE=" + str(self.timezone) + "\n") - - # PXEBoot network configuration - f.write("\n[cPXEBOOT]") - f.write("\n# PXEBoot Network Support Configuration\n") - if self.separate_pxeboot_network: - f.write("PXEBOOT_SUBNET=" + - str(self.pxeboot_subnet.cidr) + "\n") - f.write("CONTROLLER_PXEBOOT_FLOATING_ADDRESS=" + - str(self.controller_pxeboot_floating_address) + - "\n") - f.write("CONTROLLER_PXEBOOT_ADDRESS_0=" + - str(self.controller_pxeboot_address_0) + "\n") - f.write("CONTROLLER_PXEBOOT_ADDRESS_1=" + - str(self.controller_pxeboot_address_1) + "\n") - f.write("PXECONTROLLER_FLOATING_HOSTNAME=" + - str(self.pxecontroller_floating_hostname) + "\n") - - # Management network configuration - f.write("\n[cMGMT]") - f.write("\n# Management Network Configuration\n") - f.write("MANAGEMENT_INTERFACE_NAME=" + - 
self.management_interface_name + "\n") - f.write("MANAGEMENT_INTERFACE=" + self.management_interface + - "\n") - if self.separate_pxeboot_network: - f.write("MANAGEMENT_VLAN=" + self.management_vlan + "\n") - f.write("MANAGEMENT_MTU=" + self.management_mtu + "\n") - f.write("MANAGEMENT_SUBNET=" + - str(self.management_subnet.cidr) + "\n") - if self.management_gateway_address: - f.write("MANAGEMENT_GATEWAY_ADDRESS=" + - str(self.management_gateway_address) + "\n") - if self.lag_management_interface: - f.write("LAG_MANAGEMENT_INTERFACE=yes\n") - f.write("MANAGEMENT_BOND_MEMBER_0=" + - str(self.lag_management_interface_member0) + "\n") - f.write("MANAGEMENT_BOND_MEMBER_1=" + - str(self.lag_management_interface_member1) + "\n") - f.write("MANAGEMENT_BOND_POLICY=" + - str(self.lag_management_interface_policy) + "\n") - else: - f.write("LAG_MANAGEMENT_INTERFACE=no\n") - f.write("CONTROLLER_FLOATING_ADDRESS=" + - str(self.controller_floating_address) + "\n") - f.write("CONTROLLER_0_ADDRESS=" + - str(self.controller_address_0) + "\n") - f.write("CONTROLLER_1_ADDRESS=" + - str(self.controller_address_1) + "\n") - f.write("NFS_MANAGEMENT_ADDRESS_1=" + - str(self.nfs_management_address_1) + "\n") - f.write("NFS_MANAGEMENT_ADDRESS_2=" + - str(self.nfs_management_address_2) + "\n") - f.write("CONTROLLER_FLOATING_HOSTNAME=" + - str(self.controller_floating_hostname) + "\n") - f.write("CONTROLLER_HOSTNAME_PREFIX=" + - self.controller_hostname_prefix + "\n") - f.write("OAMCONTROLLER_FLOATING_HOSTNAME=" + - str(self.oamcontroller_floating_hostname) + "\n") - if self.dynamic_address_allocation: - f.write("DYNAMIC_ADDRESS_ALLOCATION=yes\n") - else: - f.write("DYNAMIC_ADDRESS_ALLOCATION=no\n") - if self.region_config or not self.use_entire_mgmt_subnet: - f.write("MANAGEMENT_START_ADDRESS=" + - str(self.management_start_address) + "\n") - f.write("MANAGEMENT_END_ADDRESS=" + - str(self.management_end_address) + "\n") - f.write("MANAGEMENT_MULTICAST_SUBNET=" + - 
str(self.management_multicast_subnet) + "\n") - - # Cluster host network configuration - if self.kubernetes: - f.write("\n[cCLUSTER]") - f.write("\n# Cluster Host Network Configuration\n") - f.write("CLUSTER_INTERFACE_NAME=" - + self.cluster_host_interface_name + "\n") - f.write("CLUSTER_INTERFACE=" - + self.cluster_host_interface + "\n") - if self.cluster_host_vlan: - f.write("CLUSTER_VLAN=" - + self.cluster_host_vlan + "\n") - else: - f.write("CLUSTER_VLAN=NC\n") - f.write("CLUSTER_MTU=" - + self.cluster_host_mtu + "\n") - f.write("CLUSTER_SUBNET=" + - str(self.cluster_host_subnet.cidr) + "\n") - if self.lag_cluster_host_interface: - f.write("LAG_CLUSTER_INTERFACE=yes\n") - f.write("CLUSTER_BOND_MEMBER_0=" + - str(self.lag_cluster_host_interface_member0) - + "\n") - f.write("CLUSTER_BOND_MEMBER_1=" + - str(self.lag_cluster_host_interface_member1) - + "\n") - f.write("CLUSTER_BOND_POLICY=" + - str(self.lag_cluster_host_interface_policy) - + "\n") - else: - f.write("LAG_CLUSTER_INTERFACE=no\n") - - # External OAM network configuration - f.write("\n[cEXT_OAM]") - f.write("\n# External OAM Network Configuration\n") - f.write("EXTERNAL_OAM_INTERFACE_NAME=" + - self.external_oam_interface_name + "\n") - f.write("EXTERNAL_OAM_INTERFACE=" + - self.external_oam_interface + "\n") - if self.external_oam_vlan: - f.write("EXTERNAL_OAM_VLAN=" - + self.external_oam_vlan + "\n") - else: - f.write("EXTERNAL_OAM_VLAN=NC\n") - f.write("EXTERNAL_OAM_MTU=" + - self.external_oam_mtu + "\n") - if self.lag_external_oam_interface: - f.write("LAG_EXTERNAL_OAM_INTERFACE=yes\n") - f.write("EXTERNAL_OAM_BOND_MEMBER_0=" + - str(self.lag_external_oam_interface_member0) + - "\n") - f.write("EXTERNAL_OAM_BOND_MEMBER_1=" + - str(self.lag_external_oam_interface_member1) + - "\n") - f.write("EXTERNAL_OAM_BOND_POLICY=" + - str(self.lag_external_oam_interface_policy) + - "\n") - else: - f.write("LAG_EXTERNAL_OAM_INTERFACE=no\n") - f.write("EXTERNAL_OAM_SUBNET=" + - str(self.external_oam_subnet) + 
"\n") - if self.external_oam_gateway_address: - f.write("EXTERNAL_OAM_GATEWAY_ADDRESS=" + - str(self.external_oam_gateway_address) + "\n") - f.write("EXTERNAL_OAM_FLOATING_ADDRESS=" + - str(self.external_oam_floating_address) + "\n") - f.write("EXTERNAL_OAM_0_ADDRESS=" + - str(self.external_oam_address_0) + "\n") - f.write("EXTERNAL_OAM_1_ADDRESS=" + - str(self.external_oam_address_1) + "\n") - - if self.kubernetes: - # DNS configuration - f.write("\n[cDNS]") - f.write("\n# DNS Configuration\n") - for x in range(0, len(self.nameserver_addresses)): - if self.nameserver_addresses[x]: - f.write("NAMESERVER_" + str(x + 1) + "=" + - str(self.nameserver_addresses[x]) + "\n") - else: - f.write("NAMESERVER_" + str(x + 1) + "=NC" + "\n") - - # Docker proxy configuration - if self.enable_docker_proxy: - f.write("\n[cDOCKER_PROXY]") - f.write("\n# Docker Proxy Configuration\n") - if self.docker_http_proxy: - f.write( - "DOCKER_HTTP_PROXY=" + - str(self.docker_http_proxy) + "\n") - if self.docker_https_proxy: - f.write( - "DOCKER_HTTPS_PROXY=" + - str(self.docker_https_proxy) + "\n") - if self.docker_no_proxy: - f.write( - "DOCKER_NO_PROXY=" + - str(self.docker_no_proxy) + "\n") - - # Docker registry configuration - if not self.docker_use_default_registry: - f.write("\n[cDOCKER_REGISTRY]") - f.write("\n# Docker Registry Configuration\n") - if self.docker_k8s_registry: - f.write( - "DOCKER_K8S_REGISTRY=" + - str(self.docker_k8s_registry) + "\n") - if self.docker_gcr_registry: - f.write( - "DOCKER_GCR_REGISTRY=" + - str(self.docker_gcr_registry) + "\n") - if self.docker_quay_registry: - f.write( - "DOCKER_QUAY_REGISTRY=" + - str(self.docker_quay_registry) + "\n") - if self.docker_docker_registry: - f.write( - "DOCKER_DOCKER_REGISTRY=" + - str(self.docker_docker_registry) + "\n") - f.write( - "IS_SECURE_REGISTRY=" + - str(self.is_secure_registry) + "\n") - - # Security configuration - f.write("\n[cSECURITY]") - - # Region configuration - f.write("\n[cREGION]") - f.write("\n# 
Region Configuration\n") - f.write("REGION_CONFIG=" + str(self.region_config) + "\n") - if self.region_config: - f.write("REGION_1_NAME=%s\n" % - self.region_1_name) - f.write("REGION_2_NAME=%s\n" % - self.region_2_name) - f.write("ADMIN_USER_NAME=%s\n" % - self.admin_username) - f.write("ADMIN_USER_DOMAIN=%s\n" % - self.admin_user_domain) - f.write("ADMIN_PROJECT_NAME=%s\n" % - self.admin_project_name) - f.write("ADMIN_PROJECT_DOMAIN=%s\n" % - self.admin_project_domain) - f.write("SERVICE_PROJECT_NAME=%s\n" % - self.service_project_name) - f.write("SERVICE_USER_DOMAIN=%s\n" % - self.service_user_domain) - f.write("SERVICE_PROJECT_DOMAIN=%s\n" % - self.service_project_domain) - f.write("KEYSTONE_AUTH_URI=%s\n" % - self.keystone_auth_uri) - f.write("KEYSTONE_IDENTITY_URI=%s\n" % - self.keystone_identity_uri) - f.write("KEYSTONE_ADMIN_URI=%s\n" % - self.keystone_admin_uri) - f.write("KEYSTONE_INTERNAL_URI=%s\n" % - self.keystone_internal_uri) - f.write("KEYSTONE_PUBLIC_URI=%s\n" % - self.keystone_public_uri) - f.write("KEYSTONE_SERVICE_NAME=%s\n" % - self.keystone_service_name) - f.write("KEYSTONE_SERVICE_TYPE=%s\n" % - self.keystone_service_type) - if self.ldap_service_name: - f.write("LDAP_SERVICE_NAME=%s\n" % - self.ldap_service_name) - if self.ldap_region_name: - f.write("LDAP_REGION_NAME=%s\n" % - self.ldap_region_name) - if self.ldap_service_uri: - f.write("LDAP_SERVICE_URI=%s\n" % - self.ldap_service_uri) - f.write("PATCHING_USER_NAME=%s\n" % - self.patching_ks_user_name) - f.write("PATCHING_PASSWORD=%s\n" % - self.patching_ks_password) - f.write("SYSINV_USER_NAME=%s\n" % - self.sysinv_ks_user_name) - f.write("SYSINV_PASSWORD=%s\n" % - self.sysinv_ks_password) - f.write("SYSINV_SERVICE_NAME=%s\n" % - self.sysinv_service_name) - f.write("SYSINV_SERVICE_TYPE=%s\n" % - self.sysinv_service_type) - f.write("NFV_USER_NAME=%s\n" % - self.nfv_ks_user_name) - f.write("NFV_PASSWORD=%s\n" % - self.nfv_ks_password) - f.write("MTCE_USER_NAME=%s\n" % - 
self.mtce_ks_user_name) - f.write("MTCE_PASSWORD=%s\n" % - self.mtce_ks_password) - f.write("FM_USER_NAME=%s\n" % - self.fm_ks_user_name) - f.write("FM_PASSWORD=%s\n" % - self.fm_ks_password) - f.write("BARBICAN_USER_NAME=%s\n" % - self.barbican_ks_user_name) - f.write("BARBICAN_PASSWORD=%s\n" % - self.barbican_ks_password) - - # Subcloud configuration - if self.subcloud_config(): - f.write("SUBCLOUD_CONFIG=%s\n" % - str(self.subcloud_config())) - f.write("SYSTEM_CONTROLLER_SUBNET=%s\n" % - str(self.system_controller_subnet)) - f.write("SYSTEM_CONTROLLER_FLOATING_ADDRESS=%s\n" % - str(self.system_controller_floating_ip)) - - except IOError: - LOG.error("Failed to open file: %s", constants.CGCS_CONFIG_FILE) - raise ConfigFail("Failed to write configuration file") - - def setup_pxeboot_files(self): - """Create links for default pxeboot configuration files""" - try: - if self.dynamic_address_allocation: - default_pxelinux = "/pxeboot/pxelinux.cfg.files/default" - efi_grub_cfg = "/pxeboot/pxelinux.cfg.files/grub.cfg" - else: - default_pxelinux = "/pxeboot/pxelinux.cfg.files/default.static" - efi_grub_cfg = "/pxeboot/pxelinux.cfg.files/grub.cfg.static" - subprocess.check_call(["ln", "-s", - default_pxelinux, - "/pxeboot/pxelinux.cfg/default"]) - subprocess.check_call(["ln", "-s", - efi_grub_cfg, - "/pxeboot/pxelinux.cfg/grub.cfg"]) - except subprocess.CalledProcessError: - LOG.error("Failed to create pxelinux.cfg/default or " - "grub.cfg symlink") - raise ConfigFail("Failed to persist config files") - - def verify_branding(self): - """ Verify the constraints for custom branding procedure """ - found = False - for f in os.listdir('/opt/branding'): - if f == 'applied': - continue - if not f.endswith('.tgz'): - raise ConfigFail('/opt/branding/%s is not a valid branding ' - 'file name, refer to the branding section ' - 'of the documentation' % f) - else: - if found: - raise ConfigFail( - 'Only one branding tarball is permitted in /opt/' - 'branding, refer to the branding 
section of the ' - 'documentation') - found = True - - def persist_local_config(self): - utils.persist_config() - - if os.path.isdir('/opt/banner'): - utils.apply_banner_customization() - - def finalize_controller_config(self): - - # restart maintenance to pick up configuration changes - utils.mtce_restart() - - self.setup_pxeboot_files() - - # pass control over to service management (SM) - utils.mark_config_complete() - - def wait_service_enable(self): - # wait for the following service groups to go active - services = [ - 'oam-services', - 'controller-services', - 'cloud-services', - 'patching-services', - 'directory-services', - 'web-services', - 'vim-services', - ] - - if self.system_dc_role == \ - sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER: - services.append('distributed-cloud-services') - - count = len(services) - egrep = '"^(%s)[[:space:]]*active[[:space:]]*active"' % \ - '|'.join(services) - cmd = 'test $(sm-dump | grep -E %s | wc -l) -eq %d' % (egrep, count) - - interval = 10 - for _ in range(0, constants.SERVICE_ENABLE_TIMEOUT, interval): - try: - subprocess.check_call(cmd, shell=True, - stderr=subprocess.STDOUT) - return - except subprocess.CalledProcessError: - pass - time.sleep(interval) - else: - raise ConfigFail('Timeout waiting for service enable') - - def store_admin_password(self): - """Store the supplied admin password in the temporary keyring vault""" - os.environ["XDG_DATA_HOME"] = "/tmp" - keyring.set_password("CGCS", self.admin_username, self.admin_password) - del os.environ["XDG_DATA_HOME"] - - def create_bootstrap_config(self): - self.store_admin_password() - if self.region_config: - self._store_service_password() - utils.create_static_config() - - def apply_bootstrap_manifest(self): - filename = None - try: - utils.apply_manifest(self.controller_address_0, - sysinv_constants.CONTROLLER, - 'bootstrap', - constants.HIERADATA_WORKDIR, - runtime_filename=filename) - except Exception as e: - LOG.exception(e) - raise ConfigFail( - 
'Failed to apply bootstrap manifest. ' - 'See /var/log/puppet/latest/puppet.log for details.') - - def apply_controller_manifest(self): - try: - utils.apply_manifest(self.controller_address_0, - sysinv_constants.CONTROLLER, - 'controller', - constants.HIERADATA_PERMDIR) - except Exception as e: - LOG.exception(e) - raise ConfigFail( - 'Failed to apply controller manifest. ' - 'See /var/log/puppet/latest/puppet.log for details.') - - def add_password_for_validation(self, key, password): - """Add the config key and the password to be validated """ - if key and password: - for idx, stanza in enumerate(self.openstack_passwords): - if key in stanza: - # this password was previously added for validation, - # simply update the password value - self.openstack_passwords[idx][key] = password - return - self.openstack_passwords.append({key: password}) - - def process_validation_passwords(self, console=False): - """Validate the list of openstack passwords """ - if (self.os_password_rules_file and - not os.path.exists(self.os_password_rules_file)): - msg = ("Password rules file could not be found(%s) " - "Password rules cannot be applied" % - self.os_password_rules_file) - LOG.error(msg) - raise ConfigFail("Failed to apply Openstack password rules") - - if len(self.openstack_passwords) == 0: - # nothing to validate - return True - for stanza in self.openstack_passwords: - try: - ret, msg = validate_openstack_password( - stanza.values()[0], self.os_password_rules_file) - if not ret: - # one of the openstack passwords failed validation! - fail_msg = ("%s: %s" % (stanza.keys()[0], msg)) - if console: - print(textwrap.fill(fail_msg, 80)) - return False - raise ConfigFail(fail_msg) - except Exception as e: - # this implies an internal issue, either with - # the parsing rules or the validator. 
In the - # interest of robustness, we will proceed without - # password rules and possibly provision them - # later using service parameters - LOG.error("Failure on validating openstack password: %s" % e) - raise ConfigFail("%s" % e) - return True - - def _wait_system_config(self, client): - for _ in range(constants.SYSTEM_CONFIG_TIMEOUT): - try: - systems = client.sysinv.isystem.list() - if systems: - # only one system (default) - return systems[0] - except Exception: - pass - time.sleep(1) - else: - raise ConfigFail('Timeout waiting for default system ' - 'configuration') - - def _wait_ethernet_port_config(self, client, host): - count = 0 - for _ in range(constants.SYSTEM_CONFIG_TIMEOUT / 10): - try: - ports = client.sysinv.ethernet_port.list(host.uuid) - if ports and count == len(ports): - return ports - count = len(ports) - except Exception: - pass - time.sleep(10) - else: - raise ConfigFail('Timeout waiting for controller port ' - 'configuration') - - def _wait_disk_config(self, client, host): - count = 0 - for _ in range(constants.SYSTEM_CONFIG_TIMEOUT / 10): - try: - disks = client.sysinv.idisk.list(host.uuid) - if disks and count == len(disks): - return disks - count = len(disks) - except Exception: - pass - if disks: - time.sleep(1) # We don't need to wait that long - else: - time.sleep(10) - else: - raise ConfigFail('Timeout waiting for controller disk ' - 'configuration') - - def _wait_pv_config(self, client, host): - count = 0 - for _ in range(constants.SYSTEM_CONFIG_TIMEOUT / 10): - try: - pvs = client.sysinv.ipv.list(host.uuid) - if pvs and count == len(pvs): - return pvs - count = len(pvs) - except Exception: - pass - if pvs: - time.sleep(1) # We don't need to wait that long - else: - time.sleep(10) - else: - raise ConfigFail('Timeout waiting for controller PV ' - 'configuration') - - def _populate_system_config(self, client): - # Wait for pre-populated system - system = self._wait_system_config(client) - - # Update system attributes - capabilities = 
{'region_config': self.region_config, - 'vswitch_type': str(self.vswitch_type), - 'shared_services': str(self.shared_services), - 'sdn_enabled': self.enable_sdn, - 'https_enabled': self.enable_https} - - system_type = utils.get_system_type() - - region_name = constants.DEFAULT_REGION_NAME - if self.region_config: - region_name = self.region_2_name - - values = { - 'system_type': system_type, - 'system_mode': str(self.system_mode), - 'capabilities': capabilities, - 'timezone': str(self.timezone), - 'region_name': region_name, - 'service_project_name': self.service_project_name - } - if self.system_dc_role in \ - [sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER, - sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SUBCLOUD]: - values['distributed_cloud_role'] = self.system_dc_role - if self.system_dc_role == \ - sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SUBCLOUD: - # Set the system name to the subcloud name for subclouds - values['name'] = region_name - - patch = sysinv.dict_to_patch(values) - client.sysinv.isystem.update(system.uuid, patch) - - if self.region_config: - self._populate_region_config(client) - - def _populate_region_config(self, client): - self._populate_service_config(client) - - def _populate_service_config(self, client): - # populate service attributes in services table - - # Strip the version from the URIs - modified_identity_uri = (re.split(r'/v[0-9]', - self.keystone_identity_uri)[0]) - modified_auth_uri = (re.split(r'/v[0-9]', - self.keystone_auth_uri)[0]) - modified_admin_uri = (re.split(r'/v[0-9]', - self.keystone_admin_uri)[0]) - modified_internal_uri = (re.split(r'/v[0-9]', - self.keystone_internal_uri)[0]) - modified_public_uri = (re.split(r'/v[0-9]', - self.keystone_public_uri)[0]) - - # always populates keystone config - capabilities = {'admin_user_domain': self.admin_user_domain, - 'admin_project_domain': self.admin_project_domain, - 'service_user_domain': self.service_user_domain, - 'service_project_domain': self.service_project_domain, - 
'admin_user_name': self.admin_username, - 'admin_project_name': self.admin_project_name, - 'auth_uri': modified_auth_uri, - 'auth_url': modified_identity_uri, - 'service_name': self.keystone_service_name, - 'service_type': self.keystone_service_type, - 'region_services_create': self.region_services_create} - - # TODO (aning): Once we eliminate duplicated endpoints of shared - # services for non-primary region(s), we can remove the following code - # that pass over the URLs to sysinv for puppet to create these - # endpoints. - if modified_admin_uri: - capabilities.update({'admin_uri': modified_admin_uri}) - if modified_internal_uri: - capabilities.update({'internal_uri': modified_internal_uri}) - if modified_public_uri: - capabilities.update({'public_uri': modified_public_uri}) - - values = {'name': 'keystone', - 'enabled': True, - 'region_name': self.region_1_name, - 'capabilities': capabilities} - client.sysinv.sm_service.service_create(**values) - - # fm service config - capabilities = {'user_name': self.fm_ks_user_name} - values = {'name': "fm", - 'enabled': True, - 'region_name': self.region_2_name, - 'capabilities': capabilities} - client.sysinv.sm_service.service_create(**values) - - # if ldap is a shared service - if self.ldap_service_uri: - capabilities = {'service_name': self.ldap_service_name} - capabilities.update({'service_uri': self.ldap_service_uri}) - values = {'name': self.ldap_service_name, - 'enabled': True, - 'region_name': self.ldap_region_name, - 'capabilities': capabilities} - client.sysinv.sm_service.service_create(**values) - - # sysinv service config - capabilities = {'service_name': self.sysinv_service_name, - 'service_type': self.sysinv_service_type, - 'user_name': self.sysinv_ks_user_name} - values = {'name': self.sysinv_service_name, - 'enabled': True, - 'region_name': self.region_2_name, - 'capabilities': capabilities} - client.sysinv.sm_service.service_create(**values) - - # populate patching service config - capabilities = 
{'service_name': 'patching', - 'service_type': 'patching', - 'user_name': self.patching_ks_user_name} - values = {'name': 'patching', - 'enabled': True, - 'region_name': self.region_2_name, - 'capabilities': capabilities} - client.sysinv.sm_service.service_create(**values) - - # mtc service config - capabilities = {'user_name': self.mtce_ks_user_name} - values = {'name': "mtce", - 'enabled': True, - 'region_name': self.region_2_name, - 'capabilities': capabilities} - client.sysinv.sm_service.service_create(**values) - - # nfv service config - capabilities = {'user_name': self.nfv_ks_user_name} - values = {'name': "vim", - 'enabled': True, - 'region_name': self.region_2_name, - 'capabilities': capabilities} - client.sysinv.sm_service.service_create(**values) - - # barbican service config - capabilities = {'user_name': self.barbican_ks_user_name} - values = {'name': "barbican", - 'enabled': True, - 'region_name': self.region_2_name, - 'capabilities': capabilities} - client.sysinv.sm_service.service_create(**values) - - def _store_service_password(self): - # store service password in the temporary keyring vault - - os.environ["XDG_DATA_HOME"] = "/tmp" - - keyring.set_password(self.sysinv_service_name, - constants.DEFAULT_SERVICE_PROJECT_NAME, - self.sysinv_ks_password) - - keyring.set_password('patching', - constants.DEFAULT_SERVICE_PROJECT_NAME, - self.patching_ks_password) - - keyring.set_password('mtce', constants.DEFAULT_SERVICE_PROJECT_NAME, - self.mtce_ks_password) - - keyring.set_password('vim', constants.DEFAULT_SERVICE_PROJECT_NAME, - self.nfv_ks_password) - - keyring.set_password('fm', constants.DEFAULT_SERVICE_PROJECT_NAME, - self.fm_ks_password) - - keyring.set_password('barbican', - constants.DEFAULT_SERVICE_PROJECT_NAME, - self.barbican_ks_password) - - del os.environ["XDG_DATA_HOME"] - - def _populate_network_config(self, client): - self._populate_mgmt_network(client) - self._populate_pxeboot_network(client) - self._populate_oam_network(client) - 
self._populate_multicast_network(client) - if self.kubernetes: - self._populate_cluster_host_network(client) - self._populate_cluster_pod_network(client) - self._populate_cluster_service_network(client) - if self.subcloud_config(): - self._populate_system_controller_network(client) - - def _populate_mgmt_network(self, client): - # create the address pool - values = { - 'name': 'management', - 'network': str(self.management_subnet.network), - 'prefix': self.management_subnet.prefixlen, - 'ranges': [(str(self.management_start_address), - str(self.management_end_address))], - } - if self.management_gateway_address: - values.update({ - 'gateway_address': str(self.management_gateway_address)}) - pool = client.sysinv.address_pool.create(**values) - - # create the network for the pool - values = { - 'type': sysinv_constants.NETWORK_TYPE_MGMT, - 'name': sysinv_constants.NETWORK_TYPE_MGMT, - 'dynamic': self.dynamic_address_allocation, - 'pool_uuid': pool.uuid, - } - - client.sysinv.network.create(**values) - - def _populate_pxeboot_network(self, client): - # create the address pool - values = { - 'name': 'pxeboot', - 'network': str(self.pxeboot_subnet.network), - 'prefix': self.pxeboot_subnet.prefixlen, - 'ranges': [(str(self.pxeboot_start_address), - str(self.pxeboot_end_address))], - } - pool = client.sysinv.address_pool.create(**values) - - # create the network for the pool - values = { - 'type': sysinv_constants.NETWORK_TYPE_PXEBOOT, - 'name': sysinv_constants.NETWORK_TYPE_PXEBOOT, - 'dynamic': True, - 'pool_uuid': pool.uuid, - } - client.sysinv.network.create(**values) - - def _populate_oam_network(self, client): - - # set default range if not specified as part of configuration - self.external_oam_start_address = self.external_oam_subnet[1] - self.external_oam_end_address = self.external_oam_subnet[-2] - - # create the address pool - values = { - 'name': 'oam', - 'network': str(self.external_oam_subnet.network), - 'prefix': self.external_oam_subnet.prefixlen, - 
'ranges': [(str(self.external_oam_start_address), - str(self.external_oam_end_address))], - 'floating_address': str(self.external_oam_floating_address), - } - - if self.system_mode != sysinv_constants.SYSTEM_MODE_SIMPLEX: - values.update({ - 'controller0_address': str(self.external_oam_address_0), - 'controller1_address': str(self.external_oam_address_1), - }) - if self.external_oam_gateway_address: - values.update({ - 'gateway_address': str(self.external_oam_gateway_address), - }) - pool = client.sysinv.address_pool.create(**values) - - # create the network for the pool - values = { - 'type': sysinv_constants.NETWORK_TYPE_OAM, - 'name': sysinv_constants.NETWORK_TYPE_OAM, - 'dynamic': False, - 'pool_uuid': pool.uuid, - } - - client.sysinv.network.create(**values) - - def _populate_multicast_network(self, client): - # create the address pool - values = { - 'name': 'multicast-subnet', - 'network': str(self.management_multicast_subnet.network), - 'prefix': self.management_multicast_subnet.prefixlen, - 'ranges': [(str(self.management_multicast_subnet[1]), - str(self.management_multicast_subnet[-2]))], - } - pool = client.sysinv.address_pool.create(**values) - - # create the network for the pool - values = { - 'type': sysinv_constants.NETWORK_TYPE_MULTICAST, - 'name': sysinv_constants.NETWORK_TYPE_MULTICAST, - 'dynamic': False, - 'pool_uuid': pool.uuid, - } - client.sysinv.network.create(**values) - - def _populate_system_controller_network(self, client): - # create the address pool - values = { - 'name': 'system-controller-subnet', - 'network': str(self.system_controller_subnet.network), - 'prefix': self.system_controller_subnet.prefixlen, - 'floating_address': str(self.system_controller_floating_ip), - } - pool = client.sysinv.address_pool.create(**values) - - # create the network for the pool - values = { - 'type': sysinv_constants.NETWORK_TYPE_SYSTEM_CONTROLLER, - 'name': sysinv_constants.NETWORK_TYPE_SYSTEM_CONTROLLER, - 'dynamic': False, - 'pool_uuid': pool.uuid, 
- } - client.sysinv.network.create(**values) - - def _populate_cluster_host_network(self, client): - - # set default range if not specified as part of configuration - self.cluster_host_subnet_start_address = self.cluster_host_subnet[2] - self.cluster_host_subnet_end_address = self.cluster_host_subnet[-2] - - # create the address pool - values = { - 'name': 'cluster-host-subnet', - 'network': str(self.cluster_host_subnet.network), - 'prefix': self.cluster_host_subnet.prefixlen, - 'ranges': [(str(self.cluster_host_subnet_start_address), - str(self.cluster_host_subnet_end_address))], - } - pool = client.sysinv.address_pool.create(**values) - - # create the network for the pool - values = { - 'type': sysinv_constants.NETWORK_TYPE_CLUSTER_HOST, - 'name': sysinv_constants.NETWORK_TYPE_CLUSTER_HOST, - 'dynamic': self.dynamic_address_allocation, - 'pool_uuid': pool.uuid, - } - client.sysinv.network.create(**values) - - def _populate_cluster_pod_network(self, client): - # create the address pool - values = { - 'name': 'cluster-pod-subnet', - 'network': str(self.cluster_pod_subnet.network), - 'prefix': self.cluster_pod_subnet.prefixlen, - 'ranges': [(str(self.cluster_pod_subnet[1]), - str(self.cluster_pod_subnet[-2]))], - } - pool = client.sysinv.address_pool.create(**values) - - # create the network for the pool - values = { - 'type': sysinv_constants.NETWORK_TYPE_CLUSTER_POD, - 'name': sysinv_constants.NETWORK_TYPE_CLUSTER_POD, - 'dynamic': False, - 'pool_uuid': pool.uuid, - } - client.sysinv.network.create(**values) - - def _populate_cluster_service_network(self, client): - # create the address pool - values = { - 'name': 'cluster-service-subnet', - 'network': str(self.cluster_service_subnet.network), - 'prefix': self.cluster_service_subnet.prefixlen, - 'ranges': [(str(self.cluster_service_subnet[1]), - str(self.cluster_service_subnet[-2]))], - } - pool = client.sysinv.address_pool.create(**values) - - # create the network for the pool - values = { - 'type': 
sysinv_constants.NETWORK_TYPE_CLUSTER_SERVICE, - 'name': sysinv_constants.NETWORK_TYPE_CLUSTER_SERVICE, - 'dynamic': False, - 'pool_uuid': pool.uuid, - } - client.sysinv.network.create(**values) - - def _populate_network_addresses(self, client, pool, network, addresses): - for name, address in addresses.items(): - values = { - 'pool_uuid': pool.uuid, - 'address': str(address), - 'prefix': pool.prefix, - 'name': "%s-%s" % (name, network.type), - } - client.sysinv.address.create(**values) - - def _inventory_config_complete_wait(self, client, controller): - - # This is a gate for the generation of hiera data. - - # TODO: Really need this to detect when inventory is - # TODO: .. complete at the host level rather than each - # TODO: .. individual entity being populated as it is - # TODO: .. today for storage. - - # Wait for sysinv-agent to populate disks and PVs - self._wait_disk_config(client, controller) - self._wait_pv_config(client, controller) - - def _get_management_mac_address(self): - - if self.lag_management_interface: - ifname = self.lag_management_interface_member0 - else: - ifname = self.management_interface - - try: - filename = '/sys/class/net/%s/address' % ifname - with open(filename, 'r') as f: - return f.readline().rstrip() - except Exception: - raise ConfigFail("Failed to obtain mac address of %s" % ifname) - - def _populate_controller_config(self, client): - mgmt_mac = self._get_management_mac_address() - rootfs_device = get_device_from_function(get_rootfs_node) - boot_device = get_device_from_function(find_boot_device) - console = get_console_info() - tboot = get_tboot_info() - install_output = get_orig_install_mode() - - provision_state = sysinv.HOST_PROVISIONED - if utils.is_combined_load(): - provision_state = sysinv.HOST_PROVISIONING - - values = { - 'personality': sysinv.HOST_PERSONALITY_CONTROLLER, - 'hostname': self.controller_hostname_prefix + "0", - 'mgmt_ip': str(self.controller_address_0), - 'mgmt_mac': mgmt_mac, - 'administrative': 
sysinv.HOST_ADMIN_STATE_LOCKED, - 'operational': sysinv.HOST_OPERATIONAL_STATE_DISABLED, - 'availability': sysinv.HOST_AVAIL_STATE_OFFLINE, - 'invprovision': provision_state, - 'rootfs_device': rootfs_device, - 'boot_device': boot_device, - 'console': console, - 'tboot': tboot, - 'install_output': install_output, - } - controller = client.sysinv.ihost.create(**values) - return controller - - def _populate_interface_config(self, client, controller): - # Wait for Ethernet port inventory - self._wait_ethernet_port_config(client, controller) - - self._populate_management_interface(client, controller) - self._populate_oam_interface(client, controller) - if self.kubernetes: - self._populate_cluster_host_interface(client, controller) - - def _update_interface_config(self, client, values): - host_uuid = values.get('ihost_uuid') - ifname = values.get('ifname') - interfaces = client.sysinv.iinterface.list(host_uuid) - for interface in interfaces: - if interface.ifname == ifname: - patch = sysinv.dict_to_patch(values) - client.sysinv.iinterface.update(interface.uuid, patch) - break - else: - raise ConfigFail("Failed to find interface %s" % ifname) - - def _get_interface(self, client, host_uuid, ifname): - interfaces = client.sysinv.iinterface.list(host_uuid) - for interface in interfaces: - if interface.ifname == ifname: - return interface - else: - raise ConfigFail("Failed to find interface %s" % ifname) - - def _get_interface_aemode(self, aemode): - """Convert the AE mode to an AE mode supported by the interface API""" - if aemode == constants.LAG_MODE_ACTIVE_BACKUP: - return 'active_standby' - elif aemode == constants.LAG_MODE_BALANCE_XOR: - return 'balanced' - elif aemode == constants.LAG_MODE_8023AD: - return '802.3ad' - else: - raise ConfigFail("Unknown interface AE mode: %s" % aemode) - - def _get_interface_txhashpolicy(self, aemode): - """Convert the AE mode to a L2 hash supported by the interface API""" - if aemode == constants.LAG_MODE_ACTIVE_BACKUP: - return None - 
elif aemode == constants.LAG_MODE_BALANCE_XOR: - return constants.LAG_TXHASH_LAYER2 - elif aemode == constants.LAG_MODE_8023AD: - return constants.LAG_TXHASH_LAYER2 - else: - raise ConfigFail("Unknown interface AE mode: %s" % aemode) - - def _get_network(self, client, network_type): - networks = client.sysinv.network.list() - for net in networks: - if net.type == network_type: - return net - else: - raise ConfigFail("Failed to find network %s" % type) - - def _get_interface_mtu(self, ifname): - """ - This function determines the MTU value that must be configured on an - interface. It is accounting for the possibility that different network - types are sharing the same interfaces in which case the lowest - interface must have an interface equal to or greater than any of the - VLAN interfaces above it. The input semantic checks enforce specific - precedence rules (e.g., cluster-host must be less than or equal to the - mgmt mtu if cluster-host is a vlan over mgmt), but this function allows - for any permutation to avoid issues if the semantic checks are loosened - or if the ini input method allows different possibities. - - This function must not be used for VLAN interfaces. VLAN interfaces - have no requirement to be large enough to accomodate another VLAN above - it so for those interfaces we simply use the interface MTU as was - specified by the user. 
- """ - value = 0 - if self.management_interface_configured: - if ifname == self.management_interface: - value = max(value, self.management_mtu) - if self.cluster_host_interface_configured: - if ifname == self.cluster_host_interface: - value = max(value, self.cluster_host_mtu) - if self.external_oam_interface_configured: - if ifname == self.external_oam_interface: - value = max(value, self.external_oam_mtu) - assert value != 0 - return value - - def _populate_management_interface(self, client, controller): - """Configure the management/pxeboot interface(s)""" - - interface_class = sysinv_constants.INTERFACE_CLASS_PLATFORM - if self.management_vlan: - network = self._get_network(client, - sysinv_constants.NETWORK_TYPE_PXEBOOT) - else: - network = self._get_network(client, - sysinv_constants.NETWORK_TYPE_MGMT) - - if self.lag_management_interface: - members = [self.lag_management_interface_member0] - if self.lag_management_interface_member1: - members.append(self.lag_management_interface_member1) - - aemode = self._get_interface_aemode( - self.lag_management_interface_policy) - - txhashpolicy = self._get_interface_txhashpolicy( - self.lag_management_interface_policy) - - values = { - 'ihost_uuid': controller.uuid, - 'ifname': self.management_interface, - 'imtu': self.management_mtu, - 'iftype': 'ae', - 'aemode': aemode, - 'txhashpolicy': txhashpolicy, - 'ifclass': interface_class, - 'networks': [str(network.id)], - 'uses': members, - } - - client.sysinv.iinterface.create(**values) - elif self.system_mode == sysinv_constants.SYSTEM_MODE_SIMPLEX and \ - not self.subcloud_config(): - # Create the management interface record for the loopback interface - values = { - 'ihost_uuid': controller.uuid, - 'ifname': self.management_interface, - 'imtu': self.management_mtu, - 'iftype': sysinv_constants.INTERFACE_TYPE_VIRTUAL, - 'ifclass': interface_class, - 'networks': [str(network.id)], - } - client.sysinv.iinterface.create(**values) - else: - # update MTU or network type of 
interface - values = { - 'ihost_uuid': controller.uuid, - 'ifname': self.management_interface, - 'imtu': self.management_mtu, - 'ifclass': interface_class, - 'networks': str(network.id), - } - self._update_interface_config(client, values) - - if self.management_vlan: - mgmt_network = self._get_network( - client, sysinv_constants.NETWORK_TYPE_MGMT) - values = { - 'ihost_uuid': controller.uuid, - 'ifname': self.management_interface_name, - 'imtu': self.management_mtu, - 'iftype': sysinv_constants.INTERFACE_TYPE_VLAN, - 'ifclass': interface_class, - 'networks': [str(mgmt_network.id)], - 'uses': [self.management_interface], - 'vlan_id': self.management_vlan, - } - client.sysinv.iinterface.create(**values) - elif self.subcloud_config(): - # Create a route to the system controller. - # For managament vlan case, route will get - # created upon interface creation if subcloud config. - management_interface = self._get_interface( - client, controller.uuid, self.management_interface_name) - values = { - 'interface_uuid': management_interface.uuid, - 'network': str(self.system_controller_subnet.ip), - 'prefix': self.system_controller_subnet.prefixlen, - 'gateway': str(self.management_gateway_address), - 'metric': 1, - } - client.sysinv.route.create(**values) - - def _populate_default_storage_backend(self, client, controller): - # Create the Ceph monitor for controller-0 - values = {'ihost_uuid': controller.uuid} - client.sysinv.ceph_mon.create(**values) - - # Create the Ceph default backend - values = {'confirmed': True} - client.sysinv.storage_ceph.create(**values) - - def _populate_cluster_host_interface(self, client, controller): - """Configure the cluster host interface(s)""" - network = self._get_network(client, - sysinv_constants.NETWORK_TYPE_CLUSTER_HOST) - - if (self.lag_cluster_host_interface and - self.cluster_host_interface_name != - self.management_interface_name): - members = [self.lag_cluster_host_interface_member0] - if self.lag_cluster_host_interface_member1: - 
members.append(self.lag_cluster_host_interface_member1) - - aemode = self._get_interface_aemode( - self.lag_cluster_host_interface_policy) - - txhashpolicy = self._get_interface_txhashpolicy( - self.lag_cluster_host_interface_policy) - - values = { - 'ihost_uuid': controller.uuid, - 'ifname': self.cluster_host_interface, - 'imtu': self._get_interface_mtu(self.cluster_host_interface), - 'iftype': sysinv_constants.INTERFACE_TYPE_AE, - 'aemode': aemode, - 'txhashpolicy': txhashpolicy, - 'ifclass': sysinv_constants.INTERFACE_CLASS_PLATFORM, - 'networks': [str(network.id)], - 'uses': members, - } - client.sysinv.iinterface.create(**values) - else: - # update MTU or network type of interface - values = { - 'ihost_uuid': controller.uuid, - 'ifname': self.cluster_host_interface, - } - values.update({ - 'imtu': self._get_interface_mtu(self.cluster_host_interface) - }) - if not self.cluster_host_vlan: - values.update({ - 'ifclass': sysinv_constants.INTERFACE_CLASS_PLATFORM, - 'networks_to_add': str(network.id), - }) - self._update_interface_config(client, values) - - if self.cluster_host_vlan: - if (self.cluster_host_interface_name != - self.management_interface_name): - values = { - 'ihost_uuid': controller.uuid, - 'ifname': self.cluster_host_interface_name, - 'imtu': self.cluster_host_mtu, - 'iftype': sysinv_constants.INTERFACE_TYPE_VLAN, - 'ifclass': sysinv_constants.INTERFACE_CLASS_PLATFORM, - 'networks': [str(network.id)], - 'uses': [self.cluster_host_interface], - 'vlan_id': self.cluster_host_vlan, - } - client.sysinv.iinterface.create(**values) - else: - values = { - 'ihost_uuid': controller.uuid, - 'ifname': self.cluster_host_interface_name, - 'ifclass': sysinv_constants.INTERFACE_CLASS_PLATFORM, - 'networks_to_add': str(network.id), - } - self._update_interface_config(client, values) - - def _populate_oam_interface(self, client, controller): - """Configure the OAM interface(s)""" - - network = self._get_network(client, - sysinv_constants.NETWORK_TYPE_OAM) - - if 
self.lag_external_oam_interface: - members = [self.lag_external_oam_interface_member0] - if self.lag_external_oam_interface_member1: - members.append(self.lag_external_oam_interface_member1) - - aemode = self._get_interface_aemode( - self.lag_external_oam_interface_policy) - - txhashpolicy = self._get_interface_txhashpolicy( - self.lag_external_oam_interface_policy) - - values = { - 'ihost_uuid': controller.uuid, - 'ifname': self.external_oam_interface, - 'imtu': self._get_interface_mtu(self.external_oam_interface), - 'iftype': sysinv_constants.INTERFACE_TYPE_AE, - 'aemode': aemode, - 'txhashpolicy': txhashpolicy, - 'ifclass': sysinv_constants.INTERFACE_CLASS_PLATFORM, - 'networks': [str(network.id)], - 'uses': members, - } - - client.sysinv.iinterface.create(**values) - else: - # update MTU or network type of interface - values = { - 'ihost_uuid': controller.uuid, - 'ifname': self.external_oam_interface, - 'ifclass': sysinv_constants.INTERFACE_CLASS_PLATFORM, - } - values.update({ - 'imtu': self._get_interface_mtu(self.external_oam_interface) - }) - if not self.external_oam_vlan: - values.update({ - 'networks': str(network.id), - }) - - self._update_interface_config(client, values) - - if self.external_oam_vlan: - values = { - 'ihost_uuid': controller.uuid, - 'ifname': self.external_oam_interface_name, - 'imtu': self.external_oam_mtu, - 'iftype': sysinv_constants.INTERFACE_TYPE_VLAN, - 'ifclass': sysinv_constants.INTERFACE_CLASS_PLATFORM, - 'networks': [str(network.id)], - 'uses': [self.external_oam_interface], - 'vlan_id': self.external_oam_vlan, - } - client.sysinv.iinterface.create(**values) - - def _populate_load_config(self, client): - patch = {'software_version': SW_VERSION, "compatible_version": "N/A", - "required_patches": "N/A"} - client.sysinv.load.create(**patch) - - def _populate_dns_config(self, client): - # Retrieve the list of dns servers to get the uuid - dns_list = client.sysinv.idns.list() - dns_record = dns_list[0] - values = { - 'nameservers': 
self.get_dns_servers(), - 'action': 'apply' - } - patch = sysinv.dict_to_patch(values) - client.sysinv.idns.update(dns_record.uuid, patch) - - def _populate_docker_config(self, client): - if self.enable_docker_proxy: - proxy_parameter = {} - if self.docker_http_proxy: - proxy_parameter['http_proxy'] = self.docker_http_proxy - if self.docker_https_proxy: - proxy_parameter['https_proxy'] = self.docker_https_proxy - if self.docker_no_proxy: - proxy_parameter['no_proxy'] = self.docker_no_proxy - - if proxy_parameter: - client.sysinv.service_parameter.create( - sysinv_constants.SERVICE_TYPE_DOCKER, - sysinv_constants.SERVICE_PARAM_SECTION_DOCKER_PROXY, - None, - None, - proxy_parameter - ) - - if not self.docker_use_default_registry: - registry_parameter = {} - if self.docker_k8s_registry: - registry_parameter['k8s'] = \ - self.docker_k8s_registry - - if self.docker_gcr_registry: - registry_parameter['gcr'] = \ - self.docker_gcr_registry - - if self.docker_quay_registry: - registry_parameter['quay'] = \ - self.docker_quay_registry - - if self.docker_docker_registry: - registry_parameter['docker'] = \ - self.docker_docker_registry - - if not self.is_secure_registry: - registry_parameter['insecure_registry'] = "True" - - if registry_parameter: - client.sysinv.service_parameter.create( - sysinv_constants.SERVICE_TYPE_DOCKER, - sysinv_constants. 
- SERVICE_PARAM_SECTION_DOCKER_REGISTRY, - None, - None, - registry_parameter - ) - - def populate_initial_config(self): - """Populate initial system inventory configuration""" - try: - with openstack.OpenStack() as client: - self._populate_system_config(client) - self._populate_load_config(client) - self._populate_network_config(client) - if self.kubernetes: - self._populate_dns_config(client) - self._populate_docker_config(client) - controller = self._populate_controller_config(client) - # ceph_mon config requires controller host to be created - self._inventory_config_complete_wait(client, controller) - self._populate_interface_config(client, controller) - self._populate_default_storage_backend(client, controller) - - except (KeystoneFail, SysInvFail) as e: - LOG.exception(e) - raise ConfigFail("Failed to provision initial system " - "configuration") - - def create_puppet_config(self): - try: - utils.create_system_config() - utils.create_host_config() - except Exception as e: - LOG.exception(e) - raise ConfigFail("Failed to update hiera configuration") - - def provision(self, configfile): - """Perform system provisioning only""" - if not self.labmode: - raise ConfigFail("System provisioning only available with " - "lab mode enabled") - if not configfile: - raise ConfigFail("Missing input configuration file") - self.input_config_from_file(configfile) - self.populate_initial_config() - - def configure(self, configfile=None, default_config=False, - display_config=True): - """Configure initial controller node.""" - if (os.path.exists(constants.CGCS_CONFIG_FILE) or - os.path.exists(constants.CONFIG_PERMDIR) or - os.path.exists(constants.INITIAL_CONFIG_COMPLETE_FILE)): - raise ConfigFail("Configuration has already been done " - "and cannot be repeated.") - - try: - with open(os.devnull, "w") as fnull: - subprocess.check_call(["vgdisplay", "cgts-vg"], stdout=fnull, - stderr=fnull) - except subprocess.CalledProcessError: - LOG.error("The cgts-vg volume group was not 
found") - raise ConfigFail("Volume groups not configured") - - if default_config: - self.default_config() - elif not configfile: - self.input_config() - else: - self.input_config_from_file(configfile) - - if display_config: - self.display_config() - - # Validate Openstack passwords loaded in via config - if configfile: - self.process_validation_passwords() - - if not configfile and not default_config: - while True: - user_input = input( - "\nApply the above configuration? [y/n]: ") - if user_input.lower() == 'q': - raise UserQuit - elif user_input.lower() == 'y': - break - elif user_input.lower() == 'n': - raise UserQuit - else: - print("Invalid choice") - - # Verify at most one branding tarball is present - self.verify_branding() - - self.write_config_file() - utils.write_simplex_flag() - - print("\nApplying configuration (this will take several minutes):") - - runner = progress.ProgressRunner() - runner.add(self.create_bootstrap_config, - 'Creating bootstrap configuration') - runner.add(self.apply_bootstrap_manifest, - "Applying bootstrap manifest") - runner.add(self.persist_local_config, - 'Persisting local configuration') - runner.add(self.populate_initial_config, - 'Populating initial system inventory') - runner.add(self.create_puppet_config, - 'Creating system configuration') - runner.add(self.apply_controller_manifest, - 'Applying controller manifest') - runner.add(self.finalize_controller_config, - 'Finalize controller configuration') - runner.add(self.wait_service_enable, - 'Waiting for service activation') - runner.run() - - def check_required_interfaces_status(self): - if self.management_interface_configured: - if not is_interface_up(self.management_interface): - print('') - if (self.system_mode != - sysinv_constants.SYSTEM_MODE_DUPLEX_DIRECT - and self.system_mode != - sysinv_constants.SYSTEM_MODE_SIMPLEX): - print(textwrap.fill( - "Warning: The interface (%s) is not operational " - "and some platform services will not start properly. 
" - "Bring up the interface to enable the required " - "services." % self.management_interface, 80)) - - if self.cluster_host_interface_configured: - if not is_interface_up(self.cluster_host_interface): - if self.system_mode != \ - sysinv_constants.SYSTEM_MODE_DUPLEX_DIRECT: - print('') - print(textwrap.fill( - "Warning: The interface (%s) is not operational " - "and some platform services will not start properly. " - "Bring up the interface to enable the required " - "services." % self.cluster_host_interface, 80)) - - if self.external_oam_interface_configured: - if not is_interface_up(self.external_oam_interface): - print('') - print(textwrap.fill( - "Warning: The interface (%s) is not operational " - "and some OAM services will not start properly. " - "Bring up the interface to enable the required " - "services." % self.external_oam_interface, 80)) diff --git a/controllerconfig/controllerconfig/controllerconfig/openstack.py b/controllerconfig/controllerconfig/controllerconfig/openstack.py deleted file mode 100755 index ab25ae2779..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/openstack.py +++ /dev/null @@ -1,285 +0,0 @@ -# -# Copyright (c) 2014-2015 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -""" -OpenStack -""" - -import os -import time -import subprocess - -from controllerconfig.common import log -from controllerconfig.common.exceptions import SysInvFail -from controllerconfig.common.rest_api_utils import get_token -from controllerconfig import sysinv_api as sysinv - - -LOG = log.get_logger(__name__) - -KEYSTONE_AUTH_SERVER_RETRY_CNT = 60 -KEYSTONE_AUTH_SERVER_WAIT = 1 # 1sec wait per retry - - -class OpenStack(object): - - def __init__(self): - self.admin_token = None - self.conf = {} - self._sysinv = None - - source_command = 'source /etc/platform/openrc && env' - - with open(os.devnull, "w") as fnull: - proc = subprocess.Popen( - ['bash', '-c', source_command], - stdout=subprocess.PIPE, stderr=fnull) - - for line in proc.stdout: - key, _, value = line.partition("=") - if key == 'OS_USERNAME': - self.conf['admin_user'] = value.strip() - elif key == 'OS_PASSWORD': - self.conf['admin_pwd'] = value.strip() - elif key == 'OS_PROJECT_NAME': - self.conf['admin_tenant'] = value.strip() - elif key == 'OS_AUTH_URL': - self.conf['auth_url'] = value.strip() - elif key == 'OS_REGION_NAME': - self.conf['region_name'] = value.strip() - elif key == 'OS_USER_DOMAIN_NAME': - self.conf['user_domain'] = value.strip() - elif key == 'OS_PROJECT_DOMAIN_NAME': - self.conf['project_domain'] = value.strip() - - proc.communicate() - - def __enter__(self): - if not self._connect(): - raise Exception('Failed to connect') - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self._disconnect() - - def __del__(self): - self._disconnect() - - def _connect(self): - """ Connect to an OpenStack instance """ - - if self.admin_token is not None: - self._disconnect() - - # Try to obtain an admin token from keystone - for _ in range(KEYSTONE_AUTH_SERVER_RETRY_CNT): - self.admin_token = get_token(self.conf['auth_url'], - self.conf['admin_tenant'], - self.conf['admin_user'], - self.conf['admin_pwd'], - self.conf['user_domain'], 
- self.conf['project_domain']) - if self.admin_token: - break - time.sleep(KEYSTONE_AUTH_SERVER_WAIT) - - return self.admin_token is not None - - def _disconnect(self): - """ Disconnect from an OpenStack instance """ - self.admin_token = None - - def lock_hosts(self, exempt_hostnames=None, progress_callback=None, - timeout=60): - """ Lock hosts of an OpenStack instance except for host names - in the exempt list - """ - failed_hostnames = [] - - if exempt_hostnames is None: - exempt_hostnames = [] - - hosts = sysinv.get_hosts(self.admin_token, self.conf['region_name']) - if not hosts: - if progress_callback is not None: - progress_callback(0, 0, None, None) - return - - wait = False - host_i = 0 - - for host in hosts: - if host.name in exempt_hostnames: - continue - - if host.is_unlocked(): - if not host.force_lock(self.admin_token, - self.conf['region_name']): - failed_hostnames.append(host.name) - LOG.warning("Could not lock %s" % host.name) - else: - wait = True - else: - host_i += 1 - if progress_callback is not None: - progress_callback(len(hosts), host_i, - ('locking %s' % host.name), - 'DONE') - - if wait and timeout > 5: - time.sleep(5) - timeout -= 5 - - for _ in range(0, timeout): - wait = False - - for host in hosts: - if host.name in exempt_hostnames: - continue - - if (host.name not in failed_hostnames) and host.is_unlocked(): - host.refresh_data(self.admin_token, - self.conf['region_name']) - - if host.is_locked(): - LOG.info("Locked %s" % host.name) - host_i += 1 - if progress_callback is not None: - progress_callback(len(hosts), host_i, - ('locking %s' % host.name), - 'DONE') - else: - LOG.info("Waiting for lock of %s" % host.name) - wait = True - - if not wait: - break - - time.sleep(1) - else: - failed_hostnames.append(host.name) - LOG.warning("Wait failed for lock of %s" % host.name) - - return failed_hostnames - - def power_off_hosts(self, exempt_hostnames=None, progress_callback=None, - timeout=60): - """ Power-off hosts of an OpenStack instance 
except for host names - in the exempt list - """ - - if exempt_hostnames is None: - exempt_hostnames = [] - - hosts = sysinv.get_hosts(self.admin_token, self.conf['region_name']) - - hosts[:] = [host for host in hosts if host.support_power_off()] - if not hosts: - if progress_callback is not None: - progress_callback(0, 0, None, None) - return - - wait = False - host_i = 0 - - for host in hosts: - if host.name in exempt_hostnames: - continue - - if host.is_powered_on(): - if not host.power_off(self.admin_token, - self.conf['region_name']): - raise SysInvFail("Could not power-off %s" % host.name) - wait = True - else: - host_i += 1 - if progress_callback is not None: - progress_callback(len(hosts), host_i, - ('powering off %s' % host.name), - 'DONE') - - if wait and timeout > 5: - time.sleep(5) - timeout -= 5 - - for _ in range(0, timeout): - wait = False - - for host in hosts: - if host.name in exempt_hostnames: - continue - - if host.is_powered_on(): - host.refresh_data(self.admin_token, - self.conf['region_name']) - - if host.is_powered_off(): - LOG.info("Powered-Off %s" % host.name) - host_i += 1 - if progress_callback is not None: - progress_callback(len(hosts), host_i, - ('powering off %s' % host.name), - 'DONE') - else: - LOG.info("Waiting for power-off of %s" % host.name) - wait = True - - if not wait: - break - - time.sleep(1) - else: - failed_hosts = [h.name for h in hosts if h.is_powered_on()] - msg = "Wait timeout for power-off of %s" % failed_hosts - LOG.info(msg) - raise SysInvFail(msg) - - def wait_for_hosts_disabled(self, exempt_hostnames=None, timeout=300, - interval_step=10): - """Wait for hosts to be identified as disabled. 
- Run check every interval_step seconds - """ - if exempt_hostnames is None: - exempt_hostnames = [] - - for _ in range(timeout / interval_step): - hosts = sysinv.get_hosts(self.admin_token, - self.conf['region_name']) - if not hosts: - time.sleep(interval_step) - continue - - for host in hosts: - if host.name in exempt_hostnames: - continue - - if host.is_enabled(): - LOG.info("host %s is still enabled" % host.name) - break - else: - LOG.info("all hosts disabled.") - return True - - time.sleep(interval_step) - - return False - - @property - def sysinv(self): - if self._sysinv is None: - # TOX cannot import cgts_client and all the dependencies therefore - # the client is being lazy loaded since TOX doesn't actually - # require the cgtsclient module. - from cgtsclient import client as cgts_client - - endpoint = self.admin_token.get_service_url( - self.conf['region_name'], "sysinv", "platform", 'admin') - self._sysinv = cgts_client.Client( - sysinv.API_VERSION, - endpoint=endpoint, - token=self.admin_token.get_id()) - - return self._sysinv diff --git a/controllerconfig/controllerconfig/controllerconfig/progress.py b/controllerconfig/controllerconfig/controllerconfig/progress.py deleted file mode 100644 index 72e4e7fcc6..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/progress.py +++ /dev/null @@ -1,31 +0,0 @@ -import sys - -from controllerconfig.common import log - -LOG = log.get_logger(__name__) - - -class ProgressRunner(object): - steps = [] - - def add(self, action, message): - self.steps.append((action, message)) - - def run(self): - total = len(self.steps) - for i, step in enumerate(self.steps, start=1): - action, message = step - LOG.info("Start step: %s" % message) - sys.stdout.write( - "\n%.2u/%.2u: %s ... 
" % (i, total, message)) - sys.stdout.flush() - try: - action() - sys.stdout.write('DONE') - sys.stdout.flush() - except Exception: - sys.stdout.flush() - raise - LOG.info("Finish step: %s" % message) - sys.stdout.write("\n") - sys.stdout.flush() diff --git a/controllerconfig/controllerconfig/controllerconfig/regionconfig.py b/controllerconfig/controllerconfig/controllerconfig/regionconfig.py deleted file mode 100755 index eee9b66f54..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/regionconfig.py +++ /dev/null @@ -1,629 +0,0 @@ -""" -Copyright (c) 2015-2019 Wind River Systems, Inc. - -SPDX-License-Identifier: Apache-2.0 - -""" - -from __future__ import print_function -from six.moves import configparser -import os -import subprocess -import sys -import textwrap -import time -from controllerconfig import utils -import uuid - -from controllerconfig.common import constants -from controllerconfig.common import log -from controllerconfig.common import rest_api_utils as rutils -from controllerconfig.common.exceptions import KeystoneFail -from controllerconfig.common.configobjects import REGION_CONFIG -from controllerconfig.common.configobjects import SUBCLOUD_CONFIG -from controllerconfig import ConfigFail -from controllerconfig.configassistant import ConfigAssistant -from controllerconfig.systemconfig import parse_system_config -from controllerconfig.systemconfig import configure_management_interface -from controllerconfig.systemconfig import create_cgcs_config_file -from controllerconfig import DEFAULT_DOMAIN_NAME - -# Temporary file for building cgcs_config -TEMP_CGCS_CONFIG_FILE = "/tmp/cgcs_config" - -# For region mode, this is the list of users that we expect to find configured -# in the region config file as _USER_KEY and _PASSWORD. -# For distributed cloud, this is the list of users that we expect to find -# configured in keystone. 
The password for each user will be retrieved from -# the DC Manager in the system controller and added to the region config file. -# The format is: -# REGION_NAME = key in region config file for this user's region -# USER_KEY = key in region config file for this user's name -# USER_NAME = user name in keystone - -REGION_NAME = 0 -USER_KEY = 1 -USER_NAME = 2 - -EXPECTED_USERS = [ - ('REGION_2_SERVICES', 'SYSINV', 'sysinv'), - ('REGION_2_SERVICES', 'PATCHING', 'patching'), - ('REGION_2_SERVICES', 'NFV', 'vim'), - ('REGION_2_SERVICES', 'MTCE', 'mtce'), - ('REGION_2_SERVICES', 'FM', 'fm'), - ('REGION_2_SERVICES', 'BARBICAN', 'barbican')] - -# This a description of the region 2 endpoints that we expect to configure or -# find configured in keystone. The format is as follows: -# SERVICE_NAME = key in region config file for this service's name -# SERVICE_TYPE = key in region config file for this service's type -# PUBLIC_URL = required publicurl - {} is replaced with CAM floating IP -# INTERNAL_URL = required internalurl - {} is replaced with CLM floating IP -# ADMIN_URL = required adminurl - {} is replaced with CLM floating IP -# DESCRIPTION = Description of the service (for automatic configuration) - -SERVICE_NAME = 0 -SERVICE_TYPE = 1 -PUBLIC_URL = 2 -INTERNAL_URL = 3 -ADMIN_URL = 4 -DESCRIPTION = 5 - -EXPECTED_REGION2_ENDPOINTS = [ - ('SYSINV_SERVICE_NAME', 'SYSINV_SERVICE_TYPE', - 'http://{}:6385/v1', - 'http://{}:6385/v1', - 'http://{}:6385/v1', - 'SysInv Service'), - ('PATCHING_SERVICE_NAME', 'PATCHING_SERVICE_TYPE', - 'http://{}:15491', - 'http://{}:5491', - 'http://{}:5491', - 'Patching Service'), - ('NFV_SERVICE_NAME', 'NFV_SERVICE_TYPE', - 'http://{}:4545', - 'http://{}:4545', - 'http://{}:4545', - 'Virtual Infrastructure Manager'), - ('FM_SERVICE_NAME', 'FM_SERVICE_TYPE', - 'http://{}:18002', - 'http://{}:18002', - 'http://{}:18002', - 'Fault Management Service'), - ('BARBICAN_SERVICE_NAME', 'BARBICAN_SERVICE_TYPE', - 'http://{}:9311', - 'http://{}:9311', - 
'http://{}:9311', - 'OpenStack Key Manager Service'), -] - -EXPECTED_KEYSTONE_ENDPOINT = ( - 'KEYSTONE_SERVICE_NAME', 'KEYSTONE_SERVICE_TYPE', - 'http://{}:8081/keystone/main/v2.0', - 'http://{}:8081/keystone/main/v2.0', - 'http://{}:8081/keystone/admin/v2.0', - 'OpenStack Identity') - - -LOG = log.get_logger(__name__) - - -def validate_region_one_keystone_config(region_config, token, api_url, users, - services, endpoints, create=False, - config_type=REGION_CONFIG, - user_config=None): - """ Validate that the required region one configuration are in place, - if create is True, any missing entries will be set up to be added - to keystone later on by puppet. - """ - - region_1_name = region_config.get('SHARED_SERVICES', 'REGION_NAME') - region_2_name = region_config.get('REGION_2_SERVICES', 'REGION_NAME') - - # Determine what keystone entries are expected - expected_users = EXPECTED_USERS - expected_region_2_endpoints = EXPECTED_REGION2_ENDPOINTS - # Keystone is always in region 1 - expected_region_1_endpoints = [EXPECTED_KEYSTONE_ENDPOINT] - - domains = rutils.get_domains(token, api_url) - # Verify service project domain, creating if necessary - if region_config.has_option('REGION_2_SERVICES', 'PROJECT_DOMAIN_NAME'): - project_domain = region_config.get('REGION_2_SERVICES', - 'PROJECT_DOMAIN_NAME') - else: - project_domain = DEFAULT_DOMAIN_NAME - project_domain_id = domains.get_domain_id(project_domain) - if not project_domain_id: - if create and config_type == REGION_CONFIG: - region_config.set('REGION_2_SERVICES', 'PROJECT_DOMAIN_NAME', - project_domain) - else: - raise ConfigFail( - "Keystone configuration error: service project domain '%s' is " - "not configured." 
% project_domain) - - # Verify service project, creating if necessary - if region_config.has_option('SHARED_SERVICES', - 'SERVICE_PROJECT_NAME'): - service_project = region_config.get('SHARED_SERVICES', - 'SERVICE_PROJECT_NAME') - else: - service_project = region_config.get('SHARED_SERVICES', - 'SERVICE_TENANT_NAME') - projects = rutils.get_projects(token, api_url) - project_id = projects.get_project_id(service_project) - if not project_id: - if create and config_type == REGION_CONFIG: - region_config.set('SHARED_SERVICES', 'SERVICE_TENANT_NAME', - service_project) - else: - raise ConfigFail( - "Keystone configuration error: service project '%s' is not " - "configured." % service_project) - - # Verify and retrieve the id of the admin role (only needed when creating) - roles = rutils.get_roles(token, api_url) - role_id = roles.get_role_id('admin') - if not role_id and create: - raise ConfigFail("Keystone configuration error: No admin role present") - - # verify that the service user domain is configured, creating if necessary - if region_config.has_option('REGION_2_SERVICES', 'USER_DOMAIN_NAME'): - user_domain = region_config.get('REGION_2_SERVICES', - 'USER_DOMAIN_NAME') - else: - user_domain = DEFAULT_DOMAIN_NAME - domains = rutils.get_domains(token, api_url) - user_domain_id = domains.get_domain_id(user_domain) - if not user_domain_id: - if create and config_type == REGION_CONFIG: - region_config.set('REGION_2_SERVICES', - 'USER_DOMAIN_NAME') - else: - raise ConfigFail( - "Unable to obtain id for for %s domain. Please ensure " - "keystone configuration is correct." 
% user_domain) - - auth_url = region_config.get('SHARED_SERVICES', 'KEYSTONE_ADMINURL') - if config_type == REGION_CONFIG: - # Verify that all users are configured and can retrieve a token, - # Optionally set up to create missing users + their admin role - for user in expected_users: - auth_user = region_config.get(user[REGION_NAME], - user[USER_KEY] + '_USER_NAME') - user_id = users.get_user_id(auth_user) - auth_password = None - if not user_id and create: - if not region_config.has_option( - user[REGION_NAME], user[USER_KEY] + '_PASSWORD'): - # Generate random password for new user via - # /dev/urandom if necessary - try: - region_config.set( - user[REGION_NAME], user[USER_KEY] + '_PASSWORD', - uuid.uuid4().hex[:10] + "TiC2*") - except Exception as e: - raise ConfigFail("Failed to generate random user " - "password: %s" % e) - elif user_id and user_domain_id and\ - project_id and project_domain_id: - # If there is a user_id existing then we cannot use - # a randomized password as it was either created by - # a previous run of regionconfig or was created as - # part of Titanium Cloud Primary region config - if not region_config.has_option( - user[REGION_NAME], user[USER_KEY] + '_PASSWORD'): - raise ConfigFail("Failed to find configured password " - "for pre-defined user %s" % auth_user) - auth_password = region_config.get(user[REGION_NAME], - user[USER_KEY] + '_PASSWORD') - # Verify that the existing user can seek an auth token - user_token = rutils.get_token(auth_url, service_project, - auth_user, - auth_password, user_domain, - project_domain) - if not user_token: - raise ConfigFail( - "Unable to obtain keystone token for %s user. " - "Please ensure keystone configuration is correct." - % auth_user) - else: - # For subcloud configs we re-use the users from the system controller - # (the primary region). 
- for user in expected_users: - auth_user = user[USER_NAME] - user_id = users.get_user_id(auth_user) - auth_password = None - - if user_id: - # Add the password to the region config so it will be used when - # configuring services. - auth_password = user_config.get_password(user[USER_NAME]) - region_config.set(user[REGION_NAME], - user[USER_KEY] + '_PASSWORD', - auth_password) - else: - raise ConfigFail( - "Unable to obtain user (%s). Please ensure " - "keystone configuration is correct." % user[USER_NAME]) - - # Verify that the existing user can seek an auth token - user_token = rutils.get_token(auth_url, service_project, auth_user, - auth_password, user_domain, - project_domain) - if not user_token: - raise ConfigFail( - "Unable to obtain keystone token for %s user. " - "Please ensure keystone configuration is correct." % - auth_user) - - # Verify that region two endpoints & services for shared services - # match our requirements, optionally creating missing entries - for endpoint in expected_region_1_endpoints: - service_name = region_config.get('SHARED_SERVICES', - endpoint[SERVICE_NAME]) - service_type = region_config.get('SHARED_SERVICES', - endpoint[SERVICE_TYPE]) - - try: - service_id = services.get_service_id(service_name, service_type) - except KeystoneFail as ex: - # No option to create services for region one, if those are not - # present, something is seriously wrong - raise ex - - # Extract region one url information from the existing endpoint entry: - try: - endpoints.get_service_url( - region_1_name, service_id, "public") - endpoints.get_service_url( - region_1_name, service_id, "internal") - endpoints.get_service_url( - region_1_name, service_id, "admin") - except KeystoneFail as ex: - # Fail since shared services endpoints are not found - raise ConfigFail("Endpoint for shared service %s " - "is not configured" % service_name) - - # Verify that region two endpoints & services match our requirements, - # optionally creating missing entries - 
public_address = utils.get_optional(region_config, 'CAN_NETWORK', - 'CAN_IP_START_ADDRESS') - if not public_address: - public_address = utils.get_optional(region_config, 'CAN_NETWORK', - 'CAN_IP_FLOATING_ADDRESS') - if not public_address: - public_address = utils.get_optional(region_config, 'OAM_NETWORK', - 'IP_START_ADDRESS') - if not public_address: - # AIO-SX configuration - public_address = utils.get_optional(region_config, 'OAM_NETWORK', - 'IP_ADDRESS') - if not public_address: - public_address = region_config.get('OAM_NETWORK', - 'IP_FLOATING_ADDRESS') - - if region_config.has_section('CLM_NETWORK'): - internal_address = region_config.get('CLM_NETWORK', - 'CLM_IP_START_ADDRESS') - else: - internal_address = region_config.get('MGMT_NETWORK', - 'IP_START_ADDRESS') - - for endpoint in expected_region_2_endpoints: - service_name = utils.get_service(region_config, 'REGION_2_SERVICES', - endpoint[SERVICE_NAME]) - service_type = utils.get_service(region_config, 'REGION_2_SERVICES', - endpoint[SERVICE_TYPE]) - service_id = services.get_service_id(service_name, service_type) - - expected_public_url = endpoint[PUBLIC_URL].format(public_address) - - expected_internal_url = endpoint[INTERNAL_URL].format(internal_address) - expected_admin_url = endpoint[ADMIN_URL].format(internal_address) - - try: - public_url = endpoints.get_service_url(region_2_name, service_id, - "public") - internal_url = endpoints.get_service_url(region_2_name, service_id, - "internal") - admin_url = endpoints.get_service_url(region_2_name, service_id, - "admin") - except KeystoneFail as ex: - # The endpoint will be created optionally - if not create: - raise ConfigFail("Keystone configuration error: Unable to " - "find endpoints for service %s" - % service_name) - continue - - # Validate the existing endpoints - for endpointtype, found, expected in [ - ('public', public_url, expected_public_url), - ('internal', internal_url, expected_internal_url), - ('admin', admin_url, expected_admin_url)]: - if 
found != expected: - raise ConfigFail( - "Keystone configuration error for:\nregion ({}), " - "service name ({}), service type ({})\n" - "expected {}: {}\nconfigured {}: {}".format( - region_2_name, service_name, service_type, - endpointtype, expected, endpointtype, found)) - - -def validate_region_one_ldap_config(region_config): - """Validate ldap on region one by a ldap search""" - - ldapserver_uri = region_config.get('SHARED_SERVICES', 'LDAP_SERVICE_URL') - cmd = ["ldapsearch", "-xH", ldapserver_uri, - "-b", "dc=cgcs,dc=local", "(objectclass=*)"] - try: - with open(os.devnull, "w") as fnull: - subprocess.check_call(cmd, stdout=fnull, stderr=fnull) - except subprocess.CalledProcessError: - raise ConfigFail("LDAP configuration error: not accessible") - - -def set_subcloud_config_defaults(region_config): - """Set defaults in region_config for subclouds""" - - # We always create endpoints for subclouds - region_config.set('REGION_2_SERVICES', 'CREATE', 'Y') - - # We use the default service project - region_config.set('SHARED_SERVICES', 'SERVICE_PROJECT_NAME', - constants.DEFAULT_SERVICE_PROJECT_NAME) - - # Add the necessary users to the region config, which will allow the - # validation code to run and will later result in services being - # configured to use the users from the system controller. - expected_users = EXPECTED_USERS - - for user in expected_users: - # Add the user to the region config so to allow validation. - region_config.set(user[REGION_NAME], user[USER_KEY] + '_USER_NAME', - user[USER_NAME]) - - -def configure_region(config_file, config_type=REGION_CONFIG): - """Configure the region""" - - # Parse the region/subcloud config file - print("Parsing configuration file... 
", end=' ') - region_config = parse_system_config(config_file) - print("DONE") - - if config_type == SUBCLOUD_CONFIG: - # Set defaults in region_config for subclouds - set_subcloud_config_defaults(region_config) - - # Validate the region/subcloud config file - print("Validating configuration file... ", end=' ') - try: - create_cgcs_config_file(None, region_config, None, None, None, - config_type=config_type, - validate_only=True) - except configparser.Error as e: - raise ConfigFail("Error parsing configuration file %s: %s" % - (config_file, e)) - print("DONE") - - # Bring up management interface to allow us to reach Region 1 - print("Configuring management interface... ", end=' ') - configure_management_interface(region_config, config_type=config_type) - print("DONE") - - # Get token from keystone - print("Retrieving keystone token...", end=' ') - sys.stdout.flush() - auth_url = region_config.get('SHARED_SERVICES', 'KEYSTONE_ADMINURL') - if region_config.has_option('SHARED_SERVICES', 'ADMIN_TENANT_NAME'): - auth_project = region_config.get('SHARED_SERVICES', - 'ADMIN_TENANT_NAME') - else: - auth_project = region_config.get('SHARED_SERVICES', - 'ADMIN_PROJECT_NAME') - auth_user = region_config.get('SHARED_SERVICES', 'ADMIN_USER_NAME') - auth_password = region_config.get('SHARED_SERVICES', 'ADMIN_PASSWORD') - if region_config.has_option('SHARED_SERVICES', 'ADMIN_USER_DOMAIN'): - admin_user_domain = region_config.get('SHARED_SERVICES', - 'ADMIN_USER_DOMAIN') - else: - admin_user_domain = DEFAULT_DOMAIN_NAME - if region_config.has_option('SHARED_SERVICES', - 'ADMIN_PROJECT_DOMAIN'): - admin_project_domain = region_config.get('SHARED_SERVICES', - 'ADMIN_PROJECT_DOMAIN') - else: - admin_project_domain = DEFAULT_DOMAIN_NAME - - attempts = 0 - token = None - # Wait for connectivity to region one. It can take some time, especially if - # we have LAG on the management network. 
- while not token: - token = rutils.get_token(auth_url, auth_project, auth_user, - auth_password, admin_user_domain, - admin_project_domain) - if not token: - attempts += 1 - if attempts < 10: - print("\rRetrieving keystone token...{}".format( - '.' * attempts), end=' ') - sys.stdout.flush() - time.sleep(10) - else: - raise ConfigFail( - "Unable to obtain keystone token. Please ensure " - "networking and keystone configuration is correct.") - print("DONE") - - # Get services, endpoints, users and domains from keystone - print("Retrieving services, endpoints and users from keystone... ", - end=' ') - region_name = region_config.get('SHARED_SERVICES', 'REGION_NAME') - service_name = region_config.get('SHARED_SERVICES', - 'KEYSTONE_SERVICE_NAME') - service_type = region_config.get('SHARED_SERVICES', - 'KEYSTONE_SERVICE_TYPE') - - api_url = token.get_service_url( - region_name, service_name, service_type, "admin").replace( - 'v2.0', 'v3') - - services = rutils.get_services(token, api_url) - endpoints = rutils.get_endpoints(token, api_url) - users = rutils.get_users(token, api_url) - domains = rutils.get_domains(token, api_url) - if not services or not endpoints or not users: - raise ConfigFail( - "Unable to retrieve services, endpoints or users from keystone. " - "Please ensure networking and keystone configuration is correct.") - print("DONE") - - user_config = None - if config_type == SUBCLOUD_CONFIG: - # Retrieve subcloud configuration from dcmanager - print("Retrieving configuration from dcmanager... 
", end=' ') - dcmanager_url = token.get_service_url( - 'SystemController', 'dcmanager', 'dcmanager', "admin") - subcloud_name = region_config.get('REGION_2_SERVICES', - 'REGION_NAME') - subcloud_management_subnet = region_config.get('MGMT_NETWORK', - 'CIDR') - hash_string = subcloud_name + subcloud_management_subnet - subcloud_config = rutils.get_subcloud_config(token, dcmanager_url, - subcloud_name, - hash_string) - user_config = subcloud_config['users'] - print("DONE") - - try: - # Configure missing region one keystone entries - create = True - # Prepare region configuration for puppet to create keystone identities - if (region_config.has_option('REGION_2_SERVICES', 'CREATE') and - region_config.get('REGION_2_SERVICES', 'CREATE') == 'Y'): - print("Preparing keystone configuration... ", end=' ') - # If keystone configuration for this region already in place, - # validate it only - else: - # Validate region one keystone config - create = False - print("Validating keystone configuration... ", end=' ') - - validate_region_one_keystone_config(region_config, token, api_url, - users, services, endpoints, create, - config_type=config_type, - user_config=user_config) - print("DONE") - - # validate ldap if it is shared - if region_config.has_option('SHARED_SERVICES', 'LDAP_SERVICE_URL'): - print("Validating ldap configuration... ", end=' ') - validate_region_one_ldap_config(region_config) - print("DONE") - - # Create cgcs_config file - print("Creating config apply file... 
", end=' ') - try: - create_cgcs_config_file(TEMP_CGCS_CONFIG_FILE, region_config, - services, endpoints, domains, - config_type=config_type) - except configparser.Error as e: - raise ConfigFail("Error parsing configuration file %s: %s" % - (config_file, e)) - print("DONE") - - # Configure controller - assistant = ConfigAssistant() - assistant.configure(TEMP_CGCS_CONFIG_FILE, display_config=False) - - except ConfigFail as e: - print("A configuration failure has occurred.", end=' ') - raise e - - -def show_help_region(): - print("Usage: %s [OPTIONS] " % sys.argv[0]) - print(textwrap.fill( - "Perform region configuration using the region " - "configuration from CONFIG_FILE.", 80)) - print("--allow-ssh Allow configuration to be executed in " - "ssh\n") - - -def show_help_subcloud(): - print("Usage: %s [OPTIONS] " % sys.argv[0]) - print(textwrap.fill( - "Perform subcloud configuration using the subcloud " - "configuration from CONFIG_FILE.", 80)) - print("--allow-ssh Allow configuration to be executed in " - "ssh\n") - - -def config_main(config_type=REGION_CONFIG): - allow_ssh = False - if config_type == REGION_CONFIG: - config_file = "/home/sysadmin/region_config" - elif config_type == SUBCLOUD_CONFIG: - config_file = "/home/sysadmin/subcloud_config" - else: - raise ConfigFail("Invalid config_type: %s" % config_type) - - arg = 1 - while arg < len(sys.argv): - if sys.argv[arg] in ['--help', '-h', '-?']: - if config_type == REGION_CONFIG: - show_help_region() - else: - show_help_subcloud() - exit(1) - elif sys.argv[arg] == "--allow-ssh": - allow_ssh = True - elif arg == len(sys.argv) - 1: - config_file = sys.argv[arg] - else: - print("Invalid option. 
Use --help for more information.") - exit(1) - arg += 1 - - log.configure() - - # Check if that the command is being run from the console - if utils.is_ssh_parent(): - if allow_ssh: - print(textwrap.fill(constants.SSH_WARNING_MESSAGE, 80)) - print('') - else: - print(textwrap.fill(constants.SSH_ERROR_MESSAGE, 80)) - exit(1) - - if not os.path.isfile(config_file): - print("Config file %s does not exist." % config_file) - exit(1) - - try: - configure_region(config_file, config_type=config_type) - except KeyboardInterrupt: - print("\nAborting configuration") - except ConfigFail as e: - LOG.exception(e) - print("\nConfiguration failed: {}".format(e)) - except Exception as e: - LOG.exception(e) - print("\nConfiguration failed: {}".format(e)) - else: - print("\nConfiguration finished successfully.") - finally: - if os.path.isfile(TEMP_CGCS_CONFIG_FILE): - os.remove(TEMP_CGCS_CONFIG_FILE) - - -def region_main(): - config_main(REGION_CONFIG) - - -def subcloud_main(): - config_main(SUBCLOUD_CONFIG) diff --git a/controllerconfig/controllerconfig/controllerconfig/sysinv_api.py b/controllerconfig/controllerconfig/controllerconfig/sysinv_api.py deleted file mode 100644 index dd520c5b92..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/sysinv_api.py +++ /dev/null @@ -1,579 +0,0 @@ -# -# Copyright (c) 2014-2018 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -""" -System Inventory Interactions -""" - -import json -import openstack - -from six.moves.urllib import request as urlrequest -from six.moves.urllib.error import URLError -from six.moves.urllib.error import HTTPError - -from controllerconfig.common import log -from controllerconfig.common.exceptions import KeystoneFail - - -LOG = log.get_logger(__name__) - -API_VERSION = 1 - -# Host Personality Constants -HOST_PERSONALITY_NOT_SET = "" -HOST_PERSONALITY_UNKNOWN = "unknown" -HOST_PERSONALITY_CONTROLLER = "controller" -HOST_PERSONALITY_WORKER = "worker" -HOST_PERSONALITY_STORAGE = "storage" - -# Host Administrative State Constants -HOST_ADMIN_STATE_NOT_SET = "" -HOST_ADMIN_STATE_UNKNOWN = "unknown" -HOST_ADMIN_STATE_LOCKED = "locked" -HOST_ADMIN_STATE_UNLOCKED = "unlocked" - -# Host Operational State Constants -HOST_OPERATIONAL_STATE_NOT_SET = "" -HOST_OPERATIONAL_STATE_UNKNOWN = "unknown" -HOST_OPERATIONAL_STATE_ENABLED = "enabled" -HOST_OPERATIONAL_STATE_DISABLED = "disabled" - -# Host Availability State Constants -HOST_AVAIL_STATE_NOT_SET = "" -HOST_AVAIL_STATE_UNKNOWN = "unknown" -HOST_AVAIL_STATE_AVAILABLE = "available" -HOST_AVAIL_STATE_ONLINE = "online" -HOST_AVAIL_STATE_OFFLINE = "offline" -HOST_AVAIL_STATE_POWERED_OFF = "powered-off" -HOST_AVAIL_STATE_POWERED_ON = "powered-on" - -# Host Board Management Constants -HOST_BM_TYPE_NOT_SET = "" -HOST_BM_TYPE_UNKNOWN = "unknown" -HOST_BM_TYPE_ILO3 = 'ilo3' -HOST_BM_TYPE_ILO4 = 'ilo4' - -# Host invprovision state -HOST_PROVISIONING = "provisioning" -HOST_PROVISIONED = "provisioned" - - -class Host(object): - def __init__(self, hostname, host_data=None): - self.name = hostname - self.personality = HOST_PERSONALITY_NOT_SET - self.admin_state = HOST_ADMIN_STATE_NOT_SET - self.operational_state = HOST_OPERATIONAL_STATE_NOT_SET - self.avail_status = [] - self.bm_type = HOST_BM_TYPE_NOT_SET - self.uuid = None - self.config_status = None - self.invprovision = None - 
self.boot_device = None - self.rootfs_device = None - self.console = None - self.tboot = None - - if host_data is not None: - self.__host_set_state__(host_data) - - def __host_set_state__(self, host_data): - if host_data is None: - self.admin_state = HOST_ADMIN_STATE_UNKNOWN - self.operational_state = HOST_OPERATIONAL_STATE_UNKNOWN - self.avail_status = [] - self.bm_type = HOST_BM_TYPE_NOT_SET - - # Set personality - if host_data['personality'] == "controller": - self.personality = HOST_PERSONALITY_CONTROLLER - elif host_data['personality'] == "worker": - self.personality = HOST_PERSONALITY_WORKER - elif host_data['personality'] == "storage": - self.personality = HOST_PERSONALITY_STORAGE - else: - self.personality = HOST_PERSONALITY_UNKNOWN - - # Set administrative state - if host_data['administrative'] == "locked": - self.admin_state = HOST_ADMIN_STATE_LOCKED - elif host_data['administrative'] == "unlocked": - self.admin_state = HOST_ADMIN_STATE_UNLOCKED - else: - self.admin_state = HOST_ADMIN_STATE_UNKNOWN - - # Set operational state - if host_data['operational'] == "enabled": - self.operational_state = HOST_OPERATIONAL_STATE_ENABLED - elif host_data['operational'] == "disabled": - self.operational_state = HOST_OPERATIONAL_STATE_DISABLED - else: - self.operational_state = HOST_OPERATIONAL_STATE_UNKNOWN - - # Set availability status - self.avail_status[:] = [] - if host_data['availability'] == "available": - self.avail_status.append(HOST_AVAIL_STATE_AVAILABLE) - elif host_data['availability'] == "online": - self.avail_status.append(HOST_AVAIL_STATE_ONLINE) - elif host_data['availability'] == "offline": - self.avail_status.append(HOST_AVAIL_STATE_OFFLINE) - elif host_data['availability'] == "power-on": - self.avail_status.append(HOST_AVAIL_STATE_POWERED_ON) - elif host_data['availability'] == "power-off": - self.avail_status.append(HOST_AVAIL_STATE_POWERED_OFF) - else: - self.avail_status.append(HOST_AVAIL_STATE_AVAILABLE) - - # Set board management type - if 
host_data['bm_type'] is None: - self.bm_type = HOST_BM_TYPE_NOT_SET - elif host_data['bm_type'] == 'ilo3': - self.bm_type = HOST_BM_TYPE_ILO3 - elif host_data['bm_type'] == 'ilo4': - self.bm_type = HOST_BM_TYPE_ILO4 - else: - self.bm_type = HOST_BM_TYPE_UNKNOWN - - if host_data['invprovision'] == 'provisioned': - self.invprovision = HOST_PROVISIONED - else: - self.invprovision = HOST_PROVISIONING - - self.uuid = host_data['uuid'] - self.config_status = host_data['config_status'] - self.boot_device = host_data['boot_device'] - self.rootfs_device = host_data['rootfs_device'] - self.console = host_data['console'] - self.tboot = host_data['tboot'] - - def __host_update__(self, admin_token, region_name): - try: - url = admin_token.get_service_admin_url("platform", "sysinv", - region_name) - url += "/ihosts/" + self.name - - request_info = urlrequest.Request(url) - request_info.add_header("X-Auth-Token", admin_token.get_id()) - request_info.add_header("Accept", "application/json") - - request = urlrequest.urlopen(request_info) - response = json.loads(request.read()) - request.close() - return response - - except KeystoneFail as e: - LOG.error("Keystone authentication failed:{} ".format(e)) - return None - - except HTTPError as e: - LOG.error("%s, %s" % (e.code, e.read())) - if e.code == 401: - admin_token.set_expired() - return None - - except URLError as e: - LOG.error(e) - return None - - def __host_action__(self, admin_token, action, region_name): - try: - url = admin_token.get_service_admin_url("platform", "sysinv", - region_name) - url += "/ihosts/" + self.name - - request_info = urlrequest.Request(url) - request_info.get_method = lambda: 'PATCH' - request_info.add_header("X-Auth-Token", admin_token.get_id()) - request_info.add_header("Content-type", "application/json") - request_info.add_header("Accept", "application/json") - request_info.add_data(action) - - request = urlrequest.urlopen(request_info) - request.close() - return True - - except KeystoneFail as e: - 
LOG.error("Keystone authentication failed:{} ".format(e)) - return False - - except HTTPError as e: - LOG.error("%s, %s" % (e.code, e.read())) - if e.code == 401: - admin_token.set_expired() - return False - - except URLError as e: - LOG.error(e) - return False - - def is_unlocked(self): - return(self.admin_state == HOST_ADMIN_STATE_UNLOCKED) - - def is_locked(self): - return(not self.is_unlocked()) - - def is_enabled(self): - return(self.admin_state == HOST_ADMIN_STATE_UNLOCKED and - self.operational_state == HOST_OPERATIONAL_STATE_ENABLED) - - def is_controller_enabled_provisioned(self): - return(self.admin_state == HOST_ADMIN_STATE_UNLOCKED and - self.operational_state == HOST_OPERATIONAL_STATE_ENABLED and - self.personality == HOST_PERSONALITY_CONTROLLER and - self.invprovision == HOST_PROVISIONED) - - def is_disabled(self): - return(not self.is_enabled()) - - def support_power_off(self): - return(HOST_BM_TYPE_NOT_SET != self.bm_type) - - def is_powered_off(self): - for status in self.avail_status: - if status == HOST_AVAIL_STATE_POWERED_OFF: - return(self.admin_state == HOST_ADMIN_STATE_LOCKED and - self.operational_state == - HOST_OPERATIONAL_STATE_DISABLED) - return False - - def is_powered_on(self): - return not self.is_powered_off() - - def refresh_data(self, admin_token, region_name): - """ Ask the System Inventory for an update view of the host """ - - host_data = self.__host_update__(admin_token, region_name) - self.__host_set_state__(host_data) - - def lock(self, admin_token, region_name): - """ Asks the Platform to perform a lock against a host """ - - if self.is_unlocked(): - action = json.dumps([{"path": "/action", - "value": "lock", "op": "replace"}]) - - return self.__host_action__(admin_token, action, region_name) - - return True - - def force_lock(self, admin_token, region_name): - """ Asks the Platform to perform a force lock against a host """ - - if self.is_unlocked(): - action = json.dumps([{"path": "/action", - "value": "force-lock", "op": 
"replace"}]) - - return self.__host_action__(admin_token, action, region_name) - - return True - - def unlock(self, admin_token, region_name): - """ Asks the Platform to perform an ulock against a host """ - - if self.is_locked(): - action = json.dumps([{"path": "/action", - "value": "unlock", "op": "replace"}]) - - return self.__host_action__(admin_token, action, region_name) - - return True - - def power_off(self, admin_token, region_name): - """ Asks the Platform to perform a power-off against a host """ - - if self.is_powered_on(): - action = json.dumps([{"path": "/action", - "value": "power-off", "op": "replace"}]) - - return self.__host_action__(admin_token, action, region_name) - - return True - - def power_on(self, admin_token, region_name): - """ Asks the Platform to perform a power-on against a host """ - - if self.is_powered_off(): - action = json.dumps([{"path": "/action", - "value": "power-on", "op": "replace"}]) - - return self.__host_action__(admin_token, action, region_name) - - return True - - -def get_hosts(admin_token, region_name, personality=None, - exclude_hostnames=None): - """ Asks System Inventory for a list of hosts """ - - if exclude_hostnames is None: - exclude_hostnames = [] - - try: - url = admin_token.get_service_admin_url("platform", "sysinv", - region_name) - url += "/ihosts/" - - request_info = urlrequest.Request(url) - request_info.add_header("X-Auth-Token", admin_token.get_id()) - request_info.add_header("Accept", "application/json") - - request = urlrequest.urlopen(request_info) - response = json.loads(request.read()) - request.close() - - host_list = [] - if personality is None: - for host in response['ihosts']: - if host['hostname'] not in exclude_hostnames: - host_list.append(Host(host['hostname'], host)) - else: - for host in response['ihosts']: - if host['hostname'] not in exclude_hostnames: - if (host['personality'] == "controller" and - personality == HOST_PERSONALITY_CONTROLLER): - host_list.append(Host(host['hostname'], 
host)) - - elif (host['personality'] == "worker" and - personality == HOST_PERSONALITY_WORKER): - host_list.append(Host(host['hostname'], host)) - - elif (host['personality'] == "storage" and - personality == HOST_PERSONALITY_STORAGE): - host_list.append(Host(host['hostname'], host)) - - return host_list - - except KeystoneFail as e: - LOG.error("Keystone authentication failed:{} ".format(e)) - return [] - - except HTTPError as e: - LOG.error("%s, %s" % (e.code, e.read())) - if e.code == 401: - admin_token.set_expired() - return [] - - except URLError as e: - LOG.error(e) - return [] - - -def dict_to_patch(values, install_action=False): - # install default action - if install_action: - values.update({'action': 'install'}) - patch = [] - for key, value in values.items(): - path = '/' + key - patch.append({'op': 'replace', 'path': path, 'value': value}) - return patch - - -def get_shared_services(): - try: - services = "" - with openstack.OpenStack() as client: - systems = client.sysinv.isystem.list() - if systems: - services = systems[0].capabilities.get("shared_services", "") - except Exception as e: - LOG.exception("failed to get shared services") - raise e - - return services - - -def get_alarms(): - """ get all alarms """ - alarm_list = [] - try: - with openstack.OpenStack() as client: - alarm_list = client.sysinv.ialarm.list() - except Exception as e: - LOG.exception("failed to get alarms") - raise e - return alarm_list - - -def controller_enabled_provisioned(hostname): - """ check if host is enabled """ - try: - with openstack.OpenStack() as client: - hosts = get_hosts(client.admin_token, - client.conf['region_name']) - for host in hosts: - if (hostname == host.name and - host.is_controller_enabled_provisioned()): - LOG.info("host %s is enabled/provisioned" % host.name) - return True - except Exception as e: - LOG.exception("failed to check if host is enabled/provisioned") - raise e - return False - - -def get_system_uuid(): - """ get system uuid """ - try: - 
sysuuid = "" - with openstack.OpenStack() as client: - systems = client.sysinv.isystem.list() - if systems: - sysuuid = systems[0].uuid - except Exception as e: - LOG.exception("failed to get system uuid") - raise e - return sysuuid - - -def get_oam_ip(): - """ get OAM ip details """ - try: - with openstack.OpenStack() as client: - oam_list = client.sysinv.iextoam.list() - if oam_list: - return oam_list[0] - except Exception as e: - LOG.exception("failed to get OAM IP") - raise e - return None - - -def get_mac_addresses(hostname): - """ get MAC addresses for the host """ - macs = {} - try: - with openstack.OpenStack() as client: - hosts = get_hosts(client.admin_token, - client.conf['region_name']) - for host in hosts: - if hostname == host.name: - port_list = client.sysinv.ethernet_port.list(host.uuid) - macs = {port.name: port.mac for port in port_list} - except Exception as e: - LOG.exception("failed to get MAC addresses") - raise e - return macs - - -def get_disk_serial_ids(hostname): - """ get disk serial ids for the host """ - disk_serial_ids = {} - try: - with openstack.OpenStack() as client: - hosts = get_hosts(client.admin_token, - client.conf['region_name']) - for host in hosts: - if hostname == host.name: - disk_list = client.sysinv.idisk.list(host.uuid) - disk_serial_ids = { - disk.device_node: disk.serial_id for disk in disk_list} - except Exception as e: - LOG.exception("failed to get disks") - raise e - return disk_serial_ids - - -def update_clone_system(descr, hostname): - """ update system parameters on clone installation """ - try: - with openstack.OpenStack() as client: - systems = client.sysinv.isystem.list() - if not systems: - return False - values = { - 'name': "Cloned_system", - 'description': descr - } - patch = dict_to_patch(values) - LOG.info("Updating system: {} [{}]".format(systems[0].name, patch)) - client.sysinv.isystem.update(systems[0].uuid, patch) - - hosts = get_hosts(client.admin_token, - client.conf['region_name']) - for host in 
hosts: - if hostname == host.name: - values = { - 'location': {}, - 'serialid': "" - } - patch = dict_to_patch(values) - client.sysinv.ihost.update(host.uuid, patch) - LOG.info("Updating host: {} [{}]".format(host, patch)) - except Exception as e: - LOG.exception("failed to update system parameters") - raise e - return True - - -def get_config_status(hostname): - """ get config status of the host """ - try: - with openstack.OpenStack() as client: - hosts = get_hosts(client.admin_token, - client.conf['region_name']) - for host in hosts: - if hostname == host.name: - return host.config_status - except Exception as e: - LOG.exception("failed to get config status") - raise e - return None - - -def get_host_data(hostname): - """ get data for the specified host """ - try: - with openstack.OpenStack() as client: - hosts = get_hosts(client.admin_token, - client.conf['region_name']) - for host in hosts: - if hostname == host.name: - return host - except Exception as e: - LOG.exception("failed to get host data") - raise e - return None - - -def do_worker_config_complete(hostname): - """ enable worker functionality """ - try: - with openstack.OpenStack() as client: - hosts = get_hosts(client.admin_token, - client.conf['region_name']) - for host in hosts: - if hostname == host.name: - # Create/apply worker manifests - values = { - 'action': "subfunction_config" - } - patch = dict_to_patch(values) - LOG.info("Applying worker manifests: {} [{}]" - .format(host, patch)) - client.sysinv.ihost.update(host.uuid, patch) - except Exception as e: - LOG.exception("worker_config_complete failed") - raise e - - -def get_storage_backend_services(): - """ get all storage backends and their assigned services """ - backend_service_dict = {} - try: - with openstack.OpenStack() as client: - backend_list = client.sysinv.storage_backend.list() - for backend in backend_list: - backend_service_dict.update( - {backend.backend: backend.services}) - - except Exception as e: - LOG.exception("failed to 
get storage backend services") - raise e - - return backend_service_dict diff --git a/controllerconfig/controllerconfig/controllerconfig/systemconfig.py b/controllerconfig/controllerconfig/controllerconfig/systemconfig.py deleted file mode 100644 index 801b02d66e..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/systemconfig.py +++ /dev/null @@ -1,499 +0,0 @@ -""" -Copyright (c) 2015-2019 Wind River Systems, Inc. - -SPDX-License-Identifier: Apache-2.0 - -""" - -from __future__ import print_function -from six.moves import configparser -import os -import readline -import sys -import textwrap - -from controllerconfig.common import constants -from controllerconfig.common import log -from controllerconfig.common.exceptions import BackupFail -from controllerconfig.common.exceptions import RestoreFail -from controllerconfig.common.exceptions import UserQuit -from controllerconfig.common.exceptions import CloneFail -from controllerconfig import lag_mode_to_str -from controllerconfig import Network -from controllerconfig import validate -from controllerconfig import ConfigFail -from controllerconfig import DEFAULT_CONFIG -from controllerconfig import REGION_CONFIG -from controllerconfig import SUBCLOUD_CONFIG -from controllerconfig import MGMT_TYPE -from controllerconfig import HP_NAMES -from controllerconfig import DEFAULT_NAMES -from controllerconfig.configassistant import ConfigAssistant -from controllerconfig import backup_restore -from controllerconfig import utils -from controllerconfig import clone - -# Temporary file for building cgcs_config -TEMP_CGCS_CONFIG_FILE = "/tmp/cgcs_config" - -LOG = log.get_logger(__name__) - - -def parse_system_config(config_file): - """Parse system config file""" - system_config = configparser.RawConfigParser() - try: - system_config.read(config_file) - except Exception as e: - LOG.exception(e) - raise ConfigFail("Error parsing system config file") - - # Dump configuration for debugging - # for section in 
config.sections(): - # print "Section: %s" % section - # for (name, value) in config.items(section): - # print "name: %s, value: %s" % (name, value) - return system_config - - -def configure_management_interface(region_config, config_type=REGION_CONFIG): - """Bring up management interface - """ - mgmt_network = Network() - if region_config.has_section('CLM_NETWORK'): - naming_type = HP_NAMES - else: - naming_type = DEFAULT_NAMES - - if config_type == SUBCLOUD_CONFIG: - min_addresses = 5 - else: - min_addresses = 8 - try: - mgmt_network.parse_config(region_config, config_type, MGMT_TYPE, - min_addresses=min_addresses, - naming_type=naming_type) - except ConfigFail: - raise - except Exception as e: - LOG.exception("Error parsing configuration file") - raise ConfigFail("Error parsing configuration file: %s" % e) - - try: - # Remove interface config files currently installed - utils.remove_interface_config_files() - - # Create the management interface configuration files. - # Code based on ConfigAssistant._write_interface_config_management - parameters = utils.get_interface_config_static( - mgmt_network.start_address, - mgmt_network.cidr, - mgmt_network.gateway_address) - - if mgmt_network.logical_interface.lag_interface: - management_interface = 'bond0' - else: - management_interface = mgmt_network.logical_interface.ports[0] - - if mgmt_network.vlan: - management_interface_name = "%s.%s" % (management_interface, - mgmt_network.vlan) - utils.write_interface_config_vlan( - management_interface_name, - mgmt_network.logical_interface.mtu, - parameters) - - # underlying interface has no additional parameters - parameters = None - else: - management_interface_name = management_interface - - if mgmt_network.logical_interface.lag_interface: - utils.write_interface_config_bond( - management_interface, - mgmt_network.logical_interface.mtu, - lag_mode_to_str(mgmt_network.logical_interface.lag_mode), - None, - constants.LAG_MIIMON_FREQUENCY, - 
mgmt_network.logical_interface.ports[0], - mgmt_network.logical_interface.ports[1], - parameters) - else: - utils.write_interface_config_ethernet( - management_interface, - mgmt_network.logical_interface.mtu, - parameters) - - # Restart networking with the new management interface configuration - utils.restart_networking() - - # Send a GARP for floating address. Doing this to help in - # cases where we are re-installing in a lab and another node - # previously held the floating address. - if mgmt_network.cidr.version == 4: - utils.send_interface_garp(management_interface_name, - mgmt_network.start_address) - except Exception: - LOG.exception("Failed to configure management interface") - raise ConfigFail("Failed to configure management interface") - - -def create_cgcs_config_file(output_file, system_config, - services, endpoints, domains, - config_type=REGION_CONFIG, validate_only=False): - """ - Create cgcs_config file or just perform validation of the system_config if - validate_only=True. 
- :param output_file: filename of output cgcs_config file - :param system_config: system configuration - :param services: keystone services (not used if validate_only) - :param endpoints: keystone endpoints (not used if validate_only) - :param domains: keystone domains (not used if validate_only) - :param config_type: specify region, subcloud or standard config - :param validate_only: used to validate the input system_config - :return: - """ - cgcs_config = None - if not validate_only: - cgcs_config = configparser.RawConfigParser() - cgcs_config.optionxform = str - - # general error checking, if not validate_only cgcs config data is returned - validate(system_config, config_type, cgcs_config) - - # Region configuration: services, endpoints and domain - if config_type in [REGION_CONFIG, SUBCLOUD_CONFIG] and not validate_only: - # The services and endpoints are not available in the validation phase - region_1_name = system_config.get('SHARED_SERVICES', 'REGION_NAME') - keystone_service_name = system_config.get('SHARED_SERVICES', - 'KEYSTONE_SERVICE_NAME') - keystone_service_type = system_config.get('SHARED_SERVICES', - 'KEYSTONE_SERVICE_TYPE') - keystone_service_id = services.get_service_id(keystone_service_name, - keystone_service_type) - keystone_admin_url = endpoints.get_service_url(region_1_name, - keystone_service_id, - "admin") - keystone_internal_url = endpoints.get_service_url(region_1_name, - keystone_service_id, - "internal") - keystone_public_url = endpoints.get_service_url(region_1_name, - keystone_service_id, - "public") - - cgcs_config.set('cREGION', 'KEYSTONE_AUTH_URI', keystone_internal_url) - cgcs_config.set('cREGION', 'KEYSTONE_IDENTITY_URI', keystone_admin_url) - cgcs_config.set('cREGION', 'KEYSTONE_ADMIN_URI', keystone_admin_url) - cgcs_config.set('cREGION', 'KEYSTONE_INTERNAL_URI', - keystone_internal_url) - cgcs_config.set('cREGION', 'KEYSTONE_PUBLIC_URI', keystone_public_url) - - # if ldap is a shared service - if 
(system_config.has_option('SHARED_SERVICES', 'LDAP_SERVICE_URL')): - ldap_service_url = system_config.get('SHARED_SERVICES', - 'LDAP_SERVICE_URL') - cgcs_config.set('cREGION', 'LDAP_SERVICE_URI', ldap_service_url) - cgcs_config.set('cREGION', 'LDAP_SERVICE_NAME', 'open-ldap') - cgcs_config.set('cREGION', 'LDAP_REGION_NAME', region_1_name) - - # If primary region is non-TiC and keystone entries already created, - # the flag will tell puppet not to create them. - if (system_config.has_option('REGION_2_SERVICES', 'CREATE') and - system_config.get('REGION_2_SERVICES', 'CREATE') == 'Y'): - cgcs_config.set('cREGION', 'REGION_SERVICES_CREATE', 'True') - - # System Timezone configuration - if system_config.has_option('SYSTEM', 'TIMEZONE'): - timezone = system_config.get('SYSTEM', 'TIMEZONE') - if not os.path.isfile("/usr/share/zoneinfo/%s" % timezone): - raise ConfigFail( - "Timezone file %s does not exist" % timezone) - - # Dump results for debugging - # for section in cgcs_config.sections(): - # print "[%s]" % section - # for (name, value) in cgcs_config.items(section): - # print "%s=%s" % (name, value) - - if not validate_only: - # Write config file - with open(output_file, 'w') as config_file: - cgcs_config.write(config_file) - - -def configure_system(config_file): - """Configure the system""" - - # Parse the system config file - print("Parsing system configuration file... ", end=' ') - system_config = parse_system_config(config_file) - print("DONE") - - # Validate the system config file - print("Validating system configuration file... ", end=' ') - try: - create_cgcs_config_file(None, system_config, None, None, None, - DEFAULT_CONFIG, validate_only=True) - except configparser.Error as e: - raise ConfigFail("Error parsing configuration file %s: %s" % - (config_file, e)) - print("DONE") - - # Create cgcs_config file - print("Creating config apply file... 
", end=' ') - try: - create_cgcs_config_file(TEMP_CGCS_CONFIG_FILE, system_config, - None, None, None, DEFAULT_CONFIG) - except configparser.Error as e: - raise ConfigFail("Error parsing configuration file %s: %s" % - (config_file, e)) - print("DONE") - - -def show_help(): - print("Usage: %s\n" - "--backup Backup configuration using the given " - "name\n" - "--clone-iso Clone and create an image with " - "the given file name\n" - "--clone-status Status of the last installation of " - "cloned image\n" - "--restore-system " - " " - "\n" - " Restore system configuration from backup " - "file with\n" - " the given name, full path required\n" - % sys.argv[0]) - - -def show_help_lab_only(): - print("Usage: %s\n" - "Perform initial configuration\n" - "\nThe following options are for lab use only:\n" - "--answerfile Apply the configuration from the specified " - "file without\n" - " any validation or user interaction\n" - "--default Apply default configuration with no NTP or " - "DNS server\n" - " configuration (suitable for testing in a " - "virtual\n" - " environment)\n" - "--archive-dir Directory to store the archive in\n" - "--provision Provision initial system data only\n" - % sys.argv[0]) - - -def no_complete(text, state): - return - - -def main(): - options = {} - answerfile = None - backup_name = None - archive_dir = constants.BACKUPS_PATH - do_default_config = False - do_backup = False - do_system_restore = False - include_storage_reinstall = False - do_clone = False - do_non_interactive = False - do_provision = False - system_config_file = "/home/sysadmin/system_config" - allow_ssh = False - - # Disable completion as the default completer shows python commands - readline.set_completer(no_complete) - - # remove any previous config fail flag file - if os.path.exists(constants.CONFIG_FAIL_FILE) is True: - os.remove(constants.CONFIG_FAIL_FILE) - - if os.environ.get('CGCS_LABMODE'): - options['labmode'] = True - - arg = 1 - while arg < len(sys.argv): - if 
sys.argv[arg] == "--answerfile": - arg += 1 - if arg < len(sys.argv): - answerfile = sys.argv[arg] - else: - print("--answerfile option requires a file to be specified") - exit(1) - elif sys.argv[arg] == "--backup": - arg += 1 - if arg < len(sys.argv): - backup_name = sys.argv[arg] - else: - print("--backup requires the name of the backup") - exit(1) - do_backup = True - elif sys.argv[arg] == "--restore-system": - arg += 1 - if arg < len(sys.argv): - if sys.argv[arg] in ["include-storage-reinstall", - "exclude-storage-reinstall"]: - if sys.argv[arg] == "include-storage-reinstall": - include_storage_reinstall = True - arg += 1 - if arg < len(sys.argv): - backup_name = sys.argv[arg] - else: - print(textwrap.fill( - "--restore-system requires the filename " - " of the backup", 80)) - exit(1) - else: - backup_name = sys.argv[arg] - else: - print(textwrap.fill( - "--restore-system requires the filename " - "of the backup", 80)) - exit(1) - do_system_restore = True - elif sys.argv[arg] == "--archive-dir": - arg += 1 - if arg < len(sys.argv): - archive_dir = sys.argv[arg] - else: - print("--archive-dir requires a directory") - exit(1) - elif sys.argv[arg] == "--clone-iso": - arg += 1 - if arg < len(sys.argv): - backup_name = sys.argv[arg] - else: - print("--clone-iso requires the name of the image") - exit(1) - do_clone = True - elif sys.argv[arg] == "--clone-status": - clone.clone_status() - exit(0) - elif sys.argv[arg] == "--default": - do_default_config = True - elif sys.argv[arg] == "--config-file": - arg += 1 - if arg < len(sys.argv): - system_config_file = sys.argv[arg] - else: - print("--config-file requires the filename of the config file") - exit(1) - do_non_interactive = True - elif sys.argv[arg] in ["--help", "-h", "-?"]: - show_help() - exit(1) - elif sys.argv[arg] == "--labhelp": - show_help_lab_only() - exit(1) - elif sys.argv[arg] == "--provision": - do_provision = True - elif sys.argv[arg] == "--allow-ssh": - allow_ssh = True - elif sys.argv[arg] == 
"--kubernetes": - # This is a temporary flag for use during development. Once things - # are stable, we will remove it and make kubernetes the default. - options['kubernetes'] = True - else: - print("Invalid option. Use --help for more information.") - exit(1) - arg += 1 - - if [do_backup, - do_system_restore, - do_clone, - do_default_config, - do_non_interactive].count(True) > 1: - print("Invalid combination of options selected") - exit(1) - - if answerfile and [do_backup, - do_system_restore, - do_clone, - do_default_config, - do_non_interactive].count(True) > 0: - print("The --answerfile option cannot be used with the selected " - "option") - exit(1) - - log.configure() - - if not do_backup and not do_clone: - # Check if that the command is being run from the console - if utils.is_ssh_parent(): - if allow_ssh: - print(textwrap.fill(constants.SSH_WARNING_MESSAGE, 80)) - print('') - else: - print(textwrap.fill(constants.SSH_ERROR_MESSAGE, 80)) - exit(1) - - # Reduce the printk console log level to avoid noise during configuration - printk_levels = '' - with open('/proc/sys/kernel/printk', 'r') as f: - printk_levels = f.readline() - - temp_printk_levels = '3' + printk_levels[1:] - with open('/proc/sys/kernel/printk', 'w') as f: - f.write(temp_printk_levels) - - try: - if do_backup: - backup_restore.backup(backup_name, archive_dir) - print("\nBackup complete") - elif do_system_restore: - backup_restore.restore_system(backup_name, - include_storage_reinstall) - print("\nSystem restore complete") - elif do_clone: - clone.clone(backup_name, archive_dir) - print("\nCloning complete") - elif do_provision: - assistant = ConfigAssistant(**options) - assistant.provision(answerfile) - else: - print(textwrap.fill( - "Please use bootstrap playbook to configure the " - "first controller.", 80)) - exit(1) - - if do_non_interactive: - if not os.path.isfile(system_config_file): - raise ConfigFail("Config file %s does not exist." 
% - system_config_file) - if (os.path.exists(constants.CGCS_CONFIG_FILE) or - os.path.exists(constants.CONFIG_PERMDIR) or - os.path.exists( - constants.INITIAL_CONFIG_COMPLETE_FILE)): - raise ConfigFail("Configuration has already been done " - "and cannot be repeated.") - configure_system(system_config_file) - answerfile = TEMP_CGCS_CONFIG_FILE - assistant = ConfigAssistant(**options) - assistant.configure(answerfile, do_default_config) - print("\nConfiguration was applied\n") - print(textwrap.fill( - "Please complete any out of service commissioning steps " - "with system commands and unlock controller to proceed.", 80)) - assistant.check_required_interfaces_status() - - except KeyboardInterrupt: - print("\nAborting configuration") - except BackupFail as e: - print("\nBackup failed: {}".format(e)) - except RestoreFail as e: - print("\nRestore failed: {}".format(e)) - except ConfigFail as e: - print("\nConfiguration failed: {}".format(e)) - except CloneFail as e: - print("\nCloning failed: {}".format(e)) - except UserQuit: - print("\nAborted configuration") - finally: - if os.path.isfile(TEMP_CGCS_CONFIG_FILE): - os.remove(TEMP_CGCS_CONFIG_FILE) - - # Restore the printk console log level - with open('/proc/sys/kernel/printk', 'w') as f: - f.write(printk_levels) diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/__init__.py b/controllerconfig/controllerconfig/controllerconfig/tests/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.share.keystoneonly b/controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.share.keystoneonly deleted file mode 100755 index 547856f6ba..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.share.keystoneonly +++ /dev/null @@ -1,78 +0,0 @@ -[SYSTEM] -SYSTEM_MODE=duplex - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 
-INTERFACE_PORTS=eth0 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_3] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth2 - -[MGMT_NETWORK] -VLAN=121 -IP_START_ADDRESS=192.168.204.102 -IP_END_ADDRESS=192.168.204.199 -CIDR=192.168.204.0/24 -MULTICAST_CIDR=239.1.1.0/28 -;GATEWAY=192.168.204.12 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 -DYNAMIC_ALLOCATION=N - -[OAM_NETWORK] -;VLAN= -IP_START_ADDRESS=10.10.10.2 -IP_END_ADDRESS=10.10.10.99 -CIDR=10.10.10.0/24 -GATEWAY=10.10.10.1 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -[REGION2_PXEBOOT_NETWORK] -PXEBOOT_CIDR=192.168.203.0/24 - -[SHARED_SERVICES] -REGION_NAME=RegionOne -ADMIN_PROJECT_NAME=admin -ADMIN_USER_NAME=admin -ADMIN_USER_DOMAIN=admin_domain -ADMIN_PROJECT_DOMAIN=admin_domain -ADMIN_PASSWORD=Li69nux* -KEYSTONE_ADMINURL=http://192.168.204.12:35357/v2.0 -KEYSTONE_SERVICE_NAME=keystone -KEYSTONE_SERVICE_TYPE=identity -SERVICE_PROJECT_NAME=FULL_TEST - -[REGION_2_SERVICES] -REGION_NAME=RegionTwo -USER_DOMAIN_NAME=service_domain -PROJECT_DOMAIN_NAME=service_domain - -SYSINV_USER_NAME=sysinvTWO -SYSINV_PASSWORD=password2WO* -SYSINV_SERVICE_NAME=sysinv -SYSINV_SERVICE_TYPE=platform -PATCHING_USER_NAME=patchingTWO -PATCHING_PASSWORD=password2WO* -PATCHING_SERVICE_NAME=patching -PATCHING_SERVICE_TYPE=patching -NFV_USER_NAME=vimTWO -NFV_PASSWORD=password2WO* -MTCE_USER_NAME=mtceTWO -MTCE_PASSWORD=password2WO* -FM_USER_NAME=fmTWO -FM_PASSWORD=password2WO* -BARBICAN_USER_NAME=barbican -BARBICAN_PASSWORD=barbican2WO* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.share.keystoneonly.result b/controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.share.keystoneonly.result deleted file mode 100755 index b9624b18d5..0000000000 --- 
a/controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.share.keystoneonly.result +++ /dev/null @@ -1,78 +0,0 @@ -[cSYSTEM] -TIMEZONE = UTC -SYSTEM_MODE = duplex - -[cPXEBOOT] -PXEBOOT_SUBNET = 192.168.203.0/24 -CONTROLLER_PXEBOOT_FLOATING_ADDRESS = 192.168.203.2 -CONTROLLER_PXEBOOT_ADDRESS_0 = 192.168.203.3 -CONTROLLER_PXEBOOT_ADDRESS_1 = 192.168.203.4 -PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller - -[cMGMT] -MANAGEMENT_MTU = 1500 -MANAGEMENT_SUBNET = 192.168.204.0/24 -LAG_MANAGEMENT_INTERFACE = no -MANAGEMENT_INTERFACE = eth0 -MANAGEMENT_VLAN = 121 -MANAGEMENT_INTERFACE_NAME = eth0.121 -CONTROLLER_FLOATING_ADDRESS = 192.168.204.102 -CONTROLLER_0_ADDRESS = 192.168.204.103 -CONTROLLER_1_ADDRESS = 192.168.204.104 -NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105 -NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106 -CONTROLLER_FLOATING_HOSTNAME = controller -CONTROLLER_HOSTNAME_PREFIX = controller- -OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller -DYNAMIC_ADDRESS_ALLOCATION = no -MANAGEMENT_START_ADDRESS = 192.168.204.102 -MANAGEMENT_END_ADDRESS = 192.168.204.199 -MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28 - -[cEXT_OAM] -EXTERNAL_OAM_MTU = 1500 -EXTERNAL_OAM_SUBNET = 10.10.10.0/24 -LAG_EXTERNAL_OAM_INTERFACE = no -EXTERNAL_OAM_INTERFACE = eth1 -EXTERNAL_OAM_INTERFACE_NAME = eth1 -EXTERNAL_OAM_GATEWAY_ADDRESS = 10.10.10.1 -EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2 -EXTERNAL_OAM_0_ADDRESS = 10.10.10.3 -EXTERNAL_OAM_1_ADDRESS = 10.10.10.4 - -[cREGION] -REGION_CONFIG = True -REGION_1_NAME = RegionOne -REGION_2_NAME = RegionTwo -ADMIN_USER_NAME = admin -ADMIN_USER_DOMAIN = admin_domain -ADMIN_PROJECT_NAME = admin -ADMIN_PROJECT_DOMAIN = admin_domain -SERVICE_PROJECT_NAME = FULL_TEST -KEYSTONE_SERVICE_NAME = keystone -KEYSTONE_SERVICE_TYPE = identity -PATCHING_USER_NAME = patchingTWO -PATCHING_PASSWORD = password2WO* -SYSINV_USER_NAME = sysinvTWO -SYSINV_PASSWORD = password2WO* -SYSINV_SERVICE_NAME = sysinv -SYSINV_SERVICE_TYPE = platform -NFV_USER_NAME = 
vimTWO -NFV_PASSWORD = password2WO* -MTCE_USER_NAME = mtceTWO -MTCE_PASSWORD = password2WO* -FM_USER_NAME = fmTWO -FM_PASSWORD = password2WO* -BARBICAN_USER_NAME = barbican -BARBICAN_PASSWORD = barbican2WO* -USER_DOMAIN_NAME = service_domain -PROJECT_DOMAIN_NAME = service_domain -KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0 - -[cAUTHENTICATION] -ADMIN_PASSWORD = Li69nux* - diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.shareall b/controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.shareall deleted file mode 100755 index 8c24d037df..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.shareall +++ /dev/null @@ -1,77 +0,0 @@ -[SYSTEM] -SYSTEM_MODE = duplex - -[STORAGE] - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth0 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_3] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth2 - -[MGMT_NETWORK] -VLAN=121 -IP_START_ADDRESS=192.168.204.102 -IP_END_ADDRESS=192.168.204.199 -CIDR=192.168.204.0/24 -MULTICAST_CIDR=239.1.1.0/28 -;GATEWAY=192.168.204.12 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 -DYNAMIC_ALLOCATION=N - -[OAM_NETWORK] -;VLAN= -IP_START_ADDRESS=10.10.10.2 -IP_END_ADDRESS=10.10.10.99 -CIDR=10.10.10.0/24 -GATEWAY=10.10.10.1 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -[REGION2_PXEBOOT_NETWORK] -PXEBOOT_CIDR=192.168.203.0/24 - -[SHARED_SERVICES] -REGION_NAME=RegionOne -ADMIN_PROJECT_NAME=admin -ADMIN_USER_NAME=admin -ADMIN_PASSWORD=Li69nux* 
-KEYSTONE_ADMINURL=http://192.168.204.12:35357/v2.0 -KEYSTONE_SERVICE_NAME=keystone -KEYSTONE_SERVICE_TYPE=identity -SERVICE_PROJECT_NAME=FULL_TEST - -LDAP_SERVICE_URL=ldap://192.168.204.12:389 - -[REGION_2_SERVICES] -REGION_NAME=RegionTwo -SYSINV_USER_NAME=sysinvTWO -SYSINV_PASSWORD=password2WO* -SYSINV_SERVICE_NAME=sysinv -SYSINV_SERVICE_TYPE=platform -PATCHING_USER_NAME=patchingTWO -PATCHING_PASSWORD=password2WO* -PATCHING_SERVICE_NAME=patching -PATCHING_SERVICE_TYPE=patching -NFV_USER_NAME=vimTWO -NFV_PASSWORD=password2WO* -MTCE_USER_NAME=mtceTWO -MTCE_PASSWORD=password2WO* -FM_USER_NAME=fmTWO -FM_PASSWORD=password2WO* -BARBICAN_USER_NAME=barbican -BARBICAN_PASSWORD=barbican2WO* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.shareall.result b/controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.shareall.result deleted file mode 100755 index 17c6982bd8..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/TiS_region_config.shareall.result +++ /dev/null @@ -1,81 +0,0 @@ -[cSYSTEM] -TIMEZONE = UTC -SYSTEM_MODE = duplex - -[cPXEBOOT] -PXEBOOT_SUBNET = 192.168.203.0/24 -CONTROLLER_PXEBOOT_FLOATING_ADDRESS = 192.168.203.2 -CONTROLLER_PXEBOOT_ADDRESS_0 = 192.168.203.3 -CONTROLLER_PXEBOOT_ADDRESS_1 = 192.168.203.4 -PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller - -[cMGMT] -MANAGEMENT_MTU = 1500 -MANAGEMENT_SUBNET = 192.168.204.0/24 -LAG_MANAGEMENT_INTERFACE = no -MANAGEMENT_INTERFACE = eth0 -MANAGEMENT_VLAN = 121 -MANAGEMENT_INTERFACE_NAME = eth0.121 -CONTROLLER_FLOATING_ADDRESS = 192.168.204.102 -CONTROLLER_0_ADDRESS = 192.168.204.103 -CONTROLLER_1_ADDRESS = 192.168.204.104 -NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105 -NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106 -CONTROLLER_FLOATING_HOSTNAME = controller -CONTROLLER_HOSTNAME_PREFIX = controller- -OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller -DYNAMIC_ADDRESS_ALLOCATION = no 
-MANAGEMENT_START_ADDRESS = 192.168.204.102 -MANAGEMENT_END_ADDRESS = 192.168.204.199 -MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28 - -[cEXT_OAM] -EXTERNAL_OAM_MTU = 1500 -EXTERNAL_OAM_SUBNET = 10.10.10.0/24 -LAG_EXTERNAL_OAM_INTERFACE = no -EXTERNAL_OAM_INTERFACE = eth1 -EXTERNAL_OAM_INTERFACE_NAME = eth1 -EXTERNAL_OAM_GATEWAY_ADDRESS = 10.10.10.1 -EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2 -EXTERNAL_OAM_0_ADDRESS = 10.10.10.3 -EXTERNAL_OAM_1_ADDRESS = 10.10.10.4 - -[cREGION] -REGION_CONFIG = True -REGION_1_NAME = RegionOne -REGION_2_NAME = RegionTwo -ADMIN_USER_NAME = admin -ADMIN_USER_DOMAIN = Default -ADMIN_PROJECT_NAME = admin -ADMIN_PROJECT_DOMAIN = Default -SERVICE_PROJECT_NAME = FULL_TEST -KEYSTONE_SERVICE_NAME = keystone -KEYSTONE_SERVICE_TYPE = identity -PATCHING_USER_NAME = patchingTWO -PATCHING_PASSWORD = password2WO* -SYSINV_USER_NAME = sysinvTWO -SYSINV_PASSWORD = password2WO* -SYSINV_SERVICE_NAME = sysinv -SYSINV_SERVICE_TYPE = platform -NFV_USER_NAME = vimTWO -NFV_PASSWORD = password2WO* -MTCE_USER_NAME = mtceTWO -MTCE_PASSWORD = password2WO* -FM_USER_NAME = fmTWO -FM_PASSWORD = password2WO* -BARBICAN_USER_NAME = barbican -BARBICAN_PASSWORD = barbican2WO* -USER_DOMAIN_NAME = Default -PROJECT_DOMAIN_NAME = Default -KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0 -LDAP_SERVICE_URI = ldap://192.168.204.12:389 -LDAP_SERVICE_NAME = open-ldap -LDAP_REGION_NAME = RegionOne - -[cAUTHENTICATION] -ADMIN_PASSWORD = Li69nux* - diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/certificate.pem b/controllerconfig/controllerconfig/controllerconfig/tests/files/certificate.pem deleted file mode 100644 index d2ef173b37..0000000000 --- 
a/controllerconfig/controllerconfig/controllerconfig/tests/files/certificate.pem +++ /dev/null @@ -1 +0,0 @@ -# Dummy certificate file diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.ceph b/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.ceph deleted file mode 100755 index b4b3a1249f..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.ceph +++ /dev/null @@ -1,62 +0,0 @@ -[cSYSTEM] -# System Configuration -SYSTEM_MODE=duplex -TIMEZONE=UTC - -[cPXEBOOT] -# PXEBoot Network Support Configuration -PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller - -[cMGMT] -# Management Network Configuration -MANAGEMENT_INTERFACE_NAME=eth1 -MANAGEMENT_INTERFACE=eth1 -MANAGEMENT_MTU=1500 -MANAGEMENT_SUBNET=192.168.204.0/24 -LAG_MANAGEMENT_INTERFACE=no -CONTROLLER_FLOATING_ADDRESS=192.168.204.2 -CONTROLLER_0_ADDRESS=192.168.204.3 -CONTROLLER_1_ADDRESS=192.168.204.4 -NFS_MANAGEMENT_ADDRESS_1=192.168.204.7 -NFS_MANAGEMENT_ADDRESS_2=192.168.204.8 -CONTROLLER_FLOATING_HOSTNAME=controller -CONTROLLER_HOSTNAME_PREFIX=controller- -OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller -DYNAMIC_ADDRESS_ALLOCATION=yes -MANAGEMENT_MULTICAST_SUBNET=239.1.1.0/28 - -[cCLUSTER] -# Cluster Host Network Configuration -CLUSTER_INTERFACE_NAME=eth1 -CLUSTER_INTERFACE=eth1 -CLUSTER_VLAN=NC -CLUSTER_MTU=1500 -CLUSTER_SUBNET=192.168.206.0/24 -LAG_CLUSTER_INTERFACE=no - -[cEXT_OAM] -# External OAM Network Configuration -EXTERNAL_OAM_INTERFACE_NAME=eth0 -EXTERNAL_OAM_INTERFACE=eth0 -EXTERNAL_OAM_VLAN=NC -EXTERNAL_OAM_MTU=1500 -LAG_EXTERNAL_OAM_INTERFACE=no -EXTERNAL_OAM_SUBNET=10.10.10.0/24 -EXTERNAL_OAM_GATEWAY_ADDRESS=10.10.10.1 -EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2 -EXTERNAL_OAM_0_ADDRESS=10.10.10.3 -EXTERNAL_OAM_1_ADDRESS=10.10.10.4 - -[cDNS] -# DNS Configuration -NAMESERVER_1=8.8.8.8 -NAMESERVER_2=8.8.4.4 -NAMESERVER_3=NC - -[cSECURITY] -[cREGION] -# Region Configuration -REGION_CONFIG=False - 
-[cAUTHENTICATION] -ADMIN_PASSWORD=Li69nux* diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.default b/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.default deleted file mode 100755 index c071de26ea..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.default +++ /dev/null @@ -1,62 +0,0 @@ -[cSYSTEM] -# System Configuration -SYSTEM_MODE=duplex -TIMEZONE=UTC - -[cPXEBOOT] -# PXEBoot Network Support Configuration -PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller - -[cMGMT] -# Management Network Configuration -MANAGEMENT_INTERFACE_NAME=eth1 -MANAGEMENT_INTERFACE=eth1 -MANAGEMENT_MTU=1500 -MANAGEMENT_SUBNET=192.168.204.0/24 -LAG_MANAGEMENT_INTERFACE=no -CONTROLLER_FLOATING_ADDRESS=192.168.204.2 -CONTROLLER_0_ADDRESS=192.168.204.3 -CONTROLLER_1_ADDRESS=192.168.204.4 -NFS_MANAGEMENT_ADDRESS_1=192.168.204.5 -NFS_MANAGEMENT_ADDRESS_2=192.168.204.6 -CONTROLLER_FLOATING_HOSTNAME=controller -CONTROLLER_HOSTNAME_PREFIX=controller- -OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller -DYNAMIC_ADDRESS_ALLOCATION=yes -MANAGEMENT_MULTICAST_SUBNET=239.1.1.0/28 - -[cCLUSTER] -# Cluster Host Network Configuration -CLUSTER_INTERFACE_NAME=eth1 -CLUSTER_INTERFACE=eth1 -CLUSTER_VLAN=NC -CLUSTER_MTU=1500 -CLUSTER_SUBNET=192.168.206.0/24 -LAG_CLUSTER_INTERFACE=no - -[cEXT_OAM] -# External OAM Network Configuration -EXTERNAL_OAM_INTERFACE_NAME=eth0 -EXTERNAL_OAM_INTERFACE=eth0 -EXTERNAL_OAM_VLAN=NC -EXTERNAL_OAM_MTU=1500 -LAG_EXTERNAL_OAM_INTERFACE=no -EXTERNAL_OAM_SUBNET=10.10.10.0/24 -EXTERNAL_OAM_GATEWAY_ADDRESS=10.10.10.1 -EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2 -EXTERNAL_OAM_0_ADDRESS=10.10.10.3 -EXTERNAL_OAM_1_ADDRESS=10.10.10.4 - -[cDNS] -# DNS Configuration -NAMESERVER_1=8.8.8.8 -NAMESERVER_2=8.8.4.4 -NAMESERVER_3=NC - -[cSECURITY] -[cREGION] -# Region Configuration -REGION_CONFIG=False - -[cAUTHENTICATION] -ADMIN_PASSWORD=Li69nux* diff --git 
a/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.ipv6 b/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.ipv6 deleted file mode 100755 index 97b357ba70..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.ipv6 +++ /dev/null @@ -1,62 +0,0 @@ -[cSYSTEM] -# System Configuration -SYSTEM_MODE=duplex -TIMEZONE=UTC - -[cPXEBOOT] -# PXEBoot Network Support Configuration -PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller - -[cMGMT] -# Management Network Configuration -MANAGEMENT_INTERFACE_NAME=eth1 -MANAGEMENT_INTERFACE=eth1 -MANAGEMENT_MTU=1500 -MANAGEMENT_SUBNET=1234::/64 -LAG_MANAGEMENT_INTERFACE=no -CONTROLLER_FLOATING_ADDRESS=1234::2 -CONTROLLER_0_ADDRESS=1234::3 -CONTROLLER_1_ADDRESS=1234::4 -NFS_MANAGEMENT_ADDRESS_1=1234::5 -NFS_MANAGEMENT_ADDRESS_2=1234::6 -CONTROLLER_FLOATING_HOSTNAME=controller -CONTROLLER_HOSTNAME_PREFIX=controller- -OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller -DYNAMIC_ADDRESS_ALLOCATION=yes -MANAGEMENT_MULTICAST_SUBNET=ff08::1:1:0/124 - -[cCLUSTER] -# Cluster Host Network Configuration -CLUSTER_INTERFACE_NAME=eth1 -CLUSTER_INTERFACE=eth1 -CLUSTER_VLAN=NC -CLUSTER_MTU=1500 -CLUSTER_SUBNET=192.168.206.0/24 -LAG_CLUSTER_INTERFACE=no - -[cEXT_OAM] -# External OAM Network Configuration -EXTERNAL_OAM_INTERFACE_NAME=eth0 -EXTERNAL_OAM_INTERFACE=eth0 -EXTERNAL_OAM_VLAN=NC -EXTERNAL_OAM_MTU=1500 -LAG_EXTERNAL_OAM_INTERFACE=no -EXTERNAL_OAM_SUBNET=abcd::/64 -EXTERNAL_OAM_GATEWAY_ADDRESS=abcd::1 -EXTERNAL_OAM_FLOATING_ADDRESS=abcd::2 -EXTERNAL_OAM_0_ADDRESS=abcd::3 -EXTERNAL_OAM_1_ADDRESS=abcd::4 - -[cDNS] -# DNS Configuration -NAMESERVER_1=8.8.8.8 -NAMESERVER_2=8.8.4.4 -NAMESERVER_3=NC - -[cSECURITY] -[cREGION] -# Region Configuration -REGION_CONFIG=False - -[cAUTHENTICATION] -ADMIN_PASSWORD=Li69nux* diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.kubernetes 
b/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.kubernetes deleted file mode 100755 index f340017eb8..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.kubernetes +++ /dev/null @@ -1,76 +0,0 @@ -[cSYSTEM] -# System Configuration -SYSTEM_MODE=duplex -TIMEZONE=UTC - -[cPXEBOOT] -# PXEBoot Network Support Configuration -PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller - -[cMGMT] -# Management Network Configuration -MANAGEMENT_INTERFACE_NAME=eth1 -MANAGEMENT_INTERFACE=eth1 -MANAGEMENT_MTU=1500 -MANAGEMENT_SUBNET=192.168.204.0/24 -LAG_MANAGEMENT_INTERFACE=no -CONTROLLER_FLOATING_ADDRESS=192.168.204.2 -CONTROLLER_0_ADDRESS=192.168.204.3 -CONTROLLER_1_ADDRESS=192.168.204.4 -NFS_MANAGEMENT_ADDRESS_1=192.168.204.5 -NFS_MANAGEMENT_ADDRESS_2=192.168.204.6 -CONTROLLER_FLOATING_HOSTNAME=controller -CONTROLLER_HOSTNAME_PREFIX=controller- -OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller -DYNAMIC_ADDRESS_ALLOCATION=yes -MANAGEMENT_MULTICAST_SUBNET=239.1.1.0/28 - -[cCLUSTER] -# Cluster Host Network Configuration -CLUSTER_INTERFACE_NAME=eth1 -CLUSTER_INTERFACE=eth1 -CLUSTER_VLAN=NC -CLUSTER_MTU=1500 -CLUSTER_SUBNET=192.168.206.0/24 -LAG_CLUSTER_INTERFACE=no - -[cEXT_OAM] -# External OAM Network Configuration -EXTERNAL_OAM_INTERFACE_NAME=eth0 -EXTERNAL_OAM_INTERFACE=eth0 -EXTERNAL_OAM_VLAN=NC -EXTERNAL_OAM_MTU=1500 -LAG_EXTERNAL_OAM_INTERFACE=no -EXTERNAL_OAM_SUBNET=10.10.10.0/24 -EXTERNAL_OAM_GATEWAY_ADDRESS=10.10.10.1 -EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2 -EXTERNAL_OAM_0_ADDRESS=10.10.10.3 -EXTERNAL_OAM_1_ADDRESS=10.10.10.4 - -[cDNS] -# DNS Configuration -NAMESERVER_1=1.2.3.4 -NAMESERVER_2=5.6.7.8 -NAMESERVER_3=NC - -[cDOCKER_PROXY] -# Docker Proxy Configuration -DOCKER_HTTP_PROXY=http://proxy.com:123 -DOCKER_HTTPS_PROXY=https://proxy.com:123 -DOCKER_NO_PROXY=localhost,127.0.0.1,192.168.204.2 - -[cDOCKER_REGISTRY] -# Docker Registry Configuration -DOCKER_K8S_REGISTRY=my.registry.com:5000 
-DOCKER_GCR_REGISTRY=my.registry.com -DOCKER_QUAY_REGISTRY=1.2.3.4:5000 -DOCKER_DOCKER_REGISTRY=[1:2:3:4:a:b:c:d]:5000 -IS_SECURE_REGISTRY=False - -[cSECURITY] -[cREGION] -# Region Configuration -REGION_CONFIG=False - -[cAUTHENTICATION] -ADMIN_PASSWORD=Li69nux* diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.region b/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.region deleted file mode 100755 index 7be15f8887..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.region +++ /dev/null @@ -1,94 +0,0 @@ -[cSYSTEM] -# System Configuration -SYSTEM_MODE=duplex -TIMEZONE=UTC - -[cPXEBOOT] -# PXEBoot Network Support Configuration -PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller - -[cMGMT] -# Management Network Configuration -MANAGEMENT_INTERFACE_NAME=eth1 -MANAGEMENT_INTERFACE=eth1 -MANAGEMENT_MTU=1500 -MANAGEMENT_SUBNET=192.168.204.0/24 -LAG_MANAGEMENT_INTERFACE=no -CONTROLLER_FLOATING_ADDRESS=192.168.204.102 -CONTROLLER_0_ADDRESS=192.168.204.103 -CONTROLLER_1_ADDRESS=192.168.204.104 -NFS_MANAGEMENT_ADDRESS_1=192.168.204.105 -NFS_MANAGEMENT_ADDRESS_2=192.168.204.106 -CONTROLLER_FLOATING_HOSTNAME=controller -CONTROLLER_HOSTNAME_PREFIX=controller- -OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller -DYNAMIC_ADDRESS_ALLOCATION=yes -MANAGEMENT_START_ADDRESS=192.168.204.102 -MANAGEMENT_END_ADDRESS=192.168.204.199 -MANAGEMENT_MULTICAST_SUBNET=239.1.1.0/28 - -[cCLUSTER] -# Cluster Host Network Configuration -CLUSTER_INTERFACE_NAME=eth1 -CLUSTER_INTERFACE=eth1 -CLUSTER_VLAN=NC -CLUSTER_MTU=1500 -CLUSTER_SUBNET=192.168.206.0/24 -LAG_CLUSTER_INTERFACE=no - -[cEXT_OAM] -# External OAM Network Configuration -EXTERNAL_OAM_INTERFACE_NAME=eth0 -EXTERNAL_OAM_INTERFACE=eth0 -EXTERNAL_OAM_VLAN=NC -EXTERNAL_OAM_MTU=1500 -LAG_EXTERNAL_OAM_INTERFACE=no -EXTERNAL_OAM_SUBNET=10.10.10.0/24 -EXTERNAL_OAM_GATEWAY_ADDRESS=10.10.10.1 -EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2 
-EXTERNAL_OAM_0_ADDRESS=10.10.10.3 -EXTERNAL_OAM_1_ADDRESS=10.10.10.4 - -[cDNS] -# DNS Configuration -NAMESERVER_1=8.8.8.8 -NAMESERVER_2=8.8.4.4 -NAMESERVER_3=NC - -[cSECURITY] -[cREGION] -# Region Configuration -REGION_CONFIG=True -REGION_1_NAME=RegionOne -REGION_2_NAME=RegionTwo -ADMIN_USER_NAME=admin -ADMIN_USER_DOMAIN=Default -ADMIN_PROJECT_NAME=admin -ADMIN_PROJECT_DOMAIN=Default -SERVICE_PROJECT_NAME=service -SERVICE_USER_DOMAIN=Default -SERVICE_PROJECT_DOMAIN=Default -KEYSTONE_AUTH_URI=http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_IDENTITY_URI=http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_ADMIN_URI=http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_INTERNAL_URI=http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_PUBLIC_URI=http://10.10.10.2:8081/keystone/main/v2.0 -KEYSTONE_SERVICE_NAME=keystone -KEYSTONE_SERVICE_TYPE=identity -PATCHING_USER_NAME=patching -PATCHING_PASSWORD=password2WO* -SYSINV_USER_NAME=sysinv -SYSINV_PASSWORD=password2WO* -SYSINV_SERVICE_NAME=sysinv -SYSINV_SERVICE_TYPE=platform -NFV_USER_NAME=vim -NFV_PASSWORD=password2WO* -MTCE_USER_NAME=mtce -MTCE_PASSWORD=password2WO* -FM_USER_NAME=fm -FM_PASSWORD=password2WO* -BARBICAN_USER_NAME=barbican -BARBICAN_PASSWORD=barbican2WO* - -[cAUTHENTICATION] -ADMIN_PASSWORD=Li69nux* diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.region_nuage_vrs b/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.region_nuage_vrs deleted file mode 100755 index 7be15f8887..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/cgcs_config.region_nuage_vrs +++ /dev/null @@ -1,94 +0,0 @@ -[cSYSTEM] -# System Configuration -SYSTEM_MODE=duplex -TIMEZONE=UTC - -[cPXEBOOT] -# PXEBoot Network Support Configuration -PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller - -[cMGMT] -# Management Network Configuration -MANAGEMENT_INTERFACE_NAME=eth1 -MANAGEMENT_INTERFACE=eth1 -MANAGEMENT_MTU=1500 
-MANAGEMENT_SUBNET=192.168.204.0/24 -LAG_MANAGEMENT_INTERFACE=no -CONTROLLER_FLOATING_ADDRESS=192.168.204.102 -CONTROLLER_0_ADDRESS=192.168.204.103 -CONTROLLER_1_ADDRESS=192.168.204.104 -NFS_MANAGEMENT_ADDRESS_1=192.168.204.105 -NFS_MANAGEMENT_ADDRESS_2=192.168.204.106 -CONTROLLER_FLOATING_HOSTNAME=controller -CONTROLLER_HOSTNAME_PREFIX=controller- -OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller -DYNAMIC_ADDRESS_ALLOCATION=yes -MANAGEMENT_START_ADDRESS=192.168.204.102 -MANAGEMENT_END_ADDRESS=192.168.204.199 -MANAGEMENT_MULTICAST_SUBNET=239.1.1.0/28 - -[cCLUSTER] -# Cluster Host Network Configuration -CLUSTER_INTERFACE_NAME=eth1 -CLUSTER_INTERFACE=eth1 -CLUSTER_VLAN=NC -CLUSTER_MTU=1500 -CLUSTER_SUBNET=192.168.206.0/24 -LAG_CLUSTER_INTERFACE=no - -[cEXT_OAM] -# External OAM Network Configuration -EXTERNAL_OAM_INTERFACE_NAME=eth0 -EXTERNAL_OAM_INTERFACE=eth0 -EXTERNAL_OAM_VLAN=NC -EXTERNAL_OAM_MTU=1500 -LAG_EXTERNAL_OAM_INTERFACE=no -EXTERNAL_OAM_SUBNET=10.10.10.0/24 -EXTERNAL_OAM_GATEWAY_ADDRESS=10.10.10.1 -EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2 -EXTERNAL_OAM_0_ADDRESS=10.10.10.3 -EXTERNAL_OAM_1_ADDRESS=10.10.10.4 - -[cDNS] -# DNS Configuration -NAMESERVER_1=8.8.8.8 -NAMESERVER_2=8.8.4.4 -NAMESERVER_3=NC - -[cSECURITY] -[cREGION] -# Region Configuration -REGION_CONFIG=True -REGION_1_NAME=RegionOne -REGION_2_NAME=RegionTwo -ADMIN_USER_NAME=admin -ADMIN_USER_DOMAIN=Default -ADMIN_PROJECT_NAME=admin -ADMIN_PROJECT_DOMAIN=Default -SERVICE_PROJECT_NAME=service -SERVICE_USER_DOMAIN=Default -SERVICE_PROJECT_DOMAIN=Default -KEYSTONE_AUTH_URI=http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_IDENTITY_URI=http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_ADMIN_URI=http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_INTERNAL_URI=http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_PUBLIC_URI=http://10.10.10.2:8081/keystone/main/v2.0 -KEYSTONE_SERVICE_NAME=keystone -KEYSTONE_SERVICE_TYPE=identity -PATCHING_USER_NAME=patching -PATCHING_PASSWORD=password2WO* 
-SYSINV_USER_NAME=sysinv -SYSINV_PASSWORD=password2WO* -SYSINV_SERVICE_NAME=sysinv -SYSINV_SERVICE_TYPE=platform -NFV_USER_NAME=vim -NFV_PASSWORD=password2WO* -MTCE_USER_NAME=mtce -MTCE_PASSWORD=password2WO* -FM_USER_NAME=fm -FM_PASSWORD=password2WO* -BARBICAN_USER_NAME=barbican -BARBICAN_PASSWORD=barbican2WO* - -[cAUTHENTICATION] -ADMIN_PASSWORD=Li69nux* diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/iptables.rules b/controllerconfig/controllerconfig/controllerconfig/tests/files/iptables.rules deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.lag.vlan b/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.lag.vlan deleted file mode 100755 index 46a945c310..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.lag.vlan +++ /dev/null @@ -1,72 +0,0 @@ -[SYSTEM] -SYSTEM_MODE=duplex -TIMEZONE=UTC - -[STORAGE] - -;LOGICAL_INTERFACE_ -; LAG_INTERFACE -; LAG_MODE One of 1) Active-backup policy -; 2) Balanced XOR policy -; 4) 802.3ad (LACP) policy -; Interface for pxebooting can only be LACP -; INTERFACE_MTU -; INTERFACE_PORTS - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=Y -LAG_MODE=4 -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1,eth2 - -[CLM_NETWORK] -CLM_VLAN=123 -CLM_IP_START_ADDRESS=192.168.204.102 -CLM_IP_END_ADDRESS=192.168.204.199 -CLM_CIDR=192.168.204.0/24 -CLM_MULTICAST_CIDR=239.1.1.0/28 -CLM_GATEWAY=192.168.204.12 -CLM_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[CAN_NETWORK] -CAN_VLAN=125 -CAN_IP_START_ADDRESS=10.10.10.2 -CAN_IP_END_ADDRESS=10.10.10.4 -CAN_CIDR=10.10.10.0/24 -;CAN_GATEWAY=10.10.10.1 -CAN_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[REGION2_PXEBOOT_NETWORK] -PXEBOOT_CIDR=192.168.203.0/24 - -[SHARED_SERVICES] -REGION_NAME=RegionOne -ADMIN_PROJECT_NAME=admin -ADMIN_USER_NAME=admin -ADMIN_PASSWORD=Li69nux* -KEYSTONE_ADMINURL=http://192.168.204.12:8081/keystone/admin/v2.0 
-KEYSTONE_SERVICE_NAME=keystone -KEYSTONE_SERVICE_TYPE=identity -SERVICE_PROJECT_NAME=service - -[REGION_2_SERVICES] -REGION_NAME=RegionTwo -SYSINV_USER_NAME=sysinv -SYSINV_PASSWORD=password2WO* -SYSINV_SERVICE_NAME=sysinv -SYSINV_SERVICE_TYPE=platform -PATCHING_USER_NAME=patching -PATCHING_PASSWORD=password2WO* -PATCHING_SERVICE_NAME=patching -PATCHING_SERVICE_TYPE=patching -NFV_USER_NAME=vim -NFV_PASSWORD=password2WO* -MTCE_USER_NAME=mtce -MTCE_PASSWORD=password2WO* -FM_USER_NAME=fm -FM_PASSWORD=password2WO* -BARBICAN_USER_NAME=barbican -BARBICAN_PASSWORD=barbican2WO* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.lag.vlan.result b/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.lag.vlan.result deleted file mode 100755 index b0e5af3efa..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.lag.vlan.result +++ /dev/null @@ -1,82 +0,0 @@ -[cSYSTEM] -TIMEZONE = UTC -SYSTEM_MODE = duplex - -[cPXEBOOT] -PXEBOOT_SUBNET = 192.168.203.0/24 -CONTROLLER_PXEBOOT_FLOATING_ADDRESS = 192.168.203.2 -CONTROLLER_PXEBOOT_ADDRESS_0 = 192.168.203.3 -CONTROLLER_PXEBOOT_ADDRESS_1 = 192.168.203.4 -PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller - -[cMGMT] -MANAGEMENT_MTU = 1500 -MANAGEMENT_SUBNET = 192.168.204.0/24 -LAG_MANAGEMENT_INTERFACE = yes -MANAGEMENT_BOND_MEMBER_0 = eth1 -MANAGEMENT_BOND_MEMBER_1 = eth2 -MANAGEMENT_BOND_POLICY = 802.3ad -MANAGEMENT_INTERFACE = bond0 -MANAGEMENT_VLAN = 123 -MANAGEMENT_INTERFACE_NAME = bond0.123 -MANAGEMENT_GATEWAY_ADDRESS = 192.168.204.12 -CONTROLLER_FLOATING_ADDRESS = 192.168.204.102 -CONTROLLER_0_ADDRESS = 192.168.204.103 -CONTROLLER_1_ADDRESS = 192.168.204.104 -NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105 -NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106 -CONTROLLER_FLOATING_HOSTNAME = controller -CONTROLLER_HOSTNAME_PREFIX = controller- -OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller 
-DYNAMIC_ADDRESS_ALLOCATION = no -MANAGEMENT_START_ADDRESS = 192.168.204.102 -MANAGEMENT_END_ADDRESS = 192.168.204.199 -MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28 - -[cEXT_OAM] -EXTERNAL_OAM_MTU = 1500 -EXTERNAL_OAM_SUBNET = 10.10.10.0/24 -LAG_EXTERNAL_OAM_INTERFACE = no -EXTERNAL_OAM_INTERFACE = bond0 -EXTERNAL_OAM_VLAN = 125 -EXTERNAL_OAM_INTERFACE_NAME = bond0.125 -EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2 -EXTERNAL_OAM_0_ADDRESS = 10.10.10.3 -EXTERNAL_OAM_1_ADDRESS = 10.10.10.4 - -[cREGION] -REGION_CONFIG = True -REGION_1_NAME = RegionOne -REGION_2_NAME = RegionTwo -ADMIN_USER_NAME = admin -ADMIN_USER_DOMAIN = Default -ADMIN_PROJECT_NAME = admin -ADMIN_PROJECT_DOMAIN = Default -SERVICE_PROJECT_NAME = service -KEYSTONE_SERVICE_NAME = keystone -KEYSTONE_SERVICE_TYPE = identity -PATCHING_USER_NAME = patching -PATCHING_PASSWORD = password2WO* -SYSINV_USER_NAME = sysinv -SYSINV_PASSWORD = password2WO* -SYSINV_SERVICE_NAME = sysinv -SYSINV_SERVICE_TYPE = platform -NFV_USER_NAME = vim -NFV_PASSWORD = password2WO* -MTCE_USER_NAME = mtce -MTCE_PASSWORD = password2WO* -FM_USER_NAME = fm -FM_PASSWORD = password2WO* -BARBICAN_USER_NAME = barbican -BARBICAN_PASSWORD = barbican2WO* -USER_DOMAIN_NAME = Default -PROJECT_DOMAIN_NAME = Default -KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0 - -[cAUTHENTICATION] -ADMIN_PASSWORD = Li69nux* - diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.nuage_vrs b/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.nuage_vrs deleted file mode 100755 index a3b262f150..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.nuage_vrs +++ /dev/null @@ 
-1,81 +0,0 @@ -[SYSTEM] -SYSTEM_MODE = duplex - -[STORAGE] - -;LOGICAL_INTERFACE_ -; LAG_INTERFACE -; LAG_MODE One of 1) Active-backup policy -; 2) Balanced XOR policy -; 4) 802.3ad (LACP) policy -; Interface for pxebooting can only be LACP -; INTERFACE_MTU -; INTERFACE_PORTS - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth0 - -[CLM_NETWORK] -;CLM_VLAN=123 -CLM_IP_START_ADDRESS=192.168.204.102 -CLM_IP_END_ADDRESS=192.168.204.199 -CLM_CIDR=192.168.204.0/24 -CLM_MULTICAST_CIDR=239.1.1.0/28 -;CLM_GATEWAY=192.168.204.12 -CLM_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[CAN_NETWORK] -;CAN_VLAN= -CAN_IP_START_ADDRESS=10.10.10.2 -CAN_IP_END_ADDRESS=10.10.10.4 -CAN_CIDR=10.10.10.0/24 -CAN_GATEWAY=10.10.10.1 -CAN_LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -;[REGION2_PXEBOOT_NETWORK] -;PXEBOOT_CIDR=192.168.203.0/24 - -[NETWORK] -VSWITCH_TYPE=nuage_vrs -METADATA_PROXY_SHARED_SECRET=NuageNetworksSharedSecret - -[SHARED_SERVICES] -REGION_NAME=RegionOne -ADMIN_PROJECT_NAME=admin -ADMIN_USER_NAME=admin -ADMIN_PASSWORD=Li69nux* -KEYSTONE_ADMINURL=http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_SERVICE_NAME=keystone -KEYSTONE_SERVICE_TYPE=identity -SERVICE_PROJECT_NAME=service - -[REGION_2_SERVICES] -REGION_NAME=RegionTwo -SYSINV_USER_NAME=sysinv -SYSINV_PASSWORD=password2WO* -SYSINV_SERVICE_NAME=sysinv -SYSINV_SERVICE_TYPE=platform -PATCHING_USER_NAME=patching -PATCHING_PASSWORD=password2WO* -PATCHING_SERVICE_NAME=patching -PATCHING_SERVICE_TYPE=patching -NFV_USER_NAME=vim -NFV_PASSWORD=password2WO* -MTCE_USER_NAME=mtce -MTCE_PASSWORD=password2WO* -FM_USER_NAME=fm -FM_PASSWORD=password2WO* -BARBICAN_USER_NAME=barbican -BARBICAN_PASSWORD=barbican2WO* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.nuage_vrs.result 
b/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.nuage_vrs.result deleted file mode 100755 index 4d502b1701..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.nuage_vrs.result +++ /dev/null @@ -1,73 +0,0 @@ -[cSYSTEM] -TIMEZONE = UTC -SYSTEM_MODE = duplex - -[cPXEBOOT] -PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller - -[cMGMT] -MANAGEMENT_MTU = 1500 -MANAGEMENT_SUBNET = 192.168.204.0/24 -LAG_MANAGEMENT_INTERFACE = no -MANAGEMENT_INTERFACE = eth1 -MANAGEMENT_INTERFACE_NAME = eth1 -CONTROLLER_FLOATING_ADDRESS = 192.168.204.102 -CONTROLLER_0_ADDRESS = 192.168.204.103 -CONTROLLER_1_ADDRESS = 192.168.204.104 -NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105 -NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106 -CONTROLLER_FLOATING_HOSTNAME = controller -CONTROLLER_HOSTNAME_PREFIX = controller- -OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller -DYNAMIC_ADDRESS_ALLOCATION = no -MANAGEMENT_START_ADDRESS = 192.168.204.102 -MANAGEMENT_END_ADDRESS = 192.168.204.199 -MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28 - -[cEXT_OAM] -EXTERNAL_OAM_MTU = 1500 -EXTERNAL_OAM_SUBNET = 10.10.10.0/24 -LAG_EXTERNAL_OAM_INTERFACE = no -EXTERNAL_OAM_INTERFACE = eth0 -EXTERNAL_OAM_INTERFACE_NAME = eth0 -EXTERNAL_OAM_GATEWAY_ADDRESS = 10.10.10.1 -EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2 -EXTERNAL_OAM_0_ADDRESS = 10.10.10.3 -EXTERNAL_OAM_1_ADDRESS = 10.10.10.4 - -[cREGION] -REGION_CONFIG = True -REGION_1_NAME = RegionOne -REGION_2_NAME = RegionTwo -ADMIN_USER_NAME = admin -ADMIN_USER_DOMAIN = Default -ADMIN_PROJECT_NAME = admin -ADMIN_PROJECT_DOMAIN = Default -SERVICE_PROJECT_NAME = service -KEYSTONE_SERVICE_NAME = keystone -KEYSTONE_SERVICE_TYPE = identity -PATCHING_USER_NAME = patching -PATCHING_PASSWORD = password2WO* -SYSINV_USER_NAME = sysinv -SYSINV_PASSWORD = password2WO* -SYSINV_SERVICE_NAME = sysinv -SYSINV_SERVICE_TYPE = platform -NFV_USER_NAME = vim -NFV_PASSWORD = password2WO* -MTCE_USER_NAME = mtce -MTCE_PASSWORD = password2WO* 
-FM_USER_NAME = fm -FM_PASSWORD = password2WO* -BARBICAN_USER_NAME = barbican -BARBICAN_PASSWORD = barbican2WO* -USER_DOMAIN_NAME = Default -PROJECT_DOMAIN_NAME = Default -KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0 - -[cAUTHENTICATION] -ADMIN_PASSWORD = Li69nux* - diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.security b/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.security deleted file mode 100755 index 00779938e8..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.security +++ /dev/null @@ -1,77 +0,0 @@ -[SYSTEM] -SYSTEM_MODE = duplex - -[STORAGE] - -;LOGICAL_INTERFACE_ -; LAG_INTERFACE -; LAG_MODE One of 1) Active-backup policy -; 2) Balanced XOR policy -; 4) 802.3ad (LACP) policy -; Interface for pxebooting can only be LACP -; INTERFACE_MTU -; INTERFACE_PORTS - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth0 - -[CLM_NETWORK] -;CLM_VLAN=123 -CLM_IP_START_ADDRESS=192.168.204.102 -CLM_IP_END_ADDRESS=192.168.204.199 -CLM_CIDR=192.168.204.0/24 -CLM_MULTICAST_CIDR=239.1.1.0/28 -;CLM_GATEWAY=192.168.204.12 -CLM_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[CAN_NETWORK] -;CAN_VLAN= -CAN_IP_START_ADDRESS=10.10.10.2 -CAN_IP_END_ADDRESS=10.10.10.4 -CAN_CIDR=10.10.10.0/24 -CAN_GATEWAY=10.10.10.1 -CAN_LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -;[REGION2_PXEBOOT_NETWORK] -;PXEBOOT_CIDR=192.168.203.0/24 - -[SHARED_SERVICES] -REGION_NAME=RegionOne -ADMIN_PROJECT_NAME=admin -ADMIN_USER_NAME=admin -ADMIN_PASSWORD=Li69nux* 
-KEYSTONE_ADMINURL=http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_SERVICE_NAME=keystone -KEYSTONE_SERVICE_TYPE=identity -SERVICE_PROJECT_NAME=service - -[REGION_2_SERVICES] -REGION_NAME=RegionTwo -SYSINV_USER_NAME=sysinv -SYSINV_PASSWORD=password2WO* -SYSINV_SERVICE_NAME=sysinv -SYSINV_SERVICE_TYPE=platform -PATCHING_USER_NAME=patching -PATCHING_PASSWORD=password2WO* -PATCHING_SERVICE_NAME=patching -PATCHING_SERVICE_TYPE=patching -NFV_USER_NAME=vim -NFV_PASSWORD=password2WO* -MTCE_USER_NAME=mtce -MTCE_PASSWORD=password2WO* -FM_USER_NAME=fm -FM_PASSWORD=password2WO* -BARBICAN_USER_NAME=barbican -BARBICAN_PASSWORD=barbican2WO* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.security.result b/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.security.result deleted file mode 100755 index 4d502b1701..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.security.result +++ /dev/null @@ -1,73 +0,0 @@ -[cSYSTEM] -TIMEZONE = UTC -SYSTEM_MODE = duplex - -[cPXEBOOT] -PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller - -[cMGMT] -MANAGEMENT_MTU = 1500 -MANAGEMENT_SUBNET = 192.168.204.0/24 -LAG_MANAGEMENT_INTERFACE = no -MANAGEMENT_INTERFACE = eth1 -MANAGEMENT_INTERFACE_NAME = eth1 -CONTROLLER_FLOATING_ADDRESS = 192.168.204.102 -CONTROLLER_0_ADDRESS = 192.168.204.103 -CONTROLLER_1_ADDRESS = 192.168.204.104 -NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105 -NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106 -CONTROLLER_FLOATING_HOSTNAME = controller -CONTROLLER_HOSTNAME_PREFIX = controller- -OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller -DYNAMIC_ADDRESS_ALLOCATION = no -MANAGEMENT_START_ADDRESS = 192.168.204.102 -MANAGEMENT_END_ADDRESS = 192.168.204.199 -MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28 - -[cEXT_OAM] -EXTERNAL_OAM_MTU = 1500 -EXTERNAL_OAM_SUBNET = 10.10.10.0/24 -LAG_EXTERNAL_OAM_INTERFACE = no -EXTERNAL_OAM_INTERFACE = eth0 
-EXTERNAL_OAM_INTERFACE_NAME = eth0 -EXTERNAL_OAM_GATEWAY_ADDRESS = 10.10.10.1 -EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2 -EXTERNAL_OAM_0_ADDRESS = 10.10.10.3 -EXTERNAL_OAM_1_ADDRESS = 10.10.10.4 - -[cREGION] -REGION_CONFIG = True -REGION_1_NAME = RegionOne -REGION_2_NAME = RegionTwo -ADMIN_USER_NAME = admin -ADMIN_USER_DOMAIN = Default -ADMIN_PROJECT_NAME = admin -ADMIN_PROJECT_DOMAIN = Default -SERVICE_PROJECT_NAME = service -KEYSTONE_SERVICE_NAME = keystone -KEYSTONE_SERVICE_TYPE = identity -PATCHING_USER_NAME = patching -PATCHING_PASSWORD = password2WO* -SYSINV_USER_NAME = sysinv -SYSINV_PASSWORD = password2WO* -SYSINV_SERVICE_NAME = sysinv -SYSINV_SERVICE_TYPE = platform -NFV_USER_NAME = vim -NFV_PASSWORD = password2WO* -MTCE_USER_NAME = mtce -MTCE_PASSWORD = password2WO* -FM_USER_NAME = fm -FM_PASSWORD = password2WO* -BARBICAN_USER_NAME = barbican -BARBICAN_PASSWORD = barbican2WO* -USER_DOMAIN_NAME = Default -PROJECT_DOMAIN_NAME = Default -KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0 - -[cAUTHENTICATION] -ADMIN_PASSWORD = Li69nux* - diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.simple b/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.simple deleted file mode 100755 index 00779938e8..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.simple +++ /dev/null @@ -1,77 +0,0 @@ -[SYSTEM] -SYSTEM_MODE = duplex - -[STORAGE] - -;LOGICAL_INTERFACE_ -; LAG_INTERFACE -; LAG_MODE One of 1) Active-backup policy -; 2) Balanced XOR policy -; 4) 802.3ad (LACP) policy -; Interface for pxebooting can only be LACP -; INTERFACE_MTU -; INTERFACE_PORTS - -[LOGICAL_INTERFACE_1] 
-LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth0 - -[CLM_NETWORK] -;CLM_VLAN=123 -CLM_IP_START_ADDRESS=192.168.204.102 -CLM_IP_END_ADDRESS=192.168.204.199 -CLM_CIDR=192.168.204.0/24 -CLM_MULTICAST_CIDR=239.1.1.0/28 -;CLM_GATEWAY=192.168.204.12 -CLM_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[CAN_NETWORK] -;CAN_VLAN= -CAN_IP_START_ADDRESS=10.10.10.2 -CAN_IP_END_ADDRESS=10.10.10.4 -CAN_CIDR=10.10.10.0/24 -CAN_GATEWAY=10.10.10.1 -CAN_LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -;[REGION2_PXEBOOT_NETWORK] -;PXEBOOT_CIDR=192.168.203.0/24 - -[SHARED_SERVICES] -REGION_NAME=RegionOne -ADMIN_PROJECT_NAME=admin -ADMIN_USER_NAME=admin -ADMIN_PASSWORD=Li69nux* -KEYSTONE_ADMINURL=http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_SERVICE_NAME=keystone -KEYSTONE_SERVICE_TYPE=identity -SERVICE_PROJECT_NAME=service - -[REGION_2_SERVICES] -REGION_NAME=RegionTwo -SYSINV_USER_NAME=sysinv -SYSINV_PASSWORD=password2WO* -SYSINV_SERVICE_NAME=sysinv -SYSINV_SERVICE_TYPE=platform -PATCHING_USER_NAME=patching -PATCHING_PASSWORD=password2WO* -PATCHING_SERVICE_NAME=patching -PATCHING_SERVICE_TYPE=patching -NFV_USER_NAME=vim -NFV_PASSWORD=password2WO* -MTCE_USER_NAME=mtce -MTCE_PASSWORD=password2WO* -FM_USER_NAME=fm -FM_PASSWORD=password2WO* -BARBICAN_USER_NAME=barbican -BARBICAN_PASSWORD=barbican2WO* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.simple.can_ips b/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.simple.can_ips deleted file mode 100755 index f18359bb5d..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.simple.can_ips +++ /dev/null @@ -1,78 +0,0 @@ -[SYSTEM] -SYSTEM_MODE = duplex - -[STORAGE] - -;LOGICAL_INTERFACE_ -; LAG_INTERFACE -; LAG_MODE One of 1) Active-backup policy -; 2) Balanced XOR policy -; 4) 
802.3ad (LACP) policy -; Interface for pxebooting can only be LACP -; INTERFACE_MTU -; INTERFACE_PORTS - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth0 - -[CLM_NETWORK] -;CLM_VLAN=123 -CLM_IP_START_ADDRESS=192.168.204.102 -CLM_IP_END_ADDRESS=192.168.204.199 -CLM_CIDR=192.168.204.0/24 -CLM_MULTICAST_CIDR=239.1.1.0/28 -;CLM_GATEWAY=192.168.204.12 -CLM_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[CAN_NETWORK] -;CAN_VLAN= -CAN_IP_FLOATING_ADDRESS=10.10.10.2 -CAN_IP_UNIT_0_ADDRESS=10.10.10.3 -CAN_IP_UNIT_1_ADDRESS=10.10.10.4 -CAN_CIDR=10.10.10.0/24 -CAN_GATEWAY=10.10.10.1 -CAN_LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -;[REGION2_PXEBOOT_NETWORK] -;PXEBOOT_CIDR=192.168.203.0/24 - -[SHARED_SERVICES] -REGION_NAME=RegionOne -ADMIN_PROJECT_NAME=admin -ADMIN_USER_NAME=admin -ADMIN_PASSWORD=Li69nux* -KEYSTONE_ADMINURL=http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_SERVICE_NAME=keystone -KEYSTONE_SERVICE_TYPE=identity -SERVICE_PROJECT_NAME=service - -[REGION_2_SERVICES] -REGION_NAME=RegionTwo -SYSINV_USER_NAME=sysinv -SYSINV_PASSWORD=password2WO* -SYSINV_SERVICE_NAME=sysinv -SYSINV_SERVICE_TYPE=platform -PATCHING_USER_NAME=patching -PATCHING_PASSWORD=password2WO* -PATCHING_SERVICE_NAME=patching -PATCHING_SERVICE_TYPE=patching -NFV_USER_NAME=vim -NFV_PASSWORD=password2WO* -MTCE_USER_NAME=mtce -MTCE_PASSWORD=password2WO* -FM_USER_NAME=fm -FM_PASSWORD=password2WO* -BARBICAN_USER_NAME=barbican -BARBICAN_PASSWORD=barbican2WO* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.simple.result b/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.simple.result deleted file mode 100755 index 4d502b1701..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/region_config.simple.result +++ /dev/null @@ -1,73 +0,0 @@ 
-[cSYSTEM] -TIMEZONE = UTC -SYSTEM_MODE = duplex - -[cPXEBOOT] -PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller - -[cMGMT] -MANAGEMENT_MTU = 1500 -MANAGEMENT_SUBNET = 192.168.204.0/24 -LAG_MANAGEMENT_INTERFACE = no -MANAGEMENT_INTERFACE = eth1 -MANAGEMENT_INTERFACE_NAME = eth1 -CONTROLLER_FLOATING_ADDRESS = 192.168.204.102 -CONTROLLER_0_ADDRESS = 192.168.204.103 -CONTROLLER_1_ADDRESS = 192.168.204.104 -NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105 -NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106 -CONTROLLER_FLOATING_HOSTNAME = controller -CONTROLLER_HOSTNAME_PREFIX = controller- -OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller -DYNAMIC_ADDRESS_ALLOCATION = no -MANAGEMENT_START_ADDRESS = 192.168.204.102 -MANAGEMENT_END_ADDRESS = 192.168.204.199 -MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28 - -[cEXT_OAM] -EXTERNAL_OAM_MTU = 1500 -EXTERNAL_OAM_SUBNET = 10.10.10.0/24 -LAG_EXTERNAL_OAM_INTERFACE = no -EXTERNAL_OAM_INTERFACE = eth0 -EXTERNAL_OAM_INTERFACE_NAME = eth0 -EXTERNAL_OAM_GATEWAY_ADDRESS = 10.10.10.1 -EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2 -EXTERNAL_OAM_0_ADDRESS = 10.10.10.3 -EXTERNAL_OAM_1_ADDRESS = 10.10.10.4 - -[cREGION] -REGION_CONFIG = True -REGION_1_NAME = RegionOne -REGION_2_NAME = RegionTwo -ADMIN_USER_NAME = admin -ADMIN_USER_DOMAIN = Default -ADMIN_PROJECT_NAME = admin -ADMIN_PROJECT_DOMAIN = Default -SERVICE_PROJECT_NAME = service -KEYSTONE_SERVICE_NAME = keystone -KEYSTONE_SERVICE_TYPE = identity -PATCHING_USER_NAME = patching -PATCHING_PASSWORD = password2WO* -SYSINV_USER_NAME = sysinv -SYSINV_PASSWORD = password2WO* -SYSINV_SERVICE_NAME = sysinv -SYSINV_SERVICE_TYPE = platform -NFV_USER_NAME = vim -NFV_PASSWORD = password2WO* -MTCE_USER_NAME = mtce -MTCE_PASSWORD = password2WO* -FM_USER_NAME = fm -FM_PASSWORD = password2WO* -BARBICAN_USER_NAME = barbican -BARBICAN_PASSWORD = barbican2WO* -USER_DOMAIN_NAME = Default -PROJECT_DOMAIN_NAME = Default -KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_IDENTITY_URI = 
http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0 -KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0 -KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0 - -[cAUTHENTICATION] -ADMIN_PASSWORD = Li69nux* - diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.ceph b/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.ceph deleted file mode 100755 index c82f87dfe0..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.ceph +++ /dev/null @@ -1,55 +0,0 @@ -[SYSTEM] -SYSTEM_MODE = duplex - -;LOGICAL_INTERFACE_ -; LAG_INTERFACE -; LAG_MODE One of 1) Active-backup policy -; 2) Balanced XOR policy -; 4) 802.3ad (LACP) policy -; Interface for pxebooting can only be LACP -; INTERFACE_MTU -; INTERFACE_PORTS - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth0 - -[MGMT_NETWORK] -;VLAN=123 -IP_START_ADDRESS=192.168.204.2 -IP_END_ADDRESS=192.168.204.99 -CIDR=192.168.204.0/24 -MULTICAST_CIDR=239.1.1.0/28 -DYNAMIC_ALLOCATION=Y -;GATEWAY=192.168.204.12 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[OAM_NETWORK] -;VLAN= -IP_START_ADDRESS=10.10.10.2 -IP_END_ADDRESS=10.10.10.4 -CIDR=10.10.10.0/24 -GATEWAY=10.10.10.1 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -;[PXEBOOT_NETWORK] -;PXEBOOT_CIDR=192.168.203.0/24 - -;[BOARD_MANAGEMENT_NETWORK] -;VLAN=1 -;MTU=1496 -;SUBNET=192.168.203.0/24 - -[AUTHENTICATION] -ADMIN_PASSWORD=Li69nux* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.ipv6 b/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.ipv6 deleted file mode 100755 index a53219de18..0000000000 --- 
a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.ipv6 +++ /dev/null @@ -1,53 +0,0 @@ -;LOGICAL_INTERFACE_ -; LAG_INTERFACE -; LAG_MODE One of 1) Active-backup policy -; 2) Balanced XOR policy -; 4) 802.3ad (LACP) policy -; Interface for pxebooting can only be LACP -; INTERFACE_MTU -; INTERFACE_PORTS - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth0 - -[MGMT_NETWORK] -VLAN=123 -CIDR=1234::/64 -MULTICAST_CIDR=ff08::1:1:0/124 -DYNAMIC_ALLOCATION=Y -;GATEWAY=192.168.204.12 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[OAM_NETWORK] -;VLAN= -;IP_START_ADDRESS=abcd::2 -;IP_END_ADDRESS=abcd::4 -IP_FLOATING_ADDRESS=abcd::2 -IP_UNIT_0_ADDRESS=abcd::3 -IP_UNIT_1_ADDRESS=abcd::4 -CIDR=abcd::/64 -GATEWAY=abcd::1 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -[PXEBOOT_NETWORK] -PXEBOOT_CIDR=192.168.203.0/24 - -;[BOARD_MANAGEMENT_NETWORK] -;VLAN=1 -;MTU=1496 -;SUBNET=192.168.203.0/24 - -[AUTHENTICATION] -ADMIN_PASSWORD=Li69nux* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.kubernetes b/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.kubernetes deleted file mode 100755 index 10e8d54691..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.kubernetes +++ /dev/null @@ -1,70 +0,0 @@ -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -INTERFACE_MTU=1500 -INTERFACE_LINK_CAPACITY=1000 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -;INTERFACE_LINK_CAPACITY= -INTERFACE_PORTS=eth0 - -[MGMT_NETWORK] -;VLAN=123 -CIDR=192.168.204.0/24 -MULTICAST_CIDR=239.1.1.0/28 -DYNAMIC_ALLOCATION=Y -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[CLUSTER_NETWORK] -CIDR=192.168.206.0/24 -DYNAMIC_ALLOCATION=Y -IP_START_ADDRESS=192.168.206.2 
-IP_END_ADDRESS=192.168.206.245 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[OAM_NETWORK] -;VLAN= -;IP_START_ADDRESS=10.10.10.2 -;IP_END_ADDRESS=10.10.10.4 -IP_FLOATING_ADDRESS=10.10.10.20 -IP_UNIT_0_ADDRESS=10.10.10.30 -IP_UNIT_1_ADDRESS=10.10.10.40 -CIDR=10.10.10.0/24 -GATEWAY=10.10.10.1 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -[DNS] -# DNS Configuration -NAMESERVER_1=1.2.3.4 -NAMESERVER_2=5.6.7.8 - -[DOCKER_PROXY] -# Docker Proxy Configuration -DOCKER_HTTP_PROXY=http://proxy.com:123 -DOCKER_HTTPS_PROXY=https://proxy.com:123 -DOCKER_NO_PROXY=localhost,127.0.0.1,192.168.204.2 - -[DOCKER_REGISTRY] -# Docker Registry Configuration -DOCKER_K8S_REGISTRY=my.registry.com:5000 -DOCKER_GCR_REGISTRY=my.registry.com -DOCKER_QUAY_REGISTRY=1.2.3.4:5000 -DOCKER_DOCKER_REGISTRY=[1:2:3:4:a:b:c:d]:5000 -IS_SECURE_REGISTRY=False - -;[PXEBOOT_NETWORK] -;PXEBOOT_CIDR=192.168.203.0/24 - -;[BOARD_MANAGEMENT_NETWORK] -;VLAN=1 -;MTU=1496 -;SUBNET=192.168.203.0/24 - -[AUTHENTICATION] -ADMIN_PASSWORD=Li69nux* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.lag.vlan b/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.lag.vlan deleted file mode 100755 index 8bcd0b7e70..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.lag.vlan +++ /dev/null @@ -1,55 +0,0 @@ -[SYSTEM] -SYSTEM_MODE=duplex - -;LOGICAL_INTERFACE_ -; LAG_INTERFACE -; LAG_MODE One of 1) Active-backup policy -; 2) Balanced XOR policy -; 4) 802.3ad (LACP) policy -; Interface for pxebooting can only be LACP -; INTERFACE_MTU -; INTERFACE_PORTS - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=Y -LAG_MODE=4 -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1,eth2 - -[MGMT_NETWORK] -VLAN=123 -IP_START_ADDRESS=192.168.204.102 -IP_END_ADDRESS=192.168.204.199 -CIDR=192.168.204.0/24 -MULTICAST_CIDR=239.1.1.0/28 -GATEWAY=192.168.204.12 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[CLUSTER_NETWORK] -VLAN=126 
-IP_START_ADDRESS=192.168.206.102 -IP_END_ADDRESS=192.168.206.199 -CIDR=192.168.206.0/24 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[OAM_NETWORK] -VLAN=125 -IP_START_ADDRESS=10.10.10.2 -IP_END_ADDRESS=10.10.10.4 -CIDR=10.10.10.0/24 -;GATEWAY=10.10.10.1 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[PXEBOOT_NETWORK] -PXEBOOT_CIDR=192.168.203.0/24 - -;[BOARD_MANAGEMENT_NETWORK] -;VLAN=1 -;MTU=1496 -;SUBNET=192.168.203.0/24 - -[AUTHENTICATION] -ADMIN_PASSWORD=Li69nux* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.pxeboot b/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.pxeboot deleted file mode 100755 index b5d7e708e2..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.pxeboot +++ /dev/null @@ -1,49 +0,0 @@ -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -INTERFACE_MTU=1500 -INTERFACE_LINK_CAPACITY=1000 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -;INTERFACE_LINK_CAPACITY= -INTERFACE_PORTS=eth0 - -[PXEBOOT_NETWORK] -PXEBOOT_CIDR=192.168.102.0/24 -IP_START_ADDRESS=192.168.102.32 -IP_END_ADDRESS=192.168.102.54 - -[MGMT_NETWORK] -VLAN=123 -CIDR=192.168.204.0/24 -MULTICAST_CIDR=239.1.1.0/28 -DYNAMIC_ALLOCATION=Y -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[OAM_NETWORK] -;VLAN= -;IP_START_ADDRESS=10.10.10.2 -;IP_END_ADDRESS=10.10.10.4 -IP_FLOATING_ADDRESS=10.10.10.20 -IP_UNIT_0_ADDRESS=10.10.10.30 -IP_UNIT_1_ADDRESS=10.10.10.40 -CIDR=10.10.10.0/24 -GATEWAY=10.10.10.1 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -;[PXEBOOT_NETWORK] -;PXEBOOT_CIDR=192.168.203.0/24 - -;[BOARD_MANAGEMENT_NETWORK] -;VLAN=1 -;MTU=1496 -;SUBNET=192.168.203.0/24 - -[AUTHENTICATION] -ADMIN_PASSWORD=Li69nux* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.security 
b/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.security deleted file mode 100755 index 2aded723c4..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.security +++ /dev/null @@ -1,51 +0,0 @@ -;LOGICAL_INTERFACE_ -; LAG_INTERFACE -; LAG_MODE One of 1) Active-backup policy -; 2) Balanced XOR policy -; 4) 802.3ad (LACP) policy -; Interface for pxebooting can only be LACP -; INTERFACE_MTU -; INTERFACE_PORTS - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth0 - -[MGMT_NETWORK] -;VLAN=123 -IP_START_ADDRESS=192.168.204.102 -IP_END_ADDRESS=192.168.204.199 -CIDR=192.168.204.0/24 -MULTICAST_CIDR=239.1.1.0/28 -;GATEWAY=192.168.204.12 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[OAM_NETWORK] -;VLAN= -IP_START_ADDRESS=10.10.10.2 -IP_END_ADDRESS=10.10.10.4 -CIDR=10.10.10.0/24 -GATEWAY=10.10.10.1 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -;[PXEBOOT_NETWORK] -;PXEBOOT_CIDR=192.168.203.0/24 - -[BOARD_MANAGEMENT_NETWORK] -VLAN=1 -MTU=1496 -SUBNET=192.168.203.0/24 - -[AUTHENTICATION] -ADMIN_PASSWORD=Li69nux* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.simple b/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.simple deleted file mode 100755 index 3c69db1f06..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.simple +++ /dev/null @@ -1,63 +0,0 @@ -;[DNS] -;NAMESERVER_1=8.8.8.8 -;NAMESERVER_2=8.8.4.4 -;NAMESERVER_3= - -;[NTP] -;NTP_SERVER_1=0.pool.ntp.org -;NTP_SERVER_2=1.pool.ntp.org -;NTP_SERVER_3=2.pool.ntp.org - -;LOGICAL_INTERFACE_ -; LAG_INTERFACE -; LAG_MODE One of 1) Active-backup policy -; 2) Balanced XOR policy -; 4) 802.3ad (LACP) policy -; Interface for pxebooting can only be LACP -; INTERFACE_MTU -; 
INTERFACE_PORTS - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth0 - -[MGMT_NETWORK] -;VLAN=123 -CIDR=192.168.204.0/24 -MULTICAST_CIDR=239.1.1.0/28 -DYNAMIC_ALLOCATION=Y -;GATEWAY=192.168.204.12 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[OAM_NETWORK] -;VLAN= -;IP_START_ADDRESS=10.10.10.2 -;IP_END_ADDRESS=10.10.10.4 -IP_FLOATING_ADDRESS=10.10.10.20 -IP_UNIT_0_ADDRESS=10.10.10.30 -IP_UNIT_1_ADDRESS=10.10.10.40 -CIDR=10.10.10.0/24 -GATEWAY=10.10.10.1 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -;[PXEBOOT_NETWORK] -;PXEBOOT_CIDR=192.168.203.0/24 - -;[BOARD_MANAGEMENT_NETWORK] -;VLAN=1 -;MTU=1496 -;SUBNET=192.168.203.0/24 - -[AUTHENTICATION] -ADMIN_PASSWORD=Li69nux* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.simplex b/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.simplex deleted file mode 100644 index 050e007c14..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.simplex +++ /dev/null @@ -1,46 +0,0 @@ -;[DNS] -;NAMESERVER_1=8.8.8.8 -;NAMESERVER_2=8.8.4.4 -;NAMESERVER_3= - -;[NTP] -;NTP_SERVER_1=0.pool.ntp.org -;NTP_SERVER_2=1.pool.ntp.org -;NTP_SERVER_3=2.pool.ntp.org - -;LOGICAL_INTERFACE_ -; LAG_INTERFACE -; LAG_MODE One of 1) Active-backup policy -; 2) Balanced XOR policy -; 4) 802.3ad (LACP) policy -; Interface for pxebooting can only be LACP -; INTERFACE_MTU -; INTERFACE_PORTS - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth0 - -[OAM_NETWORK] -IP_ADDRESS=10.10.10.20 -CIDR=10.10.10.0/24 -GATEWAY=10.10.10.1 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 - -[AUTHENTICATION] -ADMIN_PASSWORD=Li69nux* - -[VERSION] -RELEASE = TEST.SW.VERSION - 
-[SYSTEM] -SYSTEM_TYPE=All-in-one -SYSTEM_MODE=simplex diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.simplex_mgmt b/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.simplex_mgmt deleted file mode 100644 index c555f037fa..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.simplex_mgmt +++ /dev/null @@ -1,24 +0,0 @@ -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth0 - -[MGMT_NETWORK] -CIDR=192.168.42.0/28 - -[OAM_NETWORK] -IP_ADDRESS=10.10.10.20 -CIDR=10.10.10.0/24 -GATEWAY=10.10.10.1 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[AUTHENTICATION] -ADMIN_PASSWORD=Li69nux* - -[VERSION] -RELEASE = TEST.SW.VERSION - -[SYSTEM] -SYSTEM_TYPE=All-in-one -SYSTEM_MODE=simplex diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.static_addr b/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.static_addr deleted file mode 100755 index d368cd446e..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/files/system_config.static_addr +++ /dev/null @@ -1,52 +0,0 @@ -;LOGICAL_INTERFACE_ -; LAG_INTERFACE -; LAG_MODE One of 1) Active-backup policy -; 2) Balanced XOR policy -; 4) 802.3ad (LACP) policy -; Interface for pxebooting can only be LACP -; INTERFACE_MTU -; INTERFACE_PORTS - -[LOGICAL_INTERFACE_1] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth1 - -[LOGICAL_INTERFACE_2] -LAG_INTERFACE=N -;LAG_MODE= -INTERFACE_MTU=1500 -INTERFACE_PORTS=eth0 - -[MGMT_NETWORK] -;VLAN=123 -IP_START_ADDRESS=192.168.204.20 -IP_END_ADDRESS=192.168.204.99 -CIDR=192.168.204.0/24 -MULTICAST_CIDR=239.1.1.0/28 -DYNAMIC_ALLOCATION=N -;GATEWAY=192.168.204.12 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_1 - -[OAM_NETWORK] -;VLAN= -IP_START_ADDRESS=10.10.10.2 -IP_END_ADDRESS=10.10.10.4 -CIDR=10.10.10.0/24 -GATEWAY=10.10.10.1 -LOGICAL_INTERFACE=LOGICAL_INTERFACE_2 
- -;[PXEBOOT_NETWORK] -;PXEBOOT_CIDR=192.168.203.0/24 - -;[BOARD_MANAGEMENT_NETWORK] -;VLAN=1 -;MTU=1496 -;SUBNET=192.168.203.0/24 - -[AUTHENTICATION] -ADMIN_PASSWORD=Li69nux* - -[VERSION] -RELEASE = TEST.SW.VERSION diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/test_answerfile.py b/controllerconfig/controllerconfig/controllerconfig/tests/test_answerfile.py deleted file mode 100644 index d87c735caa..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/test_answerfile.py +++ /dev/null @@ -1,103 +0,0 @@ -""" -Copyright (c) 2014 Wind River Systems, Inc. - -SPDX-License-Identifier: Apache-2.0 - -""" - -import difflib -import filecmp -import os -from mock import patch - -import controllerconfig.configassistant as ca -import controllerconfig.common.constants as constants - - -@patch('controllerconfig.configassistant.get_rootfs_node') -@patch('controllerconfig.configassistant.get_net_device_list') -def _test_answerfile(tmpdir, filename, - mock_get_net_device_list, - mock_get_rootfs_node, - compare_results=True, - ca_options={}): - """ Test import and generation of answerfile """ - mock_get_net_device_list.return_value = \ - ['eth0', 'eth1', 'eth2'] - mock_get_rootfs_node.return_value = '/dev/sda' - - assistant = ca.ConfigAssistant(**ca_options) - - # Create the path to the answerfile - answerfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", filename) - - # Input the config from the answerfile - assistant.input_config_from_file(answerfile) - - # Test the display method - print("Output from display_config:") - assistant.display_config() - - # Ensure we can write the configuration - constants.CONFIG_WORKDIR = os.path.join(str(tmpdir), 'config_workdir') - constants.CGCS_CONFIG_FILE = os.path.join(constants.CONFIG_WORKDIR, - 'cgcs_config') - assistant.write_config_file() - - # Add the password to the generated file so it can be compared with the - # answerfile - with open(constants.CGCS_CONFIG_FILE, 'a') as f: - 
f.write("\n[cAUTHENTICATION]\nADMIN_PASSWORD=Li69nux*\n") - - # Do a diff between the answerfile and the generated config file - print("\n\nDiff of answerfile vs. generated config file:\n") - with open(answerfile) as a, open(constants.CGCS_CONFIG_FILE) as b: - a_lines = a.readlines() - b_lines = b.readlines() - - differ = difflib.Differ() - diff = differ.compare(a_lines, b_lines) - print(''.join(diff)) - - if compare_results: - # Fail the testcase if the answerfile and generated config file don't - # match. - assert filecmp.cmp(answerfile, constants.CGCS_CONFIG_FILE) - - -def test_answerfile_default(tmpdir): - """ Test import of answerfile with default values """ - - _test_answerfile(tmpdir, "cgcs_config.default") - - -def test_answerfile_ipv6(tmpdir): - """ Test import of answerfile with ipv6 oam values """ - - _test_answerfile(tmpdir, "cgcs_config.ipv6") - - -def test_answerfile_ceph(tmpdir): - """ Test import of answerfile with ceph backend values """ - - _test_answerfile(tmpdir, "cgcs_config.ceph") - - -def test_answerfile_region(tmpdir): - """ Test import of answerfile with region values """ - - _test_answerfile(tmpdir, "cgcs_config.region") - - -def test_answerfile_region_nuage_vrs(tmpdir): - """ Test import of answerfile with region values for nuage_vrs""" - - _test_answerfile(tmpdir, "cgcs_config.region_nuage_vrs") - - -def test_answerfile_kubernetes(tmpdir): - """ Test import of answerfile with kubernetes values """ - - _test_answerfile(tmpdir, "cgcs_config.kubernetes", - ca_options={"kubernetes": True}) diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/test_region_config.py b/controllerconfig/controllerconfig/controllerconfig/tests/test_region_config.py deleted file mode 100644 index 0808b3b439..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/test_region_config.py +++ /dev/null @@ -1,759 +0,0 @@ -""" -Copyright (c) 2014-2019 Wind River Systems, Inc. 
- -SPDX-License-Identifier: Apache-2.0 - -""" - -from __future__ import print_function -from six.moves import configparser -import difflib -import filecmp -import fileinput -import mock -from mock import patch -import os -import pytest -import shutil -import sys - -import controllerconfig.common.exceptions as exceptions -from controllerconfig import REGION_CONFIG -from controllerconfig import validate -import controllerconfig.common.keystone as keystone -from controllerconfig.tests import test_answerfile - -sys.modules['fm_core'] = mock.Mock() - -import controllerconfig.systemconfig as cr # noqa: E402 - -FAKE_SERVICE_DATA = {u'services': [ - {u'type': u'keystore', u'description': u'Barbican Key Management Service', - u'enabled': True, u'id': u'9029af23540f4eecb0b7f70ac5e00152', - u'name': u'barbican'}, - {u'type': u'network', u'description': u'OpenStack Networking service', - u'enabled': True, u'id': u'85a8a3342a644df193af4b68d5b65ce5', - u'name': u'neutron'}, {u'type': u'cloudformation', - u'description': - u'OpenStack Cloudformation Service', - u'enabled': True, - u'id': u'abbf431acb6d45919cfbefe55a0f27fa', - u'name': u'heat-cfn'}, - {u'type': u'object-store', u'description': u'OpenStack object-store', - u'enabled': True, u'id': u'd588956f759f4bbda9e65a1019902b9c', - u'name': u'swift'}, - {u'type': u'volumev2', - u'description': u'OpenStack Volume Service v2.0 API', - u'enabled': True, u'id': u'e6e356112daa4af588d9b9dadcf98bc4', - u'name': u'cinderv2'}, - {u'type': u'volume', u'description': u'OpenStack Volume Service', - u'enabled': True, u'id': u'505aa37457774e55b545654aa8630822', - u'name': u'cinder'}, {u'type': u'orchestration', - u'description': u'OpenStack Orchestration Service', - u'enabled': True, - u'id': u'5765bee52eec43bb8e0632ecb225d0e3', - u'name': u'heat'}, - {u'type': u'compute', u'description': u'OpenStack Compute Service', - u'enabled': True, u'id': u'9c46a6ea929f4c52bc92dd9bb9f852ac', - u'name': u'nova'}, - {u'type': u'identity', u'description': 
u'OpenStack Identity', - u'enabled': True, u'id': u'1fe7b1de187b47228fe853fbbd149664', - u'name': u'keystone'}, - {u'type': u'image', u'description': u'OpenStack Image Service', - u'enabled': True, u'id': u'd41750c98a864fdfb25c751b4ad84996', - u'name': u'glance'}, - {u'type': u'database', u'description': u'Trove Database As A Service', - u'enabled': True, u'id': u'82265e39a77b4097bd8aee4f78e13867', - u'name': u'trove'}, - {u'type': u'patching', u'description': u'Patching Service', - u'enabled': True, u'id': u'8515c4f28f9346199eb8704bca4f5db4', - u'name': u'patching'}, - {u'type': u'platform', u'description': u'SysInv Service', u'enabled': True, - u'id': u'08758bed8d894ddaae744a97db1080b3', u'name': u'sysinv'}, - {u'type': u'computev3', u'description': u'Openstack Compute Service v3', - u'enabled': True, u'id': u'959f2214543a47549ffd8c66f98d27d4', - u'name': u'novav3'}]} - -FAKE_ENDPOINT_DATA = {u'endpoints': [ - {u'url': u'http://192.168.204.12:8776/v1/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'505aa37457774e55b545654aa8630822', - u'id': u'de19beb4a4924aa1ba25af3ee64e80a0', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.12:8776/v1/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'505aa37457774e55b545654aa8630822', - u'id': u'de19beb4a4924aa1ba25af3ee64e80a1', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:8776/v1/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'505aa37457774e55b545654aa8630822', - u'id': u'de19beb4a4924aa1ba25af3ee64e80a2', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.102:8774/v2/%(tenant_id)s', - u'region': u'RegionTwo', u'enabled': True, - u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac', - u'id': u'373259a6bbcf493b86c9f9530e86d323', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.102:8774/v2/%(tenant_id)s', - u'region': u'RegionTwo', u'enabled': True, - u'service_id': 
u'9c46a6ea929f4c52bc92dd9bb9f852ac', - u'id': u'373259a6bbcf493b86c9f9530e86d324', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:8774/v2/%(tenant_id)s', - u'region': u'RegionTwo', u'enabled': True, - u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac', - u'id': u'373259a6bbcf493b86c9f9530e86d324', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.102:8004/v1/%(tenant_id)s', - u'region': u'RegionTwo', u'enabled': True, - u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3', - u'id': u'c51dc9354b5a41c9883ec3871b9fd271', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.102:8004/v1/%(tenant_id)s', - u'region': u'RegionTwo', u'enabled': True, - u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3', - u'id': u'c51dc9354b5a41c9883ec3871b9fd272', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:8004/v1/%(tenant_id)s', - u'region': u'RegionTwo', u'enabled': True, - u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3', - u'id': u'c51dc9354b5a41c9883ec3871b9fd273', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.12:8000/v1', u'region': u'RegionOne', - u'enabled': True, u'interface': u'admin', - u'id': u'e132bb9dd0fe459687c3b04074bcb1ac', - u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa'}, - {u'url': u'http://192.168.204.12:8000/v1', u'region': u'RegionOne', - u'enabled': True, u'interface': u'internal', - u'id': u'e132bb9dd0fe459687c3b04074bcb1ad', - u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa'}, - {u'url': u'http://10.10.10.2:8000/v1', u'region': u'RegionOne', - u'enabled': True, u'interface': u'public', - u'id': u'e132bb9dd0fe459687c3b04074bcb1ae', - u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa'}, - - {u'url': u'http://192.168.204.102:8774/v3', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'959f2214543a47549ffd8c66f98d27d4', - u'id': u'031bfbfd581f4a42b361f93fdc4fe266', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.102:8774/v3', u'region': u'RegionTwo', - u'enabled': True, - 
u'service_id': u'959f2214543a47549ffd8c66f98d27d4', - u'id': u'031bfbfd581f4a42b361f93fdc4fe267', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:8774/v3', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'959f2214543a47549ffd8c66f98d27d4', - u'id': u'031bfbfd581f4a42b361f93fdc4fe268', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.12:8081/keystone/admin/v2.0', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'1fe7b1de187b47228fe853fbbd149664', - u'id': u'6fa36df1cc4f4e97a1c12767c8a1159f', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.12:8081/keystone/main/v2.0', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'1fe7b1de187b47228fe853fbbd149664', - u'id': u'6fa36df1cc4f4e97a1c12767c8a11510', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:8081/keystone/main/v2.0', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'1fe7b1de187b47228fe853fbbd149664', - u'id': u'6fa36df1cc4f4e97a1c12767c8a11512', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.102:9696/', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'85a8a3342a644df193af4b68d5b65ce5', - u'id': u'74a7a918dd854b66bb33f1e4e0e768bc', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.102:9696/', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'85a8a3342a644df193af4b68d5b65ce5', - u'id': u'74a7a918dd854b66bb33f1e4e0e768bd', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:9696/', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'85a8a3342a644df193af4b68d5b65ce5', - u'id': u'74a7a918dd854b66bb33f1e4e0e768be', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.102:6385/v1', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'08758bed8d894ddaae744a97db1080b3', - u'id': u'd8ae3a69f08046d1a8f031bbd65381a3', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.102:6385/v1', u'region': u'RegionTwo', - u'enabled': 
True, - u'service_id': u'08758bed8d894ddaae744a97db1080b3', - u'id': u'd8ae3a69f08046d1a8f031bbd65381a4', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:6385/v1', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'08758bed8d894ddaae744a97db1080b5', - u'id': u'd8ae3a69f08046d1a8f031bbd65381a3', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.12:8004/v1/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3', - u'id': u'61ad227efa3b4cdd867618041a7064dc', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.12:8004/v1/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3', - u'id': u'61ad227efa3b4cdd867618041a7064dd', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:8004/v1/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3', - u'id': u'61ad227efa3b4cdd867618041a7064de', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.12:8888/v1', u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'd588956f759f4bbda9e65a1019902b9c', - u'id': u'be557ddb742e46328159749a21e6e286', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.12:8888/v1/AUTH_$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'd588956f759f4bbda9e65a1019902b9c', - u'id': u'be557ddb742e46328159749a21e6e287', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.12:8888/v1/AUTH_$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'd588956f759f4bbda9e65a1019902b9c', - u'id': u'be557ddb742e46328159749a21e6e288', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.102:8777', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba', - u'id': u'050d07db8c5041288f29020079177f0b', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.102:8777', 
u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba', - u'id': u'050d07db8c5041288f29020079177f0c', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:8777', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba', - u'id': u'050d07db8c5041288f29020079177f0d', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.102:5491', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'8515c4f28f9346199eb8704bca4f5db4', - u'id': u'53af565e4d7245929df7af2ba0ff46db', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.102:5491', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'8515c4f28f9346199eb8704bca4f5db4', - u'id': u'53af565e4d7245929df7af2ba0ff46dc', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:5491', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'8515c4f28f9346199eb8704bca4f5db4', - u'id': u'53af565e4d7245929df7af2ba0ff46dd', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.12:8779/v1.0/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'82265e39a77b4097bd8aee4f78e13867', - u'id': u'9a1cc90a7ac342d0900a0449ca4eabfe', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.12:8779/v1.0/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'82265e39a77b4097bd8aee4f78e13867', - u'id': u'9a1cc90a7ac342d0900a0449ca4eabfe', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:8779/v1.0/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'82265e39a77b4097bd8aee4f78e13867', - u'id': u'9a1cc90a7ac342d0900a0449ca4eabfe', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.12:9292/v2', u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'd41750c98a864fdfb25c751b4ad84996', - u'id': u'06fdb367cb63414987ee1653a016d10a', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.12:9292/v2', 
u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'd41750c98a864fdfb25c751b4ad84996', - u'id': u'06fdb367cb63414987ee1653a016d10b', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:9292/v2', u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'd41750c98a864fdfb25c751b4ad84996', - u'id': u'06fdb367cb63414987ee1653a016d10c', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.102:9292/v2', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'd41750c98a864fdfb25c751b4ad84996', - u'id': u'06fdb367cb63414987ee1653a016d10a', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.102:9292/v2', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'd41750c98a864fdfb25c751b4ad84996', - u'id': u'06fdb367cb63414987ee1653a016d10b', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.12:9292/v2', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'd41750c98a864fdfb25c751b4ad84996', - u'id': u'06fdb367cb63414987ee1653a016d10c', - u'interface': u'public'}, - - - {u'url': u'http://192.168.204.12:8777/', u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba', - u'id': u'f15d22a9526648ff8833460e2dce1431', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.12:8777/', u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba', - u'id': u'f15d22a9526648ff8833460e2dce1432', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.12:8777/', u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba', - u'id': u'f15d22a9526648ff8833460e2dce1433', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.102:8000/v1/', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa', - u'id': u'5e6c6ffdbcd544f8838430937a0d81a7', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.102:8000/v1/', u'region': u'RegionTwo', - 
u'enabled': True, - u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa', - u'id': u'5e6c6ffdbcd544f8838430937a0d81a8', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:8000/v1/', u'region': u'RegionTwo', - u'enabled': True, - u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa', - u'id': u'5e6c6ffdbcd544f8838430937a0d81a9', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.12:8774/v2/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac', - u'id': u'87dc648502ee49fb86a4ca87d8d6028d', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.12:8774/v2/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac', - u'id': u'87dc648502ee49fb86a4ca87d8d6028e', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.2:8774/v2/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac', - u'id': u'87dc648502ee49fb86a4ca87d8d6028f', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.12:9696/', u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'85a8a3342a644df193af4b68d5b65ce5', - u'id': u'd326bf63f6f94b12924b03ff42ba63bd', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.12:9696/', u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'85a8a3342a644df193af4b68d5b65ce5', - u'id': u'd326bf63f6f94b12924b03ff42ba63be', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.12:9696/', u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'85a8a3342a644df193af4b68d5b65ce5', - u'id': u'd326bf63f6f94b12924b03ff42ba63bf', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.12:8776/v2/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'e6e356112daa4af588d9b9dadcf98bc4', - u'id': u'61b8bb77edf644f1ad4edf9b953d44c7', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.12:8776/v2/$(tenant_id)s', - 
u'region': u'RegionOne', u'enabled': True, - u'service_id': u'e6e356112daa4af588d9b9dadcf98bc4', - u'id': u'61b8bb77edf644f1ad4edf9b953d44c8', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.12:8776/v2/$(tenant_id)s', - u'region': u'RegionOne', u'enabled': True, - u'service_id': u'e6e356112daa4af588d9b9dadcf98bc4', - u'id': u'61b8bb77edf644f1ad4edf9b953d44c9', - u'interface': u'public'}, - - {u'url': u'http://192.168.204.12:9312/v1', u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'9029af23540f4eecb0b7f70ac5e00152', - u'id': u'a1aa2af22caf460eb421d75ab1ce6125', - u'interface': u'admin'}, - {u'url': u'http://192.168.204.12:9312/v1', u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'9029af23540f4eecb0b7f70ac5e00152', - u'id': u'a1aa2af22caf460eb421d75ab1ce6126', - u'interface': u'internal'}, - {u'url': u'http://10.10.10.12:9312/v1', u'region': u'RegionOne', - u'enabled': True, - u'service_id': u'9029af23540f4eecb0b7f70ac5e00152', - u'id': u'a1aa2af22caf460eb421d75ab1ce6127', - u'interface': u'public'}]} - -FAKE_DOMAIN_DATA = {u'domains': [ - {u'id': u'default', u'enabled': True, - u'description': - u'Owns users and tenants (i.e. 
projects) available on Identity API ' - u'v2.', - u'links': { - u'self': - u'http://192.168.204.12:8081/keystone/main/v3/domains/default'}, - u'name': u'Default'}, - {u'id': u'05d847889e9a4cb9aa94f541eb6b9e2e', - u'enabled': True, - u'description': u'Contains users and projects created by heat', - u'links': { - u'self': - u'http://192.168.204.12:8081/keystone/main/v3/domains/' - u'05d847889e9a4cb9aa94f541eb6b9e2e'}, - u'name': u'heat'}], - u'links': { - u'self': u'http://192.168.204.12:8081/keystone/main/v3/domains', - u'next': None, - u'previous': None}} - - -def _dump_config(config): - """ Prints contents of config object """ - for section in config.sections(): - print("[%s]" % section) - for (name, value) in config.items(section): - print("%s=%s" % (name, value)) - - -def _replace_in_file(filename, old, new): - """ Replaces old with new in file filename. """ - for line in fileinput.FileInput(filename, inplace=1): - line = line.replace(old, new) - print(line, end='') - fileinput.close() - - -@patch('controllerconfig.configassistant.ConfigAssistant.get_sysadmin_sig') -def _test_region_config(tmpdir, inputfile, resultfile, - mock_get_sysadmin_sig): - """ Test import and generation of answerfile """ - - mock_get_sysadmin_sig.return_value = None - - # Create the path to the output file - outputfile = os.path.join(str(tmpdir), 'output') - - # Parse the region_config file - region_config = cr.parse_system_config(inputfile) - - # Dump results for debugging - print("Parsed region_config:\n") - _dump_config(region_config) - - # Validate the region config file - cr.create_cgcs_config_file(outputfile, region_config, - keystone.ServiceList(FAKE_SERVICE_DATA), - keystone.EndpointList(FAKE_ENDPOINT_DATA), - keystone.DomainList(FAKE_DOMAIN_DATA)) - - # Make a local copy of the results file - local_resultfile = os.path.join(str(tmpdir), 'result') - shutil.copyfile(resultfile, local_resultfile) - - # Do a diff between the output and the expected results - print("\n\nDiff of 
output file vs. expected results file:\n") - with open(outputfile) as a, open(local_resultfile) as b: - a_lines = a.readlines() - b_lines = b.readlines() - - differ = difflib.Differ() - diff = differ.compare(a_lines, b_lines) - print(''.join(diff)) - # Fail the testcase if the output doesn't match the expected results - assert filecmp.cmp(outputfile, local_resultfile) - - # Now test that configassistant can parse this answerfile. We can't - # compare the resulting cgcs_config file because the ordering, spacing - # and comments are different between the answerfile generated by - # systemconfig and ConfigAssistant. - test_answerfile._test_answerfile(tmpdir, outputfile, compare_results=False) - - # Validate the region config file. - # Using onboard validation since the validator's reference version number - # is only set at build-time when validating offboard - validate(region_config, REGION_CONFIG, None, False) - - -def test_region_config_simple(tmpdir): - """ Test import of simple region_config file """ - - regionfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "region_config.simple") - resultfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "region_config.simple.result") - - _test_region_config(tmpdir, regionfile, resultfile) - - -def test_region_config_simple_can_ips(tmpdir): - """ Test import of simple region_config file with unit ips for CAN """ - print("IN TEST ################################################") - regionfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "region_config.simple.can_ips") - resultfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "region_config.simple.result") - - _test_region_config(tmpdir, regionfile, resultfile) - - -def test_region_config_lag_vlan(tmpdir): - """ Test import of region_config file with lag and vlan """ - - regionfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "region_config.lag.vlan") - resultfile = 
os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "region_config.lag.vlan.result") - - _test_region_config(tmpdir, regionfile, resultfile) - - -def test_region_config_security(tmpdir): - """ Test import of region_config file with security config """ - - regionfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "region_config.security") - resultfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "region_config.security.result") - _test_region_config(tmpdir, regionfile, resultfile) - - -def test_region_config_nuage_vrs(tmpdir): - """ Test import of region_config file with nuage vrs config """ - - regionfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "region_config.nuage_vrs") - resultfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "region_config.nuage_vrs.result") - _test_region_config(tmpdir, regionfile, resultfile) - - -def test_region_config_share_keystone_only(tmpdir): - """ Test import of Titanium Cloud region_config file with - shared keystone """ - - regionfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "TiS_region_config.share.keystoneonly") - resultfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "TiS_region_config.share.keystoneonly.result") - _test_region_config(tmpdir, regionfile, resultfile) - - -def test_region_config_share_keystone_glance_cinder(tmpdir): - """ Test import of Titanium Cloud region_config file with shared keystone, - glance and cinder """ - - regionfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "TiS_region_config.shareall") - resultfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "TiS_region_config.shareall.result") - _test_region_config(tmpdir, regionfile, resultfile) - - -def test_region_config_validation(): - """ Test detection of various errors in region_config file """ - - # Create the path to the region_config files - simple_regionfile = 
os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "region_config.simple") - lag_vlan_regionfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "region_config.lag.vlan") - - # Test detection of non-required CINDER_* parameters - region_config = cr.parse_system_config(simple_regionfile) - region_config.set('STORAGE', 'CINDER_BACKEND', 'lvm') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, True) - - region_config = cr.parse_system_config(simple_regionfile) - region_config.set('STORAGE', 'CINDER_DEVICE', - '/dev/disk/by-path/pci-0000:00:0d.0-ata-3.0') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - region_config = cr.parse_system_config(simple_regionfile) - region_config.set('STORAGE', 'CINDER_STORAGE', '10') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - # Test detection of an invalid PXEBOOT_CIDR - region_config = cr.parse_system_config(lag_vlan_regionfile) - region_config.set('REGION2_PXEBOOT_NETWORK', 'PXEBOOT_CIDR', - 'FD00::0000/64') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - region_config.set('REGION2_PXEBOOT_NETWORK', 'PXEBOOT_CIDR', - '192.168.1.0/29') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with 
pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - region_config.remove_option('REGION2_PXEBOOT_NETWORK', 'PXEBOOT_CIDR') - with pytest.raises(configparser.NoOptionError): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(configparser.NoOptionError): - validate(region_config, REGION_CONFIG, None, False) - - # Test overlap of CLM_CIDR - region_config = cr.parse_system_config(lag_vlan_regionfile) - region_config.set('CLM_NETWORK', 'CLM_CIDR', '192.168.203.0/26') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - # Test invalid CLM LAG_MODE - region_config = cr.parse_system_config(lag_vlan_regionfile) - region_config.set('LOGICAL_INTERFACE_1', 'LAG_MODE', '2') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - # Test CLM_VLAN not allowed - region_config = cr.parse_system_config(simple_regionfile) - region_config.set('CLM_NETWORK', 'CLM_VLAN', '123') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - # Test CLM_VLAN missing - region_config = cr.parse_system_config(lag_vlan_regionfile) - region_config.remove_option('CLM_NETWORK', 'CLM_VLAN') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - # Test overlap of CAN_CIDR - 
region_config = cr.parse_system_config(lag_vlan_regionfile) - region_config.set('CAN_NETWORK', 'CAN_CIDR', '192.168.203.0/26') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - region_config.set('CAN_NETWORK', 'CAN_CIDR', '192.168.204.0/26') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - region_config.set('CAN_NETWORK', 'CAN_CIDR', '192.168.205.0/26') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - # Test invalid CAN LAG_MODE - region_config = cr.parse_system_config(lag_vlan_regionfile) - region_config.add_section('LOGICAL_INTERFACE_2') - region_config.set('LOGICAL_INTERFACE_2', 'LAG_INTERFACE', 'Y') - region_config.set('LOGICAL_INTERFACE_2', 'LAG_MODE', '3') - region_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_MTU', '1500') - region_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_PORTS', 'eth3,eth4') - region_config.set('CAN_NETWORK', 'CAN_LOGICAL_INTERFACE', - 'LOGICAL_INTERFACE_2') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - # Test CAN_VLAN overlap - region_config = cr.parse_system_config(lag_vlan_regionfile) - region_config.set('CAN_NETWORK', 'CAN_VLAN', '123') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with 
pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - # Test CAN_VLAN missing - region_config = cr.parse_system_config(lag_vlan_regionfile) - region_config.remove_option('CAN_NETWORK', 'CAN_VLAN') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - # Test missing gateway - region_config = cr.parse_system_config(lag_vlan_regionfile) - region_config.remove_option('CLM_NETWORK', 'CLM_GATEWAY') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) - - # Test two gateways - region_config = cr.parse_system_config(lag_vlan_regionfile) - region_config.set('CAN_NETWORK', 'CAN_GATEWAY', '10.10.10.1') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, region_config, None, None, None, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(region_config, REGION_CONFIG, None, False) diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/test_system_config.py b/controllerconfig/controllerconfig/controllerconfig/tests/test_system_config.py deleted file mode 100644 index 9976d460fa..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/test_system_config.py +++ /dev/null @@ -1,601 +0,0 @@ -""" -Copyright (c) 2014-2019 Wind River Systems, Inc. 
- -SPDX-License-Identifier: Apache-2.0 - -""" - -from six.moves import configparser -import mock -import os -import pytest -import sys - -import controllerconfig.common.exceptions as exceptions -from controllerconfig import validate -from controllerconfig import DEFAULT_CONFIG - -sys.modules['fm_core'] = mock.Mock() - -import controllerconfig.systemconfig as cr # noqa: E402 - - -def _dump_config(config): - """ Prints contents of config object """ - for section in config.sections(): - print("[%s]" % section) - for (name, value) in config.items(section): - print("%s=%s" % (name, value)) - - -def _test_system_config(filename): - """ Test import and generation of answerfile """ - - # Parse the system_config file - system_config = cr.parse_system_config(filename) - - # Dump results for debugging - print("Parsed system_config:\n") - _dump_config(system_config) - - # Validate the system config file - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - - # Validate the system config file. 
- # Using onboard validation since the validator's reference version number - # is only set at build-time when validating offboard - validate(system_config, DEFAULT_CONFIG, None, False) - - -def test_system_config_simple(): - """ Test import of simple system_config file """ - - # Create the path to the system_config file - systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "system_config.simple") - - _test_system_config(systemfile) - - -def test_system_config_ipv6(): - """ Test import of system_config file with ipv6 oam """ - - # Create the path to the system_config file - systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "system_config.ipv6") - - _test_system_config(systemfile) - - -def test_system_config_lag_vlan(): - """ Test import of system_config file with lag and vlan """ - - # Create the path to the system_config file - systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "system_config.lag.vlan") - - _test_system_config(systemfile) - - -def test_system_config_security(): - """ Test import of system_config file with security config """ - - # Create the path to the system_config file - systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "system_config.security") - - _test_system_config(systemfile) - - -def test_system_config_ceph(): - """ Test import of system_config file with ceph config """ - - # Create the path to the system_config file - systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "system_config.ceph") - - _test_system_config(systemfile) - - -def test_system_config_simplex(): - """ Test import of system_config file for AIO-simplex """ - - # Create the path to the system_config file - systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "system_config.simplex") - - _test_system_config(systemfile) - - -def test_system_config_simplex_mgmt(): - """ Test import of system_config file for AIO-simplex with management 
- configuration""" - - # Create the path to the system_config file - systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "system_config.simplex_mgmt") - - _test_system_config(systemfile) - - # Test MGMT_NETWORK parameters that are not allowed - system_config = cr.parse_system_config(systemfile) - system_config.set('MGMT_NETWORK', 'GATEWAY', '192.168.42.1') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - system_config = cr.parse_system_config(systemfile) - system_config.set('MGMT_NETWORK', 'LOGICAL_INTERFACE', - 'LOGICAL_INTERFACE_1') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test overlap with OAM network - system_config = cr.parse_system_config(systemfile) - system_config.set('MGMT_NETWORK', 'CIDR', '10.10.10.0/24') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test IPv6 management CIDR (not supported) - system_config = cr.parse_system_config(systemfile) - system_config.set('MGMT_NETWORK', 'CIDR', 'FD01::0000/64') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test management CIDR that is too small - system_config = cr.parse_system_config(systemfile) - system_config.set('MGMT_NETWORK', 'CIDR', '192.168.42.0/29') - with pytest.raises(exceptions.ConfigFail): - 
cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - -def test_system_config_validation(): - """ Test detection of various errors in system_config file """ - - # Create the path to the system_config files - simple_systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "system_config.simple") - ipv6_systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "system_config.ipv6") - lag_vlan_systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "system_config.lag.vlan") - ceph_systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "system_config.ceph") - static_addr_systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "system_config.static_addr") - - # Test floating outside of OAM_NETWORK CIDR - system_config = cr.parse_system_config(ipv6_systemfile) - system_config.set('OAM_NETWORK', 'IP_FLOATING_ADDRESS', '5555::5') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test non-ipv6 unit address - system_config = cr.parse_system_config(ipv6_systemfile) - system_config.set('OAM_NETWORK', 'IP_UNIT_0_ADDRESS', '10.10.10.3') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test missing pxeboot network when using IPv6 management network - system_config = cr.parse_system_config(ipv6_systemfile) - system_config.remove_section('PXEBOOT_NETWORK') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 
0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test ridiculously sized management network - system_config = cr.parse_system_config(ipv6_systemfile) - system_config.set('MGMT_NETWORK', 'IP_START_ADDRESS', '1234::b:0:0:0') - system_config.set('MGMT_NETWORK', 'IP_END_ADDRESS', - '1234::b:ffff:ffff:ffff') - system_config.remove_option('MGMT_NETWORK', 'IP_FLOATING_ADDRESS') - system_config.remove_option('MGMT_NETWORK', 'IP_UNIT_0_ADDRESS') - system_config.remove_option('MGMT_NETWORK', 'IP_UNIT_1_ADDRESS') - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test using start/end addresses - system_config = cr.parse_system_config(ipv6_systemfile) - system_config.set('OAM_NETWORK', 'IP_START_ADDRESS', 'abcd::2') - system_config.set('OAM_NETWORK', 'IP_END_ADDRESS', 'abcd::4') - system_config.remove_option('OAM_NETWORK', 'IP_FLOATING_ADDRESS') - system_config.remove_option('OAM_NETWORK', 'IP_UNIT_0_ADDRESS') - system_config.remove_option('OAM_NETWORK', 'IP_UNIT_1_ADDRESS') - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test detection of an invalid PXEBOOT_CIDR - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.set('PXEBOOT_NETWORK', 'PXEBOOT_CIDR', - 'FD00::0000/64') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - system_config.set('PXEBOOT_NETWORK', 'PXEBOOT_CIDR', - '192.168.1.0/29') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - 
validate(system_config, DEFAULT_CONFIG, None, False) - - system_config.remove_option('PXEBOOT_NETWORK', 'PXEBOOT_CIDR') - with pytest.raises(configparser.NoOptionError): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(configparser.NoOptionError): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test overlap of MGMT_NETWORK CIDR - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.set('MGMT_NETWORK', 'CIDR', '192.168.203.0/26') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test invalid MGMT_NETWORK LAG_MODE - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.set('LOGICAL_INTERFACE_1', 'LAG_MODE', '2') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test MGMT_NETWORK VLAN not allowed - system_config = cr.parse_system_config(simple_systemfile) - system_config.set('MGMT_NETWORK', 'VLAN', '123') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test MGMT_NETWORK VLAN missing - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.remove_option('MGMT_NETWORK', 'VLAN') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test MGMT_NETWORK start address 
specified without end address - system_config = cr.parse_system_config(simple_systemfile) - system_config.set('MGMT_NETWORK', 'IP_START_ADDRESS', '192.168.204.2') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test MGMT_NETWORK end address specified without start address - system_config = cr.parse_system_config(simple_systemfile) - system_config.set('MGMT_NETWORK', 'IP_END_ADDRESS', '192.168.204.200') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test MGMT_NETWORK start and end range does not have enough addresses - system_config = cr.parse_system_config(static_addr_systemfile) - system_config.set('MGMT_NETWORK', 'IP_START_ADDRESS', '192.168.204.2') - system_config.set('MGMT_NETWORK', 'IP_END_ADDRESS', '192.168.204.8') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test MGMT_NETWORK start address not in subnet - system_config = cr.parse_system_config(simple_systemfile) - system_config.set('MGMT_NETWORK', 'IP_START_ADDRESS', '192.168.200.2') - system_config.set('MGMT_NETWORK', 'IP_END_ADDRESS', '192.168.204.254') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test MGMT_NETWORK end address not in subnet - system_config = cr.parse_system_config(simple_systemfile) - 
system_config.set('MGMT_NETWORK', 'IP_START_ADDRESS', '192.168.204.2') - system_config.set('MGMT_NETWORK', 'IP_END_ADDRESS', '192.168.214.254') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test overlap of CLUSTER_NETWORK CIDR - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.set('CLUSTER_NETWORK', 'CIDR', '192.168.203.0/26') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - system_config.set('CLUSTER_NETWORK', 'CIDR', '192.168.204.0/26') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test invalid CLUSTER_NETWORK LAG_MODE - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.add_section('LOGICAL_INTERFACE_2') - system_config.set('LOGICAL_INTERFACE_2', 'LAG_INTERFACE', 'Y') - system_config.set('LOGICAL_INTERFACE_2', 'LAG_MODE', '3') - system_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_MTU', '1500') - system_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_PORTS', 'eth3,eth4') - system_config.set('CLUSTER_NETWORK', 'LOGICAL_INTERFACE', - 'LOGICAL_INTERFACE_2') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test CLUSTER_NETWORK VLAN overlap - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.set('CLUSTER_NETWORK', 'VLAN', 
'123') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test overlap of OAM_NETWORK CIDR - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.set('OAM_NETWORK', 'CIDR', '192.168.203.0/26') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - system_config.set('OAM_NETWORK', 'CIDR', '192.168.204.0/26') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - system_config.set('OAM_NETWORK', 'CIDR', '192.168.205.0/26') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test invalid OAM_NETWORK LAG_MODE - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.add_section('LOGICAL_INTERFACE_2') - system_config.set('LOGICAL_INTERFACE_2', 'LAG_INTERFACE', 'Y') - system_config.set('LOGICAL_INTERFACE_2', 'LAG_MODE', '3') - system_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_MTU', '1500') - system_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_PORTS', 'eth3,eth4') - system_config.set('OAM_NETWORK', 'LOGICAL_INTERFACE', - 'LOGICAL_INTERFACE_2') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # 
Test OAM_NETWORK VLAN overlap - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.set('OAM_NETWORK', 'VLAN', '123') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - system_config.set('OAM_NETWORK', 'VLAN', '126') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test OAM_NETWORK VLAN missing - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.remove_option('OAM_NETWORK', 'VLAN') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test missing gateway - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.remove_option('MGMT_NETWORK', 'GATEWAY') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test two gateways - system_config = cr.parse_system_config(lag_vlan_systemfile) - system_config.set('OAM_NETWORK', 'GATEWAY', '10.10.10.1') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test detection of unsupported NTP NTP_SERVER - system_config = cr.parse_system_config(simple_systemfile) - system_config.add_section('NTP') - system_config.set('NTP', 
'NTP_SERVER_1', '0.pool.ntp.org') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - - # Test detection of overspecification of MGMT network addresses - system_config = cr.parse_system_config(ceph_systemfile) - system_config.set('MGMT_NETWORK', 'IP_FLOATING_ADDRESS', '192.168.204.3') - system_config.set('MGMT_NETWORK', 'IP_IP_UNIT_0_ADDRESS', '192.168.204.6') - system_config.set('MGMT_NETWORK', 'IP_IP_UNIT_1_ADDRESS', '192.168.204.9') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test detection of overspecification of OAM network addresses - system_config = cr.parse_system_config(ceph_systemfile) - system_config.set('MGMT_NETWORK', 'IP_FLOATING_ADDRESS', '10.10.10.2') - system_config.set('MGMT_NETWORK', 'IP_IP_UNIT_0_ADDRESS', '10.10.10.3') - system_config.set('MGMT_NETWORK', 'IP_IP_UNIT_1_ADDRESS', '10.10.10.4') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test detection of invalid release version - system_config = cr.parse_system_config(ceph_systemfile) - system_config.set('VERSION', 'RELEASE', '15.12') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - -def test_pxeboot_range(): - """ Test import of system_config file for PXEBoot network address """ - - # Create the path to the system_config file - systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", "system_config.pxeboot") 
- - # Test import and generation of answer file - _test_system_config(systemfile) - - # Test detection of invalid PXEBoot network start address - system_config = cr.parse_system_config(systemfile) - system_config.set('PXEBOOT_NETWORK', 'IP_START_ADDRESS', '8.123.122.345') - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test detection of invalid PXEBoot network end address - system_config = cr.parse_system_config(systemfile) - system_config.set('PXEBOOT_NETWORK', 'IP_END_ADDRESS', '128.123.122.345') - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test detection of smaller PXEBoot network end address - system_config = cr.parse_system_config(systemfile) - system_config.set('PXEBOOT_NETWORK', 'IP_END_ADDRESS', '192.168.102.30') - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test detection of PXEBoot network range less than min required (8) - system_config = cr.parse_system_config(systemfile) - system_config.set('PXEBOOT_NETWORK', 'IP_END_ADDRESS', '128.123.122.34') - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - -def test_kubernetes(): - """ Test import of system_config file for kubernetes """ - - # Create the path to the system_config file - systemfile = os.path.join( - os.getcwd(), "controllerconfig/tests/files/", - "system_config.kubernetes") - - # Test import and generation of answer file - _test_system_config(systemfile) - - # Test CLUSTER_NETWORK start address specified without end address - system_config = cr.parse_system_config(systemfile) - system_config.set('CLUSTER_NETWORK', 'IP_START_ADDRESS', '192.168.204.2') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, 
None, False) - - # Test CLUSTER_NETWORK end address specified without start address - system_config = cr.parse_system_config(systemfile) - system_config.set('CLUSTER_NETWORK', 'IP_END_ADDRESS', '192.168.204.200') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test detection of overspecification of CLUSTER network addresses - system_config = cr.parse_system_config(systemfile) - system_config.set('CLUSTER_NETWORK', 'IP_FLOATING_ADDRESS', - '192.168.206.103') - system_config.set('CLUSTER_NETWORK', 'IP_IP_UNIT_0_ADDRESS', - '192.168.206.106') - system_config.set('CLUSTER_NETWORK', 'IP_IP_UNIT_1_ADDRESS', - '192.168.206.109') - with pytest.raises(exceptions.ConfigFail): - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - with pytest.raises(exceptions.ConfigFail): - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test absence of optional DNS configuration - system_config = cr.parse_system_config(systemfile) - system_config.remove_section('DNS') - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test absence of optional docker proxy configuration - system_config = cr.parse_system_config(systemfile) - system_config.remove_section('DOCKER_PROXY') - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - validate(system_config, DEFAULT_CONFIG, None, False) - - # Test absence of optional docker registry configuration - system_config = cr.parse_system_config(systemfile) - system_config.remove_section('DOCKER_REGISTRY') - cr.create_cgcs_config_file(None, system_config, None, None, None, 0, - validate_only=True) - validate(system_config, DEFAULT_CONFIG, None, False) diff --git 
a/controllerconfig/controllerconfig/controllerconfig/tidy_storage.py b/controllerconfig/controllerconfig/controllerconfig/tidy_storage.py index 0aa8baeb09..cfce88d4ba 100644 --- a/controllerconfig/controllerconfig/controllerconfig/tidy_storage.py +++ b/controllerconfig/controllerconfig/controllerconfig/tidy_storage.py @@ -19,11 +19,12 @@ from cinderclient.v3 import client as cinder_client_v3 from glanceclient import Client from cinderclient import utils as c_utils -from controllerconfig.common import log from controllerconfig.common.rest_api_utils import get_token from controllerconfig.common.exceptions import TidyStorageFail -LOG = log.get_logger(__name__) +from oslo_log import log + +LOG = log.getLogger(__name__) KEYSTONE_AUTH_SERVER_RETRY_CNT = 60 KEYSTONE_AUTH_SERVER_WAIT = 1 # 1sec wait per retry @@ -565,8 +566,6 @@ def main(): show_help() exit(1) - log.configure() - result_file = sys.argv[1] try: diff --git a/controllerconfig/controllerconfig/controllerconfig/upgrades/controller.py b/controllerconfig/controllerconfig/controllerconfig/upgrades/controller.py index b46833616a..225cad97ef 100644 --- a/controllerconfig/controllerconfig/controllerconfig/upgrades/controller.py +++ b/controllerconfig/controllerconfig/controllerconfig/upgrades/controller.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2016-2019 Wind River Systems, Inc. +# Copyright (c) 2016-2020 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -42,13 +42,12 @@ from tsconfig.tsconfig import CONTROLLER_UPGRADE_STARTED_FLAG from tsconfig.tsconfig import RESTORE_IN_PROGRESS_FLAG from controllerconfig.common import constants -from controllerconfig.common import log from controllerconfig import utils as cutils -from controllerconfig import backup_restore - from controllerconfig.upgrades import utils -LOG = log.get_logger(__name__) +from oslo_log import log + +LOG = log.getLogger(__name__) POSTGRES_MOUNT_PATH = '/mnt/postgresql' POSTGRES_DUMP_MOUNT_PATH = '/mnt/db_dump' @@ -865,8 +864,6 @@ def main(): exit(1) arg += 1 - log.configure() - if not from_release or not to_release: print("Both the FROM_RELEASE and TO_RELEASE must be specified") exit(1) @@ -955,9 +952,10 @@ def extract_data_from_archive(archive, staging_dir, from_release, to_release): extract_relative_directory(archive, 'config/ssh_config', tmp_config_path + '/ssh_config') + # TODO: Switch this over to use Ansible # Restore certificate files if they are in the archive - backup_restore.restore_etc_ssl_dir(archive, - configpath=tmp_config_path) + # backup_restore.restore_etc_ssl_dir(archive, + # configpath=tmp_config_path) # Extract etc files archive.extract('etc/hostname', '/') @@ -975,11 +973,12 @@ def extract_data_from_archive(archive, staging_dir, from_release, to_release): path = 'config/' + file extract_relative_file(archive, path, tmp_config_path) + # TODO: Switch this over to use Ansible # Extract distributed cloud addn_hosts file if present in archive. 
- if backup_restore.file_exists_in_archive( - archive, 'config/dnsmasq.addn_hosts_dc'): - extract_relative_file( - archive, 'config/dnsmasq.addn_hosts_dc', tmp_config_path) + # if backup_restore.file_exists_in_archive( + # archive, 'config/dnsmasq.addn_hosts_dc'): + # extract_relative_file( + # archive, 'config/dnsmasq.addn_hosts_dc', tmp_config_path) def extract_postgres_data(archive): @@ -1114,7 +1113,8 @@ def upgrade_controller_simplex(backup_file): to_release = metadata['upgrade']['to_release'] check_load_version(to_release) - backup_restore.check_load_subfunctions(archive, staging_dir) + # TODO: Switch this over to use Ansible + # backup_restore.check_load_subfunctions(archive, staging_dir) # Patching is potentially a multi-phase step. # If the controller is impacted by patches from the backup, @@ -1271,7 +1271,8 @@ def upgrade_controller_simplex(backup_file): LOG.info("Generating manifests for %s" % sysinv_constants.CONTROLLER_0_HOSTNAME) - backup_restore.configure_loopback_interface(archive) + # TODO: Switch this over to use Ansible + # backup_restore.configure_loopback_interface(archive) print_log_info("Creating configs...") cutils.create_system_config() @@ -1301,10 +1302,10 @@ def upgrade_controller_simplex(backup_file): cutils.apply_banner_customization() - backup_restore.restore_ldap(archive, backup_restore.ldap_permdir, - staging_dir) - - backup_restore.restore_std_dir(archive, backup_restore.home_permdir) + # TODO: Switch this over to use Ansible + # backup_restore.restore_ldap(archive, backup_restore.ldap_permdir, + # staging_dir) + # backup_restore.restore_std_dir(archive, backup_restore.home_permdir) archive.close() shutil.rmtree(staging_dir, ignore_errors=True) @@ -1352,8 +1353,6 @@ def simplex_main(): exit(1) arg += 1 - log.configure() - # Enforce that the command is being run from the console if cutils.is_ssh_parent(): print ( diff --git a/controllerconfig/controllerconfig/controllerconfig/upgrades/management.py 
b/controllerconfig/controllerconfig/controllerconfig/upgrades/management.py index 2a73d1ef78..fecba26025 100644 --- a/controllerconfig/controllerconfig/controllerconfig/upgrades/management.py +++ b/controllerconfig/controllerconfig/controllerconfig/upgrades/management.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015-2019 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -15,13 +15,13 @@ import subprocess import tsconfig.tsconfig as tsc -from controllerconfig import backup_restore -from controllerconfig.common import log from controllerconfig.common import constants from sysinv.common import constants as sysinv_constants from controllerconfig.upgrades import utils -LOG = log.get_logger(__name__) +from oslo_log import log + +LOG = log.getLogger(__name__) def get_upgrade_databases(shared_services): @@ -197,8 +197,9 @@ def create_simplex_backup(software_upgrade): with open(metadata_filename, 'w') as metadata_file: metadata_file.write(json_data) - backup_filename = get_upgrade_backup_filename(software_upgrade) - backup_restore.backup(backup_filename, constants.BACKUPS_PATH) + # TODO: Switch this over to use Ansible + # backup_filename = get_upgrade_backup_filename(software_upgrade) + # backup_restore.backup(backup_filename, constants.BACKUPS_PATH) LOG.info("Create simplex backup complete") diff --git a/controllerconfig/controllerconfig/controllerconfig/upgrades/utils.py b/controllerconfig/controllerconfig/controllerconfig/upgrades/utils.py index 1b32454f82..230e70e7f8 100644 --- a/controllerconfig/controllerconfig/controllerconfig/upgrades/utils.py +++ b/controllerconfig/controllerconfig/controllerconfig/upgrades/utils.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2016-2019 Wind River Systems, Inc. +# Copyright (c) 2016-2020 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -21,14 +21,13 @@ import yaml from tsconfig.tsconfig import SW_VERSION from tsconfig.tsconfig import PLATFORM_PATH -from controllerconfig import DEFAULT_DOMAIN_NAME from controllerconfig import utils as cutils -from controllerconfig.common import log from controllerconfig.common import constants from sysinv.common import constants as sysinv_constants +from oslo_log import log -LOG = log.get_logger(__name__) +LOG = log.getLogger(__name__) POSTGRES_PATH = '/var/lib/postgresql' POSTGRES_DATA_DIR = os.path.join(POSTGRES_PATH, SW_VERSION) @@ -36,6 +35,9 @@ RABBIT_PATH = '/var/lib/rabbitmq' CONTROLLER_1_HOSTNAME = "controller-1" DB_CONNECTION = "postgresql://%s:%s@127.0.0.1/%s\n" +# well-known default domain name +DEFAULT_DOMAIN_NAME = 'Default' + # Migration script actions ACTION_START = "start" ACTION_MIGRATE = "migrate" diff --git a/controllerconfig/controllerconfig/controllerconfig/utils.py b/controllerconfig/controllerconfig/controllerconfig/utils.py index fbd21decb1..59fec88575 100644 --- a/controllerconfig/controllerconfig/controllerconfig/utils.py +++ b/controllerconfig/controllerconfig/controllerconfig/utils.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2014-2019 Wind River Systems, Inc. +# Copyright (c) 2014-2020 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -8,151 +8,27 @@ Utilities """ -import collections -import errno import glob import os import shutil -import socket import subprocess import time -import sys import yaml -from six.moves import configparser import re import six import netaddr from tsconfig import tsconfig -from sysinv.common import constants as sysinv_constants from controllerconfig.common import constants -from controllerconfig.common import log from controllerconfig.common.exceptions import ValidateFail +from oslo_log import log -LOOPBACK_IFNAME = 'lo' - -NETWORK_SCRIPTS_PATH = '/etc/sysconfig/network-scripts' -NETWORK_SCRIPTS_PREFIX = 'ifcfg' -NETWORK_SCRIPTS_LOOPBACK = '%s-%s' % (NETWORK_SCRIPTS_PREFIX, LOOPBACK_IFNAME) - -BOND_MIIMON_DEFAULT = 100 - - -LOG = log.get_logger(__name__) +LOG = log.getLogger(__name__) DEVNULL = open(os.devnull, 'w') -EXPECTED_SERVICE_NAME_AND_TYPE = ( - {"KEYSTONE_SERVICE_NAME": "keystone", - "KEYSTONE_SERVICE_TYPE": "identity", - "SYSINV_SERVICE_NAME": "sysinv", - "SYSINV_SERVICE_TYPE": "platform", - "PATCHING_SERVICE_NAME": "patching", - "PATCHING_SERVICE_TYPE": "patching", - "NFV_SERVICE_NAME": "vim", - "NFV_SERVICE_TYPE": "nfv", - "FM_SERVICE_NAME": "fm", - "FM_SERVICE_TYPE": "faultmanagement", - "BARBICAN_SERVICE_NAME": "barbican", - "BARBICAN_SERVICE_TYPE": "key-manager", - }) - - -def filesystem_get_free_space(path): - """ Get Free space of directory """ - statvfs = os.statvfs(path) - return (statvfs.f_frsize * statvfs.f_bavail) - - -def directory_get_size(start_dir, regex=None): - """ - Get total size of a directory tree in bytes - :param start_dir: top of tree - :param regex: only include files matching this regex (if provided) - :return: size in bytes - """ - total_size = 0 - for dirpath, _, filenames in os.walk(start_dir): - for filename in filenames: - if regex is None or regex.match(filename): - filep = os.path.join(dirpath, filename) - try: - total_size += os.path.getsize(filep) - except OSError as e: - if 
e.errno != errno.ENOENT: - raise e - return total_size - - -def print_bytes(sizeof): - """ Pretty print bytes """ - for size in ['Bytes', 'KB', 'MB', 'GB', 'TB']: - if abs(sizeof) < 1024.0: - return "%3.1f %s" % (sizeof, size) - sizeof /= 1024.0 - - -def modprobe_drbd(): - """Load DRBD module""" - try: - mod_parms = subprocess.check_output(['drbdadm', 'sh-mod-parms'], - close_fds=True).rstrip() - subprocess.call(["modprobe", "-s", "drbd", mod_parms], stdout=DEVNULL) - - except subprocess.CalledProcessError: - LOG.error("Failed to load drbd module") - raise - - -def drbd_start(resource): - """Start drbd resource""" - try: - subprocess.check_call(["drbdadm", "up", resource], - stdout=DEVNULL) - - subprocess.check_call(["drbdadm", "primary", resource], - stdout=DEVNULL) - - except subprocess.CalledProcessError: - LOG.error("Failed to start drbd %s" % resource) - raise - - -def drbd_stop(resource): - """Stop drbd resource""" - try: - subprocess.check_call(["drbdadm", "secondary", resource], - stdout=DEVNULL) - # Allow time for demotion to be processed - time.sleep(1) - subprocess.check_call(["drbdadm", "down", resource], stdout=DEVNULL) - - except subprocess.CalledProcessError: - LOG.error("Failed to stop drbd %s" % resource) - raise - - -def mount(device, directory): - """Mount a directory""" - try: - subprocess.check_call(["mount", device, directory], stdout=DEVNULL) - - except subprocess.CalledProcessError: - LOG.error("Failed to mount %s filesystem" % directory) - raise - - -def umount(directory): - """Unmount a directory""" - try: - subprocess.check_call(["umount", directory], stdout=DEVNULL) - - except subprocess.CalledProcessError: - LOG.error("Failed to umount %s filesystem" % directory) - raise - def start_service(name): """ Start a systemd service """ @@ -181,48 +57,6 @@ def restart_service(name): raise -def start_lsb_service(name): - """ Start a Linux Standard Base service """ - try: - script = os.path.join("/etc/init.d", name) - # Call the script with 
SYSTEMCTL_SKIP_REDIRECT=1 in the environment - subprocess.check_call([script, "start"], - env=dict(os.environ, - **{"SYSTEMCTL_SKIP_REDIRECT": "1"}), - stdout=DEVNULL) - except subprocess.CalledProcessError: - LOG.error("Failed to start %s service" % name) - raise - - -def stop_lsb_service(name): - """ Stop a Linux Standard Base service """ - try: - script = os.path.join("/etc/init.d", name) - # Call the script with SYSTEMCTL_SKIP_REDIRECT=1 in the environment - subprocess.check_call([script, "stop"], - env=dict(os.environ, - **{"SYSTEMCTL_SKIP_REDIRECT": "1"}), - stdout=DEVNULL) - except subprocess.CalledProcessError: - LOG.error("Failed to stop %s service" % name) - raise - - -def restart_lsb_service(name): - """ Restart a Linux Standard Base service """ - try: - script = os.path.join("/etc/init.d", name) - # Call the script with SYSTEMCTL_SKIP_REDIRECT=1 in the environment - subprocess.check_call([script, "restart"], - env=dict(os.environ, - **{"SYSTEMCTL_SKIP_REDIRECT": "1"}), - stdout=DEVNULL) - except subprocess.CalledProcessError: - LOG.error("Failed to restart %s service" % name) - raise - - def check_sm_service(service, state): """ Check whether an SM service has the supplied state """ try: @@ -245,34 +79,6 @@ def wait_sm_service(service, timeout=180): return False -def is_active(service): - """ Check whether an SM service is active """ - return check_sm_service(service, 'enabled-active') - - -def get_controller_hostname(): - """ - Get the hostname for this controller - :return: controller hostname - """ - return socket.gethostname() - - -def get_mate_controller_hostname(): - """ - Get the hostname for the mate controller - :return: mate controller hostname - """ - my_hostname = socket.gethostname() - if my_hostname.endswith('-0'): - postfix = '-1' - elif my_hostname.endswith('-1'): - postfix = '-0' - else: - raise Exception("Invalid controller hostname") - return my_hostname.rsplit('-', 1)[0] + postfix - - def get_address_from_hosts_file(hostname): """ 
Get the IP address of a host from the /etc/hosts file @@ -286,303 +92,6 @@ def get_address_from_hosts_file(hostname): raise Exception("Hostname %s not found in /etc/hosts" % hostname) -def validate_and_normalize_mac(address): - """Validate a MAC address and return normalized form. - - Checks whether the supplied MAC address is formally correct and - normalize it to all lower case. - - :param address: MAC address to be validated and normalized. - :returns: Normalized and validated MAC address. - :raises: InvalidMAC If the MAC address is not valid. - - """ - if not is_valid_mac(address): - raise Exception("InvalidMAC %s" % address) - return address.lower() - - -def is_valid_ip(address): - if not is_valid_ipv4(address): - return is_valid_ipv6(address) - return True - - -def lag_mode_to_str(lag_mode): - if lag_mode == 0: - return "balance-rr" - if lag_mode == 1: - return "active-backup" - elif lag_mode == 2: - return "balance-xor" - elif lag_mode == 3: - return "broadcast" - elif lag_mode == 4: - return "802.3ad" - elif lag_mode == 5: - return "balance-tlb" - elif lag_mode == 6: - return "balance-alb" - else: - raise Exception( - "Invalid LAG_MODE value of %d. Valid values: 0-6" % lag_mode) - - -def is_combined_load(): - return 'worker' in tsconfig.subfunctions - - -def get_system_type(): - if is_combined_load(): - return sysinv_constants.TIS_AIO_BUILD - return sysinv_constants.TIS_STD_BUILD - - -def get_security_profile(): - eprofile = sysinv_constants.SYSTEM_SECURITY_PROFILE_EXTENDED - if tsconfig.security_profile == eprofile: - return eprofile - return sysinv_constants.SYSTEM_SECURITY_PROFILE_STANDARD - - -def is_cpe(): - return get_system_type() == sysinv_constants.TIS_AIO_BUILD - - -def get_interface_config_common(device, mtu=None): - """ - Return the interface configuration parameters that is common to all - device types. 
- """ - parameters = collections.OrderedDict() - parameters['BOOTPROTO'] = 'none' - parameters['ONBOOT'] = 'yes' - parameters['DEVICE'] = device - # Increased to accommodate devices that require more time to - # complete link auto-negotiation - parameters['LINKDELAY'] = '20' - if mtu: - parameters['MTU'] = mtu - return parameters - - -def get_interface_config_ipv4(ip_address, ip_subnet, ip_gateway): - """ - Return the interface configuration parameters for all IPv4 static - addressing. - """ - parameters = collections.OrderedDict() - parameters['IPADDR'] = ip_address - parameters['NETMASK'] = ip_subnet.netmask - parameters['BROADCAST'] = ip_subnet.broadcast - if ip_gateway: - parameters['GATEWAY'] = ip_gateway - return parameters - - -def get_interface_config_ipv6(ip_address, ip_subnet, ip_gateway): - """ - Return the interface configuration parameters for all IPv6 static - addressing. - """ - parameters = collections.OrderedDict() - parameters['IPV6INIT'] = 'yes' - parameters['IPV6ADDR'] = netaddr.IPNetwork('%s/%u' % (ip_address, - ip_subnet.prefixlen)) - if ip_gateway: - parameters['IPV6_DEFAULTGW'] = ip_gateway - return parameters - - -def get_interface_config_static(ip_address, ip_subnet, ip_gateway=None): - """ - Return the interface configuration parameters for all IP static - addressing. - """ - if netaddr.IPAddress(ip_address).version == 4: - return get_interface_config_ipv4(ip_address, ip_subnet, ip_gateway) - else: - return get_interface_config_ipv6(ip_address, ip_subnet, ip_gateway) - - -def write_interface_config_file(device, parameters): - """ - Write interface configuration parameters to the network scripts - directory named after the supplied device. 
- - :param device device name as str - :param parameters dict of parameters - """ - filename = os.path.join(NETWORK_SCRIPTS_PATH, "%s-%s" % - (NETWORK_SCRIPTS_PREFIX, device)) - try: - with open(filename, 'w') as f: - for parameter, value in parameters.items(): - f.write("%s=%s\n" % (parameter, str(value))) - except IOError: - LOG.error("Failed to create file: %s" % filename) - raise - - -def write_interface_config_ethernet(device, mtu=None, parameters=None): - """Write the interface configuration for an Ethernet device.""" - config = get_interface_config_common(device, mtu) - if parameters: - config.update(parameters) - write_interface_config_file(device, config) - - -def write_interface_config_vlan(device, mtu, parameters=None): - """Write the interface configuration for a VLAN device.""" - config = get_interface_config_vlan() - if parameters: - config.update(parameters) - write_interface_config_ethernet(device, mtu, parameters=config) - - -def write_interface_config_slave(device, master, parameters=None): - """Write the interface configuration for a bond slave device.""" - config = get_interface_config_slave(master) - if parameters: - config.update(parameters) - write_interface_config_ethernet(device, parameters=config) - - -def write_interface_config_bond(device, mtu, mode, txhash, miimon, - member1, member2, parameters=None): - """Write the interface configuration for a bond master device.""" - config = get_interface_config_bond(mode, txhash, miimon) - if parameters: - config.update(parameters) - write_interface_config_ethernet(device, mtu, parameters=config) - - # create slave device configuration files - if member1: - write_interface_config_slave(member1, device) - if member2: - write_interface_config_slave(member2, device) - - -def get_interface_config_vlan(): - """ - Return the interface configuration parameters for all IP static - addressing. 
- """ - parameters = collections.OrderedDict() - parameters['VLAN'] = 'yes' - return parameters - - -def get_interface_config_slave(master): - """ - Return the interface configuration parameters for bond interface - slave devices. - """ - parameters = collections.OrderedDict() - parameters['MASTER'] = master - parameters['SLAVE'] = 'yes' - parameters['PROMISC'] = 'yes' - return parameters - - -def get_interface_config_bond(mode, txhash, miimon): - """ - Return the interface configuration parameters for bond interface - master devices. - """ - options = "mode=%s miimon=%s" % (mode, miimon) - - if txhash: - options += " xmit_hash_policy=%s" % txhash - - if mode == constants.LAG_MODE_8023AD: - options += " lacp_rate=fast" - - parameters = collections.OrderedDict() - parameters['BONDING_OPTS'] = "\"%s\"" % options - return parameters - - -def remove_interface_config_files(stdout=None, stderr=None): - """ - Remove all existing interface configuration files. - """ - files = glob.glob1(NETWORK_SCRIPTS_PATH, "%s-*" % NETWORK_SCRIPTS_PREFIX) - for file in [f for f in files if f != NETWORK_SCRIPTS_LOOPBACK]: - ifname = file[len(NETWORK_SCRIPTS_PREFIX) + 1:] # remove prefix - subprocess.check_call(["ifdown", ifname], - stdout=stdout, stderr=stderr) - os.remove(os.path.join(NETWORK_SCRIPTS_PATH, file)) - - -def remove_interface_ip_address(device, ip_address, ip_subnet, - stdout=None, stderr=None): - """Remove an IP address from an interface""" - subprocess.check_call( - ["ip", "addr", "del", - str(ip_address) + "/" + str(ip_subnet.prefixlen), - "dev", device], - stdout=stdout, stderr=stderr) - - -def send_interface_garp(device, ip_address, stdout=None, stderr=None): - """Send a GARP message for the supplied address""" - subprocess.call( - ["arping", "-c", "3", "-A", "-q", "-I", - device, str(ip_address)], - stdout=stdout, stderr=stderr) - - -def restart_networking(stdout=None, stderr=None): - """ - Restart networking services. 
- """ - # Kill any leftover dhclient process from the boot - subprocess.call(["pkill", "dhclient"]) - - # remove any existing IP addresses - ifs = glob.glob1('/sys/class/net', "*") - for i in [i for i in ifs if i != LOOPBACK_IFNAME]: - subprocess.call( - ["ip", "link", "set", "dev", i, "down"]) - subprocess.call( - ["ip", "addr", "flush", "dev", i]) - subprocess.call( - ["ip", "-6", "addr", "flush", "dev", i]) - - subprocess.check_call(["systemctl", "restart", "network"], - stdout=stdout, stderr=stderr) - - -def output_to_dict(output): - dict = {} - output = [_f for _f in output.split('\n') if _f] - - for row in output: - values = row.split() - if len(values) != 2: - raise Exception("The following output does not respect the " - "format: %s" % row) - dict[values[1]] = values[0] - - return dict - - -def get_install_uuid(): - """ Get the install uuid from the feed directory. """ - uuid_fname = None - try: - uuid_dir = '/www/pages/feed/rel-' + tsconfig.SW_VERSION - uuid_fname = os.path.join(uuid_dir, 'install_uuid') - with open(uuid_fname, 'r') as uuid_file: - install_uuid = uuid_file.readline().rstrip() - except IOError: - LOG.error("Failed to open file: %s", uuid_fname) - raise Exception("Failed to retrieve install UUID") - - return install_uuid - - def write_simplex_flag(): """ Write simplex flag. 
""" simplex_flag = "/etc/platform/simplex" @@ -634,37 +143,6 @@ def apply_manifest(controller_address_0, personality, manifest, hieradata, raise Exception(msg) -def create_system_controller_config(filename): - """ Create any additional parameters needed for system controller""" - # set keystone endpoint region name and sysinv keystone authtoken - # region name - config = { - 'keystone::endpoint::region': - sysinv_constants.SYSTEM_CONTROLLER_REGION, - 'sysinv::region_name': - sysinv_constants.SYSTEM_CONTROLLER_REGION, - } - try: - with open(filename, 'w') as f: - yaml.dump(config, f, default_flow_style=False) - except Exception: - LOG.exception("failed to write config file: %s" % filename) - raise - - -def create_static_config(): - cmd = ["/usr/bin/sysinv-puppet", - "create-static-config", - constants.HIERADATA_WORKDIR] - try: - os.makedirs(constants.HIERADATA_WORKDIR) - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - msg = "Failed to create puppet hiera static config" - print(msg) - raise Exception(msg) - - def create_system_config(): cmd = ["/usr/bin/sysinv-puppet", "create-system-config", @@ -692,34 +170,6 @@ def create_host_config(hostname=None): raise Exception(msg) -def shutdown_file_systems(): - """ Shutdown filesystems """ - - umount("/var/lib/postgresql") - drbd_stop("drbd-pgsql") - - stop_service("www-pages-helm_charts.mount") - umount("/opt/platform") - drbd_stop("drbd-platform") - - umount("/opt/extension") - drbd_stop("drbd-extension") - - if os.path.exists("/opt/patch-vault"): - umount("/opt/patch-vault") - drbd_stop("drbd-patch-vault") - - umount("/var/lib/rabbitmq") - drbd_stop("drbd-rabbit") - - stop_service("etcd.service") - stop_service("opt-etcd.mount") - drbd_stop("drbd-etcd") - - umount("/var/lib/docker-distribution") - drbd_stop("drbd-dockerdistribution") - - def persist_config(): """Copy temporary config files into new DRBD filesystem""" @@ -862,24 +312,6 @@ def configure_hostname(hostname): raise Exception("Failed to 
configure hostname") -def progress(steps, step, action, result, newline=False): - """Display progress.""" - if steps == 0: - hashes = 45 - percentage = 100 - else: - hashes = (step * 45) / steps - percentage = (step * 100) / steps - - sys.stdout.write("\rStep {0:{width}d} of {1:d} [{2:45s}] " - "[{3:d}%]".format(min(step, steps), steps, - '#' * hashes, percentage, - width=len(str(steps)))) - if step == steps or newline: - sys.stdout.write("\n") - sys.stdout.flush() - - def touch(fname): with open(fname, 'a'): os.utime(fname, None) @@ -898,47 +330,6 @@ def is_ssh_parent(): return False -def is_valid_vlan(vlan): - """Determine whether vlan is valid.""" - try: - if 0 < int(vlan) < 4095: - return True - else: - return False - except (ValueError, TypeError): - return False - - -def is_mtu_valid(mtu): - """Determine whether a mtu is valid.""" - try: - if int(mtu) < 576: - return False - elif int(mtu) > 9216: - return False - else: - return True - except (ValueError, TypeError): - return False - - -def is_valid_hostname(hostname): - """Determine whether a hostname is valid as per RFC 1123.""" - - # Maximum length of 255 - if not hostname or len(hostname) > 255: - return False - # Allow a single dot on the right hand side - if hostname[-1] == ".": - hostname = hostname[:-1] - # Create a regex to ensure: - # - hostname does not begin or end with a dash - # - each segment is 1 to 63 characters long - # - valid characters are A-Z (any case) and 0-9 - valid_re = re.compile("(?!-)[A-Z\d-]{1,63}(?/dev/null -else - echo "Admin credentials not found" - exit -fi - -# Delete all the servers -echo "Deleting all servers [`openstack server list --all`]" -found=false -for i in $(openstack server list --all -c ID -f value); do - `openstack server delete $i &> /dev/null` - echo $i deleted - found=true -done -if $found; then - sleep 30 -fi -echo "Deleted all servers [`openstack server list --all`]" -# Delete all the volumes -echo "Deleting all volumes [`openstack volume list --all`]" 
-found=false -for i in $(openstack volume list --all -c ID -f value); do - `openstack volume delete $i &> /dev/null` - echo $i deleted - found=true -done -if $found; then - sleep 30 -fi -echo "Deleted all volumes [`openstack volume list --all`]" - diff --git a/controllerconfig/controllerconfig/scripts/install_clone.py b/controllerconfig/controllerconfig/scripts/install_clone.py deleted file mode 100755 index 1aaa996106..0000000000 --- a/controllerconfig/controllerconfig/scripts/install_clone.py +++ /dev/null @@ -1,321 +0,0 @@ -#!/usr/bin/python -# -# Copyright (c) 2017 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -import os -import time -import uuid -import shutil -import tempfile -import subprocess -from six.moves import configparser - -import tsconfig.tsconfig as tsconfig -from controllerconfig.common import log -import controllerconfig.utils as utils -import controllerconfig.sysinv_api as sysinv -import controllerconfig.backup_restore as backup_restore -import controllerconfig.clone as clone -from controllerconfig.common.exceptions import CloneFail -from sysinv.common import constants as si_const - -LOG = log.get_logger("cloning") -DEVNULL = open(os.devnull, 'w') -INI_FILE = os.path.join("/", clone.CLONE_ARCHIVE_DIR, clone.CLONE_ISO_INI) -SECTION = "clone_iso" -parser = configparser.SafeConfigParser() -clone_name = "" - - -def console_log(str, err=False): - """ Log onto console also """ - if err: - str = "Failed to install clone-image. 
" + str - LOG.error(str) - else: - LOG.info(str) - print("\n" + str) - - -def persist(key, value): - """ Write into ini file """ - parser.set(SECTION, key, value) - with open(INI_FILE, 'w') as f: - parser.write(f) - - -def set_result(value): - """ Set the result of installation of clone image """ - persist(clone.RESULT, value) - persist(clone.INSTALLED, time.strftime("%Y-%m-%d %H:%M:%S %Z")) - - -def validate_hardware_compatibility(): - """ validate if cloned-image can be installed on this h/w """ - valid = True - disk_paths = "" - if parser.has_option(SECTION, "disks"): - disk_paths = parser.get(SECTION, "disks") - if not disk_paths: - console_log("Missing value [disks] in ini file") - valid = False - for d in disk_paths.split(): - disk_path, size = d.split('#') - if os.path.exists('/dev/disk/by-path/' + disk_path): - LOG.info("Disk [{}] exists".format(disk_path)) - disk_size = clone.get_disk_size('/dev/disk/by-path/' + - disk_path) - if int(disk_size) >= int(size): - LOG.info("Disk size is good: {} >= {}" - .format(utils.print_bytes(int(disk_size)), - utils.print_bytes(int(size)))) - else: - console_log("Not enough disk size[{}], " - "found:{} looking_for:{}".format( - disk_path, utils.print_bytes(int(disk_size)), - utils.print_bytes(int(size))), err=True) - valid = False - else: - console_log("Disk [{}] does not exist!" - .format(disk_path), err=True) - valid = False - - interfaces = "" - if parser.has_option(SECTION, "interfaces"): - interfaces = parser.get(SECTION, "interfaces") - if not interfaces: - console_log("Missing value [interfaces] in ini file") - valid = False - for f in interfaces.split(): - if os.path.exists('/sys/class/net/' + f): - LOG.info("Interface [{}] exists".format(f)) - else: - console_log("Interface [{}] does not exist!" 
- .format(f), err=True) - valid = False - - maxcpuid = "" - if parser.has_option(SECTION, "cpus"): - maxcpuid = parser.get(SECTION, "cpus") - if not maxcpuid: - console_log("Missing value [cpus] in ini file") - valid = False - else: - my_maxcpuid = clone.get_online_cpus() - if int(maxcpuid) <= int(my_maxcpuid): - LOG.info("Got enough cpus {},{}".format( - maxcpuid, my_maxcpuid)) - else: - console_log("Not enough CPUs, found:{} looking_for:{}" - .format(my_maxcpuid, maxcpuid), err=True) - valid = False - - mem_total = "" - if parser.has_option(SECTION, "mem"): - mem_total = parser.get(SECTION, "mem") - if not mem_total: - console_log("Missing value [mem] in ini file") - valid = False - else: - my_mem_total = clone.get_total_mem() - # relaxed RAM check: within 1 GiB - if (int(mem_total) - (1024 * 1024)) <= int(my_mem_total): - LOG.info("Got enough memory {},{}".format( - mem_total, my_mem_total)) - else: - console_log("Not enough memory; found:{} kB, " - "looking for a minimum of {} kB" - .format(my_mem_total, mem_total), err=True) - valid = False - - if not valid: - console_log("Validation failure!") - set_result(clone.FAIL) - time.sleep(20) - exit(1) - - console_log("Successful validation") - - -def update_sysuuid_in_archive(tmpdir): - """Update system uuid in system archive file.""" - sysuuid = str(uuid.uuid4()) - clone.find_and_replace( - [os.path.join(tmpdir, 'postgres/sysinv.sql.data')], - "CLONEISO_SYSTEM_UUID", sysuuid) - LOG.info("System uuid updated [%s]" % sysuuid) - - -def update_db(archive_dir, backup_name): - """ Update DB before restore """ - path_to_archive = os.path.join(archive_dir, backup_name) - LOG.info("Updating system archive [%s] DB." 
% path_to_archive) - tmpdir = tempfile.mkdtemp(dir=archive_dir) - try: - subprocess.check_call( - ['gunzip', path_to_archive + '.tgz'], - stdout=DEVNULL, stderr=DEVNULL) - # Extract only postgres dir to update system uuid - subprocess.check_call( - ['tar', '-x', - '--directory=' + tmpdir, - '-f', path_to_archive + '.tar', - 'postgres'], - stdout=DEVNULL, stderr=DEVNULL) - update_sysuuid_in_archive(tmpdir) - subprocess.check_call( - ['tar', '--update', - '--directory=' + tmpdir, - '-f', path_to_archive + '.tar', - 'postgres'], - stdout=DEVNULL, stderr=DEVNULL) - subprocess.check_call(['gzip', path_to_archive + '.tar']) - shutil.move(path_to_archive + '.tar.gz', path_to_archive + '.tgz') - - except Exception as e: - LOG.error("Update of system archive {} failed {}".format( - path_to_archive, str(e))) - raise CloneFail("Failed to update system archive") - - finally: - shutil.rmtree(tmpdir, ignore_errors=True) - - -def config_worker(): - """ - Enable worker functionality for AIO system. - :return: True if worker-config-complete is executed - """ - if utils.get_system_type() == si_const.TIS_AIO_BUILD: - console_log("Applying worker manifests for {}. " - "Node will reboot on completion." - .format(utils.get_controller_hostname())) - sysinv.do_worker_config_complete(utils.get_controller_hostname()) - time.sleep(30) - # worker-config-complete has no logs to console. So, wait - # for some time before showing the login prompt. - for i in range(1, 10): - console_log("worker-config in progress..") - time.sleep(30) - console_log("Timed out on do_worker_config_complete") - raise CloneFail("Timed out on do_worker_config_complete") - return True - else: - # worker_config_complete is not needed. 
- return False - - -def finalize_install(): - """ Complete the installation """ - subprocess.call(["rm", "-f", tsconfig.CONFIG_PATH + '/dnsmasq.leases']) - console_log("Updating system parameters...") - i = 1 - system_update = False - # Retries if sysinv is not yet ready - while i < 10: - time.sleep(20) - LOG.info("Attempt %d to update system parameters..." % i) - try: - if sysinv.update_clone_system('Cloned_from_' + clone_name, - utils.get_controller_hostname()): - system_update = True - break - except Exception: - # Sysinv might not be ready yet - pass - i += 1 - if not system_update: - LOG.error("System update failed") - raise CloneFail("System update failed") - - try: - output = subprocess.check_output(["finish_install_clone.sh"], - stderr=subprocess.STDOUT) - LOG.info("finish_install_clone out: {}".format(output)) - except Exception: - console_log("Failed to cleanup stale OpenStack resources. " - "Manually delete the Volumes and Instances.") - - -def cleanup(): - """ Cleanup after installation """ - LOG.info("Cleaning up...") - subprocess.call(['systemctl', 'disable', 'install-clone'], stderr=DEVNULL) - OLD_FILE = os.path.join(tsconfig.PLATFORM_CONF_PATH, clone.CLONE_ISO_INI) - if os.path.exists(OLD_FILE): - os.remove(OLD_FILE) - if os.path.exists(INI_FILE): - os.chmod(INI_FILE, 0o400) - shutil.move(INI_FILE, tsconfig.PLATFORM_CONF_PATH) - shutil.rmtree(os.path.join("/", clone.CLONE_ARCHIVE_DIR), - ignore_errors=True) - - -log.configure() -if os.path.exists(INI_FILE): - try: - parser.read(INI_FILE) - if parser.has_section(SECTION): - clone_name = parser.get(SECTION, clone.NAME) - LOG.info("System archive [%s] to be installed." 
% clone_name) - - first_boot = False - last_result = clone.IN_PROGRESS - if not parser.has_option(SECTION, clone.RESULT): - # first boot after cloning - first_boot = True - else: - last_result = parser.get(SECTION, clone.RESULT) - LOG.info("Last attempt to install clone was [{}]" - .format(last_result)) - - if last_result == clone.IN_PROGRESS: - if first_boot: - update_db(os.path.join("/", clone.CLONE_ARCHIVE_DIR), - clone_name + '_system') - else: - # Booting up after patch application, do validation - validate_hardware_compatibility() - - console_log("+++++ Starting to install clone-image [{}] +++++" - .format(clone_name)) - set_result(clone.IN_PROGRESS) - clone_arch_path = os.path.join("/", clone.CLONE_ARCHIVE_DIR, - clone_name) - if (backup_restore.RESTORE_RERUN_REQUIRED == - backup_restore.restore_system( - clone_arch_path + "_system.tgz", - clone=True)): - # If there are no patches to be applied, run validation - # code and resume restore. If patches were applied, node - # will be rebooted and validate will after reboot. 
- validate_hardware_compatibility() - LOG.info("validate passed, resuming restore...") - backup_restore.restore_system( - clone_arch_path + "_system.tgz", clone=True) - console_log("System archive installed from [%s]" % clone_name) - backup_restore.restore_images(clone_arch_path + "_images.tgz", - clone=True) - console_log("Images archive installed from [%s]" % clone_name) - finalize_install() - set_result(clone.OK) - if not config_worker(): - # do cleanup if worker_config_complete is not required - cleanup() - elif last_result == clone.OK: - # Installation completed successfully before last reboot - cleanup() - else: - LOG.error("Bad file: {}".format(INI_FILE)) - set_result(clone.FAIL) - exit(1) - except Exception as e: - console_log("Clone [%s] installation failed" % clone_name) - LOG.exception("install failed") - set_result(clone.FAIL) - exit(1) -else: - console_log("nothing to do, Not installing clone?") diff --git a/controllerconfig/controllerconfig/scripts/keyringstaging b/controllerconfig/controllerconfig/scripts/keyringstaging deleted file mode 100755 index d3d692c56f..0000000000 --- a/controllerconfig/controllerconfig/scripts/keyringstaging +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/python - -# -# Copyright (c) 2014 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -import keyring -import os -import sys - -def get_stealth_password(): - """Get the stealth password vault for manifest to run""" - orig_root = os.environ.get('XDG_DATA_HOME', None) - os.environ["XDG_DATA_HOME"] = "/tmp" - - stealth_pw = keyring.get_password("CGCS", "admin") - - if orig_root is not None: - os.environ("XDG_DATA_HOME",orig_root) - else: - del os.environ["XDG_DATA_HOME"] - return stealth_pw - -if __name__ == "__main__": - sys.stdout.write(get_stealth_password()) - sys.stdout.flush() - sys.exit(0) - diff --git a/controllerconfig/controllerconfig/setup.py b/controllerconfig/controllerconfig/setup.py index b8ddf8a94c..c7e3a6b4a6 100644 --- a/controllerconfig/controllerconfig/setup.py +++ b/controllerconfig/controllerconfig/setup.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015-2017 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -18,9 +18,6 @@ setup( include_package_data=False, entry_points={ 'console_scripts': [ - 'config_controller = controllerconfig.systemconfig:main', - 'config_region = controllerconfig.regionconfig:region_main', - 'config_subcloud = controllerconfig.regionconfig:subcloud_main', 'config_management = controllerconfig.config_management:main', 'upgrade_controller = controllerconfig.upgrades.controller:main', 'upgrade_controller_simplex = ' diff --git a/controllerconfig/controllerconfig/tox.ini b/controllerconfig/controllerconfig/tox.ini index c8dafd41cd..9a834423c8 100644 --- a/controllerconfig/controllerconfig/tox.ini +++ b/controllerconfig/controllerconfig/tox.ini @@ -4,7 +4,7 @@ # and then run "tox" from this directory. [tox] -envlist = flake8, py27, pylint +envlist = flake8, pylint # Tox does not work if the path to the workdir is too long, so move it to /tmp toxworkdir = /tmp/{env:USER}_cctox stxdir = {toxinidir}/../../.. 
@@ -41,21 +41,13 @@ commands = flake8 {posargs} # H101: Use TODO(NAME) # H102: Apache 2.0 license header not found # H104: File contains nothing but comments -# H238: old style class declaration, use new style (inherit from `object`) # H306: imports not in alphabetical order # H401: docstring should not start with a space -# H403: multi line docstrings should end on a new line # H404: multi line docstring should start without a leading new line # H405: multi line docstring summary not separated with an empty line -ignore = H101,H102,H104,H238,H306,H401,H403,H404,H405 +ignore = H101,H102,H104,H306,H401,H404,H405 exclude = build -[testenv:py27] -basepython = python2.7 -commands = - find . -type f -name "*.pyc" -delete - py.test {posargs} - [testenv:cover] basepython = python2.7 deps = {[testenv]deps} diff --git a/controllerconfig/controllerconfig/upgrade-scripts/16-neutron-move-bindings-off-controller-1.py b/controllerconfig/controllerconfig/upgrade-scripts/16-neutron-move-bindings-off-controller-1.py index a6ebff4451..42a3e78518 100755 --- a/controllerconfig/controllerconfig/upgrade-scripts/16-neutron-move-bindings-off-controller-1.py +++ b/controllerconfig/controllerconfig/upgrade-scripts/16-neutron-move-bindings-off-controller-1.py @@ -14,9 +14,9 @@ import psycopg2 import sys from psycopg2.extras import RealDictCursor -from controllerconfig.common import log +from oslo_log import log -LOG = log.get_logger(__name__) +LOG = log.getLogger(__name__) def main(): diff --git a/controllerconfig/controllerconfig/upgrade-scripts/20-sysinv-retire-ceph-cache-tier-sp.py b/controllerconfig/controllerconfig/upgrade-scripts/20-sysinv-retire-ceph-cache-tier-sp.py index 425dcf7f1a..d071f9f859 100644 --- a/controllerconfig/controllerconfig/upgrade-scripts/20-sysinv-retire-ceph-cache-tier-sp.py +++ b/controllerconfig/controllerconfig/upgrade-scripts/20-sysinv-retire-ceph-cache-tier-sp.py @@ -13,9 +13,9 @@ import sys from sysinv.common import constants from psycopg2.extras import 
RealDictCursor -from controllerconfig.common import log +from oslo_log import log -LOG = log.get_logger(__name__) +LOG = log.getLogger(__name__) # Sections that need to be removed from retired Ceph cache tiering feature SERVICE_PARAM_SECTION_CEPH_CACHE_TIER = 'cache_tiering' diff --git a/controllerconfig/opensuse/controllerconfig.spec b/controllerconfig/opensuse/controllerconfig.spec index eb9e0219c8..b578a9dc53 100644 --- a/controllerconfig/opensuse/controllerconfig.spec +++ b/controllerconfig/opensuse/controllerconfig.spec @@ -55,10 +55,7 @@ Configuration for the Controller node. #install -m 644 dist/*.whl $RPM_BUILD_ROOT/wheels/ install -d -m 755 %{buildroot}%{local_bindir} -install -p -D -m 700 scripts/keyringstaging %{buildroot}%{local_bindir}/keyringstaging install -p -D -m 700 scripts/openstack_update_admin_password %{buildroot}%{local_bindir}/openstack_update_admin_password -install -p -D -m 700 scripts/install_clone.py %{buildroot}%{local_bindir}/install_clone -install -p -D -m 700 scripts/finish_install_clone.sh %{buildroot}%{local_bindir}/finish_install_clone.sh install -d -m 755 %{buildroot}%{local_goenabledd} install -p -D -m 700 scripts/config_goenabled_check.sh %{buildroot}%{local_goenabledd}/config_goenabled_check.sh diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py index 650614d652..3bc8340e01 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py @@ -16,7 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. # -# Copyright (c) 2013-2019 Wind River Systems, Inc. +# Copyright (c) 2013-2020 Wind River Systems, Inc. 
 #
 # SPDX-License-Identifier: Apache-2.0
 
@@ -41,7 +41,6 @@ import wsme
 import wsmeext.pecan as wsme_pecan
 from wsme import types as wtypes
 
-from controllerconfig import HOST_XML_ATTRIBUTES
 from fm_api import constants as fm_constants
 from fm_api import fm_api
 from pecan import expose
@@ -98,6 +97,12 @@ from sysinv.common import health
 LOG = log.getLogger(__name__)
 KEYRING_BM_SERVICE = "BM"
 ERR_CODE_LOCK_SOLE_SERVICE_PROVIDER = "-1003"
+HOST_XML_ATTRIBUTES = ['hostname', 'personality', 'subfunctions',
+                       'mgmt_mac', 'mgmt_ip',
+                       'bm_ip', 'bm_type', 'bm_username',
+                       'bm_password', 'boot_device', 'rootfs_device',
+                       'install_output', 'console', 'vsc_controllers',
+                       'power_on', 'location']
 
 
 def _get_controller_address(hostname):
diff --git a/sysinv/sysinv/sysinv/sysinv/common/health.py b/sysinv/sysinv/sysinv/sysinv/common/health.py
index f02789d4b5..9f9fe8022a 100755
--- a/sysinv/sysinv/sysinv/sysinv/common/health.py
+++ b/sysinv/sysinv/sysinv/sysinv/common/health.py
@@ -1,12 +1,11 @@
 #
-# Copyright (c) 2018-2019 Wind River Systems, Inc.
+# Copyright (c) 2018-2020 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
 
 from eventlet.green import subprocess
 import os
-from controllerconfig import backup_restore
 from fm_api import fm_api
 
@@ -208,12 +207,14 @@ class Health(object):
 
     def _check_simplex_available_space(self):
         """Ensures there is free space for the backup"""
-        try:
-            backup_restore.check_size("/opt/backups", True)
-        except backup_restore.BackupFail:
-            return False
-        return True
+        # TODO: Switch this over to use Ansible
+        # try:
+        #     backup_restore.check_size("/opt/backups", True)
+        # except backup_restore.BackupFail:
+        #     return False
+        # return True
+        LOG.info("Skipping the free space check.")
 
     def _check_kube_nodes_ready(self):
         """Checks that each kubernetes node is ready"""